commit 4114fe70d7c3d500b9af66326ea7f2d7af77c75f Author: wangxujie Date: Mon Jan 20 13:22:16 2025 +0800 first commit diff --git a/.mxproject b/.mxproject new file mode 100644 index 0000000..7804383 --- /dev/null +++ b/.mxproject @@ -0,0 +1,44 @@ +[PreviousLibFiles] +LibFiles=Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_rcc.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_rcc_ex.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_bus.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_rcc.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_system.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_utils.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_flash.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_flash_ex.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_flash_ramfunc.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_gpio.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_gpio_ex.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_gpio.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_dma_ex.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_dma.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_dma.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_dmamux.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_pwr.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_pwr_ex.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_pwr.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_cortex.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_cortex.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal.h;Drivers\STM32F4xx_HAL_Driver\Inc\Legacy\stm32_hal_legacy.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_def.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_exti.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_exti.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_eth.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_spi.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_spi.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_tim.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_tim.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_tim_ex.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_uart.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_usart.h;Drivers\BSP\Components\lan8742\lan8742.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\ccp.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\chap_ms.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\chap-md5.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\chap-new.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\eap.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\eui64.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\fsm.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\ipcp.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\ipv6cp.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\lcp.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\magic.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\mppe.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\ppp.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\ppp_impl.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\ppp_opts.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\pppapi.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\pppcrypt.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\pppdebug.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\pppoe.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\pppol2tp.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\pppos.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\upap.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\vj.h;Middlewares\Third_Party\LwIP
\src\include\netif\ppp\ecp.h;Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_rcc.c;Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_rcc_ex.c;Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_flash.c;Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_flash_ex.c;Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_flash_ramfunc.c;Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_gpio.c;Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_dma_ex.c;Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_dma.c;Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_pwr.c;Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_pwr_ex.c;Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_cortex.c;Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal.c;Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_exti.c;Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_eth.c;Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_spi.c;Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_tim.c;Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_tim_ex.c;Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_uart.c;Drivers\BSP\Components\lan8742\lan8742.c;Middlewares\Third_Party\LwIP\src\netif\ppp\auth.c;Middlewares\Third_Party\LwIP\src\netif\ppp\ccp.c;Middlewares\Third_Party\LwIP\src\netif\ppp\chap_ms.c;Middlewares\Third_Party\LwIP\src\netif\ppp\chap-md5.c;Middlewares\Third_Party\LwIP\src\netif\ppp\chap-new.c;Middlewares\Third_Party\LwIP\src\netif\ppp\demand.c;Middlewares\Third_Party\LwIP\src\netif\ppp\eap.c;Middlewares\Third_Party\LwIP\src\netif\ppp\eui64.c;Middlewares\Third_Party\LwIP\src\netif\ppp\fsm.c;Middlewares\Third_Party\LwIP\src\netif\ppp\ipcp.c;Middlewares\Third_Party\LwIP\src\netif\ppp\ipv6cp.c;Middlewares\Third_Party\LwIP\src\netif\ppp\lcp.c;Middlewares\Third_Party\LwIP\src\netif\ppp\magic.c;Middlewares\Third_Party\LwIP\src\netif\ppp\mppe.c;Middlewares\Third_Party\LwIP\src\netif\ppp\multilink.c;Middlewares\Third_Party\LwIP\src\netif\ppp\ppp.c;Middlewares\Third_Party\LwIP\src\netif\ppp\pppapi.c;Middlewares\Third_Party\LwIP\src\netif\ppp\pppcrypt.c;Middlewares\Third_Party\LwIP\src\netif\ppp\pppoe.c;Middlewares\Third_Party\LwIP\src\netif\ppp\pppol2tp.c;Middlewares\Third_Party\LwIP\src\netif\ppp\pppos.c;Middlewares\Third_Party\LwIP\src\netif\ppp\upap.c;Middlewares\Third_Party\LwIP\src\netif\ppp\utils.c;Middlewares\Third_Party\LwIP\src\netif\ppp\vj.c;Middlewares\Third_Party\LwIP\src\netif\bridgeif.c;Middlewares\Third_Party\LwIP\src\netif\bridgeif_fdb.c;Middlewares\Third_Party\LwIP\src\netif\ethernet.c;Middlewares\Third_Party\LwIP\src\netif\lowpan6.c;Middlewares\Third_Party\LwIP\src\netif\lowpan6_ble.c;Middlewares\Third_Party\LwIP\src\netif\lowpan6_common.c;Middlewares\Third_Party\LwIP\src\netif\slipif.c;Middlewares\Third_Party\LwIP\src\netif\zepif.c;Middlewares\Third_Party\LwIP\src\netif\ppp\ecp.c;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_rcc.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_rcc_ex.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_bus.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_rcc.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_system.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_utils.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_flash.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_flash_ex.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_flash_ramfunc.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_gpio.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_gpio_ex.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_gpio.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_dma_ex.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_dma.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll
_dma.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_dmamux.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_pwr.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_pwr_ex.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_pwr.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_cortex.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_cortex.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal.h;Drivers\STM32F4xx_HAL_Driver\Inc\Legacy\stm32_hal_legacy.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_def.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_exti.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_exti.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_eth.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_spi.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_spi.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_tim.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_tim.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_tim_ex.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_hal_uart.h;Drivers\STM32F4xx_HAL_Driver\Inc\stm32f4xx_ll_usart.h;Drivers\BSP\Components\lan8742\lan8742.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\ccp.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\chap_ms.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\chap-md5.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\chap-new.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\eap.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\eui64.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\fsm.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\ipcp.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\ipv6cp.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\lcp.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\magic.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\mppe.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\ppp.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\ppp_impl.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\ppp_opts.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\pppapi.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\pppcrypt.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\pppdebug.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\pppoe.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\pppol2tp.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\pppos.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\upap.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\vj.h;Middlewares\Third_Party\LwIP\src\include\netif\ppp\ecp.h;Drivers\CMSIS\Device\ST\STM32F4xx\Include\stm32f407xx.h;Drivers\CMSIS\Device\ST\STM32F4xx\Include\stm32f4xx.h;Drivers\CMSIS\Device\ST\STM32F4xx\Include\system_stm32f4xx.h;Drivers\CMSIS\Device\ST\STM32F4xx\Include\system_stm32f4xx.h;Drivers\CMSIS\Device\ST\STM32F4xx\Source\Templates\system_stm32f4xx.c;Middlewares\Third_Party\LwIP\src\include\lwip\altcp.h;Middlewares\Third_Party\LwIP\src\include\lwip\altcp_tcp.h;Middlewares\Third_Party\LwIP\src\include\lwip\altcp_tls.h;Middlewares\Third_Party\LwIP\src\include\lwip\api.h;Middlewares\Third_Party\LwIP\src\include\lwip\arch.h;Middlewares\Third_Party\LwIP\src\include\lwip\autoip.h;Middlewares\Third_Party\LwIP\src\include\lwip\debug.h;Middlewares\Third_Party\LwIP\src\include\lwip\def.h;Middlewares\Third_Party\LwIP\src\include\lwip\dhcp.h;Middlewares\Third_Party\LwIP\src\include\lwip\dhcp6.h;Middlewares\Third_Party\LwIP\src\include\lwip\dns.h;Middlewares\Third_Party\LwIP\src\include\lwip\err.h;Middlewares\Third_Party\LwIP\src\include\lwip\errno.h;Middlewares\Third_Party\LwIP\src\include\lwip\etharp.h;Midd
lewares\Third_Party\LwIP\src\include\lwip\ethip6.h;Middlewares\Third_Party\LwIP\src\include\lwip\icmp.h;Middlewares\Third_Party\LwIP\src\include\lwip\icmp6.h;Middlewares\Third_Party\LwIP\src\include\lwip\if_api.h;Middlewares\Third_Party\LwIP\src\include\lwip\igmp.h;Middlewares\Third_Party\LwIP\src\include\lwip\inet.h;Middlewares\Third_Party\LwIP\src\include\lwip\inet_chksum.h;Middlewares\Third_Party\LwIP\src\include\lwip\init.h;Middlewares\Third_Party\LwIP\src\include\lwip\ip.h;Middlewares\Third_Party\LwIP\src\include\lwip\ip4.h;Middlewares\Third_Party\LwIP\src\include\lwip\ip4_addr.h;Middlewares\Third_Party\LwIP\src\include\lwip\ip4_frag.h;Middlewares\Third_Party\LwIP\src\include\lwip\ip6.h;Middlewares\Third_Party\LwIP\src\include\lwip\ip6_addr.h;Middlewares\Third_Party\LwIP\src\include\lwip\ip6_frag.h;Middlewares\Third_Party\LwIP\src\include\lwip\ip6_zone.h;Middlewares\Third_Party\LwIP\src\include\lwip\ip_addr.h;Middlewares\Third_Party\LwIP\src\include\lwip\mem.h;Middlewares\Third_Party\LwIP\src\include\lwip\memp.h;Middlewares\Third_Party\LwIP\src\include\lwip\mld6.h;Middlewares\Third_Party\LwIP\src\include\lwip\nd6.h;Middlewares\Third_Party\LwIP\src\include\lwip\netbuf.h;Middlewares\Third_Party\LwIP\src\include\lwip\netdb.h;Middlewares\Third_Party\LwIP\src\include\lwip\netif.h;Middlewares\Third_Party\LwIP\src\include\lwip\netifapi.h;Middlewares\Third_Party\LwIP\src\include\lwip\opt.h;Middlewares\Third_Party\LwIP\src\include\lwip\pbuf.h;Middlewares\Third_Party\LwIP\src\include\lwip\raw.h;Middlewares\Third_Party\LwIP\src\include\lwip\sio.h;Middlewares\Third_Party\LwIP\src\include\lwip\snmp.h;Middlewares\Third_Party\LwIP\src\include\lwip\sockets.h;Middlewares\Third_Party\LwIP\src\include\lwip\stats.h;Middlewares\Third_Party\LwIP\src\include\lwip\sys.h;Middlewares\Third_Party\LwIP\src\include\lwip\tcp.h;Middlewares\Third_Party\LwIP\src\include\lwip\tcpbase.h;Middlewares\Third_Party\LwIP\src\include\lwip\tcpip.h;Middlewares\Third_Party\LwIP\src\include\lwip\timeouts.h;Middlewares\Third_Party\LwIP\src\include\lwip\udp.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\altcp_proxyconnect.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\altcp_tls_mbedtls_opts.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\fs.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\httpd.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\httpd_opts.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\http_client.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\lwiperf.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\mdns.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\mdns_opts.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\mdns_priv.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\mqtt.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\mqtt_opts.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\mqtt_priv.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\netbiosns.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\netbiosns_opts.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\smtp.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\smtp_opts.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\snmp.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\snmpv3.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\snmp_core.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\snmp_mib2.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\snmp_opts.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\snmp_scalar.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\snmp_s
nmpv2_framework.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\snmp_snmpv2_usm.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\snmp_table.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\snmp_threadsync.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\sntp.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\sntp_opts.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\tftp_opts.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\tftp_server.h;Middlewares\Third_Party\LwIP\src\include\lwip\priv\altcp_priv.h;Middlewares\Third_Party\LwIP\src\include\lwip\priv\api_msg.h;Middlewares\Third_Party\LwIP\src\include\lwip\priv\memp_priv.h;Middlewares\Third_Party\LwIP\src\include\lwip\priv\memp_std.h;Middlewares\Third_Party\LwIP\src\include\lwip\priv\mem_priv.h;Middlewares\Third_Party\LwIP\src\include\lwip\priv\nd6_priv.h;Middlewares\Third_Party\LwIP\src\include\lwip\priv\raw_priv.h;Middlewares\Third_Party\LwIP\src\include\lwip\priv\sockets_priv.h;Middlewares\Third_Party\LwIP\src\include\lwip\priv\tcpip_priv.h;Middlewares\Third_Party\LwIP\src\include\lwip\priv\tcp_priv.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\autoip.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\dhcp.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\dhcp6.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\dns.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\etharp.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\ethernet.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\iana.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\icmp.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\icmp6.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\ieee.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\igmp.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\ip.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\ip4.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\ip6.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\mld6.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\nd6.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\tcp.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\udp.h;Middlewares\Third_Party\LwIP\src\include\netif\bridgeif.h;Middlewares\Third_Party\LwIP\src\include\netif\bridgeif_opts.h;Middlewares\Third_Party\LwIP\src\include\netif\etharp.h;Middlewares\Third_Party\LwIP\src\include\netif\ethernet.h;Middlewares\Third_Party\LwIP\src\include\netif\ieee802154.h;Middlewares\Third_Party\LwIP\src\include\netif\lowpan6.h;Middlewares\Third_Party\LwIP\src\include\netif\lowpan6_ble.h;Middlewares\Third_Party\LwIP\src\include\netif\lowpan6_common.h;Middlewares\Third_Party\LwIP\src\include\netif\lowpan6_opts.h;Middlewares\Third_Party\LwIP\src\include\netif\slipif.h;Middlewares\Third_Party\LwIP\src\include\netif\zepif.h;Middlewares\Third_Party\LwIP\src\include\compat\posix\netdb.h;Middlewares\Third_Party\LwIP\src\include\compat\posix\arpa\inet.h;Middlewares\Third_Party\LwIP\src\include\compat\posix\net\if.h;Middlewares\Third_Party\LwIP\src\include\compat\posix\sys\socket.h;Middlewares\Third_Party\LwIP\src\include\compat\stdc\errno.h;Middlewares\Third_Party\LwIP\system\arch\bpstruct.h;Middlewares\Third_Party\LwIP\system\arch\cc.h;Middlewares\Third_Party\LwIP\system\arch\cpu.h;Middlewares\Third_Party\LwIP\system\arch\epstruct.h;Middlewares\Third_Party\LwIP\system\arch\init.h;Middlewares\Third_Party\LwIP\system\arch\lib.h;Middlewares\Third_Party\LwIP\system\arch\perf.h;Middlewares\Third_Party\LwIP\system\arch\sys_arch.h;Middlewares\Third_Party\LwIP\src\api\api_lib.c;Midd
lewares\Third_Party\LwIP\src\api\api_msg.c;Middlewares\Third_Party\LwIP\src\api\err.c;Middlewares\Third_Party\LwIP\src\api\if_api.c;Middlewares\Third_Party\LwIP\src\api\netbuf.c;Middlewares\Third_Party\LwIP\src\api\netdb.c;Middlewares\Third_Party\LwIP\src\api\netifapi.c;Middlewares\Third_Party\LwIP\src\api\sockets.c;Middlewares\Third_Party\LwIP\src\api\tcpip.c;Middlewares\Third_Party\LwIP\src\core\altcp.c;Middlewares\Third_Party\LwIP\src\core\altcp_alloc.c;Middlewares\Third_Party\LwIP\src\core\altcp_tcp.c;Middlewares\Third_Party\LwIP\src\core\def.c;Middlewares\Third_Party\LwIP\src\core\dns.c;Middlewares\Third_Party\LwIP\src\core\inet_chksum.c;Middlewares\Third_Party\LwIP\src\core\init.c;Middlewares\Third_Party\LwIP\src\core\ip.c;Middlewares\Third_Party\LwIP\src\core\mem.c;Middlewares\Third_Party\LwIP\src\core\memp.c;Middlewares\Third_Party\LwIP\src\core\netif.c;Middlewares\Third_Party\LwIP\src\core\pbuf.c;Middlewares\Third_Party\LwIP\src\core\raw.c;Middlewares\Third_Party\LwIP\src\core\stats.c;Middlewares\Third_Party\LwIP\src\core\sys.c;Middlewares\Third_Party\LwIP\src\core\tcp.c;Middlewares\Third_Party\LwIP\src\core\tcp_in.c;Middlewares\Third_Party\LwIP\src\core\tcp_out.c;Middlewares\Third_Party\LwIP\src\core\timeouts.c;Middlewares\Third_Party\LwIP\src\core\udp.c;Middlewares\Third_Party\LwIP\src\core\ipv4\autoip.c;Middlewares\Third_Party\LwIP\src\core\ipv4\dhcp.c;Middlewares\Third_Party\LwIP\src\core\ipv4\etharp.c;Middlewares\Third_Party\LwIP\src\core\ipv4\icmp.c;Middlewares\Third_Party\LwIP\src\core\ipv4\igmp.c;Middlewares\Third_Party\LwIP\src\core\ipv4\ip4.c;Middlewares\Third_Party\LwIP\src\core\ipv4\ip4_addr.c;Middlewares\Third_Party\LwIP\src\core\ipv4\ip4_frag.c;Middlewares\Third_Party\LwIP\src\core\ipv6\dhcp6.c;Middlewares\Third_Party\LwIP\src\core\ipv6\ethip6.c;Middlewares\Third_Party\LwIP\src\core\ipv6\icmp6.c;Middlewares\Third_Party\LwIP\src\core\ipv6\inet6.c;Middlewares\Third_Party\LwIP\src\core\ipv6\ip6.c;Middlewares\Third_Party\LwIP\src\core\ipv6\ip6_addr.c;Middlewares\Third_Party\LwIP\src\core\ipv6\ip6_frag.c;Middlewares\Third_Party\LwIP\src\core\ipv6\mld6.c;Middlewares\Third_Party\LwIP\src\core\ipv6\nd6.c;Middlewares\Third_Party\LwIP\src\apps\mqtt\mqtt.c;Middlewares\Third_Party\LwIP\src\include\lwip\altcp.h;Middlewares\Third_Party\LwIP\src\include\lwip\altcp_tcp.h;Middlewares\Third_Party\LwIP\src\include\lwip\altcp_tls.h;Middlewares\Third_Party\LwIP\src\include\lwip\api.h;Middlewares\Third_Party\LwIP\src\include\lwip\arch.h;Middlewares\Third_Party\LwIP\src\include\lwip\autoip.h;Middlewares\Third_Party\LwIP\src\include\lwip\debug.h;Middlewares\Third_Party\LwIP\src\include\lwip\def.h;Middlewares\Third_Party\LwIP\src\include\lwip\dhcp.h;Middlewares\Third_Party\LwIP\src\include\lwip\dhcp6.h;Middlewares\Third_Party\LwIP\src\include\lwip\dns.h;Middlewares\Third_Party\LwIP\src\include\lwip\err.h;Middlewares\Third_Party\LwIP\src\include\lwip\errno.h;Middlewares\Third_Party\LwIP\src\include\lwip\etharp.h;Middlewares\Third_Party\LwIP\src\include\lwip\ethip6.h;Middlewares\Third_Party\LwIP\src\include\lwip\icmp.h;Middlewares\Third_Party\LwIP\src\include\lwip\icmp6.h;Middlewares\Third_Party\LwIP\src\include\lwip\if_api.h;Middlewares\Third_Party\LwIP\src\include\lwip\igmp.h;Middlewares\Third_Party\LwIP\src\include\lwip\inet.h;Middlewares\Third_Party\LwIP\src\include\lwip\inet_chksum.h;Middlewares\Third_Party\LwIP\src\include\lwip\init.h;Middlewares\Third_Party\LwIP\src\include\lwip\ip.h;Middlewares\Third_Party\LwIP\src\include\lwip\ip4.h;Middlewares\Third_Party\LwIP\src\include\lwip\ip4_add
r.h;Middlewares\Third_Party\LwIP\src\include\lwip\ip4_frag.h;Middlewares\Third_Party\LwIP\src\include\lwip\ip6.h;Middlewares\Third_Party\LwIP\src\include\lwip\ip6_addr.h;Middlewares\Third_Party\LwIP\src\include\lwip\ip6_frag.h;Middlewares\Third_Party\LwIP\src\include\lwip\ip6_zone.h;Middlewares\Third_Party\LwIP\src\include\lwip\ip_addr.h;Middlewares\Third_Party\LwIP\src\include\lwip\mem.h;Middlewares\Third_Party\LwIP\src\include\lwip\memp.h;Middlewares\Third_Party\LwIP\src\include\lwip\mld6.h;Middlewares\Third_Party\LwIP\src\include\lwip\nd6.h;Middlewares\Third_Party\LwIP\src\include\lwip\netbuf.h;Middlewares\Third_Party\LwIP\src\include\lwip\netdb.h;Middlewares\Third_Party\LwIP\src\include\lwip\netif.h;Middlewares\Third_Party\LwIP\src\include\lwip\netifapi.h;Middlewares\Third_Party\LwIP\src\include\lwip\opt.h;Middlewares\Third_Party\LwIP\src\include\lwip\pbuf.h;Middlewares\Third_Party\LwIP\src\include\lwip\raw.h;Middlewares\Third_Party\LwIP\src\include\lwip\sio.h;Middlewares\Third_Party\LwIP\src\include\lwip\snmp.h;Middlewares\Third_Party\LwIP\src\include\lwip\sockets.h;Middlewares\Third_Party\LwIP\src\include\lwip\stats.h;Middlewares\Third_Party\LwIP\src\include\lwip\sys.h;Middlewares\Third_Party\LwIP\src\include\lwip\tcp.h;Middlewares\Third_Party\LwIP\src\include\lwip\tcpbase.h;Middlewares\Third_Party\LwIP\src\include\lwip\tcpip.h;Middlewares\Third_Party\LwIP\src\include\lwip\timeouts.h;Middlewares\Third_Party\LwIP\src\include\lwip\udp.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\altcp_proxyconnect.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\altcp_tls_mbedtls_opts.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\fs.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\httpd.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\httpd_opts.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\http_client.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\lwiperf.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\mdns.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\mdns_opts.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\mdns_priv.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\mqtt.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\mqtt_opts.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\mqtt_priv.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\netbiosns.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\netbiosns_opts.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\smtp.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\smtp_opts.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\snmp.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\snmpv3.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\snmp_core.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\snmp_mib2.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\snmp_opts.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\snmp_scalar.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\snmp_snmpv2_framework.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\snmp_snmpv2_usm.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\snmp_table.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\snmp_threadsync.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\sntp.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\sntp_opts.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\tftp_opts.h;Middlewares\Third_Party\LwIP\src\include\lwip\apps\tftp_server.h;Middlewares\Third_Party\LwIP\src\include\lwip\priv\altcp_priv.h;Middlewares\Third_Party\LwIP\src\include\lwip\priv\api_
msg.h;Middlewares\Third_Party\LwIP\src\include\lwip\priv\memp_priv.h;Middlewares\Third_Party\LwIP\src\include\lwip\priv\memp_std.h;Middlewares\Third_Party\LwIP\src\include\lwip\priv\mem_priv.h;Middlewares\Third_Party\LwIP\src\include\lwip\priv\nd6_priv.h;Middlewares\Third_Party\LwIP\src\include\lwip\priv\raw_priv.h;Middlewares\Third_Party\LwIP\src\include\lwip\priv\sockets_priv.h;Middlewares\Third_Party\LwIP\src\include\lwip\priv\tcpip_priv.h;Middlewares\Third_Party\LwIP\src\include\lwip\priv\tcp_priv.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\autoip.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\dhcp.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\dhcp6.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\dns.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\etharp.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\ethernet.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\iana.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\icmp.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\icmp6.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\ieee.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\igmp.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\ip.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\ip4.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\ip6.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\mld6.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\nd6.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\tcp.h;Middlewares\Third_Party\LwIP\src\include\lwip\prot\udp.h;Middlewares\Third_Party\LwIP\src\include\netif\bridgeif.h;Middlewares\Third_Party\LwIP\src\include\netif\bridgeif_opts.h;Middlewares\Third_Party\LwIP\src\include\netif\etharp.h;Middlewares\Third_Party\LwIP\src\include\netif\ethernet.h;Middlewares\Third_Party\LwIP\src\include\netif\ieee802154.h;Middlewares\Third_Party\LwIP\src\include\netif\lowpan6.h;Middlewares\Third_Party\LwIP\src\include\netif\lowpan6_ble.h;Middlewares\Third_Party\LwIP\src\include\netif\lowpan6_common.h;Middlewares\Third_Party\LwIP\src\include\netif\lowpan6_opts.h;Middlewares\Third_Party\LwIP\src\include\netif\slipif.h;Middlewares\Third_Party\LwIP\src\include\netif\zepif.h;Middlewares\Third_Party\LwIP\src\include\compat\posix\netdb.h;Middlewares\Third_Party\LwIP\src\include\compat\posix\arpa\inet.h;Middlewares\Third_Party\LwIP\src\include\compat\posix\net\if.h;Middlewares\Third_Party\LwIP\src\include\compat\posix\sys\socket.h;Middlewares\Third_Party\LwIP\src\include\compat\stdc\errno.h;Middlewares\Third_Party\LwIP\system\arch\bpstruct.h;Middlewares\Third_Party\LwIP\system\arch\cc.h;Middlewares\Third_Party\LwIP\system\arch\cpu.h;Middlewares\Third_Party\LwIP\system\arch\epstruct.h;Middlewares\Third_Party\LwIP\system\arch\init.h;Middlewares\Third_Party\LwIP\system\arch\lib.h;Middlewares\Third_Party\LwIP\system\arch\perf.h;Middlewares\Third_Party\LwIP\system\arch\sys_arch.h;Drivers\CMSIS\Include\cachel1_armv7.h;Drivers\CMSIS\Include\cmsis_armcc.h;Drivers\CMSIS\Include\cmsis_armclang.h;Drivers\CMSIS\Include\cmsis_armclang_ltm.h;Drivers\CMSIS\Include\cmsis_compiler.h;Drivers\CMSIS\Include\cmsis_gcc.h;Drivers\CMSIS\Include\cmsis_iccarm.h;Drivers\CMSIS\Include\cmsis_version.h;Drivers\CMSIS\Include\core_armv81mml.h;Drivers\CMSIS\Include\core_armv8mbl.h;Drivers\CMSIS\Include\core_armv8mml.h;Drivers\CMSIS\Include\core_cm0.h;Drivers\CMSIS\Include\core_cm0plus.h;Drivers\CMSIS\Include\core_cm1.h;Drivers\CMSIS\Include\core_cm23.h;Drivers\CMSIS\Include\core_cm3.h;Drivers\CMSIS\Include\core_cm33.h;Drivers\CMSIS\In
clude\core_cm35p.h;Drivers\CMSIS\Include\core_cm4.h;Drivers\CMSIS\Include\core_cm55.h;Drivers\CMSIS\Include\core_cm7.h;Drivers\CMSIS\Include\core_cm85.h;Drivers\CMSIS\Include\core_sc000.h;Drivers\CMSIS\Include\core_sc300.h;Drivers\CMSIS\Include\core_starmc1.h;Drivers\CMSIS\Include\mpu_armv7.h;Drivers\CMSIS\Include\mpu_armv8.h;Drivers\CMSIS\Include\pac_armv81.h;Drivers\CMSIS\Include\pmu_armv8.h;Drivers\CMSIS\Include\tz_context.h; + +[PreviousUsedKeilFiles] +SourceFiles=..\Core\Src\main.c;..\Core\Src\gpio.c;..\Core\Src\dma.c;..\LWIP\App\lwip.c;..\LWIP\Target\ethernetif.c;..\Core\Src\spi.c;..\Core\Src\tim.c;..\Core\Src\usart.c;..\Core\Src\stm32f4xx_it.c;..\Core\Src\stm32f4xx_hal_msp.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_rcc.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_rcc_ex.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_flash.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_flash_ex.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_flash_ramfunc.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_gpio.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_dma_ex.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_dma.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_pwr.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_pwr_ex.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_cortex.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_exti.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_eth.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_spi.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_tim.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_tim_ex.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_uart.c;..\Drivers\BSP\Components\lan8742\lan8742.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\auth.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\ccp.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\chap_ms.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\chap-md5.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\chap-new.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\demand.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\eap.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\eui64.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\fsm.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\ipcp.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\ipv6cp.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\lcp.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\magic.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\mppe.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\multilink.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\ppp.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\pppapi.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\pppcrypt.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\pppoe.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\pppol2tp.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\pppos.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\upap.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\utils.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\vj.c;..\Middlewares\Third_Party\LwIP\src\netif\bridgeif.c;..\Middlewares\Third_Party\LwIP\src\netif\bridgeif_fdb.c;..\Middlewares\Third_Party\LwIP\src\netif\ethernet.c;..\Middlewares\Third_Party\LwIP\src\netif\lowpan6.c;..\Middlewares\Third_Party\LwIP\src\netif\lowpan6_ble.c;..\Middlewares\Third_Party\LwIP\src\netif\lowpan6_common.c;..\Middlewares\Third_Party\LwIP\src\netif\slipif.c;..\Middlewares\Third_Party\LwIP\src\netif\zepif.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\ecp.
c;..\Drivers\CMSIS\Device\ST\STM32F4xx\Source\Templates\system_stm32f4xx.c;..\Middlewares\Third_Party\LwIP\src\api\api_lib.c;..\Middlewares\Third_Party\LwIP\src\api\api_msg.c;..\Middlewares\Third_Party\LwIP\src\api\err.c;..\Middlewares\Third_Party\LwIP\src\api\if_api.c;..\Middlewares\Third_Party\LwIP\src\api\netbuf.c;..\Middlewares\Third_Party\LwIP\src\api\netdb.c;..\Middlewares\Third_Party\LwIP\src\api\netifapi.c;..\Middlewares\Third_Party\LwIP\src\api\sockets.c;..\Middlewares\Third_Party\LwIP\src\api\tcpip.c;..\Middlewares\Third_Party\LwIP\src\core\altcp.c;..\Middlewares\Third_Party\LwIP\src\core\altcp_alloc.c;..\Middlewares\Third_Party\LwIP\src\core\altcp_tcp.c;..\Middlewares\Third_Party\LwIP\src\core\def.c;..\Middlewares\Third_Party\LwIP\src\core\dns.c;..\Middlewares\Third_Party\LwIP\src\core\inet_chksum.c;..\Middlewares\Third_Party\LwIP\src\core\init.c;..\Middlewares\Third_Party\LwIP\src\core\ip.c;..\Middlewares\Third_Party\LwIP\src\core\mem.c;..\Middlewares\Third_Party\LwIP\src\core\memp.c;..\Middlewares\Third_Party\LwIP\src\core\netif.c;..\Middlewares\Third_Party\LwIP\src\core\pbuf.c;..\Middlewares\Third_Party\LwIP\src\core\raw.c;..\Middlewares\Third_Party\LwIP\src\core\stats.c;..\Middlewares\Third_Party\LwIP\src\core\sys.c;..\Middlewares\Third_Party\LwIP\src\core\tcp.c;..\Middlewares\Third_Party\LwIP\src\core\tcp_in.c;..\Middlewares\Third_Party\LwIP\src\core\tcp_out.c;..\Middlewares\Third_Party\LwIP\src\core\timeouts.c;..\Middlewares\Third_Party\LwIP\src\core\udp.c;..\Middlewares\Third_Party\LwIP\src\core\ipv4\autoip.c;..\Middlewares\Third_Party\LwIP\src\core\ipv4\dhcp.c;..\Middlewares\Third_Party\LwIP\src\core\ipv4\etharp.c;..\Middlewares\Third_Party\LwIP\src\core\ipv4\icmp.c;..\Middlewares\Third_Party\LwIP\src\core\ipv4\igmp.c;..\Middlewares\Third_Party\LwIP\src\core\ipv4\ip4.c;..\Middlewares\Third_Party\LwIP\src\core\ipv4\ip4_addr.c;..\Middlewares\Third_Party\LwIP\src\core\ipv4\ip4_frag.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\dhcp6.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\ethip6.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\icmp6.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\inet6.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\ip6.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\ip6_addr.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\ip6_frag.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\mld6.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\nd6.c;..\Middlewares\Third_Party\LwIP\src\apps\mqtt\mqtt.c;..\Core\Src\system_stm32f4xx.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_rcc.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_rcc_ex.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_flash.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_flash_ex.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_flash_ramfunc.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_gpio.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_dma_ex.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_dma.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_pwr.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_pwr_ex.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_cortex.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_exti.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_eth.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_spi.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_tim.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_tim_ex.c;..\Drivers\STM32F4xx_HAL_Driver\Src\stm32f4xx_hal_uart.c;..\Drivers\
BSP\Components\lan8742\lan8742.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\auth.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\ccp.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\chap_ms.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\chap-md5.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\chap-new.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\demand.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\eap.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\eui64.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\fsm.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\ipcp.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\ipv6cp.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\lcp.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\magic.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\mppe.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\multilink.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\ppp.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\pppapi.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\pppcrypt.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\pppoe.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\pppol2tp.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\pppos.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\upap.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\utils.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\vj.c;..\Middlewares\Third_Party\LwIP\src\netif\bridgeif.c;..\Middlewares\Third_Party\LwIP\src\netif\bridgeif_fdb.c;..\Middlewares\Third_Party\LwIP\src\netif\ethernet.c;..\Middlewares\Third_Party\LwIP\src\netif\lowpan6.c;..\Middlewares\Third_Party\LwIP\src\netif\lowpan6_ble.c;..\Middlewares\Third_Party\LwIP\src\netif\lowpan6_common.c;..\Middlewares\Third_Party\LwIP\src\netif\slipif.c;..\Middlewares\Third_Party\LwIP\src\netif\zepif.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\ecp.c;..\Drivers\CMSIS\Device\ST\STM32F4xx\Source\Templates\system_stm32f4xx.c;..\Middlewares\Third_Party\LwIP\src\api\api_lib.c;..\Middlewares\Third_Party\LwIP\src\api\api_msg.c;..\Middlewares\Third_Party\LwIP\src\api\err.c;..\Middlewares\Third_Party\LwIP\src\api\if_api.c;..\Middlewares\Third_Party\LwIP\src\api\netbuf.c;..\Middlewares\Third_Party\LwIP\src\api\netdb.c;..\Middlewares\Third_Party\LwIP\src\api\netifapi.c;..\Middlewares\Third_Party\LwIP\src\api\sockets.c;..\Middlewares\Third_Party\LwIP\src\api\tcpip.c;..\Middlewares\Third_Party\LwIP\src\core\altcp.c;..\Middlewares\Third_Party\LwIP\src\core\altcp_alloc.c;..\Middlewares\Third_Party\LwIP\src\core\altcp_tcp.c;..\Middlewares\Third_Party\LwIP\src\core\def.c;..\Middlewares\Third_Party\LwIP\src\core\dns.c;..\Middlewares\Third_Party\LwIP\src\core\inet_chksum.c;..\Middlewares\Third_Party\LwIP\src\core\init.c;..\Middlewares\Third_Party\LwIP\src\core\ip.c;..\Middlewares\Third_Party\LwIP\src\core\mem.c;..\Middlewares\Third_Party\LwIP\src\core\memp.c;..\Middlewares\Third_Party\LwIP\src\core\netif.c;..\Middlewares\Third_Party\LwIP\src\core\pbuf.c;..\Middlewares\Third_Party\LwIP\src\core\raw.c;..\Middlewares\Third_Party\LwIP\src\core\stats.c;..\Middlewares\Third_Party\LwIP\src\core\sys.c;..\Middlewares\Third_Party\LwIP\src\core\tcp.c;..\Middlewares\Third_Party\LwIP\src\core\tcp_in.c;..\Middlewares\Third_Party\LwIP\src\core\tcp_out.c;..\Middlewares\Third_Party\LwIP\src\core\timeouts.c;..\Middlewares\Third_Party\LwIP\src\core\udp.c;..\Middlewares\Third_Party\LwIP\src\core\ipv4\autoip.c;..\Middlewares\Third_Party\LwIP\src\core\ipv4\dhcp.c;..\Middlewares\Third_Party\LwIP\src\core\ipv4\etharp.c;..\Middlewares\Third_Party\LwIP\src\core\ipv4\icmp.c;..\Middlewares\Third_Party\
LwIP\src\core\ipv4\igmp.c;..\Middlewares\Third_Party\LwIP\src\core\ipv4\ip4.c;..\Middlewares\Third_Party\LwIP\src\core\ipv4\ip4_addr.c;..\Middlewares\Third_Party\LwIP\src\core\ipv4\ip4_frag.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\dhcp6.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\ethip6.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\icmp6.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\inet6.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\ip6.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\ip6_addr.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\ip6_frag.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\mld6.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\nd6.c;..\Middlewares\Third_Party\LwIP\src\apps\mqtt\mqtt.c;..\Core\Src\system_stm32f4xx.c;;;..\Middlewares\Third_Party\LwIP\src\netif\ppp\auth.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\ccp.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\chap_ms.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\chap-md5.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\chap-new.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\demand.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\eap.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\eui64.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\fsm.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\ipcp.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\ipv6cp.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\lcp.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\magic.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\mppe.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\multilink.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\ppp.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\pppapi.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\pppcrypt.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\pppoe.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\pppol2tp.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\pppos.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\upap.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\utils.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\vj.c;..\Middlewares\Third_Party\LwIP\src\netif\bridgeif.c;..\Middlewares\Third_Party\LwIP\src\netif\bridgeif_fdb.c;..\Middlewares\Third_Party\LwIP\src\netif\ethernet.c;..\Middlewares\Third_Party\LwIP\src\netif\lowpan6.c;..\Middlewares\Third_Party\LwIP\src\netif\lowpan6_ble.c;..\Middlewares\Third_Party\LwIP\src\netif\lowpan6_common.c;..\Middlewares\Third_Party\LwIP\src\netif\slipif.c;..\Middlewares\Third_Party\LwIP\src\netif\zepif.c;..\Middlewares\Third_Party\LwIP\src\netif\ppp\ecp.c;..\Middlewares\Third_Party\LwIP\src\api\api_lib.c;..\Middlewares\Third_Party\LwIP\src\api\api_msg.c;..\Middlewares\Third_Party\LwIP\src\api\err.c;..\Middlewares\Third_Party\LwIP\src\api\if_api.c;..\Middlewares\Third_Party\LwIP\src\api\netbuf.c;..\Middlewares\Third_Party\LwIP\src\api\netdb.c;..\Middlewares\Third_Party\LwIP\src\api\netifapi.c;..\Middlewares\Third_Party\LwIP\src\api\sockets.c;..\Middlewares\Third_Party\LwIP\src\api\tcpip.c;..\Middlewares\Third_Party\LwIP\src\core\altcp.c;..\Middlewares\Third_Party\LwIP\src\core\altcp_alloc.c;..\Middlewares\Third_Party\LwIP\src\core\altcp_tcp.c;..\Middlewares\Third_Party\LwIP\src\core\def.c;..\Middlewares\Third_Party\LwIP\src\core\dns.c;..\Middlewares\Third_Party\LwIP\src\core\inet_chksum.c;..\Middlewares\Third_Party\LwIP\src\core\init.c;..\Middlewares\Third_Party\LwIP\src\core\ip.c;..\Middlewares\Third_Party\LwIP\src\core\mem.c;..\Middlewares\Third_Party\LwIP\src\core\memp.c;..\Middlewares\Third_Party\LwIP\src\core\netif.c;..\Middlewares\Third_
Party\LwIP\src\core\pbuf.c;..\Middlewares\Third_Party\LwIP\src\core\raw.c;..\Middlewares\Third_Party\LwIP\src\core\stats.c;..\Middlewares\Third_Party\LwIP\src\core\sys.c;..\Middlewares\Third_Party\LwIP\src\core\tcp.c;..\Middlewares\Third_Party\LwIP\src\core\tcp_in.c;..\Middlewares\Third_Party\LwIP\src\core\tcp_out.c;..\Middlewares\Third_Party\LwIP\src\core\timeouts.c;..\Middlewares\Third_Party\LwIP\src\core\udp.c;..\Middlewares\Third_Party\LwIP\src\core\ipv4\autoip.c;..\Middlewares\Third_Party\LwIP\src\core\ipv4\dhcp.c;..\Middlewares\Third_Party\LwIP\src\core\ipv4\etharp.c;..\Middlewares\Third_Party\LwIP\src\core\ipv4\icmp.c;..\Middlewares\Third_Party\LwIP\src\core\ipv4\igmp.c;..\Middlewares\Third_Party\LwIP\src\core\ipv4\ip4.c;..\Middlewares\Third_Party\LwIP\src\core\ipv4\ip4_addr.c;..\Middlewares\Third_Party\LwIP\src\core\ipv4\ip4_frag.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\dhcp6.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\ethip6.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\icmp6.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\inet6.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\ip6.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\ip6_addr.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\ip6_frag.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\mld6.c;..\Middlewares\Third_Party\LwIP\src\core\ipv6\nd6.c;..\Middlewares\Third_Party\LwIP\src\apps\mqtt\mqtt.c; +HeaderPath=..\Middlewares\Third_Party\LwIP\src\include;..\Middlewares\Third_Party\LwIP\system;..\Middlewares\Third_Party\LwIP\src\include;..\Middlewares\Third_Party\LwIP\system;..\Drivers\STM32F4xx_HAL_Driver\Inc;..\Drivers\STM32F4xx_HAL_Driver\Inc\Legacy;..\Drivers\BSP\Components\lan8742;..\Middlewares\Third_Party\LwIP\src\include\netif\ppp;..\Drivers\CMSIS\Device\ST\STM32F4xx\Include;..\Middlewares\Third_Party\LwIP\src\include\lwip;..\Middlewares\Third_Party\LwIP\src\include\lwip\apps;..\Middlewares\Third_Party\LwIP\src\include\lwip\priv;..\Middlewares\Third_Party\LwIP\src\include\lwip\prot;..\Middlewares\Third_Party\LwIP\src\include\netif;..\Middlewares\Third_Party\LwIP\src\include\compat\posix;..\Middlewares\Third_Party\LwIP\src\include\compat\posix\arpa;..\Middlewares\Third_Party\LwIP\src\include\compat\posix\net;..\Middlewares\Third_Party\LwIP\src\include\compat\posix\sys;..\Middlewares\Third_Party\LwIP\src\include\compat\stdc;..\Middlewares\Third_Party\LwIP\system\arch;..\Drivers\CMSIS\Include;..\Core\Inc;..\LWIP\App;..\LWIP\Target; +CDefines=USE_HAL_DRIVER;STM32F407xx;USE_HAL_DRIVER;USE_HAL_DRIVER; + +[PreviousGenFiles] +AdvancedFolderStructure=true +HeaderFileListSize=11 +HeaderFiles#0=..\Core\Inc\gpio.h +HeaderFiles#1=..\Core\Inc\dma.h +HeaderFiles#2=..\LWIP\App\lwip.h +HeaderFiles#3=..\LWIP\Target\lwipopts.h +HeaderFiles#4=..\LWIP\Target\ethernetif.h +HeaderFiles#5=..\Core\Inc\spi.h +HeaderFiles#6=..\Core\Inc\tim.h +HeaderFiles#7=..\Core\Inc\usart.h +HeaderFiles#8=..\Core\Inc\stm32f4xx_it.h +HeaderFiles#9=..\Core\Inc\stm32f4xx_hal_conf.h +HeaderFiles#10=..\Core\Inc\main.h +HeaderFolderListSize=3 +HeaderPath#0=..\Core\Inc +HeaderPath#1=..\LWIP\App +HeaderPath#2=..\LWIP\Target +HeaderFiles=; +SourceFileListSize=10 +SourceFiles#0=..\Core\Src\gpio.c +SourceFiles#1=..\Core\Src\dma.c +SourceFiles#2=..\LWIP\App\lwip.c +SourceFiles#3=..\LWIP\Target\ethernetif.c +SourceFiles#4=..\Core\Src\spi.c +SourceFiles#5=..\Core\Src\tim.c +SourceFiles#6=..\Core\Src\usart.c +SourceFiles#7=..\Core\Src\stm32f4xx_it.c +SourceFiles#8=..\Core\Src\stm32f4xx_hal_msp.c +SourceFiles#9=..\Core\Src\main.c +SourceFolderListSize=3 
+SourcePath#0=..\Core\Src
+SourcePath#1=..\LWIP\App
+SourcePath#2=..\LWIP\Target
+SourceFiles=;
+
diff --git a/.vscode/c_cpp_properties.json b/.vscode/c_cpp_properties.json
new file mode 100644
index 0000000..c06bbfd
--- /dev/null
+++ b/.vscode/c_cpp_properties.json
@@ -0,0 +1,21 @@
+{
+    "configurations": [
+        {
+            "name": "Win32",
+            "includePath": [
+                "${workspaceFolder}/**"
+            ],
+            "defines": [
+                "_DEBUG",
+                "UNICODE",
+                "_UNICODE"
+            ],
+            "windowsSdkVersion": "10.0.22621.0",
+            "compilerPath": "cl.exe",
+            "cStandard": "c17",
+            "cppStandard": "c++17",
+            "intelliSenseMode": "windows-msvc-x64"
+        }
+    ],
+    "version": 4
+}
\ No newline at end of file
diff --git a/.vscode/extensions.json b/.vscode/extensions.json
new file mode 100644
index 0000000..7e257db
--- /dev/null
+++ b/.vscode/extensions.json
@@ -0,0 +1,3 @@
+{
+    "recommendations": []
+}
\ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 0000000..7c21372
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,38 @@
+{
+    "C_Cpp.errorSquiggles": "disabled",
+    "files.associations": {
+        "main.h": "c",
+        "leds.h": "c",
+        "tim.h": "c",
+        "usart.h": "c",
+        "lwip.h": "c",
+        "stdio.h": "c",
+        "gpio.h": "c",
+        "xutility": "c",
+        "stm32f4xx_it.h": "c",
+        "spi.h": "c",
+        "dma.h": "c",
+        "stm32f4xx_ll_cortex.h": "c",
+        "stm32f4xx_ll_utils.h": "c",
+        "stm32f4xx_hal.h": "c",
+        "tcpclient.h": "c",
+        "stm32f4xx_hal_conf.h": "c",
+        "compare": "c",
+        "cstddef": "c",
+        "limits": "c",
+        "type_traits": "c",
+        "xmemory": "c",
+        "dac161s997.h": "c",
+        "cstring": "c",
+        "user_spi.h": "c",
+        "algorithm": "c",
+        "optional": "c",
+        "xtr1common": "c",
+        "ad7124.h": "c",
+        "ad7124_regs.h": "c",
+        "tcpserverc.h": "c",
+        "cstdlib": "c",
+        "ht1200m.h": "c",
+        "string.h": "c"
+    }
+}
\ No newline at end of file
diff --git a/Core/Inc/dma.h b/Core/Inc/dma.h
new file mode 100644
index 0000000..493d98e
--- /dev/null
+++ b/Core/Inc/dma.h
@@ -0,0 +1,52 @@
+/* USER CODE BEGIN Header */
+/**
+  ******************************************************************************
+  * @file    dma.h
+  * @brief   This file contains all the function prototypes for
+  *          the dma.c file
+  ******************************************************************************
+  * @attention
+  *
+  * Copyright (c) 2025 STMicroelectronics.
+  * All rights reserved.
+  *
+  * This software is licensed under terms that can be found in the LICENSE file
+  * in the root directory of this software component.
+  * If no LICENSE file comes with this software, it is provided AS-IS.
+  *
+  ******************************************************************************
+  */
+/* USER CODE END Header */
+/* Define to prevent recursive inclusion -------------------------------------*/
+#ifndef __DMA_H__
+#define __DMA_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Includes ------------------------------------------------------------------*/
+#include "main.h"
+
+/* DMA memory to memory transfer handles -------------------------------------*/
+
+/* USER CODE BEGIN Includes */
+
+/* USER CODE END Includes */
+
+/* USER CODE BEGIN Private defines */
+
+/* USER CODE END Private defines */
+
+void MX_DMA_Init(void);
+
+/* USER CODE BEGIN Prototypes */
+
+/* USER CODE END Prototypes */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __DMA_H__ */
+
diff --git a/Core/Inc/gpio.h b/Core/Inc/gpio.h
new file mode 100644
index 0000000..6d9fc61
--- /dev/null
+++ b/Core/Inc/gpio.h
@@ -0,0 +1,62 @@
+/* USER CODE BEGIN Header */
+/**
+  ******************************************************************************
+  * @file    gpio.h
+  * @brief   This file contains all the function prototypes for
+  *          the gpio.c file
+  ******************************************************************************
+  * @attention
+  *
+  * Copyright (c) 2024 STMicroelectronics.
+  * All rights reserved.
+  *
+  * This software is licensed under terms that can be found in the LICENSE file
+  * in the root directory of this software component.
+  * If no LICENSE file comes with this software, it is provided AS-IS.
+  *
+  ******************************************************************************
+  */
+/* USER CODE END Header */
+/* Define to prevent recursive inclusion -------------------------------------*/
+#ifndef __GPIO_H__
+#define __GPIO_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Includes ------------------------------------------------------------------*/
+#include "main.h"
+
+/* USER CODE BEGIN Includes */
+
+/* USER CODE END Includes */
+
+/* USER CODE BEGIN Private defines */
+  typedef enum
+  {
+    DI_1,
+    DI_2,
+    DI_3,
+    DI_4,
+    DO_1,
+    DO_2,
+    DO_3,
+    DO_4,
+    DO_EN,
+    DI_DO_MAX,
+  } gpio_e;
+/* USER CODE END Private defines */
+
+void MX_GPIO_Init(void);
+
+/* USER CODE BEGIN Prototypes */
+  extern void gpio_do_test(gpio_e gpio_num, GPIO_PinState state);
+  extern GPIO_PinState gpio_di_test(gpio_e gpio_num);
+/* USER CODE END Prototypes */
+
+#ifdef __cplusplus
+}
+#endif
+#endif /*__ GPIO_H__ */
+
diff --git a/Core/Inc/main.h b/Core/Inc/main.h
new file mode 100644
index 0000000..f1c3266
--- /dev/null
+++ b/Core/Inc/main.h
@@ -0,0 +1,177 @@
+/* USER CODE BEGIN Header */
+/**
+  ******************************************************************************
+  * @file  main.h
+  * @brief : Header for main.c file.
+  *          This file contains the common defines of the application.
+  ******************************************************************************
+  * @attention
+  *
+  * Copyright (c) 2024 STMicroelectronics.
+  * All rights reserved.
+  *
+  * This software is licensed under terms that can be found in the LICENSE file
+  * in the root directory of this software component.
+  * If no LICENSE file comes with this software, it is provided AS-IS.
+  *
+  ******************************************************************************
+  */
+/* USER CODE END Header */
+
+/* Define to prevent recursive inclusion -------------------------------------*/
+#ifndef __MAIN_H
+#define __MAIN_H
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/* Includes ------------------------------------------------------------------*/
+#include "stm32f4xx_hal.h"
+
+/* Private includes ----------------------------------------------------------*/
+/* USER CODE BEGIN Includes */
+#include "stdio.h"
+#include "tcpclient.h"
+#include "tcpserverc.h"
+#include "leds.h"
+#include
+/* USER CODE END Includes */
+
+/* Exported types ------------------------------------------------------------*/
+/* USER CODE BEGIN ET */
+extern uint8_t tcp_echo_flags; // flag: set to 1 once the connection is established
+#define ARRAY_LEN(arr) (sizeof(arr) / sizeof((arr)[0]))
+typedef struct
+{
+  uint16_t rx_num;           // number of bytes received
+  uint8_t rx_data[512];      // received data buffer
+  uint8_t rx_data_temp[512]; // temporary receive buffer
+  uint8_t tx_data[512];      // transmit data buffer
+} uart_t;
+
+extern uart_t lcd_uart4;
+extern uart_t ble2_uart3;
+extern uart_t ble1_uart6;
+extern uart_t hart1_uart5;
+extern uart_t hart2_uart2;
+#define DEST_IP_ADDR0 192
+#define DEST_IP_ADDR1 168
+#define DEST_IP_ADDR2 1
+#define DEST_IP_ADDR3 111
+
+#define DEST_PORT 5001
+
+#define UDP_SEREVER_PORT 5002
+#define UDP_CLIENT_PORT 5002
+
+#define LOCAL_PORT 5001
+
+  /* USER CODE END ET */
+
+  /* Exported constants --------------------------------------------------------*/
+  /* USER CODE BEGIN EC */
+
+  /* USER CODE END EC */
+
+  /* Exported macro ------------------------------------------------------------*/
+  /* USER CODE BEGIN EM */
+
+  /* USER CODE END EM */
+
+  /* Exported functions prototypes ---------------------------------------------*/
+  void Error_Handler(void);
+
+/* USER CODE BEGIN EFP */
+
+/* USER CODE END EFP */
+
+/* Private defines -----------------------------------------------------------*/
+#define ETH_RESET_Pin GPIO_PIN_0
+#define ETH_RESET_GPIO_Port GPIOC
+#define LED3_R_Pin GPIO_PIN_1
+#define LED3_R_GPIO_Port GPIOB
+#define LED3_G_Pin GPIO_PIN_2
+#define LED3_G_GPIO_Port GPIOB
+#define LED3_Y_Pin GPIO_PIN_7
+#define LED3_Y_GPIO_Port GPIOE
+#define LED2_R_Pin GPIO_PIN_12
+#define LED2_R_GPIO_Port GPIOE
+#define LED2_G_Pin GPIO_PIN_13
+#define LED2_G_GPIO_Port GPIOE
+#define LED2_Y_Pin GPIO_PIN_14
+#define LED2_Y_GPIO_Port GPIOE
+#define BLE2_TX_Pin GPIO_PIN_8
+#define BLE2_TX_GPIO_Port GPIOD
+#define BLE2_RX_Pin GPIO_PIN_9
+#define BLE2_RX_GPIO_Port GPIOD
+#define DO_CH4_Pin GPIO_PIN_11
+#define DO_CH4_GPIO_Port GPIOD
+#define DO_CH3_Pin GPIO_PIN_12
+#define DO_CH3_GPIO_Port GPIOD
+#define DO_CH1_Pin GPIO_PIN_13
+#define DO_CH1_GPIO_Port GPIOD
+#define DO_CH2_Pin GPIO_PIN_14
+#define DO_CH2_GPIO_Port GPIOD
+#define DO_EN_Pin GPIO_PIN_15
+#define DO_EN_GPIO_Port GPIOD
+#define BLE1_TX_Pin GPIO_PIN_6
+#define BLE1_TX_GPIO_Port GPIOC
+#define BLE1_RX_Pin GPIO_PIN_7
+#define BLE1_RX_GPIO_Port GPIOC
+#define DI_CH1_Pin GPIO_PIN_8
+#define DI_CH1_GPIO_Port GPIOC
+#define DI_CH2_Pin GPIO_PIN_9
+#define DI_CH2_GPIO_Port GPIOC
+#define HART1_RST_Pin GPIO_PIN_8
+#define HART1_RST_GPIO_Port GPIOA
+#define DI_CH3_Pin GPIO_PIN_11
+#define DI_CH3_GPIO_Port GPIOA
+#define DI_CH4_Pin GPIO_PIN_12
+#define DI_CH4_GPIO_Port GPIOA
+#define HART_CLK_Pin GPIO_PIN_15
+#define HART_CLK_GPIO_Port GPIOA
+#define LCD_TX_Pin GPIO_PIN_10
+#define LCD_TX_GPIO_Port GPIOC
+#define LCD_RX_Pin GPIO_PIN_11
+#define LCD_RX_GPIO_Port GPIOC
+#define HART1_TX_Pin GPIO_PIN_12
+#define HART1_TX_GPIO_Port GPIOC
+#define HART1_RTS_Pin GPIO_PIN_0
+#define HART1_RTS_GPIO_Port GPIOD
+#define HART1_OCD_Pin GPIO_PIN_1
+#define HART1_OCD_GPIO_Port GPIOD
+#define HART1_OCD_EXTI_IRQn EXTI1_IRQn
+#define HART1_RX_Pin GPIO_PIN_2
+#define HART1_RX_GPIO_Port GPIOD
+#define HART2_OCD_Pin GPIO_PIN_3
+#define HART2_OCD_GPIO_Port GPIOD
+#define HART2_OCD_EXTI_IRQn EXTI3_IRQn
+#define HART2_RTS_Pin GPIO_PIN_4
+#define HART2_RTS_GPIO_Port GPIOD
+#define HART2_TX_Pin GPIO_PIN_5
+#define HART2_TX_GPIO_Port GPIOD
+#define HART2_RX_Pin GPIO_PIN_6
+#define HART2_RX_GPIO_Port GPIOD
+#define HART2_RST_Pin GPIO_PIN_7
+#define HART2_RST_GPIO_Port GPIOD
+#define DAC1_CS_Pin GPIO_PIN_6
+#define DAC1_CS_GPIO_Port GPIOB
+#define DAC2_CS_Pin GPIO_PIN_7
+#define DAC2_CS_GPIO_Port GPIOB
+#define ADC_CS_Pin GPIO_PIN_0
+#define ADC_CS_GPIO_Port GPIOE
+#define AD7124_SYNC_Pin GPIO_PIN_1
+#define AD7124_SYNC_GPIO_Port GPIOE
+
+/* USER CODE BEGIN Private defines */
+#define TRUE 0
+#define FAIL -1
+  /* USER CODE END Private defines */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MAIN_H */
diff --git a/Core/Inc/spi.h b/Core/Inc/spi.h
new file mode 100644
index 0000000..0bd89bb
--- /dev/null
+++ b/Core/Inc/spi.h
@@ -0,0 +1,52 @@
+/* USER CODE BEGIN Header */
+/**
+  ******************************************************************************
+  * @file    spi.h
+  * @brief   This file contains all the function prototypes for
+  *          the spi.c file
+  ******************************************************************************
+  * @attention
+  *
+  * Copyright (c) 2025 STMicroelectronics.
+  * All rights reserved.
+  *
+  * This software is licensed under terms that can be found in the LICENSE file
+  * in the root directory of this software component.
+  * If no LICENSE file comes with this software, it is provided AS-IS.
+  *
+  ******************************************************************************
+  */
+/* USER CODE END Header */
+/* Define to prevent recursive inclusion -------------------------------------*/
+#ifndef __SPI_H__
+#define __SPI_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Includes ------------------------------------------------------------------*/
+#include "main.h"
+
+/* USER CODE BEGIN Includes */
+
+/* USER CODE END Includes */
+
+extern SPI_HandleTypeDef hspi1;
+
+/* USER CODE BEGIN Private defines */
+
+/* USER CODE END Private defines */
+
+void MX_SPI1_Init(void);
+
+/* USER CODE BEGIN Prototypes */
+
+/* USER CODE END Prototypes */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __SPI_H__ */
+
diff --git a/Core/Inc/stm32f4xx_hal_conf.h b/Core/Inc/stm32f4xx_hal_conf.h
new file mode 100644
index 0000000..eaa58b6
--- /dev/null
+++ b/Core/Inc/stm32f4xx_hal_conf.h
@@ -0,0 +1,498 @@
+/* USER CODE BEGIN Header */
+/**
+  ******************************************************************************
+  * @file    stm32f4xx_hal_conf_template.h
+  * @author  MCD Application Team
+  * @brief   HAL configuration template file.
+  *          This file should be copied to the application folder and renamed
+  *          to stm32f4xx_hal_conf.h.
+  ******************************************************************************
+  * @attention
+  *
+  * Copyright (c) 2017 STMicroelectronics.
+  * All rights reserved.
+  *
+  * This software is licensed under terms that can be found in the LICENSE file
+  * in the root directory of this software component.
+  * If no LICENSE file comes with this software, it is provided AS-IS.
+ * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_HAL_CONF_H +#define __STM32F4xx_HAL_CONF_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ + +/* ########################## Module Selection ############################## */ +/** + * @brief This is the list of modules to be used in the HAL driver + */ +#define HAL_MODULE_ENABLED + + /* #define HAL_CRYP_MODULE_ENABLED */ +/* #define HAL_ADC_MODULE_ENABLED */ +/* #define HAL_CAN_MODULE_ENABLED */ +/* #define HAL_CRC_MODULE_ENABLED */ +/* #define HAL_CAN_LEGACY_MODULE_ENABLED */ +/* #define HAL_DAC_MODULE_ENABLED */ +/* #define HAL_DCMI_MODULE_ENABLED */ +/* #define HAL_DMA2D_MODULE_ENABLED */ +#define HAL_ETH_MODULE_ENABLED +/* #define HAL_ETH_LEGACY_MODULE_ENABLED */ +/* #define HAL_NAND_MODULE_ENABLED */ +/* #define HAL_NOR_MODULE_ENABLED */ +/* #define HAL_PCCARD_MODULE_ENABLED */ +/* #define HAL_SRAM_MODULE_ENABLED */ +/* #define HAL_SDRAM_MODULE_ENABLED */ +/* #define HAL_HASH_MODULE_ENABLED */ +/* #define HAL_I2C_MODULE_ENABLED */ +/* #define HAL_I2S_MODULE_ENABLED */ +/* #define HAL_IWDG_MODULE_ENABLED */ +/* #define HAL_LTDC_MODULE_ENABLED */ +/* #define HAL_RNG_MODULE_ENABLED */ +/* #define HAL_RTC_MODULE_ENABLED */ +/* #define HAL_SAI_MODULE_ENABLED */ +/* #define HAL_SD_MODULE_ENABLED */ +/* #define HAL_MMC_MODULE_ENABLED */ +#define HAL_SPI_MODULE_ENABLED +#define HAL_TIM_MODULE_ENABLED +#define HAL_UART_MODULE_ENABLED +/* #define HAL_USART_MODULE_ENABLED */ +/* #define HAL_IRDA_MODULE_ENABLED */ +/* #define HAL_SMARTCARD_MODULE_ENABLED */ +/* #define HAL_SMBUS_MODULE_ENABLED */ +/* #define HAL_WWDG_MODULE_ENABLED */ +/* #define HAL_PCD_MODULE_ENABLED */ +/* #define HAL_HCD_MODULE_ENABLED */ +/* #define HAL_DSI_MODULE_ENABLED */ +/* #define HAL_QSPI_MODULE_ENABLED */ +/* #define HAL_QSPI_MODULE_ENABLED */ +/* #define HAL_CEC_MODULE_ENABLED */ +/* #define HAL_FMPI2C_MODULE_ENABLED */ +/* #define HAL_FMPSMBUS_MODULE_ENABLED */ +/* #define HAL_SPDIFRX_MODULE_ENABLED */ +/* #define HAL_DFSDM_MODULE_ENABLED */ +/* #define HAL_LPTIM_MODULE_ENABLED */ +#define HAL_GPIO_MODULE_ENABLED +#define HAL_EXTI_MODULE_ENABLED +#define HAL_DMA_MODULE_ENABLED +#define HAL_RCC_MODULE_ENABLED +#define HAL_FLASH_MODULE_ENABLED +#define HAL_PWR_MODULE_ENABLED +#define HAL_CORTEX_MODULE_ENABLED + +/* ########################## HSE/HSI Values adaptation ##################### */ +/** + * @brief Adjust the value of External High Speed oscillator (HSE) used in your application. + * This value is used by the RCC HAL module to compute the system frequency + * (when HSE is used as system clock source, directly or through the PLL). + */ +#if !defined (HSE_VALUE) + #define HSE_VALUE 11059200U /*!< Value of the External oscillator in Hz */ +#endif /* HSE_VALUE */ + +#if !defined (HSE_STARTUP_TIMEOUT) + #define HSE_STARTUP_TIMEOUT 100U /*!< Time out for HSE start up, in ms */ +#endif /* HSE_STARTUP_TIMEOUT */ + +/** + * @brief Internal High Speed oscillator (HSI) value. + * This value is used by the RCC HAL module to compute the system frequency + * (when HSI is used as system clock source, directly or through the PLL). 
+ */ +#if !defined (HSI_VALUE) + #define HSI_VALUE ((uint32_t)16000000U) /*!< Value of the Internal oscillator in Hz*/ +#endif /* HSI_VALUE */ + +/** + * @brief Internal Low Speed oscillator (LSI) value. + */ +#if !defined (LSI_VALUE) + #define LSI_VALUE 32000U /*!< LSI Typical Value in Hz*/ +#endif /* LSI_VALUE */ /*!< Value of the Internal Low Speed oscillator in Hz + The real value may vary depending on the variations + in voltage and temperature.*/ +/** + * @brief External Low Speed oscillator (LSE) value. + */ +#if !defined (LSE_VALUE) + #define LSE_VALUE 32768U /*!< Value of the External Low Speed oscillator in Hz */ +#endif /* LSE_VALUE */ + +#if !defined (LSE_STARTUP_TIMEOUT) + #define LSE_STARTUP_TIMEOUT 5000U /*!< Time out for LSE start up, in ms */ +#endif /* LSE_STARTUP_TIMEOUT */ + +/** + * @brief External clock source for I2S peripheral + * This value is used by the I2S HAL module to compute the I2S clock source + * frequency, this source is inserted directly through I2S_CKIN pad. + */ +#if !defined (EXTERNAL_CLOCK_VALUE) + #define EXTERNAL_CLOCK_VALUE 12288000U /*!< Value of the External audio frequency in Hz*/ +#endif /* EXTERNAL_CLOCK_VALUE */ + +/* Tip: To avoid modifying this file each time you need to use different HSE, + === you can define the HSE value in your toolchain compiler preprocessor. */ + +/* ########################### System Configuration ######################### */ +/** + * @brief This is the HAL system configuration section + */ +#define VDD_VALUE 3300U /*!< Value of VDD in mv */ +#define TICK_INT_PRIORITY 15U /*!< tick interrupt priority */ +#define USE_RTOS 0U +#define PREFETCH_ENABLE 1U +#define INSTRUCTION_CACHE_ENABLE 1U +#define DATA_CACHE_ENABLE 1U + +#define USE_HAL_ADC_REGISTER_CALLBACKS 0U /* ADC register callback disabled */ +#define USE_HAL_CAN_REGISTER_CALLBACKS 0U /* CAN register callback disabled */ +#define USE_HAL_CEC_REGISTER_CALLBACKS 0U /* CEC register callback disabled */ +#define USE_HAL_CRYP_REGISTER_CALLBACKS 0U /* CRYP register callback disabled */ +#define USE_HAL_DAC_REGISTER_CALLBACKS 0U /* DAC register callback disabled */ +#define USE_HAL_DCMI_REGISTER_CALLBACKS 0U /* DCMI register callback disabled */ +#define USE_HAL_DFSDM_REGISTER_CALLBACKS 0U /* DFSDM register callback disabled */ +#define USE_HAL_DMA2D_REGISTER_CALLBACKS 0U /* DMA2D register callback disabled */ +#define USE_HAL_DSI_REGISTER_CALLBACKS 0U /* DSI register callback disabled */ +#define USE_HAL_ETH_REGISTER_CALLBACKS 0U /* ETH register callback disabled */ +#define USE_HAL_HASH_REGISTER_CALLBACKS 0U /* HASH register callback disabled */ +#define USE_HAL_HCD_REGISTER_CALLBACKS 0U /* HCD register callback disabled */ +#define USE_HAL_I2C_REGISTER_CALLBACKS 0U /* I2C register callback disabled */ +#define USE_HAL_FMPI2C_REGISTER_CALLBACKS 0U /* FMPI2C register callback disabled */ +#define USE_HAL_FMPSMBUS_REGISTER_CALLBACKS 0U /* FMPSMBUS register callback disabled */ +#define USE_HAL_I2S_REGISTER_CALLBACKS 0U /* I2S register callback disabled */ +#define USE_HAL_IRDA_REGISTER_CALLBACKS 0U /* IRDA register callback disabled */ +#define USE_HAL_LPTIM_REGISTER_CALLBACKS 0U /* LPTIM register callback disabled */ +#define USE_HAL_LTDC_REGISTER_CALLBACKS 0U /* LTDC register callback disabled */ +#define USE_HAL_MMC_REGISTER_CALLBACKS 0U /* MMC register callback disabled */ +#define USE_HAL_NAND_REGISTER_CALLBACKS 0U /* NAND register callback disabled */ +#define USE_HAL_NOR_REGISTER_CALLBACKS 0U /* NOR register callback disabled */ +#define 
USE_HAL_PCCARD_REGISTER_CALLBACKS 0U /* PCCARD register callback disabled */ +#define USE_HAL_PCD_REGISTER_CALLBACKS 0U /* PCD register callback disabled */ +#define USE_HAL_QSPI_REGISTER_CALLBACKS 0U /* QSPI register callback disabled */ +#define USE_HAL_RNG_REGISTER_CALLBACKS 0U /* RNG register callback disabled */ +#define USE_HAL_RTC_REGISTER_CALLBACKS 0U /* RTC register callback disabled */ +#define USE_HAL_SAI_REGISTER_CALLBACKS 0U /* SAI register callback disabled */ +#define USE_HAL_SD_REGISTER_CALLBACKS 0U /* SD register callback disabled */ +#define USE_HAL_SMARTCARD_REGISTER_CALLBACKS 0U /* SMARTCARD register callback disabled */ +#define USE_HAL_SDRAM_REGISTER_CALLBACKS 0U /* SDRAM register callback disabled */ +#define USE_HAL_SRAM_REGISTER_CALLBACKS 0U /* SRAM register callback disabled */ +#define USE_HAL_SPDIFRX_REGISTER_CALLBACKS 0U /* SPDIFRX register callback disabled */ +#define USE_HAL_SMBUS_REGISTER_CALLBACKS 0U /* SMBUS register callback disabled */ +#define USE_HAL_SPI_REGISTER_CALLBACKS 0U /* SPI register callback disabled */ +#define USE_HAL_TIM_REGISTER_CALLBACKS 0U /* TIM register callback disabled */ +#define USE_HAL_UART_REGISTER_CALLBACKS 0U /* UART register callback disabled */ +#define USE_HAL_USART_REGISTER_CALLBACKS 0U /* USART register callback disabled */ +#define USE_HAL_WWDG_REGISTER_CALLBACKS 0U /* WWDG register callback disabled */ + +/* ########################## Assert Selection ############################## */ +/** + * @brief Uncomment the line below to expanse the "assert_param" macro in the + * HAL drivers code + */ +/* #define USE_FULL_ASSERT 1U */ + +/* ################## Ethernet peripheral configuration ##################### */ + +/* Section 1 : Ethernet peripheral configuration */ + +/* MAC ADDRESS: MAC_ADDR0:MAC_ADDR1:MAC_ADDR2:MAC_ADDR3:MAC_ADDR4:MAC_ADDR5 */ +#define MAC_ADDR0 2U +#define MAC_ADDR1 0U +#define MAC_ADDR2 0U +#define MAC_ADDR3 0U +#define MAC_ADDR4 0U +#define MAC_ADDR5 0U + +/* Definition of the Ethernet driver buffers size and count */ +#define ETH_RX_BUF_SIZE 1536 /* buffer size for receive */ +#define ETH_TX_BUF_SIZE ETH_MAX_PACKET_SIZE /* buffer size for transmit */ +#define ETH_RXBUFNB 4U /* 4 Rx buffers of size ETH_RX_BUF_SIZE */ +#define ETH_TXBUFNB 4U /* 4 Tx buffers of size ETH_TX_BUF_SIZE */ + +/* Section 2: PHY configuration section */ + +/* LAN8742A PHY Address*/ +#define LAN8742A_PHY_ADDRESS 0x00U +/* PHY Reset delay these values are based on a 1 ms Systick interrupt*/ +#define PHY_RESET_DELAY 0x000000FFU +/* PHY Configuration delay */ +#define PHY_CONFIG_DELAY 0x00000FFFU + +#define PHY_READ_TO 0x0000FFFFU +#define PHY_WRITE_TO 0x0000FFFFU + +/* Section 3: Common PHY Registers */ + +#define PHY_BCR ((uint16_t)0x0000U) /*!< Transceiver Basic Control Register */ +#define PHY_BSR ((uint16_t)0x0001U) /*!< Transceiver Basic Status Register */ + +#define PHY_RESET ((uint16_t)0x8000U) /*!< PHY Reset */ +#define PHY_LOOPBACK ((uint16_t)0x4000U) /*!< Select loop-back mode */ +#define PHY_FULLDUPLEX_100M ((uint16_t)0x2100U) /*!< Set the full-duplex mode at 100 Mb/s */ +#define PHY_HALFDUPLEX_100M ((uint16_t)0x2000U) /*!< Set the half-duplex mode at 100 Mb/s */ +#define PHY_FULLDUPLEX_10M ((uint16_t)0x0100U) /*!< Set the full-duplex mode at 10 Mb/s */ +#define PHY_HALFDUPLEX_10M ((uint16_t)0x0000U) /*!< Set the half-duplex mode at 10 Mb/s */ +#define PHY_AUTONEGOTIATION ((uint16_t)0x1000U) /*!< Enable auto-negotiation function */ +#define PHY_RESTART_AUTONEGOTIATION ((uint16_t)0x0200U) /*!< Restart auto-negotiation 
function */ +#define PHY_POWERDOWN ((uint16_t)0x0800U) /*!< Select the power down mode */ +#define PHY_ISOLATE ((uint16_t)0x0400U) /*!< Isolate PHY from MII */ + +#define PHY_AUTONEGO_COMPLETE ((uint16_t)0x0020U) /*!< Auto-Negotiation process completed */ +#define PHY_LINKED_STATUS ((uint16_t)0x0004U) /*!< Valid link established */ +#define PHY_JABBER_DETECTION ((uint16_t)0x0002U) /*!< Jabber condition detected */ + +/* Section 4: Extended PHY Registers */ +#define PHY_SR ((uint16_t)0x001FU) /*!< PHY status register Offset */ + +#define PHY_SPEED_STATUS ((uint16_t)0x0004U) /*!< PHY Speed mask */ +#define PHY_DUPLEX_STATUS ((uint16_t)0x0010U) /*!< PHY Duplex mask */ + +#define PHY_ISFR ((uint16_t)0x1DU) /*!< PHY Interrupt Source Flag register Offset */ +#define PHY_ISFR_INT4 ((uint16_t)0x0010U) /*!< PHY Link down inturrupt */ + +/* ################## SPI peripheral configuration ########################## */ + +/* CRC FEATURE: Use to activate CRC feature inside HAL SPI Driver +* Activated: CRC code is present inside driver +* Deactivated: CRC code cleaned from driver +*/ + +#define USE_SPI_CRC 0U + +/* Includes ------------------------------------------------------------------*/ +/** + * @brief Include module's header file + */ + +#ifdef HAL_RCC_MODULE_ENABLED + #include "stm32f4xx_hal_rcc.h" +#endif /* HAL_RCC_MODULE_ENABLED */ + +#ifdef HAL_GPIO_MODULE_ENABLED + #include "stm32f4xx_hal_gpio.h" +#endif /* HAL_GPIO_MODULE_ENABLED */ + +#ifdef HAL_EXTI_MODULE_ENABLED + #include "stm32f4xx_hal_exti.h" +#endif /* HAL_EXTI_MODULE_ENABLED */ + +#ifdef HAL_DMA_MODULE_ENABLED + #include "stm32f4xx_hal_dma.h" +#endif /* HAL_DMA_MODULE_ENABLED */ + +#ifdef HAL_CORTEX_MODULE_ENABLED + #include "stm32f4xx_hal_cortex.h" +#endif /* HAL_CORTEX_MODULE_ENABLED */ + +#ifdef HAL_ADC_MODULE_ENABLED + #include "stm32f4xx_hal_adc.h" +#endif /* HAL_ADC_MODULE_ENABLED */ + +#ifdef HAL_CAN_MODULE_ENABLED + #include "stm32f4xx_hal_can.h" +#endif /* HAL_CAN_MODULE_ENABLED */ + +#ifdef HAL_CAN_LEGACY_MODULE_ENABLED + #include "stm32f4xx_hal_can_legacy.h" +#endif /* HAL_CAN_LEGACY_MODULE_ENABLED */ + +#ifdef HAL_CRC_MODULE_ENABLED + #include "stm32f4xx_hal_crc.h" +#endif /* HAL_CRC_MODULE_ENABLED */ + +#ifdef HAL_CRYP_MODULE_ENABLED + #include "stm32f4xx_hal_cryp.h" +#endif /* HAL_CRYP_MODULE_ENABLED */ + +#ifdef HAL_DMA2D_MODULE_ENABLED + #include "stm32f4xx_hal_dma2d.h" +#endif /* HAL_DMA2D_MODULE_ENABLED */ + +#ifdef HAL_DAC_MODULE_ENABLED + #include "stm32f4xx_hal_dac.h" +#endif /* HAL_DAC_MODULE_ENABLED */ + +#ifdef HAL_DCMI_MODULE_ENABLED + #include "stm32f4xx_hal_dcmi.h" +#endif /* HAL_DCMI_MODULE_ENABLED */ + +#ifdef HAL_ETH_MODULE_ENABLED + #include "stm32f4xx_hal_eth.h" +#endif /* HAL_ETH_MODULE_ENABLED */ + +#ifdef HAL_ETH_LEGACY_MODULE_ENABLED + #include "stm32f4xx_hal_eth_legacy.h" +#endif /* HAL_ETH_LEGACY_MODULE_ENABLED */ + +#ifdef HAL_FLASH_MODULE_ENABLED + #include "stm32f4xx_hal_flash.h" +#endif /* HAL_FLASH_MODULE_ENABLED */ + +#ifdef HAL_SRAM_MODULE_ENABLED + #include "stm32f4xx_hal_sram.h" +#endif /* HAL_SRAM_MODULE_ENABLED */ + +#ifdef HAL_NOR_MODULE_ENABLED + #include "stm32f4xx_hal_nor.h" +#endif /* HAL_NOR_MODULE_ENABLED */ + +#ifdef HAL_NAND_MODULE_ENABLED + #include "stm32f4xx_hal_nand.h" +#endif /* HAL_NAND_MODULE_ENABLED */ + +#ifdef HAL_PCCARD_MODULE_ENABLED + #include "stm32f4xx_hal_pccard.h" +#endif /* HAL_PCCARD_MODULE_ENABLED */ + +#ifdef HAL_SDRAM_MODULE_ENABLED + #include "stm32f4xx_hal_sdram.h" +#endif /* HAL_SDRAM_MODULE_ENABLED */ + +#ifdef HAL_HASH_MODULE_ENABLED + #include 
"stm32f4xx_hal_hash.h" +#endif /* HAL_HASH_MODULE_ENABLED */ + +#ifdef HAL_I2C_MODULE_ENABLED + #include "stm32f4xx_hal_i2c.h" +#endif /* HAL_I2C_MODULE_ENABLED */ + +#ifdef HAL_SMBUS_MODULE_ENABLED + #include "stm32f4xx_hal_smbus.h" +#endif /* HAL_SMBUS_MODULE_ENABLED */ + +#ifdef HAL_I2S_MODULE_ENABLED + #include "stm32f4xx_hal_i2s.h" +#endif /* HAL_I2S_MODULE_ENABLED */ + +#ifdef HAL_IWDG_MODULE_ENABLED + #include "stm32f4xx_hal_iwdg.h" +#endif /* HAL_IWDG_MODULE_ENABLED */ + +#ifdef HAL_LTDC_MODULE_ENABLED + #include "stm32f4xx_hal_ltdc.h" +#endif /* HAL_LTDC_MODULE_ENABLED */ + +#ifdef HAL_PWR_MODULE_ENABLED + #include "stm32f4xx_hal_pwr.h" +#endif /* HAL_PWR_MODULE_ENABLED */ + +#ifdef HAL_RNG_MODULE_ENABLED + #include "stm32f4xx_hal_rng.h" +#endif /* HAL_RNG_MODULE_ENABLED */ + +#ifdef HAL_RTC_MODULE_ENABLED + #include "stm32f4xx_hal_rtc.h" +#endif /* HAL_RTC_MODULE_ENABLED */ + +#ifdef HAL_SAI_MODULE_ENABLED + #include "stm32f4xx_hal_sai.h" +#endif /* HAL_SAI_MODULE_ENABLED */ + +#ifdef HAL_SD_MODULE_ENABLED + #include "stm32f4xx_hal_sd.h" +#endif /* HAL_SD_MODULE_ENABLED */ + +#ifdef HAL_SPI_MODULE_ENABLED + #include "stm32f4xx_hal_spi.h" +#endif /* HAL_SPI_MODULE_ENABLED */ + +#ifdef HAL_TIM_MODULE_ENABLED + #include "stm32f4xx_hal_tim.h" +#endif /* HAL_TIM_MODULE_ENABLED */ + +#ifdef HAL_UART_MODULE_ENABLED + #include "stm32f4xx_hal_uart.h" +#endif /* HAL_UART_MODULE_ENABLED */ + +#ifdef HAL_USART_MODULE_ENABLED + #include "stm32f4xx_hal_usart.h" +#endif /* HAL_USART_MODULE_ENABLED */ + +#ifdef HAL_IRDA_MODULE_ENABLED + #include "stm32f4xx_hal_irda.h" +#endif /* HAL_IRDA_MODULE_ENABLED */ + +#ifdef HAL_SMARTCARD_MODULE_ENABLED + #include "stm32f4xx_hal_smartcard.h" +#endif /* HAL_SMARTCARD_MODULE_ENABLED */ + +#ifdef HAL_WWDG_MODULE_ENABLED + #include "stm32f4xx_hal_wwdg.h" +#endif /* HAL_WWDG_MODULE_ENABLED */ + +#ifdef HAL_PCD_MODULE_ENABLED + #include "stm32f4xx_hal_pcd.h" +#endif /* HAL_PCD_MODULE_ENABLED */ + +#ifdef HAL_HCD_MODULE_ENABLED + #include "stm32f4xx_hal_hcd.h" +#endif /* HAL_HCD_MODULE_ENABLED */ + +#ifdef HAL_DSI_MODULE_ENABLED + #include "stm32f4xx_hal_dsi.h" +#endif /* HAL_DSI_MODULE_ENABLED */ + +#ifdef HAL_QSPI_MODULE_ENABLED + #include "stm32f4xx_hal_qspi.h" +#endif /* HAL_QSPI_MODULE_ENABLED */ + +#ifdef HAL_CEC_MODULE_ENABLED + #include "stm32f4xx_hal_cec.h" +#endif /* HAL_CEC_MODULE_ENABLED */ + +#ifdef HAL_FMPI2C_MODULE_ENABLED + #include "stm32f4xx_hal_fmpi2c.h" +#endif /* HAL_FMPI2C_MODULE_ENABLED */ + +#ifdef HAL_FMPSMBUS_MODULE_ENABLED + #include "stm32f4xx_hal_fmpsmbus.h" +#endif /* HAL_FMPSMBUS_MODULE_ENABLED */ + +#ifdef HAL_SPDIFRX_MODULE_ENABLED + #include "stm32f4xx_hal_spdifrx.h" +#endif /* HAL_SPDIFRX_MODULE_ENABLED */ + +#ifdef HAL_DFSDM_MODULE_ENABLED + #include "stm32f4xx_hal_dfsdm.h" +#endif /* HAL_DFSDM_MODULE_ENABLED */ + +#ifdef HAL_LPTIM_MODULE_ENABLED + #include "stm32f4xx_hal_lptim.h" +#endif /* HAL_LPTIM_MODULE_ENABLED */ + +#ifdef HAL_MMC_MODULE_ENABLED + #include "stm32f4xx_hal_mmc.h" +#endif /* HAL_MMC_MODULE_ENABLED */ + +/* Exported macro ------------------------------------------------------------*/ +#ifdef USE_FULL_ASSERT +/** + * @brief The assert_param macro is used for function's parameters check. + * @param expr If expr is false, it calls assert_failed function + * which reports the name of the source file and the source + * line number of the call that failed. + * If expr is true, it returns no value. + * @retval None + */ + #define assert_param(expr) ((expr) ? 
(void)0U : assert_failed((uint8_t *)__FILE__, __LINE__)) +/* Exported functions ------------------------------------------------------- */ + void assert_failed(uint8_t* file, uint32_t line); +#else + #define assert_param(expr) ((void)0U) +#endif /* USE_FULL_ASSERT */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_HAL_CONF_H */ diff --git a/Core/Inc/stm32f4xx_it.h b/Core/Inc/stm32f4xx_it.h new file mode 100644 index 0000000..169760e --- /dev/null +++ b/Core/Inc/stm32f4xx_it.h @@ -0,0 +1,81 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file stm32f4xx_it.h + * @brief This file contains the headers of the interrupt handlers. + ****************************************************************************** + * @attention + * + * Copyright (c) 2024 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_IT_H +#define __STM32F4xx_IT_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Private includes ----------------------------------------------------------*/ +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +/* Exported types ------------------------------------------------------------*/ +/* USER CODE BEGIN ET */ + +/* USER CODE END ET */ + +/* Exported constants --------------------------------------------------------*/ +/* USER CODE BEGIN EC */ + +/* USER CODE END EC */ + +/* Exported macro ------------------------------------------------------------*/ +/* USER CODE BEGIN EM */ + +/* USER CODE END EM */ + +/* Exported functions prototypes ---------------------------------------------*/ +void NMI_Handler(void); +void HardFault_Handler(void); +void MemManage_Handler(void); +void BusFault_Handler(void); +void UsageFault_Handler(void); +void SVC_Handler(void); +void DebugMon_Handler(void); +void PendSV_Handler(void); +void SysTick_Handler(void); +void EXTI1_IRQHandler(void); +void EXTI3_IRQHandler(void); +void DMA1_Stream1_IRQHandler(void); +void DMA1_Stream2_IRQHandler(void); +void DMA1_Stream3_IRQHandler(void); +void DMA1_Stream4_IRQHandler(void); +void TIM3_IRQHandler(void); +void USART2_IRQHandler(void); +void USART3_IRQHandler(void); +void UART4_IRQHandler(void); +void UART5_IRQHandler(void); +void DMA2_Stream1_IRQHandler(void); +void ETH_IRQHandler(void); +void DMA2_Stream6_IRQHandler(void); +void USART6_IRQHandler(void); +/* USER CODE BEGIN EFP */ + +/* USER CODE END EFP */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_IT_H */ diff --git a/Core/Inc/tim.h b/Core/Inc/tim.h new file mode 100644 index 0000000..a898331 --- /dev/null +++ b/Core/Inc/tim.h @@ -0,0 +1,101 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file tim.h + * @brief This file contains all the function prototypes for + * the tim.c file + ****************************************************************************** + * @attention + * + * Copyright (c) 2024 STMicroelectronics. + * All rights reserved. 
+ * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ +/* USER CODE END Header */ +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __TIM_H__ +#define __TIM_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" + +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +extern TIM_HandleTypeDef htim2; + +extern TIM_HandleTypeDef htim3; + +/* USER CODE BEGIN Private defines */ +#define IS_TIM_IT_FLAG(TIMx) (LL_TIM_IsActiveFlag_UPDATE(TIMx) == 1) + +/** + * @brief TIM interrupt handler. + * @param TIMx TIM instance. + */ +#define TIM_IRQ_HANDLER(TIMx) \ + do \ + { \ + if (LL_TIM_IsActiveFlag_CC1(TIMx) == SET) \ + { \ + if (LL_TIM_IsEnabledIT_CC1(TIMx) == SET) \ + { \ + LL_TIM_ClearFlag_CC1(TIMx); \ + } \ + } \ + if (LL_TIM_IsActiveFlag_CC2(TIMx) == SET) \ + { \ + if (LL_TIM_IsEnabledIT_CC2(TIMx) == SET) \ + { \ + LL_TIM_ClearFlag_CC2(TIMx); \ + } \ + } \ + if (LL_TIM_IsActiveFlag_CC3(TIMx) == SET) \ + { \ + if (LL_TIM_IsEnabledIT_CC3(TIMx) == SET) \ + { \ + LL_TIM_ClearFlag_CC3(TIMx); \ + } \ + } \ + if (LL_TIM_IsActiveFlag_CC4(TIMx) == SET) \ + { \ + if (LL_TIM_IsEnabledIT_CC4(TIMx) == SET) \ + { \ + LL_TIM_ClearFlag_CC4(TIMx); \ + } \ + } \ + if (LL_TIM_IsActiveFlag_UPDATE(TIMx) == SET) \ + { \ + if (LL_TIM_IsEnabledIT_UPDATE(TIMx) == SET) \ + { \ + LL_TIM_ClearFlag_UPDATE(TIMx); \ + } \ + } \ + } while (__LINE__ == -1) +/* USER CODE END Private defines */ + +void MX_TIM2_Init(void); +void MX_TIM3_Init(void); + +void HAL_TIM_MspPostInit(TIM_HandleTypeDef *htim); + +/* USER CODE BEGIN Prototypes */ + +/* USER CODE END Prototypes */ + +#ifdef __cplusplus +} +#endif + +#endif /* __TIM_H__ */ + diff --git a/Core/Inc/usart.h b/Core/Inc/usart.h new file mode 100644 index 0000000..ed830b0 --- /dev/null +++ b/Core/Inc/usart.h @@ -0,0 +1,69 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file usart.h + * @brief This file contains all the function prototypes for + * the usart.c file + ****************************************************************************** + * @attention + * + * Copyright (c) 2025 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ +/* USER CODE END Header */ +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __USART_H__ +#define __USART_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" + +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +extern UART_HandleTypeDef huart4; + +extern UART_HandleTypeDef huart5; + +extern UART_HandleTypeDef huart2; + +extern UART_HandleTypeDef huart3; + +extern UART_HandleTypeDef huart6; + +/* USER CODE BEGIN Private defines */ + + // #define BUFFER_SIZE 100 + // extern volatile uint8_t rx_len; // length of one received frame + // extern volatile uint8_t recv_end_flag; // frame-reception-complete flag + // extern uint8_t rx_buffer[BUFFER_SIZE]; // receive data buffer + +/* USER CODE END Private defines */ + +void MX_UART4_Init(void); +void MX_UART5_Init(void); +void MX_USART2_UART_Init(void); +void MX_USART3_UART_Init(void); +void MX_USART6_UART_Init(void); + +/* USER CODE BEGIN Prototypes */ + void dma_usart_send(UART_HandleTypeDef *huart, uint8_t *buf, uint8_t len); // DMA UART transmit wrapper +/* USER CODE END Prototypes */ + +#ifdef __cplusplus +} +#endif + +#endif /* __USART_H__ */ + diff --git a/Core/Src/dma.c b/Core/Src/dma.c new file mode 100644 index 0000000..d9fcf42 --- /dev/null +++ b/Core/Src/dma.c @@ -0,0 +1,71 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file dma.c + * @brief This file provides code for the configuration + * of all the requested memory to memory DMA transfers. + ****************************************************************************** + * @attention + * + * Copyright (c) 2025 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS.
+ * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Includes ------------------------------------------------------------------*/ +#include "dma.h" + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +/*----------------------------------------------------------------------------*/ +/* Configure DMA */ +/*----------------------------------------------------------------------------*/ + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +/** + * Enable DMA controller clock + */ +void MX_DMA_Init(void) +{ + + /* DMA controller clock enable */ + __HAL_RCC_DMA2_CLK_ENABLE(); + __HAL_RCC_DMA1_CLK_ENABLE(); + + /* DMA interrupt init */ + /* DMA1_Stream1_IRQn interrupt configuration */ + HAL_NVIC_SetPriority(DMA1_Stream1_IRQn, 0, 0); + HAL_NVIC_EnableIRQ(DMA1_Stream1_IRQn); + /* DMA1_Stream2_IRQn interrupt configuration */ + HAL_NVIC_SetPriority(DMA1_Stream2_IRQn, 0, 0); + HAL_NVIC_EnableIRQ(DMA1_Stream2_IRQn); + /* DMA1_Stream3_IRQn interrupt configuration */ + HAL_NVIC_SetPriority(DMA1_Stream3_IRQn, 0, 0); + HAL_NVIC_EnableIRQ(DMA1_Stream3_IRQn); + /* DMA1_Stream4_IRQn interrupt configuration */ + HAL_NVIC_SetPriority(DMA1_Stream4_IRQn, 0, 0); + HAL_NVIC_EnableIRQ(DMA1_Stream4_IRQn); + /* DMA2_Stream1_IRQn interrupt configuration */ + HAL_NVIC_SetPriority(DMA2_Stream1_IRQn, 0, 0); + HAL_NVIC_EnableIRQ(DMA2_Stream1_IRQn); + /* DMA2_Stream6_IRQn interrupt configuration */ + HAL_NVIC_SetPriority(DMA2_Stream6_IRQn, 0, 0); + HAL_NVIC_EnableIRQ(DMA2_Stream6_IRQn); + +} + +/* USER CODE BEGIN 2 */ + +/* USER CODE END 2 */ + diff --git a/Core/Src/gpio.c b/Core/Src/gpio.c new file mode 100644 index 0000000..5cc59bc --- /dev/null +++ b/Core/Src/gpio.c @@ -0,0 +1,211 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file gpio.c + * @brief This file provides code for the configuration + * of all used GPIO pins. + ****************************************************************************** + * @attention + * + * Copyright (c) 2024 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Includes ------------------------------------------------------------------*/ +#include "gpio.h" + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +/*----------------------------------------------------------------------------*/ +/* Configure GPIO */ +/*----------------------------------------------------------------------------*/ +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +/** Configure pins as + * Analog + * Input + * Output + * EVENT_OUT + * EXTI +*/ +void MX_GPIO_Init(void) +{ + + GPIO_InitTypeDef GPIO_InitStruct = {0}; + + /* GPIO Ports Clock Enable */ + __HAL_RCC_GPIOH_CLK_ENABLE(); + __HAL_RCC_GPIOC_CLK_ENABLE(); + __HAL_RCC_GPIOA_CLK_ENABLE(); + __HAL_RCC_GPIOB_CLK_ENABLE(); + __HAL_RCC_GPIOE_CLK_ENABLE(); + __HAL_RCC_GPIOD_CLK_ENABLE(); + + /*Configure GPIO pin Output Level */ + HAL_GPIO_WritePin(ETH_RESET_GPIO_Port, ETH_RESET_Pin, GPIO_PIN_SET); + + /*Configure GPIO pin Output Level */ + HAL_GPIO_WritePin(GPIOB, LED3_R_Pin|LED3_G_Pin|DAC1_CS_Pin|DAC2_CS_Pin, GPIO_PIN_SET); + + /*Configure GPIO pin Output Level */ + HAL_GPIO_WritePin(GPIOE, LED3_Y_Pin|LED2_R_Pin|LED2_G_Pin|LED2_Y_Pin + |ADC_CS_Pin|AD7124_SYNC_Pin, GPIO_PIN_SET); + + /*Configure GPIO pin Output Level */ + HAL_GPIO_WritePin(GPIOD, DO_CH4_Pin|DO_CH3_Pin|DO_CH1_Pin|DO_CH2_Pin + |DO_EN_Pin|HART1_RTS_Pin|HART2_RTS_Pin, GPIO_PIN_SET); + + /*Configure GPIO pin Output Level */ + HAL_GPIO_WritePin(HART1_RST_GPIO_Port, HART1_RST_Pin, GPIO_PIN_SET); + + /*Configure GPIO pin Output Level */ + HAL_GPIO_WritePin(HART2_RST_GPIO_Port, HART2_RST_Pin, GPIO_PIN_RESET); + + /*Configure GPIO pin : ETH_RESET_Pin */ + GPIO_InitStruct.Pin = ETH_RESET_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_OUTPUT_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_MEDIUM; + HAL_GPIO_Init(ETH_RESET_GPIO_Port, &GPIO_InitStruct); + + /*Configure GPIO pins : LED3_R_Pin LED3_G_Pin */ + GPIO_InitStruct.Pin = LED3_R_Pin|LED3_G_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_OUTPUT_PP; + GPIO_InitStruct.Pull = GPIO_PULLUP; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_MEDIUM; + HAL_GPIO_Init(GPIOB, &GPIO_InitStruct); + + /*Configure GPIO pins : LED3_Y_Pin LED2_R_Pin LED2_G_Pin LED2_Y_Pin */ + GPIO_InitStruct.Pin = LED3_Y_Pin|LED2_R_Pin|LED2_G_Pin|LED2_Y_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_OUTPUT_PP; + GPIO_InitStruct.Pull = GPIO_PULLUP; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_MEDIUM; + HAL_GPIO_Init(GPIOE, &GPIO_InitStruct); + + /*Configure GPIO pins : DO_CH4_Pin DO_CH3_Pin DO_CH1_Pin DO_CH2_Pin */ + GPIO_InitStruct.Pin = DO_CH4_Pin|DO_CH3_Pin|DO_CH1_Pin|DO_CH2_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_OUTPUT_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + HAL_GPIO_Init(GPIOD, &GPIO_InitStruct); + + /*Configure GPIO pins : DO_EN_Pin HART1_RTS_Pin HART2_RTS_Pin HART2_RST_Pin */ + GPIO_InitStruct.Pin = DO_EN_Pin|HART1_RTS_Pin|HART2_RTS_Pin|HART2_RST_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_OUTPUT_PP; + GPIO_InitStruct.Pull = GPIO_PULLUP; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + HAL_GPIO_Init(GPIOD, &GPIO_InitStruct); + + /*Configure GPIO pins : DI_CH1_Pin DI_CH2_Pin */ + GPIO_InitStruct.Pin = DI_CH1_Pin|DI_CH2_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_INPUT; + GPIO_InitStruct.Pull = GPIO_PULLUP; + HAL_GPIO_Init(GPIOC, &GPIO_InitStruct); + + /*Configure GPIO pin : HART1_RST_Pin */ + GPIO_InitStruct.Pin = HART1_RST_Pin; + GPIO_InitStruct.Mode = 
GPIO_MODE_OUTPUT_PP; + GPIO_InitStruct.Pull = GPIO_PULLUP; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + HAL_GPIO_Init(HART1_RST_GPIO_Port, &GPIO_InitStruct); + + /*Configure GPIO pins : DI_CH3_Pin DI_CH4_Pin */ + GPIO_InitStruct.Pin = DI_CH3_Pin|DI_CH4_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_INPUT; + GPIO_InitStruct.Pull = GPIO_PULLUP; + HAL_GPIO_Init(GPIOA, &GPIO_InitStruct); + + /*Configure GPIO pins : HART1_OCD_Pin HART2_OCD_Pin */ + GPIO_InitStruct.Pin = HART1_OCD_Pin|HART2_OCD_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_IT_FALLING; + GPIO_InitStruct.Pull = GPIO_PULLUP; + HAL_GPIO_Init(GPIOD, &GPIO_InitStruct); + + /*Configure GPIO pins : DAC1_CS_Pin DAC2_CS_Pin */ + GPIO_InitStruct.Pin = DAC1_CS_Pin|DAC2_CS_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_OUTPUT_PP; + GPIO_InitStruct.Pull = GPIO_PULLUP; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + HAL_GPIO_Init(GPIOB, &GPIO_InitStruct); + + /*Configure GPIO pin : ADC_CS_Pin */ + GPIO_InitStruct.Pin = ADC_CS_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_OUTPUT_PP; + GPIO_InitStruct.Pull = GPIO_PULLUP; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + HAL_GPIO_Init(ADC_CS_GPIO_Port, &GPIO_InitStruct); + + /*Configure GPIO pin : AD7124_SYNC_Pin */ + GPIO_InitStruct.Pin = AD7124_SYNC_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_OUTPUT_PP; + GPIO_InitStruct.Pull = GPIO_PULLDOWN; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + HAL_GPIO_Init(AD7124_SYNC_GPIO_Port, &GPIO_InitStruct); + + /* EXTI interrupt init*/ + HAL_NVIC_SetPriority(EXTI1_IRQn, 0, 0); + HAL_NVIC_EnableIRQ(EXTI1_IRQn); + + HAL_NVIC_SetPriority(EXTI3_IRQn, 0, 0); + HAL_NVIC_EnableIRQ(EXTI3_IRQn); + +} + +/* USER CODE BEGIN 2 */ +void gpio_do_test(gpio_e gpio_num, GPIO_PinState state) +{ + HAL_GPIO_WritePin(DO_EN_GPIO_Port, DO_EN_Pin, GPIO_PIN_RESET); + switch (gpio_num) + { + case DO_1: + HAL_GPIO_WritePin(DO_CH1_GPIO_Port, DO_CH1_Pin, state); + break; + case DO_2: + HAL_GPIO_WritePin(DO_CH2_GPIO_Port, DO_CH2_Pin, state); + break; + case DO_3: + HAL_GPIO_WritePin(DO_CH3_GPIO_Port, DO_CH3_Pin, state); + break; + case DO_4: + HAL_GPIO_WritePin(DO_CH4_GPIO_Port, DO_CH4_Pin, state); + break; + default: + break; + } +} + +GPIO_PinState gpio_di_test(gpio_e gpio_num) +{ + GPIO_PinState state; + switch (gpio_num) + { + case DI_1: + state = HAL_GPIO_ReadPin(DI_CH1_GPIO_Port, DI_CH1_Pin); + break; + case DI_2: + state = HAL_GPIO_ReadPin(DI_CH2_GPIO_Port, DI_CH2_Pin); + break; + case DI_3: + state = HAL_GPIO_ReadPin(DI_CH3_GPIO_Port, DI_CH3_Pin); + break; + case DI_4: + state = HAL_GPIO_ReadPin(DI_CH4_GPIO_Port, DI_CH4_Pin); + break; + default: + state = GPIO_PIN_RESET; + break; + } + return state; +} +/* USER CODE END 2 */ diff --git a/Core/Src/main.c b/Core/Src/main.c new file mode 100644 index 0000000..4e841fd --- /dev/null +++ b/Core/Src/main.c @@ -0,0 +1,270 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file main.c + * @brief : Main program body + ****************************************************************************** + * @attention + * + * Copyright (c) 2024 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ +/* USER CODE END Header */ +/* Includes ------------------------------------------------------------------*/ +#include "main.h" +#include "dma.h" +#include "lwip.h" +#include "spi.h" +#include "tim.h" +#include "usart.h" +#include "gpio.h" + +/* Private includes ----------------------------------------------------------*/ +/* USER CODE BEGIN Includes */ +#include "stdio.h" +#include "dac161s997.h" +#include "lwip/api.h" +#include "lwip/tcp.h" +#include "ad7124.h" +#include "ht1200m.h" +/* USER CODE END Includes */ + +/* Private typedef -----------------------------------------------------------*/ +/* USER CODE BEGIN PTD */ + +/* USER CODE END PTD */ + +/* Private define ------------------------------------------------------------*/ +/* USER CODE BEGIN PD */ + +/* USER CODE END PD */ + +/* Private macro -------------------------------------------------------------*/ +/* USER CODE BEGIN PM */ + +/* USER CODE END PM */ + +/* Private variables ---------------------------------------------------------*/ + +/* USER CODE BEGIN PV */ + +/* USER CODE END PV */ + +/* Private function prototypes -----------------------------------------------*/ +void SystemClock_Config(void); +/* USER CODE BEGIN PFP */ + +/* USER CODE END PFP */ + +/* Private user code ---------------------------------------------------------*/ +/* USER CODE BEGIN 0 */ +uart_t lcd_uart4 = {0}; +uart_t ble1_uart6 = {0}; +uart_t ble2_uart3 = {0}; +uart_t hart2_uart2 = {0}; +uart_t hart1_uart5 = {0}; +float current_buff[2] = {12.0f, 12.0f}; +uint8_t tcp_echo_flags = 0; +/* USER CODE END 0 */ + +/** + * @brief The application entry point. + * @retval int + */ +int main(void) +{ + + /* USER CODE BEGIN 1 */ + + /* USER CODE END 1 */ + + /* MCU Configuration--------------------------------------------------------*/ + + /* Reset of all peripherals, Initializes the Flash interface and the Systick. 
 */ + HAL_Init(); + + /* USER CODE BEGIN Init */ + + /* USER CODE END Init */ + + /* Configure the system clock */ + SystemClock_Config(); + + /* USER CODE BEGIN SysInit */ + + /* USER CODE END SysInit */ + + /* Initialize all configured peripherals */ + MX_GPIO_Init(); + MX_DMA_Init(); + MX_LWIP_Init(); + MX_TIM3_Init(); + MX_SPI1_Init(); + MX_USART6_UART_Init(); + MX_UART4_Init(); + MX_TIM2_Init(); + MX_UART5_Init(); + MX_USART2_UART_Init(); + MX_USART3_UART_Init(); + /* USER CODE BEGIN 2 */ + tcp_echo_init(); // TCP server init TCP_Client_Init(); // TCP client init + ad7124_setup(); // AD7124 init + dac161s997_init(); // DAC161S997 init + + HAL_GPIO_WritePin(DO_EN_GPIO_Port, DO_EN_Pin, GPIO_PIN_SET); // enable DO outputs + + HAL_UARTEx_ReceiveToIdle_DMA(&huart4, lcd_uart4.rx_data_temp, ARRAY_LEN(lcd_uart4.rx_data_temp)); + HAL_UARTEx_ReceiveToIdle_DMA(&huart6, ble1_uart6.rx_data_temp, ARRAY_LEN(ble1_uart6.rx_data_temp)); + HAL_UARTEx_ReceiveToIdle_DMA(&huart3, ble2_uart3.rx_data_temp, ARRAY_LEN(ble2_uart3.rx_data_temp)); + hart_ht1200m_reset(); // reset the HT1200M module + HAL_TIM_PWM_Start(&htim2, TIM_CHANNEL_1); // start the PWM output that drives the HT1200M module + /* USER CODE END 2 */ + + /* Infinite loop */ + /* USER CODE BEGIN WHILE */ + while (1) + { + /* USER CODE END WHILE */ + + /* USER CODE BEGIN 3 */ + MX_LWIP_Process(); + ad7124_get_analog(STOP_NC_ADC); + + dac161s997_output(DAC161S997_1, current_buff[0]); + dac161s997_output(DAC161S997_2, current_buff[1]); + } + /* USER CODE END 3 */ +} + +/** + * @brief System Clock Configuration + * @retval None + */ +void SystemClock_Config(void) +{ + RCC_OscInitTypeDef RCC_OscInitStruct = {0}; + RCC_ClkInitTypeDef RCC_ClkInitStruct = {0}; + + /** Configure the main internal regulator output voltage + */ + __HAL_RCC_PWR_CLK_ENABLE(); + __HAL_PWR_VOLTAGESCALING_CONFIG(PWR_REGULATOR_VOLTAGE_SCALE1); + + /** Initializes the RCC Oscillators according to the specified parameters + * in the RCC_OscInitTypeDef structure.
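 * (Editor's note, not CubeMX-generated: with the HSE_VALUE of 11 059 200 Hz set in
 * stm32f4xx_hal_conf.h, the PLL settings below work out to
 *   PLL input  = 11.0592 MHz / PLLM(10)  = 1.10592 MHz
 *   VCO output = 1.10592 MHz * PLLN(200) = 221.184 MHz
 *   SYSCLK     = 221.184 MHz / PLLP(2)   = 110.592 MHz, HCLK = SYSCLK / 1
 *   PCLK1 = PCLK2 = HCLK / 4 = 27.648 MHz
 * which is consistent with the three flash wait states (FLASH_LATENCY_3) requested further
 * down, assuming a 2.7-3.6 V supply.)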
 + */ + RCC_OscInitStruct.OscillatorType = RCC_OSCILLATORTYPE_HSE; + RCC_OscInitStruct.HSEState = RCC_HSE_ON; + RCC_OscInitStruct.PLL.PLLState = RCC_PLL_ON; + RCC_OscInitStruct.PLL.PLLSource = RCC_PLLSOURCE_HSE; + RCC_OscInitStruct.PLL.PLLM = 10; + RCC_OscInitStruct.PLL.PLLN = 200; + RCC_OscInitStruct.PLL.PLLP = RCC_PLLP_DIV2; + RCC_OscInitStruct.PLL.PLLQ = 4; + if (HAL_RCC_OscConfig(&RCC_OscInitStruct) != HAL_OK) + { + Error_Handler(); + } + + /** Initializes the CPU, AHB and APB buses clocks + */ + RCC_ClkInitStruct.ClockType = RCC_CLOCKTYPE_HCLK | RCC_CLOCKTYPE_SYSCLK | RCC_CLOCKTYPE_PCLK1 | RCC_CLOCKTYPE_PCLK2; + RCC_ClkInitStruct.SYSCLKSource = RCC_SYSCLKSOURCE_PLLCLK; + RCC_ClkInitStruct.AHBCLKDivider = RCC_SYSCLK_DIV1; + RCC_ClkInitStruct.APB1CLKDivider = RCC_HCLK_DIV4; + RCC_ClkInitStruct.APB2CLKDivider = RCC_HCLK_DIV4; + + if (HAL_RCC_ClockConfig(&RCC_ClkInitStruct, FLASH_LATENCY_3) != HAL_OK) + { + Error_Handler(); + } +} + +/* USER CODE BEGIN 4 */ +/****************************************************************************** + * Function : HAL_UARTEx_RxEventCallback + * Purpose  : DMA + idle-line interrupt reception callback + * Params   : UART_HandleTypeDef *huart   // UART that raised the event + *            uint16_t            Size    // number of bytes received + * Returns  : none +******************************************************************************/ +void HAL_UARTEx_RxEventCallback(UART_HandleTypeDef *huart, uint16_t Size) +{ + if (huart == &huart4) + { + __HAL_UNLOCK(huart); + lcd_uart4.rx_num = Size; + memset(lcd_uart4.rx_data, 0, ARRAY_LEN(lcd_uart4.rx_data)); + memcpy(lcd_uart4.rx_data, lcd_uart4.rx_data_temp, Size); + // transparent forwarding to the Ethernet link + user_send_data(lcd_uart4.rx_data, Size); + HAL_UARTEx_ReceiveToIdle_DMA(&huart4, lcd_uart4.rx_data_temp, ARRAY_LEN(lcd_uart4.rx_data_temp)); + } + if (huart == &huart6) + { + __HAL_UNLOCK(huart); + ble1_uart6.rx_num = Size; + memset(ble1_uart6.rx_data, 0, ARRAY_LEN(ble1_uart6.rx_data)); + memcpy(ble1_uart6.rx_data, ble1_uart6.rx_data_temp, Size); + user_send_data(ble1_uart6.rx_data, Size); + HAL_UARTEx_ReceiveToIdle_DMA(&huart6, ble1_uart6.rx_data_temp, ARRAY_LEN(ble1_uart6.rx_data_temp)); + } + if (huart == &huart3) + { + __HAL_UNLOCK(huart); + ble2_uart3.rx_num = Size; + memset(ble2_uart3.rx_data, 0, ARRAY_LEN(ble2_uart3.rx_data)); + memcpy(ble2_uart3.rx_data, ble2_uart3.rx_data_temp, Size); + user_send_data(ble2_uart3.rx_data, Size); + HAL_UARTEx_ReceiveToIdle_DMA(&huart3, ble2_uart3.rx_data_temp, ARRAY_LEN(ble2_uart3.rx_data_temp)); + } +} + +void HAL_TIM_PeriodElapsedCallback(TIM_HandleTypeDef *htim) +{ + if (htim == (&htim3)) + { + // User code executed on the TIM3 update (period elapsed) event + HAL_GPIO_TogglePin(LED2_Y_GPIO_Port, LED2_Y_Pin); + } +} + +/* USER CODE END 4 */ + +/** + * @brief This function is executed in case of error occurrence. + * @retval None + */ +void Error_Handler(void) +{ + /* USER CODE BEGIN Error_Handler_Debug */ + /* User can add his own implementation to report the HAL error return state */ + __disable_irq(); + while (1) + { + } + /* USER CODE END Error_Handler_Debug */ +} + +#ifdef USE_FULL_ASSERT +/** + * @brief Reports the name of the source file and the source line number + * where the assert_param error has occurred.
+ * @param file: pointer to the source file name + * @param line: assert_param error line source number + * @retval None + */ +void assert_failed(uint8_t *file, uint32_t line) +{ + /* USER CODE BEGIN 6 */ + /* User can add his own implementation to report the file name and line number, + ex: printf("Wrong parameters value: file %s on line %d\r\n", file, line) */ + /* USER CODE END 6 */ +} +#endif /* USE_FULL_ASSERT */ diff --git a/Core/Src/spi.c b/Core/Src/spi.c new file mode 100644 index 0000000..f1fc8b4 --- /dev/null +++ b/Core/Src/spi.c @@ -0,0 +1,118 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file spi.c + * @brief This file provides code for the configuration + * of the SPI instances. + ****************************************************************************** + * @attention + * + * Copyright (c) 2025 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ +/* USER CODE END Header */ +/* Includes ------------------------------------------------------------------*/ +#include "spi.h" + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +SPI_HandleTypeDef hspi1; + +/* SPI1 init function */ +void MX_SPI1_Init(void) +{ + + /* USER CODE BEGIN SPI1_Init 0 */ + + /* USER CODE END SPI1_Init 0 */ + + /* USER CODE BEGIN SPI1_Init 1 */ + + /* USER CODE END SPI1_Init 1 */ + hspi1.Instance = SPI1; + hspi1.Init.Mode = SPI_MODE_MASTER; + hspi1.Init.Direction = SPI_DIRECTION_2LINES; + hspi1.Init.DataSize = SPI_DATASIZE_8BIT; + hspi1.Init.CLKPolarity = SPI_POLARITY_HIGH; + hspi1.Init.CLKPhase = SPI_PHASE_2EDGE; + hspi1.Init.NSS = SPI_NSS_SOFT; + hspi1.Init.BaudRatePrescaler = SPI_BAUDRATEPRESCALER_256; + hspi1.Init.FirstBit = SPI_FIRSTBIT_MSB; + hspi1.Init.TIMode = SPI_TIMODE_DISABLE; + hspi1.Init.CRCCalculation = SPI_CRCCALCULATION_DISABLE; + hspi1.Init.CRCPolynomial = 10; + if (HAL_SPI_Init(&hspi1) != HAL_OK) + { + Error_Handler(); + } + /* USER CODE BEGIN SPI1_Init 2 */ + /* USER CODE END SPI1_Init 2 */ + +} + +void HAL_SPI_MspInit(SPI_HandleTypeDef* spiHandle) +{ + + GPIO_InitTypeDef GPIO_InitStruct = {0}; + if(spiHandle->Instance==SPI1) + { + /* USER CODE BEGIN SPI1_MspInit 0 */ + + /* USER CODE END SPI1_MspInit 0 */ + /* SPI1 clock enable */ + __HAL_RCC_SPI1_CLK_ENABLE(); + + __HAL_RCC_GPIOB_CLK_ENABLE(); + /**SPI1 GPIO Configuration + PB3 ------> SPI1_SCK + PB4 ------> SPI1_MISO + PB5 ------> SPI1_MOSI + */ + GPIO_InitStruct.Pin = GPIO_PIN_3|GPIO_PIN_4|GPIO_PIN_5; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF5_SPI1; + HAL_GPIO_Init(GPIOB, &GPIO_InitStruct); + + /* USER CODE BEGIN SPI1_MspInit 1 */ + + /* USER CODE END SPI1_MspInit 1 */ + } +} + +void HAL_SPI_MspDeInit(SPI_HandleTypeDef* spiHandle) +{ + + if(spiHandle->Instance==SPI1) + { + /* USER CODE BEGIN SPI1_MspDeInit 0 */ + + /* USER CODE END SPI1_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_SPI1_CLK_DISABLE(); + + /**SPI1 GPIO Configuration + PB3 ------> SPI1_SCK + PB4 ------> SPI1_MISO + PB5 ------> SPI1_MOSI + */ + HAL_GPIO_DeInit(GPIOB, GPIO_PIN_3|GPIO_PIN_4|GPIO_PIN_5); + + /* USER CODE BEGIN SPI1_MspDeInit 1 */ + + /* USER CODE 
END SPI1_MspDeInit 1 */ + } +} + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ diff --git a/Core/Src/stm32f4xx_hal_msp.c b/Core/Src/stm32f4xx_hal_msp.c new file mode 100644 index 0000000..178202d --- /dev/null +++ b/Core/Src/stm32f4xx_hal_msp.c @@ -0,0 +1,82 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file stm32f4xx_hal_msp.c + * @brief This file provides code for the MSP Initialization + * and de-Initialization codes. + ****************************************************************************** + * @attention + * + * Copyright (c) 2024 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +/* Private typedef -----------------------------------------------------------*/ +/* USER CODE BEGIN TD */ + +/* USER CODE END TD */ + +/* Private define ------------------------------------------------------------*/ +/* USER CODE BEGIN Define */ + +/* USER CODE END Define */ + +/* Private macro -------------------------------------------------------------*/ +/* USER CODE BEGIN Macro */ + +/* USER CODE END Macro */ + +/* Private variables ---------------------------------------------------------*/ +/* USER CODE BEGIN PV */ + +/* USER CODE END PV */ + +/* Private function prototypes -----------------------------------------------*/ +/* USER CODE BEGIN PFP */ + +/* USER CODE END PFP */ + +/* External functions --------------------------------------------------------*/ +/* USER CODE BEGIN ExternalFunctions */ + +/* USER CODE END ExternalFunctions */ + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ +/** + * Initializes the Global MSP. + */ +void HAL_MspInit(void) +{ + + /* USER CODE BEGIN MspInit 0 */ + + /* USER CODE END MspInit 0 */ + + __HAL_RCC_SYSCFG_CLK_ENABLE(); + __HAL_RCC_PWR_CLK_ENABLE(); + + /* System interrupt init*/ + + /* USER CODE BEGIN MspInit 1 */ + + /* USER CODE END MspInit 1 */ +} + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ diff --git a/Core/Src/stm32f4xx_it.c b/Core/Src/stm32f4xx_it.c new file mode 100644 index 0000000..4ef80d7 --- /dev/null +++ b/Core/Src/stm32f4xx_it.c @@ -0,0 +1,456 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file stm32f4xx_it.c + * @brief Interrupt Service Routines. + ****************************************************************************** + * @attention + * + * Copyright (c) 2024 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" +#include "stm32f4xx_it.h" +/* Private includes ----------------------------------------------------------*/ +/* USER CODE BEGIN Includes */ +#include "tim.h" +#include "usart.h" +#include "tcpserverc.h" +/* USER CODE END Includes */ + +/* Private typedef -----------------------------------------------------------*/ +/* USER CODE BEGIN TD */ +/* USER CODE END TD */ + +/* Private define ------------------------------------------------------------*/ +/* USER CODE BEGIN PD */ +extern struct tcp_pcb *server_pcb1; +/* USER CODE END PD */ + +/* Private macro -------------------------------------------------------------*/ +/* USER CODE BEGIN PM */ + +/* USER CODE END PM */ + +/* Private variables ---------------------------------------------------------*/ +/* USER CODE BEGIN PV */ + +/* USER CODE END PV */ + +/* Private function prototypes -----------------------------------------------*/ +/* USER CODE BEGIN PFP */ + +/* USER CODE END PFP */ + +/* Private user code ---------------------------------------------------------*/ +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +/* External variables --------------------------------------------------------*/ +extern ETH_HandleTypeDef heth; +extern TIM_HandleTypeDef htim3; +extern DMA_HandleTypeDef hdma_uart4_rx; +extern DMA_HandleTypeDef hdma_uart4_tx; +extern DMA_HandleTypeDef hdma_usart3_rx; +extern DMA_HandleTypeDef hdma_usart3_tx; +extern DMA_HandleTypeDef hdma_usart6_rx; +extern DMA_HandleTypeDef hdma_usart6_tx; +extern UART_HandleTypeDef huart4; +extern UART_HandleTypeDef huart5; +extern UART_HandleTypeDef huart2; +extern UART_HandleTypeDef huart3; +extern UART_HandleTypeDef huart6; +/* USER CODE BEGIN EV */ + +/* USER CODE END EV */ + +/******************************************************************************/ +/* Cortex-M4 Processor Interruption and Exception Handlers */ +/******************************************************************************/ +/** + * @brief This function handles Non maskable interrupt. + */ +void NMI_Handler(void) +{ + /* USER CODE BEGIN NonMaskableInt_IRQn 0 */ + + /* USER CODE END NonMaskableInt_IRQn 0 */ + /* USER CODE BEGIN NonMaskableInt_IRQn 1 */ + while (1) + { + } + /* USER CODE END NonMaskableInt_IRQn 1 */ +} + +/** + * @brief This function handles Hard fault interrupt. + */ +void HardFault_Handler(void) +{ + /* USER CODE BEGIN HardFault_IRQn 0 */ + + /* USER CODE END HardFault_IRQn 0 */ + while (1) + { + /* USER CODE BEGIN W1_HardFault_IRQn 0 */ + /* USER CODE END W1_HardFault_IRQn 0 */ + } +} + +/** + * @brief This function handles Memory management fault. + */ +void MemManage_Handler(void) +{ + /* USER CODE BEGIN MemoryManagement_IRQn 0 */ + + /* USER CODE END MemoryManagement_IRQn 0 */ + while (1) + { + /* USER CODE BEGIN W1_MemoryManagement_IRQn 0 */ + /* USER CODE END W1_MemoryManagement_IRQn 0 */ + } +} + +/** + * @brief This function handles Pre-fetch fault, memory access fault. + */ +void BusFault_Handler(void) +{ + /* USER CODE BEGIN BusFault_IRQn 0 */ + + /* USER CODE END BusFault_IRQn 0 */ + while (1) + { + /* USER CODE BEGIN W1_BusFault_IRQn 0 */ + /* USER CODE END W1_BusFault_IRQn 0 */ + } +} + +/** + * @brief This function handles Undefined instruction or illegal state. 
+ */ +void UsageFault_Handler(void) +{ + /* USER CODE BEGIN UsageFault_IRQn 0 */ + + /* USER CODE END UsageFault_IRQn 0 */ + while (1) + { + /* USER CODE BEGIN W1_UsageFault_IRQn 0 */ + /* USER CODE END W1_UsageFault_IRQn 0 */ + } +} + +/** + * @brief This function handles System service call via SWI instruction. + */ +void SVC_Handler(void) +{ + /* USER CODE BEGIN SVCall_IRQn 0 */ + + /* USER CODE END SVCall_IRQn 0 */ + /* USER CODE BEGIN SVCall_IRQn 1 */ + + /* USER CODE END SVCall_IRQn 1 */ +} + +/** + * @brief This function handles Debug monitor. + */ +void DebugMon_Handler(void) +{ + /* USER CODE BEGIN DebugMonitor_IRQn 0 */ + + /* USER CODE END DebugMonitor_IRQn 0 */ + /* USER CODE BEGIN DebugMonitor_IRQn 1 */ + + /* USER CODE END DebugMonitor_IRQn 1 */ +} + +/** + * @brief This function handles Pendable request for system service. + */ +void PendSV_Handler(void) +{ + /* USER CODE BEGIN PendSV_IRQn 0 */ + + /* USER CODE END PendSV_IRQn 0 */ + /* USER CODE BEGIN PendSV_IRQn 1 */ + + /* USER CODE END PendSV_IRQn 1 */ +} + +/** + * @brief This function handles System tick timer. + */ +void SysTick_Handler(void) +{ + /* USER CODE BEGIN SysTick_IRQn 0 */ + + /* USER CODE END SysTick_IRQn 0 */ + HAL_IncTick(); + /* USER CODE BEGIN SysTick_IRQn 1 */ + + /* USER CODE END SysTick_IRQn 1 */ +} + +/******************************************************************************/ +/* STM32F4xx Peripheral Interrupt Handlers */ +/* Add here the Interrupt Handlers for the used peripherals. */ +/* For the available peripheral interrupt handler names, */ +/* please refer to the startup file (startup_stm32f4xx.s). */ +/******************************************************************************/ + +/** + * @brief This function handles EXTI line1 interrupt. + */ +void EXTI1_IRQHandler(void) +{ + /* USER CODE BEGIN EXTI1_IRQn 0 */ + + /* USER CODE END EXTI1_IRQn 0 */ + HAL_GPIO_EXTI_IRQHandler(HART1_OCD_Pin); + /* USER CODE BEGIN EXTI1_IRQn 1 */ + HAL_GPIO_TogglePin(LED3_G_GPIO_Port, LED3_G_Pin); + /* USER CODE END EXTI1_IRQn 1 */ +} + +/** + * @brief This function handles EXTI line3 interrupt. + */ +void EXTI3_IRQHandler(void) +{ + /* USER CODE BEGIN EXTI3_IRQn 0 */ + + /* USER CODE END EXTI3_IRQn 0 */ + HAL_GPIO_EXTI_IRQHandler(HART2_OCD_Pin); + /* USER CODE BEGIN EXTI3_IRQn 1 */ + HAL_GPIO_TogglePin(LED3_R_GPIO_Port, LED3_R_Pin); + /* USER CODE END EXTI3_IRQn 1 */ +} + +/** + * @brief This function handles DMA1 stream1 global interrupt. + */ +void DMA1_Stream1_IRQHandler(void) +{ + /* USER CODE BEGIN DMA1_Stream1_IRQn 0 */ + + /* USER CODE END DMA1_Stream1_IRQn 0 */ + HAL_DMA_IRQHandler(&hdma_usart3_rx); + /* USER CODE BEGIN DMA1_Stream1_IRQn 1 */ + + /* USER CODE END DMA1_Stream1_IRQn 1 */ +} + +/** + * @brief This function handles DMA1 stream2 global interrupt. + */ +void DMA1_Stream2_IRQHandler(void) +{ + /* USER CODE BEGIN DMA1_Stream2_IRQn 0 */ + + /* USER CODE END DMA1_Stream2_IRQn 0 */ + HAL_DMA_IRQHandler(&hdma_uart4_rx); + /* USER CODE BEGIN DMA1_Stream2_IRQn 1 */ + + /* USER CODE END DMA1_Stream2_IRQn 1 */ +} + +/** + * @brief This function handles DMA1 stream3 global interrupt. + */ +void DMA1_Stream3_IRQHandler(void) +{ + /* USER CODE BEGIN DMA1_Stream3_IRQn 0 */ + + /* USER CODE END DMA1_Stream3_IRQn 0 */ + HAL_DMA_IRQHandler(&hdma_usart3_tx); + /* USER CODE BEGIN DMA1_Stream3_IRQn 1 */ + + /* USER CODE END DMA1_Stream3_IRQn 1 */ +} + +/** + * @brief This function handles DMA1 stream4 global interrupt. 
 + */ +void DMA1_Stream4_IRQHandler(void) +{ + /* USER CODE BEGIN DMA1_Stream4_IRQn 0 */ + + /* USER CODE END DMA1_Stream4_IRQn 0 */ + HAL_DMA_IRQHandler(&hdma_uart4_tx); + /* USER CODE BEGIN DMA1_Stream4_IRQn 1 */ + + /* USER CODE END DMA1_Stream4_IRQn 1 */ +} + +/** + * @brief This function handles TIM3 global interrupt. + */ +void TIM3_IRQHandler(void) +{ + /* USER CODE BEGIN TIM3_IRQn 0 */ + + /* USER CODE END TIM3_IRQn 0 */ + HAL_TIM_IRQHandler(&htim3); + /* USER CODE BEGIN TIM3_IRQn 1 */ + + /* USER CODE END TIM3_IRQn 1 */ +} + +/** + * @brief This function handles USART2 global interrupt. + */ +void USART2_IRQHandler(void) +{ + uint8_t receive_data = 0; + /* USER CODE BEGIN USART2_IRQn 0 */ + if (__HAL_UART_GET_FLAG(&huart2, UART_FLAG_RXNE) != RESET) + { + HAL_UART_Receive(&huart2, &receive_data, 1, 100); + hart2_uart2.rx_data[hart2_uart2.rx_num] = receive_data; + hart2_uart2.rx_num++; + __HAL_UART_CLEAR_IDLEFLAG(&huart2); + } + // idle-line interrupt + if (__HAL_UART_GET_FLAG(&huart2, UART_FLAG_IDLE) != RESET) + { + if (tcp_echo_flags == 1) + { + user_send_data(hart2_uart2.rx_data, hart2_uart2.rx_num); + } + hart2_uart2.rx_num = 0; + __HAL_UART_CLEAR_IDLEFLAG(&huart2); + } + /* USER CODE END USART2_IRQn 0 */ + HAL_UART_IRQHandler(&huart2); + /* USER CODE BEGIN USART2_IRQn 1 */ + + /* USER CODE END USART2_IRQn 1 */ +} + +/** + * @brief This function handles USART3 global interrupt. + */ +void USART3_IRQHandler(void) +{ + /* USER CODE BEGIN USART3_IRQn 0 */ + + /* USER CODE END USART3_IRQn 0 */ + HAL_UART_IRQHandler(&huart3); + /* USER CODE BEGIN USART3_IRQn 1 */ + + /* USER CODE END USART3_IRQn 1 */ +} + +/** + * @brief This function handles UART4 global interrupt. + */ +void UART4_IRQHandler(void) +{ + /* USER CODE BEGIN UART4_IRQn 0 */ + /* USER CODE END UART4_IRQn 0 */ + HAL_UART_IRQHandler(&huart4); + /* USER CODE BEGIN UART4_IRQn 1 */ + + /* USER CODE END UART4_IRQn 1 */ +} + +/** + * @brief This function handles UART5 global interrupt. + */ +void UART5_IRQHandler(void) +{ + /* USER CODE BEGIN UART5_IRQn 0 */ + uint8_t receive_data = 0; + if (__HAL_UART_GET_FLAG(&huart5, UART_FLAG_RXNE) != RESET) + { + HAL_UART_Receive(&huart5, &receive_data, 1, 1); + hart1_uart5.rx_data[hart1_uart5.rx_num] = receive_data; + hart1_uart5.rx_num++; + __HAL_UART_CLEAR_IDLEFLAG(&huart5); + } + // idle-line interrupt + if (__HAL_UART_GET_FLAG(&huart5, UART_FLAG_IDLE) != RESET) + { + user_send_data(hart1_uart5.rx_data, hart1_uart5.rx_num); + hart1_uart5.rx_num = 0; + __HAL_UART_CLEAR_IDLEFLAG(&huart5); + } + /* USER CODE END UART5_IRQn 0 */ + HAL_UART_IRQHandler(&huart5); + /* USER CODE BEGIN UART5_IRQn 1 */ + /* USER CODE END UART5_IRQn 1 */ +} + +/** + * @brief This function handles DMA2 stream1 global interrupt. + */ +void DMA2_Stream1_IRQHandler(void) +{ + /* USER CODE BEGIN DMA2_Stream1_IRQn 0 */ + + /* USER CODE END DMA2_Stream1_IRQn 0 */ + HAL_DMA_IRQHandler(&hdma_usart6_rx); + /* USER CODE BEGIN DMA2_Stream1_IRQn 1 */ + + /* USER CODE END DMA2_Stream1_IRQn 1 */ +} + +/** + * @brief This function handles Ethernet global interrupt. + */ +void ETH_IRQHandler(void) +{ + /* USER CODE BEGIN ETH_IRQn 0 */ + + /* USER CODE END ETH_IRQn 0 */ + HAL_ETH_IRQHandler(&heth); + /* USER CODE BEGIN ETH_IRQn 1 */ + + /* USER CODE END ETH_IRQn 1 */ +} + +/** + * @brief This function handles DMA2 stream6 global interrupt.
+ */ +void DMA2_Stream6_IRQHandler(void) +{ + /* USER CODE BEGIN DMA2_Stream6_IRQn 0 */ + + /* USER CODE END DMA2_Stream6_IRQn 0 */ + HAL_DMA_IRQHandler(&hdma_usart6_tx); + /* USER CODE BEGIN DMA2_Stream6_IRQn 1 */ + + /* USER CODE END DMA2_Stream6_IRQn 1 */ +} + +/** + * @brief This function handles USART6 global interrupt. + */ +void USART6_IRQHandler(void) +{ + /* USER CODE BEGIN USART6_IRQn 0 */ + + /* USER CODE END USART6_IRQn 0 */ + HAL_UART_IRQHandler(&huart6); + /* USER CODE BEGIN USART6_IRQn 1 */ + + /* USER CODE END USART6_IRQn 1 */ +} + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ diff --git a/Core/Src/system_stm32f4xx.c b/Core/Src/system_stm32f4xx.c new file mode 100644 index 0000000..3bd40f7 --- /dev/null +++ b/Core/Src/system_stm32f4xx.c @@ -0,0 +1,747 @@ +/** + ****************************************************************************** + * @file system_stm32f4xx.c + * @author MCD Application Team + * @brief CMSIS Cortex-M4 Device Peripheral Access Layer System Source File. + * + * This file provides two functions and one global variable to be called from + * user application: + * - SystemInit(): This function is called at startup just after reset and + * before branch to main program. This call is made inside + * the "startup_stm32f4xx.s" file. + * + * - SystemCoreClock variable: Contains the core clock (HCLK), it can be used + * by the user application to setup the SysTick + * timer or configure other parameters. + * + * - SystemCoreClockUpdate(): Updates the variable SystemCoreClock and must + * be called whenever the core clock is changed + * during program execution. + * + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
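+ *
+ * Typical follow-up when the application changes the core clock at run time (sketch
+ * only; when the clock is set through HAL_RCC_ClockConfig(), SystemCoreClock is
+ * refreshed automatically, as noted further below):
+ * @code
+ * SystemCoreClockUpdate();                  // re-read the RCC registers into SystemCoreClock
+ * SysTick_Config(SystemCoreClock / 1000U);  // re-derive a 1 ms SysTick from the new HCLK
+ * @endcode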
+ * + ****************************************************************************** + */ + +/** @addtogroup CMSIS + * @{ + */ + +/** @addtogroup stm32f4xx_system + * @{ + */ + +/** @addtogroup STM32F4xx_System_Private_Includes + * @{ + */ + + +#include "stm32f4xx.h" + +#if !defined (HSE_VALUE) + #define HSE_VALUE ((uint32_t)25000000) /*!< Default value of the External oscillator in Hz */ +#endif /* HSE_VALUE */ + +#if !defined (HSI_VALUE) + #define HSI_VALUE ((uint32_t)16000000) /*!< Value of the Internal oscillator in Hz*/ +#endif /* HSI_VALUE */ + +/** + * @} + */ + +/** @addtogroup STM32F4xx_System_Private_TypesDefinitions + * @{ + */ + +/** + * @} + */ + +/** @addtogroup STM32F4xx_System_Private_Defines + * @{ + */ + +/************************* Miscellaneous Configuration ************************/ +/*!< Uncomment the following line if you need to use external SRAM or SDRAM as data memory */ +#if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx) || defined(STM32F417xx)\ + || defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx)\ + || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) +/* #define DATA_IN_ExtSRAM */ +#endif /* STM32F40xxx || STM32F41xxx || STM32F42xxx || STM32F43xxx || STM32F469xx || STM32F479xx ||\ + STM32F412Zx || STM32F412Vx */ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx)\ + || defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) +/* #define DATA_IN_ExtSDRAM */ +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F446xx || STM32F469xx ||\ + STM32F479xx */ + +/* Note: Following vector table addresses must be defined in line with linker + configuration. */ +/*!< Uncomment the following line if you need to relocate the vector table + anywhere in Flash or Sram, else the vector table is kept at the automatic + remap of boot address selected */ +/* #define USER_VECT_TAB_ADDRESS */ + +#if defined(USER_VECT_TAB_ADDRESS) +/*!< Uncomment the following line if you need to relocate your vector Table + in Sram else user remap will be done in Flash. */ +/* #define VECT_TAB_SRAM */ +#if defined(VECT_TAB_SRAM) +#define VECT_TAB_BASE_ADDRESS SRAM_BASE /*!< Vector Table base address field. + This value must be a multiple of 0x200. */ +#define VECT_TAB_OFFSET 0x00000000U /*!< Vector Table base offset field. + This value must be a multiple of 0x200. */ +#else +#define VECT_TAB_BASE_ADDRESS FLASH_BASE /*!< Vector Table base address field. + This value must be a multiple of 0x200. */ +#define VECT_TAB_OFFSET 0x00000000U /*!< Vector Table base offset field. + This value must be a multiple of 0x200. */ +#endif /* VECT_TAB_SRAM */ +#endif /* USER_VECT_TAB_ADDRESS */ +/******************************************************************************/ + +/** + * @} + */ + +/** @addtogroup STM32F4xx_System_Private_Macros + * @{ + */ + +/** + * @} + */ + +/** @addtogroup STM32F4xx_System_Private_Variables + * @{ + */ + /* This variable is updated in three ways: + 1) by calling CMSIS function SystemCoreClockUpdate() + 2) by calling HAL API function HAL_RCC_GetHCLKFreq() + 3) each time HAL_RCC_ClockConfig() is called to configure the system clock frequency + Note: If you use this function to configure the system clock; then there + is no need to call the 2 first functions listed above, since SystemCoreClock + variable is updated automatically. 
+ */ +uint32_t SystemCoreClock = 16000000; +const uint8_t AHBPrescTable[16] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 6, 7, 8, 9}; +const uint8_t APBPrescTable[8] = {0, 0, 0, 0, 1, 2, 3, 4}; +/** + * @} + */ + +/** @addtogroup STM32F4xx_System_Private_FunctionPrototypes + * @{ + */ + +#if defined (DATA_IN_ExtSRAM) || defined (DATA_IN_ExtSDRAM) + static void SystemInit_ExtMemCtl(void); +#endif /* DATA_IN_ExtSRAM || DATA_IN_ExtSDRAM */ + +/** + * @} + */ + +/** @addtogroup STM32F4xx_System_Private_Functions + * @{ + */ + +/** + * @brief Setup the microcontroller system + * Initialize the FPU setting, vector table location and External memory + * configuration. + * @param None + * @retval None + */ +void SystemInit(void) +{ + /* FPU settings ------------------------------------------------------------*/ + #if (__FPU_PRESENT == 1) && (__FPU_USED == 1) + SCB->CPACR |= ((3UL << 10*2)|(3UL << 11*2)); /* set CP10 and CP11 Full Access */ + #endif + +#if defined (DATA_IN_ExtSRAM) || defined (DATA_IN_ExtSDRAM) + SystemInit_ExtMemCtl(); +#endif /* DATA_IN_ExtSRAM || DATA_IN_ExtSDRAM */ + + /* Configure the Vector Table location -------------------------------------*/ +#if defined(USER_VECT_TAB_ADDRESS) + SCB->VTOR = VECT_TAB_BASE_ADDRESS | VECT_TAB_OFFSET; /* Vector Table Relocation in Internal SRAM */ +#endif /* USER_VECT_TAB_ADDRESS */ +} + +/** + * @brief Update SystemCoreClock variable according to Clock Register Values. + * The SystemCoreClock variable contains the core clock (HCLK), it can + * be used by the user application to setup the SysTick timer or configure + * other parameters. + * + * @note Each time the core clock (HCLK) changes, this function must be called + * to update SystemCoreClock variable value. Otherwise, any configuration + * based on this variable will be incorrect. + * + * @note - The system frequency computed by this function is not the real + * frequency in the chip. It is calculated based on the predefined + * constant and the selected clock source: + * + * - If SYSCLK source is HSI, SystemCoreClock will contain the HSI_VALUE(*) + * + * - If SYSCLK source is HSE, SystemCoreClock will contain the HSE_VALUE(**) + * + * - If SYSCLK source is PLL, SystemCoreClock will contain the HSE_VALUE(**) + * or HSI_VALUE(*) multiplied/divided by the PLL factors. + * + * (*) HSI_VALUE is a constant defined in stm32f4xx_hal_conf.h file (default value + * 16 MHz) but the real value may vary depending on the variations + * in voltage and temperature. + * + * (**) HSE_VALUE is a constant defined in stm32f4xx_hal_conf.h file (its value + * depends on the application requirements), user has to ensure that HSE_VALUE + * is same as the real frequency of the crystal used. Otherwise, this function + * may have wrong result. + * + * - The result of this function could be not correct when using fractional + * value for HSE crystal. 
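+ *
+ * Worked example (illustrative values, not necessarily this board's clock tree):
+ * with HSE_VALUE = 25 MHz, PLLM = 25, PLLN = 336 and PLLP = 2 the code below computes
+ * PLL_VCO = (25 MHz / 25) * 336 = 336 MHz and SYSCLK = 336 MHz / 2 = 168 MHz; with an
+ * AHB prescaler of 1 the function then reports SystemCoreClock = 168 MHz.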
+ * + * @param None + * @retval None + */ +void SystemCoreClockUpdate(void) +{ + uint32_t tmp = 0, pllvco = 0, pllp = 2, pllsource = 0, pllm = 2; + + /* Get SYSCLK source -------------------------------------------------------*/ + tmp = RCC->CFGR & RCC_CFGR_SWS; + + switch (tmp) + { + case 0x00: /* HSI used as system clock source */ + SystemCoreClock = HSI_VALUE; + break; + case 0x04: /* HSE used as system clock source */ + SystemCoreClock = HSE_VALUE; + break; + case 0x08: /* PLL used as system clock source */ + + /* PLL_VCO = (HSE_VALUE or HSI_VALUE / PLL_M) * PLL_N + SYSCLK = PLL_VCO / PLL_P + */ + pllsource = (RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) >> 22; + pllm = RCC->PLLCFGR & RCC_PLLCFGR_PLLM; + + if (pllsource != 0) + { + /* HSE used as PLL clock source */ + pllvco = (HSE_VALUE / pllm) * ((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 6); + } + else + { + /* HSI used as PLL clock source */ + pllvco = (HSI_VALUE / pllm) * ((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 6); + } + + pllp = (((RCC->PLLCFGR & RCC_PLLCFGR_PLLP) >>16) + 1 ) *2; + SystemCoreClock = pllvco/pllp; + break; + default: + SystemCoreClock = HSI_VALUE; + break; + } + /* Compute HCLK frequency --------------------------------------------------*/ + /* Get HCLK prescaler */ + tmp = AHBPrescTable[((RCC->CFGR & RCC_CFGR_HPRE) >> 4)]; + /* HCLK frequency */ + SystemCoreClock >>= tmp; +} + +#if defined (DATA_IN_ExtSRAM) && defined (DATA_IN_ExtSDRAM) +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx)\ + || defined(STM32F469xx) || defined(STM32F479xx) +/** + * @brief Setup the external memory controller. + * Called in startup_stm32f4xx.s before jump to main. + * This function configures the external memories (SRAM/SDRAM) + * This SRAM/SDRAM will be used as program data memory (including heap and stack). 
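+ *        Note that this variant is only compiled when both DATA_IN_ExtSRAM and
+ *        DATA_IN_ExtSDRAM are uncommented in the Miscellaneous Configuration section
+ *        above (they are left commented out in this project), and the linker script must
+ *        then also place data in the external memory region. Illustration of what would
+ *        be enabled:
+ * @code
+ * #define DATA_IN_ExtSDRAM  // plus a matching external-RAM region in the linker script
+ * @endcode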
+ * @param None + * @retval None + */ +void SystemInit_ExtMemCtl(void) +{ + __IO uint32_t tmp = 0x00; + + register uint32_t tmpreg = 0, timeout = 0xFFFF; + register __IO uint32_t index; + + /* Enable GPIOC, GPIOD, GPIOE, GPIOF, GPIOG, GPIOH and GPIOI interface clock */ + RCC->AHB1ENR |= 0x000001F8; + + /* Delay after an RCC peripheral clock enabling */ + tmp = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOCEN); + + /* Connect PDx pins to FMC Alternate function */ + GPIOD->AFR[0] = 0x00CCC0CC; + GPIOD->AFR[1] = 0xCCCCCCCC; + /* Configure PDx pins in Alternate function mode */ + GPIOD->MODER = 0xAAAA0A8A; + /* Configure PDx pins speed to 100 MHz */ + GPIOD->OSPEEDR = 0xFFFF0FCF; + /* Configure PDx pins Output type to push-pull */ + GPIOD->OTYPER = 0x00000000; + /* No pull-up, pull-down for PDx pins */ + GPIOD->PUPDR = 0x00000000; + + /* Connect PEx pins to FMC Alternate function */ + GPIOE->AFR[0] = 0xC00CC0CC; + GPIOE->AFR[1] = 0xCCCCCCCC; + /* Configure PEx pins in Alternate function mode */ + GPIOE->MODER = 0xAAAA828A; + /* Configure PEx pins speed to 100 MHz */ + GPIOE->OSPEEDR = 0xFFFFC3CF; + /* Configure PEx pins Output type to push-pull */ + GPIOE->OTYPER = 0x00000000; + /* No pull-up, pull-down for PEx pins */ + GPIOE->PUPDR = 0x00000000; + + /* Connect PFx pins to FMC Alternate function */ + GPIOF->AFR[0] = 0xCCCCCCCC; + GPIOF->AFR[1] = 0xCCCCCCCC; + /* Configure PFx pins in Alternate function mode */ + GPIOF->MODER = 0xAA800AAA; + /* Configure PFx pins speed to 50 MHz */ + GPIOF->OSPEEDR = 0xAA800AAA; + /* Configure PFx pins Output type to push-pull */ + GPIOF->OTYPER = 0x00000000; + /* No pull-up, pull-down for PFx pins */ + GPIOF->PUPDR = 0x00000000; + + /* Connect PGx pins to FMC Alternate function */ + GPIOG->AFR[0] = 0xCCCCCCCC; + GPIOG->AFR[1] = 0xCCCCCCCC; + /* Configure PGx pins in Alternate function mode */ + GPIOG->MODER = 0xAAAAAAAA; + /* Configure PGx pins speed to 50 MHz */ + GPIOG->OSPEEDR = 0xAAAAAAAA; + /* Configure PGx pins Output type to push-pull */ + GPIOG->OTYPER = 0x00000000; + /* No pull-up, pull-down for PGx pins */ + GPIOG->PUPDR = 0x00000000; + + /* Connect PHx pins to FMC Alternate function */ + GPIOH->AFR[0] = 0x00C0CC00; + GPIOH->AFR[1] = 0xCCCCCCCC; + /* Configure PHx pins in Alternate function mode */ + GPIOH->MODER = 0xAAAA08A0; + /* Configure PHx pins speed to 50 MHz */ + GPIOH->OSPEEDR = 0xAAAA08A0; + /* Configure PHx pins Output type to push-pull */ + GPIOH->OTYPER = 0x00000000; + /* No pull-up, pull-down for PHx pins */ + GPIOH->PUPDR = 0x00000000; + + /* Connect PIx pins to FMC Alternate function */ + GPIOI->AFR[0] = 0xCCCCCCCC; + GPIOI->AFR[1] = 0x00000CC0; + /* Configure PIx pins in Alternate function mode */ + GPIOI->MODER = 0x0028AAAA; + /* Configure PIx pins speed to 50 MHz */ + GPIOI->OSPEEDR = 0x0028AAAA; + /* Configure PIx pins Output type to push-pull */ + GPIOI->OTYPER = 0x00000000; + /* No pull-up, pull-down for PIx pins */ + GPIOI->PUPDR = 0x00000000; + +/*-- FMC Configuration -------------------------------------------------------*/ + /* Enable the FMC interface clock */ + RCC->AHB3ENR |= 0x00000001; + /* Delay after an RCC peripheral clock enabling */ + tmp = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FMCEN); + + FMC_Bank5_6->SDCR[0] = 0x000019E4; + FMC_Bank5_6->SDTR[0] = 0x01115351; + + /* SDRAM initialization sequence */ + /* Clock enable command */ + FMC_Bank5_6->SDCMR = 0x00000011; + tmpreg = FMC_Bank5_6->SDSR & 0x00000020; + while((tmpreg != 0) && (timeout-- > 0)) + { + tmpreg = FMC_Bank5_6->SDSR & 0x00000020; + } + + /* Delay */ + for 
(index = 0; index<1000; index++); + + /* PALL command */ + FMC_Bank5_6->SDCMR = 0x00000012; + tmpreg = FMC_Bank5_6->SDSR & 0x00000020; + timeout = 0xFFFF; + while((tmpreg != 0) && (timeout-- > 0)) + { + tmpreg = FMC_Bank5_6->SDSR & 0x00000020; + } + + /* Auto refresh command */ + FMC_Bank5_6->SDCMR = 0x00000073; + tmpreg = FMC_Bank5_6->SDSR & 0x00000020; + timeout = 0xFFFF; + while((tmpreg != 0) && (timeout-- > 0)) + { + tmpreg = FMC_Bank5_6->SDSR & 0x00000020; + } + + /* MRD register program */ + FMC_Bank5_6->SDCMR = 0x00046014; + tmpreg = FMC_Bank5_6->SDSR & 0x00000020; + timeout = 0xFFFF; + while((tmpreg != 0) && (timeout-- > 0)) + { + tmpreg = FMC_Bank5_6->SDSR & 0x00000020; + } + + /* Set refresh count */ + tmpreg = FMC_Bank5_6->SDRTR; + FMC_Bank5_6->SDRTR = (tmpreg | (0x0000027C<<1)); + + /* Disable write protection */ + tmpreg = FMC_Bank5_6->SDCR[0]; + FMC_Bank5_6->SDCR[0] = (tmpreg & 0xFFFFFDFF); + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) + /* Configure and enable Bank1_SRAM2 */ + FMC_Bank1->BTCR[2] = 0x00001011; + FMC_Bank1->BTCR[3] = 0x00000201; + FMC_Bank1E->BWTR[2] = 0x0fffffff; +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx */ +#if defined(STM32F469xx) || defined(STM32F479xx) + /* Configure and enable Bank1_SRAM2 */ + FMC_Bank1->BTCR[2] = 0x00001091; + FMC_Bank1->BTCR[3] = 0x00110212; + FMC_Bank1E->BWTR[2] = 0x0fffffff; +#endif /* STM32F469xx || STM32F479xx */ + + (void)(tmp); +} +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ +#elif defined (DATA_IN_ExtSRAM) || defined (DATA_IN_ExtSDRAM) +/** + * @brief Setup the external memory controller. + * Called in startup_stm32f4xx.s before jump to main. + * This function configures the external memories (SRAM/SDRAM) + * This SRAM/SDRAM will be used as program data memory (including heap and stack). 
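+ *        The refresh count written to FMC SDRTR by this function follows the usual
+ *        STM32 FMC SDRAM formula COUNT = (refresh period / number of rows) * SDRAM clock - 20.
+ *        Illustrative numbers only (not necessarily this design's memory): a 64 ms
+ *        refresh period, 4096 rows and a 90 MHz SDRAM clock give
+ *        (64 ms / 4096) * 90 MHz - 20 = 1386.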
+ * @param None + * @retval None + */ +void SystemInit_ExtMemCtl(void) +{ + __IO uint32_t tmp = 0x00; +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx)\ + || defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) +#if defined (DATA_IN_ExtSDRAM) + register uint32_t tmpreg = 0, timeout = 0xFFFF; + register __IO uint32_t index; + +#if defined(STM32F446xx) + /* Enable GPIOA, GPIOC, GPIOD, GPIOE, GPIOF, GPIOG interface + clock */ + RCC->AHB1ENR |= 0x0000007D; +#else + /* Enable GPIOC, GPIOD, GPIOE, GPIOF, GPIOG, GPIOH and GPIOI interface + clock */ + RCC->AHB1ENR |= 0x000001F8; +#endif /* STM32F446xx */ + /* Delay after an RCC peripheral clock enabling */ + tmp = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOCEN); + +#if defined(STM32F446xx) + /* Connect PAx pins to FMC Alternate function */ + GPIOA->AFR[0] |= 0xC0000000; + GPIOA->AFR[1] |= 0x00000000; + /* Configure PDx pins in Alternate function mode */ + GPIOA->MODER |= 0x00008000; + /* Configure PDx pins speed to 50 MHz */ + GPIOA->OSPEEDR |= 0x00008000; + /* Configure PDx pins Output type to push-pull */ + GPIOA->OTYPER |= 0x00000000; + /* No pull-up, pull-down for PDx pins */ + GPIOA->PUPDR |= 0x00000000; + + /* Connect PCx pins to FMC Alternate function */ + GPIOC->AFR[0] |= 0x00CC0000; + GPIOC->AFR[1] |= 0x00000000; + /* Configure PDx pins in Alternate function mode */ + GPIOC->MODER |= 0x00000A00; + /* Configure PDx pins speed to 50 MHz */ + GPIOC->OSPEEDR |= 0x00000A00; + /* Configure PDx pins Output type to push-pull */ + GPIOC->OTYPER |= 0x00000000; + /* No pull-up, pull-down for PDx pins */ + GPIOC->PUPDR |= 0x00000000; +#endif /* STM32F446xx */ + + /* Connect PDx pins to FMC Alternate function */ + GPIOD->AFR[0] = 0x000000CC; + GPIOD->AFR[1] = 0xCC000CCC; + /* Configure PDx pins in Alternate function mode */ + GPIOD->MODER = 0xA02A000A; + /* Configure PDx pins speed to 50 MHz */ + GPIOD->OSPEEDR = 0xA02A000A; + /* Configure PDx pins Output type to push-pull */ + GPIOD->OTYPER = 0x00000000; + /* No pull-up, pull-down for PDx pins */ + GPIOD->PUPDR = 0x00000000; + + /* Connect PEx pins to FMC Alternate function */ + GPIOE->AFR[0] = 0xC00000CC; + GPIOE->AFR[1] = 0xCCCCCCCC; + /* Configure PEx pins in Alternate function mode */ + GPIOE->MODER = 0xAAAA800A; + /* Configure PEx pins speed to 50 MHz */ + GPIOE->OSPEEDR = 0xAAAA800A; + /* Configure PEx pins Output type to push-pull */ + GPIOE->OTYPER = 0x00000000; + /* No pull-up, pull-down for PEx pins */ + GPIOE->PUPDR = 0x00000000; + + /* Connect PFx pins to FMC Alternate function */ + GPIOF->AFR[0] = 0xCCCCCCCC; + GPIOF->AFR[1] = 0xCCCCCCCC; + /* Configure PFx pins in Alternate function mode */ + GPIOF->MODER = 0xAA800AAA; + /* Configure PFx pins speed to 50 MHz */ + GPIOF->OSPEEDR = 0xAA800AAA; + /* Configure PFx pins Output type to push-pull */ + GPIOF->OTYPER = 0x00000000; + /* No pull-up, pull-down for PFx pins */ + GPIOF->PUPDR = 0x00000000; + + /* Connect PGx pins to FMC Alternate function */ + GPIOG->AFR[0] = 0xCCCCCCCC; + GPIOG->AFR[1] = 0xCCCCCCCC; + /* Configure PGx pins in Alternate function mode */ + GPIOG->MODER = 0xAAAAAAAA; + /* Configure PGx pins speed to 50 MHz */ + GPIOG->OSPEEDR = 0xAAAAAAAA; + /* Configure PGx pins Output type to push-pull */ + GPIOG->OTYPER = 0x00000000; + /* No pull-up, pull-down for PGx pins */ + GPIOG->PUPDR = 0x00000000; + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx)\ + || defined(STM32F469xx) || defined(STM32F479xx) + /* Connect PHx pins 
to FMC Alternate function */ + GPIOH->AFR[0] = 0x00C0CC00; + GPIOH->AFR[1] = 0xCCCCCCCC; + /* Configure PHx pins in Alternate function mode */ + GPIOH->MODER = 0xAAAA08A0; + /* Configure PHx pins speed to 50 MHz */ + GPIOH->OSPEEDR = 0xAAAA08A0; + /* Configure PHx pins Output type to push-pull */ + GPIOH->OTYPER = 0x00000000; + /* No pull-up, pull-down for PHx pins */ + GPIOH->PUPDR = 0x00000000; + + /* Connect PIx pins to FMC Alternate function */ + GPIOI->AFR[0] = 0xCCCCCCCC; + GPIOI->AFR[1] = 0x00000CC0; + /* Configure PIx pins in Alternate function mode */ + GPIOI->MODER = 0x0028AAAA; + /* Configure PIx pins speed to 50 MHz */ + GPIOI->OSPEEDR = 0x0028AAAA; + /* Configure PIx pins Output type to push-pull */ + GPIOI->OTYPER = 0x00000000; + /* No pull-up, pull-down for PIx pins */ + GPIOI->PUPDR = 0x00000000; +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ + +/*-- FMC Configuration -------------------------------------------------------*/ + /* Enable the FMC interface clock */ + RCC->AHB3ENR |= 0x00000001; + /* Delay after an RCC peripheral clock enabling */ + tmp = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FMCEN); + + /* Configure and enable SDRAM bank1 */ +#if defined(STM32F446xx) + FMC_Bank5_6->SDCR[0] = 0x00001954; +#else + FMC_Bank5_6->SDCR[0] = 0x000019E4; +#endif /* STM32F446xx */ + FMC_Bank5_6->SDTR[0] = 0x01115351; + + /* SDRAM initialization sequence */ + /* Clock enable command */ + FMC_Bank5_6->SDCMR = 0x00000011; + tmpreg = FMC_Bank5_6->SDSR & 0x00000020; + while((tmpreg != 0) && (timeout-- > 0)) + { + tmpreg = FMC_Bank5_6->SDSR & 0x00000020; + } + + /* Delay */ + for (index = 0; index<1000; index++); + + /* PALL command */ + FMC_Bank5_6->SDCMR = 0x00000012; + tmpreg = FMC_Bank5_6->SDSR & 0x00000020; + timeout = 0xFFFF; + while((tmpreg != 0) && (timeout-- > 0)) + { + tmpreg = FMC_Bank5_6->SDSR & 0x00000020; + } + + /* Auto refresh command */ +#if defined(STM32F446xx) + FMC_Bank5_6->SDCMR = 0x000000F3; +#else + FMC_Bank5_6->SDCMR = 0x00000073; +#endif /* STM32F446xx */ + tmpreg = FMC_Bank5_6->SDSR & 0x00000020; + timeout = 0xFFFF; + while((tmpreg != 0) && (timeout-- > 0)) + { + tmpreg = FMC_Bank5_6->SDSR & 0x00000020; + } + + /* MRD register program */ +#if defined(STM32F446xx) + FMC_Bank5_6->SDCMR = 0x00044014; +#else + FMC_Bank5_6->SDCMR = 0x00046014; +#endif /* STM32F446xx */ + tmpreg = FMC_Bank5_6->SDSR & 0x00000020; + timeout = 0xFFFF; + while((tmpreg != 0) && (timeout-- > 0)) + { + tmpreg = FMC_Bank5_6->SDSR & 0x00000020; + } + + /* Set refresh count */ + tmpreg = FMC_Bank5_6->SDRTR; +#if defined(STM32F446xx) + FMC_Bank5_6->SDRTR = (tmpreg | (0x0000050C<<1)); +#else + FMC_Bank5_6->SDRTR = (tmpreg | (0x0000027C<<1)); +#endif /* STM32F446xx */ + + /* Disable write protection */ + tmpreg = FMC_Bank5_6->SDCR[0]; + FMC_Bank5_6->SDCR[0] = (tmpreg & 0xFFFFFDFF); +#endif /* DATA_IN_ExtSDRAM */ +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F446xx || STM32F469xx || STM32F479xx */ + +#if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx) || defined(STM32F417xx)\ + || defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx)\ + || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) + +#if defined(DATA_IN_ExtSRAM) +/*-- GPIOs Configuration -----------------------------------------------------*/ + /* Enable GPIOD, GPIOE, GPIOF and GPIOG interface clock */ + RCC->AHB1ENR |= 0x00000078; + /* Delay after an RCC 
peripheral clock enabling */ + tmp = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN); + + /* Connect PDx pins to FMC Alternate function */ + GPIOD->AFR[0] = 0x00CCC0CC; + GPIOD->AFR[1] = 0xCCCCCCCC; + /* Configure PDx pins in Alternate function mode */ + GPIOD->MODER = 0xAAAA0A8A; + /* Configure PDx pins speed to 100 MHz */ + GPIOD->OSPEEDR = 0xFFFF0FCF; + /* Configure PDx pins Output type to push-pull */ + GPIOD->OTYPER = 0x00000000; + /* No pull-up, pull-down for PDx pins */ + GPIOD->PUPDR = 0x00000000; + + /* Connect PEx pins to FMC Alternate function */ + GPIOE->AFR[0] = 0xC00CC0CC; + GPIOE->AFR[1] = 0xCCCCCCCC; + /* Configure PEx pins in Alternate function mode */ + GPIOE->MODER = 0xAAAA828A; + /* Configure PEx pins speed to 100 MHz */ + GPIOE->OSPEEDR = 0xFFFFC3CF; + /* Configure PEx pins Output type to push-pull */ + GPIOE->OTYPER = 0x00000000; + /* No pull-up, pull-down for PEx pins */ + GPIOE->PUPDR = 0x00000000; + + /* Connect PFx pins to FMC Alternate function */ + GPIOF->AFR[0] = 0x00CCCCCC; + GPIOF->AFR[1] = 0xCCCC0000; + /* Configure PFx pins in Alternate function mode */ + GPIOF->MODER = 0xAA000AAA; + /* Configure PFx pins speed to 100 MHz */ + GPIOF->OSPEEDR = 0xFF000FFF; + /* Configure PFx pins Output type to push-pull */ + GPIOF->OTYPER = 0x00000000; + /* No pull-up, pull-down for PFx pins */ + GPIOF->PUPDR = 0x00000000; + + /* Connect PGx pins to FMC Alternate function */ + GPIOG->AFR[0] = 0x00CCCCCC; + GPIOG->AFR[1] = 0x000000C0; + /* Configure PGx pins in Alternate function mode */ + GPIOG->MODER = 0x00085AAA; + /* Configure PGx pins speed to 100 MHz */ + GPIOG->OSPEEDR = 0x000CAFFF; + /* Configure PGx pins Output type to push-pull */ + GPIOG->OTYPER = 0x00000000; + /* No pull-up, pull-down for PGx pins */ + GPIOG->PUPDR = 0x00000000; + +/*-- FMC/FSMC Configuration --------------------------------------------------*/ + /* Enable the FMC/FSMC interface clock */ + RCC->AHB3ENR |= 0x00000001; + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) + /* Delay after an RCC peripheral clock enabling */ + tmp = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FMCEN); + /* Configure and enable Bank1_SRAM2 */ + FMC_Bank1->BTCR[2] = 0x00001011; + FMC_Bank1->BTCR[3] = 0x00000201; + FMC_Bank1E->BWTR[2] = 0x0fffffff; +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx */ +#if defined(STM32F469xx) || defined(STM32F479xx) + /* Delay after an RCC peripheral clock enabling */ + tmp = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FMCEN); + /* Configure and enable Bank1_SRAM2 */ + FMC_Bank1->BTCR[2] = 0x00001091; + FMC_Bank1->BTCR[3] = 0x00110212; + FMC_Bank1E->BWTR[2] = 0x0fffffff; +#endif /* STM32F469xx || STM32F479xx */ +#if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx)|| defined(STM32F417xx)\ + || defined(STM32F412Zx) || defined(STM32F412Vx) + /* Delay after an RCC peripheral clock enabling */ + tmp = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FSMCEN); + /* Configure and enable Bank1_SRAM2 */ + FSMC_Bank1->BTCR[2] = 0x00001011; + FSMC_Bank1->BTCR[3] = 0x00000201; + FSMC_Bank1E->BWTR[2] = 0x0FFFFFFF; +#endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx || STM32F412Zx || STM32F412Vx */ + +#endif /* DATA_IN_ExtSRAM */ +#endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx || STM32F427xx || STM32F437xx ||\ + STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx || STM32F412Zx || STM32F412Vx */ + (void)(tmp); +} +#endif /* DATA_IN_ExtSRAM && DATA_IN_ExtSDRAM */ +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + 
*/ diff --git a/Core/Src/tim.c b/Core/Src/tim.c new file mode 100644 index 0000000..34c3a54 --- /dev/null +++ b/Core/Src/tim.c @@ -0,0 +1,215 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file tim.c + * @brief This file provides code for the configuration + * of the TIM instances. + ****************************************************************************** + * @attention + * + * Copyright (c) 2024 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ +/* USER CODE END Header */ +/* Includes ------------------------------------------------------------------*/ +#include "tim.h" + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +TIM_HandleTypeDef htim2; +TIM_HandleTypeDef htim3; + +/* TIM2 init function */ +void MX_TIM2_Init(void) +{ + + /* USER CODE BEGIN TIM2_Init 0 */ + + /* USER CODE END TIM2_Init 0 */ + + TIM_ClockConfigTypeDef sClockSourceConfig = {0}; + TIM_MasterConfigTypeDef sMasterConfig = {0}; + TIM_OC_InitTypeDef sConfigOC = {0}; + + /* USER CODE BEGIN TIM2_Init 1 */ + + /* USER CODE END TIM2_Init 1 */ + htim2.Instance = TIM2; + htim2.Init.Prescaler = 0; + htim2.Init.CounterMode = TIM_COUNTERMODE_UP; + htim2.Init.Period = 119; + htim2.Init.ClockDivision = TIM_CLOCKDIVISION_DIV1; + htim2.Init.AutoReloadPreload = TIM_AUTORELOAD_PRELOAD_DISABLE; + if (HAL_TIM_Base_Init(&htim2) != HAL_OK) + { + Error_Handler(); + } + sClockSourceConfig.ClockSource = TIM_CLOCKSOURCE_INTERNAL; + if (HAL_TIM_ConfigClockSource(&htim2, &sClockSourceConfig) != HAL_OK) + { + Error_Handler(); + } + if (HAL_TIM_PWM_Init(&htim2) != HAL_OK) + { + Error_Handler(); + } + sMasterConfig.MasterOutputTrigger = TIM_TRGO_RESET; + sMasterConfig.MasterSlaveMode = TIM_MASTERSLAVEMODE_DISABLE; + if (HAL_TIMEx_MasterConfigSynchronization(&htim2, &sMasterConfig) != HAL_OK) + { + Error_Handler(); + } + sConfigOC.OCMode = TIM_OCMODE_PWM1; + sConfigOC.Pulse = 60; + sConfigOC.OCPolarity = TIM_OCPOLARITY_HIGH; + sConfigOC.OCFastMode = TIM_OCFAST_DISABLE; + if (HAL_TIM_PWM_ConfigChannel(&htim2, &sConfigOC, TIM_CHANNEL_1) != HAL_OK) + { + Error_Handler(); + } + /* USER CODE BEGIN TIM2_Init 2 */ + + /* USER CODE END TIM2_Init 2 */ + HAL_TIM_MspPostInit(&htim2); + +} +/* TIM3 init function */ +void MX_TIM3_Init(void) +{ + + /* USER CODE BEGIN TIM3_Init 0 */ + + /* USER CODE END TIM3_Init 0 */ + + TIM_ClockConfigTypeDef sClockSourceConfig = {0}; + TIM_MasterConfigTypeDef sMasterConfig = {0}; + + /* USER CODE BEGIN TIM3_Init 1 */ + + /* USER CODE END TIM3_Init 1 */ + htim3.Instance = TIM3; + htim3.Init.Prescaler = 55295; + htim3.Init.CounterMode = TIM_COUNTERMODE_UP; + htim3.Init.Period = 999; + htim3.Init.ClockDivision = TIM_CLOCKDIVISION_DIV1; + htim3.Init.AutoReloadPreload = TIM_AUTORELOAD_PRELOAD_ENABLE; + if (HAL_TIM_Base_Init(&htim3) != HAL_OK) + { + Error_Handler(); + } + sClockSourceConfig.ClockSource = TIM_CLOCKSOURCE_INTERNAL; + if (HAL_TIM_ConfigClockSource(&htim3, &sClockSourceConfig) != HAL_OK) + { + Error_Handler(); + } + sMasterConfig.MasterOutputTrigger = TIM_TRGO_RESET; + sMasterConfig.MasterSlaveMode = TIM_MASTERSLAVEMODE_DISABLE; + if (HAL_TIMEx_MasterConfigSynchronization(&htim3, &sMasterConfig) != HAL_OK) + { + Error_Handler(); + } + /* USER CODE 
BEGIN TIM3_Init 2 */ + HAL_TIM_Base_Start_IT(&htim3); + /* USER CODE END TIM3_Init 2 */ + +} + +void HAL_TIM_Base_MspInit(TIM_HandleTypeDef* tim_baseHandle) +{ + + if(tim_baseHandle->Instance==TIM2) + { + /* USER CODE BEGIN TIM2_MspInit 0 */ + + /* USER CODE END TIM2_MspInit 0 */ + /* TIM2 clock enable */ + __HAL_RCC_TIM2_CLK_ENABLE(); + /* USER CODE BEGIN TIM2_MspInit 1 */ + + /* USER CODE END TIM2_MspInit 1 */ + } + else if(tim_baseHandle->Instance==TIM3) + { + /* USER CODE BEGIN TIM3_MspInit 0 */ + + /* USER CODE END TIM3_MspInit 0 */ + /* TIM3 clock enable */ + __HAL_RCC_TIM3_CLK_ENABLE(); + + /* TIM3 interrupt Init */ + HAL_NVIC_SetPriority(TIM3_IRQn, 0, 0); + HAL_NVIC_EnableIRQ(TIM3_IRQn); + /* USER CODE BEGIN TIM3_MspInit 1 */ + + /* USER CODE END TIM3_MspInit 1 */ + } +} +void HAL_TIM_MspPostInit(TIM_HandleTypeDef* timHandle) +{ + + GPIO_InitTypeDef GPIO_InitStruct = {0}; + if(timHandle->Instance==TIM2) + { + /* USER CODE BEGIN TIM2_MspPostInit 0 */ + + /* USER CODE END TIM2_MspPostInit 0 */ + + __HAL_RCC_GPIOA_CLK_ENABLE(); + /**TIM2 GPIO Configuration + PA15 ------> TIM2_CH1 + */ + GPIO_InitStruct.Pin = HART_CLK_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF1_TIM2; + HAL_GPIO_Init(HART_CLK_GPIO_Port, &GPIO_InitStruct); + + /* USER CODE BEGIN TIM2_MspPostInit 1 */ + + /* USER CODE END TIM2_MspPostInit 1 */ + } + +} + +void HAL_TIM_Base_MspDeInit(TIM_HandleTypeDef* tim_baseHandle) +{ + + if(tim_baseHandle->Instance==TIM2) + { + /* USER CODE BEGIN TIM2_MspDeInit 0 */ + + /* USER CODE END TIM2_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_TIM2_CLK_DISABLE(); + /* USER CODE BEGIN TIM2_MspDeInit 1 */ + + /* USER CODE END TIM2_MspDeInit 1 */ + } + else if(tim_baseHandle->Instance==TIM3) + { + /* USER CODE BEGIN TIM3_MspDeInit 0 */ + + /* USER CODE END TIM3_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_TIM3_CLK_DISABLE(); + + /* TIM3 interrupt Deinit */ + HAL_NVIC_DisableIRQ(TIM3_IRQn); + /* USER CODE BEGIN TIM3_MspDeInit 1 */ + + /* USER CODE END TIM3_MspDeInit 1 */ + } +} + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ diff --git a/Core/Src/usart.c b/Core/Src/usart.c new file mode 100644 index 0000000..c5f9bd8 --- /dev/null +++ b/Core/Src/usart.c @@ -0,0 +1,577 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file usart.c + * @brief This file provides code for the configuration + * of the USART instances. + ****************************************************************************** + * @attention + * + * Copyright (c) 2025 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
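+ *
+ * Note on the HART-side ports configured below: the HAL counts the parity bit in the
+ * word length, so UART_WORDLENGTH_9B together with UART_PARITY_ODD gives 8 data bits
+ * plus one odd-parity bit. Combined with 1200 baud and 1 stop bit this matches the
+ * HART physical-layer framing (1200 bit/s, 8-O-1) used on UART5 and USART2, while the
+ * remaining ports stay at 115200 8-N-1.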
+ * + ****************************************************************************** + */ +/* USER CODE END Header */ +/* Includes ------------------------------------------------------------------*/ +#include "usart.h" + +/* USER CODE BEGIN 0 */ +#include "ht1200m.h" +/* USER CODE END 0 */ + +UART_HandleTypeDef huart4; +UART_HandleTypeDef huart5; +UART_HandleTypeDef huart2; +UART_HandleTypeDef huart3; +UART_HandleTypeDef huart6; +DMA_HandleTypeDef hdma_uart4_rx; +DMA_HandleTypeDef hdma_uart4_tx; +DMA_HandleTypeDef hdma_usart3_rx; +DMA_HandleTypeDef hdma_usart3_tx; +DMA_HandleTypeDef hdma_usart6_rx; +DMA_HandleTypeDef hdma_usart6_tx; + +/* UART4 init function */ +void MX_UART4_Init(void) +{ + + /* USER CODE BEGIN UART4_Init 0 */ + + /* USER CODE END UART4_Init 0 */ + + /* USER CODE BEGIN UART4_Init 1 */ + + /* USER CODE END UART4_Init 1 */ + huart4.Instance = UART4; + huart4.Init.BaudRate = 115200; + huart4.Init.WordLength = UART_WORDLENGTH_8B; + huart4.Init.StopBits = UART_STOPBITS_1; + huart4.Init.Parity = UART_PARITY_NONE; + huart4.Init.Mode = UART_MODE_TX_RX; + huart4.Init.HwFlowCtl = UART_HWCONTROL_NONE; + huart4.Init.OverSampling = UART_OVERSAMPLING_16; + if (HAL_UART_Init(&huart4) != HAL_OK) + { + Error_Handler(); + } + /* USER CODE BEGIN UART4_Init 2 */ + //__HAL_UART_ENABLE_IT(&huart4, UART_IT_IDLE); // Enable the IDLE interrupt + /* USER CODE END UART4_Init 2 */ +} +/* UART5 init function */ +void MX_UART5_Init(void) +{ + + /* USER CODE BEGIN UART5_Init 0 */ + + /* USER CODE END UART5_Init 0 */ + + /* USER CODE BEGIN UART5_Init 1 */ + + /* USER CODE END UART5_Init 1 */ + huart5.Instance = UART5; + huart5.Init.BaudRate = 1200; + huart5.Init.WordLength = UART_WORDLENGTH_9B; + huart5.Init.StopBits = UART_STOPBITS_1; + huart5.Init.Parity = UART_PARITY_ODD; + huart5.Init.Mode = UART_MODE_TX_RX; + huart5.Init.HwFlowCtl = UART_HWCONTROL_NONE; + huart5.Init.OverSampling = UART_OVERSAMPLING_16; + if (HAL_UART_Init(&huart5) != HAL_OK) + { + Error_Handler(); + } + /* USER CODE BEGIN UART5_Init 2 */ + // HAL_UART_Receive_IT(&huart5, &hart1_uart5.rx_data, 1); + __HAL_UART_ENABLE_IT(&huart5, UART_IT_RXNE); // Enable the RXNE (receive) interrupt + __HAL_UART_ENABLE_IT(&huart5, UART_IT_IDLE); // Enable the IDLE (idle-line) interrupt + /* USER CODE END UART5_Init 2 */ +} +/* USART2 init function */ + +void MX_USART2_UART_Init(void) +{ + + /* USER CODE BEGIN USART2_Init 0 */ + + /* USER CODE END USART2_Init 0 */ + + /* USER CODE BEGIN USART2_Init 1 */ + + /* USER CODE END USART2_Init 1 */ + huart2.Instance = USART2; + huart2.Init.BaudRate = 1200; + huart2.Init.WordLength = UART_WORDLENGTH_9B; + huart2.Init.StopBits = UART_STOPBITS_1; + huart2.Init.Parity = UART_PARITY_ODD; + huart2.Init.Mode = UART_MODE_TX_RX; + huart2.Init.HwFlowCtl = UART_HWCONTROL_NONE; + huart2.Init.OverSampling = UART_OVERSAMPLING_16; + if (HAL_UART_Init(&huart2) != HAL_OK) + { + Error_Handler(); + } + /* USER CODE BEGIN USART2_Init 2 */ + __HAL_UART_ENABLE_IT(&huart2, UART_IT_RXNE); // Enable the RXNE (receive) interrupt + __HAL_UART_ENABLE_IT(&huart2, UART_IT_IDLE); // Enable the IDLE (idle-line) interrupt + /* USER CODE END USART2_Init 2 */ +} +/* USART3 init function */ + +void MX_USART3_UART_Init(void) +{ + + /* USER CODE BEGIN USART3_Init 0 */ + + /* USER CODE END USART3_Init 0 */ + + /* USER CODE BEGIN USART3_Init 1 */ + + /* USER CODE END USART3_Init 1 */ + huart3.Instance = USART3; + huart3.Init.BaudRate = 115200; + huart3.Init.WordLength = UART_WORDLENGTH_8B; + huart3.Init.StopBits = UART_STOPBITS_1; + huart3.Init.Parity = UART_PARITY_NONE; + huart3.Init.Mode = UART_MODE_TX_RX; + huart3.Init.HwFlowCtl = UART_HWCONTROL_NONE; + huart3.Init.OverSampling =
UART_OVERSAMPLING_16; + if (HAL_UART_Init(&huart3) != HAL_OK) + { + Error_Handler(); + } + /* USER CODE BEGIN USART3_Init 2 */ + + /* USER CODE END USART3_Init 2 */ +} +/* USART6 init function */ + +void MX_USART6_UART_Init(void) +{ + + /* USER CODE BEGIN USART6_Init 0 */ + + /* USER CODE END USART6_Init 0 */ + + /* USER CODE BEGIN USART6_Init 1 */ + + /* USER CODE END USART6_Init 1 */ + huart6.Instance = USART6; + huart6.Init.BaudRate = 115200; + huart6.Init.WordLength = UART_WORDLENGTH_8B; + huart6.Init.StopBits = UART_STOPBITS_1; + huart6.Init.Parity = UART_PARITY_NONE; + huart6.Init.Mode = UART_MODE_TX_RX; + huart6.Init.HwFlowCtl = UART_HWCONTROL_NONE; + huart6.Init.OverSampling = UART_OVERSAMPLING_16; + if (HAL_UART_Init(&huart6) != HAL_OK) + { + Error_Handler(); + } + /* USER CODE BEGIN USART6_Init 2 */ + + /* USER CODE END USART6_Init 2 */ +} + +void HAL_UART_MspInit(UART_HandleTypeDef *uartHandle) +{ + + GPIO_InitTypeDef GPIO_InitStruct = {0}; + if (uartHandle->Instance == UART4) + { + /* USER CODE BEGIN UART4_MspInit 0 */ + + /* USER CODE END UART4_MspInit 0 */ + /* UART4 clock enable */ + __HAL_RCC_UART4_CLK_ENABLE(); + + __HAL_RCC_GPIOC_CLK_ENABLE(); + /**UART4 GPIO Configuration + PC10 ------> UART4_TX + PC11 ------> UART4_RX + */ + GPIO_InitStruct.Pin = LCD_TX_Pin | LCD_RX_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF8_UART4; + HAL_GPIO_Init(GPIOC, &GPIO_InitStruct); + + /* UART4 DMA Init */ + /* UART4_RX Init */ + hdma_uart4_rx.Instance = DMA1_Stream2; + hdma_uart4_rx.Init.Channel = DMA_CHANNEL_4; + hdma_uart4_rx.Init.Direction = DMA_PERIPH_TO_MEMORY; + hdma_uart4_rx.Init.PeriphInc = DMA_PINC_DISABLE; + hdma_uart4_rx.Init.MemInc = DMA_MINC_ENABLE; + hdma_uart4_rx.Init.PeriphDataAlignment = DMA_PDATAALIGN_BYTE; + hdma_uart4_rx.Init.MemDataAlignment = DMA_MDATAALIGN_BYTE; + hdma_uart4_rx.Init.Mode = DMA_NORMAL; + hdma_uart4_rx.Init.Priority = DMA_PRIORITY_VERY_HIGH; + hdma_uart4_rx.Init.FIFOMode = DMA_FIFOMODE_DISABLE; + if (HAL_DMA_Init(&hdma_uart4_rx) != HAL_OK) + { + Error_Handler(); + } + + __HAL_LINKDMA(uartHandle, hdmarx, hdma_uart4_rx); + + /* UART4_TX Init */ + hdma_uart4_tx.Instance = DMA1_Stream4; + hdma_uart4_tx.Init.Channel = DMA_CHANNEL_4; + hdma_uart4_tx.Init.Direction = DMA_MEMORY_TO_PERIPH; + hdma_uart4_tx.Init.PeriphInc = DMA_PINC_DISABLE; + hdma_uart4_tx.Init.MemInc = DMA_MINC_ENABLE; + hdma_uart4_tx.Init.PeriphDataAlignment = DMA_PDATAALIGN_BYTE; + hdma_uart4_tx.Init.MemDataAlignment = DMA_MDATAALIGN_BYTE; + hdma_uart4_tx.Init.Mode = DMA_NORMAL; + hdma_uart4_tx.Init.Priority = DMA_PRIORITY_VERY_HIGH; + hdma_uart4_tx.Init.FIFOMode = DMA_FIFOMODE_DISABLE; + if (HAL_DMA_Init(&hdma_uart4_tx) != HAL_OK) + { + Error_Handler(); + } + + __HAL_LINKDMA(uartHandle, hdmatx, hdma_uart4_tx); + + /* UART4 interrupt Init */ + HAL_NVIC_SetPriority(UART4_IRQn, 0, 0); + HAL_NVIC_EnableIRQ(UART4_IRQn); + /* USER CODE BEGIN UART4_MspInit 1 */ + + /* USER CODE END UART4_MspInit 1 */ + } + else if (uartHandle->Instance == UART5) + { + /* USER CODE BEGIN UART5_MspInit 0 */ + + /* USER CODE END UART5_MspInit 0 */ + /* UART5 clock enable */ + __HAL_RCC_UART5_CLK_ENABLE(); + + __HAL_RCC_GPIOC_CLK_ENABLE(); + __HAL_RCC_GPIOD_CLK_ENABLE(); + /**UART5 GPIO Configuration + PC12 ------> UART5_TX + PD2 ------> UART5_RX + */ + GPIO_InitStruct.Pin = HART1_TX_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed 
= GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF8_UART5; + HAL_GPIO_Init(HART1_TX_GPIO_Port, &GPIO_InitStruct); + + GPIO_InitStruct.Pin = HART1_RX_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF8_UART5; + HAL_GPIO_Init(HART1_RX_GPIO_Port, &GPIO_InitStruct); + + /* UART5 interrupt Init */ + HAL_NVIC_SetPriority(UART5_IRQn, 0, 0); + HAL_NVIC_EnableIRQ(UART5_IRQn); + /* USER CODE BEGIN UART5_MspInit 1 */ + + /* USER CODE END UART5_MspInit 1 */ + } + else if (uartHandle->Instance == USART2) + { + /* USER CODE BEGIN USART2_MspInit 0 */ + + /* USER CODE END USART2_MspInit 0 */ + /* USART2 clock enable */ + __HAL_RCC_USART2_CLK_ENABLE(); + + __HAL_RCC_GPIOD_CLK_ENABLE(); + /**USART2 GPIO Configuration + PD5 ------> USART2_TX + PD6 ------> USART2_RX + */ + GPIO_InitStruct.Pin = HART2_TX_Pin | HART2_RX_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF7_USART2; + HAL_GPIO_Init(GPIOD, &GPIO_InitStruct); + + /* USART2 interrupt Init */ + HAL_NVIC_SetPriority(USART2_IRQn, 0, 0); + HAL_NVIC_EnableIRQ(USART2_IRQn); + /* USER CODE BEGIN USART2_MspInit 1 */ + + /* USER CODE END USART2_MspInit 1 */ + } + else if (uartHandle->Instance == USART3) + { + /* USER CODE BEGIN USART3_MspInit 0 */ + + /* USER CODE END USART3_MspInit 0 */ + /* USART3 clock enable */ + __HAL_RCC_USART3_CLK_ENABLE(); + + __HAL_RCC_GPIOD_CLK_ENABLE(); + /**USART3 GPIO Configuration + PD8 ------> USART3_TX + PD9 ------> USART3_RX + */ + GPIO_InitStruct.Pin = BLE2_TX_Pin | BLE2_RX_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF7_USART3; + HAL_GPIO_Init(GPIOD, &GPIO_InitStruct); + + /* USART3 DMA Init */ + /* USART3_RX Init */ + hdma_usart3_rx.Instance = DMA1_Stream1; + hdma_usart3_rx.Init.Channel = DMA_CHANNEL_4; + hdma_usart3_rx.Init.Direction = DMA_PERIPH_TO_MEMORY; + hdma_usart3_rx.Init.PeriphInc = DMA_PINC_DISABLE; + hdma_usart3_rx.Init.MemInc = DMA_MINC_ENABLE; + hdma_usart3_rx.Init.PeriphDataAlignment = DMA_PDATAALIGN_BYTE; + hdma_usart3_rx.Init.MemDataAlignment = DMA_MDATAALIGN_BYTE; + hdma_usart3_rx.Init.Mode = DMA_NORMAL; + hdma_usart3_rx.Init.Priority = DMA_PRIORITY_LOW; + hdma_usart3_rx.Init.FIFOMode = DMA_FIFOMODE_DISABLE; + if (HAL_DMA_Init(&hdma_usart3_rx) != HAL_OK) + { + Error_Handler(); + } + + __HAL_LINKDMA(uartHandle, hdmarx, hdma_usart3_rx); + + /* USART3_TX Init */ + hdma_usart3_tx.Instance = DMA1_Stream3; + hdma_usart3_tx.Init.Channel = DMA_CHANNEL_4; + hdma_usart3_tx.Init.Direction = DMA_MEMORY_TO_PERIPH; + hdma_usart3_tx.Init.PeriphInc = DMA_PINC_DISABLE; + hdma_usart3_tx.Init.MemInc = DMA_MINC_ENABLE; + hdma_usart3_tx.Init.PeriphDataAlignment = DMA_PDATAALIGN_BYTE; + hdma_usart3_tx.Init.MemDataAlignment = DMA_MDATAALIGN_BYTE; + hdma_usart3_tx.Init.Mode = DMA_NORMAL; + hdma_usart3_tx.Init.Priority = DMA_PRIORITY_LOW; + hdma_usart3_tx.Init.FIFOMode = DMA_FIFOMODE_DISABLE; + if (HAL_DMA_Init(&hdma_usart3_tx) != HAL_OK) + { + Error_Handler(); + } + + __HAL_LINKDMA(uartHandle, hdmatx, hdma_usart3_tx); + + /* USART3 interrupt Init */ + HAL_NVIC_SetPriority(USART3_IRQn, 0, 0); + HAL_NVIC_EnableIRQ(USART3_IRQn); + /* USER CODE BEGIN USART3_MspInit 1 */ + + /* USER CODE END USART3_MspInit 1 */ + } + else if (uartHandle->Instance 
== USART6) + { + /* USER CODE BEGIN USART6_MspInit 0 */ + + /* USER CODE END USART6_MspInit 0 */ + /* USART6 clock enable */ + __HAL_RCC_USART6_CLK_ENABLE(); + + __HAL_RCC_GPIOC_CLK_ENABLE(); + /**USART6 GPIO Configuration + PC6 ------> USART6_TX + PC7 ------> USART6_RX + */ + GPIO_InitStruct.Pin = BLE1_TX_Pin | BLE1_RX_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF8_USART6; + HAL_GPIO_Init(GPIOC, &GPIO_InitStruct); + + /* USART6 DMA Init */ + /* USART6_RX Init */ + hdma_usart6_rx.Instance = DMA2_Stream1; + hdma_usart6_rx.Init.Channel = DMA_CHANNEL_5; + hdma_usart6_rx.Init.Direction = DMA_PERIPH_TO_MEMORY; + hdma_usart6_rx.Init.PeriphInc = DMA_PINC_DISABLE; + hdma_usart6_rx.Init.MemInc = DMA_MINC_ENABLE; + hdma_usart6_rx.Init.PeriphDataAlignment = DMA_PDATAALIGN_BYTE; + hdma_usart6_rx.Init.MemDataAlignment = DMA_MDATAALIGN_BYTE; + hdma_usart6_rx.Init.Mode = DMA_NORMAL; + hdma_usart6_rx.Init.Priority = DMA_PRIORITY_LOW; + hdma_usart6_rx.Init.FIFOMode = DMA_FIFOMODE_DISABLE; + if (HAL_DMA_Init(&hdma_usart6_rx) != HAL_OK) + { + Error_Handler(); + } + + __HAL_LINKDMA(uartHandle, hdmarx, hdma_usart6_rx); + + /* USART6_TX Init */ + hdma_usart6_tx.Instance = DMA2_Stream6; + hdma_usart6_tx.Init.Channel = DMA_CHANNEL_5; + hdma_usart6_tx.Init.Direction = DMA_MEMORY_TO_PERIPH; + hdma_usart6_tx.Init.PeriphInc = DMA_PINC_DISABLE; + hdma_usart6_tx.Init.MemInc = DMA_MINC_ENABLE; + hdma_usart6_tx.Init.PeriphDataAlignment = DMA_PDATAALIGN_BYTE; + hdma_usart6_tx.Init.MemDataAlignment = DMA_MDATAALIGN_BYTE; + hdma_usart6_tx.Init.Mode = DMA_NORMAL; + hdma_usart6_tx.Init.Priority = DMA_PRIORITY_LOW; + hdma_usart6_tx.Init.FIFOMode = DMA_FIFOMODE_DISABLE; + if (HAL_DMA_Init(&hdma_usart6_tx) != HAL_OK) + { + Error_Handler(); + } + + __HAL_LINKDMA(uartHandle, hdmatx, hdma_usart6_tx); + + /* USART6 interrupt Init */ + HAL_NVIC_SetPriority(USART6_IRQn, 0, 0); + HAL_NVIC_EnableIRQ(USART6_IRQn); + /* USER CODE BEGIN USART6_MspInit 1 */ + + /* USER CODE END USART6_MspInit 1 */ + } +} + +void HAL_UART_MspDeInit(UART_HandleTypeDef *uartHandle) +{ + + if (uartHandle->Instance == UART4) + { + /* USER CODE BEGIN UART4_MspDeInit 0 */ + + /* USER CODE END UART4_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_UART4_CLK_DISABLE(); + + /**UART4 GPIO Configuration + PC10 ------> UART4_TX + PC11 ------> UART4_RX + */ + HAL_GPIO_DeInit(GPIOC, LCD_TX_Pin | LCD_RX_Pin); + + /* UART4 DMA DeInit */ + HAL_DMA_DeInit(uartHandle->hdmarx); + HAL_DMA_DeInit(uartHandle->hdmatx); + + /* UART4 interrupt Deinit */ + HAL_NVIC_DisableIRQ(UART4_IRQn); + /* USER CODE BEGIN UART4_MspDeInit 1 */ + + /* USER CODE END UART4_MspDeInit 1 */ + } + else if (uartHandle->Instance == UART5) + { + /* USER CODE BEGIN UART5_MspDeInit 0 */ + + /* USER CODE END UART5_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_UART5_CLK_DISABLE(); + + /**UART5 GPIO Configuration + PC12 ------> UART5_TX + PD2 ------> UART5_RX + */ + HAL_GPIO_DeInit(HART1_TX_GPIO_Port, HART1_TX_Pin); + + HAL_GPIO_DeInit(HART1_RX_GPIO_Port, HART1_RX_Pin); + + /* UART5 interrupt Deinit */ + HAL_NVIC_DisableIRQ(UART5_IRQn); + /* USER CODE BEGIN UART5_MspDeInit 1 */ + + /* USER CODE END UART5_MspDeInit 1 */ + } + else if (uartHandle->Instance == USART2) + { + /* USER CODE BEGIN USART2_MspDeInit 0 */ + + /* USER CODE END USART2_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_USART2_CLK_DISABLE(); + + /**USART2 GPIO Configuration + PD5 
------> USART2_TX + PD6 ------> USART2_RX + */ + HAL_GPIO_DeInit(GPIOD, HART2_TX_Pin | HART2_RX_Pin); + + /* USART2 interrupt Deinit */ + HAL_NVIC_DisableIRQ(USART2_IRQn); + /* USER CODE BEGIN USART2_MspDeInit 1 */ + + /* USER CODE END USART2_MspDeInit 1 */ + } + else if (uartHandle->Instance == USART3) + { + /* USER CODE BEGIN USART3_MspDeInit 0 */ + + /* USER CODE END USART3_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_USART3_CLK_DISABLE(); + + /**USART3 GPIO Configuration + PD8 ------> USART3_TX + PD9 ------> USART3_RX + */ + HAL_GPIO_DeInit(GPIOD, BLE2_TX_Pin | BLE2_RX_Pin); + + /* USART3 DMA DeInit */ + HAL_DMA_DeInit(uartHandle->hdmarx); + HAL_DMA_DeInit(uartHandle->hdmatx); + + /* USART3 interrupt Deinit */ + HAL_NVIC_DisableIRQ(USART3_IRQn); + /* USER CODE BEGIN USART3_MspDeInit 1 */ + + /* USER CODE END USART3_MspDeInit 1 */ + } + else if (uartHandle->Instance == USART6) + { + /* USER CODE BEGIN USART6_MspDeInit 0 */ + + /* USER CODE END USART6_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_USART6_CLK_DISABLE(); + + /**USART6 GPIO Configuration + PC6 ------> USART6_TX + PC7 ------> USART6_RX + */ + HAL_GPIO_DeInit(GPIOC, BLE1_TX_Pin | BLE1_RX_Pin); + + /* USART6 DMA DeInit */ + HAL_DMA_DeInit(uartHandle->hdmarx); + HAL_DMA_DeInit(uartHandle->hdmatx); + + /* USART6 interrupt Deinit */ + HAL_NVIC_DisableIRQ(USART6_IRQn); + /* USER CODE BEGIN USART6_MspDeInit 1 */ + + /* USER CODE END USART6_MspDeInit 1 */ + } +} + +/* USER CODE BEGIN 1 */ +/* +********************************************************************************************************* +* Function name: dma_usart_send +* Description : UART transmit helper (wraps HAL_UART_Transmit_DMA) +* Parameters : huart, buf, len +* Return value : None +********************************************************************************************************* +*/ +void dma_usart_send(UART_HandleTypeDef *huart, uint8_t *buf, uint8_t len) // UART transmit wrapper +{ + if (HAL_UART_Transmit_DMA(huart, buf, len) != HAL_OK) // If the DMA transmit cannot be started, fall into the error handler + { + Error_Handler(); + } +} + +/* USER CODE END 1 */ diff --git a/Documents/datasheet/AD7124-8_cn.pdf b/Documents/datasheet/AD7124-8_cn.pdf new file mode 100644 index 0000000..19afce4 Binary files /dev/null and b/Documents/datasheet/AD7124-8_cn.pdf differ diff --git a/Documents/datasheet/ad7124-8.pdf b/Documents/datasheet/ad7124-8.pdf new file mode 100644 index 0000000..210dac1 Binary files /dev/null and b/Documents/datasheet/ad7124-8.pdf differ diff --git a/Documents/datasheet/dac161s997.pdf b/Documents/datasheet/dac161s997.pdf new file mode 100644 index 0000000..33a78fc Binary files /dev/null and b/Documents/datasheet/dac161s997.pdf differ diff --git a/Documents/datasheet/ht1200m.pdf b/Documents/datasheet/ht1200m.pdf new file mode 100644 index 0000000..ec24fc3 Binary files /dev/null and b/Documents/datasheet/ht1200m.pdf differ diff --git a/Documents/schematic diagram/AgingTestBox_V1.0-电气原理图-20241213.pdf b/Documents/schematic diagram/AgingTestBox_V1.0-电气原理图-20241213.pdf new file mode 100644 index 0000000..661604a Binary files /dev/null and b/Documents/schematic diagram/AgingTestBox_V1.0-电气原理图-20241213.pdf differ diff --git a/Drivers/BSP/Components/lan8742/lan8742.c b/Drivers/BSP/Components/lan8742/lan8742.c new file mode 100644 index 0000000..18a1309 --- /dev/null +++ b/Drivers/BSP/Components/lan8742/lan8742.c @@ -0,0 +1,613 @@ +/** + ****************************************************************************** + * @file lan8742.c + * @author MCD Application Team + * @brief This file provides a set of functions needed to manage the LAN8742 + * PHY devices.
+ ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "lan8742.h" + +/** @addtogroup BSP + * @{ + */ + +/** @addtogroup Component + * @{ + */ + +/** @defgroup LAN8742 LAN8742 + * @{ + */ + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @defgroup LAN8742_Private_Defines LAN8742 Private Defines + * @{ + */ +#define LAN8742_MAX_DEV_ADDR ((uint32_t)31U) +/** + * @} + */ + +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/** @defgroup LAN8742_Private_Functions LAN8742 Private Functions + * @{ + */ + +/** + * @brief Register IO functions to component object + * @param pObj: device object of LAN8742_Object_t. + * @param ioctx: holds device IO functions. + * @retval LAN8742_STATUS_OK if OK + * LAN8742_STATUS_ERROR if missing mandatory function + */ +int32_t LAN8742_RegisterBusIO(lan8742_Object_t *pObj, lan8742_IOCtx_t *ioctx) +{ + if(!pObj || !ioctx->ReadReg || !ioctx->WriteReg || !ioctx->GetTick) + { + return LAN8742_STATUS_ERROR; + } + + pObj->IO.Init = ioctx->Init; + pObj->IO.DeInit = ioctx->DeInit; + pObj->IO.ReadReg = ioctx->ReadReg; + pObj->IO.WriteReg = ioctx->WriteReg; + pObj->IO.GetTick = ioctx->GetTick; + + return LAN8742_STATUS_OK; +} + +/** + * @brief Initialize the lan8742 and configure the needed hardware resources + * @param pObj: device object LAN8742_Object_t. + * @retval LAN8742_STATUS_OK if OK + * LAN8742_STATUS_ADDRESS_ERROR if cannot find device address + * LAN8742_STATUS_READ_ERROR if cannot read register + */ + int32_t LAN8742_Init(lan8742_Object_t *pObj) + { + uint32_t regvalue = 0, addr = 0; + int32_t status = LAN8742_STATUS_OK; + + if(pObj->Is_Initialized == 0) + { + if(pObj->IO.Init != 0) + { + /* GPIO and Clocks initialization */ + pObj->IO.Init(); + } + + /* for later check */ + pObj->DevAddr = LAN8742_MAX_DEV_ADDR + 1; + + /* Get the device address from special mode register */ + for(addr = 0; addr <= LAN8742_MAX_DEV_ADDR; addr ++) + { + if(pObj->IO.ReadReg(addr, LAN8742_SMR, ®value) < 0) + { + status = LAN8742_STATUS_READ_ERROR; + /* Can't read from this device address + continue with next address */ + continue; + } + + if((regvalue & LAN8742_SMR_PHY_ADDR) == addr) + { + pObj->DevAddr = addr; + status = LAN8742_STATUS_OK; + break; + } + } + + if(pObj->DevAddr > LAN8742_MAX_DEV_ADDR) + { + status = LAN8742_STATUS_ADDRESS_ERROR; + } + + /* if device address is matched */ + if(status == LAN8742_STATUS_OK) + { + pObj->Is_Initialized = 1; + } + } + + return status; + } + +/** + * @brief De-Initialize the lan8742 and it's hardware resources + * @param pObj: device object LAN8742_Object_t. 
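+ * @note  Typical bring-up of this driver with LAN8742_RegisterBusIO() and LAN8742_Init()
+ *        above (sketch only; the ETH_PHY_IO_* functions are the user-provided MDIO
+ *        accessors registered through lan8742_IOCtx_t, and their names here are
+ *        illustrative):
+ * @code
+ * lan8742_Object_t LAN8742;
+ * lan8742_IOCtx_t LAN8742_IOCtx = { .Init     = ETH_PHY_IO_Init,
+ *                                   .DeInit   = ETH_PHY_IO_DeInit,
+ *                                   .ReadReg  = ETH_PHY_IO_ReadReg,
+ *                                   .WriteReg = ETH_PHY_IO_WriteReg,
+ *                                   .GetTick  = ETH_PHY_IO_GetTick };
+ *
+ * LAN8742_RegisterBusIO(&LAN8742, &LAN8742_IOCtx);
+ * if (LAN8742_Init(&LAN8742) != LAN8742_STATUS_OK)
+ * {
+ *   // PHY address scan or MDIO access failed
+ * }
+ * @endcode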
+ * @retval None + */ +int32_t LAN8742_DeInit(lan8742_Object_t *pObj) +{ + if(pObj->Is_Initialized) + { + if(pObj->IO.DeInit != 0) + { + if(pObj->IO.DeInit() < 0) + { + return LAN8742_STATUS_ERROR; + } + } + + pObj->Is_Initialized = 0; + } + + return LAN8742_STATUS_OK; +} + +/** + * @brief Disable the LAN8742 power down mode. + * @param pObj: device object LAN8742_Object_t. + * @retval LAN8742_STATUS_OK if OK + * LAN8742_STATUS_READ_ERROR if cannot read register + * LAN8742_STATUS_WRITE_ERROR if cannot write to register + */ +int32_t LAN8742_DisablePowerDownMode(lan8742_Object_t *pObj) +{ + uint32_t readval = 0; + int32_t status = LAN8742_STATUS_OK; + + if(pObj->IO.ReadReg(pObj->DevAddr, LAN8742_BCR, &readval) >= 0) + { + readval &= ~LAN8742_BCR_POWER_DOWN; + + /* Apply configuration */ + if(pObj->IO.WriteReg(pObj->DevAddr, LAN8742_BCR, readval) < 0) + { + status = LAN8742_STATUS_WRITE_ERROR; + } + } + else + { + status = LAN8742_STATUS_READ_ERROR; + } + + return status; +} + +/** + * @brief Enable the LAN8742 power down mode. + * @param pObj: device object LAN8742_Object_t. + * @retval LAN8742_STATUS_OK if OK + * LAN8742_STATUS_READ_ERROR if cannot read register + * LAN8742_STATUS_WRITE_ERROR if cannot write to register + */ +int32_t LAN8742_EnablePowerDownMode(lan8742_Object_t *pObj) +{ + uint32_t readval = 0; + int32_t status = LAN8742_STATUS_OK; + + if(pObj->IO.ReadReg(pObj->DevAddr, LAN8742_BCR, &readval) >= 0) + { + readval |= LAN8742_BCR_POWER_DOWN; + + /* Apply configuration */ + if(pObj->IO.WriteReg(pObj->DevAddr, LAN8742_BCR, readval) < 0) + { + status = LAN8742_STATUS_WRITE_ERROR; + } + } + else + { + status = LAN8742_STATUS_READ_ERROR; + } + + return status; +} + +/** + * @brief Start the auto negotiation process. + * @param pObj: device object LAN8742_Object_t. + * @retval LAN8742_STATUS_OK if OK + * LAN8742_STATUS_READ_ERROR if cannot read register + * LAN8742_STATUS_WRITE_ERROR if cannot write to register + */ +int32_t LAN8742_StartAutoNego(lan8742_Object_t *pObj) +{ + uint32_t readval = 0; + int32_t status = LAN8742_STATUS_OK; + + if(pObj->IO.ReadReg(pObj->DevAddr, LAN8742_BCR, &readval) >= 0) + { + readval |= LAN8742_BCR_AUTONEGO_EN; + + /* Apply configuration */ + if(pObj->IO.WriteReg(pObj->DevAddr, LAN8742_BCR, readval) < 0) + { + status = LAN8742_STATUS_WRITE_ERROR; + } + } + else + { + status = LAN8742_STATUS_READ_ERROR; + } + + return status; +} + +/** + * @brief Get the link state of LAN8742 device. + * @param pObj: Pointer to device object. 
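+ * @note  The value returned by this function is typically mapped onto the Ethernet MAC
+ *        configuration by the network glue code. A sketch (MACConf is assumed to be a
+ *        local ETH_MACConfigTypeDef; exact HAL symbols may differ from this illustration):
+ * @code
+ * switch (LAN8742_GetLinkState(&LAN8742))
+ * {
+ *   case LAN8742_STATUS_100MBITS_FULLDUPLEX:
+ *     MACConf.Speed      = ETH_SPEED_100M;
+ *     MACConf.DuplexMode = ETH_FULLDUPLEX_MODE;
+ *     break;
+ *   case LAN8742_STATUS_LINK_DOWN:
+ *     // link lost: stop the MAC / bring the netif down
+ *     break;
+ *   default:
+ *     // handle the remaining speed/duplex combinations and error codes
+ *     break;
+ * }
+ * @endcode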
+ * @param pLinkState: Pointer to link state + * @retval LAN8742_STATUS_LINK_DOWN if link is down + * LAN8742_STATUS_AUTONEGO_NOTDONE if Auto nego not completed + * LAN8742_STATUS_100MBITS_FULLDUPLEX if 100Mb/s FD + * LAN8742_STATUS_100MBITS_HALFDUPLEX if 100Mb/s HD + * LAN8742_STATUS_10MBITS_FULLDUPLEX if 10Mb/s FD + * LAN8742_STATUS_10MBITS_HALFDUPLEX if 10Mb/s HD + * LAN8742_STATUS_READ_ERROR if cannot read register + * LAN8742_STATUS_WRITE_ERROR if cannot write to register + */ +int32_t LAN8742_GetLinkState(lan8742_Object_t *pObj) +{ + uint32_t readval = 0; + + /* Read Status register */ + if(pObj->IO.ReadReg(pObj->DevAddr, LAN8742_BSR, &readval) < 0) + { + return LAN8742_STATUS_READ_ERROR; + } + + /* Read Status register again */ + if(pObj->IO.ReadReg(pObj->DevAddr, LAN8742_BSR, &readval) < 0) + { + return LAN8742_STATUS_READ_ERROR; + } + + if((readval & LAN8742_BSR_LINK_STATUS) == 0) + { + /* Return Link Down status */ + return LAN8742_STATUS_LINK_DOWN; + } + + /* Check Auto negotiation */ + if(pObj->IO.ReadReg(pObj->DevAddr, LAN8742_BCR, &readval) < 0) + { + return LAN8742_STATUS_READ_ERROR; + } + + if((readval & LAN8742_BCR_AUTONEGO_EN) != LAN8742_BCR_AUTONEGO_EN) + { + if(((readval & LAN8742_BCR_SPEED_SELECT) == LAN8742_BCR_SPEED_SELECT) && ((readval & LAN8742_BCR_DUPLEX_MODE) == LAN8742_BCR_DUPLEX_MODE)) + { + return LAN8742_STATUS_100MBITS_FULLDUPLEX; + } + else if ((readval & LAN8742_BCR_SPEED_SELECT) == LAN8742_BCR_SPEED_SELECT) + { + return LAN8742_STATUS_100MBITS_HALFDUPLEX; + } + else if ((readval & LAN8742_BCR_DUPLEX_MODE) == LAN8742_BCR_DUPLEX_MODE) + { + return LAN8742_STATUS_10MBITS_FULLDUPLEX; + } + else + { + return LAN8742_STATUS_10MBITS_HALFDUPLEX; + } + } + else /* Auto Nego enabled */ + { + if(pObj->IO.ReadReg(pObj->DevAddr, LAN8742_PHYSCSR, &readval) < 0) + { + return LAN8742_STATUS_READ_ERROR; + } + + /* Check if auto nego not done */ + if((readval & LAN8742_PHYSCSR_AUTONEGO_DONE) == 0) + { + return LAN8742_STATUS_AUTONEGO_NOTDONE; + } + + if((readval & LAN8742_PHYSCSR_HCDSPEEDMASK) == LAN8742_PHYSCSR_100BTX_FD) + { + return LAN8742_STATUS_100MBITS_FULLDUPLEX; + } + else if ((readval & LAN8742_PHYSCSR_HCDSPEEDMASK) == LAN8742_PHYSCSR_100BTX_HD) + { + return LAN8742_STATUS_100MBITS_HALFDUPLEX; + } + else if ((readval & LAN8742_PHYSCSR_HCDSPEEDMASK) == LAN8742_PHYSCSR_10BT_FD) + { + return LAN8742_STATUS_10MBITS_FULLDUPLEX; + } + else + { + return LAN8742_STATUS_10MBITS_HALFDUPLEX; + } + } +} + +/** + * @brief Set the link state of LAN8742 device. + * @param pObj: Pointer to device object. 
+ * @param pLinkState: link state can be one of the following + * LAN8742_STATUS_100MBITS_FULLDUPLEX if 100Mb/s FD + * LAN8742_STATUS_100MBITS_HALFDUPLEX if 100Mb/s HD + * LAN8742_STATUS_10MBITS_FULLDUPLEX if 10Mb/s FD + * LAN8742_STATUS_10MBITS_HALFDUPLEX if 10Mb/s HD + * @retval LAN8742_STATUS_OK if OK + * LAN8742_STATUS_ERROR if parameter error + * LAN8742_STATUS_READ_ERROR if cannot read register + * LAN8742_STATUS_WRITE_ERROR if cannot write to register + */ +int32_t LAN8742_SetLinkState(lan8742_Object_t *pObj, uint32_t LinkState) +{ + uint32_t bcrvalue = 0; + int32_t status = LAN8742_STATUS_OK; + + if(pObj->IO.ReadReg(pObj->DevAddr, LAN8742_BCR, &bcrvalue) >= 0) + { + /* Disable link config (Auto nego, speed and duplex) */ + bcrvalue &= ~(LAN8742_BCR_AUTONEGO_EN | LAN8742_BCR_SPEED_SELECT | LAN8742_BCR_DUPLEX_MODE); + + if(LinkState == LAN8742_STATUS_100MBITS_FULLDUPLEX) + { + bcrvalue |= (LAN8742_BCR_SPEED_SELECT | LAN8742_BCR_DUPLEX_MODE); + } + else if (LinkState == LAN8742_STATUS_100MBITS_HALFDUPLEX) + { + bcrvalue |= LAN8742_BCR_SPEED_SELECT; + } + else if (LinkState == LAN8742_STATUS_10MBITS_FULLDUPLEX) + { + bcrvalue |= LAN8742_BCR_DUPLEX_MODE; + } + else + { + /* Wrong link status parameter */ + status = LAN8742_STATUS_ERROR; + } + } + else + { + status = LAN8742_STATUS_READ_ERROR; + } + + if(status == LAN8742_STATUS_OK) + { + /* Apply configuration */ + if(pObj->IO.WriteReg(pObj->DevAddr, LAN8742_BCR, bcrvalue) < 0) + { + status = LAN8742_STATUS_WRITE_ERROR; + } + } + + return status; +} + +/** + * @brief Enable loopback mode. + * @param pObj: Pointer to device object. + * @retval LAN8742_STATUS_OK if OK + * LAN8742_STATUS_READ_ERROR if cannot read register + * LAN8742_STATUS_WRITE_ERROR if cannot write to register + */ +int32_t LAN8742_EnableLoopbackMode(lan8742_Object_t *pObj) +{ + uint32_t readval = 0; + int32_t status = LAN8742_STATUS_OK; + + if(pObj->IO.ReadReg(pObj->DevAddr, LAN8742_BCR, &readval) >= 0) + { + readval |= LAN8742_BCR_LOOPBACK; + + /* Apply configuration */ + if(pObj->IO.WriteReg(pObj->DevAddr, LAN8742_BCR, readval) < 0) + { + status = LAN8742_STATUS_WRITE_ERROR; + } + } + else + { + status = LAN8742_STATUS_READ_ERROR; + } + + return status; +} + +/** + * @brief Disable loopback mode. + * @param pObj: Pointer to device object. + * @retval LAN8742_STATUS_OK if OK + * LAN8742_STATUS_READ_ERROR if cannot read register + * LAN8742_STATUS_WRITE_ERROR if cannot write to register + */ +int32_t LAN8742_DisableLoopbackMode(lan8742_Object_t *pObj) +{ + uint32_t readval = 0; + int32_t status = LAN8742_STATUS_OK; + + if(pObj->IO.ReadReg(pObj->DevAddr, LAN8742_BCR, &readval) >= 0) + { + readval &= ~LAN8742_BCR_LOOPBACK; + + /* Apply configuration */ + if(pObj->IO.WriteReg(pObj->DevAddr, LAN8742_BCR, readval) < 0) + { + status = LAN8742_STATUS_WRITE_ERROR; + } + } + else + { + status = LAN8742_STATUS_READ_ERROR; + } + + return status; +} + +/** + * @brief Enable IT source. + * @param pObj: Pointer to device object. 
+ * @param Interrupt: IT source to be enabled + * should be a value or a combination of the following: + * LAN8742_WOL_IT + * LAN8742_ENERGYON_IT + * LAN8742_AUTONEGO_COMPLETE_IT + * LAN8742_REMOTE_FAULT_IT + * LAN8742_LINK_DOWN_IT + * LAN8742_AUTONEGO_LP_ACK_IT + * LAN8742_PARALLEL_DETECTION_FAULT_IT + * LAN8742_AUTONEGO_PAGE_RECEIVED_IT + * @retval LAN8742_STATUS_OK if OK + * LAN8742_STATUS_READ_ERROR if cannot read register + * LAN8742_STATUS_WRITE_ERROR if cannot write to register + */ +int32_t LAN8742_EnableIT(lan8742_Object_t *pObj, uint32_t Interrupt) +{ + uint32_t readval = 0; + int32_t status = LAN8742_STATUS_OK; + + if(pObj->IO.ReadReg(pObj->DevAddr, LAN8742_IMR, &readval) >= 0) + { + readval |= Interrupt; + + /* Apply configuration */ + if(pObj->IO.WriteReg(pObj->DevAddr, LAN8742_IMR, readval) < 0) + { + status = LAN8742_STATUS_WRITE_ERROR; + } + } + else + { + status = LAN8742_STATUS_READ_ERROR; + } + + return status; +} + +/** + * @brief Disable IT source. + * @param pObj: Pointer to device object. + * @param Interrupt: IT source to be disabled + * should be a value or a combination of the following: + * LAN8742_WOL_IT + * LAN8742_ENERGYON_IT + * LAN8742_AUTONEGO_COMPLETE_IT + * LAN8742_REMOTE_FAULT_IT + * LAN8742_LINK_DOWN_IT + * LAN8742_AUTONEGO_LP_ACK_IT + * LAN8742_PARALLEL_DETECTION_FAULT_IT + * LAN8742_AUTONEGO_PAGE_RECEIVED_IT + * @retval LAN8742_STATUS_OK if OK + * LAN8742_STATUS_READ_ERROR if cannot read register + * LAN8742_STATUS_WRITE_ERROR if cannot write to register + */ +int32_t LAN8742_DisableIT(lan8742_Object_t *pObj, uint32_t Interrupt) +{ + uint32_t readval = 0; + int32_t status = LAN8742_STATUS_OK; + + if(pObj->IO.ReadReg(pObj->DevAddr, LAN8742_IMR, &readval) >= 0) + { + readval &= ~Interrupt; + + /* Apply configuration */ + if(pObj->IO.WriteReg(pObj->DevAddr, LAN8742_IMR, readval) < 0) + { + status = LAN8742_STATUS_WRITE_ERROR; + } + } + else + { + status = LAN8742_STATUS_READ_ERROR; + } + + return status; +} + +/** + * @brief Clear IT flag. + * @param pObj: Pointer to device object. + * @param Interrupt: IT flag to be cleared + * should be a value or a combination of the following: + * LAN8742_WOL_IT + * LAN8742_ENERGYON_IT + * LAN8742_AUTONEGO_COMPLETE_IT + * LAN8742_REMOTE_FAULT_IT + * LAN8742_LINK_DOWN_IT + * LAN8742_AUTONEGO_LP_ACK_IT + * LAN8742_PARALLEL_DETECTION_FAULT_IT + * LAN8742_AUTONEGO_PAGE_RECEIVED_IT + * @retval LAN8742_STATUS_OK if OK + * LAN8742_STATUS_READ_ERROR if cannot read register + */ +int32_t LAN8742_ClearIT(lan8742_Object_t *pObj, uint32_t Interrupt) +{ + uint32_t readval = 0; + int32_t status = LAN8742_STATUS_OK; + + if(pObj->IO.ReadReg(pObj->DevAddr, LAN8742_ISFR, &readval) < 0) + { + status = LAN8742_STATUS_READ_ERROR; + } + + return status; +} + +/** + * @brief Get IT Flag status. + * @param pObj: Pointer to device object. 
+  * @param  Interrupt: IT Flag to be checked,
+  *                  should be a value or a combination of the following:
+  *                    LAN8742_WOL_IT
+  *                    LAN8742_ENERGYON_IT
+  *                    LAN8742_AUTONEGO_COMPLETE_IT
+  *                    LAN8742_REMOTE_FAULT_IT
+  *                    LAN8742_LINK_DOWN_IT
+  *                    LAN8742_AUTONEGO_LP_ACK_IT
+  *                    LAN8742_PARALLEL_DETECTION_FAULT_IT
+  *                    LAN8742_AUTONEGO_PAGE_RECEIVED_IT
+  * @retval 1 IT flag is SET
+  *         0 IT flag is RESET
+  *         LAN8742_STATUS_READ_ERROR if cannot read register
+  */
+int32_t LAN8742_GetITStatus(lan8742_Object_t *pObj, uint32_t Interrupt)
+{
+  uint32_t readval = 0;
+  int32_t status = 0;
+
+  if(pObj->IO.ReadReg(pObj->DevAddr, LAN8742_ISFR, &readval) >= 0)
+  {
+    status = ((readval & Interrupt) == Interrupt);
+  }
+  else
+  {
+    status = LAN8742_STATUS_READ_ERROR;
+  }
+
+  return status;
+}
+
+/**
+  * @}
+  */
+
+/**
+  * @}
+  */
+
+/**
+  * @}
+  */
+
+/**
+  * @}
+  */
diff --git a/Drivers/BSP/Components/lan8742/lan8742.h b/Drivers/BSP/Components/lan8742/lan8742.h
new file mode 100644
index 0000000..dc3da16
--- /dev/null
+++ b/Drivers/BSP/Components/lan8742/lan8742.h
@@ -0,0 +1,446 @@
+/**
+  ******************************************************************************
+  * @file    lan8742.h
+  * @author  MCD Application Team
+  * @brief   This file contains all the functions prototypes for the
+  *          lan8742.c PHY driver.
+  ******************************************************************************
+  * @attention
+  *
+  * Copyright (c) 2017 STMicroelectronics.
+  * All rights reserved.
+  *
+  * This software is licensed under terms that can be found in the LICENSE file
+  * in the root directory of this software component.
+  * If no LICENSE file comes with this software, it is provided AS-IS.
+  *
+  ******************************************************************************
+  */
+
+/* Define to prevent recursive inclusion -------------------------------------*/
+#ifndef LAN8742_H
+#define LAN8742_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/* Includes ------------------------------------------------------------------*/
+#include <stdint.h>
+
+/** @addtogroup BSP
+  * @{
+  */
+
+/** @addtogroup Component
+  * @{
+  */
+
+/** @defgroup LAN8742
+  * @{
+  */
+/* Exported constants --------------------------------------------------------*/
+/** @defgroup LAN8742_Exported_Constants LAN8742 Exported Constants
+  * @{
+  */
+
+/** @defgroup LAN8742_Registers_Mapping LAN8742 Registers Mapping
+  * @{
+  */
+#define LAN8742_BCR                         ((uint16_t)0x0000U)
+#define LAN8742_BSR                         ((uint16_t)0x0001U)
+#define LAN8742_PHYI1R                      ((uint16_t)0x0002U)
+#define LAN8742_PHYI2R                      ((uint16_t)0x0003U)
+#define LAN8742_ANAR                        ((uint16_t)0x0004U)
+#define LAN8742_ANLPAR                      ((uint16_t)0x0005U)
+#define LAN8742_ANER                        ((uint16_t)0x0006U)
+#define LAN8742_ANNPTR                      ((uint16_t)0x0007U)
+#define LAN8742_ANNPRR                      ((uint16_t)0x0008U)
+#define LAN8742_MMDACR                      ((uint16_t)0x000DU)
+#define LAN8742_MMDAADR                     ((uint16_t)0x000EU)
+#define LAN8742_ENCTR                       ((uint16_t)0x0010U)
+#define LAN8742_MCSR                        ((uint16_t)0x0011U)
+#define LAN8742_SMR                         ((uint16_t)0x0012U)
+#define LAN8742_TPDCR                       ((uint16_t)0x0018U)
+#define LAN8742_TCSR                        ((uint16_t)0x0019U)
+#define LAN8742_SECR                        ((uint16_t)0x001AU)
+#define LAN8742_SCSIR                       ((uint16_t)0x001BU)
+#define LAN8742_CLR                         ((uint16_t)0x001CU)
+#define LAN8742_ISFR                        ((uint16_t)0x001DU)
+#define LAN8742_IMR                         ((uint16_t)0x001EU)
+#define LAN8742_PHYSCSR                     ((uint16_t)0x001FU)
+/**
+  * @}
+  */
+
+/** @defgroup LAN8742_BCR_Bit_Definition LAN8742 BCR Bit Definition
+  * @{
+  */
+#define LAN8742_BCR_SOFT_RESET              ((uint16_t)0x8000U)
+#define LAN8742_BCR_LOOPBACK                ((uint16_t)0x4000U)
+#define 
LAN8742_BCR_SPEED_SELECT ((uint16_t)0x2000U) +#define LAN8742_BCR_AUTONEGO_EN ((uint16_t)0x1000U) +#define LAN8742_BCR_POWER_DOWN ((uint16_t)0x0800U) +#define LAN8742_BCR_ISOLATE ((uint16_t)0x0400U) +#define LAN8742_BCR_RESTART_AUTONEGO ((uint16_t)0x0200U) +#define LAN8742_BCR_DUPLEX_MODE ((uint16_t)0x0100U) +/** + * @} + */ + +/** @defgroup LAN8742_BSR_Bit_Definition LAN8742 BSR Bit Definition + * @{ + */ +#define LAN8742_BSR_100BASE_T4 ((uint16_t)0x8000U) +#define LAN8742_BSR_100BASE_TX_FD ((uint16_t)0x4000U) +#define LAN8742_BSR_100BASE_TX_HD ((uint16_t)0x2000U) +#define LAN8742_BSR_10BASE_T_FD ((uint16_t)0x1000U) +#define LAN8742_BSR_10BASE_T_HD ((uint16_t)0x0800U) +#define LAN8742_BSR_100BASE_T2_FD ((uint16_t)0x0400U) +#define LAN8742_BSR_100BASE_T2_HD ((uint16_t)0x0200U) +#define LAN8742_BSR_EXTENDED_STATUS ((uint16_t)0x0100U) +#define LAN8742_BSR_AUTONEGO_CPLT ((uint16_t)0x0020U) +#define LAN8742_BSR_REMOTE_FAULT ((uint16_t)0x0010U) +#define LAN8742_BSR_AUTONEGO_ABILITY ((uint16_t)0x0008U) +#define LAN8742_BSR_LINK_STATUS ((uint16_t)0x0004U) +#define LAN8742_BSR_JABBER_DETECT ((uint16_t)0x0002U) +#define LAN8742_BSR_EXTENDED_CAP ((uint16_t)0x0001U) +/** + * @} + */ + +/** @defgroup LAN8742_PHYI1R_Bit_Definition LAN8742 PHYI1R Bit Definition + * @{ + */ +#define LAN8742_PHYI1R_OUI_3_18 ((uint16_t)0xFFFFU) +/** + * @} + */ + +/** @defgroup LAN8742_PHYI2R_Bit_Definition LAN8742 PHYI2R Bit Definition + * @{ + */ +#define LAN8742_PHYI2R_OUI_19_24 ((uint16_t)0xFC00U) +#define LAN8742_PHYI2R_MODEL_NBR ((uint16_t)0x03F0U) +#define LAN8742_PHYI2R_REVISION_NBR ((uint16_t)0x000FU) +/** + * @} + */ + +/** @defgroup LAN8742_ANAR_Bit_Definition LAN8742 ANAR Bit Definition + * @{ + */ +#define LAN8742_ANAR_NEXT_PAGE ((uint16_t)0x8000U) +#define LAN8742_ANAR_REMOTE_FAULT ((uint16_t)0x2000U) +#define LAN8742_ANAR_PAUSE_OPERATION ((uint16_t)0x0C00U) +#define LAN8742_ANAR_PO_NOPAUSE ((uint16_t)0x0000U) +#define LAN8742_ANAR_PO_SYMMETRIC_PAUSE ((uint16_t)0x0400U) +#define LAN8742_ANAR_PO_ASYMMETRIC_PAUSE ((uint16_t)0x0800U) +#define LAN8742_ANAR_PO_ADVERTISE_SUPPORT ((uint16_t)0x0C00U) +#define LAN8742_ANAR_100BASE_TX_FD ((uint16_t)0x0100U) +#define LAN8742_ANAR_100BASE_TX ((uint16_t)0x0080U) +#define LAN8742_ANAR_10BASE_T_FD ((uint16_t)0x0040U) +#define LAN8742_ANAR_10BASE_T ((uint16_t)0x0020U) +#define LAN8742_ANAR_SELECTOR_FIELD ((uint16_t)0x000FU) +/** + * @} + */ + +/** @defgroup LAN8742_ANLPAR_Bit_Definition LAN8742 ANLPAR Bit Definition + * @{ + */ +#define LAN8742_ANLPAR_NEXT_PAGE ((uint16_t)0x8000U) +#define LAN8742_ANLPAR_REMOTE_FAULT ((uint16_t)0x2000U) +#define LAN8742_ANLPAR_PAUSE_OPERATION ((uint16_t)0x0C00U) +#define LAN8742_ANLPAR_PO_NOPAUSE ((uint16_t)0x0000U) +#define LAN8742_ANLPAR_PO_SYMMETRIC_PAUSE ((uint16_t)0x0400U) +#define LAN8742_ANLPAR_PO_ASYMMETRIC_PAUSE ((uint16_t)0x0800U) +#define LAN8742_ANLPAR_PO_ADVERTISE_SUPPORT ((uint16_t)0x0C00U) +#define LAN8742_ANLPAR_100BASE_TX_FD ((uint16_t)0x0100U) +#define LAN8742_ANLPAR_100BASE_TX ((uint16_t)0x0080U) +#define LAN8742_ANLPAR_10BASE_T_FD ((uint16_t)0x0040U) +#define LAN8742_ANLPAR_10BASE_T ((uint16_t)0x0020U) +#define LAN8742_ANLPAR_SELECTOR_FIELD ((uint16_t)0x000FU) +/** + * @} + */ + +/** @defgroup LAN8742_ANER_Bit_Definition LAN8742 ANER Bit Definition + * @{ + */ +#define LAN8742_ANER_RX_NP_LOCATION_ABLE ((uint16_t)0x0040U) +#define LAN8742_ANER_RX_NP_STORAGE_LOCATION ((uint16_t)0x0020U) +#define LAN8742_ANER_PARALLEL_DETECT_FAULT ((uint16_t)0x0010U) +#define LAN8742_ANER_LP_NP_ABLE ((uint16_t)0x0008U) +#define 
LAN8742_ANER_NP_ABLE ((uint16_t)0x0004U) +#define LAN8742_ANER_PAGE_RECEIVED ((uint16_t)0x0002U) +#define LAN8742_ANER_LP_AUTONEG_ABLE ((uint16_t)0x0001U) +/** + * @} + */ + +/** @defgroup LAN8742_ANNPTR_Bit_Definition LAN8742 ANNPTR Bit Definition + * @{ + */ +#define LAN8742_ANNPTR_NEXT_PAGE ((uint16_t)0x8000U) +#define LAN8742_ANNPTR_MESSAGE_PAGE ((uint16_t)0x2000U) +#define LAN8742_ANNPTR_ACK2 ((uint16_t)0x1000U) +#define LAN8742_ANNPTR_TOGGLE ((uint16_t)0x0800U) +#define LAN8742_ANNPTR_MESSAGGE_CODE ((uint16_t)0x07FFU) +/** + * @} + */ + +/** @defgroup LAN8742_ANNPRR_Bit_Definition LAN8742 ANNPRR Bit Definition + * @{ + */ +#define LAN8742_ANNPTR_NEXT_PAGE ((uint16_t)0x8000U) +#define LAN8742_ANNPRR_ACK ((uint16_t)0x4000U) +#define LAN8742_ANNPRR_MESSAGE_PAGE ((uint16_t)0x2000U) +#define LAN8742_ANNPRR_ACK2 ((uint16_t)0x1000U) +#define LAN8742_ANNPRR_TOGGLE ((uint16_t)0x0800U) +#define LAN8742_ANNPRR_MESSAGGE_CODE ((uint16_t)0x07FFU) +/** + * @} + */ + +/** @defgroup LAN8742_MMDACR_Bit_Definition LAN8742 MMDACR Bit Definition + * @{ + */ +#define LAN8742_MMDACR_MMD_FUNCTION ((uint16_t)0xC000U) +#define LAN8742_MMDACR_MMD_FUNCTION_ADDR ((uint16_t)0x0000U) +#define LAN8742_MMDACR_MMD_FUNCTION_DATA ((uint16_t)0x4000U) +#define LAN8742_MMDACR_MMD_DEV_ADDR ((uint16_t)0x001FU) +/** + * @} + */ + +/** @defgroup LAN8742_ENCTR_Bit_Definition LAN8742 ENCTR Bit Definition + * @{ + */ +#define LAN8742_ENCTR_TX_ENABLE ((uint16_t)0x8000U) +#define LAN8742_ENCTR_TX_TIMER ((uint16_t)0x6000U) +#define LAN8742_ENCTR_TX_TIMER_1S ((uint16_t)0x0000U) +#define LAN8742_ENCTR_TX_TIMER_768MS ((uint16_t)0x2000U) +#define LAN8742_ENCTR_TX_TIMER_512MS ((uint16_t)0x4000U) +#define LAN8742_ENCTR_TX_TIMER_265MS ((uint16_t)0x6000U) +#define LAN8742_ENCTR_RX_ENABLE ((uint16_t)0x1000U) +#define LAN8742_ENCTR_RX_MAX_INTERVAL ((uint16_t)0x0C00U) +#define LAN8742_ENCTR_RX_MAX_INTERVAL_64MS ((uint16_t)0x0000U) +#define LAN8742_ENCTR_RX_MAX_INTERVAL_256MS ((uint16_t)0x0400U) +#define LAN8742_ENCTR_RX_MAX_INTERVAL_512MS ((uint16_t)0x0800U) +#define LAN8742_ENCTR_RX_MAX_INTERVAL_1S ((uint16_t)0x0C00U) +#define LAN8742_ENCTR_EX_CROSS_OVER ((uint16_t)0x0002U) +#define LAN8742_ENCTR_EX_MANUAL_CROSS_OVER ((uint16_t)0x0001U) +/** + * @} + */ + +/** @defgroup LAN8742_MCSR_Bit_Definition LAN8742 MCSR Bit Definition + * @{ + */ +#define LAN8742_MCSR_EDPWRDOWN ((uint16_t)0x2000U) +#define LAN8742_MCSR_FARLOOPBACK ((uint16_t)0x0200U) +#define LAN8742_MCSR_ALTINT ((uint16_t)0x0040U) +#define LAN8742_MCSR_ENERGYON ((uint16_t)0x0002U) +/** + * @} + */ + +/** @defgroup LAN8742_SMR_Bit_Definition LAN8742 SMR Bit Definition + * @{ + */ +#define LAN8742_SMR_MODE ((uint16_t)0x00E0U) +#define LAN8742_SMR_PHY_ADDR ((uint16_t)0x001FU) +/** + * @} + */ + +/** @defgroup LAN8742_TPDCR_Bit_Definition LAN8742 TPDCR Bit Definition + * @{ + */ +#define LAN8742_TPDCR_DELAY_IN ((uint16_t)0x8000U) +#define LAN8742_TPDCR_LINE_BREAK_COUNTER ((uint16_t)0x7000U) +#define LAN8742_TPDCR_PATTERN_HIGH ((uint16_t)0x0FC0U) +#define LAN8742_TPDCR_PATTERN_LOW ((uint16_t)0x003FU) +/** + * @} + */ + +/** @defgroup LAN8742_TCSR_Bit_Definition LAN8742 TCSR Bit Definition + * @{ + */ +#define LAN8742_TCSR_TDR_ENABLE ((uint16_t)0x8000U) +#define LAN8742_TCSR_TDR_AD_FILTER_ENABLE ((uint16_t)0x4000U) +#define LAN8742_TCSR_TDR_CH_CABLE_TYPE ((uint16_t)0x0600U) +#define LAN8742_TCSR_TDR_CH_CABLE_DEFAULT ((uint16_t)0x0000U) +#define LAN8742_TCSR_TDR_CH_CABLE_SHORTED ((uint16_t)0x0200U) +#define LAN8742_TCSR_TDR_CH_CABLE_OPEN ((uint16_t)0x0400U) +#define 
LAN8742_TCSR_TDR_CH_CABLE_MATCH ((uint16_t)0x0600U) +#define LAN8742_TCSR_TDR_CH_STATUS ((uint16_t)0x0100U) +#define LAN8742_TCSR_TDR_CH_LENGTH ((uint16_t)0x00FFU) +/** + * @} + */ + +/** @defgroup LAN8742_SCSIR_Bit_Definition LAN8742 SCSIR Bit Definition + * @{ + */ +#define LAN8742_SCSIR_AUTO_MDIX_ENABLE ((uint16_t)0x8000U) +#define LAN8742_SCSIR_CHANNEL_SELECT ((uint16_t)0x2000U) +#define LAN8742_SCSIR_SQE_DISABLE ((uint16_t)0x0800U) +#define LAN8742_SCSIR_XPOLALITY ((uint16_t)0x0010U) +/** + * @} + */ + +/** @defgroup LAN8742_CLR_Bit_Definition LAN8742 CLR Bit Definition + * @{ + */ +#define LAN8742_CLR_CABLE_LENGTH ((uint16_t)0xF000U) +/** + * @} + */ + +/** @defgroup LAN8742_IMR_ISFR_Bit_Definition LAN8742 IMR ISFR Bit Definition + * @{ + */ +#define LAN8742_INT_8 ((uint16_t)0x0100U) +#define LAN8742_INT_7 ((uint16_t)0x0080U) +#define LAN8742_INT_6 ((uint16_t)0x0040U) +#define LAN8742_INT_5 ((uint16_t)0x0020U) +#define LAN8742_INT_4 ((uint16_t)0x0010U) +#define LAN8742_INT_3 ((uint16_t)0x0008U) +#define LAN8742_INT_2 ((uint16_t)0x0004U) +#define LAN8742_INT_1 ((uint16_t)0x0002U) +/** + * @} + */ + +/** @defgroup LAN8742_PHYSCSR_Bit_Definition LAN8742 PHYSCSR Bit Definition + * @{ + */ +#define LAN8742_PHYSCSR_AUTONEGO_DONE ((uint16_t)0x1000U) +#define LAN8742_PHYSCSR_HCDSPEEDMASK ((uint16_t)0x001CU) +#define LAN8742_PHYSCSR_10BT_HD ((uint16_t)0x0004U) +#define LAN8742_PHYSCSR_10BT_FD ((uint16_t)0x0014U) +#define LAN8742_PHYSCSR_100BTX_HD ((uint16_t)0x0008U) +#define LAN8742_PHYSCSR_100BTX_FD ((uint16_t)0x0018U) +/** + * @} + */ + +/** @defgroup LAN8742_Status LAN8742 Status + * @{ + */ + +#define LAN8742_STATUS_READ_ERROR ((int32_t)-5) +#define LAN8742_STATUS_WRITE_ERROR ((int32_t)-4) +#define LAN8742_STATUS_ADDRESS_ERROR ((int32_t)-3) +#define LAN8742_STATUS_RESET_TIMEOUT ((int32_t)-2) +#define LAN8742_STATUS_ERROR ((int32_t)-1) +#define LAN8742_STATUS_OK ((int32_t) 0) +#define LAN8742_STATUS_LINK_DOWN ((int32_t) 1) +#define LAN8742_STATUS_100MBITS_FULLDUPLEX ((int32_t) 2) +#define LAN8742_STATUS_100MBITS_HALFDUPLEX ((int32_t) 3) +#define LAN8742_STATUS_10MBITS_FULLDUPLEX ((int32_t) 4) +#define LAN8742_STATUS_10MBITS_HALFDUPLEX ((int32_t) 5) +#define LAN8742_STATUS_AUTONEGO_NOTDONE ((int32_t) 6) +/** + * @} + */ + +/** @defgroup LAN8742_IT_Flags LAN8742 IT Flags + * @{ + */ +#define LAN8742_WOL_IT LAN8742_INT_8 +#define LAN8742_ENERGYON_IT LAN8742_INT_7 +#define LAN8742_AUTONEGO_COMPLETE_IT LAN8742_INT_6 +#define LAN8742_REMOTE_FAULT_IT LAN8742_INT_5 +#define LAN8742_LINK_DOWN_IT LAN8742_INT_4 +#define LAN8742_AUTONEGO_LP_ACK_IT LAN8742_INT_3 +#define LAN8742_PARALLEL_DETECTION_FAULT_IT LAN8742_INT_2 +#define LAN8742_AUTONEGO_PAGE_RECEIVED_IT LAN8742_INT_1 +/** + * @} + */ + +/** + * @} + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup LAN8742_Exported_Types LAN8742 Exported Types + * @{ + */ +typedef int32_t (*lan8742_Init_Func) (void); +typedef int32_t (*lan8742_DeInit_Func) (void); +typedef int32_t (*lan8742_ReadReg_Func) (uint32_t, uint32_t, uint32_t *); +typedef int32_t (*lan8742_WriteReg_Func) (uint32_t, uint32_t, uint32_t); +typedef int32_t (*lan8742_GetTick_Func) (void); + +typedef struct +{ + lan8742_Init_Func Init; + lan8742_DeInit_Func DeInit; + lan8742_WriteReg_Func WriteReg; + lan8742_ReadReg_Func ReadReg; + lan8742_GetTick_Func GetTick; +} lan8742_IOCtx_t; + + +typedef struct +{ + uint32_t DevAddr; + uint32_t Is_Initialized; + lan8742_IOCtx_t IO; + void *pData; +}lan8742_Object_t; +/** + * @} + */ + +/* Exported 
macro ------------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ +/** @defgroup LAN8742_Exported_Functions LAN8742 Exported Functions + * @{ + */ +int32_t LAN8742_RegisterBusIO(lan8742_Object_t *pObj, lan8742_IOCtx_t *ioctx); +int32_t LAN8742_Init(lan8742_Object_t *pObj); +int32_t LAN8742_DeInit(lan8742_Object_t *pObj); +int32_t LAN8742_DisablePowerDownMode(lan8742_Object_t *pObj); +int32_t LAN8742_EnablePowerDownMode(lan8742_Object_t *pObj); +int32_t LAN8742_StartAutoNego(lan8742_Object_t *pObj); +int32_t LAN8742_GetLinkState(lan8742_Object_t *pObj); +int32_t LAN8742_SetLinkState(lan8742_Object_t *pObj, uint32_t LinkState); +int32_t LAN8742_EnableLoopbackMode(lan8742_Object_t *pObj); +int32_t LAN8742_DisableLoopbackMode(lan8742_Object_t *pObj); +int32_t LAN8742_EnableIT(lan8742_Object_t *pObj, uint32_t Interrupt); +int32_t LAN8742_DisableIT(lan8742_Object_t *pObj, uint32_t Interrupt); +int32_t LAN8742_ClearIT(lan8742_Object_t *pObj, uint32_t Interrupt); +int32_t LAN8742_GetITStatus(lan8742_Object_t *pObj, uint32_t Interrupt); +/** + * @} + */ + +#ifdef __cplusplus +} +#endif +#endif /* LAN8742_H */ + + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ diff --git a/Drivers/CMSIS/Device/ST/STM32F4xx/Include/stm32f407xx.h b/Drivers/CMSIS/Device/ST/STM32F4xx/Include/stm32f407xx.h new file mode 100644 index 0000000..4f2df22 --- /dev/null +++ b/Drivers/CMSIS/Device/ST/STM32F4xx/Include/stm32f407xx.h @@ -0,0 +1,15610 @@ +/** + ****************************************************************************** + * @file stm32f407xx.h + * @author MCD Application Team + * @brief CMSIS STM32F407xx Device Peripheral Access Layer Header File. + * + * This file contains: + * - Data structures and the address mapping for all peripherals + * - peripherals registers declarations and bits definition + * - Macros to access peripheral's registers hardware + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/** @addtogroup CMSIS_Device + * @{ + */ + +/** @addtogroup stm32f407xx + * @{ + */ + +#ifndef __STM32F407xx_H +#define __STM32F407xx_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** @addtogroup Configuration_section_for_CMSIS + * @{ + */ + +/** + * @brief Configuration of the Cortex-M4 Processor and Core Peripherals + */ +#define __CM4_REV 0x0001U /*!< Core revision r0p1 */ +#define __MPU_PRESENT 1U /*!< STM32F4XX provides an MPU */ +#define __NVIC_PRIO_BITS 4U /*!< STM32F4XX uses 4 Bits for the Priority Levels */ +#define __Vendor_SysTickConfig 0U /*!< Set to 1 if different SysTick Config is used */ +#define __FPU_PRESENT 1U /*!< FPU present */ + +/** + * @} + */ + +/** @addtogroup Peripheral_interrupt_number_definition + * @{ + */ + +/** + * @brief STM32F4XX Interrupt Number Definition, according to the selected device + * in @ref Library_configuration_section + */ +typedef enum +{ + /****** Cortex-M4 Processor Exceptions Numbers ****************************************************************/ + NonMaskableInt_IRQn = -14, /*!< 2 Non Maskable Interrupt */ + MemoryManagement_IRQn = -12, /*!< 4 Cortex-M4 Memory Management Interrupt */ + BusFault_IRQn = -11, /*!< 5 Cortex-M4 Bus Fault Interrupt */ + UsageFault_IRQn = -10, /*!< 6 Cortex-M4 Usage Fault Interrupt */ + SVCall_IRQn = -5, /*!< 11 Cortex-M4 SV Call Interrupt */ + DebugMonitor_IRQn = -4, /*!< 12 Cortex-M4 Debug Monitor Interrupt */ + PendSV_IRQn = -2, /*!< 14 Cortex-M4 Pend SV Interrupt */ + SysTick_IRQn = -1, /*!< 15 Cortex-M4 System Tick Interrupt */ + /****** STM32 specific Interrupt Numbers **********************************************************************/ + WWDG_IRQn = 0, /*!< Window WatchDog Interrupt */ + PVD_IRQn = 1, /*!< PVD through EXTI Line detection Interrupt */ + TAMP_STAMP_IRQn = 2, /*!< Tamper and TimeStamp interrupts through the EXTI line */ + RTC_WKUP_IRQn = 3, /*!< RTC Wakeup interrupt through the EXTI line */ + FLASH_IRQn = 4, /*!< FLASH global Interrupt */ + RCC_IRQn = 5, /*!< RCC global Interrupt */ + EXTI0_IRQn = 6, /*!< EXTI Line0 Interrupt */ + EXTI1_IRQn = 7, /*!< EXTI Line1 Interrupt */ + EXTI2_IRQn = 8, /*!< EXTI Line2 Interrupt */ + EXTI3_IRQn = 9, /*!< EXTI Line3 Interrupt */ + EXTI4_IRQn = 10, /*!< EXTI Line4 Interrupt */ + DMA1_Stream0_IRQn = 11, /*!< DMA1 Stream 0 global Interrupt */ + DMA1_Stream1_IRQn = 12, /*!< DMA1 Stream 1 global Interrupt */ + DMA1_Stream2_IRQn = 13, /*!< DMA1 Stream 2 global Interrupt */ + DMA1_Stream3_IRQn = 14, /*!< DMA1 Stream 3 global Interrupt */ + DMA1_Stream4_IRQn = 15, /*!< DMA1 Stream 4 global Interrupt */ + DMA1_Stream5_IRQn = 16, /*!< DMA1 Stream 5 global Interrupt */ + DMA1_Stream6_IRQn = 17, /*!< DMA1 Stream 6 global Interrupt */ + ADC_IRQn = 18, /*!< ADC1, ADC2 and ADC3 global Interrupts */ + CAN1_TX_IRQn = 19, /*!< CAN1 TX Interrupt */ + CAN1_RX0_IRQn = 20, /*!< CAN1 RX0 Interrupt */ + CAN1_RX1_IRQn = 21, /*!< CAN1 RX1 Interrupt */ + CAN1_SCE_IRQn = 22, /*!< CAN1 SCE Interrupt */ + EXTI9_5_IRQn = 23, /*!< External Line[9:5] Interrupts */ + TIM1_BRK_TIM9_IRQn = 24, /*!< TIM1 Break interrupt and TIM9 global interrupt */ + TIM1_UP_TIM10_IRQn = 25, /*!< TIM1 Update Interrupt and TIM10 global interrupt */ + TIM1_TRG_COM_TIM11_IRQn = 26, /*!< TIM1 Trigger and Commutation Interrupt and TIM11 global interrupt */ + TIM1_CC_IRQn = 27, /*!< TIM1 Capture Compare Interrupt */ + TIM2_IRQn = 28, /*!< TIM2 global Interrupt */ + TIM3_IRQn = 29, /*!< TIM3 
global Interrupt */ + TIM4_IRQn = 30, /*!< TIM4 global Interrupt */ + I2C1_EV_IRQn = 31, /*!< I2C1 Event Interrupt */ + I2C1_ER_IRQn = 32, /*!< I2C1 Error Interrupt */ + I2C2_EV_IRQn = 33, /*!< I2C2 Event Interrupt */ + I2C2_ER_IRQn = 34, /*!< I2C2 Error Interrupt */ + SPI1_IRQn = 35, /*!< SPI1 global Interrupt */ + SPI2_IRQn = 36, /*!< SPI2 global Interrupt */ + USART1_IRQn = 37, /*!< USART1 global Interrupt */ + USART2_IRQn = 38, /*!< USART2 global Interrupt */ + USART3_IRQn = 39, /*!< USART3 global Interrupt */ + EXTI15_10_IRQn = 40, /*!< External Line[15:10] Interrupts */ + RTC_Alarm_IRQn = 41, /*!< RTC Alarm (A and B) through EXTI Line Interrupt */ + OTG_FS_WKUP_IRQn = 42, /*!< USB OTG FS Wakeup through EXTI line interrupt */ + TIM8_BRK_TIM12_IRQn = 43, /*!< TIM8 Break Interrupt and TIM12 global interrupt */ + TIM8_UP_TIM13_IRQn = 44, /*!< TIM8 Update Interrupt and TIM13 global interrupt */ + TIM8_TRG_COM_TIM14_IRQn = 45, /*!< TIM8 Trigger and Commutation Interrupt and TIM14 global interrupt */ + TIM8_CC_IRQn = 46, /*!< TIM8 Capture Compare global interrupt */ + DMA1_Stream7_IRQn = 47, /*!< DMA1 Stream7 Interrupt */ + FSMC_IRQn = 48, /*!< FSMC global Interrupt */ + SDIO_IRQn = 49, /*!< SDIO global Interrupt */ + TIM5_IRQn = 50, /*!< TIM5 global Interrupt */ + SPI3_IRQn = 51, /*!< SPI3 global Interrupt */ + UART4_IRQn = 52, /*!< UART4 global Interrupt */ + UART5_IRQn = 53, /*!< UART5 global Interrupt */ + TIM6_DAC_IRQn = 54, /*!< TIM6 global and DAC1&2 underrun error interrupts */ + TIM7_IRQn = 55, /*!< TIM7 global interrupt */ + DMA2_Stream0_IRQn = 56, /*!< DMA2 Stream 0 global Interrupt */ + DMA2_Stream1_IRQn = 57, /*!< DMA2 Stream 1 global Interrupt */ + DMA2_Stream2_IRQn = 58, /*!< DMA2 Stream 2 global Interrupt */ + DMA2_Stream3_IRQn = 59, /*!< DMA2 Stream 3 global Interrupt */ + DMA2_Stream4_IRQn = 60, /*!< DMA2 Stream 4 global Interrupt */ + ETH_IRQn = 61, /*!< Ethernet global Interrupt */ + ETH_WKUP_IRQn = 62, /*!< Ethernet Wakeup through EXTI line Interrupt */ + CAN2_TX_IRQn = 63, /*!< CAN2 TX Interrupt */ + CAN2_RX0_IRQn = 64, /*!< CAN2 RX0 Interrupt */ + CAN2_RX1_IRQn = 65, /*!< CAN2 RX1 Interrupt */ + CAN2_SCE_IRQn = 66, /*!< CAN2 SCE Interrupt */ + OTG_FS_IRQn = 67, /*!< USB OTG FS global Interrupt */ + DMA2_Stream5_IRQn = 68, /*!< DMA2 Stream 5 global interrupt */ + DMA2_Stream6_IRQn = 69, /*!< DMA2 Stream 6 global interrupt */ + DMA2_Stream7_IRQn = 70, /*!< DMA2 Stream 7 global interrupt */ + USART6_IRQn = 71, /*!< USART6 global interrupt */ + I2C3_EV_IRQn = 72, /*!< I2C3 event interrupt */ + I2C3_ER_IRQn = 73, /*!< I2C3 error interrupt */ + OTG_HS_EP1_OUT_IRQn = 74, /*!< USB OTG HS End Point 1 Out global interrupt */ + OTG_HS_EP1_IN_IRQn = 75, /*!< USB OTG HS End Point 1 In global interrupt */ + OTG_HS_WKUP_IRQn = 76, /*!< USB OTG HS Wakeup through EXTI interrupt */ + OTG_HS_IRQn = 77, /*!< USB OTG HS global interrupt */ + DCMI_IRQn = 78, /*!< DCMI global interrupt */ + RNG_IRQn = 80, /*!< RNG global Interrupt */ + FPU_IRQn = 81 /*!< FPU global interrupt */ +} IRQn_Type; +/* Legacy define */ +#define HASH_RNG_IRQn RNG_IRQn + +/** + * @} + */ + +#include "core_cm4.h" /* Cortex-M4 processor and core peripherals */ +#include "system_stm32f4xx.h" +#include + +/** @addtogroup Peripheral_registers_structures + * @{ + */ + +/** + * @brief Analog to Digital Converter + */ + +typedef struct +{ + __IO uint32_t SR; /*!< ADC status register, Address offset: 0x00 */ + __IO uint32_t CR1; /*!< ADC control register 1, Address offset: 0x04 */ + __IO uint32_t CR2; /*!< ADC control 
register 2, Address offset: 0x08 */ + __IO uint32_t SMPR1; /*!< ADC sample time register 1, Address offset: 0x0C */ + __IO uint32_t SMPR2; /*!< ADC sample time register 2, Address offset: 0x10 */ + __IO uint32_t JOFR1; /*!< ADC injected channel data offset register 1, Address offset: 0x14 */ + __IO uint32_t JOFR2; /*!< ADC injected channel data offset register 2, Address offset: 0x18 */ + __IO uint32_t JOFR3; /*!< ADC injected channel data offset register 3, Address offset: 0x1C */ + __IO uint32_t JOFR4; /*!< ADC injected channel data offset register 4, Address offset: 0x20 */ + __IO uint32_t HTR; /*!< ADC watchdog higher threshold register, Address offset: 0x24 */ + __IO uint32_t LTR; /*!< ADC watchdog lower threshold register, Address offset: 0x28 */ + __IO uint32_t SQR1; /*!< ADC regular sequence register 1, Address offset: 0x2C */ + __IO uint32_t SQR2; /*!< ADC regular sequence register 2, Address offset: 0x30 */ + __IO uint32_t SQR3; /*!< ADC regular sequence register 3, Address offset: 0x34 */ + __IO uint32_t JSQR; /*!< ADC injected sequence register, Address offset: 0x38*/ + __IO uint32_t JDR1; /*!< ADC injected data register 1, Address offset: 0x3C */ + __IO uint32_t JDR2; /*!< ADC injected data register 2, Address offset: 0x40 */ + __IO uint32_t JDR3; /*!< ADC injected data register 3, Address offset: 0x44 */ + __IO uint32_t JDR4; /*!< ADC injected data register 4, Address offset: 0x48 */ + __IO uint32_t DR; /*!< ADC regular data register, Address offset: 0x4C */ +} ADC_TypeDef; + +typedef struct +{ + __IO uint32_t CSR; /*!< ADC Common status register, Address offset: ADC1 base address + 0x300 */ + __IO uint32_t CCR; /*!< ADC common control register, Address offset: ADC1 base address + 0x304 */ + __IO uint32_t CDR; /*!< ADC common regular data register for dual + AND triple modes, Address offset: ADC1 base address + 0x308 */ +} ADC_Common_TypeDef; + + +/** + * @brief Controller Area Network TxMailBox + */ + +typedef struct +{ + __IO uint32_t TIR; /*!< CAN TX mailbox identifier register */ + __IO uint32_t TDTR; /*!< CAN mailbox data length control and time stamp register */ + __IO uint32_t TDLR; /*!< CAN mailbox data low register */ + __IO uint32_t TDHR; /*!< CAN mailbox data high register */ +} CAN_TxMailBox_TypeDef; + +/** + * @brief Controller Area Network FIFOMailBox + */ + +typedef struct +{ + __IO uint32_t RIR; /*!< CAN receive FIFO mailbox identifier register */ + __IO uint32_t RDTR; /*!< CAN receive FIFO mailbox data length control and time stamp register */ + __IO uint32_t RDLR; /*!< CAN receive FIFO mailbox data low register */ + __IO uint32_t RDHR; /*!< CAN receive FIFO mailbox data high register */ +} CAN_FIFOMailBox_TypeDef; + +/** + * @brief Controller Area Network FilterRegister + */ + +typedef struct +{ + __IO uint32_t FR1; /*!< CAN Filter bank register 1 */ + __IO uint32_t FR2; /*!< CAN Filter bank register 1 */ +} CAN_FilterRegister_TypeDef; + +/** + * @brief Controller Area Network + */ + +typedef struct +{ + __IO uint32_t MCR; /*!< CAN master control register, Address offset: 0x00 */ + __IO uint32_t MSR; /*!< CAN master status register, Address offset: 0x04 */ + __IO uint32_t TSR; /*!< CAN transmit status register, Address offset: 0x08 */ + __IO uint32_t RF0R; /*!< CAN receive FIFO 0 register, Address offset: 0x0C */ + __IO uint32_t RF1R; /*!< CAN receive FIFO 1 register, Address offset: 0x10 */ + __IO uint32_t IER; /*!< CAN interrupt enable register, Address offset: 0x14 */ + __IO uint32_t ESR; /*!< CAN error status register, Address offset: 0x18 */ + __IO 
uint32_t BTR; /*!< CAN bit timing register, Address offset: 0x1C */ + uint32_t RESERVED0[88]; /*!< Reserved, 0x020 - 0x17F */ + CAN_TxMailBox_TypeDef sTxMailBox[3]; /*!< CAN Tx MailBox, Address offset: 0x180 - 0x1AC */ + CAN_FIFOMailBox_TypeDef sFIFOMailBox[2]; /*!< CAN FIFO MailBox, Address offset: 0x1B0 - 0x1CC */ + uint32_t RESERVED1[12]; /*!< Reserved, 0x1D0 - 0x1FF */ + __IO uint32_t FMR; /*!< CAN filter master register, Address offset: 0x200 */ + __IO uint32_t FM1R; /*!< CAN filter mode register, Address offset: 0x204 */ + uint32_t RESERVED2; /*!< Reserved, 0x208 */ + __IO uint32_t FS1R; /*!< CAN filter scale register, Address offset: 0x20C */ + uint32_t RESERVED3; /*!< Reserved, 0x210 */ + __IO uint32_t FFA1R; /*!< CAN filter FIFO assignment register, Address offset: 0x214 */ + uint32_t RESERVED4; /*!< Reserved, 0x218 */ + __IO uint32_t FA1R; /*!< CAN filter activation register, Address offset: 0x21C */ + uint32_t RESERVED5[8]; /*!< Reserved, 0x220-0x23F */ + CAN_FilterRegister_TypeDef sFilterRegister[28]; /*!< CAN Filter Register, Address offset: 0x240-0x31C */ +} CAN_TypeDef; + +/** + * @brief CRC calculation unit + */ + +typedef struct +{ + __IO uint32_t DR; /*!< CRC Data register, Address offset: 0x00 */ + __IO uint8_t IDR; /*!< CRC Independent data register, Address offset: 0x04 */ + uint8_t RESERVED0; /*!< Reserved, 0x05 */ + uint16_t RESERVED1; /*!< Reserved, 0x06 */ + __IO uint32_t CR; /*!< CRC Control register, Address offset: 0x08 */ +} CRC_TypeDef; + +/** + * @brief Digital to Analog Converter + */ + +typedef struct +{ + __IO uint32_t CR; /*!< DAC control register, Address offset: 0x00 */ + __IO uint32_t SWTRIGR; /*!< DAC software trigger register, Address offset: 0x04 */ + __IO uint32_t DHR12R1; /*!< DAC channel1 12-bit right-aligned data holding register, Address offset: 0x08 */ + __IO uint32_t DHR12L1; /*!< DAC channel1 12-bit left aligned data holding register, Address offset: 0x0C */ + __IO uint32_t DHR8R1; /*!< DAC channel1 8-bit right aligned data holding register, Address offset: 0x10 */ + __IO uint32_t DHR12R2; /*!< DAC channel2 12-bit right aligned data holding register, Address offset: 0x14 */ + __IO uint32_t DHR12L2; /*!< DAC channel2 12-bit left aligned data holding register, Address offset: 0x18 */ + __IO uint32_t DHR8R2; /*!< DAC channel2 8-bit right-aligned data holding register, Address offset: 0x1C */ + __IO uint32_t DHR12RD; /*!< Dual DAC 12-bit right-aligned data holding register, Address offset: 0x20 */ + __IO uint32_t DHR12LD; /*!< DUAL DAC 12-bit left aligned data holding register, Address offset: 0x24 */ + __IO uint32_t DHR8RD; /*!< DUAL DAC 8-bit right aligned data holding register, Address offset: 0x28 */ + __IO uint32_t DOR1; /*!< DAC channel1 data output register, Address offset: 0x2C */ + __IO uint32_t DOR2; /*!< DAC channel2 data output register, Address offset: 0x30 */ + __IO uint32_t SR; /*!< DAC status register, Address offset: 0x34 */ +} DAC_TypeDef; + +/** + * @brief Debug MCU + */ + +typedef struct +{ + __IO uint32_t IDCODE; /*!< MCU device ID code, Address offset: 0x00 */ + __IO uint32_t CR; /*!< Debug MCU configuration register, Address offset: 0x04 */ + __IO uint32_t APB1FZ; /*!< Debug MCU APB1 freeze register, Address offset: 0x08 */ + __IO uint32_t APB2FZ; /*!< Debug MCU APB2 freeze register, Address offset: 0x0C */ +} DBGMCU_TypeDef; + +/** + * @brief DCMI + */ + +typedef struct +{ + __IO uint32_t CR; /*!< DCMI control register 1, Address offset: 0x00 */ + __IO uint32_t SR; /*!< DCMI status register, Address offset: 0x04 */ + __IO 
uint32_t RISR; /*!< DCMI raw interrupt status register, Address offset: 0x08 */ + __IO uint32_t IER; /*!< DCMI interrupt enable register, Address offset: 0x0C */ + __IO uint32_t MISR; /*!< DCMI masked interrupt status register, Address offset: 0x10 */ + __IO uint32_t ICR; /*!< DCMI interrupt clear register, Address offset: 0x14 */ + __IO uint32_t ESCR; /*!< DCMI embedded synchronization code register, Address offset: 0x18 */ + __IO uint32_t ESUR; /*!< DCMI embedded synchronization unmask register, Address offset: 0x1C */ + __IO uint32_t CWSTRTR; /*!< DCMI crop window start, Address offset: 0x20 */ + __IO uint32_t CWSIZER; /*!< DCMI crop window size, Address offset: 0x24 */ + __IO uint32_t DR; /*!< DCMI data register, Address offset: 0x28 */ +} DCMI_TypeDef; + +/** + * @brief DMA Controller + */ + +typedef struct +{ + __IO uint32_t CR; /*!< DMA stream x configuration register */ + __IO uint32_t NDTR; /*!< DMA stream x number of data register */ + __IO uint32_t PAR; /*!< DMA stream x peripheral address register */ + __IO uint32_t M0AR; /*!< DMA stream x memory 0 address register */ + __IO uint32_t M1AR; /*!< DMA stream x memory 1 address register */ + __IO uint32_t FCR; /*!< DMA stream x FIFO control register */ +} DMA_Stream_TypeDef; + +typedef struct +{ + __IO uint32_t LISR; /*!< DMA low interrupt status register, Address offset: 0x00 */ + __IO uint32_t HISR; /*!< DMA high interrupt status register, Address offset: 0x04 */ + __IO uint32_t LIFCR; /*!< DMA low interrupt flag clear register, Address offset: 0x08 */ + __IO uint32_t HIFCR; /*!< DMA high interrupt flag clear register, Address offset: 0x0C */ +} DMA_TypeDef; + +/** + * @brief Ethernet MAC + */ + +typedef struct +{ + __IO uint32_t MACCR; + __IO uint32_t MACFFR; + __IO uint32_t MACHTHR; + __IO uint32_t MACHTLR; + __IO uint32_t MACMIIAR; + __IO uint32_t MACMIIDR; + __IO uint32_t MACFCR; + __IO uint32_t MACVLANTR; /* 8 */ + uint32_t RESERVED0[2]; + __IO uint32_t MACRWUFFR; /* 11 */ + __IO uint32_t MACPMTCSR; + uint32_t RESERVED1; + __IO uint32_t MACDBGR; + __IO uint32_t MACSR; /* 15 */ + __IO uint32_t MACIMR; + __IO uint32_t MACA0HR; + __IO uint32_t MACA0LR; + __IO uint32_t MACA1HR; + __IO uint32_t MACA1LR; + __IO uint32_t MACA2HR; + __IO uint32_t MACA2LR; + __IO uint32_t MACA3HR; + __IO uint32_t MACA3LR; /* 24 */ + uint32_t RESERVED2[40]; + __IO uint32_t MMCCR; /* 65 */ + __IO uint32_t MMCRIR; + __IO uint32_t MMCTIR; + __IO uint32_t MMCRIMR; + __IO uint32_t MMCTIMR; /* 69 */ + uint32_t RESERVED3[14]; + __IO uint32_t MMCTGFSCCR; /* 84 */ + __IO uint32_t MMCTGFMSCCR; + uint32_t RESERVED4[5]; + __IO uint32_t MMCTGFCR; + uint32_t RESERVED5[10]; + __IO uint32_t MMCRFCECR; + __IO uint32_t MMCRFAECR; + uint32_t RESERVED6[10]; + __IO uint32_t MMCRGUFCR; + uint32_t RESERVED7[334]; + __IO uint32_t PTPTSCR; + __IO uint32_t PTPSSIR; + __IO uint32_t PTPTSHR; + __IO uint32_t PTPTSLR; + __IO uint32_t PTPTSHUR; + __IO uint32_t PTPTSLUR; + __IO uint32_t PTPTSAR; + __IO uint32_t PTPTTHR; + __IO uint32_t PTPTTLR; + __IO uint32_t RESERVED8; + __IO uint32_t PTPTSSR; + uint32_t RESERVED9[565]; + __IO uint32_t DMABMR; + __IO uint32_t DMATPDR; + __IO uint32_t DMARPDR; + __IO uint32_t DMARDLAR; + __IO uint32_t DMATDLAR; + __IO uint32_t DMASR; + __IO uint32_t DMAOMR; + __IO uint32_t DMAIER; + __IO uint32_t DMAMFBOCR; + __IO uint32_t DMARSWTR; + uint32_t RESERVED10[8]; + __IO uint32_t DMACHTDR; + __IO uint32_t DMACHRDR; + __IO uint32_t DMACHTBAR; + __IO uint32_t DMACHRBAR; +} ETH_TypeDef; + +/** + * @brief External Interrupt/Event Controller + */ + +typedef 
struct +{ + __IO uint32_t IMR; /*!< EXTI Interrupt mask register, Address offset: 0x00 */ + __IO uint32_t EMR; /*!< EXTI Event mask register, Address offset: 0x04 */ + __IO uint32_t RTSR; /*!< EXTI Rising trigger selection register, Address offset: 0x08 */ + __IO uint32_t FTSR; /*!< EXTI Falling trigger selection register, Address offset: 0x0C */ + __IO uint32_t SWIER; /*!< EXTI Software interrupt event register, Address offset: 0x10 */ + __IO uint32_t PR; /*!< EXTI Pending register, Address offset: 0x14 */ +} EXTI_TypeDef; + +/** + * @brief FLASH Registers + */ + +typedef struct +{ + __IO uint32_t ACR; /*!< FLASH access control register, Address offset: 0x00 */ + __IO uint32_t KEYR; /*!< FLASH key register, Address offset: 0x04 */ + __IO uint32_t OPTKEYR; /*!< FLASH option key register, Address offset: 0x08 */ + __IO uint32_t SR; /*!< FLASH status register, Address offset: 0x0C */ + __IO uint32_t CR; /*!< FLASH control register, Address offset: 0x10 */ + __IO uint32_t OPTCR; /*!< FLASH option control register , Address offset: 0x14 */ + __IO uint32_t OPTCR1; /*!< FLASH option control register 1, Address offset: 0x18 */ +} FLASH_TypeDef; + + + +/** + * @brief Flexible Static Memory Controller + */ + +typedef struct +{ + __IO uint32_t BTCR[8]; /*!< NOR/PSRAM chip-select control register(BCR) and chip-select timing register(BTR), Address offset: 0x00-1C */ +} FSMC_Bank1_TypeDef; + +/** + * @brief Flexible Static Memory Controller Bank1E + */ + +typedef struct +{ + __IO uint32_t BWTR[7]; /*!< NOR/PSRAM write timing registers, Address offset: 0x104-0x11C */ +} FSMC_Bank1E_TypeDef; + +/** + * @brief Flexible Static Memory Controller Bank2 + */ + +typedef struct +{ + __IO uint32_t PCR2; /*!< NAND Flash control register 2, Address offset: 0x60 */ + __IO uint32_t SR2; /*!< NAND Flash FIFO status and interrupt register 2, Address offset: 0x64 */ + __IO uint32_t PMEM2; /*!< NAND Flash Common memory space timing register 2, Address offset: 0x68 */ + __IO uint32_t PATT2; /*!< NAND Flash Attribute memory space timing register 2, Address offset: 0x6C */ + uint32_t RESERVED0; /*!< Reserved, 0x70 */ + __IO uint32_t ECCR2; /*!< NAND Flash ECC result registers 2, Address offset: 0x74 */ + uint32_t RESERVED1; /*!< Reserved, 0x78 */ + uint32_t RESERVED2; /*!< Reserved, 0x7C */ + __IO uint32_t PCR3; /*!< NAND Flash control register 3, Address offset: 0x80 */ + __IO uint32_t SR3; /*!< NAND Flash FIFO status and interrupt register 3, Address offset: 0x84 */ + __IO uint32_t PMEM3; /*!< NAND Flash Common memory space timing register 3, Address offset: 0x88 */ + __IO uint32_t PATT3; /*!< NAND Flash Attribute memory space timing register 3, Address offset: 0x8C */ + uint32_t RESERVED3; /*!< Reserved, 0x90 */ + __IO uint32_t ECCR3; /*!< NAND Flash ECC result registers 3, Address offset: 0x94 */ +} FSMC_Bank2_3_TypeDef; + +/** + * @brief Flexible Static Memory Controller Bank4 + */ + +typedef struct +{ + __IO uint32_t PCR4; /*!< PC Card control register 4, Address offset: 0xA0 */ + __IO uint32_t SR4; /*!< PC Card FIFO status and interrupt register 4, Address offset: 0xA4 */ + __IO uint32_t PMEM4; /*!< PC Card Common memory space timing register 4, Address offset: 0xA8 */ + __IO uint32_t PATT4; /*!< PC Card Attribute memory space timing register 4, Address offset: 0xAC */ + __IO uint32_t PIO4; /*!< PC Card I/O space timing register 4, Address offset: 0xB0 */ +} FSMC_Bank4_TypeDef; + +/** + * @brief General Purpose I/O + */ + +typedef struct +{ + __IO uint32_t MODER; /*!< GPIO port mode register, Address offset: 0x00 */ 
+ __IO uint32_t OTYPER; /*!< GPIO port output type register, Address offset: 0x04 */ + __IO uint32_t OSPEEDR; /*!< GPIO port output speed register, Address offset: 0x08 */ + __IO uint32_t PUPDR; /*!< GPIO port pull-up/pull-down register, Address offset: 0x0C */ + __IO uint32_t IDR; /*!< GPIO port input data register, Address offset: 0x10 */ + __IO uint32_t ODR; /*!< GPIO port output data register, Address offset: 0x14 */ + __IO uint32_t BSRR; /*!< GPIO port bit set/reset register, Address offset: 0x18 */ + __IO uint32_t LCKR; /*!< GPIO port configuration lock register, Address offset: 0x1C */ + __IO uint32_t AFR[2]; /*!< GPIO alternate function registers, Address offset: 0x20-0x24 */ +} GPIO_TypeDef; + +/** + * @brief System configuration controller + */ + +typedef struct +{ + __IO uint32_t MEMRMP; /*!< SYSCFG memory remap register, Address offset: 0x00 */ + __IO uint32_t PMC; /*!< SYSCFG peripheral mode configuration register, Address offset: 0x04 */ + __IO uint32_t EXTICR[4]; /*!< SYSCFG external interrupt configuration registers, Address offset: 0x08-0x14 */ + uint32_t RESERVED[2]; /*!< Reserved, 0x18-0x1C */ + __IO uint32_t CMPCR; /*!< SYSCFG Compensation cell control register, Address offset: 0x20 */ +} SYSCFG_TypeDef; + +/** + * @brief Inter-integrated Circuit Interface + */ + +typedef struct +{ + __IO uint32_t CR1; /*!< I2C Control register 1, Address offset: 0x00 */ + __IO uint32_t CR2; /*!< I2C Control register 2, Address offset: 0x04 */ + __IO uint32_t OAR1; /*!< I2C Own address register 1, Address offset: 0x08 */ + __IO uint32_t OAR2; /*!< I2C Own address register 2, Address offset: 0x0C */ + __IO uint32_t DR; /*!< I2C Data register, Address offset: 0x10 */ + __IO uint32_t SR1; /*!< I2C Status register 1, Address offset: 0x14 */ + __IO uint32_t SR2; /*!< I2C Status register 2, Address offset: 0x18 */ + __IO uint32_t CCR; /*!< I2C Clock control register, Address offset: 0x1C */ + __IO uint32_t TRISE; /*!< I2C TRISE register, Address offset: 0x20 */ +} I2C_TypeDef; + +/** + * @brief Independent WATCHDOG + */ + +typedef struct +{ + __IO uint32_t KR; /*!< IWDG Key register, Address offset: 0x00 */ + __IO uint32_t PR; /*!< IWDG Prescaler register, Address offset: 0x04 */ + __IO uint32_t RLR; /*!< IWDG Reload register, Address offset: 0x08 */ + __IO uint32_t SR; /*!< IWDG Status register, Address offset: 0x0C */ +} IWDG_TypeDef; + + +/** + * @brief Power Control + */ + +typedef struct +{ + __IO uint32_t CR; /*!< PWR power control register, Address offset: 0x00 */ + __IO uint32_t CSR; /*!< PWR power control/status register, Address offset: 0x04 */ +} PWR_TypeDef; + +/** + * @brief Reset and Clock Control + */ + +typedef struct +{ + __IO uint32_t CR; /*!< RCC clock control register, Address offset: 0x00 */ + __IO uint32_t PLLCFGR; /*!< RCC PLL configuration register, Address offset: 0x04 */ + __IO uint32_t CFGR; /*!< RCC clock configuration register, Address offset: 0x08 */ + __IO uint32_t CIR; /*!< RCC clock interrupt register, Address offset: 0x0C */ + __IO uint32_t AHB1RSTR; /*!< RCC AHB1 peripheral reset register, Address offset: 0x10 */ + __IO uint32_t AHB2RSTR; /*!< RCC AHB2 peripheral reset register, Address offset: 0x14 */ + __IO uint32_t AHB3RSTR; /*!< RCC AHB3 peripheral reset register, Address offset: 0x18 */ + uint32_t RESERVED0; /*!< Reserved, 0x1C */ + __IO uint32_t APB1RSTR; /*!< RCC APB1 peripheral reset register, Address offset: 0x20 */ + __IO uint32_t APB2RSTR; /*!< RCC APB2 peripheral reset register, Address offset: 0x24 */ + uint32_t RESERVED1[2]; /*!< Reserved, 
0x28-0x2C */ + __IO uint32_t AHB1ENR; /*!< RCC AHB1 peripheral clock register, Address offset: 0x30 */ + __IO uint32_t AHB2ENR; /*!< RCC AHB2 peripheral clock register, Address offset: 0x34 */ + __IO uint32_t AHB3ENR; /*!< RCC AHB3 peripheral clock register, Address offset: 0x38 */ + uint32_t RESERVED2; /*!< Reserved, 0x3C */ + __IO uint32_t APB1ENR; /*!< RCC APB1 peripheral clock enable register, Address offset: 0x40 */ + __IO uint32_t APB2ENR; /*!< RCC APB2 peripheral clock enable register, Address offset: 0x44 */ + uint32_t RESERVED3[2]; /*!< Reserved, 0x48-0x4C */ + __IO uint32_t AHB1LPENR; /*!< RCC AHB1 peripheral clock enable in low power mode register, Address offset: 0x50 */ + __IO uint32_t AHB2LPENR; /*!< RCC AHB2 peripheral clock enable in low power mode register, Address offset: 0x54 */ + __IO uint32_t AHB3LPENR; /*!< RCC AHB3 peripheral clock enable in low power mode register, Address offset: 0x58 */ + uint32_t RESERVED4; /*!< Reserved, 0x5C */ + __IO uint32_t APB1LPENR; /*!< RCC APB1 peripheral clock enable in low power mode register, Address offset: 0x60 */ + __IO uint32_t APB2LPENR; /*!< RCC APB2 peripheral clock enable in low power mode register, Address offset: 0x64 */ + uint32_t RESERVED5[2]; /*!< Reserved, 0x68-0x6C */ + __IO uint32_t BDCR; /*!< RCC Backup domain control register, Address offset: 0x70 */ + __IO uint32_t CSR; /*!< RCC clock control & status register, Address offset: 0x74 */ + uint32_t RESERVED6[2]; /*!< Reserved, 0x78-0x7C */ + __IO uint32_t SSCGR; /*!< RCC spread spectrum clock generation register, Address offset: 0x80 */ + __IO uint32_t PLLI2SCFGR; /*!< RCC PLLI2S configuration register, Address offset: 0x84 */ +} RCC_TypeDef; + +/** + * @brief Real-Time Clock + */ + +typedef struct +{ + __IO uint32_t TR; /*!< RTC time register, Address offset: 0x00 */ + __IO uint32_t DR; /*!< RTC date register, Address offset: 0x04 */ + __IO uint32_t CR; /*!< RTC control register, Address offset: 0x08 */ + __IO uint32_t ISR; /*!< RTC initialization and status register, Address offset: 0x0C */ + __IO uint32_t PRER; /*!< RTC prescaler register, Address offset: 0x10 */ + __IO uint32_t WUTR; /*!< RTC wakeup timer register, Address offset: 0x14 */ + __IO uint32_t CALIBR; /*!< RTC calibration register, Address offset: 0x18 */ + __IO uint32_t ALRMAR; /*!< RTC alarm A register, Address offset: 0x1C */ + __IO uint32_t ALRMBR; /*!< RTC alarm B register, Address offset: 0x20 */ + __IO uint32_t WPR; /*!< RTC write protection register, Address offset: 0x24 */ + __IO uint32_t SSR; /*!< RTC sub second register, Address offset: 0x28 */ + __IO uint32_t SHIFTR; /*!< RTC shift control register, Address offset: 0x2C */ + __IO uint32_t TSTR; /*!< RTC time stamp time register, Address offset: 0x30 */ + __IO uint32_t TSDR; /*!< RTC time stamp date register, Address offset: 0x34 */ + __IO uint32_t TSSSR; /*!< RTC time-stamp sub second register, Address offset: 0x38 */ + __IO uint32_t CALR; /*!< RTC calibration register, Address offset: 0x3C */ + __IO uint32_t TAFCR; /*!< RTC tamper and alternate function configuration register, Address offset: 0x40 */ + __IO uint32_t ALRMASSR;/*!< RTC alarm A sub second register, Address offset: 0x44 */ + __IO uint32_t ALRMBSSR;/*!< RTC alarm B sub second register, Address offset: 0x48 */ + uint32_t RESERVED7; /*!< Reserved, 0x4C */ + __IO uint32_t BKP0R; /*!< RTC backup register 1, Address offset: 0x50 */ + __IO uint32_t BKP1R; /*!< RTC backup register 1, Address offset: 0x54 */ + __IO uint32_t BKP2R; /*!< RTC backup register 2, Address offset: 0x58 */ + 
__IO uint32_t BKP3R; /*!< RTC backup register 3, Address offset: 0x5C */ + __IO uint32_t BKP4R; /*!< RTC backup register 4, Address offset: 0x60 */ + __IO uint32_t BKP5R; /*!< RTC backup register 5, Address offset: 0x64 */ + __IO uint32_t BKP6R; /*!< RTC backup register 6, Address offset: 0x68 */ + __IO uint32_t BKP7R; /*!< RTC backup register 7, Address offset: 0x6C */ + __IO uint32_t BKP8R; /*!< RTC backup register 8, Address offset: 0x70 */ + __IO uint32_t BKP9R; /*!< RTC backup register 9, Address offset: 0x74 */ + __IO uint32_t BKP10R; /*!< RTC backup register 10, Address offset: 0x78 */ + __IO uint32_t BKP11R; /*!< RTC backup register 11, Address offset: 0x7C */ + __IO uint32_t BKP12R; /*!< RTC backup register 12, Address offset: 0x80 */ + __IO uint32_t BKP13R; /*!< RTC backup register 13, Address offset: 0x84 */ + __IO uint32_t BKP14R; /*!< RTC backup register 14, Address offset: 0x88 */ + __IO uint32_t BKP15R; /*!< RTC backup register 15, Address offset: 0x8C */ + __IO uint32_t BKP16R; /*!< RTC backup register 16, Address offset: 0x90 */ + __IO uint32_t BKP17R; /*!< RTC backup register 17, Address offset: 0x94 */ + __IO uint32_t BKP18R; /*!< RTC backup register 18, Address offset: 0x98 */ + __IO uint32_t BKP19R; /*!< RTC backup register 19, Address offset: 0x9C */ +} RTC_TypeDef; + +/** + * @brief SD host Interface + */ + +typedef struct +{ + __IO uint32_t POWER; /*!< SDIO power control register, Address offset: 0x00 */ + __IO uint32_t CLKCR; /*!< SDI clock control register, Address offset: 0x04 */ + __IO uint32_t ARG; /*!< SDIO argument register, Address offset: 0x08 */ + __IO uint32_t CMD; /*!< SDIO command register, Address offset: 0x0C */ + __IO const uint32_t RESPCMD; /*!< SDIO command response register, Address offset: 0x10 */ + __IO const uint32_t RESP1; /*!< SDIO response 1 register, Address offset: 0x14 */ + __IO const uint32_t RESP2; /*!< SDIO response 2 register, Address offset: 0x18 */ + __IO const uint32_t RESP3; /*!< SDIO response 3 register, Address offset: 0x1C */ + __IO const uint32_t RESP4; /*!< SDIO response 4 register, Address offset: 0x20 */ + __IO uint32_t DTIMER; /*!< SDIO data timer register, Address offset: 0x24 */ + __IO uint32_t DLEN; /*!< SDIO data length register, Address offset: 0x28 */ + __IO uint32_t DCTRL; /*!< SDIO data control register, Address offset: 0x2C */ + __IO const uint32_t DCOUNT; /*!< SDIO data counter register, Address offset: 0x30 */ + __IO const uint32_t STA; /*!< SDIO status register, Address offset: 0x34 */ + __IO uint32_t ICR; /*!< SDIO interrupt clear register, Address offset: 0x38 */ + __IO uint32_t MASK; /*!< SDIO mask register, Address offset: 0x3C */ + uint32_t RESERVED0[2]; /*!< Reserved, 0x40-0x44 */ + __IO const uint32_t FIFOCNT; /*!< SDIO FIFO counter register, Address offset: 0x48 */ + uint32_t RESERVED1[13]; /*!< Reserved, 0x4C-0x7C */ + __IO uint32_t FIFO; /*!< SDIO data FIFO register, Address offset: 0x80 */ +} SDIO_TypeDef; + +/** + * @brief Serial Peripheral Interface + */ + +typedef struct +{ + __IO uint32_t CR1; /*!< SPI control register 1 (not used in I2S mode), Address offset: 0x00 */ + __IO uint32_t CR2; /*!< SPI control register 2, Address offset: 0x04 */ + __IO uint32_t SR; /*!< SPI status register, Address offset: 0x08 */ + __IO uint32_t DR; /*!< SPI data register, Address offset: 0x0C */ + __IO uint32_t CRCPR; /*!< SPI CRC polynomial register (not used in I2S mode), Address offset: 0x10 */ + __IO uint32_t RXCRCR; /*!< SPI RX CRC register (not used in I2S mode), Address offset: 0x14 */ + __IO uint32_t TXCRCR; 
/*!< SPI TX CRC register (not used in I2S mode), Address offset: 0x18 */ + __IO uint32_t I2SCFGR; /*!< SPI_I2S configuration register, Address offset: 0x1C */ + __IO uint32_t I2SPR; /*!< SPI_I2S prescaler register, Address offset: 0x20 */ +} SPI_TypeDef; + + +/** + * @brief TIM + */ + +typedef struct +{ + __IO uint32_t CR1; /*!< TIM control register 1, Address offset: 0x00 */ + __IO uint32_t CR2; /*!< TIM control register 2, Address offset: 0x04 */ + __IO uint32_t SMCR; /*!< TIM slave mode control register, Address offset: 0x08 */ + __IO uint32_t DIER; /*!< TIM DMA/interrupt enable register, Address offset: 0x0C */ + __IO uint32_t SR; /*!< TIM status register, Address offset: 0x10 */ + __IO uint32_t EGR; /*!< TIM event generation register, Address offset: 0x14 */ + __IO uint32_t CCMR1; /*!< TIM capture/compare mode register 1, Address offset: 0x18 */ + __IO uint32_t CCMR2; /*!< TIM capture/compare mode register 2, Address offset: 0x1C */ + __IO uint32_t CCER; /*!< TIM capture/compare enable register, Address offset: 0x20 */ + __IO uint32_t CNT; /*!< TIM counter register, Address offset: 0x24 */ + __IO uint32_t PSC; /*!< TIM prescaler, Address offset: 0x28 */ + __IO uint32_t ARR; /*!< TIM auto-reload register, Address offset: 0x2C */ + __IO uint32_t RCR; /*!< TIM repetition counter register, Address offset: 0x30 */ + __IO uint32_t CCR1; /*!< TIM capture/compare register 1, Address offset: 0x34 */ + __IO uint32_t CCR2; /*!< TIM capture/compare register 2, Address offset: 0x38 */ + __IO uint32_t CCR3; /*!< TIM capture/compare register 3, Address offset: 0x3C */ + __IO uint32_t CCR4; /*!< TIM capture/compare register 4, Address offset: 0x40 */ + __IO uint32_t BDTR; /*!< TIM break and dead-time register, Address offset: 0x44 */ + __IO uint32_t DCR; /*!< TIM DMA control register, Address offset: 0x48 */ + __IO uint32_t DMAR; /*!< TIM DMA address for full transfer, Address offset: 0x4C */ + __IO uint32_t OR; /*!< TIM option register, Address offset: 0x50 */ +} TIM_TypeDef; + +/** + * @brief Universal Synchronous Asynchronous Receiver Transmitter + */ + +typedef struct +{ + __IO uint32_t SR; /*!< USART Status register, Address offset: 0x00 */ + __IO uint32_t DR; /*!< USART Data register, Address offset: 0x04 */ + __IO uint32_t BRR; /*!< USART Baud rate register, Address offset: 0x08 */ + __IO uint32_t CR1; /*!< USART Control register 1, Address offset: 0x0C */ + __IO uint32_t CR2; /*!< USART Control register 2, Address offset: 0x10 */ + __IO uint32_t CR3; /*!< USART Control register 3, Address offset: 0x14 */ + __IO uint32_t GTPR; /*!< USART Guard time and prescaler register, Address offset: 0x18 */ +} USART_TypeDef; + +/** + * @brief Window WATCHDOG + */ + +typedef struct +{ + __IO uint32_t CR; /*!< WWDG Control register, Address offset: 0x00 */ + __IO uint32_t CFR; /*!< WWDG Configuration register, Address offset: 0x04 */ + __IO uint32_t SR; /*!< WWDG Status register, Address offset: 0x08 */ +} WWDG_TypeDef; + +/** + * @brief RNG + */ + +typedef struct +{ + __IO uint32_t CR; /*!< RNG control register, Address offset: 0x00 */ + __IO uint32_t SR; /*!< RNG status register, Address offset: 0x04 */ + __IO uint32_t DR; /*!< RNG data register, Address offset: 0x08 */ +} RNG_TypeDef; + +/** + * @brief USB_OTG_Core_Registers + */ +typedef struct +{ + __IO uint32_t GOTGCTL; /*!< USB_OTG Control and Status Register 000h */ + __IO uint32_t GOTGINT; /*!< USB_OTG Interrupt Register 004h */ + __IO uint32_t GAHBCFG; /*!< Core AHB Configuration Register 008h */ + __IO uint32_t GUSBCFG; /*!< Core USB 
Configuration Register 00Ch */ + __IO uint32_t GRSTCTL; /*!< Core Reset Register 010h */ + __IO uint32_t GINTSTS; /*!< Core Interrupt Register 014h */ + __IO uint32_t GINTMSK; /*!< Core Interrupt Mask Register 018h */ + __IO uint32_t GRXSTSR; /*!< Receive Sts Q Read Register 01Ch */ + __IO uint32_t GRXSTSP; /*!< Receive Sts Q Read & POP Register 020h */ + __IO uint32_t GRXFSIZ; /*!< Receive FIFO Size Register 024h */ + __IO uint32_t DIEPTXF0_HNPTXFSIZ; /*!< EP0 / Non Periodic Tx FIFO Size Register 028h */ + __IO uint32_t HNPTXSTS; /*!< Non Periodic Tx FIFO/Queue Sts reg 02Ch */ + uint32_t Reserved30[2]; /*!< Reserved 030h */ + __IO uint32_t GCCFG; /*!< General Purpose IO Register 038h */ + __IO uint32_t CID; /*!< User ID Register 03Ch */ + uint32_t Reserved40[48]; /*!< Reserved 0x40-0xFF */ + __IO uint32_t HPTXFSIZ; /*!< Host Periodic Tx FIFO Size Reg 100h */ + __IO uint32_t DIEPTXF[0x0F]; /*!< dev Periodic Transmit FIFO */ +} USB_OTG_GlobalTypeDef; + +/** + * @brief USB_OTG_device_Registers + */ +typedef struct +{ + __IO uint32_t DCFG; /*!< dev Configuration Register 800h */ + __IO uint32_t DCTL; /*!< dev Control Register 804h */ + __IO uint32_t DSTS; /*!< dev Status Register (RO) 808h */ + uint32_t Reserved0C; /*!< Reserved 80Ch */ + __IO uint32_t DIEPMSK; /*!< dev IN Endpoint Mask 810h */ + __IO uint32_t DOEPMSK; /*!< dev OUT Endpoint Mask 814h */ + __IO uint32_t DAINT; /*!< dev All Endpoints Itr Reg 818h */ + __IO uint32_t DAINTMSK; /*!< dev All Endpoints Itr Mask 81Ch */ + uint32_t Reserved20; /*!< Reserved 820h */ + uint32_t Reserved9; /*!< Reserved 824h */ + __IO uint32_t DVBUSDIS; /*!< dev VBUS discharge Register 828h */ + __IO uint32_t DVBUSPULSE; /*!< dev VBUS Pulse Register 82Ch */ + __IO uint32_t DTHRCTL; /*!< dev threshold 830h */ + __IO uint32_t DIEPEMPMSK; /*!< dev empty msk 834h */ + __IO uint32_t DEACHINT; /*!< dedicated EP interrupt 838h */ + __IO uint32_t DEACHMSK; /*!< dedicated EP msk 83Ch */ + uint32_t Reserved40; /*!< dedicated EP mask 840h */ + __IO uint32_t DINEP1MSK; /*!< dedicated EP mask 844h */ + uint32_t Reserved44[15]; /*!< Reserved 844-87Ch */ + __IO uint32_t DOUTEP1MSK; /*!< dedicated EP msk 884h */ +} USB_OTG_DeviceTypeDef; + +/** + * @brief USB_OTG_IN_Endpoint-Specific_Register + */ +typedef struct +{ + __IO uint32_t DIEPCTL; /*!< dev IN Endpoint Control Reg 900h + (ep_num * 20h) + 00h */ + uint32_t Reserved04; /*!< Reserved 900h + (ep_num * 20h) + 04h */ + __IO uint32_t DIEPINT; /*!< dev IN Endpoint Itr Reg 900h + (ep_num * 20h) + 08h */ + uint32_t Reserved0C; /*!< Reserved 900h + (ep_num * 20h) + 0Ch */ + __IO uint32_t DIEPTSIZ; /*!< IN Endpoint Txfer Size 900h + (ep_num * 20h) + 10h */ + __IO uint32_t DIEPDMA; /*!< IN Endpoint DMA Address Reg 900h + (ep_num * 20h) + 14h */ + __IO uint32_t DTXFSTS; /*!< IN Endpoint Tx FIFO Status Reg 900h + (ep_num * 20h) + 18h */ + uint32_t Reserved18; /*!< Reserved 900h+(ep_num*20h)+1Ch-900h+ (ep_num * 20h) + 1Ch */ +} USB_OTG_INEndpointTypeDef; + +/** + * @brief USB_OTG_OUT_Endpoint-Specific_Registers + */ +typedef struct +{ + __IO uint32_t DOEPCTL; /*!< dev OUT Endpoint Control Reg B00h + (ep_num * 20h) + 00h */ + uint32_t Reserved04; /*!< Reserved B00h + (ep_num * 20h) + 04h */ + __IO uint32_t DOEPINT; /*!< dev OUT Endpoint Itr Reg B00h + (ep_num * 20h) + 08h */ + uint32_t Reserved0C; /*!< Reserved B00h + (ep_num * 20h) + 0Ch */ + __IO uint32_t DOEPTSIZ; /*!< dev OUT Endpoint Txfer Size B00h + (ep_num * 20h) + 10h */ + __IO uint32_t DOEPDMA; /*!< dev OUT Endpoint DMA Address B00h + (ep_num * 20h) + 14h */ + 
uint32_t Reserved18[2]; /*!< Reserved B00h + (ep_num * 20h) + 18h - B00h + (ep_num * 20h) + 1Ch */ +} USB_OTG_OUTEndpointTypeDef; + +/** + * @brief USB_OTG_Host_Mode_Register_Structures + */ +typedef struct +{ + __IO uint32_t HCFG; /*!< Host Configuration Register 400h */ + __IO uint32_t HFIR; /*!< Host Frame Interval Register 404h */ + __IO uint32_t HFNUM; /*!< Host Frame Nbr/Frame Remaining 408h */ + uint32_t Reserved40C; /*!< Reserved 40Ch */ + __IO uint32_t HPTXSTS; /*!< Host Periodic Tx FIFO/ Queue Status 410h */ + __IO uint32_t HAINT; /*!< Host All Channels Interrupt Register 414h */ + __IO uint32_t HAINTMSK; /*!< Host All Channels Interrupt Mask 418h */ +} USB_OTG_HostTypeDef; + +/** + * @brief USB_OTG_Host_Channel_Specific_Registers + */ +typedef struct +{ + __IO uint32_t HCCHAR; /*!< Host Channel Characteristics Register 500h */ + __IO uint32_t HCSPLT; /*!< Host Channel Split Control Register 504h */ + __IO uint32_t HCINT; /*!< Host Channel Interrupt Register 508h */ + __IO uint32_t HCINTMSK; /*!< Host Channel Interrupt Mask Register 50Ch */ + __IO uint32_t HCTSIZ; /*!< Host Channel Transfer Size Register 510h */ + __IO uint32_t HCDMA; /*!< Host Channel DMA Address Register 514h */ + uint32_t Reserved[2]; /*!< Reserved */ +} USB_OTG_HostChannelTypeDef; + +/** + * @} + */ + +/** @addtogroup Peripheral_memory_map + * @{ + */ +#define FLASH_BASE 0x08000000UL /*!< FLASH(up to 1 MB) base address in the alias region */ +#define CCMDATARAM_BASE 0x10000000UL /*!< CCM(core coupled memory) data RAM(64 KB) base address in the alias region */ +#define SRAM1_BASE 0x20000000UL /*!< SRAM1(112 KB) base address in the alias region */ +#define SRAM2_BASE 0x2001C000UL /*!< SRAM2(16 KB) base address in the alias region */ +#define PERIPH_BASE 0x40000000UL /*!< Peripheral base address in the alias region */ +#define BKPSRAM_BASE 0x40024000UL /*!< Backup SRAM(4 KB) base address in the alias region */ +#define FSMC_R_BASE 0xA0000000UL /*!< FSMC registers base address */ +#define SRAM1_BB_BASE 0x22000000UL /*!< SRAM1(112 KB) base address in the bit-band region */ +#define SRAM2_BB_BASE 0x22380000UL /*!< SRAM2(16 KB) base address in the bit-band region */ +#define PERIPH_BB_BASE 0x42000000UL /*!< Peripheral base address in the bit-band region */ +#define BKPSRAM_BB_BASE 0x42480000UL /*!< Backup SRAM(4 KB) base address in the bit-band region */ +#define FLASH_END 0x080FFFFFUL /*!< FLASH end address */ +#define FLASH_OTP_BASE 0x1FFF7800UL /*!< Base address of : (up to 528 Bytes) embedded FLASH OTP Area */ +#define FLASH_OTP_END 0x1FFF7A0FUL /*!< End address of : (up to 528 Bytes) embedded FLASH OTP Area */ +#define CCMDATARAM_END 0x1000FFFFUL /*!< CCM data RAM end address */ + +/* Legacy defines */ +#define SRAM_BASE SRAM1_BASE +#define SRAM_BB_BASE SRAM1_BB_BASE + +/*!< Peripheral memory map */ +#define APB1PERIPH_BASE PERIPH_BASE +#define APB2PERIPH_BASE (PERIPH_BASE + 0x00010000UL) +#define AHB1PERIPH_BASE (PERIPH_BASE + 0x00020000UL) +#define AHB2PERIPH_BASE (PERIPH_BASE + 0x10000000UL) + +/*!< APB1 peripherals */ +#define TIM2_BASE (APB1PERIPH_BASE + 0x0000UL) +#define TIM3_BASE (APB1PERIPH_BASE + 0x0400UL) +#define TIM4_BASE (APB1PERIPH_BASE + 0x0800UL) +#define TIM5_BASE (APB1PERIPH_BASE + 0x0C00UL) +#define TIM6_BASE (APB1PERIPH_BASE + 0x1000UL) +#define TIM7_BASE (APB1PERIPH_BASE + 0x1400UL) +#define TIM12_BASE (APB1PERIPH_BASE + 0x1800UL) +#define TIM13_BASE (APB1PERIPH_BASE + 0x1C00UL) +#define TIM14_BASE (APB1PERIPH_BASE + 0x2000UL) +#define RTC_BASE (APB1PERIPH_BASE + 0x2800UL) +#define 
WWDG_BASE (APB1PERIPH_BASE + 0x2C00UL) +#define IWDG_BASE (APB1PERIPH_BASE + 0x3000UL) +#define I2S2ext_BASE (APB1PERIPH_BASE + 0x3400UL) +#define SPI2_BASE (APB1PERIPH_BASE + 0x3800UL) +#define SPI3_BASE (APB1PERIPH_BASE + 0x3C00UL) +#define I2S3ext_BASE (APB1PERIPH_BASE + 0x4000UL) +#define USART2_BASE (APB1PERIPH_BASE + 0x4400UL) +#define USART3_BASE (APB1PERIPH_BASE + 0x4800UL) +#define UART4_BASE (APB1PERIPH_BASE + 0x4C00UL) +#define UART5_BASE (APB1PERIPH_BASE + 0x5000UL) +#define I2C1_BASE (APB1PERIPH_BASE + 0x5400UL) +#define I2C2_BASE (APB1PERIPH_BASE + 0x5800UL) +#define I2C3_BASE (APB1PERIPH_BASE + 0x5C00UL) +#define CAN1_BASE (APB1PERIPH_BASE + 0x6400UL) +#define CAN2_BASE (APB1PERIPH_BASE + 0x6800UL) +#define PWR_BASE (APB1PERIPH_BASE + 0x7000UL) +#define DAC_BASE (APB1PERIPH_BASE + 0x7400UL) + +/*!< APB2 peripherals */ +#define TIM1_BASE (APB2PERIPH_BASE + 0x0000UL) +#define TIM8_BASE (APB2PERIPH_BASE + 0x0400UL) +#define USART1_BASE (APB2PERIPH_BASE + 0x1000UL) +#define USART6_BASE (APB2PERIPH_BASE + 0x1400UL) +#define ADC1_BASE (APB2PERIPH_BASE + 0x2000UL) +#define ADC2_BASE (APB2PERIPH_BASE + 0x2100UL) +#define ADC3_BASE (APB2PERIPH_BASE + 0x2200UL) +#define ADC123_COMMON_BASE (APB2PERIPH_BASE + 0x2300UL) +/* Legacy define */ +#define ADC_BASE ADC123_COMMON_BASE +#define SDIO_BASE (APB2PERIPH_BASE + 0x2C00UL) +#define SPI1_BASE (APB2PERIPH_BASE + 0x3000UL) +#define SYSCFG_BASE (APB2PERIPH_BASE + 0x3800UL) +#define EXTI_BASE (APB2PERIPH_BASE + 0x3C00UL) +#define TIM9_BASE (APB2PERIPH_BASE + 0x4000UL) +#define TIM10_BASE (APB2PERIPH_BASE + 0x4400UL) +#define TIM11_BASE (APB2PERIPH_BASE + 0x4800UL) + +/*!< AHB1 peripherals */ +#define GPIOA_BASE (AHB1PERIPH_BASE + 0x0000UL) +#define GPIOB_BASE (AHB1PERIPH_BASE + 0x0400UL) +#define GPIOC_BASE (AHB1PERIPH_BASE + 0x0800UL) +#define GPIOD_BASE (AHB1PERIPH_BASE + 0x0C00UL) +#define GPIOE_BASE (AHB1PERIPH_BASE + 0x1000UL) +#define GPIOF_BASE (AHB1PERIPH_BASE + 0x1400UL) +#define GPIOG_BASE (AHB1PERIPH_BASE + 0x1800UL) +#define GPIOH_BASE (AHB1PERIPH_BASE + 0x1C00UL) +#define GPIOI_BASE (AHB1PERIPH_BASE + 0x2000UL) +#define CRC_BASE (AHB1PERIPH_BASE + 0x3000UL) +#define RCC_BASE (AHB1PERIPH_BASE + 0x3800UL) +#define FLASH_R_BASE (AHB1PERIPH_BASE + 0x3C00UL) +#define DMA1_BASE (AHB1PERIPH_BASE + 0x6000UL) +#define DMA1_Stream0_BASE (DMA1_BASE + 0x010UL) +#define DMA1_Stream1_BASE (DMA1_BASE + 0x028UL) +#define DMA1_Stream2_BASE (DMA1_BASE + 0x040UL) +#define DMA1_Stream3_BASE (DMA1_BASE + 0x058UL) +#define DMA1_Stream4_BASE (DMA1_BASE + 0x070UL) +#define DMA1_Stream5_BASE (DMA1_BASE + 0x088UL) +#define DMA1_Stream6_BASE (DMA1_BASE + 0x0A0UL) +#define DMA1_Stream7_BASE (DMA1_BASE + 0x0B8UL) +#define DMA2_BASE (AHB1PERIPH_BASE + 0x6400UL) +#define DMA2_Stream0_BASE (DMA2_BASE + 0x010UL) +#define DMA2_Stream1_BASE (DMA2_BASE + 0x028UL) +#define DMA2_Stream2_BASE (DMA2_BASE + 0x040UL) +#define DMA2_Stream3_BASE (DMA2_BASE + 0x058UL) +#define DMA2_Stream4_BASE (DMA2_BASE + 0x070UL) +#define DMA2_Stream5_BASE (DMA2_BASE + 0x088UL) +#define DMA2_Stream6_BASE (DMA2_BASE + 0x0A0UL) +#define DMA2_Stream7_BASE (DMA2_BASE + 0x0B8UL) +#define ETH_BASE (AHB1PERIPH_BASE + 0x8000UL) +#define ETH_MAC_BASE (ETH_BASE) +#define ETH_MMC_BASE (ETH_BASE + 0x0100UL) +#define ETH_PTP_BASE (ETH_BASE + 0x0700UL) +#define ETH_DMA_BASE (ETH_BASE + 0x1000UL) + +/*!< AHB2 peripherals */ +#define DCMI_BASE (AHB2PERIPH_BASE + 0x50000UL) +#define RNG_BASE (AHB2PERIPH_BASE + 0x60800UL) + +/*!< FSMC Bankx registers base address */ +#define FSMC_Bank1_R_BASE 
(FSMC_R_BASE + 0x0000UL) +#define FSMC_Bank1E_R_BASE (FSMC_R_BASE + 0x0104UL) +#define FSMC_Bank2_3_R_BASE (FSMC_R_BASE + 0x0060UL) +#define FSMC_Bank4_R_BASE (FSMC_R_BASE + 0x00A0UL) + + +/*!< Debug MCU registers base address */ +#define DBGMCU_BASE 0xE0042000UL +/*!< USB registers base address */ +#define USB_OTG_HS_PERIPH_BASE 0x40040000UL +#define USB_OTG_FS_PERIPH_BASE 0x50000000UL + +#define USB_OTG_GLOBAL_BASE 0x000UL +#define USB_OTG_DEVICE_BASE 0x800UL +#define USB_OTG_IN_ENDPOINT_BASE 0x900UL +#define USB_OTG_OUT_ENDPOINT_BASE 0xB00UL +#define USB_OTG_EP_REG_SIZE 0x20UL +#define USB_OTG_HOST_BASE 0x400UL +#define USB_OTG_HOST_PORT_BASE 0x440UL +#define USB_OTG_HOST_CHANNEL_BASE 0x500UL +#define USB_OTG_HOST_CHANNEL_SIZE 0x20UL +#define USB_OTG_PCGCCTL_BASE 0xE00UL +#define USB_OTG_FIFO_BASE 0x1000UL +#define USB_OTG_FIFO_SIZE 0x1000UL + +#define UID_BASE 0x1FFF7A10UL /*!< Unique device ID register base address */ +#define FLASHSIZE_BASE 0x1FFF7A22UL /*!< FLASH Size register base address */ +#define PACKAGE_BASE 0x1FFF7BF0UL /*!< Package size register base address */ +/** + * @} + */ + +/** @addtogroup Peripheral_declaration + * @{ + */ +#define TIM2 ((TIM_TypeDef *) TIM2_BASE) +#define TIM3 ((TIM_TypeDef *) TIM3_BASE) +#define TIM4 ((TIM_TypeDef *) TIM4_BASE) +#define TIM5 ((TIM_TypeDef *) TIM5_BASE) +#define TIM6 ((TIM_TypeDef *) TIM6_BASE) +#define TIM7 ((TIM_TypeDef *) TIM7_BASE) +#define TIM12 ((TIM_TypeDef *) TIM12_BASE) +#define TIM13 ((TIM_TypeDef *) TIM13_BASE) +#define TIM14 ((TIM_TypeDef *) TIM14_BASE) +#define RTC ((RTC_TypeDef *) RTC_BASE) +#define WWDG ((WWDG_TypeDef *) WWDG_BASE) +#define IWDG ((IWDG_TypeDef *) IWDG_BASE) +#define I2S2ext ((SPI_TypeDef *) I2S2ext_BASE) +#define SPI2 ((SPI_TypeDef *) SPI2_BASE) +#define SPI3 ((SPI_TypeDef *) SPI3_BASE) +#define I2S3ext ((SPI_TypeDef *) I2S3ext_BASE) +#define USART2 ((USART_TypeDef *) USART2_BASE) +#define USART3 ((USART_TypeDef *) USART3_BASE) +#define UART4 ((USART_TypeDef *) UART4_BASE) +#define UART5 ((USART_TypeDef *) UART5_BASE) +#define I2C1 ((I2C_TypeDef *) I2C1_BASE) +#define I2C2 ((I2C_TypeDef *) I2C2_BASE) +#define I2C3 ((I2C_TypeDef *) I2C3_BASE) +#define CAN1 ((CAN_TypeDef *) CAN1_BASE) +#define CAN2 ((CAN_TypeDef *) CAN2_BASE) +#define PWR ((PWR_TypeDef *) PWR_BASE) +#define DAC1 ((DAC_TypeDef *) DAC_BASE) +#define DAC ((DAC_TypeDef *) DAC_BASE) /* Kept for legacy purpose */ +#define TIM1 ((TIM_TypeDef *) TIM1_BASE) +#define TIM8 ((TIM_TypeDef *) TIM8_BASE) +#define USART1 ((USART_TypeDef *) USART1_BASE) +#define USART6 ((USART_TypeDef *) USART6_BASE) +#define ADC1 ((ADC_TypeDef *) ADC1_BASE) +#define ADC2 ((ADC_TypeDef *) ADC2_BASE) +#define ADC3 ((ADC_TypeDef *) ADC3_BASE) +#define ADC123_COMMON ((ADC_Common_TypeDef *) ADC123_COMMON_BASE) +/* Legacy define */ +#define ADC ADC123_COMMON +#define SDIO ((SDIO_TypeDef *) SDIO_BASE) +#define SPI1 ((SPI_TypeDef *) SPI1_BASE) +#define SYSCFG ((SYSCFG_TypeDef *) SYSCFG_BASE) +#define EXTI ((EXTI_TypeDef *) EXTI_BASE) +#define TIM9 ((TIM_TypeDef *) TIM9_BASE) +#define TIM10 ((TIM_TypeDef *) TIM10_BASE) +#define TIM11 ((TIM_TypeDef *) TIM11_BASE) +#define GPIOA ((GPIO_TypeDef *) GPIOA_BASE) +#define GPIOB ((GPIO_TypeDef *) GPIOB_BASE) +#define GPIOC ((GPIO_TypeDef *) GPIOC_BASE) +#define GPIOD ((GPIO_TypeDef *) GPIOD_BASE) +#define GPIOE ((GPIO_TypeDef *) GPIOE_BASE) +#define GPIOF ((GPIO_TypeDef *) GPIOF_BASE) +#define GPIOG ((GPIO_TypeDef *) GPIOG_BASE) +#define GPIOH ((GPIO_TypeDef *) GPIOH_BASE) +#define GPIOI ((GPIO_TypeDef *) GPIOI_BASE) +#define CRC 
((CRC_TypeDef *) CRC_BASE) +#define RCC ((RCC_TypeDef *) RCC_BASE) +#define FLASH ((FLASH_TypeDef *) FLASH_R_BASE) +#define DMA1 ((DMA_TypeDef *) DMA1_BASE) +#define DMA1_Stream0 ((DMA_Stream_TypeDef *) DMA1_Stream0_BASE) +#define DMA1_Stream1 ((DMA_Stream_TypeDef *) DMA1_Stream1_BASE) +#define DMA1_Stream2 ((DMA_Stream_TypeDef *) DMA1_Stream2_BASE) +#define DMA1_Stream3 ((DMA_Stream_TypeDef *) DMA1_Stream3_BASE) +#define DMA1_Stream4 ((DMA_Stream_TypeDef *) DMA1_Stream4_BASE) +#define DMA1_Stream5 ((DMA_Stream_TypeDef *) DMA1_Stream5_BASE) +#define DMA1_Stream6 ((DMA_Stream_TypeDef *) DMA1_Stream6_BASE) +#define DMA1_Stream7 ((DMA_Stream_TypeDef *) DMA1_Stream7_BASE) +#define DMA2 ((DMA_TypeDef *) DMA2_BASE) +#define DMA2_Stream0 ((DMA_Stream_TypeDef *) DMA2_Stream0_BASE) +#define DMA2_Stream1 ((DMA_Stream_TypeDef *) DMA2_Stream1_BASE) +#define DMA2_Stream2 ((DMA_Stream_TypeDef *) DMA2_Stream2_BASE) +#define DMA2_Stream3 ((DMA_Stream_TypeDef *) DMA2_Stream3_BASE) +#define DMA2_Stream4 ((DMA_Stream_TypeDef *) DMA2_Stream4_BASE) +#define DMA2_Stream5 ((DMA_Stream_TypeDef *) DMA2_Stream5_BASE) +#define DMA2_Stream6 ((DMA_Stream_TypeDef *) DMA2_Stream6_BASE) +#define DMA2_Stream7 ((DMA_Stream_TypeDef *) DMA2_Stream7_BASE) +#define ETH ((ETH_TypeDef *) ETH_BASE) +#define DCMI ((DCMI_TypeDef *) DCMI_BASE) +#define RNG ((RNG_TypeDef *) RNG_BASE) +#define FSMC_Bank1 ((FSMC_Bank1_TypeDef *) FSMC_Bank1_R_BASE) +#define FSMC_Bank1E ((FSMC_Bank1E_TypeDef *) FSMC_Bank1E_R_BASE) +#define FSMC_Bank2_3 ((FSMC_Bank2_3_TypeDef *) FSMC_Bank2_3_R_BASE) +#define FSMC_Bank4 ((FSMC_Bank4_TypeDef *) FSMC_Bank4_R_BASE) +#define DBGMCU ((DBGMCU_TypeDef *) DBGMCU_BASE) +#define USB_OTG_FS ((USB_OTG_GlobalTypeDef *) USB_OTG_FS_PERIPH_BASE) +#define USB_OTG_HS ((USB_OTG_GlobalTypeDef *) USB_OTG_HS_PERIPH_BASE) + +/** + * @} + */ + +/** @addtogroup Exported_constants + * @{ + */ + +/** @addtogroup Hardware_Constant_Definition + * @{ + */ +#define LSI_STARTUP_TIME 40U /*!< LSI Maximum startup time in us */ +/** + * @} + */ + +/** @addtogroup Peripheral_Registers_Bits_Definition +* @{ +*/ + +/******************************************************************************/ +/* Peripheral Registers_Bits_Definition */ +/******************************************************************************/ + +/******************************************************************************/ +/* */ +/* Analog to Digital Converter */ +/* */ +/******************************************************************************/ +/* + * @brief Specific device feature definitions (not present on all devices in the STM32F4 series) + */ +#define ADC_MULTIMODE_SUPPORT /*!> SCB_CCSIDR_ASSOCIATIVITY_Pos) +#define CCSIDR_SETS(x) (((x) & SCB_CCSIDR_NUMSETS_Msk ) >> SCB_CCSIDR_NUMSETS_Pos ) + +#ifndef __SCB_DCACHE_LINE_SIZE +#define __SCB_DCACHE_LINE_SIZE 32U /*!< Cortex-M7 cache line size is fixed to 32 bytes (8 words). See also register SCB_CCSIDR */ +#endif + +#ifndef __SCB_ICACHE_LINE_SIZE +#define __SCB_ICACHE_LINE_SIZE 32U /*!< Cortex-M7 cache line size is fixed to 32 bytes (8 words). 
See also register SCB_CCSIDR */ +#endif + +/** + \brief Enable I-Cache + \details Turns on I-Cache + */ +__STATIC_FORCEINLINE void SCB_EnableICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + if (SCB->CCR & SCB_CCR_IC_Msk) return; /* return if ICache is already enabled */ + + __DSB(); + __ISB(); + SCB->ICIALLU = 0UL; /* invalidate I-Cache */ + __DSB(); + __ISB(); + SCB->CCR |= (uint32_t)SCB_CCR_IC_Msk; /* enable I-Cache */ + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Disable I-Cache + \details Turns off I-Cache + */ +__STATIC_FORCEINLINE void SCB_DisableICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + __DSB(); + __ISB(); + SCB->CCR &= ~(uint32_t)SCB_CCR_IC_Msk; /* disable I-Cache */ + SCB->ICIALLU = 0UL; /* invalidate I-Cache */ + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Invalidate I-Cache + \details Invalidates I-Cache + */ +__STATIC_FORCEINLINE void SCB_InvalidateICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + __DSB(); + __ISB(); + SCB->ICIALLU = 0UL; + __DSB(); + __ISB(); + #endif +} + + +/** + \brief I-Cache Invalidate by address + \details Invalidates I-Cache for the given address. + I-Cache is invalidated starting from a 32 byte aligned address in 32 byte granularity. + I-Cache memory blocks which are part of given address + given size are invalidated. + \param[in] addr address + \param[in] isize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_InvalidateICache_by_Addr (volatile void *addr, int32_t isize) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + if ( isize > 0 ) { + int32_t op_size = isize + (((uint32_t)addr) & (__SCB_ICACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_ICACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->ICIMVAU = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_ICACHE_LINE_SIZE; + op_size -= __SCB_ICACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + + +/** + \brief Enable D-Cache + \details Turns on D-Cache + */ +__STATIC_FORCEINLINE void SCB_EnableDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + if (SCB->CCR & SCB_CCR_DC_Msk) return; /* return if DCache is already enabled */ + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCISW = (((sets << SCB_DCISW_SET_Pos) & SCB_DCISW_SET_Msk) | + ((ways << SCB_DCISW_WAY_Pos) & SCB_DCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + __DSB(); + + SCB->CCR |= (uint32_t)SCB_CCR_DC_Msk; /* enable D-Cache */ + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Disable D-Cache + \details Turns off D-Cache + */ +__STATIC_FORCEINLINE void SCB_DisableDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + SCB->CCR &= ~(uint32_t)SCB_CCR_DC_Msk; /* disable D-Cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean & invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & 
SCB_DCCISW_SET_Msk) | + ((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Invalidate D-Cache + \details Invalidates D-Cache + */ +__STATIC_FORCEINLINE void SCB_InvalidateDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCISW = (((sets << SCB_DCISW_SET_Pos) & SCB_DCISW_SET_Msk) | + ((ways << SCB_DCISW_WAY_Pos) & SCB_DCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Clean D-Cache + \details Cleans D-Cache + */ +__STATIC_FORCEINLINE void SCB_CleanDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCSW = (((sets << SCB_DCCSW_SET_Pos) & SCB_DCCSW_SET_Msk) | + ((ways << SCB_DCCSW_WAY_Pos) & SCB_DCCSW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Clean & Invalidate D-Cache + \details Cleans and Invalidates D-Cache + */ +__STATIC_FORCEINLINE void SCB_CleanInvalidateDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean & invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & SCB_DCCISW_SET_Msk) | + ((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief D-Cache Invalidate by address + \details Invalidates D-Cache for the given address. + D-Cache is invalidated starting from a 32 byte aligned address in 32 byte granularity. + D-Cache memory blocks which are part of given address + given size are invalidated. 
+ \param[in] addr address + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_InvalidateDCache_by_Addr (volatile void *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + if ( dsize > 0 ) { + int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->DCIMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_DCACHE_LINE_SIZE; + op_size -= __SCB_DCACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + + +/** + \brief D-Cache Clean by address + \details Cleans D-Cache for the given address + D-Cache is cleaned starting from a 32 byte aligned address in 32 byte granularity. + D-Cache memory blocks which are part of given address + given size are cleaned. + \param[in] addr address + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_CleanDCache_by_Addr (volatile void *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + if ( dsize > 0 ) { + int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->DCCMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_DCACHE_LINE_SIZE; + op_size -= __SCB_DCACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + + +/** + \brief D-Cache Clean and Invalidate by address + \details Cleans and invalidates D_Cache for the given address + D-Cache is cleaned and invalidated starting from a 32 byte aligned address in 32 byte granularity. + D-Cache memory blocks which are part of given address + given size are cleaned and invalidated. + \param[in] addr address (aligned to 32-byte boundary) + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_CleanInvalidateDCache_by_Addr (volatile void *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + if ( dsize > 0 ) { + int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->DCCIMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_DCACHE_LINE_SIZE; + op_size -= __SCB_DCACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + +/*@} end of CMSIS_Core_CacheFunctions */ + +#endif /* ARM_CACHEL1_ARMV7_H */ diff --git a/Drivers/CMSIS/Include/cmsis_armcc.h b/Drivers/CMSIS/Include/cmsis_armcc.h new file mode 100644 index 0000000..a955d47 --- /dev/null +++ b/Drivers/CMSIS/Include/cmsis_armcc.h @@ -0,0 +1,888 @@ +/**************************************************************************//** + * @file cmsis_armcc.h + * @brief CMSIS compiler ARMCC (Arm Compiler 5) header file + * @version V5.3.2 + * @date 27. May 2021 + ******************************************************************************/ +/* + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __CMSIS_ARMCC_H +#define __CMSIS_ARMCC_H + + +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 400677) + #error "Please use Arm Compiler Toolchain V4.0.677 or later!" +#endif + +/* CMSIS compiler control architecture macros */ +#if ((defined (__TARGET_ARCH_6_M ) && (__TARGET_ARCH_6_M == 1)) || \ + (defined (__TARGET_ARCH_6S_M ) && (__TARGET_ARCH_6S_M == 1)) ) + #define __ARM_ARCH_6M__ 1 +#endif + +#if (defined (__TARGET_ARCH_7_M ) && (__TARGET_ARCH_7_M == 1)) + #define __ARM_ARCH_7M__ 1 +#endif + +#if (defined (__TARGET_ARCH_7E_M) && (__TARGET_ARCH_7E_M == 1)) + #define __ARM_ARCH_7EM__ 1 +#endif + + /* __ARM_ARCH_8M_BASE__ not applicable */ + /* __ARM_ARCH_8M_MAIN__ not applicable */ + /* __ARM_ARCH_8_1M_MAIN__ not applicable */ + +/* CMSIS compiler control DSP macros */ +#if ((defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) + #define __ARM_FEATURE_DSP 1 +#endif + +/* CMSIS compiler specific defines */ +#ifndef __ASM + #define __ASM __asm +#endif +#ifndef __INLINE + #define __INLINE __inline +#endif +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static __inline +#endif +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE static __forceinline +#endif +#ifndef __NO_RETURN + #define __NO_RETURN __declspec(noreturn) +#endif +#ifndef __USED + #define __USED __attribute__((used)) +#endif +#ifndef __WEAK + #define __WEAK __attribute__((weak)) +#endif +#ifndef __PACKED + #define __PACKED __attribute__((packed)) +#endif +#ifndef __PACKED_STRUCT + #define __PACKED_STRUCT __packed struct +#endif +#ifndef __PACKED_UNION + #define __PACKED_UNION __packed union +#endif +#ifndef __UNALIGNED_UINT32 /* deprecated */ + #define __UNALIGNED_UINT32(x) (*((__packed uint32_t *)(x))) +#endif +#ifndef __UNALIGNED_UINT16_WRITE + #define __UNALIGNED_UINT16_WRITE(addr, val) ((*((__packed uint16_t *)(addr))) = (val)) +#endif +#ifndef __UNALIGNED_UINT16_READ + #define __UNALIGNED_UINT16_READ(addr) (*((const __packed uint16_t *)(addr))) +#endif +#ifndef __UNALIGNED_UINT32_WRITE + #define __UNALIGNED_UINT32_WRITE(addr, val) ((*((__packed uint32_t *)(addr))) = (val)) +#endif +#ifndef __UNALIGNED_UINT32_READ + #define __UNALIGNED_UINT32_READ(addr) (*((const __packed uint32_t *)(addr))) +#endif +#ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) +#endif +#ifndef __RESTRICT + #define __RESTRICT __restrict +#endif +#ifndef __COMPILER_BARRIER + #define __COMPILER_BARRIER() __memory_changed() +#endif + +/* ######################### Startup and Lowlevel Init ######################## */ + +#ifndef __PROGRAM_START +#define __PROGRAM_START __main +#endif + +#ifndef __INITIAL_SP +#define __INITIAL_SP Image$$ARM_LIB_STACK$$ZI$$Limit +#endif + +#ifndef __STACK_LIMIT +#define __STACK_LIMIT Image$$ARM_LIB_STACK$$ZI$$Base +#endif + +#ifndef __VECTOR_TABLE +#define __VECTOR_TABLE __Vectors +#endif + +#ifndef __VECTOR_TABLE_ATTRIBUTE +#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET"))) +#endif + +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction 
Interface + Access to dedicated instructions + @{ +*/ + +/** + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. + */ +#define __NOP __nop + + +/** + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. + */ +#define __WFI __wfi + + +/** + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. + */ +#define __WFE __wfe + + +/** + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. + */ +#define __SEV __sev + + +/** + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. + */ +#define __ISB() __isb(0xF) + +/** + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. + */ +#define __DSB() __dsb(0xF) + +/** + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. + */ +#define __DMB() __dmb(0xF) + + +/** + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV __rev + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value + */ +#ifndef __NO_EMBEDDED_ASM +__attribute__((section(".rev16_text"))) __STATIC_INLINE __ASM uint32_t __REV16(uint32_t value) +{ + rev16 r0, r0 + bx lr +} +#endif + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value + */ +#ifndef __NO_EMBEDDED_ASM +__attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int16_t __REVSH(int16_t value) +{ + revsh r0, r0 + bx lr +} +#endif + + +/** + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value + */ +#define __ROR __ror + + +/** + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. + */ +#define __BKPT(value) __breakpoint(value) + + +/** + \brief Reverse bit order of value + \details Reverses the bit order of the given value. 
+ \param [in] value Value to reverse + \return Reversed value + */ +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) + #define __RBIT __rbit +#else +__attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value) +{ + uint32_t result; + uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */ + + result = value; /* r will be reversed bits of v; first get LSB of v */ + for (value >>= 1U; value != 0U; value >>= 1U) + { + result <<= 1U; + result |= value & 1U; + s--; + } + result <<= s; /* shift when v's highest bits are zero */ + return result; +} +#endif + + +/** + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value + */ +#define __CLZ __clz + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) + +/** + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __LDREXB(ptr) ((uint8_t ) __ldrex(ptr)) +#else + #define __LDREXB(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint8_t ) __ldrex(ptr)) _Pragma("pop") +#endif + + +/** + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __LDREXH(ptr) ((uint16_t) __ldrex(ptr)) +#else + #define __LDREXH(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint16_t) __ldrex(ptr)) _Pragma("pop") +#endif + + +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __LDREXW(ptr) ((uint32_t ) __ldrex(ptr)) +#else + #define __LDREXW(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint32_t ) __ldrex(ptr)) _Pragma("pop") +#endif + + +/** + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __STREXB(value, ptr) __strex(value, ptr) +#else + #define __STREXB(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") +#endif + + +/** + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __STREXH(value, ptr) __strex(value, ptr) +#else + #define __STREXH(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") +#endif + + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __STREXW(value, ptr) __strex(value, ptr) +#else + #define __STREXW(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") +#endif + + +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +#define __CLREX __clrex + + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT __ssat + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT __usat + + +/** + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value + */ +#ifndef __NO_EMBEDDED_ASM +__attribute__((section(".rrx_text"))) __STATIC_INLINE __ASM uint32_t __RRX(uint32_t value) +{ + rrx r0, r0 + bx lr +} +#endif + + +/** + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDRBT(ptr) ((uint8_t ) __ldrt(ptr)) + + +/** + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDRHT(ptr) ((uint16_t) __ldrt(ptr)) + + +/** + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDRT(ptr) ((uint32_t ) __ldrt(ptr)) + + +/** + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +#define __STRBT(value, ptr) __strt(value, ptr) + + +/** + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +#define __STRHT(value, ptr) __strt(value, ptr) + + +/** + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +#define __STRT(value, ptr) __strt(value, ptr) + +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__attribute__((always_inline)) __STATIC_INLINE int32_t __SSAT(int32_t val, uint32_t sat) +{ + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; +} + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. 
+ \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +__attribute__((always_inline)) __STATIC_INLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ + +/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +/* intrinsic void __enable_irq(); */ + + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +/* intrinsic void __disable_irq(); */ + +/** + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value + */ +__STATIC_INLINE uint32_t __get_CONTROL(void) +{ + register uint32_t __regControl __ASM("control"); + return(__regControl); +} + + +/** + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set + */ +__STATIC_INLINE void __set_CONTROL(uint32_t control) +{ + register uint32_t __regControl __ASM("control"); + __regControl = control; + __ISB(); +} + + +/** + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value + */ +__STATIC_INLINE uint32_t __get_IPSR(void) +{ + register uint32_t __regIPSR __ASM("ipsr"); + return(__regIPSR); +} + + +/** + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value + */ +__STATIC_INLINE uint32_t __get_APSR(void) +{ + register uint32_t __regAPSR __ASM("apsr"); + return(__regAPSR); +} + + +/** + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value + */ +__STATIC_INLINE uint32_t __get_xPSR(void) +{ + register uint32_t __regXPSR __ASM("xpsr"); + return(__regXPSR); +} + + +/** + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). + \return PSP Register value + */ +__STATIC_INLINE uint32_t __get_PSP(void) +{ + register uint32_t __regProcessStackPointer __ASM("psp"); + return(__regProcessStackPointer); +} + + +/** + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). + \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_INLINE void __set_PSP(uint32_t topOfProcStack) +{ + register uint32_t __regProcessStackPointer __ASM("psp"); + __regProcessStackPointer = topOfProcStack; +} + + +/** + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value + */ +__STATIC_INLINE uint32_t __get_MSP(void) +{ + register uint32_t __regMainStackPointer __ASM("msp"); + return(__regMainStackPointer); +} + + +/** + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). 
+ \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_INLINE void __set_MSP(uint32_t topOfMainStack) +{ + register uint32_t __regMainStackPointer __ASM("msp"); + __regMainStackPointer = topOfMainStack; +} + + +/** + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. + \return Priority Mask value + */ +__STATIC_INLINE uint32_t __get_PRIMASK(void) +{ + register uint32_t __regPriMask __ASM("primask"); + return(__regPriMask); +} + + +/** + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask + */ +__STATIC_INLINE void __set_PRIMASK(uint32_t priMask) +{ + register uint32_t __regPriMask __ASM("primask"); + __regPriMask = (priMask); +} + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) + +/** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. + Can only be executed in Privileged modes. + */ +#define __enable_fault_irq __enable_fiq + + +/** + \brief Disable FIQ + \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. + Can only be executed in Privileged modes. + */ +#define __disable_fault_irq __disable_fiq + + +/** + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value + */ +__STATIC_INLINE uint32_t __get_BASEPRI(void) +{ + register uint32_t __regBasePri __ASM("basepri"); + return(__regBasePri); +} + + +/** + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set + */ +__STATIC_INLINE void __set_BASEPRI(uint32_t basePri) +{ + register uint32_t __regBasePri __ASM("basepri"); + __regBasePri = (basePri & 0xFFU); +} + + +/** + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. + \param [in] basePri Base Priority value to set + */ +__STATIC_INLINE void __set_BASEPRI_MAX(uint32_t basePri) +{ + register uint32_t __regBasePriMax __ASM("basepri_max"); + __regBasePriMax = (basePri & 0xFFU); +} + + +/** + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. + \return Fault Mask register value + */ +__STATIC_INLINE uint32_t __get_FAULTMASK(void) +{ + register uint32_t __regFaultMask __ASM("faultmask"); + return(__regFaultMask); +} + + +/** + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask) +{ + register uint32_t __regFaultMask __ASM("faultmask"); + __regFaultMask = (faultMask & (uint32_t)1U); +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ + + +/** + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. 
+ \return Floating Point Status/Control register value + */ +__STATIC_INLINE uint32_t __get_FPSCR(void) +{ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) + register uint32_t __regfpscr __ASM("fpscr"); + return(__regfpscr); +#else + return(0U); +#endif +} + + +/** + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. + \param [in] fpscr Floating Point Status/Control value to set + */ +__STATIC_INLINE void __set_FPSCR(uint32_t fpscr) +{ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) + register uint32_t __regfpscr __ASM("fpscr"); + __regfpscr = (fpscr); +#else + (void)fpscr; +#endif +} + + +/*@} end of CMSIS_Core_RegAccFunctions */ + + +/* ################### Compiler specific Intrinsics ########################### */ +/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics + Access to dedicated SIMD instructions + @{ +*/ + +#if ((defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) + +#define __SADD8 __sadd8 +#define __QADD8 __qadd8 +#define __SHADD8 __shadd8 +#define __UADD8 __uadd8 +#define __UQADD8 __uqadd8 +#define __UHADD8 __uhadd8 +#define __SSUB8 __ssub8 +#define __QSUB8 __qsub8 +#define __SHSUB8 __shsub8 +#define __USUB8 __usub8 +#define __UQSUB8 __uqsub8 +#define __UHSUB8 __uhsub8 +#define __SADD16 __sadd16 +#define __QADD16 __qadd16 +#define __SHADD16 __shadd16 +#define __UADD16 __uadd16 +#define __UQADD16 __uqadd16 +#define __UHADD16 __uhadd16 +#define __SSUB16 __ssub16 +#define __QSUB16 __qsub16 +#define __SHSUB16 __shsub16 +#define __USUB16 __usub16 +#define __UQSUB16 __uqsub16 +#define __UHSUB16 __uhsub16 +#define __SASX __sasx +#define __QASX __qasx +#define __SHASX __shasx +#define __UASX __uasx +#define __UQASX __uqasx +#define __UHASX __uhasx +#define __SSAX __ssax +#define __QSAX __qsax +#define __SHSAX __shsax +#define __USAX __usax +#define __UQSAX __uqsax +#define __UHSAX __uhsax +#define __USAD8 __usad8 +#define __USADA8 __usada8 +#define __SSAT16 __ssat16 +#define __USAT16 __usat16 +#define __UXTB16 __uxtb16 +#define __UXTAB16 __uxtab16 +#define __SXTB16 __sxtb16 +#define __SXTAB16 __sxtab16 +#define __SMUAD __smuad +#define __SMUADX __smuadx +#define __SMLAD __smlad +#define __SMLADX __smladx +#define __SMLALD __smlald +#define __SMLALDX __smlaldx +#define __SMUSD __smusd +#define __SMUSDX __smusdx +#define __SMLSD __smlsd +#define __SMLSDX __smlsdx +#define __SMLSLD __smlsld +#define __SMLSLDX __smlsldx +#define __SEL __sel +#define __QADD __qadd +#define __QSUB __qsub + +#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ + ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) + +#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ + ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) + +#define __SMMLA(ARG1,ARG2,ARG3) ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \ + ((int64_t)(ARG3) << 32U) ) >> 32U)) + +#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) + +#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3)) + +#endif /* ((defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ +/*@} end of group CMSIS_SIMD_intrinsics */ + + +#endif /* __CMSIS_ARMCC_H */ diff --git a/Drivers/CMSIS/Include/cmsis_armclang.h b/Drivers/CMSIS/Include/cmsis_armclang.h new file mode 100644 index 0000000..6911417 --- /dev/null +++ b/Drivers/CMSIS/Include/cmsis_armclang.h @@ -0,0 +1,1503 @@ 
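Editorial note, not part of the commit: both compiler headers in this diff (cmsis_armcc.h above and cmsis_armclang.h below) expose the same exclusive-access and saturation intrinsics (__LDREXW, __STREXW, __SSAT, __USAT, ...) for the Cortex-M4 used here. As a minimal illustrative sketch only — the include of stm32f4xx.h, the counter name event_count and the helper atomic_increment are assumptions made up for the example — an atomic read-modify-write built on those intrinsics typically looks like this:

#include <stdint.h>
#include "stm32f4xx.h"                 /* device header; pulls in the CMSIS compiler header the toolchain selects */

static volatile uint32_t event_count;  /* hypothetical counter shared between an ISR and the main loop */

static inline void atomic_increment(volatile uint32_t *p)
{
    uint32_t v;
    do {
        v = __LDREXW(p) + 1U;          /* load-exclusive: sets the exclusive monitor for this address      */
    } while (__STREXW(v, p) != 0U);    /* store-exclusive returns 1 and stores nothing if the monitor was
                                          cleared in between (e.g. by an interrupt); retry in that case    */
    __DMB();                           /* order the update against subsequent memory accesses              */
}

/* usage: atomic_increment(&event_count); */

The saturation intrinsics follow the same pattern on either compiler: __SSAT(x, 16) clips a signed 32-bit value into the int16_t range, and __USAT(x, 8) clips it into 0..255.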
+/**************************************************************************//** + * @file cmsis_armclang.h + * @brief CMSIS compiler armclang (Arm Compiler 6) header file + * @version V5.4.3 + * @date 27. May 2021 + ******************************************************************************/ +/* + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*lint -esym(9058, IRQn)*/ /* disable MISRA 2012 Rule 2.4 for IRQn */ + +#ifndef __CMSIS_ARMCLANG_H +#define __CMSIS_ARMCLANG_H + +#pragma clang system_header /* treat file as system include file */ + +/* CMSIS compiler specific defines */ +#ifndef __ASM + #define __ASM __asm +#endif +#ifndef __INLINE + #define __INLINE __inline +#endif +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static __inline +#endif +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline +#endif +#ifndef __NO_RETURN + #define __NO_RETURN __attribute__((__noreturn__)) +#endif +#ifndef __USED + #define __USED __attribute__((used)) +#endif +#ifndef __WEAK + #define __WEAK __attribute__((weak)) +#endif +#ifndef __PACKED + #define __PACKED __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_UNION + #define __PACKED_UNION union __attribute__((packed, aligned(1))) +#endif +#ifndef __UNALIGNED_UINT32 /* deprecated */ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT32)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32 */ + struct __attribute__((packed)) T_UINT32 { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) +#endif +#ifndef __UNALIGNED_UINT16_WRITE + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT16_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_WRITE */ + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT16_READ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT16_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_READ */ + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) +#endif +#ifndef __UNALIGNED_UINT32_WRITE + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT32_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_WRITE */ + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef 
__UNALIGNED_UINT32_READ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT32_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_READ */ + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) +#endif +#ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) +#endif +#ifndef __RESTRICT + #define __RESTRICT __restrict +#endif +#ifndef __COMPILER_BARRIER + #define __COMPILER_BARRIER() __ASM volatile("":::"memory") +#endif + +/* ######################### Startup and Lowlevel Init ######################## */ + +#ifndef __PROGRAM_START +#define __PROGRAM_START __main +#endif + +#ifndef __INITIAL_SP +#define __INITIAL_SP Image$$ARM_LIB_STACK$$ZI$$Limit +#endif + +#ifndef __STACK_LIMIT +#define __STACK_LIMIT Image$$ARM_LIB_STACK$$ZI$$Base +#endif + +#ifndef __VECTOR_TABLE +#define __VECTOR_TABLE __Vectors +#endif + +#ifndef __VECTOR_TABLE_ATTRIBUTE +#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET"))) +#endif + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +#ifndef __STACK_SEAL +#define __STACK_SEAL Image$$STACKSEAL$$ZI$$Base +#endif + +#ifndef __TZ_STACK_SEAL_SIZE +#define __TZ_STACK_SEAL_SIZE 8U +#endif + +#ifndef __TZ_STACK_SEAL_VALUE +#define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL +#endif + + +__STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { + *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; +} +#endif + + +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions + @{ +*/ + +/* Define macros for porting to both thumb1 and thumb2. + * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_RW_REG(r) "+l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_RW_REG(r) "+r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) +#endif + +/** + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. + */ +#define __NOP __builtin_arm_nop + +/** + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. + */ +#define __WFI __builtin_arm_wfi + + +/** + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. + */ +#define __WFE __builtin_arm_wfe + + +/** + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. + */ +#define __SEV __builtin_arm_sev + + +/** + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. + */ +#define __ISB() __builtin_arm_isb(0xF) + +/** + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. 
+ */ +#define __DSB() __builtin_arm_dsb(0xF) + + +/** + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. + */ +#define __DMB() __builtin_arm_dmb(0xF) + + +/** + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV(value) __builtin_bswap32(value) + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV16(value) __ROR(__REV(value), 16) + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REVSH(value) (int16_t)__builtin_bswap16(value) + + +/** + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) +{ + op2 %= 32U; + if (op2 == 0U) + { + return op1; + } + return (op1 >> op2) | (op1 << (32U - op2)); +} + + +/** + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. + */ +#define __BKPT(value) __ASM volatile ("bkpt "#value) + + +/** + \brief Reverse bit order of value + \details Reverses the bit order of the given value. + \param [in] value Value to reverse + \return Reversed value + */ +#define __RBIT __builtin_arm_rbit + +/** + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value + */ +__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) +{ + /* Even though __builtin_clz produces a CLZ instruction on ARM, formally + __builtin_clz(0) is undefined behaviour, so handle this case specially. + This guarantees ARM-compatible results if happening to compile on a non-ARM + target, and ensures the compiler doesn't decide to activate any + optimisations using the logic "value was passed to __builtin_clz, so it + is non-zero". + ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a + single CLZ instruction. + */ + if (value == 0U) + { + return 32U; + } + return __builtin_clz(value); +} + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) + +/** + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. 
+ \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDREXB (uint8_t)__builtin_arm_ldrex + + +/** + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDREXH (uint16_t)__builtin_arm_ldrex + + +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDREXW (uint32_t)__builtin_arm_ldrex + + +/** + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXB (uint32_t)__builtin_arm_strex + + +/** + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXH (uint32_t)__builtin_arm_strex + + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXW (uint32_t)__builtin_arm_strex + + +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +#define __CLREX __builtin_arm_clrex + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT __builtin_arm_ssat + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT __builtin_arm_usat + + +/** + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) +{ + uint32_t result; + + __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return(result); +} + + +/** + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. 
+ \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint8_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint16_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return(result); +} + + +/** + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); +} + +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +{ + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; +} + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. 
+ \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) + +/** + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t) result); +} + + +/** + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t) result); +} + + +/** + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return(result); +} + + +/** + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDAEXB (uint8_t)__builtin_arm_ldaex + + +/** + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. 
+ \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDAEXH (uint16_t)__builtin_arm_ldaex + + +/** + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDAEX (uint32_t)__builtin_arm_ldaex + + +/** + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXB (uint32_t)__builtin_arm_stlex + + +/** + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXH (uint32_t)__builtin_arm_stlex + + +/** + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEX (uint32_t)__builtin_arm_stlex + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ + +/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +#ifndef __ARM_COMPAT_H +__STATIC_FORCEINLINE void __enable_irq(void) +{ + __ASM volatile ("cpsie i" : : : "memory"); +} +#endif + + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +#ifndef __ARM_COMPAT_H +__STATIC_FORCEINLINE void __disable_irq(void) +{ + __ASM volatile ("cpsid i" : : : "memory"); +} +#endif + + +/** + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. + \return non-secure Control Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Control Register + \details Writes the given value to the Control Register. 
+ \param [in] control Control Register value to set + */ +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) +{ + __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); + __ISB(); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. + \param [in] control Control Register value to set + */ +__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) +{ + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); + __ISB(); +} +#endif + + +/** + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_IPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_APSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, apsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_xPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). + \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. + \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). + \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. + \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); +} +#endif + + +/** + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. 
+ \return MSP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). + \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. + \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); +} +#endif + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); + return(result); +} + + +/** + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. + \param [in] topOfStack Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) +{ + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); +} +#endif + + +/** + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. + \return Priority Mask value + */ +__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. + \return Priority Mask value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask + */ +__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +{ + __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask + */ +__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) +{ + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); +} +#endif + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) +/** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. + Can only be executed in Privileged modes. 
+ */ +__STATIC_FORCEINLINE void __enable_fault_irq(void) +{ + __ASM volatile ("cpsie f" : : : "memory"); +} + + +/** + \brief Disable FIQ + \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __disable_fault_irq(void) +{ + __ASM volatile ("cpsid f" : : : "memory"); +} + + +/** + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value + */ +__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, basepri" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) +{ + __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); +} +#endif + + +/** + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); +} + + +/** + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. + \return Fault Mask register value + */ +__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. + \return Fault Mask register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. 
+ \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); +} +#endif + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) + +/** + \brief Get Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). + \return PSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) +{ +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim" : "=r" (result) ); + return result; +#endif +} + +#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \return PSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) +{ +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + return result; +#endif +} +#endif + + +/** + \brief Set Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) +{ +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. 
+ + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) +{ +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); +#endif +} +#endif + + +/** + \brief Get Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). + \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) +{ +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim" : "=r" (result) ); + return result; +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. + \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) +{ +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return result; +#endif +} +#endif + + +/** + \brief Set Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). + \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) +{ +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. 
+ \param [in] MainStackPtrLimit Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) +{ +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); +#endif +} +#endif + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ + +/** + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value + */ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#define __get_FPSCR (uint32_t)__builtin_arm_get_fpscr +#else +#define __get_FPSCR() ((uint32_t)0U) +#endif + +/** + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. + \param [in] fpscr Floating Point Status/Control value to set + */ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#define __set_FPSCR __builtin_arm_set_fpscr +#else +#define __set_FPSCR(x) ((void)(x)) +#endif + + +/*@} end of CMSIS_Core_RegAccFunctions */ + + +/* ################### Compiler specific Intrinsics ########################### */ +/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics + Access to dedicated SIMD instructions + @{ +*/ + +#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) + +#define __SADD8 __builtin_arm_sadd8 +#define __QADD8 __builtin_arm_qadd8 +#define __SHADD8 __builtin_arm_shadd8 +#define __UADD8 __builtin_arm_uadd8 +#define __UQADD8 __builtin_arm_uqadd8 +#define __UHADD8 __builtin_arm_uhadd8 +#define __SSUB8 __builtin_arm_ssub8 +#define __QSUB8 __builtin_arm_qsub8 +#define __SHSUB8 __builtin_arm_shsub8 +#define __USUB8 __builtin_arm_usub8 +#define __UQSUB8 __builtin_arm_uqsub8 +#define __UHSUB8 __builtin_arm_uhsub8 +#define __SADD16 __builtin_arm_sadd16 +#define __QADD16 __builtin_arm_qadd16 +#define __SHADD16 __builtin_arm_shadd16 +#define __UADD16 __builtin_arm_uadd16 +#define __UQADD16 __builtin_arm_uqadd16 +#define __UHADD16 __builtin_arm_uhadd16 +#define __SSUB16 __builtin_arm_ssub16 +#define __QSUB16 __builtin_arm_qsub16 +#define __SHSUB16 __builtin_arm_shsub16 +#define __USUB16 __builtin_arm_usub16 +#define __UQSUB16 __builtin_arm_uqsub16 +#define __UHSUB16 __builtin_arm_uhsub16 +#define __SASX __builtin_arm_sasx +#define __QASX __builtin_arm_qasx +#define __SHASX __builtin_arm_shasx +#define __UASX __builtin_arm_uasx +#define __UQASX __builtin_arm_uqasx +#define __UHASX __builtin_arm_uhasx +#define __SSAX __builtin_arm_ssax +#define __QSAX __builtin_arm_qsax +#define __SHSAX __builtin_arm_shsax +#define __USAX __builtin_arm_usax +#define __UQSAX __builtin_arm_uqsax +#define __UHSAX __builtin_arm_uhsax +#define __USAD8 __builtin_arm_usad8 +#define __USADA8 __builtin_arm_usada8 +#define __SSAT16 __builtin_arm_ssat16 +#define __USAT16 __builtin_arm_usat16 +#define __UXTB16 __builtin_arm_uxtb16 +#define __UXTAB16 __builtin_arm_uxtab16 +#define __SXTB16 __builtin_arm_sxtb16 +#define __SXTAB16 __builtin_arm_sxtab16 +#define __SMUAD __builtin_arm_smuad +#define __SMUADX __builtin_arm_smuadx 
+#define __SMLAD __builtin_arm_smlad +#define __SMLADX __builtin_arm_smladx +#define __SMLALD __builtin_arm_smlald +#define __SMLALDX __builtin_arm_smlaldx +#define __SMUSD __builtin_arm_smusd +#define __SMUSDX __builtin_arm_smusdx +#define __SMLSD __builtin_arm_smlsd +#define __SMLSDX __builtin_arm_smlsdx +#define __SMLSLD __builtin_arm_smlsld +#define __SMLSLDX __builtin_arm_smlsldx +#define __SEL __builtin_arm_sel +#define __QADD __builtin_arm_qadd +#define __QSUB __builtin_arm_qsub + +#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ + ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) + +#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ + ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) + +#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) + +#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3)) + +__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) +{ + int32_t result; + + __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +#endif /* (__ARM_FEATURE_DSP == 1) */ +/*@} end of group CMSIS_SIMD_intrinsics */ + + +#endif /* __CMSIS_ARMCLANG_H */ diff --git a/Drivers/CMSIS/Include/cmsis_armclang_ltm.h b/Drivers/CMSIS/Include/cmsis_armclang_ltm.h new file mode 100644 index 0000000..1e255d5 --- /dev/null +++ b/Drivers/CMSIS/Include/cmsis_armclang_ltm.h @@ -0,0 +1,1928 @@ +/**************************************************************************//** + * @file cmsis_armclang_ltm.h + * @brief CMSIS compiler armclang (Arm Compiler 6) header file + * @version V1.5.3 + * @date 27. May 2021 + ******************************************************************************/ +/* + * Copyright (c) 2018-2021 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/*lint -esym(9058, IRQn)*/ /* disable MISRA 2012 Rule 2.4 for IRQn */ + +#ifndef __CMSIS_ARMCLANG_H +#define __CMSIS_ARMCLANG_H + +#pragma clang system_header /* treat file as system include file */ + +/* CMSIS compiler specific defines */ +#ifndef __ASM + #define __ASM __asm +#endif +#ifndef __INLINE + #define __INLINE __inline +#endif +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static __inline +#endif +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline +#endif +#ifndef __NO_RETURN + #define __NO_RETURN __attribute__((__noreturn__)) +#endif +#ifndef __USED + #define __USED __attribute__((used)) +#endif +#ifndef __WEAK + #define __WEAK __attribute__((weak)) +#endif +#ifndef __PACKED + #define __PACKED __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_UNION + #define __PACKED_UNION union __attribute__((packed, aligned(1))) +#endif +#ifndef __UNALIGNED_UINT32 /* deprecated */ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT32)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32 */ + struct __attribute__((packed)) T_UINT32 { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) +#endif +#ifndef __UNALIGNED_UINT16_WRITE + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT16_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_WRITE */ + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT16_READ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT16_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_READ */ + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) +#endif +#ifndef __UNALIGNED_UINT32_WRITE + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT32_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_WRITE */ + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT32_READ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT32_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_READ */ + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) +#endif +#ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) +#endif +#ifndef __RESTRICT + #define __RESTRICT __restrict +#endif +#ifndef __COMPILER_BARRIER + #define __COMPILER_BARRIER() __ASM volatile("":::"memory") +#endif + +/* ######################### Startup and Lowlevel Init ######################## */ + +#ifndef __PROGRAM_START +#define __PROGRAM_START __main +#endif + +#ifndef __INITIAL_SP +#define __INITIAL_SP Image$$ARM_LIB_STACK$$ZI$$Limit +#endif + +#ifndef __STACK_LIMIT +#define __STACK_LIMIT Image$$ARM_LIB_STACK$$ZI$$Base +#endif + +#ifndef __VECTOR_TABLE 
+#define __VECTOR_TABLE __Vectors +#endif + +#ifndef __VECTOR_TABLE_ATTRIBUTE +#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET"))) +#endif + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +#ifndef __STACK_SEAL +#define __STACK_SEAL Image$$STACKSEAL$$ZI$$Base +#endif + +#ifndef __TZ_STACK_SEAL_SIZE +#define __TZ_STACK_SEAL_SIZE 8U +#endif + +#ifndef __TZ_STACK_SEAL_VALUE +#define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL +#endif + + +__STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { + *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; +} +#endif + + +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions + @{ +*/ + +/* Define macros for porting to both thumb1 and thumb2. + * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) +#endif + +/** + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. + */ +#define __NOP __builtin_arm_nop + +/** + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. + */ +#define __WFI __builtin_arm_wfi + + +/** + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. + */ +#define __WFE __builtin_arm_wfe + + +/** + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. + */ +#define __SEV __builtin_arm_sev + + +/** + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. + */ +#define __ISB() __builtin_arm_isb(0xF) + +/** + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. + */ +#define __DSB() __builtin_arm_dsb(0xF) + + +/** + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. + */ +#define __DMB() __builtin_arm_dmb(0xF) + + +/** + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV(value) __builtin_bswap32(value) + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV16(value) __ROR(__REV(value), 16) + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. 
+ \param [in] value Value to reverse + \return Reversed value + */ +#define __REVSH(value) (int16_t)__builtin_bswap16(value) + + +/** + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) +{ + op2 %= 32U; + if (op2 == 0U) + { + return op1; + } + return (op1 >> op2) | (op1 << (32U - op2)); +} + + +/** + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. + */ +#define __BKPT(value) __ASM volatile ("bkpt "#value) + + +/** + \brief Reverse bit order of value + \details Reverses the bit order of the given value. + \param [in] value Value to reverse + \return Reversed value + */ +#define __RBIT __builtin_arm_rbit + +/** + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value + */ +__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) +{ + /* Even though __builtin_clz produces a CLZ instruction on ARM, formally + __builtin_clz(0) is undefined behaviour, so handle this case specially. + This guarantees ARM-compatible results if happening to compile on a non-ARM + target, and ensures the compiler doesn't decide to activate any + optimisations using the logic "value was passed to __builtin_clz, so it + is non-zero". + ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a + single CLZ instruction. + */ + if (value == 0U) + { + return 32U; + } + return __builtin_clz(value); +} + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +/** + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDREXB (uint8_t)__builtin_arm_ldrex + + +/** + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDREXH (uint16_t)__builtin_arm_ldrex + + +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDREXW (uint32_t)__builtin_arm_ldrex + + +/** + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXB (uint32_t)__builtin_arm_strex + + +/** + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXH (uint32_t)__builtin_arm_strex + + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXW (uint32_t)__builtin_arm_strex + + +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +#define __CLREX __builtin_arm_clrex + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT __builtin_arm_ssat + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT __builtin_arm_usat + + +/** + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) +{ + uint32_t result; + + __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return(result); +} + + +/** + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint8_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint16_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return(result); +} + + +/** + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); +} + +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +{ + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; +} + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +/** + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t) result); +} + + +/** + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t) result); +} + + +/** + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. 
+ \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return(result); +} + + +/** + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDAEXB (uint8_t)__builtin_arm_ldaex + + +/** + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDAEXH (uint16_t)__builtin_arm_ldaex + + +/** + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDAEX (uint32_t)__builtin_arm_ldaex + + +/** + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXB (uint32_t)__builtin_arm_stlex + + +/** + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXH (uint32_t)__builtin_arm_stlex + + +/** + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEX (uint32_t)__builtin_arm_stlex + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + +/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. + Can only be executed in Privileged modes. 
+ */ +#ifndef __ARM_COMPAT_H +__STATIC_FORCEINLINE void __enable_irq(void) +{ + __ASM volatile ("cpsie i" : : : "memory"); +} +#endif + + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +#ifndef __ARM_COMPAT_H +__STATIC_FORCEINLINE void __disable_irq(void) +{ + __ASM volatile ("cpsid i" : : : "memory"); +} +#endif + + +/** + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. + \return non-secure Control Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set + */ +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) +{ + __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); + __ISB(); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. + \param [in] control Control Register value to set + */ +__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) +{ + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); + __ISB(); +} +#endif + + +/** + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_IPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_APSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, apsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_xPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). + \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. + \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). 
+ \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. + \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); +} +#endif + + +/** + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. + \return MSP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). + \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. + \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); +} +#endif + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); + return(result); +} + + +/** + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. + \param [in] topOfStack Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) +{ + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); +} +#endif + + +/** + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. + \return Priority Mask value + */ +__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. 
+ \return Priority Mask value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask + */ +__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +{ + __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask + */ +__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) +{ + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); +} +#endif + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) +/** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __enable_fault_irq(void) +{ + __ASM volatile ("cpsie f" : : : "memory"); +} + + +/** + \brief Disable FIQ + \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __disable_fault_irq(void) +{ + __ASM volatile ("cpsid f" : : : "memory"); +} + + +/** + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value + */ +__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, basepri" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) +{ + __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); +} +#endif + + +/** + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); +} + + +/** + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. 
+ \return Fault Mask register value + */ +__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. + \return Fault Mask register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); +} +#endif + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) + +/** + \brief Get Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). + \return PSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim" : "=r" (result) ); + return result; +#endif +} + +#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \return PSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + return result; +#endif +} +#endif + + +/** + \brief Set Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). 
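+           Once set, any attempt to push the process stack below this limit raises
+           a stack overflow (STKOF) fault, allowing an RTOS to detect thread stack
+           overflows in hardware.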
+ \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); +#endif +} +#endif + + +/** + \brief Get Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). + \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim" : "=r" (result) ); + return result; +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. + \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return result; +#endif +} +#endif + + +/** + \brief Set Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). 
+ \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. + \param [in] MainStackPtrLimit Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); +#endif +} +#endif + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + +/** + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value + */ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#define __get_FPSCR (uint32_t)__builtin_arm_get_fpscr +#else +#define __get_FPSCR() ((uint32_t)0U) +#endif + +/** + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. 
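+           For example, __set_FPSCR(__get_FPSCR() & ~0x9FU) clears the cumulative
+           floating-point exception flags (IOC, DZC, OFC, UFC, IXC and IDC) while
+           preserving the rounding mode and other control bits.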
+ \param [in] fpscr Floating Point Status/Control value to set + */ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#define __set_FPSCR __builtin_arm_set_fpscr +#else +#define __set_FPSCR(x) ((void)(x)) +#endif + + +/*@} end of CMSIS_Core_RegAccFunctions */ + + +/* ################### Compiler specific Intrinsics ########################### */ +/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics + Access to dedicated SIMD instructions + @{ +*/ + +#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) + +__STATIC_FORCEINLINE uint32_t __SADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + + +__STATIC_FORCEINLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + + +__STATIC_FORCEINLINE uint32_t __SADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UADD16(uint32_t 
op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile 
("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USAD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +#define __SSAT16(ARG1,ARG2) \ +({ \ + int32_t __RES, __ARG1 = (ARG1); \ + __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ + __RES; \ + }) + +#define __USAT16(ARG1,ARG2) \ +({ \ + uint32_t __RES, __ARG1 = (ARG1); \ + __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ + __RES; \ + }) + +__STATIC_FORCEINLINE uint32_t __UXTB16(uint32_t op1) +{ + uint32_t result; + + __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1)); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1) +{ + uint32_t result; + + __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1)); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM 
volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint32_t __SEL (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2) +{ + int32_t result; + + __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2) +{ + int32_t result; + + __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ + ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) + +#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ + ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) + +#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) + +#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3)) + +__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) +{ + int32_t result; + + __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +#endif /* (__ARM_FEATURE_DSP == 1) */ +/*@} end of group CMSIS_SIMD_intrinsics */ + + 
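+/* Usage sketch for the SIMD intrinsics above (operand values chosen purely for
+   illustration): __UADD8 adds the four packed unsigned bytes of its operands in
+   a single instruction, and __SMLAD multiplies both signed halfword pairs and
+   accumulates the two products, the core step of a fixed-point dot product.
+
+     uint32_t s   = __UADD8(0x01020304U, 0x10203040U);      yields 0x11223344
+     uint32_t acc = __SMLAD(0x00030002U, 0x00050004U, 0U);  yields (3*5)+(2*4) = 23
+
+   Both execute as single instructions on cores that implement the DSP extension
+   (__ARM_FEATURE_DSP == 1). */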
+#endif /* __CMSIS_ARMCLANG_H */ diff --git a/Drivers/CMSIS/Include/cmsis_compiler.h b/Drivers/CMSIS/Include/cmsis_compiler.h new file mode 100644 index 0000000..adbf296 --- /dev/null +++ b/Drivers/CMSIS/Include/cmsis_compiler.h @@ -0,0 +1,283 @@ +/**************************************************************************//** + * @file cmsis_compiler.h + * @brief CMSIS compiler generic header file + * @version V5.1.0 + * @date 09. October 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __CMSIS_COMPILER_H +#define __CMSIS_COMPILER_H + +#include + +/* + * Arm Compiler 4/5 + */ +#if defined ( __CC_ARM ) + #include "cmsis_armcc.h" + + +/* + * Arm Compiler 6.6 LTM (armclang) + */ +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) && (__ARMCC_VERSION < 6100100) + #include "cmsis_armclang_ltm.h" + + /* + * Arm Compiler above 6.10.1 (armclang) + */ +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6100100) + #include "cmsis_armclang.h" + + +/* + * GNU Compiler + */ +#elif defined ( __GNUC__ ) + #include "cmsis_gcc.h" + + +/* + * IAR Compiler + */ +#elif defined ( __ICCARM__ ) + #include + + +/* + * TI Arm Compiler + */ +#elif defined ( __TI_ARM__ ) + #include + + #ifndef __ASM + #define __ASM __asm + #endif + #ifndef __INLINE + #define __INLINE inline + #endif + #ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline + #endif + #ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __STATIC_INLINE + #endif + #ifndef __NO_RETURN + #define __NO_RETURN __attribute__((noreturn)) + #endif + #ifndef __USED + #define __USED __attribute__((used)) + #endif + #ifndef __WEAK + #define __WEAK __attribute__((weak)) + #endif + #ifndef __PACKED + #define __PACKED __attribute__((packed)) + #endif + #ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __attribute__((packed)) + #endif + #ifndef __PACKED_UNION + #define __PACKED_UNION union __attribute__((packed)) + #endif + #ifndef __UNALIGNED_UINT32 /* deprecated */ + struct __attribute__((packed)) T_UINT32 { uint32_t v; }; + #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) + #endif + #ifndef __UNALIGNED_UINT16_WRITE + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void*)(addr))->v) = (val)) + #endif + #ifndef __UNALIGNED_UINT16_READ + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) + #endif + #ifndef __UNALIGNED_UINT32_WRITE + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) + #endif + #ifndef __UNALIGNED_UINT32_READ + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #define __UNALIGNED_UINT32_READ(addr) (((const struct 
T_UINT32_READ *)(const void *)(addr))->v) + #endif + #ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) + #endif + #ifndef __RESTRICT + #define __RESTRICT __restrict + #endif + #ifndef __COMPILER_BARRIER + #warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored. + #define __COMPILER_BARRIER() (void)0 + #endif + + +/* + * TASKING Compiler + */ +#elif defined ( __TASKING__ ) + /* + * The CMSIS functions have been implemented as intrinsics in the compiler. + * Please use "carm -?i" to get an up to date list of all intrinsics, + * Including the CMSIS ones. + */ + + #ifndef __ASM + #define __ASM __asm + #endif + #ifndef __INLINE + #define __INLINE inline + #endif + #ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline + #endif + #ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __STATIC_INLINE + #endif + #ifndef __NO_RETURN + #define __NO_RETURN __attribute__((noreturn)) + #endif + #ifndef __USED + #define __USED __attribute__((used)) + #endif + #ifndef __WEAK + #define __WEAK __attribute__((weak)) + #endif + #ifndef __PACKED + #define __PACKED __packed__ + #endif + #ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __packed__ + #endif + #ifndef __PACKED_UNION + #define __PACKED_UNION union __packed__ + #endif + #ifndef __UNALIGNED_UINT32 /* deprecated */ + struct __packed__ T_UINT32 { uint32_t v; }; + #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) + #endif + #ifndef __UNALIGNED_UINT16_WRITE + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) + #endif + #ifndef __UNALIGNED_UINT16_READ + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) + #endif + #ifndef __UNALIGNED_UINT32_WRITE + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) + #endif + #ifndef __UNALIGNED_UINT32_READ + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) + #endif + #ifndef __ALIGNED + #define __ALIGNED(x) __align(x) + #endif + #ifndef __RESTRICT + #warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored. + #define __RESTRICT + #endif + #ifndef __COMPILER_BARRIER + #warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored. + #define __COMPILER_BARRIER() (void)0 + #endif + + +/* + * COSMIC Compiler + */ +#elif defined ( __CSMC__ ) + #include + + #ifndef __ASM + #define __ASM _asm + #endif + #ifndef __INLINE + #define __INLINE inline + #endif + #ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline + #endif + #ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __STATIC_INLINE + #endif + #ifndef __NO_RETURN + // NO RETURN is automatically detected hence no warning here + #define __NO_RETURN + #endif + #ifndef __USED + #warning No compiler specific solution for __USED. __USED is ignored. 
+ #define __USED + #endif + #ifndef __WEAK + #define __WEAK __weak + #endif + #ifndef __PACKED + #define __PACKED @packed + #endif + #ifndef __PACKED_STRUCT + #define __PACKED_STRUCT @packed struct + #endif + #ifndef __PACKED_UNION + #define __PACKED_UNION @packed union + #endif + #ifndef __UNALIGNED_UINT32 /* deprecated */ + @packed struct T_UINT32 { uint32_t v; }; + #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) + #endif + #ifndef __UNALIGNED_UINT16_WRITE + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) + #endif + #ifndef __UNALIGNED_UINT16_READ + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) + #endif + #ifndef __UNALIGNED_UINT32_WRITE + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) + #endif + #ifndef __UNALIGNED_UINT32_READ + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) + #endif + #ifndef __ALIGNED + #warning No compiler specific solution for __ALIGNED. __ALIGNED is ignored. + #define __ALIGNED(x) + #endif + #ifndef __RESTRICT + #warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored. + #define __RESTRICT + #endif + #ifndef __COMPILER_BARRIER + #warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored. + #define __COMPILER_BARRIER() (void)0 + #endif + + +#else + #error Unknown compiler. +#endif + + +#endif /* __CMSIS_COMPILER_H */ + diff --git a/Drivers/CMSIS/Include/cmsis_gcc.h b/Drivers/CMSIS/Include/cmsis_gcc.h new file mode 100644 index 0000000..67bda4e --- /dev/null +++ b/Drivers/CMSIS/Include/cmsis_gcc.h @@ -0,0 +1,2211 @@ +/**************************************************************************//** + * @file cmsis_gcc.h + * @brief CMSIS compiler GCC header file + * @version V5.4.1 + * @date 27. May 2021 + ******************************************************************************/ +/* + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __CMSIS_GCC_H +#define __CMSIS_GCC_H + +/* ignore some GCC warnings */ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wsign-conversion" +#pragma GCC diagnostic ignored "-Wconversion" +#pragma GCC diagnostic ignored "-Wunused-parameter" + +/* Fallback for __has_builtin */ +#ifndef __has_builtin + #define __has_builtin(x) (0) +#endif + +/* CMSIS compiler specific defines */ +#ifndef __ASM + #define __ASM __asm +#endif +#ifndef __INLINE + #define __INLINE inline +#endif +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline +#endif +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline +#endif +#ifndef __NO_RETURN + #define __NO_RETURN __attribute__((__noreturn__)) +#endif +#ifndef __USED + #define __USED __attribute__((used)) +#endif +#ifndef __WEAK + #define __WEAK __attribute__((weak)) +#endif +#ifndef __PACKED + #define __PACKED __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_UNION + #define __PACKED_UNION union __attribute__((packed, aligned(1))) +#endif +#ifndef __UNALIGNED_UINT32 /* deprecated */ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + #pragma GCC diagnostic ignored "-Wattributes" + struct __attribute__((packed)) T_UINT32 { uint32_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) +#endif +#ifndef __UNALIGNED_UINT16_WRITE + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + #pragma GCC diagnostic ignored "-Wattributes" + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT16_READ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + #pragma GCC diagnostic ignored "-Wattributes" + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) +#endif +#ifndef __UNALIGNED_UINT32_WRITE + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + #pragma GCC diagnostic ignored "-Wattributes" + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT32_READ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + #pragma GCC diagnostic ignored "-Wattributes" + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) +#endif +#ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) +#endif +#ifndef __RESTRICT + #define __RESTRICT __restrict +#endif +#ifndef __COMPILER_BARRIER + #define __COMPILER_BARRIER() __ASM volatile("":::"memory") +#endif + +/* ######################### Startup and Lowlevel Init ######################## */ + +#ifndef __PROGRAM_START + +/** + \brief Initializes data and bss sections + \details This default implementations initialized all data and additional bss + sections relying on .copy.table and .zero.table specified properly + in the used linker script. 
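+           Each copy-table entry is a (source, destination, word count) triple and
+           each zero-table entry a (destination, word count) pair; the linker script
+           must delimit them with the __copy_table_start__/__copy_table_end__ and
+           __zero_table_start__/__zero_table_end__ symbols referenced below.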
+ + */ +__STATIC_FORCEINLINE __NO_RETURN void __cmsis_start(void) +{ + extern void _start(void) __NO_RETURN; + + typedef struct { + uint32_t const* src; + uint32_t* dest; + uint32_t wlen; + } __copy_table_t; + + typedef struct { + uint32_t* dest; + uint32_t wlen; + } __zero_table_t; + + extern const __copy_table_t __copy_table_start__; + extern const __copy_table_t __copy_table_end__; + extern const __zero_table_t __zero_table_start__; + extern const __zero_table_t __zero_table_end__; + + for (__copy_table_t const* pTable = &__copy_table_start__; pTable < &__copy_table_end__; ++pTable) { + for(uint32_t i=0u; iwlen; ++i) { + pTable->dest[i] = pTable->src[i]; + } + } + + for (__zero_table_t const* pTable = &__zero_table_start__; pTable < &__zero_table_end__; ++pTable) { + for(uint32_t i=0u; iwlen; ++i) { + pTable->dest[i] = 0u; + } + } + + _start(); +} + +#define __PROGRAM_START __cmsis_start +#endif + +#ifndef __INITIAL_SP +#define __INITIAL_SP __StackTop +#endif + +#ifndef __STACK_LIMIT +#define __STACK_LIMIT __StackLimit +#endif + +#ifndef __VECTOR_TABLE +#define __VECTOR_TABLE __Vectors +#endif + +#ifndef __VECTOR_TABLE_ATTRIBUTE +#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section(".vectors"))) +#endif + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +#ifndef __STACK_SEAL +#define __STACK_SEAL __StackSeal +#endif + +#ifndef __TZ_STACK_SEAL_SIZE +#define __TZ_STACK_SEAL_SIZE 8U +#endif + +#ifndef __TZ_STACK_SEAL_VALUE +#define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL +#endif + + +__STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { + *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; +} +#endif + + +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions + @{ +*/ + +/* Define macros for porting to both thumb1 and thumb2. + * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_RW_REG(r) "+l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_RW_REG(r) "+r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) +#endif + +/** + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. + */ +#define __NOP() __ASM volatile ("nop") + +/** + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. + */ +#define __WFI() __ASM volatile ("wfi":::"memory") + + +/** + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. + */ +#define __WFE() __ASM volatile ("wfe":::"memory") + + +/** + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. + */ +#define __SEV() __ASM volatile ("sev") + + +/** + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. 
+ */ +__STATIC_FORCEINLINE void __ISB(void) +{ + __ASM volatile ("isb 0xF":::"memory"); +} + + +/** + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. + */ +__STATIC_FORCEINLINE void __DSB(void) +{ + __ASM volatile ("dsb 0xF":::"memory"); +} + + +/** + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. + */ +__STATIC_FORCEINLINE void __DMB(void) +{ + __ASM volatile ("dmb 0xF":::"memory"); +} + + +/** + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value + */ +__STATIC_FORCEINLINE uint32_t __REV(uint32_t value) +{ +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) + return __builtin_bswap32(value); +#else + uint32_t result; + + __ASM ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return result; +#endif +} + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value + */ +__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value) +{ + uint32_t result; + + __ASM ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return result; +} + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value + */ +__STATIC_FORCEINLINE int16_t __REVSH(int16_t value) +{ +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + return (int16_t)__builtin_bswap16(value); +#else + int16_t result; + + __ASM ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return result; +#endif +} + + +/** + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) +{ + op2 %= 32U; + if (op2 == 0U) + { + return op1; + } + return (op1 >> op2) | (op1 << (32U - op2)); +} + + +/** + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. + */ +#define __BKPT(value) __ASM volatile ("bkpt "#value) + + +/** + \brief Reverse bit order of value + \details Reverses the bit order of the given value. 
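+           For example, __RBIT(0x00000001U) returns 0x80000000U and
+           __RBIT(0x80000000U) returns 0x00000001U.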
+ \param [in] value Value to reverse + \return Reversed value + */ +__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value) +{ + uint32_t result; + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) + __ASM ("rbit %0, %1" : "=r" (result) : "r" (value) ); +#else + uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */ + + result = value; /* r will be reversed bits of v; first get LSB of v */ + for (value >>= 1U; value != 0U; value >>= 1U) + { + result <<= 1U; + result |= value & 1U; + s--; + } + result <<= s; /* shift when v's highest bits are zero */ +#endif + return result; +} + + +/** + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value + */ +__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) +{ + /* Even though __builtin_clz produces a CLZ instruction on ARM, formally + __builtin_clz(0) is undefined behaviour, so handle this case specially. + This guarantees ARM-compatible results if happening to compile on a non-ARM + target, and ensures the compiler doesn't decide to activate any + optimisations using the logic "value was passed to __builtin_clz, so it + is non-zero". + ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a + single CLZ instruction. + */ + if (value == 0U) + { + return 32U; + } + return __builtin_clz(value); +} + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +/** + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr) +{ + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); +#endif + return ((uint8_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr) +{ + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); +#endif + return ((uint16_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. 
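+           Typically paired with __STREXW in a retry loop, e.g. an atomic increment:
+           do { v = __LDREXW(&cnt); } while (__STREXW(v + 1U, &cnt) != 0U);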
+ \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr) +{ + uint32_t result; + + __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) ); + return(result); +} + + +/** + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) +{ + uint32_t result; + + __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); + return(result); +} + + +/** + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) +{ + uint32_t result; + + __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); + return(result); +} + + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr) +{ + uint32_t result; + + __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) ); + return(result); +} + + +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +__STATIC_FORCEINLINE void __CLREX(void) +{ + __ASM volatile ("clrex" ::: "memory"); +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] ARG1 Value to be saturated + \param [in] ARG2 Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT(ARG1, ARG2) \ +__extension__ \ +({ \ + int32_t __RES, __ARG1 = (ARG1); \ + __ASM volatile ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ + __RES; \ + }) + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] ARG1 Value to be saturated + \param [in] ARG2 Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT(ARG1, ARG2) \ +__extension__ \ +({ \ + uint32_t __RES, __ARG1 = (ARG1); \ + __ASM volatile ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ + __RES; \ + }) + + +/** + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. 
+ \param [in] value Value to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) +{ + uint32_t result; + + __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return(result); +} + + +/** + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) +{ + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" ); +#endif + return ((uint8_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) +{ + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" ); +#endif + return ((uint16_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return(result); +} + + +/** + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); +} + +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + +/** + \brief Signed Saturate + \details Saturates a signed value. 
+ \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +{ + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; +} + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +/** + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t) result); +} + + +/** + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t) result); +} + + +/** + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return(result); +} + + +/** + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. 
+ \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDAEXB(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldaexb %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t) result); +} + + +/** + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDAEXH(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldaexh %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t) result); +} + + +/** + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDAEX(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldaex %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return(result); +} + + +/** + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("stlexb %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); + return(result); +} + + +/** + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("stlexh %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); + return(result); +} + + +/** + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("stlex %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); + return(result); +} + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + +/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __enable_irq(void) +{ + __ASM volatile ("cpsie i" : : : "memory"); +} + + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting special-purpose register PRIMASK. + Can only be executed in Privileged modes. 
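           \note  Illustrative pattern, not part of this header: saving and restoring
                  PRIMASK keeps nested critical sections from unconditionally
                  re-enabling interrupts:
                    uint32_t primask = __get_PRIMASK();
                    __disable_irq();
                    /* ...critical section... */
                    __set_PRIMASK(primask);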
+ */ +__STATIC_FORCEINLINE void __disable_irq(void) +{ + __ASM volatile ("cpsid i" : : : "memory"); +} + + +/** + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. + \return non-secure Control Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set + */ +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) +{ + __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); + __ISB(); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. + \param [in] control Control Register value to set + */ +__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) +{ + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); + __ISB(); +} +#endif + + +/** + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_IPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_APSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, apsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_xPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). + \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. + \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). 
+ \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. + \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); +} +#endif + + +/** + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. + \return MSP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). + \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. + \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); +} +#endif + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); + return(result); +} + + +/** + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. + \param [in] topOfStack Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) +{ + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); +} +#endif + + +/** + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. + \return Priority Mask value + */ +__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. 
+ \return Priority Mask value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask + */ +__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +{ + __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask + */ +__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) +{ + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); +} +#endif + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) +/** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __enable_fault_irq(void) +{ + __ASM volatile ("cpsie f" : : : "memory"); +} + + +/** + \brief Disable FIQ + \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __disable_fault_irq(void) +{ + __ASM volatile ("cpsid f" : : : "memory"); +} + + +/** + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value + */ +__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, basepri" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) +{ + __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); +} +#endif + + +/** + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); +} + + +/** + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. 
+ \return Fault Mask register value + */ +__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. + \return Fault Mask register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); +} +#endif + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) + +/** + \brief Get Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). + \return PSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim" : "=r" (result) ); + return result; +#endif +} + +#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \return PSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + return result; +#endif +} +#endif + + +/** + \brief Set Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). 
+ \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); +#endif +} +#endif + + +/** + \brief Get Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). + \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim" : "=r" (result) ); + return result; +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. + \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return result; +#endif +} +#endif + + +/** + \brief Set Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). 
+ \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. + \param [in] MainStackPtrLimit Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); +#endif +} +#endif + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + + +/** + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value + */ +__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) +{ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#if __has_builtin(__builtin_arm_get_fpscr) +// Re-enable using built-in when GCC has been fixed +// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) + /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ + return __builtin_arm_get_fpscr(); +#else + uint32_t result; + + __ASM volatile ("VMRS %0, fpscr" : "=r" (result) ); + return(result); +#endif +#else + return(0U); +#endif +} + + +/** + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. 
+ \param [in] fpscr Floating Point Status/Control value to set + */ +__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) +{ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#if __has_builtin(__builtin_arm_set_fpscr) +// Re-enable using built-in when GCC has been fixed +// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) + /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ + __builtin_arm_set_fpscr(fpscr); +#else + __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory"); +#endif +#else + (void)fpscr; +#endif +} + + +/*@} end of CMSIS_Core_RegAccFunctions */ + + +/* ################### Compiler specific Intrinsics ########################### */ +/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics + Access to dedicated SIMD instructions + @{ +*/ + +#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) + +__STATIC_FORCEINLINE uint32_t __SADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + + +__STATIC_FORCEINLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + + +__STATIC_FORCEINLINE uint32_t __SADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + 
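  /* qadd16 adds the two signed 16-bit lanes of op1 and op2 independently,
     saturating each lane to the signed 16-bit range */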
return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("shsax %0, %1, %2" : "=r" (result) : "r" 
(op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USAD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +#define __SSAT16(ARG1, ARG2) \ +__extension__ \ +({ \ + int32_t __RES, __ARG1 = (ARG1); \ + __ASM volatile ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ + __RES; \ + }) + +#define __USAT16(ARG1, ARG2) \ +__extension__ \ +({ \ + uint32_t __RES, __ARG1 = (ARG1); \ + __ASM volatile ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ + __RES; \ + }) + +__STATIC_FORCEINLINE uint32_t __UXTB16(uint32_t op1) +{ + uint32_t result; + + __ASM ("uxtb16 %0, %1" : "=r" (result) : "r" (op1)); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1) +{ + uint32_t result; + + __ASM ("sxtb16 %0, %1" : "=r" (result) : "r" (op1)); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SXTB16_RORn(uint32_t op1, uint32_t rotate) +{ + uint32_t result; + if (__builtin_constant_p(rotate) && ((rotate == 8U) || (rotate == 16U) || (rotate == 24U))) { + __ASM volatile ("sxtb16 %0, %1, ROR %2" : "=r" (result) : "r" (op1), "i" (rotate) ); + } else { + result = __SXTB16(__ROR(op1, rotate)) ; + } + return result; +} + +__STATIC_FORCEINLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SXTAB16_RORn(uint32_t op1, uint32_t op2, uint32_t rotate) +{ + uint32_t result; + if (__builtin_constant_p(rotate) && ((rotate == 8U) || (rotate == 16U) || (rotate == 24U))) { + __ASM volatile ("sxtab16 %0, %1, %2, ROR %3" : "=r" (result) : "r" (op1) , "r" (op2) , "i" (rotate)); + } else { + result = __SXTAB16(op1, __ROR(op2, rotate)); + } + return result; +} + + +__STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3) +{ + 
uint32_t result; + + __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint32_t __SEL (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2) +{ + int32_t result; + + __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + 
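As a usage sketch (illustrative only, assuming a core where __ARM_FEATURE_DSP is 1 and these intrinsics are available through the device's core header): each 32-bit word packs two signed q15 samples, so a fixed-point dot product can consume one sample pair per __SMLAD call. The function name dot_q15 and the requirement that the buffers are 32-bit aligned are assumptions for the example, not part of the CMSIS API.

static int32_t dot_q15(const uint32_t *a, const uint32_t *b, uint32_t pairs)
{
  uint32_t acc = 0U;                          /* running sum of products */
  for (uint32_t i = 0U; i < pairs; i++)
  {
    /* SMLAD multiplies the low halfwords and the high halfwords of the two
       packed operands and adds both 32-bit products to the accumulator */
    acc = __SMLAD(a[i], b[i], acc);
  }
  return (int32_t)acc;
}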
+__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2) +{ + int32_t result; + + __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + + +#define __PKHBT(ARG1,ARG2,ARG3) \ +__extension__ \ +({ \ + uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ + __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ + __RES; \ + }) + +#define __PKHTB(ARG1,ARG2,ARG3) \ +__extension__ \ +({ \ + uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ + if (ARG3 == 0) \ + __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \ + else \ + __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ + __RES; \ + }) + + +__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) +{ + int32_t result; + + __ASM ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +#endif /* (__ARM_FEATURE_DSP == 1) */ +/*@} end of group CMSIS_SIMD_intrinsics */ + + +#pragma GCC diagnostic pop + +#endif /* __CMSIS_GCC_H */ diff --git a/Drivers/CMSIS/Include/cmsis_iccarm.h b/Drivers/CMSIS/Include/cmsis_iccarm.h new file mode 100644 index 0000000..65b824b --- /dev/null +++ b/Drivers/CMSIS/Include/cmsis_iccarm.h @@ -0,0 +1,1002 @@ +/**************************************************************************//** + * @file cmsis_iccarm.h + * @brief CMSIS compiler ICCARM (IAR Compiler for Arm) header file + * @version V5.3.0 + * @date 14. April 2021 + ******************************************************************************/ + +//------------------------------------------------------------------------------ +// +// Copyright (c) 2017-2021 IAR Systems +// Copyright (c) 2017-2021 Arm Limited. All rights reserved. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License") +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +//------------------------------------------------------------------------------ + + +#ifndef __CMSIS_ICCARM_H__ +#define __CMSIS_ICCARM_H__ + +#ifndef __ICCARM__ + #error This file should only be compiled by ICCARM +#endif + +#pragma system_include + +#define __IAR_FT _Pragma("inline=forced") __intrinsic + +#if (__VER__ >= 8000000) + #define __ICCARM_V8 1 +#else + #define __ICCARM_V8 0 +#endif + +#ifndef __ALIGNED + #if __ICCARM_V8 + #define __ALIGNED(x) __attribute__((aligned(x))) + #elif (__VER__ >= 7080000) + /* Needs IAR language extensions */ + #define __ALIGNED(x) __attribute__((aligned(x))) + #else + #warning No compiler specific solution for __ALIGNED.__ALIGNED is ignored. + #define __ALIGNED(x) + #endif +#endif + + +/* Define compiler macros for CPU architecture, used in CMSIS 5. 
+ */ +#if __ARM_ARCH_6M__ || __ARM_ARCH_7M__ || __ARM_ARCH_7EM__ || __ARM_ARCH_8M_BASE__ || __ARM_ARCH_8M_MAIN__ +/* Macros already defined */ +#else + #if defined(__ARM8M_MAINLINE__) || defined(__ARM8EM_MAINLINE__) + #define __ARM_ARCH_8M_MAIN__ 1 + #elif defined(__ARM8M_BASELINE__) + #define __ARM_ARCH_8M_BASE__ 1 + #elif defined(__ARM_ARCH_PROFILE) && __ARM_ARCH_PROFILE == 'M' + #if __ARM_ARCH == 6 + #define __ARM_ARCH_6M__ 1 + #elif __ARM_ARCH == 7 + #if __ARM_FEATURE_DSP + #define __ARM_ARCH_7EM__ 1 + #else + #define __ARM_ARCH_7M__ 1 + #endif + #endif /* __ARM_ARCH */ + #endif /* __ARM_ARCH_PROFILE == 'M' */ +#endif + +/* Alternativ core deduction for older ICCARM's */ +#if !defined(__ARM_ARCH_6M__) && !defined(__ARM_ARCH_7M__) && !defined(__ARM_ARCH_7EM__) && \ + !defined(__ARM_ARCH_8M_BASE__) && !defined(__ARM_ARCH_8M_MAIN__) + #if defined(__ARM6M__) && (__CORE__ == __ARM6M__) + #define __ARM_ARCH_6M__ 1 + #elif defined(__ARM7M__) && (__CORE__ == __ARM7M__) + #define __ARM_ARCH_7M__ 1 + #elif defined(__ARM7EM__) && (__CORE__ == __ARM7EM__) + #define __ARM_ARCH_7EM__ 1 + #elif defined(__ARM8M_BASELINE__) && (__CORE == __ARM8M_BASELINE__) + #define __ARM_ARCH_8M_BASE__ 1 + #elif defined(__ARM8M_MAINLINE__) && (__CORE == __ARM8M_MAINLINE__) + #define __ARM_ARCH_8M_MAIN__ 1 + #elif defined(__ARM8EM_MAINLINE__) && (__CORE == __ARM8EM_MAINLINE__) + #define __ARM_ARCH_8M_MAIN__ 1 + #else + #error "Unknown target." + #endif +#endif + + + +#if defined(__ARM_ARCH_6M__) && __ARM_ARCH_6M__==1 + #define __IAR_M0_FAMILY 1 +#elif defined(__ARM_ARCH_8M_BASE__) && __ARM_ARCH_8M_BASE__==1 + #define __IAR_M0_FAMILY 1 +#else + #define __IAR_M0_FAMILY 0 +#endif + + +#ifndef __ASM + #define __ASM __asm +#endif + +#ifndef __COMPILER_BARRIER + #define __COMPILER_BARRIER() __ASM volatile("":::"memory") +#endif + +#ifndef __INLINE + #define __INLINE inline +#endif + +#ifndef __NO_RETURN + #if __ICCARM_V8 + #define __NO_RETURN __attribute__((__noreturn__)) + #else + #define __NO_RETURN _Pragma("object_attribute=__noreturn") + #endif +#endif + +#ifndef __PACKED + #if __ICCARM_V8 + #define __PACKED __attribute__((packed, aligned(1))) + #else + /* Needs IAR language extensions */ + #define __PACKED __packed + #endif +#endif + +#ifndef __PACKED_STRUCT + #if __ICCARM_V8 + #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) + #else + /* Needs IAR language extensions */ + #define __PACKED_STRUCT __packed struct + #endif +#endif + +#ifndef __PACKED_UNION + #if __ICCARM_V8 + #define __PACKED_UNION union __attribute__((packed, aligned(1))) + #else + /* Needs IAR language extensions */ + #define __PACKED_UNION __packed union + #endif +#endif + +#ifndef __RESTRICT + #if __ICCARM_V8 + #define __RESTRICT __restrict + #else + /* Needs IAR language extensions */ + #define __RESTRICT restrict + #endif +#endif + +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline +#endif + +#ifndef __FORCEINLINE + #define __FORCEINLINE _Pragma("inline=forced") +#endif + +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __FORCEINLINE __STATIC_INLINE +#endif + +#ifndef __UNALIGNED_UINT16_READ +#pragma language=save +#pragma language=extended +__IAR_FT uint16_t __iar_uint16_read(void const *ptr) +{ + return *(__packed uint16_t*)(ptr); +} +#pragma language=restore +#define __UNALIGNED_UINT16_READ(PTR) __iar_uint16_read(PTR) +#endif + + +#ifndef __UNALIGNED_UINT16_WRITE +#pragma language=save +#pragma language=extended +__IAR_FT void __iar_uint16_write(void const *ptr, uint16_t val) +{ + *(__packed 
uint16_t*)(ptr) = val;; +} +#pragma language=restore +#define __UNALIGNED_UINT16_WRITE(PTR,VAL) __iar_uint16_write(PTR,VAL) +#endif + +#ifndef __UNALIGNED_UINT32_READ +#pragma language=save +#pragma language=extended +__IAR_FT uint32_t __iar_uint32_read(void const *ptr) +{ + return *(__packed uint32_t*)(ptr); +} +#pragma language=restore +#define __UNALIGNED_UINT32_READ(PTR) __iar_uint32_read(PTR) +#endif + +#ifndef __UNALIGNED_UINT32_WRITE +#pragma language=save +#pragma language=extended +__IAR_FT void __iar_uint32_write(void const *ptr, uint32_t val) +{ + *(__packed uint32_t*)(ptr) = val;; +} +#pragma language=restore +#define __UNALIGNED_UINT32_WRITE(PTR,VAL) __iar_uint32_write(PTR,VAL) +#endif + +#ifndef __UNALIGNED_UINT32 /* deprecated */ +#pragma language=save +#pragma language=extended +__packed struct __iar_u32 { uint32_t v; }; +#pragma language=restore +#define __UNALIGNED_UINT32(PTR) (((struct __iar_u32 *)(PTR))->v) +#endif + +#ifndef __USED + #if __ICCARM_V8 + #define __USED __attribute__((used)) + #else + #define __USED _Pragma("__root") + #endif +#endif + +#undef __WEAK /* undo the definition from DLib_Defaults.h */ +#ifndef __WEAK + #if __ICCARM_V8 + #define __WEAK __attribute__((weak)) + #else + #define __WEAK _Pragma("__weak") + #endif +#endif + +#ifndef __PROGRAM_START +#define __PROGRAM_START __iar_program_start +#endif + +#ifndef __INITIAL_SP +#define __INITIAL_SP CSTACK$$Limit +#endif + +#ifndef __STACK_LIMIT +#define __STACK_LIMIT CSTACK$$Base +#endif + +#ifndef __VECTOR_TABLE +#define __VECTOR_TABLE __vector_table +#endif + +#ifndef __VECTOR_TABLE_ATTRIBUTE +#define __VECTOR_TABLE_ATTRIBUTE @".intvec" +#endif + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +#ifndef __STACK_SEAL +#define __STACK_SEAL STACKSEAL$$Base +#endif + +#ifndef __TZ_STACK_SEAL_SIZE +#define __TZ_STACK_SEAL_SIZE 8U +#endif + +#ifndef __TZ_STACK_SEAL_VALUE +#define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL +#endif + +__STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { + *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; +} +#endif + +#ifndef __ICCARM_INTRINSICS_VERSION__ + #define __ICCARM_INTRINSICS_VERSION__ 0 +#endif + +#if __ICCARM_INTRINSICS_VERSION__ == 2 + + #if defined(__CLZ) + #undef __CLZ + #endif + #if defined(__REVSH) + #undef __REVSH + #endif + #if defined(__RBIT) + #undef __RBIT + #endif + #if defined(__SSAT) + #undef __SSAT + #endif + #if defined(__USAT) + #undef __USAT + #endif + + #include "iccarm_builtin.h" + + #define __disable_fault_irq __iar_builtin_disable_fiq + #define __disable_irq __iar_builtin_disable_interrupt + #define __enable_fault_irq __iar_builtin_enable_fiq + #define __enable_irq __iar_builtin_enable_interrupt + #define __arm_rsr __iar_builtin_rsr + #define __arm_wsr __iar_builtin_wsr + + + #define __get_APSR() (__arm_rsr("APSR")) + #define __get_BASEPRI() (__arm_rsr("BASEPRI")) + #define __get_CONTROL() (__arm_rsr("CONTROL")) + #define __get_FAULTMASK() (__arm_rsr("FAULTMASK")) + + #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) + #define __get_FPSCR() (__arm_rsr("FPSCR")) + #define __set_FPSCR(VALUE) (__arm_wsr("FPSCR", (VALUE))) + #else + #define __get_FPSCR() ( 0 ) + #define __set_FPSCR(VALUE) ((void)VALUE) + #endif + + #define __get_IPSR() (__arm_rsr("IPSR")) + #define __get_MSP() (__arm_rsr("MSP")) + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main 
extensions, the non-secure MSPLIM is RAZ/WI + #define __get_MSPLIM() (0U) + #else + #define __get_MSPLIM() (__arm_rsr("MSPLIM")) + #endif + #define __get_PRIMASK() (__arm_rsr("PRIMASK")) + #define __get_PSP() (__arm_rsr("PSP")) + + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + #define __get_PSPLIM() (0U) + #else + #define __get_PSPLIM() (__arm_rsr("PSPLIM")) + #endif + + #define __get_xPSR() (__arm_rsr("xPSR")) + + #define __set_BASEPRI(VALUE) (__arm_wsr("BASEPRI", (VALUE))) + #define __set_BASEPRI_MAX(VALUE) (__arm_wsr("BASEPRI_MAX", (VALUE))) + +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) +{ + __arm_wsr("CONTROL", control); + __iar_builtin_ISB(); +} + + #define __set_FAULTMASK(VALUE) (__arm_wsr("FAULTMASK", (VALUE))) + #define __set_MSP(VALUE) (__arm_wsr("MSP", (VALUE))) + + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + #define __set_MSPLIM(VALUE) ((void)(VALUE)) + #else + #define __set_MSPLIM(VALUE) (__arm_wsr("MSPLIM", (VALUE))) + #endif + #define __set_PRIMASK(VALUE) (__arm_wsr("PRIMASK", (VALUE))) + #define __set_PSP(VALUE) (__arm_wsr("PSP", (VALUE))) + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + #define __set_PSPLIM(VALUE) ((void)(VALUE)) + #else + #define __set_PSPLIM(VALUE) (__arm_wsr("PSPLIM", (VALUE))) + #endif + + #define __TZ_get_CONTROL_NS() (__arm_rsr("CONTROL_NS")) + +__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) +{ + __arm_wsr("CONTROL_NS", control); + __iar_builtin_ISB(); +} + + #define __TZ_get_PSP_NS() (__arm_rsr("PSP_NS")) + #define __TZ_set_PSP_NS(VALUE) (__arm_wsr("PSP_NS", (VALUE))) + #define __TZ_get_MSP_NS() (__arm_rsr("MSP_NS")) + #define __TZ_set_MSP_NS(VALUE) (__arm_wsr("MSP_NS", (VALUE))) + #define __TZ_get_SP_NS() (__arm_rsr("SP_NS")) + #define __TZ_set_SP_NS(VALUE) (__arm_wsr("SP_NS", (VALUE))) + #define __TZ_get_PRIMASK_NS() (__arm_rsr("PRIMASK_NS")) + #define __TZ_set_PRIMASK_NS(VALUE) (__arm_wsr("PRIMASK_NS", (VALUE))) + #define __TZ_get_BASEPRI_NS() (__arm_rsr("BASEPRI_NS")) + #define __TZ_set_BASEPRI_NS(VALUE) (__arm_wsr("BASEPRI_NS", (VALUE))) + #define __TZ_get_FAULTMASK_NS() (__arm_rsr("FAULTMASK_NS")) + #define __TZ_set_FAULTMASK_NS(VALUE)(__arm_wsr("FAULTMASK_NS", (VALUE))) + + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + #define __TZ_get_PSPLIM_NS() (0U) + #define __TZ_set_PSPLIM_NS(VALUE) ((void)(VALUE)) + #else + #define __TZ_get_PSPLIM_NS() (__arm_rsr("PSPLIM_NS")) + #define __TZ_set_PSPLIM_NS(VALUE) (__arm_wsr("PSPLIM_NS", (VALUE))) + #endif + + #define __TZ_get_MSPLIM_NS() (__arm_rsr("MSPLIM_NS")) + #define __TZ_set_MSPLIM_NS(VALUE) (__arm_wsr("MSPLIM_NS", (VALUE))) + + #define __NOP __iar_builtin_no_operation + + #define __CLZ __iar_builtin_CLZ + #define __CLREX __iar_builtin_CLREX + + #define __DMB __iar_builtin_DMB + #define __DSB __iar_builtin_DSB + #define __ISB __iar_builtin_ISB + + #define __LDREXB __iar_builtin_LDREXB + #define __LDREXH __iar_builtin_LDREXH + #define __LDREXW 
__iar_builtin_LDREX + + #define __RBIT __iar_builtin_RBIT + #define __REV __iar_builtin_REV + #define __REV16 __iar_builtin_REV16 + + __IAR_FT int16_t __REVSH(int16_t val) + { + return (int16_t) __iar_builtin_REVSH(val); + } + + #define __ROR __iar_builtin_ROR + #define __RRX __iar_builtin_RRX + + #define __SEV __iar_builtin_SEV + + #if !__IAR_M0_FAMILY + #define __SSAT __iar_builtin_SSAT + #endif + + #define __STREXB __iar_builtin_STREXB + #define __STREXH __iar_builtin_STREXH + #define __STREXW __iar_builtin_STREX + + #if !__IAR_M0_FAMILY + #define __USAT __iar_builtin_USAT + #endif + + #define __WFE __iar_builtin_WFE + #define __WFI __iar_builtin_WFI + + #if __ARM_MEDIA__ + #define __SADD8 __iar_builtin_SADD8 + #define __QADD8 __iar_builtin_QADD8 + #define __SHADD8 __iar_builtin_SHADD8 + #define __UADD8 __iar_builtin_UADD8 + #define __UQADD8 __iar_builtin_UQADD8 + #define __UHADD8 __iar_builtin_UHADD8 + #define __SSUB8 __iar_builtin_SSUB8 + #define __QSUB8 __iar_builtin_QSUB8 + #define __SHSUB8 __iar_builtin_SHSUB8 + #define __USUB8 __iar_builtin_USUB8 + #define __UQSUB8 __iar_builtin_UQSUB8 + #define __UHSUB8 __iar_builtin_UHSUB8 + #define __SADD16 __iar_builtin_SADD16 + #define __QADD16 __iar_builtin_QADD16 + #define __SHADD16 __iar_builtin_SHADD16 + #define __UADD16 __iar_builtin_UADD16 + #define __UQADD16 __iar_builtin_UQADD16 + #define __UHADD16 __iar_builtin_UHADD16 + #define __SSUB16 __iar_builtin_SSUB16 + #define __QSUB16 __iar_builtin_QSUB16 + #define __SHSUB16 __iar_builtin_SHSUB16 + #define __USUB16 __iar_builtin_USUB16 + #define __UQSUB16 __iar_builtin_UQSUB16 + #define __UHSUB16 __iar_builtin_UHSUB16 + #define __SASX __iar_builtin_SASX + #define __QASX __iar_builtin_QASX + #define __SHASX __iar_builtin_SHASX + #define __UASX __iar_builtin_UASX + #define __UQASX __iar_builtin_UQASX + #define __UHASX __iar_builtin_UHASX + #define __SSAX __iar_builtin_SSAX + #define __QSAX __iar_builtin_QSAX + #define __SHSAX __iar_builtin_SHSAX + #define __USAX __iar_builtin_USAX + #define __UQSAX __iar_builtin_UQSAX + #define __UHSAX __iar_builtin_UHSAX + #define __USAD8 __iar_builtin_USAD8 + #define __USADA8 __iar_builtin_USADA8 + #define __SSAT16 __iar_builtin_SSAT16 + #define __USAT16 __iar_builtin_USAT16 + #define __UXTB16 __iar_builtin_UXTB16 + #define __UXTAB16 __iar_builtin_UXTAB16 + #define __SXTB16 __iar_builtin_SXTB16 + #define __SXTAB16 __iar_builtin_SXTAB16 + #define __SMUAD __iar_builtin_SMUAD + #define __SMUADX __iar_builtin_SMUADX + #define __SMMLA __iar_builtin_SMMLA + #define __SMLAD __iar_builtin_SMLAD + #define __SMLADX __iar_builtin_SMLADX + #define __SMLALD __iar_builtin_SMLALD + #define __SMLALDX __iar_builtin_SMLALDX + #define __SMUSD __iar_builtin_SMUSD + #define __SMUSDX __iar_builtin_SMUSDX + #define __SMLSD __iar_builtin_SMLSD + #define __SMLSDX __iar_builtin_SMLSDX + #define __SMLSLD __iar_builtin_SMLSLD + #define __SMLSLDX __iar_builtin_SMLSLDX + #define __SEL __iar_builtin_SEL + #define __QADD __iar_builtin_QADD + #define __QSUB __iar_builtin_QSUB + #define __PKHBT __iar_builtin_PKHBT + #define __PKHTB __iar_builtin_PKHTB + #endif + +#else /* __ICCARM_INTRINSICS_VERSION__ == 2 */ + + #if __IAR_M0_FAMILY + /* Avoid clash between intrinsics.h and arm_math.h when compiling for Cortex-M0. 
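A few reference values for the software fallbacks defined just below (shown only as an illustration, assuming the definitions as written):

    __CLZ(0x00000000U);   // 32, by the explicit zero check
    __CLZ(0x00010000U);   // 15 leading zero bits
    __RBIT(0x00000001U);  // 0x80000000U, bit order reversed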
*/ + #define __CLZ __cmsis_iar_clz_not_active + #define __SSAT __cmsis_iar_ssat_not_active + #define __USAT __cmsis_iar_usat_not_active + #define __RBIT __cmsis_iar_rbit_not_active + #define __get_APSR __cmsis_iar_get_APSR_not_active + #endif + + + #if (!((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) )) + #define __get_FPSCR __cmsis_iar_get_FPSR_not_active + #define __set_FPSCR __cmsis_iar_set_FPSR_not_active + #endif + + #ifdef __INTRINSICS_INCLUDED + #error intrinsics.h is already included previously! + #endif + + #include <intrinsics.h> + + #if __IAR_M0_FAMILY + /* Avoid clash between intrinsics.h and arm_math.h when compiling for Cortex-M0. */ + #undef __CLZ + #undef __SSAT + #undef __USAT + #undef __RBIT + #undef __get_APSR + + __STATIC_INLINE uint8_t __CLZ(uint32_t data) + { + if (data == 0U) { return 32U; } + + uint32_t count = 0U; + uint32_t mask = 0x80000000U; + + while ((data & mask) == 0U) + { + count += 1U; + mask = mask >> 1U; + } + return count; + } + + __STATIC_INLINE uint32_t __RBIT(uint32_t v) + { + uint8_t sc = 31U; + uint32_t r = v; + for (v >>= 1U; v; v >>= 1U) + { + r <<= 1U; + r |= v & 1U; + sc--; + } + return (r << sc); + } + + __STATIC_INLINE uint32_t __get_APSR(void) + { + uint32_t res; + __asm("MRS %0,APSR" : "=r" (res)); + return res; + } + + #endif + + #if (!((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) )) + #undef __get_FPSCR + #undef __set_FPSCR + #define __get_FPSCR() (0) + #define __set_FPSCR(VALUE) ((void)VALUE) + #endif + + #pragma diag_suppress=Pe940 + #pragma diag_suppress=Pe177 + + #define __enable_irq __enable_interrupt + #define __disable_irq __disable_interrupt + #define __NOP __no_operation + + #define __get_xPSR __get_PSR + + #if (!defined(__ARM_ARCH_6M__) || __ARM_ARCH_6M__==0) + + __IAR_FT uint32_t __LDREXW(uint32_t volatile *ptr) + { + return __LDREX((unsigned long *)ptr); + } + + __IAR_FT uint32_t __STREXW(uint32_t value, uint32_t volatile *ptr) + { + return __STREX(value, (unsigned long *)ptr); + } + #endif + + + /* __CORTEX_M is defined in core_cm0.h, core_cm3.h and core_cm4.h.
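A minimal usage sketch for the __LDREXW/__STREXW wrappers above, on cores where the exclusive-access intrinsics are available; the function name is illustrative, and the loop simply retries until the store-exclusive reports success (0):

    static inline void example_atomic_increment(volatile uint32_t *counter)
    {
      uint32_t val;
      do {
        val = __LDREXW(counter);                      // load-exclusive the current value
      } while (__STREXW(val + 1U, counter) != 0U);    // retry if the reservation was lost
    }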
*/ + #if (__CORTEX_M >= 0x03) + + __IAR_FT uint32_t __RRX(uint32_t value) + { + uint32_t result; + __ASM volatile("RRX %0, %1" : "=r"(result) : "r" (value)); + return(result); + } + + __IAR_FT void __set_BASEPRI_MAX(uint32_t value) + { + __asm volatile("MSR BASEPRI_MAX,%0"::"r" (value)); + } + + + #define __enable_fault_irq __enable_fiq + #define __disable_fault_irq __disable_fiq + + + #endif /* (__CORTEX_M >= 0x03) */ + + __IAR_FT uint32_t __ROR(uint32_t op1, uint32_t op2) + { + return (op1 >> op2) | (op1 << ((sizeof(op1)*8)-op2)); + } + + #if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) + + __IAR_FT uint32_t __get_MSPLIM(void) + { + uint32_t res; + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + res = 0U; + #else + __asm volatile("MRS %0,MSPLIM" : "=r" (res)); + #endif + return res; + } + + __IAR_FT void __set_MSPLIM(uint32_t value) + { + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)value; + #else + __asm volatile("MSR MSPLIM,%0" :: "r" (value)); + #endif + } + + __IAR_FT uint32_t __get_PSPLIM(void) + { + uint32_t res; + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + res = 0U; + #else + __asm volatile("MRS %0,PSPLIM" : "=r" (res)); + #endif + return res; + } + + __IAR_FT void __set_PSPLIM(uint32_t value) + { + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)value; + #else + __asm volatile("MSR PSPLIM,%0" :: "r" (value)); + #endif + } + + __IAR_FT uint32_t __TZ_get_CONTROL_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,CONTROL_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_CONTROL_NS(uint32_t value) + { + __asm volatile("MSR CONTROL_NS,%0" :: "r" (value)); + __iar_builtin_ISB(); + } + + __IAR_FT uint32_t __TZ_get_PSP_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,PSP_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_PSP_NS(uint32_t value) + { + __asm volatile("MSR PSP_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_MSP_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,MSP_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_MSP_NS(uint32_t value) + { + __asm volatile("MSR MSP_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_SP_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,SP_NS" : "=r" (res)); + return res; + } + __IAR_FT void __TZ_set_SP_NS(uint32_t value) + { + __asm volatile("MSR SP_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_PRIMASK_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,PRIMASK_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_PRIMASK_NS(uint32_t value) + { + __asm volatile("MSR PRIMASK_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_BASEPRI_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,BASEPRI_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_BASEPRI_NS(uint32_t value) + { + __asm volatile("MSR 
BASEPRI_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_FAULTMASK_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,FAULTMASK_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_FAULTMASK_NS(uint32_t value) + { + __asm volatile("MSR FAULTMASK_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_PSPLIM_NS(void) + { + uint32_t res; + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + res = 0U; + #else + __asm volatile("MRS %0,PSPLIM_NS" : "=r" (res)); + #endif + return res; + } + + __IAR_FT void __TZ_set_PSPLIM_NS(uint32_t value) + { + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)value; + #else + __asm volatile("MSR PSPLIM_NS,%0" :: "r" (value)); + #endif + } + + __IAR_FT uint32_t __TZ_get_MSPLIM_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,MSPLIM_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_MSPLIM_NS(uint32_t value) + { + __asm volatile("MSR MSPLIM_NS,%0" :: "r" (value)); + } + + #endif /* __ARM_ARCH_8M_MAIN__ or __ARM_ARCH_8M_BASE__ */ + +#endif /* __ICCARM_INTRINSICS_VERSION__ == 2 */ + +#define __BKPT(value) __asm volatile ("BKPT %0" : : "i"(value)) + +#if __IAR_M0_FAMILY + __STATIC_INLINE int32_t __SSAT(int32_t val, uint32_t sat) + { + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; + } + + __STATIC_INLINE uint32_t __USAT(int32_t val, uint32_t sat) + { + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; + } +#endif + +#if (__CORTEX_M >= 0x03) /* __CORTEX_M is defined in core_cm0.h, core_cm3.h and core_cm4.h. 
*/ + + __IAR_FT uint8_t __LDRBT(volatile uint8_t *addr) + { + uint32_t res; + __ASM volatile ("LDRBT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); + return ((uint8_t)res); + } + + __IAR_FT uint16_t __LDRHT(volatile uint16_t *addr) + { + uint32_t res; + __ASM volatile ("LDRHT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); + return ((uint16_t)res); + } + + __IAR_FT uint32_t __LDRT(volatile uint32_t *addr) + { + uint32_t res; + __ASM volatile ("LDRT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); + return res; + } + + __IAR_FT void __STRBT(uint8_t value, volatile uint8_t *addr) + { + __ASM volatile ("STRBT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory"); + } + + __IAR_FT void __STRHT(uint16_t value, volatile uint16_t *addr) + { + __ASM volatile ("STRHT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory"); + } + + __IAR_FT void __STRT(uint32_t value, volatile uint32_t *addr) + { + __ASM volatile ("STRT %1, [%0]" : : "r" (addr), "r" (value) : "memory"); + } + +#endif /* (__CORTEX_M >= 0x03) */ + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) + + + __IAR_FT uint8_t __LDAB(volatile uint8_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAB %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return ((uint8_t)res); + } + + __IAR_FT uint16_t __LDAH(volatile uint16_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAH %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return ((uint16_t)res); + } + + __IAR_FT uint32_t __LDA(volatile uint32_t *ptr) + { + uint32_t res; + __ASM volatile ("LDA %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return res; + } + + __IAR_FT void __STLB(uint8_t value, volatile uint8_t *ptr) + { + __ASM volatile ("STLB %1, [%0]" :: "r" (ptr), "r" (value) : "memory"); + } + + __IAR_FT void __STLH(uint16_t value, volatile uint16_t *ptr) + { + __ASM volatile ("STLH %1, [%0]" :: "r" (ptr), "r" (value) : "memory"); + } + + __IAR_FT void __STL(uint32_t value, volatile uint32_t *ptr) + { + __ASM volatile ("STL %1, [%0]" :: "r" (ptr), "r" (value) : "memory"); + } + + __IAR_FT uint8_t __LDAEXB(volatile uint8_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAEXB %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return ((uint8_t)res); + } + + __IAR_FT uint16_t __LDAEXH(volatile uint16_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAEXH %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return ((uint16_t)res); + } + + __IAR_FT uint32_t __LDAEX(volatile uint32_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAEX %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return res; + } + + __IAR_FT uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr) + { + uint32_t res; + __ASM volatile ("STLEXB %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory"); + return res; + } + + __IAR_FT uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr) + { + uint32_t res; + __ASM volatile ("STLEXH %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory"); + return res; + } + + __IAR_FT uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) + { + uint32_t res; + __ASM volatile ("STLEX %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory"); + return res; + } + +#endif /* __ARM_ARCH_8M_MAIN__ or __ARM_ARCH_8M_BASE__ */ + +#undef __IAR_FT +#undef __IAR_M0_FAMILY +#undef __ICCARM_V8 + +#pragma diag_default=Pe940 +#pragma diag_default=Pe177 + +#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) + +#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, 
ARG3)) + +#endif /* __CMSIS_ICCARM_H__ */ diff --git a/Drivers/CMSIS/Include/cmsis_version.h b/Drivers/CMSIS/Include/cmsis_version.h new file mode 100644 index 0000000..8b4765f --- /dev/null +++ b/Drivers/CMSIS/Include/cmsis_version.h @@ -0,0 +1,39 @@ +/**************************************************************************//** + * @file cmsis_version.h + * @brief CMSIS Core(M) Version definitions + * @version V5.0.5 + * @date 02. February 2022 + ******************************************************************************/ +/* + * Copyright (c) 2009-2022 ARM Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CMSIS_VERSION_H +#define __CMSIS_VERSION_H + +/* CMSIS Version definitions */ +#define __CM_CMSIS_VERSION_MAIN ( 5U) /*!< [31:16] CMSIS Core(M) main version */ +#define __CM_CMSIS_VERSION_SUB ( 6U) /*!< [15:0] CMSIS Core(M) sub version */ +#define __CM_CMSIS_VERSION ((__CM_CMSIS_VERSION_MAIN << 16U) | \ + __CM_CMSIS_VERSION_SUB ) /*!< CMSIS Core(M) version number */ +#endif diff --git a/Drivers/CMSIS/Include/core_armv81mml.h b/Drivers/CMSIS/Include/core_armv81mml.h new file mode 100644 index 0000000..94128a1 --- /dev/null +++ b/Drivers/CMSIS/Include/core_armv81mml.h @@ -0,0 +1,4228 @@ +/**************************************************************************//** + * @file core_armv81mml.h + * @brief CMSIS Armv8.1-M Mainline Core Peripheral Access Layer Header File + * @version V1.4.2 + * @date 13. October 2021 + ******************************************************************************/ +/* + * Copyright (c) 2018-2021 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_ARMV81MML_H_GENERIC +#define __CORE_ARMV81MML_H_GENERIC + +#include <stdint.h> + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_ARMV81MML + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS ARMV81MML definitions */ +#define __ARMv81MML_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __ARMv81MML_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __ARMv81MML_CMSIS_VERSION ((__ARMv81MML_CMSIS_VERSION_MAIN << 16U) | \ + __ARMv81MML_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (81U) /*!< Cortex-M Core */ + +#if defined ( __CC_ARM ) + #error Legacy Arm Compiler does not support Armv8.1-M target architecture. +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined 
(__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_ARMV81MML_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_ARMV81MML_H_DEPENDANT +#define __CORE_ARMV81MML_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __ARMv81MML_REV + #define __ARMv81MML_REV 0x0000U + #warning "__ARMv81MML_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #if __FPU_PRESENT != 0U + #ifndef __FPU_DP + #define __FPU_DP 0U + #warning "__FPU_DP not defined in device header file; using default!" + #endif + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __ICACHE_PRESENT + #define __ICACHE_PRESENT 0U + #warning "__ICACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DCACHE_PRESENT + #define __DCACHE_PRESENT 0U + #warning "__DCACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __PMU_PRESENT + #define __PMU_PRESENT 0U + #warning "__PMU_PRESENT not defined in device header file; using default!" + #endif + + #if __PMU_PRESENT != 0U + #ifndef __PMU_NUM_EVENTCNT + #define __PMU_NUM_EVENTCNT 2U + #warning "__PMU_NUM_EVENTCNT not defined in device header file; using default!" + #elif (__PMU_NUM_EVENTCNT > 31 || __PMU_NUM_EVENTCNT < 2) + #error "__PMU_NUM_EVENTCNT is out of range in device header file!" */ + #endif + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! 
Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group ARMv81MML */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t _reserved1:28; /*!< bit: 4..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
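As an illustration of how a device interrupt number (IRQn >= 0; system exceptions use SCB->SHPR instead) maps onto the register arrays defined below. This mirrors the usual CMSIS NVIC helper functions; the function name is illustrative, and NVIC, IRQn_Type and __NVIC_PRIO_BITS are assumed to come from the including device header and the rest of this file:

    static inline void example_enable_irq(IRQn_Type IRQn, uint32_t priority)
    {
      // one enable bit per interrupt, 32 interrupts per ISER word
      NVIC->ISER[((uint32_t)IRQn) >> 5UL] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
      // one byte-wide priority field per interrupt, left-aligned to __NVIC_PRIO_BITS
      NVIC->IPR[(uint32_t)IRQn] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & 0xFFUL);
    }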
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t 
RESERVED7[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + __IOM uint32_t RFSR; /*!< Offset: 0x204 (R/W) RAS Fault Status Register */ + uint32_t RESERVED4[14U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position 
*/ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_IESB_Pos 5U /*!< SCB AIRCR: Implicit ESB Enable Position */ +#define SCB_AIRCR_IESB_Msk (1UL << SCB_AIRCR_IESB_Pos) /*!< SCB AIRCR: Implicit ESB Enable Mask */ + +#define SCB_AIRCR_DIT_Pos 4U /*!< SCB AIRCR: Data Independent Timing Position */ +#define SCB_AIRCR_DIT_Msk (1UL << SCB_AIRCR_DIT_Pos) /*!< SCB AIRCR: Data Independent Timing Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define 
SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_TRD_Pos 20U /*!< SCB CCR: TRD Position */ +#define SCB_CCR_TRD_Msk (1UL << SCB_CCR_TRD_Pos) /*!< SCB CCR: TRD Mask */ + +#define SCB_CCR_LOB_Pos 19U /*!< SCB CCR: LOB Position */ +#define SCB_CCR_LOB_Msk (1UL << SCB_CCR_LOB_Pos) /*!< SCB CCR: LOB Mask */ + +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: 
MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status 
Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos 
(SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_PMU_Pos 5U /*!< SCB DFSR: PMU Position */ +#define SCB_DFSR_PMU_Msk (1UL << SCB_DFSR_PMU_Pos) /*!< SCB DFSR: PMU Mask */ + +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CP7_Pos 7U /*!< SCB NSACR: CP7 Position */ +#define SCB_NSACR_CP7_Msk (1UL << SCB_NSACR_CP7_Pos) /*!< SCB NSACR: CP7 Mask */ + +#define SCB_NSACR_CP6_Pos 6U /*!< SCB NSACR: CP6 Position */ +#define SCB_NSACR_CP6_Msk (1UL << SCB_NSACR_CP6_Pos) /*!< SCB NSACR: CP6 Mask */ + +#define SCB_NSACR_CP5_Pos 5U /*!< SCB NSACR: CP5 Position */ +#define SCB_NSACR_CP5_Msk (1UL << SCB_NSACR_CP5_Pos) /*!< SCB NSACR: CP5 Mask */ + +#define SCB_NSACR_CP4_Pos 4U /*!< SCB NSACR: CP4 Position */ +#define SCB_NSACR_CP4_Msk (1UL << SCB_NSACR_CP4_Pos) /*!< SCB NSACR: CP4 Mask */ + +#define SCB_NSACR_CP3_Pos 3U /*!< SCB NSACR: CP3 Position */ +#define SCB_NSACR_CP3_Msk (1UL << SCB_NSACR_CP3_Pos) /*!< SCB NSACR: CP3 Mask */ + +#define SCB_NSACR_CP2_Pos 2U /*!< SCB NSACR: CP2 Position */ +#define SCB_NSACR_CP2_Msk (1UL << SCB_NSACR_CP2_Pos) /*!< SCB NSACR: CP2 Mask */ + +#define 
SCB_NSACR_CP1_Pos 1U /*!< SCB NSACR: CP1 Position */ +#define SCB_NSACR_CP1_Msk (1UL << SCB_NSACR_CP1_Pos) /*!< SCB NSACR: CP1 Mask */ + +#define SCB_NSACR_CP0_Pos 0U /*!< SCB NSACR: CP0 Position */ +#define SCB_NSACR_CP0_Msk (1UL /*<< SCB_NSACR_CP0_Pos*/) /*!< SCB NSACR: CP0 Mask */ + +/* SCB Debug Feature Register 0 Definitions */ +#define SCB_ID_DFR_UDE_Pos 28U /*!< SCB ID_DFR: UDE Position */ +#define SCB_ID_DFR_UDE_Msk (0xFUL << SCB_ID_DFR_UDE_Pos) /*!< SCB ID_DFR: UDE Mask */ + +#define SCB_ID_DFR_MProfDbg_Pos 20U /*!< SCB ID_DFR: MProfDbg Position */ +#define SCB_ID_DFR_MProfDbg_Msk (0xFUL << SCB_ID_DFR_MProfDbg_Pos) /*!< SCB ID_DFR: MProfDbg Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< 
SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB RAS Fault Status Register Definitions */ +#define SCB_RFSR_V_Pos 31U /*!< SCB RFSR: V Position */ +#define SCB_RFSR_V_Msk (1UL << SCB_RFSR_V_Pos) /*!< SCB RFSR: V Mask */ + +#define SCB_RFSR_IS_Pos 16U /*!< SCB RFSR: IS Position */ +#define SCB_RFSR_IS_Msk (0x7FFFUL << SCB_RFSR_IS_Pos) /*!< SCB RFSR: IS Mask */ + +#define SCB_RFSR_UET_Pos 0U /*!< SCB RFSR: UET Position */ +#define SCB_RFSR_UET_Msk (3UL /*<< SCB_RFSR_UET_Pos*/) /*!< SCB RFSR: UET Mask */ + +/* SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
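For reference, a minimal usage sketch (not part of the original header) showing how the SysTick definitions below are typically combined to start a 1 ms tick; it assumes the CMSIS SystemCoreClock variable is defined elsewhere in this project:

  SysTick->LOAD = (SystemCoreClock / 1000U) - 1U;  /* reload value for a 1 ms period (24-bit field, see SysTick_LOAD_RELOAD_Msk) */
  SysTick->VAL  = 0U;                              /* clear the current counter value */
  SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk |     /* count the processor clock */
                  SysTick_CTRL_TICKINT_Msk   |     /* request an interrupt each time the counter wraps */
                  SysTick_CTRL_ENABLE_Msk;         /* start the counter */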
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
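For reference, a minimal sketch (not from the original header) of the usual stimulus-port write pattern, the same idea CMSIS implements in ITM_SendChar(); ch is a placeholder for the byte to send, and the ITM and stimulus port 0 are assumed to have been enabled by the debug tools:

  if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) &&  /* ITM enabled? */
      ((ITM->TER & 1UL) != 0UL))                   /* stimulus port #0 enabled? */
  {
    while (ITM->PORT[0U].u32 == 0UL) { __NOP(); }  /* wait until the port can accept data */
    ITM->PORT[0U].u8 = (uint8_t)ch;                /* write one byte to stimulus port #0 */
  }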
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) ITM Device Architecture Register */ + uint32_t RESERVED6[3U]; + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) ITM Device Type Register */ + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (0x1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (0x1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: 
STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + 
__IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ + uint32_t RESERVED32[934U]; + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ + uint32_t RESERVED33[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (0x1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ 
+#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define 
DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x1UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Sizes Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Sizes Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[809U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) Software Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) Software Lock Status Register */ + uint32_t RESERVED4[4U]; + __IM uint32_t TYPE; /*!< Offset: 0xFC8 (R/ ) Device Identifier Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_SWOSCALER_Pos 0U /*!< TPI ACPR: SWOSCALER Position */ +#define TPI_ACPR_SWOSCALER_Msk (0xFFFFUL /*<< TPI_ACPR_SWOSCALER_Pos*/) /*!< TPI ACPR: SWOSCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFmt_Pos 0U /*!< TPI FFCR: EnFmt 
Position */ +#define TPI_FFCR_EnFmt_Msk (0x3UL /*<< TPI_FFCR_EnFmt_Pos*/) /*!< TPI FFCR: EnFmt Mask */ + +/* TPI Periodic Synchronization Control Register Definitions */ +#define TPI_PSCR_PSCount_Pos 0U /*!< TPI PSCR: PSCount Position */ +#define TPI_PSCR_PSCount_Msk (0x1FUL /*<< TPI_PSCR_PSCount_Pos*/) /*!< TPI PSCR: PSCount Mask */ + +/* TPI Software Lock Status Register Definitions */ +#define TPI_LSR_nTT_Pos 2U /*!< TPI LSR: Not thirty-two bit. Position */ +#define TPI_LSR_nTT_Msk (0x1UL << TPI_LSR_nTT_Pos) /*!< TPI LSR: Not thirty-two bit. Mask */ + +#define TPI_LSR_SLK_Pos 1U /*!< TPI LSR: Software Lock status Position */ +#define TPI_LSR_SLK_Msk (0x1UL << TPI_LSR_SLK_Pos) /*!< TPI LSR: Software Lock status Mask */ + +#define TPI_LSR_SLI_Pos 0U /*!< TPI LSR: Software Lock implemented Position */ +#define TPI_LSR_SLI_Msk (0x1UL /*<< TPI_LSR_SLI_Pos*/) /*!< TPI LSR: Software Lock implemented Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFO depth Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFO depth Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + +#if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_PMU Performance Monitoring Unit (PMU) + \brief Type definitions for the Performance Monitoring Unit (PMU) + @{ + */ + +/** + \brief Structure type to access the Performance Monitoring Unit (PMU).
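For reference, a minimal sketch (not from the original header) of enabling one event counter plus the cycle counter using the definitions that follow; event_number is a placeholder for an architecture-defined PMU event, and PMU_CTRL_ENABLE_Msk is assumed to come from the PMU Control Register definitions further down in this header:

  PMU->EVTYPER[0] = event_number & PMU_EVTYPER_EVENTTOCNT_Msk;  /* select what event counter 0 counts */
  PMU->CNTENSET   = PMU_CNTENSET_CNT0_ENABLE_Msk                /* enable event counter 0 */
                  | PMU_CNTENSET_CCNTR_ENABLE_Msk;              /* enable the cycle counter */
  PMU->CTRL      |= PMU_CTRL_ENABLE_Msk;                        /* start the PMU */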
+ */ +typedef struct +{ + __IOM uint32_t EVCNTR[__PMU_NUM_EVENTCNT]; /*!< Offset: 0x0 (R/W) PMU Event Counter Registers */ +#if __PMU_NUM_EVENTCNT<31 + uint32_t RESERVED0[31U-__PMU_NUM_EVENTCNT]; +#endif + __IOM uint32_t CCNTR; /*!< Offset: 0x7C (R/W) PMU Cycle Counter Register */ + uint32_t RESERVED1[224]; + __IOM uint32_t EVTYPER[__PMU_NUM_EVENTCNT]; /*!< Offset: 0x400 (R/W) PMU Event Type and Filter Registers */ +#if __PMU_NUM_EVENTCNT<31 + uint32_t RESERVED2[31U-__PMU_NUM_EVENTCNT]; +#endif + __IOM uint32_t CCFILTR; /*!< Offset: 0x47C (R/W) PMU Cycle Counter Filter Register */ + uint32_t RESERVED3[480]; + __IOM uint32_t CNTENSET; /*!< Offset: 0xC00 (R/W) PMU Count Enable Set Register */ + uint32_t RESERVED4[7]; + __IOM uint32_t CNTENCLR; /*!< Offset: 0xC20 (R/W) PMU Count Enable Clear Register */ + uint32_t RESERVED5[7]; + __IOM uint32_t INTENSET; /*!< Offset: 0xC40 (R/W) PMU Interrupt Enable Set Register */ + uint32_t RESERVED6[7]; + __IOM uint32_t INTENCLR; /*!< Offset: 0xC60 (R/W) PMU Interrupt Enable Clear Register */ + uint32_t RESERVED7[7]; + __IOM uint32_t OVSCLR; /*!< Offset: 0xC80 (R/W) PMU Overflow Flag Status Clear Register */ + uint32_t RESERVED8[7]; + __IOM uint32_t SWINC; /*!< Offset: 0xCA0 (R/W) PMU Software Increment Register */ + uint32_t RESERVED9[7]; + __IOM uint32_t OVSSET; /*!< Offset: 0xCC0 (R/W) PMU Overflow Flag Status Set Register */ + uint32_t RESERVED10[79]; + __IOM uint32_t TYPE; /*!< Offset: 0xE00 (R/W) PMU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0xE04 (R/W) PMU Control Register */ + uint32_t RESERVED11[108]; + __IOM uint32_t AUTHSTATUS; /*!< Offset: 0xFB8 (R/W) PMU Authentication Status Register */ + __IOM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/W) PMU Device Architecture Register */ + uint32_t RESERVED12[3]; + __IOM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/W) PMU Device Type Register */ + __IOM uint32_t PIDR4; /*!< Offset: 0xFD0 (R/W) PMU Peripheral Identification Register 4 */ + uint32_t RESERVED13[3]; + __IOM uint32_t PIDR0; /*!< Offset: 0xFE0 (R/W) PMU Peripheral Identification Register 0 */ + __IOM uint32_t PIDR1; /*!< Offset: 0xFE4 (R/W) PMU Peripheral Identification Register 1 */ + __IOM uint32_t PIDR2; /*!< Offset: 0xFE8 (R/W) PMU Peripheral Identification Register 2 */ + __IOM uint32_t PIDR3; /*!< Offset: 0xFEC (R/W) PMU Peripheral Identification Register 3 */ + __IOM uint32_t CIDR0; /*!< Offset: 0xFF0 (R/W) PMU Component Identification Register 0 */ + __IOM uint32_t CIDR1; /*!< Offset: 0xFF4 (R/W) PMU Component Identification Register 1 */ + __IOM uint32_t CIDR2; /*!< Offset: 0xFF8 (R/W) PMU Component Identification Register 2 */ + __IOM uint32_t CIDR3; /*!< Offset: 0xFFC (R/W) PMU Component Identification Register 3 */ +} PMU_Type; + +/** \brief PMU Event Counter Registers (0-30) Definitions */ + +#define PMU_EVCNTR_CNT_Pos 0U /*!< PMU EVCNTR: Counter Position */ +#define PMU_EVCNTR_CNT_Msk (0xFFFFUL /*<< PMU_EVCNTRx_CNT_Pos*/) /*!< PMU EVCNTR: Counter Mask */ + +/** \brief PMU Event Type and Filter Registers (0-30) Definitions */ + +#define PMU_EVTYPER_EVENTTOCNT_Pos 0U /*!< PMU EVTYPER: Event to Count Position */ +#define PMU_EVTYPER_EVENTTOCNT_Msk (0xFFFFUL /*<< EVTYPERx_EVENTTOCNT_Pos*/) /*!< PMU EVTYPER: Event to Count Mask */ + +/** \brief PMU Count Enable Set Register Definitions */ + +#define PMU_CNTENSET_CNT0_ENABLE_Pos 0U /*!< PMU CNTENSET: Event Counter 0 Enable Set Position */ +#define PMU_CNTENSET_CNT0_ENABLE_Msk (1UL /*<< PMU_CNTENSET_CNT0_ENABLE_Pos*/) /*!< PMU CNTENSET: Event Counter 0 Enable Set Mask */ + +#define 
PMU_CNTENSET_CNT1_ENABLE_Pos 1U /*!< PMU CNTENSET: Event Counter 1 Enable Set Position */ +#define PMU_CNTENSET_CNT1_ENABLE_Msk (1UL << PMU_CNTENSET_CNT1_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 1 Enable Set Mask */ + +#define PMU_CNTENSET_CNT2_ENABLE_Pos 2U /*!< PMU CNTENSET: Event Counter 2 Enable Set Position */ +#define PMU_CNTENSET_CNT2_ENABLE_Msk (1UL << PMU_CNTENSET_CNT2_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 2 Enable Set Mask */ + +#define PMU_CNTENSET_CNT3_ENABLE_Pos 3U /*!< PMU CNTENSET: Event Counter 3 Enable Set Position */ +#define PMU_CNTENSET_CNT3_ENABLE_Msk (1UL << PMU_CNTENSET_CNT3_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 3 Enable Set Mask */ + +#define PMU_CNTENSET_CNT4_ENABLE_Pos 4U /*!< PMU CNTENSET: Event Counter 4 Enable Set Position */ +#define PMU_CNTENSET_CNT4_ENABLE_Msk (1UL << PMU_CNTENSET_CNT4_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 4 Enable Set Mask */ + +#define PMU_CNTENSET_CNT5_ENABLE_Pos 5U /*!< PMU CNTENSET: Event Counter 5 Enable Set Position */ +#define PMU_CNTENSET_CNT5_ENABLE_Msk (1UL << PMU_CNTENSET_CNT5_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 5 Enable Set Mask */ + +#define PMU_CNTENSET_CNT6_ENABLE_Pos 6U /*!< PMU CNTENSET: Event Counter 6 Enable Set Position */ +#define PMU_CNTENSET_CNT6_ENABLE_Msk (1UL << PMU_CNTENSET_CNT6_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 6 Enable Set Mask */ + +#define PMU_CNTENSET_CNT7_ENABLE_Pos 7U /*!< PMU CNTENSET: Event Counter 7 Enable Set Position */ +#define PMU_CNTENSET_CNT7_ENABLE_Msk (1UL << PMU_CNTENSET_CNT7_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 7 Enable Set Mask */ + +#define PMU_CNTENSET_CNT8_ENABLE_Pos 8U /*!< PMU CNTENSET: Event Counter 8 Enable Set Position */ +#define PMU_CNTENSET_CNT8_ENABLE_Msk (1UL << PMU_CNTENSET_CNT8_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 8 Enable Set Mask */ + +#define PMU_CNTENSET_CNT9_ENABLE_Pos 9U /*!< PMU CNTENSET: Event Counter 9 Enable Set Position */ +#define PMU_CNTENSET_CNT9_ENABLE_Msk (1UL << PMU_CNTENSET_CNT9_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 9 Enable Set Mask */ + +#define PMU_CNTENSET_CNT10_ENABLE_Pos 10U /*!< PMU CNTENSET: Event Counter 10 Enable Set Position */ +#define PMU_CNTENSET_CNT10_ENABLE_Msk (1UL << PMU_CNTENSET_CNT10_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 10 Enable Set Mask */ + +#define PMU_CNTENSET_CNT11_ENABLE_Pos 11U /*!< PMU CNTENSET: Event Counter 11 Enable Set Position */ +#define PMU_CNTENSET_CNT11_ENABLE_Msk (1UL << PMU_CNTENSET_CNT11_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 11 Enable Set Mask */ + +#define PMU_CNTENSET_CNT12_ENABLE_Pos 12U /*!< PMU CNTENSET: Event Counter 12 Enable Set Position */ +#define PMU_CNTENSET_CNT12_ENABLE_Msk (1UL << PMU_CNTENSET_CNT12_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 12 Enable Set Mask */ + +#define PMU_CNTENSET_CNT13_ENABLE_Pos 13U /*!< PMU CNTENSET: Event Counter 13 Enable Set Position */ +#define PMU_CNTENSET_CNT13_ENABLE_Msk (1UL << PMU_CNTENSET_CNT13_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 13 Enable Set Mask */ + +#define PMU_CNTENSET_CNT14_ENABLE_Pos 14U /*!< PMU CNTENSET: Event Counter 14 Enable Set Position */ +#define PMU_CNTENSET_CNT14_ENABLE_Msk (1UL << PMU_CNTENSET_CNT14_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 14 Enable Set Mask */ + +#define PMU_CNTENSET_CNT15_ENABLE_Pos 15U /*!< PMU CNTENSET: Event Counter 15 Enable Set Position */ +#define PMU_CNTENSET_CNT15_ENABLE_Msk (1UL << PMU_CNTENSET_CNT15_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 15 Enable Set Mask */ + +#define PMU_CNTENSET_CNT16_ENABLE_Pos 16U /*!< PMU CNTENSET: 
Event Counter 16 Enable Set Position */ +#define PMU_CNTENSET_CNT16_ENABLE_Msk (1UL << PMU_CNTENSET_CNT16_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 16 Enable Set Mask */ + +#define PMU_CNTENSET_CNT17_ENABLE_Pos 17U /*!< PMU CNTENSET: Event Counter 17 Enable Set Position */ +#define PMU_CNTENSET_CNT17_ENABLE_Msk (1UL << PMU_CNTENSET_CNT17_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 17 Enable Set Mask */ + +#define PMU_CNTENSET_CNT18_ENABLE_Pos 18U /*!< PMU CNTENSET: Event Counter 18 Enable Set Position */ +#define PMU_CNTENSET_CNT18_ENABLE_Msk (1UL << PMU_CNTENSET_CNT18_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 18 Enable Set Mask */ + +#define PMU_CNTENSET_CNT19_ENABLE_Pos 19U /*!< PMU CNTENSET: Event Counter 19 Enable Set Position */ +#define PMU_CNTENSET_CNT19_ENABLE_Msk (1UL << PMU_CNTENSET_CNT19_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 19 Enable Set Mask */ + +#define PMU_CNTENSET_CNT20_ENABLE_Pos 20U /*!< PMU CNTENSET: Event Counter 20 Enable Set Position */ +#define PMU_CNTENSET_CNT20_ENABLE_Msk (1UL << PMU_CNTENSET_CNT20_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 20 Enable Set Mask */ + +#define PMU_CNTENSET_CNT21_ENABLE_Pos 21U /*!< PMU CNTENSET: Event Counter 21 Enable Set Position */ +#define PMU_CNTENSET_CNT21_ENABLE_Msk (1UL << PMU_CNTENSET_CNT21_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 21 Enable Set Mask */ + +#define PMU_CNTENSET_CNT22_ENABLE_Pos 22U /*!< PMU CNTENSET: Event Counter 22 Enable Set Position */ +#define PMU_CNTENSET_CNT22_ENABLE_Msk (1UL << PMU_CNTENSET_CNT22_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 22 Enable Set Mask */ + +#define PMU_CNTENSET_CNT23_ENABLE_Pos 23U /*!< PMU CNTENSET: Event Counter 23 Enable Set Position */ +#define PMU_CNTENSET_CNT23_ENABLE_Msk (1UL << PMU_CNTENSET_CNT23_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 23 Enable Set Mask */ + +#define PMU_CNTENSET_CNT24_ENABLE_Pos 24U /*!< PMU CNTENSET: Event Counter 24 Enable Set Position */ +#define PMU_CNTENSET_CNT24_ENABLE_Msk (1UL << PMU_CNTENSET_CNT24_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 24 Enable Set Mask */ + +#define PMU_CNTENSET_CNT25_ENABLE_Pos 25U /*!< PMU CNTENSET: Event Counter 25 Enable Set Position */ +#define PMU_CNTENSET_CNT25_ENABLE_Msk (1UL << PMU_CNTENSET_CNT25_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 25 Enable Set Mask */ + +#define PMU_CNTENSET_CNT26_ENABLE_Pos 26U /*!< PMU CNTENSET: Event Counter 26 Enable Set Position */ +#define PMU_CNTENSET_CNT26_ENABLE_Msk (1UL << PMU_CNTENSET_CNT26_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 26 Enable Set Mask */ + +#define PMU_CNTENSET_CNT27_ENABLE_Pos 27U /*!< PMU CNTENSET: Event Counter 27 Enable Set Position */ +#define PMU_CNTENSET_CNT27_ENABLE_Msk (1UL << PMU_CNTENSET_CNT27_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 27 Enable Set Mask */ + +#define PMU_CNTENSET_CNT28_ENABLE_Pos 28U /*!< PMU CNTENSET: Event Counter 28 Enable Set Position */ +#define PMU_CNTENSET_CNT28_ENABLE_Msk (1UL << PMU_CNTENSET_CNT28_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 28 Enable Set Mask */ + +#define PMU_CNTENSET_CNT29_ENABLE_Pos 29U /*!< PMU CNTENSET: Event Counter 29 Enable Set Position */ +#define PMU_CNTENSET_CNT29_ENABLE_Msk (1UL << PMU_CNTENSET_CNT29_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 29 Enable Set Mask */ + +#define PMU_CNTENSET_CNT30_ENABLE_Pos 30U /*!< PMU CNTENSET: Event Counter 30 Enable Set Position */ +#define PMU_CNTENSET_CNT30_ENABLE_Msk (1UL << PMU_CNTENSET_CNT30_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 30 Enable Set Mask */ + +#define PMU_CNTENSET_CCNTR_ENABLE_Pos 31U /*!< PMU CNTENSET: 
Cycle Counter Enable Set Position */ +#define PMU_CNTENSET_CCNTR_ENABLE_Msk (1UL << PMU_CNTENSET_CCNTR_ENABLE_Pos) /*!< PMU CNTENSET: Cycle Counter Enable Set Mask */ + +/** \brief PMU Count Enable Clear Register Definitions */ + +#define PMU_CNTENCLR_CNT0_ENABLE_Pos 0U /*!< PMU CNTENCLR: Event Counter 0 Enable Clear Position */ +#define PMU_CNTENCLR_CNT0_ENABLE_Msk (1UL /*<< PMU_CNTENCLR_CNT0_ENABLE_Pos*/) /*!< PMU CNTENCLR: Event Counter 0 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT1_ENABLE_Pos 1U /*!< PMU CNTENCLR: Event Counter 1 Enable Clear Position */ +#define PMU_CNTENCLR_CNT1_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT1_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 1 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT2_ENABLE_Pos 2U /*!< PMU CNTENCLR: Event Counter 2 Enable Clear Position */ +#define PMU_CNTENCLR_CNT2_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT2_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 2 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT3_ENABLE_Pos 3U /*!< PMU CNTENCLR: Event Counter 3 Enable Clear Position */ +#define PMU_CNTENCLR_CNT3_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT3_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 3 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT4_ENABLE_Pos 4U /*!< PMU CNTENCLR: Event Counter 4 Enable Clear Position */ +#define PMU_CNTENCLR_CNT4_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT4_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 4 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT5_ENABLE_Pos 5U /*!< PMU CNTENCLR: Event Counter 5 Enable Clear Position */ +#define PMU_CNTENCLR_CNT5_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT5_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 5 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT6_ENABLE_Pos 6U /*!< PMU CNTENCLR: Event Counter 6 Enable Clear Position */ +#define PMU_CNTENCLR_CNT6_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT6_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 6 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT7_ENABLE_Pos 7U /*!< PMU CNTENCLR: Event Counter 7 Enable Clear Position */ +#define PMU_CNTENCLR_CNT7_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT7_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 7 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT8_ENABLE_Pos 8U /*!< PMU CNTENCLR: Event Counter 8 Enable Clear Position */ +#define PMU_CNTENCLR_CNT8_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT8_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 8 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT9_ENABLE_Pos 9U /*!< PMU CNTENCLR: Event Counter 9 Enable Clear Position */ +#define PMU_CNTENCLR_CNT9_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT9_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 9 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT10_ENABLE_Pos 10U /*!< PMU CNTENCLR: Event Counter 10 Enable Clear Position */ +#define PMU_CNTENCLR_CNT10_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT10_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 10 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT11_ENABLE_Pos 11U /*!< PMU CNTENCLR: Event Counter 11 Enable Clear Position */ +#define PMU_CNTENCLR_CNT11_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT11_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 11 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT12_ENABLE_Pos 12U /*!< PMU CNTENCLR: Event Counter 12 Enable Clear Position */ +#define PMU_CNTENCLR_CNT12_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT12_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 12 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT13_ENABLE_Pos 13U /*!< PMU CNTENCLR: Event Counter 13 Enable Clear Position */ +#define PMU_CNTENCLR_CNT13_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT13_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 13 Enable Clear Mask */ + +#define 
PMU_CNTENCLR_CNT14_ENABLE_Pos 14U /*!< PMU CNTENCLR: Event Counter 14 Enable Clear Position */ +#define PMU_CNTENCLR_CNT14_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT14_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 14 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT15_ENABLE_Pos 15U /*!< PMU CNTENCLR: Event Counter 15 Enable Clear Position */ +#define PMU_CNTENCLR_CNT15_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT15_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 15 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT16_ENABLE_Pos 16U /*!< PMU CNTENCLR: Event Counter 16 Enable Clear Position */ +#define PMU_CNTENCLR_CNT16_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT16_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 16 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT17_ENABLE_Pos 17U /*!< PMU CNTENCLR: Event Counter 17 Enable Clear Position */ +#define PMU_CNTENCLR_CNT17_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT17_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 17 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT18_ENABLE_Pos 18U /*!< PMU CNTENCLR: Event Counter 18 Enable Clear Position */ +#define PMU_CNTENCLR_CNT18_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT18_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 18 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT19_ENABLE_Pos 19U /*!< PMU CNTENCLR: Event Counter 19 Enable Clear Position */ +#define PMU_CNTENCLR_CNT19_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT19_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 19 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT20_ENABLE_Pos 20U /*!< PMU CNTENCLR: Event Counter 20 Enable Clear Position */ +#define PMU_CNTENCLR_CNT20_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT20_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 20 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT21_ENABLE_Pos 21U /*!< PMU CNTENCLR: Event Counter 21 Enable Clear Position */ +#define PMU_CNTENCLR_CNT21_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT21_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 21 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT22_ENABLE_Pos 22U /*!< PMU CNTENCLR: Event Counter 22 Enable Clear Position */ +#define PMU_CNTENCLR_CNT22_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT22_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 22 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT23_ENABLE_Pos 23U /*!< PMU CNTENCLR: Event Counter 23 Enable Clear Position */ +#define PMU_CNTENCLR_CNT23_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT23_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 23 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT24_ENABLE_Pos 24U /*!< PMU CNTENCLR: Event Counter 24 Enable Clear Position */ +#define PMU_CNTENCLR_CNT24_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT24_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 24 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT25_ENABLE_Pos 25U /*!< PMU CNTENCLR: Event Counter 25 Enable Clear Position */ +#define PMU_CNTENCLR_CNT25_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT25_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 25 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT26_ENABLE_Pos 26U /*!< PMU CNTENCLR: Event Counter 26 Enable Clear Position */ +#define PMU_CNTENCLR_CNT26_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT26_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 26 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT27_ENABLE_Pos 27U /*!< PMU CNTENCLR: Event Counter 27 Enable Clear Position */ +#define PMU_CNTENCLR_CNT27_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT27_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 27 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT28_ENABLE_Pos 28U /*!< PMU CNTENCLR: Event Counter 28 Enable Clear Position */ +#define PMU_CNTENCLR_CNT28_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT28_ENABLE_Pos) /*!< 
PMU CNTENCLR: Event Counter 28 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT29_ENABLE_Pos 29U /*!< PMU CNTENCLR: Event Counter 29 Enable Clear Position */ +#define PMU_CNTENCLR_CNT29_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT29_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 29 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT30_ENABLE_Pos 30U /*!< PMU CNTENCLR: Event Counter 30 Enable Clear Position */ +#define PMU_CNTENCLR_CNT30_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT30_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 30 Enable Clear Mask */ + +#define PMU_CNTENCLR_CCNTR_ENABLE_Pos 31U /*!< PMU CNTENCLR: Cycle Counter Enable Clear Position */ +#define PMU_CNTENCLR_CCNTR_ENABLE_Msk (1UL << PMU_CNTENCLR_CCNTR_ENABLE_Pos) /*!< PMU CNTENCLR: Cycle Counter Enable Clear Mask */ + +/** \brief PMU Interrupt Enable Set Register Definitions */ + +#define PMU_INTENSET_CNT0_ENABLE_Pos 0U /*!< PMU INTENSET: Event Counter 0 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT0_ENABLE_Msk (1UL /*<< PMU_INTENSET_CNT0_ENABLE_Pos*/) /*!< PMU INTENSET: Event Counter 0 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT1_ENABLE_Pos 1U /*!< PMU INTENSET: Event Counter 1 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT1_ENABLE_Msk (1UL << PMU_INTENSET_CNT1_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 1 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT2_ENABLE_Pos 2U /*!< PMU INTENSET: Event Counter 2 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT2_ENABLE_Msk (1UL << PMU_INTENSET_CNT2_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 2 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT3_ENABLE_Pos 3U /*!< PMU INTENSET: Event Counter 3 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT3_ENABLE_Msk (1UL << PMU_INTENSET_CNT3_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 3 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT4_ENABLE_Pos 4U /*!< PMU INTENSET: Event Counter 4 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT4_ENABLE_Msk (1UL << PMU_INTENSET_CNT4_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 4 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT5_ENABLE_Pos 5U /*!< PMU INTENSET: Event Counter 5 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT5_ENABLE_Msk (1UL << PMU_INTENSET_CNT5_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 5 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT6_ENABLE_Pos 6U /*!< PMU INTENSET: Event Counter 6 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT6_ENABLE_Msk (1UL << PMU_INTENSET_CNT6_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 6 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT7_ENABLE_Pos 7U /*!< PMU INTENSET: Event Counter 7 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT7_ENABLE_Msk (1UL << PMU_INTENSET_CNT7_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 7 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT8_ENABLE_Pos 8U /*!< PMU INTENSET: Event Counter 8 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT8_ENABLE_Msk (1UL << PMU_INTENSET_CNT8_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 8 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT9_ENABLE_Pos 9U /*!< PMU INTENSET: Event Counter 9 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT9_ENABLE_Msk (1UL << PMU_INTENSET_CNT9_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 9 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT10_ENABLE_Pos 10U /*!< PMU INTENSET: Event Counter 10 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT10_ENABLE_Msk (1UL << PMU_INTENSET_CNT10_ENABLE_Pos) /*!< PMU 
INTENSET: Event Counter 10 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT11_ENABLE_Pos 11U /*!< PMU INTENSET: Event Counter 11 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT11_ENABLE_Msk (1UL << PMU_INTENSET_CNT11_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 11 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT12_ENABLE_Pos 12U /*!< PMU INTENSET: Event Counter 12 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT12_ENABLE_Msk (1UL << PMU_INTENSET_CNT12_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 12 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT13_ENABLE_Pos 13U /*!< PMU INTENSET: Event Counter 13 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT13_ENABLE_Msk (1UL << PMU_INTENSET_CNT13_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 13 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT14_ENABLE_Pos 14U /*!< PMU INTENSET: Event Counter 14 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT14_ENABLE_Msk (1UL << PMU_INTENSET_CNT14_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 14 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT15_ENABLE_Pos 15U /*!< PMU INTENSET: Event Counter 15 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT15_ENABLE_Msk (1UL << PMU_INTENSET_CNT15_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 15 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT16_ENABLE_Pos 16U /*!< PMU INTENSET: Event Counter 16 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT16_ENABLE_Msk (1UL << PMU_INTENSET_CNT16_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 16 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT17_ENABLE_Pos 17U /*!< PMU INTENSET: Event Counter 17 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT17_ENABLE_Msk (1UL << PMU_INTENSET_CNT17_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 17 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT18_ENABLE_Pos 18U /*!< PMU INTENSET: Event Counter 18 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT18_ENABLE_Msk (1UL << PMU_INTENSET_CNT18_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 18 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT19_ENABLE_Pos 19U /*!< PMU INTENSET: Event Counter 19 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT19_ENABLE_Msk (1UL << PMU_INTENSET_CNT19_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 19 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT20_ENABLE_Pos 20U /*!< PMU INTENSET: Event Counter 20 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT20_ENABLE_Msk (1UL << PMU_INTENSET_CNT20_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 20 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT21_ENABLE_Pos 21U /*!< PMU INTENSET: Event Counter 21 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT21_ENABLE_Msk (1UL << PMU_INTENSET_CNT21_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 21 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT22_ENABLE_Pos 22U /*!< PMU INTENSET: Event Counter 22 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT22_ENABLE_Msk (1UL << PMU_INTENSET_CNT22_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 22 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT23_ENABLE_Pos 23U /*!< PMU INTENSET: Event Counter 23 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT23_ENABLE_Msk (1UL << PMU_INTENSET_CNT23_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 23 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT24_ENABLE_Pos 24U /*!< PMU INTENSET: Event Counter 24 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT24_ENABLE_Msk (1UL 
<< PMU_INTENSET_CNT24_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 24 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT25_ENABLE_Pos 25U /*!< PMU INTENSET: Event Counter 25 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT25_ENABLE_Msk (1UL << PMU_INTENSET_CNT25_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 25 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT26_ENABLE_Pos 26U /*!< PMU INTENSET: Event Counter 26 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT26_ENABLE_Msk (1UL << PMU_INTENSET_CNT26_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 26 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT27_ENABLE_Pos 27U /*!< PMU INTENSET: Event Counter 27 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT27_ENABLE_Msk (1UL << PMU_INTENSET_CNT27_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 27 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT28_ENABLE_Pos 28U /*!< PMU INTENSET: Event Counter 28 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT28_ENABLE_Msk (1UL << PMU_INTENSET_CNT28_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 28 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT29_ENABLE_Pos 29U /*!< PMU INTENSET: Event Counter 29 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT29_ENABLE_Msk (1UL << PMU_INTENSET_CNT29_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 29 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT30_ENABLE_Pos 30U /*!< PMU INTENSET: Event Counter 30 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT30_ENABLE_Msk (1UL << PMU_INTENSET_CNT30_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 30 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CYCCNT_ENABLE_Pos 31U /*!< PMU INTENSET: Cycle Counter Interrupt Enable Set Position */ +#define PMU_INTENSET_CYCCNT_ENABLE_Msk (1UL << PMU_INTENSET_CYCCNT_ENABLE_Pos) /*!< PMU INTENSET: Cycle Counter Interrupt Enable Set Mask */ + +/** \brief PMU Interrupt Enable Clear Register Definitions */ + +#define PMU_INTENCLR_CNT0_ENABLE_Pos 0U /*!< PMU INTENCLR: Event Counter 0 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT0_ENABLE_Msk (1UL /*<< PMU_INTENCLR_CNT0_ENABLE_Pos*/) /*!< PMU INTENCLR: Event Counter 0 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT1_ENABLE_Pos 1U /*!< PMU INTENCLR: Event Counter 1 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT1_ENABLE_Msk (1UL << PMU_INTENCLR_CNT1_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 1 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT2_ENABLE_Pos 2U /*!< PMU INTENCLR: Event Counter 2 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT2_ENABLE_Msk (1UL << PMU_INTENCLR_CNT2_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 2 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT3_ENABLE_Pos 3U /*!< PMU INTENCLR: Event Counter 3 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT3_ENABLE_Msk (1UL << PMU_INTENCLR_CNT3_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 3 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT4_ENABLE_Pos 4U /*!< PMU INTENCLR: Event Counter 4 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT4_ENABLE_Msk (1UL << PMU_INTENCLR_CNT4_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 4 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT5_ENABLE_Pos 5U /*!< PMU INTENCLR: Event Counter 5 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT5_ENABLE_Msk (1UL << PMU_INTENCLR_CNT5_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 5 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT6_ENABLE_Pos 6U /*!< PMU INTENCLR: 
Event Counter 6 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT6_ENABLE_Msk (1UL << PMU_INTENCLR_CNT6_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 6 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT7_ENABLE_Pos 7U /*!< PMU INTENCLR: Event Counter 7 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT7_ENABLE_Msk (1UL << PMU_INTENCLR_CNT7_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 7 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT8_ENABLE_Pos 8U /*!< PMU INTENCLR: Event Counter 8 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT8_ENABLE_Msk (1UL << PMU_INTENCLR_CNT8_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 8 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT9_ENABLE_Pos 9U /*!< PMU INTENCLR: Event Counter 9 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT9_ENABLE_Msk (1UL << PMU_INTENCLR_CNT9_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 9 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT10_ENABLE_Pos 10U /*!< PMU INTENCLR: Event Counter 10 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT10_ENABLE_Msk (1UL << PMU_INTENCLR_CNT10_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 10 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT11_ENABLE_Pos 11U /*!< PMU INTENCLR: Event Counter 11 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT11_ENABLE_Msk (1UL << PMU_INTENCLR_CNT11_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 11 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT12_ENABLE_Pos 12U /*!< PMU INTENCLR: Event Counter 12 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT12_ENABLE_Msk (1UL << PMU_INTENCLR_CNT12_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 12 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT13_ENABLE_Pos 13U /*!< PMU INTENCLR: Event Counter 13 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT13_ENABLE_Msk (1UL << PMU_INTENCLR_CNT13_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 13 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT14_ENABLE_Pos 14U /*!< PMU INTENCLR: Event Counter 14 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT14_ENABLE_Msk (1UL << PMU_INTENCLR_CNT14_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 14 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT15_ENABLE_Pos 15U /*!< PMU INTENCLR: Event Counter 15 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT15_ENABLE_Msk (1UL << PMU_INTENCLR_CNT15_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 15 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT16_ENABLE_Pos 16U /*!< PMU INTENCLR: Event Counter 16 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT16_ENABLE_Msk (1UL << PMU_INTENCLR_CNT16_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 16 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT17_ENABLE_Pos 17U /*!< PMU INTENCLR: Event Counter 17 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT17_ENABLE_Msk (1UL << PMU_INTENCLR_CNT17_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 17 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT18_ENABLE_Pos 18U /*!< PMU INTENCLR: Event Counter 18 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT18_ENABLE_Msk (1UL << PMU_INTENCLR_CNT18_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 18 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT19_ENABLE_Pos 19U /*!< PMU INTENCLR: Event Counter 19 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT19_ENABLE_Msk (1UL << PMU_INTENCLR_CNT19_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 19 Interrupt Enable 
Clear Mask */ + +#define PMU_INTENCLR_CNT20_ENABLE_Pos 20U /*!< PMU INTENCLR: Event Counter 20 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT20_ENABLE_Msk (1UL << PMU_INTENCLR_CNT20_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 20 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT21_ENABLE_Pos 21U /*!< PMU INTENCLR: Event Counter 21 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT21_ENABLE_Msk (1UL << PMU_INTENCLR_CNT21_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 21 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT22_ENABLE_Pos 22U /*!< PMU INTENCLR: Event Counter 22 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT22_ENABLE_Msk (1UL << PMU_INTENCLR_CNT22_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 22 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT23_ENABLE_Pos 23U /*!< PMU INTENCLR: Event Counter 23 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT23_ENABLE_Msk (1UL << PMU_INTENCLR_CNT23_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 23 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT24_ENABLE_Pos 24U /*!< PMU INTENCLR: Event Counter 24 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT24_ENABLE_Msk (1UL << PMU_INTENCLR_CNT24_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 24 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT25_ENABLE_Pos 25U /*!< PMU INTENCLR: Event Counter 25 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT25_ENABLE_Msk (1UL << PMU_INTENCLR_CNT25_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 25 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT26_ENABLE_Pos 26U /*!< PMU INTENCLR: Event Counter 26 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT26_ENABLE_Msk (1UL << PMU_INTENCLR_CNT26_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 26 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT27_ENABLE_Pos 27U /*!< PMU INTENCLR: Event Counter 27 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT27_ENABLE_Msk (1UL << PMU_INTENCLR_CNT27_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 27 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT28_ENABLE_Pos 28U /*!< PMU INTENCLR: Event Counter 28 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT28_ENABLE_Msk (1UL << PMU_INTENCLR_CNT28_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 28 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT29_ENABLE_Pos 29U /*!< PMU INTENCLR: Event Counter 29 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT29_ENABLE_Msk (1UL << PMU_INTENCLR_CNT29_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 29 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT30_ENABLE_Pos 30U /*!< PMU INTENCLR: Event Counter 30 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT30_ENABLE_Msk (1UL << PMU_INTENCLR_CNT30_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 30 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CYCCNT_ENABLE_Pos 31U /*!< PMU INTENCLR: Cycle Counter Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CYCCNT_ENABLE_Msk (1UL << PMU_INTENCLR_CYCCNT_ENABLE_Pos) /*!< PMU INTENCLR: Cycle Counter Interrupt Enable Clear Mask */ + +/** \brief PMU Overflow Flag Status Set Register Definitions */ + +#define PMU_OVSSET_CNT0_STATUS_Pos 0U /*!< PMU OVSSET: Event Counter 0 Overflow Set Position */ +#define PMU_OVSSET_CNT0_STATUS_Msk (1UL /*<< PMU_OVSSET_CNT0_STATUS_Pos*/) /*!< PMU OVSSET: Event Counter 0 Overflow Set Mask */ + +#define PMU_OVSSET_CNT1_STATUS_Pos 1U /*!< PMU OVSSET: Event Counter 1 Overflow Set Position */ +#define 
PMU_OVSSET_CNT1_STATUS_Msk (1UL << PMU_OVSSET_CNT1_STATUS_Pos) /*!< PMU OVSSET: Event Counter 1 Overflow Set Mask */ + +#define PMU_OVSSET_CNT2_STATUS_Pos 2U /*!< PMU OVSSET: Event Counter 2 Overflow Set Position */ +#define PMU_OVSSET_CNT2_STATUS_Msk (1UL << PMU_OVSSET_CNT2_STATUS_Pos) /*!< PMU OVSSET: Event Counter 2 Overflow Set Mask */ + +#define PMU_OVSSET_CNT3_STATUS_Pos 3U /*!< PMU OVSSET: Event Counter 3 Overflow Set Position */ +#define PMU_OVSSET_CNT3_STATUS_Msk (1UL << PMU_OVSSET_CNT3_STATUS_Pos) /*!< PMU OVSSET: Event Counter 3 Overflow Set Mask */ + +#define PMU_OVSSET_CNT4_STATUS_Pos 4U /*!< PMU OVSSET: Event Counter 4 Overflow Set Position */ +#define PMU_OVSSET_CNT4_STATUS_Msk (1UL << PMU_OVSSET_CNT4_STATUS_Pos) /*!< PMU OVSSET: Event Counter 4 Overflow Set Mask */ + +#define PMU_OVSSET_CNT5_STATUS_Pos 5U /*!< PMU OVSSET: Event Counter 5 Overflow Set Position */ +#define PMU_OVSSET_CNT5_STATUS_Msk (1UL << PMU_OVSSET_CNT5_STATUS_Pos) /*!< PMU OVSSET: Event Counter 5 Overflow Set Mask */ + +#define PMU_OVSSET_CNT6_STATUS_Pos 6U /*!< PMU OVSSET: Event Counter 6 Overflow Set Position */ +#define PMU_OVSSET_CNT6_STATUS_Msk (1UL << PMU_OVSSET_CNT6_STATUS_Pos) /*!< PMU OVSSET: Event Counter 6 Overflow Set Mask */ + +#define PMU_OVSSET_CNT7_STATUS_Pos 7U /*!< PMU OVSSET: Event Counter 7 Overflow Set Position */ +#define PMU_OVSSET_CNT7_STATUS_Msk (1UL << PMU_OVSSET_CNT7_STATUS_Pos) /*!< PMU OVSSET: Event Counter 7 Overflow Set Mask */ + +#define PMU_OVSSET_CNT8_STATUS_Pos 8U /*!< PMU OVSSET: Event Counter 8 Overflow Set Position */ +#define PMU_OVSSET_CNT8_STATUS_Msk (1UL << PMU_OVSSET_CNT8_STATUS_Pos) /*!< PMU OVSSET: Event Counter 8 Overflow Set Mask */ + +#define PMU_OVSSET_CNT9_STATUS_Pos 9U /*!< PMU OVSSET: Event Counter 9 Overflow Set Position */ +#define PMU_OVSSET_CNT9_STATUS_Msk (1UL << PMU_OVSSET_CNT9_STATUS_Pos) /*!< PMU OVSSET: Event Counter 9 Overflow Set Mask */ + +#define PMU_OVSSET_CNT10_STATUS_Pos 10U /*!< PMU OVSSET: Event Counter 10 Overflow Set Position */ +#define PMU_OVSSET_CNT10_STATUS_Msk (1UL << PMU_OVSSET_CNT10_STATUS_Pos) /*!< PMU OVSSET: Event Counter 10 Overflow Set Mask */ + +#define PMU_OVSSET_CNT11_STATUS_Pos 11U /*!< PMU OVSSET: Event Counter 11 Overflow Set Position */ +#define PMU_OVSSET_CNT11_STATUS_Msk (1UL << PMU_OVSSET_CNT11_STATUS_Pos) /*!< PMU OVSSET: Event Counter 11 Overflow Set Mask */ + +#define PMU_OVSSET_CNT12_STATUS_Pos 12U /*!< PMU OVSSET: Event Counter 12 Overflow Set Position */ +#define PMU_OVSSET_CNT12_STATUS_Msk (1UL << PMU_OVSSET_CNT12_STATUS_Pos) /*!< PMU OVSSET: Event Counter 12 Overflow Set Mask */ + +#define PMU_OVSSET_CNT13_STATUS_Pos 13U /*!< PMU OVSSET: Event Counter 13 Overflow Set Position */ +#define PMU_OVSSET_CNT13_STATUS_Msk (1UL << PMU_OVSSET_CNT13_STATUS_Pos) /*!< PMU OVSSET: Event Counter 13 Overflow Set Mask */ + +#define PMU_OVSSET_CNT14_STATUS_Pos 14U /*!< PMU OVSSET: Event Counter 14 Overflow Set Position */ +#define PMU_OVSSET_CNT14_STATUS_Msk (1UL << PMU_OVSSET_CNT14_STATUS_Pos) /*!< PMU OVSSET: Event Counter 14 Overflow Set Mask */ + +#define PMU_OVSSET_CNT15_STATUS_Pos 15U /*!< PMU OVSSET: Event Counter 15 Overflow Set Position */ +#define PMU_OVSSET_CNT15_STATUS_Msk (1UL << PMU_OVSSET_CNT15_STATUS_Pos) /*!< PMU OVSSET: Event Counter 15 Overflow Set Mask */ + +#define PMU_OVSSET_CNT16_STATUS_Pos 16U /*!< PMU OVSSET: Event Counter 16 Overflow Set Position */ +#define PMU_OVSSET_CNT16_STATUS_Msk (1UL << PMU_OVSSET_CNT16_STATUS_Pos) /*!< PMU OVSSET: Event Counter 16 Overflow Set Mask */ + +#define 
PMU_OVSSET_CNT17_STATUS_Pos 17U /*!< PMU OVSSET: Event Counter 17 Overflow Set Position */ +#define PMU_OVSSET_CNT17_STATUS_Msk (1UL << PMU_OVSSET_CNT17_STATUS_Pos) /*!< PMU OVSSET: Event Counter 17 Overflow Set Mask */ + +#define PMU_OVSSET_CNT18_STATUS_Pos 18U /*!< PMU OVSSET: Event Counter 18 Overflow Set Position */ +#define PMU_OVSSET_CNT18_STATUS_Msk (1UL << PMU_OVSSET_CNT18_STATUS_Pos) /*!< PMU OVSSET: Event Counter 18 Overflow Set Mask */ + +#define PMU_OVSSET_CNT19_STATUS_Pos 19U /*!< PMU OVSSET: Event Counter 19 Overflow Set Position */ +#define PMU_OVSSET_CNT19_STATUS_Msk (1UL << PMU_OVSSET_CNT19_STATUS_Pos) /*!< PMU OVSSET: Event Counter 19 Overflow Set Mask */ + +#define PMU_OVSSET_CNT20_STATUS_Pos 20U /*!< PMU OVSSET: Event Counter 20 Overflow Set Position */ +#define PMU_OVSSET_CNT20_STATUS_Msk (1UL << PMU_OVSSET_CNT20_STATUS_Pos) /*!< PMU OVSSET: Event Counter 20 Overflow Set Mask */ + +#define PMU_OVSSET_CNT21_STATUS_Pos 21U /*!< PMU OVSSET: Event Counter 21 Overflow Set Position */ +#define PMU_OVSSET_CNT21_STATUS_Msk (1UL << PMU_OVSSET_CNT21_STATUS_Pos) /*!< PMU OVSSET: Event Counter 21 Overflow Set Mask */ + +#define PMU_OVSSET_CNT22_STATUS_Pos 22U /*!< PMU OVSSET: Event Counter 22 Overflow Set Position */ +#define PMU_OVSSET_CNT22_STATUS_Msk (1UL << PMU_OVSSET_CNT22_STATUS_Pos) /*!< PMU OVSSET: Event Counter 22 Overflow Set Mask */ + +#define PMU_OVSSET_CNT23_STATUS_Pos 23U /*!< PMU OVSSET: Event Counter 23 Overflow Set Position */ +#define PMU_OVSSET_CNT23_STATUS_Msk (1UL << PMU_OVSSET_CNT23_STATUS_Pos) /*!< PMU OVSSET: Event Counter 23 Overflow Set Mask */ + +#define PMU_OVSSET_CNT24_STATUS_Pos 24U /*!< PMU OVSSET: Event Counter 24 Overflow Set Position */ +#define PMU_OVSSET_CNT24_STATUS_Msk (1UL << PMU_OVSSET_CNT24_STATUS_Pos) /*!< PMU OVSSET: Event Counter 24 Overflow Set Mask */ + +#define PMU_OVSSET_CNT25_STATUS_Pos 25U /*!< PMU OVSSET: Event Counter 25 Overflow Set Position */ +#define PMU_OVSSET_CNT25_STATUS_Msk (1UL << PMU_OVSSET_CNT25_STATUS_Pos) /*!< PMU OVSSET: Event Counter 25 Overflow Set Mask */ + +#define PMU_OVSSET_CNT26_STATUS_Pos 26U /*!< PMU OVSSET: Event Counter 26 Overflow Set Position */ +#define PMU_OVSSET_CNT26_STATUS_Msk (1UL << PMU_OVSSET_CNT26_STATUS_Pos) /*!< PMU OVSSET: Event Counter 26 Overflow Set Mask */ + +#define PMU_OVSSET_CNT27_STATUS_Pos 27U /*!< PMU OVSSET: Event Counter 27 Overflow Set Position */ +#define PMU_OVSSET_CNT27_STATUS_Msk (1UL << PMU_OVSSET_CNT27_STATUS_Pos) /*!< PMU OVSSET: Event Counter 27 Overflow Set Mask */ + +#define PMU_OVSSET_CNT28_STATUS_Pos 28U /*!< PMU OVSSET: Event Counter 28 Overflow Set Position */ +#define PMU_OVSSET_CNT28_STATUS_Msk (1UL << PMU_OVSSET_CNT28_STATUS_Pos) /*!< PMU OVSSET: Event Counter 28 Overflow Set Mask */ + +#define PMU_OVSSET_CNT29_STATUS_Pos 29U /*!< PMU OVSSET: Event Counter 29 Overflow Set Position */ +#define PMU_OVSSET_CNT29_STATUS_Msk (1UL << PMU_OVSSET_CNT29_STATUS_Pos) /*!< PMU OVSSET: Event Counter 29 Overflow Set Mask */ + +#define PMU_OVSSET_CNT30_STATUS_Pos 30U /*!< PMU OVSSET: Event Counter 30 Overflow Set Position */ +#define PMU_OVSSET_CNT30_STATUS_Msk (1UL << PMU_OVSSET_CNT30_STATUS_Pos) /*!< PMU OVSSET: Event Counter 30 Overflow Set Mask */ + +#define PMU_OVSSET_CYCCNT_STATUS_Pos 31U /*!< PMU OVSSET: Cycle Counter Overflow Set Position */ +#define PMU_OVSSET_CYCCNT_STATUS_Msk (1UL << PMU_OVSSET_CYCCNT_STATUS_Pos) /*!< PMU OVSSET: Cycle Counter Overflow Set Mask */ + +/** \brief PMU Overflow Flag Status Clear Register Definitions */ + +#define 
PMU_OVSCLR_CNT0_STATUS_Pos 0U /*!< PMU OVSCLR: Event Counter 0 Overflow Clear Position */ +#define PMU_OVSCLR_CNT0_STATUS_Msk (1UL /*<< PMU_OVSCLR_CNT0_STATUS_Pos*/) /*!< PMU OVSCLR: Event Counter 0 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT1_STATUS_Pos 1U /*!< PMU OVSCLR: Event Counter 1 Overflow Clear Position */ +#define PMU_OVSCLR_CNT1_STATUS_Msk (1UL << PMU_OVSCLR_CNT1_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 1 Overflow Clear */ + +#define PMU_OVSCLR_CNT2_STATUS_Pos 2U /*!< PMU OVSCLR: Event Counter 2 Overflow Clear Position */ +#define PMU_OVSCLR_CNT2_STATUS_Msk (1UL << PMU_OVSCLR_CNT2_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 2 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT3_STATUS_Pos 3U /*!< PMU OVSCLR: Event Counter 3 Overflow Clear Position */ +#define PMU_OVSCLR_CNT3_STATUS_Msk (1UL << PMU_OVSCLR_CNT3_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 3 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT4_STATUS_Pos 4U /*!< PMU OVSCLR: Event Counter 4 Overflow Clear Position */ +#define PMU_OVSCLR_CNT4_STATUS_Msk (1UL << PMU_OVSCLR_CNT4_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 4 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT5_STATUS_Pos 5U /*!< PMU OVSCLR: Event Counter 5 Overflow Clear Position */ +#define PMU_OVSCLR_CNT5_STATUS_Msk (1UL << PMU_OVSCLR_CNT5_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 5 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT6_STATUS_Pos 6U /*!< PMU OVSCLR: Event Counter 6 Overflow Clear Position */ +#define PMU_OVSCLR_CNT6_STATUS_Msk (1UL << PMU_OVSCLR_CNT6_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 6 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT7_STATUS_Pos 7U /*!< PMU OVSCLR: Event Counter 7 Overflow Clear Position */ +#define PMU_OVSCLR_CNT7_STATUS_Msk (1UL << PMU_OVSCLR_CNT7_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 7 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT8_STATUS_Pos 8U /*!< PMU OVSCLR: Event Counter 8 Overflow Clear Position */ +#define PMU_OVSCLR_CNT8_STATUS_Msk (1UL << PMU_OVSCLR_CNT8_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 8 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT9_STATUS_Pos 9U /*!< PMU OVSCLR: Event Counter 9 Overflow Clear Position */ +#define PMU_OVSCLR_CNT9_STATUS_Msk (1UL << PMU_OVSCLR_CNT9_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 9 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT10_STATUS_Pos 10U /*!< PMU OVSCLR: Event Counter 10 Overflow Clear Position */ +#define PMU_OVSCLR_CNT10_STATUS_Msk (1UL << PMU_OVSCLR_CNT10_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 10 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT11_STATUS_Pos 11U /*!< PMU OVSCLR: Event Counter 11 Overflow Clear Position */ +#define PMU_OVSCLR_CNT11_STATUS_Msk (1UL << PMU_OVSCLR_CNT11_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 11 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT12_STATUS_Pos 12U /*!< PMU OVSCLR: Event Counter 12 Overflow Clear Position */ +#define PMU_OVSCLR_CNT12_STATUS_Msk (1UL << PMU_OVSCLR_CNT12_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 12 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT13_STATUS_Pos 13U /*!< PMU OVSCLR: Event Counter 13 Overflow Clear Position */ +#define PMU_OVSCLR_CNT13_STATUS_Msk (1UL << PMU_OVSCLR_CNT13_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 13 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT14_STATUS_Pos 14U /*!< PMU OVSCLR: Event Counter 14 Overflow Clear Position */ +#define PMU_OVSCLR_CNT14_STATUS_Msk (1UL << PMU_OVSCLR_CNT14_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 14 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT15_STATUS_Pos 15U /*!< PMU OVSCLR: Event Counter 15 Overflow Clear Position 
*/ +#define PMU_OVSCLR_CNT15_STATUS_Msk (1UL << PMU_OVSCLR_CNT15_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 15 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT16_STATUS_Pos 16U /*!< PMU OVSCLR: Event Counter 16 Overflow Clear Position */ +#define PMU_OVSCLR_CNT16_STATUS_Msk (1UL << PMU_OVSCLR_CNT16_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 16 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT17_STATUS_Pos 17U /*!< PMU OVSCLR: Event Counter 17 Overflow Clear Position */ +#define PMU_OVSCLR_CNT17_STATUS_Msk (1UL << PMU_OVSCLR_CNT17_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 17 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT18_STATUS_Pos 18U /*!< PMU OVSCLR: Event Counter 18 Overflow Clear Position */ +#define PMU_OVSCLR_CNT18_STATUS_Msk (1UL << PMU_OVSCLR_CNT18_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 18 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT19_STATUS_Pos 19U /*!< PMU OVSCLR: Event Counter 19 Overflow Clear Position */ +#define PMU_OVSCLR_CNT19_STATUS_Msk (1UL << PMU_OVSCLR_CNT19_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 19 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT20_STATUS_Pos 20U /*!< PMU OVSCLR: Event Counter 20 Overflow Clear Position */ +#define PMU_OVSCLR_CNT20_STATUS_Msk (1UL << PMU_OVSCLR_CNT20_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 20 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT21_STATUS_Pos 21U /*!< PMU OVSCLR: Event Counter 21 Overflow Clear Position */ +#define PMU_OVSCLR_CNT21_STATUS_Msk (1UL << PMU_OVSCLR_CNT21_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 21 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT22_STATUS_Pos 22U /*!< PMU OVSCLR: Event Counter 22 Overflow Clear Position */ +#define PMU_OVSCLR_CNT22_STATUS_Msk (1UL << PMU_OVSCLR_CNT22_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 22 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT23_STATUS_Pos 23U /*!< PMU OVSCLR: Event Counter 23 Overflow Clear Position */ +#define PMU_OVSCLR_CNT23_STATUS_Msk (1UL << PMU_OVSCLR_CNT23_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 23 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT24_STATUS_Pos 24U /*!< PMU OVSCLR: Event Counter 24 Overflow Clear Position */ +#define PMU_OVSCLR_CNT24_STATUS_Msk (1UL << PMU_OVSCLR_CNT24_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 24 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT25_STATUS_Pos 25U /*!< PMU OVSCLR: Event Counter 25 Overflow Clear Position */ +#define PMU_OVSCLR_CNT25_STATUS_Msk (1UL << PMU_OVSCLR_CNT25_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 25 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT26_STATUS_Pos 26U /*!< PMU OVSCLR: Event Counter 26 Overflow Clear Position */ +#define PMU_OVSCLR_CNT26_STATUS_Msk (1UL << PMU_OVSCLR_CNT26_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 26 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT27_STATUS_Pos 27U /*!< PMU OVSCLR: Event Counter 27 Overflow Clear Position */ +#define PMU_OVSCLR_CNT27_STATUS_Msk (1UL << PMU_OVSCLR_CNT27_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 27 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT28_STATUS_Pos 28U /*!< PMU OVSCLR: Event Counter 28 Overflow Clear Position */ +#define PMU_OVSCLR_CNT28_STATUS_Msk (1UL << PMU_OVSCLR_CNT28_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 28 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT29_STATUS_Pos 29U /*!< PMU OVSCLR: Event Counter 29 Overflow Clear Position */ +#define PMU_OVSCLR_CNT29_STATUS_Msk (1UL << PMU_OVSCLR_CNT29_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 29 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT30_STATUS_Pos 30U /*!< PMU OVSCLR: Event Counter 30 Overflow Clear Position */ +#define 
PMU_OVSCLR_CNT30_STATUS_Msk (1UL << PMU_OVSCLR_CNT30_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 30 Overflow Clear Mask */ + +#define PMU_OVSCLR_CYCCNT_STATUS_Pos 31U /*!< PMU OVSCLR: Cycle Counter Overflow Clear Position */ +#define PMU_OVSCLR_CYCCNT_STATUS_Msk (1UL << PMU_OVSCLR_CYCCNT_STATUS_Pos) /*!< PMU OVSCLR: Cycle Counter Overflow Clear Mask */ + +/** \brief PMU Software Increment Counter */ + +#define PMU_SWINC_CNT0_Pos 0U /*!< PMU SWINC: Event Counter 0 Software Increment Position */ +#define PMU_SWINC_CNT0_Msk (1UL /*<< PMU_SWINC_CNT0_Pos */) /*!< PMU SWINC: Event Counter 0 Software Increment Mask */ + +#define PMU_SWINC_CNT1_Pos 1U /*!< PMU SWINC: Event Counter 1 Software Increment Position */ +#define PMU_SWINC_CNT1_Msk (1UL << PMU_SWINC_CNT1_Pos) /*!< PMU SWINC: Event Counter 1 Software Increment Mask */ + +#define PMU_SWINC_CNT2_Pos 2U /*!< PMU SWINC: Event Counter 2 Software Increment Position */ +#define PMU_SWINC_CNT2_Msk (1UL << PMU_SWINC_CNT2_Pos) /*!< PMU SWINC: Event Counter 2 Software Increment Mask */ + +#define PMU_SWINC_CNT3_Pos 3U /*!< PMU SWINC: Event Counter 3 Software Increment Position */ +#define PMU_SWINC_CNT3_Msk (1UL << PMU_SWINC_CNT3_Pos) /*!< PMU SWINC: Event Counter 3 Software Increment Mask */ + +#define PMU_SWINC_CNT4_Pos 4U /*!< PMU SWINC: Event Counter 4 Software Increment Position */ +#define PMU_SWINC_CNT4_Msk (1UL << PMU_SWINC_CNT4_Pos) /*!< PMU SWINC: Event Counter 4 Software Increment Mask */ + +#define PMU_SWINC_CNT5_Pos 5U /*!< PMU SWINC: Event Counter 5 Software Increment Position */ +#define PMU_SWINC_CNT5_Msk (1UL << PMU_SWINC_CNT5_Pos) /*!< PMU SWINC: Event Counter 5 Software Increment Mask */ + +#define PMU_SWINC_CNT6_Pos 6U /*!< PMU SWINC: Event Counter 6 Software Increment Position */ +#define PMU_SWINC_CNT6_Msk (1UL << PMU_SWINC_CNT6_Pos) /*!< PMU SWINC: Event Counter 6 Software Increment Mask */ + +#define PMU_SWINC_CNT7_Pos 7U /*!< PMU SWINC: Event Counter 7 Software Increment Position */ +#define PMU_SWINC_CNT7_Msk (1UL << PMU_SWINC_CNT7_Pos) /*!< PMU SWINC: Event Counter 7 Software Increment Mask */ + +#define PMU_SWINC_CNT8_Pos 8U /*!< PMU SWINC: Event Counter 8 Software Increment Position */ +#define PMU_SWINC_CNT8_Msk (1UL << PMU_SWINC_CNT8_Pos) /*!< PMU SWINC: Event Counter 8 Software Increment Mask */ + +#define PMU_SWINC_CNT9_Pos 9U /*!< PMU SWINC: Event Counter 9 Software Increment Position */ +#define PMU_SWINC_CNT9_Msk (1UL << PMU_SWINC_CNT9_Pos) /*!< PMU SWINC: Event Counter 9 Software Increment Mask */ + +#define PMU_SWINC_CNT10_Pos 10U /*!< PMU SWINC: Event Counter 10 Software Increment Position */ +#define PMU_SWINC_CNT10_Msk (1UL << PMU_SWINC_CNT10_Pos) /*!< PMU SWINC: Event Counter 10 Software Increment Mask */ + +#define PMU_SWINC_CNT11_Pos 11U /*!< PMU SWINC: Event Counter 11 Software Increment Position */ +#define PMU_SWINC_CNT11_Msk (1UL << PMU_SWINC_CNT11_Pos) /*!< PMU SWINC: Event Counter 11 Software Increment Mask */ + +#define PMU_SWINC_CNT12_Pos 12U /*!< PMU SWINC: Event Counter 12 Software Increment Position */ +#define PMU_SWINC_CNT12_Msk (1UL << PMU_SWINC_CNT12_Pos) /*!< PMU SWINC: Event Counter 12 Software Increment Mask */ + +#define PMU_SWINC_CNT13_Pos 13U /*!< PMU SWINC: Event Counter 13 Software Increment Position */ +#define PMU_SWINC_CNT13_Msk (1UL << PMU_SWINC_CNT13_Pos) /*!< PMU SWINC: Event Counter 13 Software Increment Mask */ + +#define PMU_SWINC_CNT14_Pos 14U /*!< PMU SWINC: Event Counter 14 Software Increment Position */ +#define PMU_SWINC_CNT14_Msk (1UL << PMU_SWINC_CNT14_Pos) /*!< PMU 
SWINC: Event Counter 14 Software Increment Mask */ + +#define PMU_SWINC_CNT15_Pos 15U /*!< PMU SWINC: Event Counter 15 Software Increment Position */ +#define PMU_SWINC_CNT15_Msk (1UL << PMU_SWINC_CNT15_Pos) /*!< PMU SWINC: Event Counter 15 Software Increment Mask */ + +#define PMU_SWINC_CNT16_Pos 16U /*!< PMU SWINC: Event Counter 16 Software Increment Position */ +#define PMU_SWINC_CNT16_Msk (1UL << PMU_SWINC_CNT16_Pos) /*!< PMU SWINC: Event Counter 16 Software Increment Mask */ + +#define PMU_SWINC_CNT17_Pos 17U /*!< PMU SWINC: Event Counter 17 Software Increment Position */ +#define PMU_SWINC_CNT17_Msk (1UL << PMU_SWINC_CNT17_Pos) /*!< PMU SWINC: Event Counter 17 Software Increment Mask */ + +#define PMU_SWINC_CNT18_Pos 18U /*!< PMU SWINC: Event Counter 18 Software Increment Position */ +#define PMU_SWINC_CNT18_Msk (1UL << PMU_SWINC_CNT18_Pos) /*!< PMU SWINC: Event Counter 18 Software Increment Mask */ + +#define PMU_SWINC_CNT19_Pos 19U /*!< PMU SWINC: Event Counter 19 Software Increment Position */ +#define PMU_SWINC_CNT19_Msk (1UL << PMU_SWINC_CNT19_Pos) /*!< PMU SWINC: Event Counter 19 Software Increment Mask */ + +#define PMU_SWINC_CNT20_Pos 20U /*!< PMU SWINC: Event Counter 20 Software Increment Position */ +#define PMU_SWINC_CNT20_Msk (1UL << PMU_SWINC_CNT20_Pos) /*!< PMU SWINC: Event Counter 20 Software Increment Mask */ + +#define PMU_SWINC_CNT21_Pos 21U /*!< PMU SWINC: Event Counter 21 Software Increment Position */ +#define PMU_SWINC_CNT21_Msk (1UL << PMU_SWINC_CNT21_Pos) /*!< PMU SWINC: Event Counter 21 Software Increment Mask */ + +#define PMU_SWINC_CNT22_Pos 22U /*!< PMU SWINC: Event Counter 22 Software Increment Position */ +#define PMU_SWINC_CNT22_Msk (1UL << PMU_SWINC_CNT22_Pos) /*!< PMU SWINC: Event Counter 22 Software Increment Mask */ + +#define PMU_SWINC_CNT23_Pos 23U /*!< PMU SWINC: Event Counter 23 Software Increment Position */ +#define PMU_SWINC_CNT23_Msk (1UL << PMU_SWINC_CNT23_Pos) /*!< PMU SWINC: Event Counter 23 Software Increment Mask */ + +#define PMU_SWINC_CNT24_Pos 24U /*!< PMU SWINC: Event Counter 24 Software Increment Position */ +#define PMU_SWINC_CNT24_Msk (1UL << PMU_SWINC_CNT24_Pos) /*!< PMU SWINC: Event Counter 24 Software Increment Mask */ + +#define PMU_SWINC_CNT25_Pos 25U /*!< PMU SWINC: Event Counter 25 Software Increment Position */ +#define PMU_SWINC_CNT25_Msk (1UL << PMU_SWINC_CNT25_Pos) /*!< PMU SWINC: Event Counter 25 Software Increment Mask */ + +#define PMU_SWINC_CNT26_Pos 26U /*!< PMU SWINC: Event Counter 26 Software Increment Position */ +#define PMU_SWINC_CNT26_Msk (1UL << PMU_SWINC_CNT26_Pos) /*!< PMU SWINC: Event Counter 26 Software Increment Mask */ + +#define PMU_SWINC_CNT27_Pos 27U /*!< PMU SWINC: Event Counter 27 Software Increment Position */ +#define PMU_SWINC_CNT27_Msk (1UL << PMU_SWINC_CNT27_Pos) /*!< PMU SWINC: Event Counter 27 Software Increment Mask */ + +#define PMU_SWINC_CNT28_Pos 28U /*!< PMU SWINC: Event Counter 28 Software Increment Position */ +#define PMU_SWINC_CNT28_Msk (1UL << PMU_SWINC_CNT28_Pos) /*!< PMU SWINC: Event Counter 28 Software Increment Mask */ + +#define PMU_SWINC_CNT29_Pos 29U /*!< PMU SWINC: Event Counter 29 Software Increment Position */ +#define PMU_SWINC_CNT29_Msk (1UL << PMU_SWINC_CNT29_Pos) /*!< PMU SWINC: Event Counter 29 Software Increment Mask */ + +#define PMU_SWINC_CNT30_Pos 30U /*!< PMU SWINC: Event Counter 30 Software Increment Position */ +#define PMU_SWINC_CNT30_Msk (1UL << PMU_SWINC_CNT30_Pos) /*!< PMU SWINC: Event Counter 30 Software Increment Mask */ + +/** \brief PMU Control 
Register Definitions */ + +#define PMU_CTRL_ENABLE_Pos 0U /*!< PMU CTRL: ENABLE Position */ +#define PMU_CTRL_ENABLE_Msk (1UL /*<< PMU_CTRL_ENABLE_Pos*/) /*!< PMU CTRL: ENABLE Mask */ + +#define PMU_CTRL_EVENTCNT_RESET_Pos 1U /*!< PMU CTRL: Event Counter Reset Position */ +#define PMU_CTRL_EVENTCNT_RESET_Msk (1UL << PMU_CTRL_EVENTCNT_RESET_Pos) /*!< PMU CTRL: Event Counter Reset Mask */ + +#define PMU_CTRL_CYCCNT_RESET_Pos 2U /*!< PMU CTRL: Cycle Counter Reset Position */ +#define PMU_CTRL_CYCCNT_RESET_Msk (1UL << PMU_CTRL_CYCCNT_RESET_Pos) /*!< PMU CTRL: Cycle Counter Reset Mask */ + +#define PMU_CTRL_CYCCNT_DISABLE_Pos 5U /*!< PMU CTRL: Disable Cycle Counter Position */ +#define PMU_CTRL_CYCCNT_DISABLE_Msk (1UL << PMU_CTRL_CYCCNT_DISABLE_Pos) /*!< PMU CTRL: Disable Cycle Counter Mask */ + +#define PMU_CTRL_FRZ_ON_OV_Pos 9U /*!< PMU CTRL: Freeze-on-overflow Position */ +#define PMU_CTRL_FRZ_ON_OV_Msk (1UL << PMU_CTRL_FRZ_ON_OV_Pos) /*!< PMU CTRL: Freeze-on-overflow Mask */ + +#define PMU_CTRL_TRACE_ON_OV_Pos 11U /*!< PMU CTRL: Trace-on-overflow Position */ +#define PMU_CTRL_TRACE_ON_OV_Msk (1UL << PMU_CTRL_TRACE_ON_OV_Pos) /*!< PMU CTRL: Trace-on-overflow Mask */ + +/** \brief PMU Type Register Definitions */ + +#define PMU_TYPE_NUM_CNTS_Pos 0U /*!< PMU TYPE: Number of Counters Position */ +#define PMU_TYPE_NUM_CNTS_Msk (0xFFUL /*<< PMU_TYPE_NUM_CNTS_Pos*/) /*!< PMU TYPE: Number of Counters Mask */ + +#define PMU_TYPE_SIZE_CNTS_Pos 8U /*!< PMU TYPE: Size of Counters Position */ +#define PMU_TYPE_SIZE_CNTS_Msk (0x3FUL << PMU_TYPE_SIZE_CNTS_Pos) /*!< PMU TYPE: Size of Counters Mask */ + +#define PMU_TYPE_CYCCNT_PRESENT_Pos 14U /*!< PMU TYPE: Cycle Counter Present Position */ +#define PMU_TYPE_CYCCNT_PRESENT_Msk (1UL << PMU_TYPE_CYCCNT_PRESENT_Pos) /*!< PMU TYPE: Cycle Counter Present Mask */ + +#define PMU_TYPE_FRZ_OV_SUPPORT_Pos 21U /*!< PMU TYPE: Freeze-on-overflow Support Position */ +#define PMU_TYPE_FRZ_OV_SUPPORT_Msk (1UL << PMU_TYPE_FRZ_OV_SUPPORT_Pos) /*!< PMU TYPE: Freeze-on-overflow Support Mask */ + +#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos 23U /*!< PMU TYPE: Trace-on-overflow Support Position */ +#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Msk (1UL << PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos) /*!< PMU TYPE: Trace-on-overflow Support Mask */ + +/** \brief PMU Authentication Status Register Definitions */ + +#define PMU_AUTHSTATUS_NSID_Pos 0U /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Position */ +#define PMU_AUTHSTATUS_NSID_Msk (0x3UL /*<< PMU_AUTHSTATUS_NSID_Pos*/) /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSNID_Pos 2U /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_NSNID_Msk (0x3UL << PMU_AUTHSTATUS_NSNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SID_Pos 4U /*!< PMU AUTHSTATUS: Secure Invasive Debug Position */ +#define PMU_AUTHSTATUS_SID_Msk (0x3UL << PMU_AUTHSTATUS_SID_Pos) /*!< PMU AUTHSTATUS: Secure Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SNID_Pos 6U /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_SNID_Msk (0x3UL << PMU_AUTHSTATUS_SNID_Pos) /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSUID_Pos 16U /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Position */ +#define PMU_AUTHSTATUS_NSUID_Msk (0x3UL << PMU_AUTHSTATUS_NSUID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSUNID_Pos 18U /*!< PMU AUTHSTATUS: Non-secure 
Unprivileged Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_NSUNID_Msk (0x3UL << PMU_AUTHSTATUS_NSUNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SUID_Pos 20U /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Position */ +#define PMU_AUTHSTATUS_SUID_Msk (0x3UL << PMU_AUTHSTATUS_SUID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SUNID_Pos 22U /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_SUNID_Msk (0x3UL << PMU_AUTHSTATUS_SUNID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Mask */ + +/*@} end of group CMSIS_PMU */ +#endif + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< 
MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_PXN_Pos 4U /*!< MPU RLAR: PXN Position */ +#define MPU_RLAR_PXN_Msk (1UL << MPU_RLAR_PXN_Pos) /*!< MPU RLAR: PXN Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Disable Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
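As a minimal sketch of how the MPU bit-field macros above are typically combined (before moving on to the SAU), the following fragment programs one region through RNR, RBAR and RLAR. It is not part of the header: the base and limit addresses, the attribute index and the mpu parameter are illustrative assumptions, and regions are normally reprogrammed only while the MPU is disabled via MPU_CTRL_ENABLE_Msk.

static inline void mpu_region0_example(MPU_Type *mpu)   /* illustrative helper, not a CMSIS API */
{
  mpu->RNR  = 0U;                                       /* select region 0 in RNR.REGION          */
  mpu->RBAR = (0x20000000UL & MPU_RBAR_BASE_Msk)        /* base address, 32-byte aligned          */
            | (1UL << MPU_RBAR_AP_Pos)                  /* AP = 0b01: read/write, any privilege   */
            | MPU_RBAR_XN_Msk;                          /* execute never                          */
  mpu->RLAR = (0x2001FFE0UL & MPU_RLAR_LIMIT_Msk)       /* inclusive limit address, bits [31:5]   */
            | (0UL << MPU_RLAR_AttrIndx_Pos)            /* use MAIR attribute slot 0              */
            | MPU_RLAR_EN_Msk;                          /* enable the region                      */
}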
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/* Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define 
SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege 
level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +#define FPU_FPDSCR_FZ16_Pos 19U /*!< FPDSCR: FZ16 bit Position */ +#define FPU_FPDSCR_FZ16_Msk (1UL << FPU_FPDSCR_FZ16_Pos) /*!< FPDSCR: FZ16 bit Mask */ + +#define FPU_FPDSCR_LTPSIZE_Pos 16U /*!< FPDSCR: LTPSIZE bit Position */ +#define FPU_FPDSCR_LTPSIZE_Msk (7UL << FPU_FPDSCR_LTPSIZE_Pos) /*!< FPDSCR: LTPSIZE bit Mask */ + +/* Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FPRound_Pos 28U /*!< MVFR0: FPRound bits Position */ +#define FPU_MVFR0_FPRound_Msk (0xFUL << FPU_MVFR0_FPRound_Pos) /*!< MVFR0: FPRound bits Mask */ + +#define FPU_MVFR0_FPSqrt_Pos 20U /*!< MVFR0: FPSqrt bits Position */ +#define FPU_MVFR0_FPSqrt_Msk (0xFUL << FPU_MVFR0_FPSqrt_Pos) /*!< MVFR0: FPSqrt bits Mask */ + +#define FPU_MVFR0_FPDivide_Pos 16U /*!< MVFR0: FPDivide bits Position */ +#define FPU_MVFR0_FPDivide_Msk (0xFUL << FPU_MVFR0_FPDivide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FPDP_Pos 8U /*!< MVFR0: FPDP bits Position */ +#define FPU_MVFR0_FPDP_Msk (0xFUL << FPU_MVFR0_FPDP_Pos) /*!< MVFR0: FPDP bits Mask */ + +#define FPU_MVFR0_FPSP_Pos 4U /*!< MVFR0: FPSP bits Position */ +#define FPU_MVFR0_FPSP_Msk (0xFUL << FPU_MVFR0_FPSP_Pos) /*!< MVFR0: FPSP bits Mask */ + +#define FPU_MVFR0_SIMDReg_Pos 0U /*!< MVFR0: SIMDReg bits Position */ +#define FPU_MVFR0_SIMDReg_Msk (0xFUL /*<< FPU_MVFR0_SIMDReg_Pos*/) /*!< MVFR0: SIMDReg bits Mask */ + +/* Media and VFP Feature Register 1 Definitions */ +#define FPU_MVFR1_FMAC_Pos 28U /*!< MVFR1: FMAC bits Position */ +#define FPU_MVFR1_FMAC_Msk (0xFUL << FPU_MVFR1_FMAC_Pos) /*!< MVFR1: FMAC bits Mask */ + +#define FPU_MVFR1_FPHP_Pos 24U /*!< MVFR1: FPHP bits Position */ +#define FPU_MVFR1_FPHP_Msk (0xFUL << FPU_MVFR1_FPHP_Pos) /*!< MVFR1: FPHP bits Mask */ + +#define FPU_MVFR1_FP16_Pos 20U /*!< MVFR1: FP16 bits Position */ +#define FPU_MVFR1_FP16_Msk (0xFUL << FPU_MVFR1_FP16_Pos) /*!< MVFR1: FP16 bits Mask */ + +#define FPU_MVFR1_MVE_Pos 8U /*!< MVFR1: MVE bits Position */ +#define FPU_MVFR1_MVE_Msk (0xFUL << FPU_MVFR1_MVE_Pos) /*!< MVFR1: MVE bits Mask */ + +#define FPU_MVFR1_FPDNaN_Pos 4U /*!< MVFR1: FPDNaN bits Position */ +#define FPU_MVFR1_FPDNaN_Msk (0xFUL << FPU_MVFR1_FPDNaN_Pos) /*!< MVFR1: FPDNaN bits Mask */ + +#define FPU_MVFR1_FPFtZ_Pos 0U /*!< MVFR1: FPFtZ bits Position */ +#define FPU_MVFR1_FPFtZ_Msk (0xFUL 
/*<< FPU_MVFR1_FPFtZ_Pos*/) /*!< MVFR1: FPFtZ bits Mask */ + +/* Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: FPMisc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: FPMisc bits Mask */ + +/*@} end of group CMSIS_FPU */ + +/* CoreDebug is deprecated. replaced by DCB (Debug Control Block) */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief \deprecated Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + __OM uint32_t DSCEMCR; /*!< Offset: 0x010 ( /W) Debug Set Clear Exception and Monitor Control Register */ + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< \deprecated CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< \deprecated CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_FPD_Pos 23U /*!< \deprecated CoreDebug DHCSR: S_FPD Position */ +#define CoreDebug_DHCSR_S_FPD_Msk (1UL << CoreDebug_DHCSR_S_FPD_Pos) /*!< \deprecated CoreDebug DHCSR: S_FPD Mask */ + +#define CoreDebug_DHCSR_S_SUIDE_Pos 22U /*!< \deprecated CoreDebug DHCSR: S_SUIDE Position */ +#define CoreDebug_DHCSR_S_SUIDE_Msk (1UL << CoreDebug_DHCSR_S_SUIDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_SUIDE Mask */ + +#define CoreDebug_DHCSR_S_NSUIDE_Pos 21U /*!< \deprecated CoreDebug DHCSR: S_NSUIDE Position */ +#define CoreDebug_DHCSR_S_NSUIDE_Msk (1UL << CoreDebug_DHCSR_S_NSUIDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_NSUIDE Mask */ + +#define CoreDebug_DHCSR_S_SDE_Pos 20U /*!< \deprecated CoreDebug DHCSR: S_SDE Position */ +#define CoreDebug_DHCSR_S_SDE_Msk (1UL << CoreDebug_DHCSR_S_SDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_SDE Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< \deprecated CoreDebug DHCSR: S_SLEEP Position */ +#define 
CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< \deprecated CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< \deprecated CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< \deprecated CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< \deprecated CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_PMOV_Pos 6U /*!< \deprecated CoreDebug DHCSR: C_PMOV Position */ +#define CoreDebug_DHCSR_C_PMOV_Msk (1UL << CoreDebug_DHCSR_C_PMOV_Pos) /*!< \deprecated CoreDebug DHCSR: C_PMOV Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< \deprecated CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< \deprecated CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< \deprecated CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< \deprecated CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< \deprecated CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< \deprecated CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< \deprecated CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< \deprecated CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< \deprecated CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< \deprecated CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< \deprecated CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< \deprecated CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< \deprecated CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< \deprecated CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< \deprecated CoreDebug 
DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< \deprecated CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< \deprecated CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< \deprecated CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Set Clear Exception and Monitor Control Register Definitions */ +#define CoreDebug_DSCEMCR_CLR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_REQ, Position */ +#define CoreDebug_DSCEMCR_CLR_MON_REQ_Msk (1UL << CoreDebug_DSCEMCR_CLR_MON_REQ_Pos) /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_REQ, Mask */ + +#define CoreDebug_DSCEMCR_CLR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_PEND, Position */ +#define CoreDebug_DSCEMCR_CLR_MON_PEND_Msk (1UL << CoreDebug_DSCEMCR_CLR_MON_PEND_Pos) /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_PEND, Mask */ + +#define CoreDebug_DSCEMCR_SET_MON_REQ_Pos 3U /*!< \deprecated CoreDebug DSCEMCR: SET_MON_REQ, Position */ +#define CoreDebug_DSCEMCR_SET_MON_REQ_Msk (1UL << CoreDebug_DSCEMCR_SET_MON_REQ_Pos) /*!< \deprecated CoreDebug DSCEMCR: SET_MON_REQ, Mask */ + +#define CoreDebug_DSCEMCR_SET_MON_PEND_Pos 1U /*!< \deprecated CoreDebug DSCEMCR: SET_MON_PEND, Position */ +#define CoreDebug_DSCEMCR_SET_MON_PEND_Msk (1UL << CoreDebug_DSCEMCR_SET_MON_PEND_Pos) /*!< \deprecated CoreDebug DSCEMCR: SET_MON_PEND, Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_UIDEN_Pos 10U /*!< \deprecated CoreDebug DAUTHCTRL: UIDEN, Position */ +#define CoreDebug_DAUTHCTRL_UIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_UIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: UIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_UIDAPEN_Pos 9U /*!< \deprecated CoreDebug DAUTHCTRL: UIDAPEN, Position */ +#define CoreDebug_DAUTHCTRL_UIDAPEN_Msk (1UL << CoreDebug_DAUTHCTRL_UIDAPEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: UIDAPEN, Mask */ + +#define 
CoreDebug_DAUTHCTRL_FSDMA_Pos 8U /*!< \deprecated CoreDebug DAUTHCTRL: FSDMA, Position */ +#define CoreDebug_DAUTHCTRL_FSDMA_Msk (1UL << CoreDebug_DAUTHCTRL_FSDMA_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: FSDMA, Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< \deprecated CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< \deprecated CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< \deprecated CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< \deprecated CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). 
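Although the CoreDebug group above is kept only for backward compatibility (the DCB definitions that follow carry the current names), its DHCSR masks are still widely used. A minimal sketch, not part of the header, that tests whether a halting debugger is attached is shown below; it assumes the CoreDebug instance pointer that the core header provides elsewhere.

static inline int debugger_attached_example(void)       /* illustrative helper, not a CMSIS API */
{
  /* C_DEBUGEN reads as 1 while a debugger has enabled halting debug. */
  return (CoreDebug->DHCSR & CoreDebug_DHCSR_C_DEBUGEN_Msk) != 0;
}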
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + __OM uint32_t DSCEMCR; /*!< Offset: 0x010 ( /W) Debug Set Clear Exception and Monitor Control Register */ + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_FPD_Pos 23U /*!< DCB DHCSR: Floating-point registers Debuggable Position */ +#define DCB_DHCSR_S_FPD_Msk (0x1UL << DCB_DHCSR_S_FPD_Pos) /*!< DCB DHCSR: Floating-point registers Debuggable Mask */ + +#define DCB_DHCSR_S_SUIDE_Pos 22U /*!< DCB DHCSR: Secure unprivileged halting debug enabled Position */ +#define DCB_DHCSR_S_SUIDE_Msk (0x1UL << DCB_DHCSR_S_SUIDE_Pos) /*!< DCB DHCSR: Secure unprivileged halting debug enabled Mask */ + +#define DCB_DHCSR_S_NSUIDE_Pos 21U /*!< DCB DHCSR: Non-secure unprivileged halting debug enabled Position */ +#define DCB_DHCSR_S_NSUIDE_Msk (0x1UL << DCB_DHCSR_S_NSUIDE_Pos) /*!< DCB DHCSR: Non-secure unprivileged halting debug enabled Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_PMOV_Pos 6U /*!< DCB DHCSR: Halt on PMU overflow control Position */ +#define DCB_DHCSR_C_PMOV_Msk (0x1UL << DCB_DHCSR_C_PMOV_Pos) /*!< DCB DHCSR: Halt on PMU overflow control Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (0x1UL << 
DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (0x1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (0x1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (0x1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (0x1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (0x1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (0x1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (0x1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (0x1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask 
*/ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (0x1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (0x1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (0x1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (0x1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (0x1UL << DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (0x1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DSCEMCR, Debug Set Clear Exception and Monitor Control Register Definitions */ +#define DCB_DSCEMCR_CLR_MON_REQ_Pos 19U /*!< DCB DSCEMCR: Clear monitor request Position */ +#define DCB_DSCEMCR_CLR_MON_REQ_Msk (0x1UL << DCB_DSCEMCR_CLR_MON_REQ_Pos) /*!< DCB DSCEMCR: Clear monitor request Mask */ + +#define DCB_DSCEMCR_CLR_MON_PEND_Pos 17U /*!< DCB DSCEMCR: Clear monitor pend Position */ +#define DCB_DSCEMCR_CLR_MON_PEND_Msk (0x1UL << DCB_DSCEMCR_CLR_MON_PEND_Pos) /*!< DCB DSCEMCR: Clear monitor pend Mask */ + +#define DCB_DSCEMCR_SET_MON_REQ_Pos 3U /*!< DCB DSCEMCR: Set monitor request Position */ +#define DCB_DSCEMCR_SET_MON_REQ_Msk (0x1UL << DCB_DSCEMCR_SET_MON_REQ_Pos) /*!< DCB DSCEMCR: Set monitor request Mask */ + +#define DCB_DSCEMCR_SET_MON_PEND_Pos 1U /*!< DCB DSCEMCR: Set monitor pend Position */ +#define DCB_DSCEMCR_SET_MON_PEND_Msk (0x1UL << DCB_DSCEMCR_SET_MON_PEND_Pos) /*!< DCB DSCEMCR: Set monitor pend Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_UIDEN_Pos 10U /*!< DCB DAUTHCTRL: Unprivileged Invasive Debug Enable Position */ +#define DCB_DAUTHCTRL_UIDEN_Msk (0x1UL << DCB_DAUTHCTRL_UIDEN_Pos) /*!< DCB DAUTHCTRL: Unprivileged Invasive Debug Enable Mask */ + +#define DCB_DAUTHCTRL_UIDAPEN_Pos 9U /*!< DCB DAUTHCTRL: Unprivileged Invasive DAP Access Enable Position */ +#define DCB_DAUTHCTRL_UIDAPEN_Msk (0x1UL << DCB_DAUTHCTRL_UIDAPEN_Pos) /*!< DCB DAUTHCTRL: Unprivileged Invasive DAP Access Enable Mask */ + +#define DCB_DAUTHCTRL_FSDMA_Pos 8U /*!< DCB DAUTHCTRL: Force Secure DebugMonitor Allowed Position */ +#define DCB_DAUTHCTRL_FSDMA_Msk (0x1UL << DCB_DAUTHCTRL_FSDMA_Pos) /*!< DCB DAUTHCTRL: Force Secure DebugMonitor Allowed Mask */ + +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< 
DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SUNID_Pos 22U /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_SUNID_Msk (0x3UL << DIB_DAUTHSTATUS_SUNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_SUID_Pos 20U /*!< DIB DAUTHSTATUS: Secure Unprivileged Invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_SUID_Msk (0x3UL << DIB_DAUTHSTATUS_SUID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_NSUNID_Pos 18U /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Allo Position */ +#define DIB_DAUTHSTATUS_NSUNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSUNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Allo Mask */ + +#define DIB_DAUTHSTATUS_NSUID_Pos 16U /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_NSUID_Msk (0x3UL << DIB_DAUTHSTATUS_NSUID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << 
DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as a uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit field value. + \param[in] field Name of the register bit field. + \param[in] value Value of the register. This parameter is interpreted as a uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures.
+ @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + #define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) + #define PMU_BASE (0xE0003000UL) /*!< PMU Base Address */ + #define PMU ((PMU_Type *) PMU_BASE ) /*!< PMU configuration struct */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + + #define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ + #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< \deprecated Core Debug Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCnSCB_NS ((SCnSCB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define 
CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< \deprecated Core Debug configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_register_aliases Backwards Compatibility Aliases + \brief Register alias definitions for backwards compatibility. + @{ + */ +#define ID_ADR (ID_AFR) /*!< SCB Auxiliary Feature Register */ +/*@} */ + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. 
+ */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. 
+ \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in the SRAM-based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must have been relocated to SRAM beforehand. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from the interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses including + buffered writes are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field).
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## PMU functions and events #################################### */ + +#if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) + +#include "pmu_armv8.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + +/* ########################## MVE functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_MveFunctions MVE Functions + \brief Function that provides MVE type. 
+ @{ + */ + +/** + \brief get MVE type + \details returns the MVE type + \returns + - \b 0: No Vector Extension (MVE) + - \b 1: Integer Vector Extension (MVE-I) + - \b 2: Floating-point Vector Extension (MVE-F) + */ +__STATIC_INLINE uint32_t SCB_GetMVEType(void) +{ + const uint32_t mvfr1 = FPU->MVFR1; + if ((mvfr1 & FPU_MVFR1_MVE_Msk) == (0x2U << FPU_MVFR1_MVE_Pos)) + { + return 2U; + } + else if ((mvfr1 & FPU_MVFR1_MVE_Msk) == (0x1U << FPU_MVFR1_MVE_Pos)) + { + return 1U; + } + else + { + return 0U; + } +} + + +/*@} end of CMSIS_Core_MveFunctions */ + + +/* ########################## Cache functions #################################### */ + +#if ((defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)) || \ + (defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U))) +#include "cachel1_armv7.h" +#endif + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details Writes to the Debug Authentication Control register. + \param [in] value Value to be written. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads the Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details Writes to the non-secure Debug Authentication Control register when in secure state. + \param [in] value Value to be written. + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads the non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register.
+ */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
+ + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_ARMV81MML_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/Drivers/CMSIS/Include/core_armv8mbl.h b/Drivers/CMSIS/Include/core_armv8mbl.h new file mode 100644 index 0000000..932d3d1 --- /dev/null +++ b/Drivers/CMSIS/Include/core_armv8mbl.h @@ -0,0 +1,2222 @@ +/**************************************************************************//** + * @file core_armv8mbl.h + * @brief CMSIS Armv8-M Baseline Core Peripheral Access Layer Header File + * @version V5.1.0 + * @date 27. March 2020 + ******************************************************************************/ +/* + * Copyright (c) 2009-2020 Arm Limited. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_ARMV8MBL_H_GENERIC +#define __CORE_ARMV8MBL_H_GENERIC + +#include <stdint.h> + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_ARMv8MBL + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS definitions */ +#define __ARMv8MBL_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __ARMv8MBL_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __ARMv8MBL_CMSIS_VERSION ((__ARMv8MBL_CMSIS_VERSION_MAIN << 16U) | \ + __ARMv8MBL_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (2U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_ARMV8MBL_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_ARMV8MBL_H_DEPENDANT +#define __CORE_ARMV8MBL_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __ARMv8MBL_REV + #define __ARMv8MBL_REV 0x0000U + #warning "__ARMv8MBL_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 0U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" 
+ #endif + + #ifndef __ETM_PRESENT + #define __ETM_PRESENT 0U + #warning "__ETM_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MTB_PRESENT + #define __MTB_PRESENT 0U + #warning "__MTB_PRESENT not defined in device header file; using default!" + #endif + +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group ARMv8MBL */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint32_t IPR[124U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ +#else + uint32_t RESERVED0; +#endif + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IOM uint32_t SHPR[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING 
Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#endif + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ 
+#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + uint32_t RESERVED0[6U]; + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT 
CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x3UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). 
+ */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Sizes Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Sizes Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[809U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) Software Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) Software Lock Status Register */ + uint32_t RESERVED4[4U]; + __IM uint32_t TYPE; /*!< Offset: 0xFC8 (R/ ) Device Identifier Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_SWOSCALER_Pos 0U /*!< TPI ACPR: SWOSCALER Position */ +#define TPI_ACPR_SWOSCALER_Msk (0xFFFFUL /*<< TPI_ACPR_SWOSCALER_Pos*/) /*!< TPI ACPR: SWOSCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI Periodic Synchronization Control Register Definitions */ +#define TPI_PSCR_PSCount_Pos 0U /*!< TPI PSCR: PSCount Position */ +#define TPI_PSCR_PSCount_Msk (0x1FUL /*<< TPI_PSCR_PSCount_Pos*/) /*!< TPI PSCR: TPSCount Mask */ + +/* TPI Software Lock Status Register Definitions */ +#define TPI_LSR_nTT_Pos 1U /*!< TPI LSR: Not thirty-two bit. Position */ +#define TPI_LSR_nTT_Msk (0x1UL << TPI_LSR_nTT_Pos) /*!< TPI LSR: Not thirty-two bit. 
Mask */ + +#define TPI_LSR_SLK_Pos 1U /*!< TPI LSR: Software Lock status Position */ +#define TPI_LSR_SLK_Msk (0x1UL << TPI_LSR_SLK_Pos) /*!< TPI LSR: Software Lock status Mask */ + +#define TPI_LSR_SLI_Pos 0U /*!< TPI LSR: Software Lock implemented Position */ +#define TPI_LSR_SLI_Msk (0x1UL /*<< TPI_LSR_SLI_Pos*/) /*!< TPI LSR: Software Lock implemented Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFO depth Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFO depth Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + uint32_t RESERVED0[7U]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 1U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: EN Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: EN Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define 
MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#endif +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined 
(__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/* CoreDebug is deprecated. replaced by DCB (Debug Control Block) */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief \deprecated Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< \deprecated CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< \deprecated CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< \deprecated CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< \deprecated CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< \deprecated CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< \deprecated CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< \deprecated CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< \deprecated CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< \deprecated CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< \deprecated CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: C_HALT 
Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< \deprecated CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< \deprecated CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< \deprecated CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< \deprecated CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_DWTENA_Pos 24U /*!< \deprecated CoreDebug DEMCR: DWTENA Position */ +#define CoreDebug_DEMCR_DWTENA_Msk (1UL << CoreDebug_DEMCR_DWTENA_Pos) /*!< \deprecated CoreDebug DEMCR: DWTENA Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< \deprecated CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< \deprecated CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< \deprecated CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< \deprecated CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers 
(DCB). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define 
DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type 
Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< \deprecated Core Debug Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define SysTick_BASE_NS 
(SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< \deprecated Core Debug configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
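+  \details  By default the NVIC_xxx names used by applications are plain aliases of the __NVIC_xxx implementations below.
+            Defining CMSIS_NVIC_VIRTUAL lets a project interpose its own versions through an override header; a sketch
+            (the wrapper names are hypothetical, and the override header must map every NVIC_xxx name listed below):
+  \code
+    /* compile with CMSIS_NVIC_VIRTUAL defined; in the project's cmsis_nvic_virtual.h: */
+    #define NVIC_EnableIRQ    MyRTOS_EnableIRQ    /* route calls to an RTOS wrapper (hypothetical) */
+    #define NVIC_DisableIRQ   MyRTOS_DisableIRQ
+  \endcode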
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + +#define __NVIC_SetPriorityGrouping(X) (void)(X) +#define __NVIC_GetPriorityGrouping() (0U) + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
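+  \note    A minimal usage sketch; TIMx_IRQn stands for a device specific interrupt name from the device header (hypothetical here). The priority is normally configured before the interrupt is enabled:
+  \code
+    NVIC_SetPriority(TIMx_IRQn, 1UL);   /* configure the priority first          */
+    NVIC_EnableIRQ(TIMx_IRQn);          /* then enable the interrupt in the NVIC */
+  \endcode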
+ */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IPR[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHPR[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHPR[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. 
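+  \note    Set/read-back sketch using a processor exception number (SysTick_IRQn is defined by the device header as a negative value):
+  \code
+    NVIC_SetPriority(SysTick_IRQn, 2UL);
+    uint32_t prio = NVIC_GetPriority(SysTick_IRQn);   /* yields 2 when at least two priority bits are implemented */
+  \endcode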
+ */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IPR[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHPR[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + If VTOR is not present address 0 must be mapped to SRAM. 
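+           A relocation sketch; ram_vectors is a hypothetical, suitably aligned uint32_t array in SRAM provided by the application:
+  \code
+    memcpy(ram_vectors, (void *)SCB->VTOR, sizeof(ram_vectors));   /* copy the active vector table to SRAM   */
+    SCB->VTOR = (uint32_t)ram_vectors;                             /* point VTOR at the SRAM copy            */
+    NVIC_SetVector(TIMx_IRQn, (uint32_t)TIMx_IRQHandler);          /* TIMx_* names are hypothetical examples */
+  \endcode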
+ \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *)SCB->VTOR; +#else + uint32_t *vectors = (uint32_t *)0x0U; +#endif + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *)SCB->VTOR; +#else + uint32_t *vectors = (uint32_t *)0x0U; +#endif + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[_IP_IDX(IRQn)] = ((uint32_t)(NVIC_NS->IPR[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB_NS->SHPR[_SHP_IDX(IRQn)] = ((uint32_t)(SCB_NS->SHPR[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. 
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IPR[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB_NS->SHPR[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. 
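+  \note    Read-modify-write sketch on the non-secure DAUTHCTRL from the secure side (the SPIDENSEL bit is used only as an illustration):
+  \code
+    uint32_t ctrl = TZ_DCB_GetAuthCtrl_NS();
+    TZ_DCB_SetAuthCtrl_NS(ctrl | DCB_DAUTHCTRL_SPIDENSEL_Msk);
+  \endcode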
+ */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
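+  \note    Sketch for a 1 ms non-secure tick, assuming the non-secure SysTick counts at SystemCoreClock Hz (SystemCoreClock comes from the device support code, not from this header):
+  \code
+    if (TZ_SysTick_Config_NS(SystemCoreClock / 1000UL) != 0UL)
+    {
+      /* requested reload value does not fit into the 24-bit LOAD register */
+    }
+  \endcode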
+ + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_ARMV8MBL_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/Drivers/CMSIS/Include/core_armv8mml.h b/Drivers/CMSIS/Include/core_armv8mml.h new file mode 100644 index 0000000..c119fbf --- /dev/null +++ b/Drivers/CMSIS/Include/core_armv8mml.h @@ -0,0 +1,3209 @@ +/**************************************************************************//** + * @file core_armv8mml.h + * @brief CMSIS Armv8-M Mainline Core Peripheral Access Layer Header File + * @version V5.2.3 + * @date 13. October 2021 + ******************************************************************************/ +/* + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_ARMV8MML_H_GENERIC +#define __CORE_ARMV8MML_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.<br>
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.<br>
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_ARMv8MML + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS Armv8MML definitions */ +#define __ARMv8MML_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __ARMv8MML_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __ARMv8MML_CMSIS_VERSION ((__ARMv8MML_CMSIS_VERSION_MAIN << 16U) | \ + __ARMv8MML_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (80U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + 
#if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_ARMV8MML_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_ARMV8MML_H_DEPENDANT +#define __CORE_ARMV8MML_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __ARMv8MML_REV + #define __ARMv8MML_REV 0x0000U + #warning "__ARMv8MML_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group ARMv8MML */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t _reserved1:28; /*!< bit: 4..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
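+  \note    Interrupt n corresponds to bit (n % 32) of ISER[n / 32]; a raw sketch, normally done through NVIC_EnableIRQ():
+  \code
+    NVIC->ISER[0U] = (1UL << 5U);   /* set-enable interrupt number 5 directly */
+  \endcode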
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t 
RESERVED7[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + uint32_t RESERVED4[15U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB 
ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: 
SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define 
SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR 
Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB 
CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CPn_Pos 0U /*!< SCB NSACR: CPn Position */ +#define SCB_NSACR_CPn_Msk (1UL /*<< SCB_NSACR_CPn_Pos*/) /*!< SCB NSACR: CPn Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask 
*/ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
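/* --- Editor's illustrative sketch (not part of the original header) ---------
   Shows how the SCB AIRCR key and SYSRESETREQ fields defined above are
   typically combined to request a software system reset (the same pattern
   CMSIS uses for NVIC_SystemReset).  It assumes the standard CMSIS `SCB`
   instance pointer and the __DSB()/__NOP() intrinsics that this header
   family provides elsewhere. */
static inline void request_system_reset(void)
{
  __DSB();                                              /* complete outstanding memory accesses */
  SCB->AIRCR = (0x5FAUL << SCB_AIRCR_VECTKEY_Pos) |     /* write key to unlock AIRCR            */
               (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) |  /* preserve priority grouping           */
               SCB_AIRCR_SYSRESETREQ_Msk;               /* request the reset                    */
  for (;;) { __NOP(); }                                 /* wait for the reset to take effect    */
}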
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
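/* --- Editor's illustrative sketch (not part of the original header) ---------
   A minimal example of how the SysTick_Type layout and SysTick_* bit masks
   defined above are typically used to start a periodic tick.  It assumes the
   standard CMSIS `SysTick` instance pointer and the `SystemCoreClock`
   variable normally provided elsewhere by this header family. */
static inline int systick_start_1ms(void)
{
  uint32_t reload = (SystemCoreClock / 1000U) - 1U;      /* core cycles per 1 ms tick  */

  if (reload > SysTick_LOAD_RELOAD_Msk)                  /* RELOAD is only 24 bits     */
  {
    return -1;                                           /* core clock too fast        */
  }

  SysTick->LOAD = reload;                                /* program reload value       */
  SysTick->VAL  = 0U;                                    /* clear current counter      */
  SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk |           /* use the processor clock    */
                  SysTick_CTRL_TICKINT_Msk   |           /* enable the SysTick IRQ     */
                  SysTick_CTRL_ENABLE_Msk;               /* start counting             */
  return 0;
}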
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) ITM Device Architecture Register */ + uint32_t RESERVED6[4U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (0x1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (0x1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << ITM_TCR_STALLENA_Pos) 
/*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + 
uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ + uint32_t RESERVED32[934U]; + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ + uint32_t RESERVED33[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (0x1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< 
DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ 
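/* --- Editor's illustrative sketch (not part of the original header) ---------
   One common use of the DWT bit definitions above: enabling the free-running
   cycle counter for coarse execution timing.  It assumes the standard CMSIS
   `DWT` and `CoreDebug` instance pointers and the CoreDebug_DEMCR_TRCENA_Msk
   definition that appears further down in this header. */
static inline void dwt_cyccnt_enable(void)
{
  CoreDebug->DEMCR |= CoreDebug_DEMCR_TRCENA_Msk;   /* power up the DWT/ITM block   */
  DWT->CYCCNT       = 0U;                           /* reset the cycle counter      */
  DWT->CTRL        |= DWT_CTRL_CYCCNTENA_Msk;       /* start counting core cycles   */
}

static inline uint32_t dwt_cyccnt_read(void)
{
  return DWT->CYCCNT;                               /* cycles elapsed since enable  */
}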
+#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x1UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Sizes Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Sizes Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[809U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) Software Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) Software Lock Status Register */ + uint32_t RESERVED4[4U]; + __IM uint32_t TYPE; /*!< Offset: 0xFC8 (R/ ) Device Identifier Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_SWOSCALER_Pos 0U /*!< TPI ACPR: SWOSCALER Position */ +#define TPI_ACPR_SWOSCALER_Msk (0xFFFFUL /*<< TPI_ACPR_SWOSCALER_Pos*/) /*!< TPI ACPR: SWOSCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << 
TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI Periodic Synchronization Control Register Definitions */ +#define TPI_PSCR_PSCount_Pos 0U /*!< TPI PSCR: PSCount Position */ +#define TPI_PSCR_PSCount_Msk (0x1FUL /*<< TPI_PSCR_PSCount_Pos*/) /*!< TPI PSCR: TPSCount Mask */ + +/* TPI Software Lock Status Register Definitions */ +#define TPI_LSR_nTT_Pos 1U /*!< TPI LSR: Not thirty-two bit. Position */ +#define TPI_LSR_nTT_Msk (0x1UL << TPI_LSR_nTT_Pos) /*!< TPI LSR: Not thirty-two bit. Mask */ + +#define TPI_LSR_SLK_Pos 1U /*!< TPI LSR: Software Lock status Position */ +#define TPI_LSR_SLK_Msk (0x1UL << TPI_LSR_SLK_Pos) /*!< TPI LSR: Software Lock status Mask */ + +#define TPI_LSR_SLI_Pos 0U /*!< TPI LSR: Software Lock implemented Position */ +#define TPI_LSR_SLI_Msk (0x1UL /*<< TPI_LSR_SLI_Pos*/) /*!< TPI LSR: Software Lock implemented Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFO depth Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFO depth Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) 
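/* --- Editor's illustrative sketch (not part of the original header) ---------
   A minimal ARMv8-M style MPU region setup using the RBAR/RLAR bit fields
   defined here.  It assumes the standard CMSIS `MPU` instance pointer, the
   __DSB()/__ISB() intrinsics provided elsewhere by this header family, the
   MPU_RLAR_EN_Msk definition that follows just below, and that memory
   attribute index 0 has already been programmed into MAIR0. */
static inline void mpu_region0_config(uint32_t base, uint32_t limit)
{
  MPU->CTRL = 0U;                                         /* disable MPU while reprogramming */
  MPU->RNR  = 0U;                                         /* select region 0                 */
  MPU->RBAR = (base  & MPU_RBAR_BASE_Msk)  |              /* 32-byte aligned base address    */
              (0U << MPU_RBAR_SH_Pos)      |              /* non-shareable                   */
              (0U << MPU_RBAR_AP_Pos);                    /* privileged read/write only      */
  MPU->RLAR = (limit & MPU_RLAR_LIMIT_Msk) |              /* inclusive region limit          */
              (0U << MPU_RLAR_AttrIndx_Pos)|              /* use MAIR0.Attr0                 */
              MPU_RLAR_EN_Msk;                            /* enable region 0                 */
  MPU->CTRL = MPU_CTRL_PRIVDEFENA_Msk | MPU_CTRL_ENABLE_Msk; /* default map for privileged   */
  __DSB();                                                /* ensure the new settings         */
  __ISB();                                                /* take effect before continuing   */
}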
/*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Disable Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/* Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define 
SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege 
level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/* Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FP_rounding_modes_Pos 28U /*!< MVFR0: FP rounding modes bits Position */ +#define FPU_MVFR0_FP_rounding_modes_Msk (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos) /*!< MVFR0: FP rounding modes bits Mask */ + +#define FPU_MVFR0_Short_vectors_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_Short_vectors_Msk (0xFUL << FPU_MVFR0_Short_vectors_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_Square_root_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_Square_root_Msk (0xFUL << FPU_MVFR0_Square_root_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_Divide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_Divide_Msk (0xFUL << FPU_MVFR0_Divide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FP_excep_trapping_Pos 12U /*!< MVFR0: FP exception trapping bits Position */ +#define FPU_MVFR0_FP_excep_trapping_Msk (0xFUL << FPU_MVFR0_FP_excep_trapping_Pos) /*!< MVFR0: FP exception trapping bits Mask */ + +#define FPU_MVFR0_Double_precision_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_Double_precision_Msk (0xFUL << FPU_MVFR0_Double_precision_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_Single_precision_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_Single_precision_Msk (0xFUL << FPU_MVFR0_Single_precision_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_A_SIMD_registers_Pos 0U /*!< MVFR0: A_SIMD registers bits Position */ +#define FPU_MVFR0_A_SIMD_registers_Msk (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/) /*!< MVFR0: A_SIMD registers bits Mask */ + +/* Media and VFP Feature Register 1 Definitions */ +#define FPU_MVFR1_FP_fused_MAC_Pos 28U /*!< MVFR1: FP fused MAC bits Position */ +#define FPU_MVFR1_FP_fused_MAC_Msk (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos) /*!< MVFR1: FP fused MAC bits Mask */ + +#define FPU_MVFR1_FP_HPFP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FP_HPFP_Msk (0xFUL << FPU_MVFR1_FP_HPFP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_D_NaN_mode_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_D_NaN_mode_Msk (0xFUL << FPU_MVFR1_D_NaN_mode_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + 
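/* --- Editor's illustrative sketch (not part of the original header) ---------
   Demonstrates reading an MVFR0 feature field and programming the default
   floating-point behaviour via FPDSCR, using the bit definitions above.  It
   assumes the standard CMSIS `FPU` instance pointer defined elsewhere in
   this header. */
static inline int fpu_has_double_precision(void)
{
  uint32_t dp = (FPU->MVFR0 & FPU_MVFR0_Double_precision_Msk)
                 >> FPU_MVFR0_Double_precision_Pos;
  return (dp != 0U);                                      /* non-zero: double precision supported */
}

static inline void fpu_set_default_nan_and_ftz(void)
{
  FPU->FPDSCR |= FPU_FPDSCR_DN_Msk | FPU_FPDSCR_FZ_Msk;   /* default NaN + flush-to-zero defaults */
}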
+#define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/* Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: FPMisc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: FPMisc bits Mask */ + +/*@} end of group CMSIS_FPU */ + +/* CoreDebug is deprecated. replaced by DCB (Debug Control Block) */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief \deprecated Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< \deprecated CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< \deprecated CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< \deprecated CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< \deprecated CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< \deprecated CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< \deprecated CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< \deprecated CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U 
/*!< \deprecated CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< \deprecated CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< \deprecated CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< \deprecated CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< \deprecated CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< \deprecated CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< \deprecated CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< \deprecated CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< \deprecated CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< \deprecated CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< \deprecated CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< \deprecated CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< \deprecated CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< \deprecated CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< \deprecated CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< \deprecated CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< \deprecated CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< \deprecated CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Position */ 
+#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< \deprecated CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< \deprecated CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< \deprecated CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< \deprecated CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< \deprecated CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). 
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (0x1UL << DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define 
DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (0x1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (0x1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (0x1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (0x1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (0x1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (0x1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (0x1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (0x1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (0x1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (0x1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (0x1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (0x1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (0x1UL 
<< DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (0x1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type 
Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + #define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + + #define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ + #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base 
Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< \deprecated Core Debug Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCnSCB_NS ((SCnSCB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< \deprecated Core Debug configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_register_aliases Backwards Compatibility Aliases + \brief Register alias definitions for backwards compatibility. + @{ + */ +#define ID_ADR (ID_AFR) /*!< SCB Auxiliary Feature Register */ +/*@} */ + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
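+  \note A minimal sketch, assuming an RTOS wants to wrap the NVIC interface: defining
+        CMSIS_NVIC_VIRTUAL redirects the NVIC_* names below to a user-provided
+        "cmsis_nvic_virtual.h"; the override shown here is purely hypothetical.
+  \code
+    // cmsis_nvic_virtual.h (hypothetical content):
+    #define NVIC_EnableIRQ   rtos_NVIC_EnableIRQ    // route enable requests through an RTOS wrapper
+    #define NVIC_DisableIRQ  rtos_NVIC_DisableIRQ   // route disable requests through an RTOS wrapper
+  \endcode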
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. 
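+  \note Usage sketch (the grouping value 0x05 is only an illustrative choice):
+  \code
+    NVIC_SetPriorityGrouping(0x05UL);             // split the priority bits into preempt priority and subpriority
+    uint32_t grp = NVIC_GetPriorityGrouping();    // read back SCB->AIRCR PRIGROUP (0..7)
+  \endcode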
+ */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. 
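+  \note Sketch of pend/clear-pend handling (EXTI0_IRQn is only an illustrative device-specific IRQ name):
+  \code
+    NVIC_SetPendingIRQ(EXTI0_IRQn);               // mark the interrupt pending by software
+    if (NVIC_GetPendingIRQ(EXTI0_IRQn) != 0U)
+    {
+      NVIC_ClearPendingIRQ(EXTI0_IRQn);           // discard the pending request again
+    }
+  \endcode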
+ \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
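+  \note Usage sketch (TIM2_IRQn and the chosen priority values are illustrative only):
+  \code
+    uint32_t prio = NVIC_EncodePriority(NVIC_GetPriorityGrouping(), 1UL, 0UL);
+    NVIC_SetPriority(TIM2_IRQn, prio);            // lower encoded value = higher urgency
+    NVIC_EnableIRQ(TIM2_IRQn);                    // enable only after the priority is configured
+  \endcode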
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). 
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. 
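+  \note Sketch: the returned code identifies the implemented FPU, e.g.
+  \code
+    uint32_t fpu = SCB_GetFPUType();              // 0 = no FPU, 1 = single precision, 2 = single + double precision
+  \endcode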
+ @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + +/* ########################## Cache functions #################################### */ + +#if ((defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)) || \ + (defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U))) +#include "cachel1_armv7.h" +#endif + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. 
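+  \note Sketch: the returned value can be decoded with the DCB_DAUTHCTRL_* definitions, for example
+  \code
+    uint32_t ctrl   = TZ_DCB_GetAuthCtrl_NS();
+    uint32_t spiden = _FLD2VAL(DCB_DAUTHCTRL_SPIDENSEL, ctrl);   // extract the SPIDENSEL field
+  \endcode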
+ */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
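+  \note Usage sketch (SystemCoreClock is assumed to hold the core clock frequency in Hz):
+  \code
+    if (TZ_SysTick_Config_NS(SystemCoreClock / 1000U) != 0U)    // request a 1 ms non-secure tick
+    {
+      // reload value did not fit into SysTick_LOAD, handle the error
+    }
+  \endcode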
+ + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_ARMV8MML_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/Drivers/CMSIS/Include/core_cm0.h b/Drivers/CMSIS/Include/core_cm0.h new file mode 100644 index 0000000..6441ff3 --- /dev/null +++ b/Drivers/CMSIS/Include/core_cm0.h @@ -0,0 +1,952 @@ +/**************************************************************************//** + * @file core_cm0.h + * @brief CMSIS Cortex-M0 Core Peripheral Access Layer Header File + * @version V5.0.8 + * @date 21. August 2019 + ******************************************************************************/ +/* + * Copyright (c) 2009-2019 Arm Limited. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_CM0_H_GENERIC +#define __CORE_CM0_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M0 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM0 definitions */ +#define __CM0_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM0_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM0_CMSIS_VERSION ((__CM0_CMSIS_VERSION_MAIN << 16U) | \ + __CM0_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (0U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM0_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM0_H_DEPENDANT +#define __CORE_CM0_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM0_REV + #define __CM0_REV 0x0000U + #warning "__CM0_REV not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
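Illustrative sketch (not part of the CMSIS sources): the defaults checked under __CHECK_DEVICE_DEFINES above are normally supplied by a vendor device header before core_cm0.h is included. The interrupt numbers below are placeholders, not a real device's vector map.

typedef enum
{
  NonMaskableInt_IRQn = -14,
  HardFault_IRQn      = -13,
  SVCall_IRQn         = -5,
  PendSV_IRQn         = -2,
  SysTick_IRQn        = -1,
  WWDG_IRQn           = 0        /* first device-specific interrupt (placeholder) */
} IRQn_Type;

#define __CM0_REV              0x0000U  /* core revision reported by the vendor */
#define __NVIC_PRIO_BITS       2U       /* Cortex-M0 implements 2 priority bits */
#define __Vendor_SysTickConfig 0U       /* keep the default SysTick_Config() */

#include "core_cm0.h"                   /* core definitions then use the settings above */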
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M0 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t _reserved0:1; /*!< bit: 0 Reserved */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[31U]; + __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[31U]; + __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[31U]; + __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[31U]; + uint32_t RESERVED4[64U]; + __IOM uint32_t IP[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + uint32_t RESERVED0; + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IOM uint32_t SHP[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. 
[0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB 
AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define 
SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Cortex-M0 Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor. + Therefore they are not covered by the Cortex-M0 header file. + @{ + */ +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
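Illustrative sketch (not part of the CMSIS sources): the _VAL2FLD()/_FLD2VAL() helpers above pair with the xxx_Pos/xxx_Msk definitions, for example to pick a field out of SCB->CPUID or to compose an AIRCR write.

static uint32_t cpu_partno(void)
{
  return _FLD2VAL(SCB_CPUID_PARTNO, SCB->CPUID);   /* 0xC20 identifies a Cortex-M0 */
}

static void request_reset_manually(void)
{
  /* NVIC_SystemReset() later in this header does this with the required barriers. */
  SCB->AIRCR = _VAL2FLD(SCB_AIRCR_VECTKEY, 0x05FAUL) | SCB_AIRCR_SYSRESETREQ_Msk;
}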
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ +/*#define NVIC_GetActive __NVIC_GetActive not available for Cortex-M0 */ + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + +#define __NVIC_SetPriorityGrouping(X) (void)(X) +#define __NVIC_GetPriorityGrouping() (0U) + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
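Illustrative sketch (not part of the CMSIS sources): the usual bring-up order for a device interrupt with the NVIC helpers above — set the priority, discard anything already latched, then enable. The IRQ number is passed in as a placeholder for a device-specific value.

void irq_bringup(IRQn_Type device_irq)
{
  NVIC_SetPriority(device_irq, (1UL << __NVIC_PRIO_BITS) - 1UL);  /* lowest usable priority */
  NVIC_ClearPendingIRQ(device_irq);   /* drop anything latched before enabling */
  NVIC_EnableIRQ(device_irq);
}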
+ */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. 
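Illustrative worked example (not part of the CMSIS sources) of the word-addressed priority write above, since Armv6-M has no byte access to NVIC->IP:

/*   IRQn = 5, __NVIC_PRIO_BITS = 2
 *   _IP_IDX(5)    = 5 >> 2        = 1      -> NVIC->IP[1]
 *   _BIT_SHIFT(5) = (5 & 3) * 8   = 8      -> byte lane 1 of that word
 *   priority 2    -> 2 << (8 - 2) = 0x80   -> written into bits [15:8]
 * Reading back, __NVIC_GetPriority(5) takes (IP[1] >> 8) & 0xFF = 0x80 and
 * shifts it down by (8 - __NVIC_PRIO_BITS) to recover the value 2. */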
+ \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + Address 0 must be mapped to SRAM. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */ + *(vectors + (int32_t)IRQn) = vector; /* use pointer arithmetic to access vector */ + /* ARM Application Note 321 states that the M0 does not require the architectural barrier */ +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. 
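Illustrative sketch (not part of the CMSIS sources): NVIC_EncodePriority()/NVIC_DecodePriority() pack and unpack a priority value consistently; on Cortex-M0 the grouping argument is software bookkeeping only, since __NVIC_SetPriorityGrouping above is a stub.

void set_split_priority(IRQn_Type device_irq)
{
  uint32_t encoded = NVIC_EncodePriority(6U, 1U, 1U);   /* with 2 priority bits: 1 "preempt" bit, 1 "sub" bit */
  NVIC_SetPriority(device_irq, encoded);                /* encoded == 3 for these inputs */
}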
+ \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */ + return *(vectors + (int32_t)IRQn); /* use pointer arithmetic to access vector */ +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM0_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/Drivers/CMSIS/Include/core_cm0plus.h b/Drivers/CMSIS/Include/core_cm0plus.h new file mode 100644 index 0000000..4e7179a --- /dev/null +++ b/Drivers/CMSIS/Include/core_cm0plus.h @@ -0,0 +1,1087 @@ +/**************************************************************************//** + * @file core_cm0plus.h + * @brief CMSIS Cortex-M0+ Core Peripheral Access Layer Header File + * @version V5.0.9 + * @date 21. August 2019 + ******************************************************************************/ +/* + * Copyright (c) 2009-2019 Arm Limited. All rights reserved. 
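Illustrative sketch (not part of the CMSIS sources): a last-resort error hook that reboots through the AIRCR request implemented by __NVIC_SystemReset() above; the hook name is a placeholder.

__NO_RETURN void fatal_error(void)
{
  __disable_irq();        /* keep late interrupts from running during teardown */
  NVIC_SystemReset();     /* requests SYSRESETREQ and does not return */
}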
+ * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_CM0PLUS_H_GENERIC +#define __CORE_CM0PLUS_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex-M0+ + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM0+ definitions */ +#define __CM0PLUS_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM0PLUS_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM0PLUS_CMSIS_VERSION ((__CM0PLUS_CMSIS_VERSION_MAIN << 16U) | \ + __CM0PLUS_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (0U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM0PLUS_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM0PLUS_H_DEPENDANT +#define __CORE_CM0PLUS_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM0PLUS_REV + #define __CM0PLUS_REV 0x0000U + #warning "__CM0PLUS_REV not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 0U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex-M0+ */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core MPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[31U]; + __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[31U]; + __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[31U]; + __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[31U]; + uint32_t RESERVED4[64U]; + __IOM uint32_t IP[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
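Illustrative sketch (not part of the CMSIS sources): the IPSR layout above is what the __get_IPSR() intrinsic from cmsis_compiler.h returns; a non-zero exception number means the code is running in Handler mode.

static int in_interrupt_context(void)
{
  return (__get_IPSR() & IPSR_ISR_Msk) != 0U;   /* 0 = Thread mode, otherwise active exception number */
}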
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ +#else + uint32_t RESERVED0; +#endif + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IOM uint32_t SHP[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) +/* SCB Interrupt Control State Register Definitions */ +#define 
SCB_VTOR_TBLOFF_Pos 8U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0xFFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#endif + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
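Illustrative sketch (not part of the CMSIS sources): on a Cortex-M0+ that implements VTOR (__VTOR_PRESENT == 1), the vector table can be copied to SRAM and re-based through SCB->VTOR. The table size, linker symbol, and GCC-style alignment attribute below are assumptions.

#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U)
extern uint32_t __vector_table_rom[48];                       /* linker-provided ROM table (assumption) */
static uint32_t vector_table_ram[48] __attribute__((aligned(256)));  /* VTOR needs at least 256-byte alignment here */

void relocate_vectors(void)
{
  for (uint32_t i = 0U; i < 48U; i++)
  {
    vector_table_ram[i] = __vector_table_rom[i];
  }
  SCB->VTOR = (uint32_t)vector_table_ram;   /* TBLOFF takes bits [31:8] */
  __DSB();                                  /* make sure the new table is in effect */
}
#endif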
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
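Illustrative sketch (not part of the CMSIS sources): a polled delay built on the SysTick registers above, watching COUNTFLAG instead of enabling the interrupt; reading CTRL clears COUNTFLAG, so one read per wrap is sufficient.

void delay_ticks(uint32_t reload, uint32_t wraps)
{
  SysTick->LOAD = reload & SysTick_LOAD_RELOAD_Msk;
  SysTick->VAL  = 0UL;
  SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | SysTick_CTRL_ENABLE_Msk;   /* no TICKINT */

  while (wraps > 0U)
  {
    if ((SysTick->CTRL & SysTick_CTRL_COUNTFLAG_Msk) != 0UL)
    {
      wraps--;                              /* one reload period has elapsed */
    }
  }
  SysTick->CTRL = 0UL;                      /* stop the timer again */
}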
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region RNRber Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ +} MPU_Type; + +#define MPU_TYPE_RALIASES 1U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 8U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0xFFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/* MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: 
Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Cortex-M0+ Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor. + Therefore they are not covered by the Cortex-M0+ header file. + @{ + */ +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
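         A typical call sequence, sketched with a placeholder interrupt name (WWDG_IRQn
         stands for any device-specific IRQn_Type value from the device header):

             NVIC_SetPriority(WWDG_IRQn, 1U);   // lower value = higher urgency; 2 priority bits by default on Armv6-M
             NVIC_EnableIRQ(WWDG_IRQn);
             // ... the interrupt is serviced by the handler named in the vector table ...
             NVIC_DisableIRQ(WWDG_IRQn);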
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ +/*#define NVIC_GetActive __NVIC_GetActive not available for Cortex-M0+ */ + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + +#define __NVIC_SetPriorityGrouping(X) (void)(X) +#define __NVIC_GetPriorityGrouping() (0U) + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
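         Example sketch: temporarily masking a single interrupt around an access to data
         shared with its handler (WWDG_IRQn and update_shared_state() are placeholders):

             uint32_t was_enabled = NVIC_GetEnableIRQ(WWDG_IRQn);
             NVIC_DisableIRQ(WWDG_IRQn);      // the __DSB()/__ISB() inside make the disable take effect
             update_shared_state();           // hypothetical helper touching ISR-shared data
             if (was_enabled != 0U)
             {
               NVIC_EnableIRQ(WWDG_IRQn);
             }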
+ */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. 
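         Worked example, assuming the Armv6-M default of __NVIC_PRIO_BITS == 2:
         PriorityGroup 6 leaves 1 preempt bit and 1 subpriority bit, so

             NVIC_EncodePriority(6UL, 1UL, 1UL);   // returns (1 << 1) | 1 = 3

         and NVIC_SetPriority() then stores 3 << (8 - 2) = 0xC0 in the priority byte.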
+ \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + If VTOR is not present address 0 must be mapped to SRAM. 
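         Sketch of installing a handler at run time, assuming __VTOR_PRESENT == 1 and a
         suitably aligned RAM copy of the vector table (all names here are placeholders;
         48 words cover the 16 exceptions plus 32 device interrupts assumed here):

             static uint32_t ram_vectors[48] __attribute__((aligned(256)));

             for (uint32_t i = 0U; i < 48U; i++)                  // copy the current table
             {
               ram_vectors[i] = ((uint32_t *)SCB->VTOR)[i];
             }
             SCB->VTOR = (uint32_t)ram_vectors;                   // relocate it to SRAM
             NVIC_SetVector(WWDG_IRQn, (uint32_t)MyIRQHandler);   // install the new handler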
+ \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +#else + uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */ + *(vectors + (int32_t)IRQn) = vector; /* use pointer arithmetic to access vector */ +#endif + /* ARM Application Note 321 states that the M0+ does not require the architectural barrier */ +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +#else + uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */ + return *(vectors + (int32_t)IRQn); /* use pointer arithmetic to access vector */ +#endif +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv7.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
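         Typical use, assuming the device startup code maintains the CMSIS SystemCoreClock
         variable (1 ms tick; the handler name is fixed by the vector table, the counter is
         illustrative):

             volatile uint32_t msTicks = 0U;

             void SysTick_Handler(void)
             {
               msTicks++;
             }

             if (SysTick_Config(SystemCoreClock / 1000U) != 0U)
             {
               // requested reload value does not fit in the 24-bit LOAD register
             }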
+ */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM0PLUS_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/Drivers/CMSIS/Include/core_cm1.h b/Drivers/CMSIS/Include/core_cm1.h new file mode 100644 index 0000000..76b4569 --- /dev/null +++ b/Drivers/CMSIS/Include/core_cm1.h @@ -0,0 +1,979 @@ +/**************************************************************************//** + * @file core_cm1.h + * @brief CMSIS Cortex-M1 Core Peripheral Access Layer Header File + * @version V1.0.1 + * @date 12. November 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_CM1_H_GENERIC +#define __CORE_CM1_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M1 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM1 definitions */ +#define __CM1_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM1_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM1_CMSIS_VERSION ((__CM1_CMSIS_VERSION_MAIN << 16U) | \ + __CM1_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (1U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM1_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM1_H_DEPENDANT +#define __CORE_CM1_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM1_REV + #define __CM1_REV 0x0100U + #warning "__CM1_REV not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
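       For example, a device header typically declares a peripheral block with these
       qualifiers (an illustrative fragment, not an actual STM32 register map):

           typedef struct
           {
             __IM  uint32_t SR;    // read-only status register
             __IOM uint32_t CR;    // read/write control register
             __OM  uint32_t KR;    // write-only key register
           } FOO_TypeDef;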
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M1 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t _reserved0:1; /*!< bit: 0 Reserved */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[31U]; + __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[31U]; + __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[31U]; + __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[31U]; + uint32_t RESERVED4[64U]; + __IOM uint32_t IP[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + uint32_t RESERVED0; + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IOM uint32_t SHP[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. 
[0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB 
AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +} SCnSCB_Type; + +/* Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_ITCMUAEN_Pos 4U /*!< ACTLR: Instruction TCM Upper Alias Enable Position */ +#define SCnSCB_ACTLR_ITCMUAEN_Msk (1UL << SCnSCB_ACTLR_ITCMUAEN_Pos) /*!< ACTLR: Instruction TCM Upper Alias Enable Mask */ + +#define SCnSCB_ACTLR_ITCMLAEN_Pos 3U /*!< ACTLR: Instruction TCM Lower Alias Enable Position */ +#define SCnSCB_ACTLR_ITCMLAEN_Msk (1UL << SCnSCB_ACTLR_ITCMLAEN_Pos) /*!< ACTLR: Instruction TCM Lower Alias Enable Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Cortex-M1 Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor. + Therefore they are not covered by the Cortex-M1 header file. + @{ + */ +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. 
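       Example sketch using the SCB register fields defined earlier in this file:

           uint32_t partno = _FLD2VAL(SCB_CPUID_PARTNO, SCB->CPUID);   // extract the part number field
           SCB->ICSR = _VAL2FLD(SCB_ICSR_PENDSVSET, 1U);               // build a field value to pend PendSV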
+*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. + @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ +/*#define NVIC_GetActive __NVIC_GetActive not available for Cortex-M1 */ + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define 
_IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + +#define __NVIC_SetPriorityGrouping(X) (void)(X) +#define __NVIC_GetPriorityGrouping() (0U) + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
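         Example sketch: negative IRQn values select system exceptions, which are routed to
         SCB->SHP rather than NVIC->IP (a common RTOS-style setup, using the standard
         exception numbers from the device header):

             NVIC_SetPriority(SVCall_IRQn,  0U);                               // highest urgency
             NVIC_SetPriority(PendSV_IRQn,  (1UL << __NVIC_PRIO_BITS) - 1UL);  // lowest priority
             NVIC_SetPriority(SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL);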
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). 
+ */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + Address 0 must be mapped to SRAM. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)0x0U; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + /* ARM Application Note 321 states that the M1 does not require the architectural barrier */ +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)0x0U; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. 
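         A simple polling delay built on top of the resulting tick interrupt (sketch;
         msTicks is assumed to be incremented once per tick by the application's
         SysTick_Handler):

             extern volatile uint32_t msTicks;

             void delay_ms(uint32_t ms)
             {
               uint32_t start = msTicks;
               while ((msTicks - start) < ms)
               {
                 __NOP();
               }
             }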
+ \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM1_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/Drivers/CMSIS/Include/core_cm23.h b/Drivers/CMSIS/Include/core_cm23.h new file mode 100644 index 0000000..55fff99 --- /dev/null +++ b/Drivers/CMSIS/Include/core_cm23.h @@ -0,0 +1,2297 @@ +/**************************************************************************//** + * @file core_cm23.h + * @brief CMSIS Cortex-M23 Core Peripheral Access Layer Header File + * @version V5.1.0 + * @date 11. February 2020 + ******************************************************************************/ +/* + * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM23_H_GENERIC +#define __CORE_CM23_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M23 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS definitions */ +#define __CM23_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM23_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM23_CMSIS_VERSION ((__CM23_CMSIS_VERSION_MAIN << 16U) | \ + __CM23_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (23U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM23_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM23_H_DEPENDANT +#define __CORE_CM23_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM23_REV + #define __CM23_REV 0x0000U + #warning "__CM23_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 0U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" 
+ #endif + + #ifndef __ETM_PRESENT + #define __ETM_PRESENT 0U + #warning "__ETM_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MTB_PRESENT + #define __MTB_PRESENT 0U + #warning "__MTB_PRESENT not defined in device header file; using default!" + #endif + +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M23 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint32_t IPR[124U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ +#else + uint32_t RESERVED0; +#endif + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IOM uint32_t SHPR[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING 
Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#endif + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ 
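As an illustrative sketch (not part of the original header), the AIRCR definitions above are typically used as follows to request a system reset; the helper name is hypothetical, and the sketch assumes the SCB instance macro defined later in this file plus the __DSB()/__NOP() intrinsics from cmsis_compiler.h:

static inline void example_request_system_reset(void)
{
  __DSB();                                              /* complete outstanding memory accesses first */
  SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) |
                          SCB_AIRCR_SYSRESETREQ_Msk);   /* key value 0x5FA unlocks the write that sets SYSRESETREQ */
  __DSB();
  for (;;) { __NOP(); }                                 /* wait until the reset takes effect */
}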
+#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + uint32_t RESERVED0[6U]; + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT 
CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x3UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). 
+ */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t ITFTTD0; /*!< Offset: 0xEEC (R/ ) Integration Test FIFO Test Data 0 Register */ + __IOM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/W) Integration Test ATB Control Register 2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) Integration Test ATB Control Register 0 */ + __IM uint32_t ITFTTD1; /*!< Offset: 0xEFC (R/ ) Integration Test FIFO Test Data 1 Register */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define 
TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration Test FIFO Test Data 0 Register Definitions */ +#define TPI_ITFTTD0_ATB_IF2_ATVALID_Pos 29U /*!< TPI ITFTTD0: ATB Interface 2 ATVALIDPosition */ +#define TPI_ITFTTD0_ATB_IF2_ATVALID_Msk (0x3UL << TPI_ITFTTD0_ATB_IF2_ATVALID_Pos) /*!< TPI ITFTTD0: ATB Interface 2 ATVALID Mask */ + +#define TPI_ITFTTD0_ATB_IF2_bytecount_Pos 27U /*!< TPI ITFTTD0: ATB Interface 2 byte count Position */ +#define TPI_ITFTTD0_ATB_IF2_bytecount_Msk (0x3UL << TPI_ITFTTD0_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 2 byte count Mask */ + +#define TPI_ITFTTD0_ATB_IF1_ATVALID_Pos 26U /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Position */ +#define TPI_ITFTTD0_ATB_IF1_ATVALID_Msk (0x3UL << TPI_ITFTTD0_ATB_IF1_ATVALID_Pos) /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Mask */ + +#define TPI_ITFTTD0_ATB_IF1_bytecount_Pos 24U /*!< TPI ITFTTD0: ATB Interface 1 byte count Position */ +#define TPI_ITFTTD0_ATB_IF1_bytecount_Msk (0x3UL << TPI_ITFTTD0_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 1 byte countt Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data2_Pos 16U /*!< TPI ITFTTD0: ATB Interface 1 data2 Position */ +#define TPI_ITFTTD0_ATB_IF1_data2_Msk (0xFFUL << TPI_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPI ITFTTD0: ATB Interface 1 data2 Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data1_Pos 8U /*!< TPI ITFTTD0: ATB Interface 1 data1 Position */ +#define TPI_ITFTTD0_ATB_IF1_data1_Msk (0xFFUL << TPI_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPI ITFTTD0: ATB Interface 1 data1 Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data0_Pos 0U /*!< TPI ITFTTD0: ATB Interface 1 data0 Position */ +#define TPI_ITFTTD0_ATB_IF1_data0_Msk (0xFFUL /*<< TPI_ITFTTD0_ATB_IF1_data0_Pos*/) /*!< TPI ITFTTD0: ATB Interface 1 data0 Mask */ + +/* TPI Integration Test ATB Control Register 2 Register Definitions */ +#define TPI_ITATBCTR2_AFVALID2S_Pos 1U /*!< TPI ITATBCTR2: AFVALID2S Position */ +#define TPI_ITATBCTR2_AFVALID2S_Msk (0x1UL << TPI_ITATBCTR2_AFVALID2S_Pos) /*!< TPI ITATBCTR2: AFVALID2SS Mask */ + +#define TPI_ITATBCTR2_AFVALID1S_Pos 1U /*!< TPI ITATBCTR2: AFVALID1S Position */ +#define TPI_ITATBCTR2_AFVALID1S_Msk (0x1UL << TPI_ITATBCTR2_AFVALID1S_Pos) /*!< TPI ITATBCTR2: AFVALID1SS Mask */ + +#define TPI_ITATBCTR2_ATREADY2S_Pos 0U /*!< TPI ITATBCTR2: ATREADY2S Position */ +#define TPI_ITATBCTR2_ATREADY2S_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2S_Pos*/) /*!< TPI ITATBCTR2: ATREADY2S Mask */ + +#define TPI_ITATBCTR2_ATREADY1S_Pos 0U /*!< TPI ITATBCTR2: ATREADY1S Position */ +#define TPI_ITATBCTR2_ATREADY1S_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1S_Pos*/) /*!< TPI ITATBCTR2: ATREADY1S Mask */ + +/* TPI Integration Test FIFO Test Data 1 Register Definitions */ +#define TPI_ITFTTD1_ATB_IF2_ATVALID_Pos 29U /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Position */ +#define TPI_ITFTTD1_ATB_IF2_ATVALID_Msk (0x3UL << TPI_ITFTTD1_ATB_IF2_ATVALID_Pos) /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Mask */ + +#define TPI_ITFTTD1_ATB_IF2_bytecount_Pos 27U /*!< TPI ITFTTD1: ATB Interface 2 byte count Position */ +#define TPI_ITFTTD1_ATB_IF2_bytecount_Msk (0x3UL << TPI_ITFTTD1_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 2 byte count Mask */ + +#define TPI_ITFTTD1_ATB_IF1_ATVALID_Pos 26U /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Position */ +#define TPI_ITFTTD1_ATB_IF1_ATVALID_Msk (0x3UL << TPI_ITFTTD1_ATB_IF1_ATVALID_Pos) /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Mask */ + +#define TPI_ITFTTD1_ATB_IF1_bytecount_Pos 24U /*!< TPI ITFTTD1: ATB Interface 1 byte 
count Position */ +#define TPI_ITFTTD1_ATB_IF1_bytecount_Msk (0x3UL << TPI_ITFTTD1_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 1 byte countt Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data2_Pos 16U /*!< TPI ITFTTD1: ATB Interface 2 data2 Position */ +#define TPI_ITFTTD1_ATB_IF2_data2_Msk (0xFFUL << TPI_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPI ITFTTD1: ATB Interface 2 data2 Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data1_Pos 8U /*!< TPI ITFTTD1: ATB Interface 2 data1 Position */ +#define TPI_ITFTTD1_ATB_IF2_data1_Msk (0xFFUL << TPI_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPI ITFTTD1: ATB Interface 2 data1 Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data0_Pos 0U /*!< TPI ITFTTD1: ATB Interface 2 data0 Position */ +#define TPI_ITFTTD1_ATB_IF2_data0_Msk (0xFFUL /*<< TPI_ITFTTD1_ATB_IF2_data0_Pos*/) /*!< TPI ITFTTD1: ATB Interface 2 data0 Mask */ + +/* TPI Integration Test ATB Control Register 0 Definitions */ +#define TPI_ITATBCTR0_AFVALID2S_Pos 1U /*!< TPI ITATBCTR0: AFVALID2S Position */ +#define TPI_ITATBCTR0_AFVALID2S_Msk (0x1UL << TPI_ITATBCTR0_AFVALID2S_Pos) /*!< TPI ITATBCTR0: AFVALID2SS Mask */ + +#define TPI_ITATBCTR0_AFVALID1S_Pos 1U /*!< TPI ITATBCTR0: AFVALID1S Position */ +#define TPI_ITATBCTR0_AFVALID1S_Msk (0x1UL << TPI_ITATBCTR0_AFVALID1S_Pos) /*!< TPI ITATBCTR0: AFVALID1SS Mask */ + +#define TPI_ITATBCTR0_ATREADY2S_Pos 0U /*!< TPI ITATBCTR0: ATREADY2S Position */ +#define TPI_ITATBCTR0_ATREADY2S_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2S_Pos*/) /*!< TPI ITATBCTR0: ATREADY2S Mask */ + +#define TPI_ITATBCTR0_ATREADY1S_Pos 0U /*!< TPI ITATBCTR0: ATREADY1S Position */ +#define TPI_ITATBCTR0_ATREADY1S_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1S_Pos*/) /*!< TPI ITATBCTR0: ATREADY1S Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFOSZ Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFOSZ Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define TPI_DEVID_NrTraceInput_Msk (0x3FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to 
access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + uint32_t RESERVED0[7U]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 1U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: EN Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: EN Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< 
MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#endif +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/*@} end of group 
CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/* CoreDebug is deprecated. replaced by DCB (Debug Control Block) */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief \deprecated Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< \deprecated CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< \deprecated CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< \deprecated CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< \deprecated CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< \deprecated CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< \deprecated CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< \deprecated CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< \deprecated CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< \deprecated CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< \deprecated CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< 
\deprecated CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< \deprecated CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< \deprecated CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< \deprecated CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< \deprecated CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register */ +#define CoreDebug_DEMCR_DWTENA_Pos 24U /*!< \deprecated CoreDebug DEMCR: DWTENA Position */ +#define CoreDebug_DEMCR_DWTENA_Msk (1UL << CoreDebug_DEMCR_DWTENA_Pos) /*!< \deprecated CoreDebug DEMCR: DWTENA Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< \deprecated CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< \deprecated CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< \deprecated CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< \deprecated CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug 
Control Block Registers (DCB). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ 
+#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type 
Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< \deprecated Core Debug Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define SysTick_BASE_NS 
(SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< \deprecated Core Debug configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else +/*#define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping not available for Cortex-M23 */ +/*#define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping not available for Cortex-M23 */ + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + +#define __NVIC_SetPriorityGrouping(X) (void)(X) +#define __NVIC_GetPriorityGrouping() (0U) + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
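+
+           Usage sketch (illustrative only; TIMx_IRQn is a placeholder for a
+           device-specific interrupt number defined in the device header):
+             NVIC_SetPriority(TIMx_IRQn, 1U);
+             NVIC_EnableIRQ(TIMx_IRQn);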
+ */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IPR[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHPR[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHPR[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. 
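+
+           Example (illustrative; UARTx_IRQn is a placeholder for a device-specific
+           interrupt number). The value read back equals the value previously set,
+           truncated to the implemented priority bits:
+             NVIC_SetPriority(UARTx_IRQn, 3U);
+             uint32_t prio = NVIC_GetPriority(UARTx_IRQn);   // 3U if at least 2 priority bits are implemented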
+ */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IPR[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHPR[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + If VTOR is not present address 0 must be mapped to SRAM. 
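+           Typical use (sketch, assuming the vector table has already been copied to
+           SRAM and SCB->VTOR points to it; MyIRQ_Handler and TIMx_IRQn are placeholders):
+             NVIC_SetVector(TIMx_IRQn, (uint32_t)MyIRQ_Handler);
+             (void)NVIC_GetVector(TIMx_IRQn);   // read back the installed handler address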
+ \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *)SCB->VTOR; +#else + uint32_t *vectors = (uint32_t *)0x0U; +#endif + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *)SCB->VTOR; +#else + uint32_t *vectors = (uint32_t *)0x0U; +#endif + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[_IP_IDX(IRQn)] = ((uint32_t)(NVIC_NS->IPR[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB_NS->SHPR[_SHP_IDX(IRQn)] = ((uint32_t)(SCB_NS->SHPR[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. 
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IPR[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB_NS->SHPR[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. 
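+
+           Example (illustrative read-modify-write from the secure side; which debug
+           authentication bits may actually be changed is implementation specific):
+             uint32_t ctrl = TZ_DCB_GetAuthCtrl_NS();
+             TZ_DCB_SetAuthCtrl_NS(ctrl | DCB_DAUTHCTRL_INTSPIDEN_Msk);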
+ */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
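+
+           Example (illustrative; SystemCoreClock is the core clock variable provided
+           by the device's system_<device>.c and is assumed here to be the non-secure
+           SysTick input clock). Configure a 1 ms non-secure tick from the secure side:
+             (void)TZ_SysTick_Config_NS(SystemCoreClock / 1000U);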
+ + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM23_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/Drivers/CMSIS/Include/core_cm3.h b/Drivers/CMSIS/Include/core_cm3.h new file mode 100644 index 0000000..74fb87e --- /dev/null +++ b/Drivers/CMSIS/Include/core_cm3.h @@ -0,0 +1,1943 @@ +/**************************************************************************//** + * @file core_cm3.h + * @brief CMSIS Cortex-M3 Core Peripheral Access Layer Header File + * @version V5.1.2 + * @date 04. June 2021 + ******************************************************************************/ +/* + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_CM3_H_GENERIC +#define __CORE_CM3_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M3 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM3 definitions */ +#define __CM3_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM3_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM3_CMSIS_VERSION ((__CM3_CMSIS_VERSION_MAIN << 16U) | \ + __CM3_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (3U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM3_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM3_H_DEPENDANT +#define __CORE_CM3_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM3_REV + #define __CM3_REV 0x0200U + #warning "__CM3_REV not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M3 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:27; /*!< bit: 0..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:1; /*!< bit: 9 Reserved */ + uint32_t ICI_IT_1:6; /*!< bit: 10..15 ICI/IT part 1 */ + uint32_t _reserved1:8; /*!< bit: 16..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit */ + uint32_t ICI_IT_2:2; /*!< bit: 25..26 ICI/IT part 2 */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_ICI_IT_2_Pos 25U /*!< xPSR: ICI/IT part 2 Position */ +#define xPSR_ICI_IT_2_Msk (3UL << xPSR_ICI_IT_2_Pos) /*!< xPSR: ICI/IT part 2 Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ICI_IT_1_Pos 10U /*!< xPSR: ICI/IT part 1 Position */ +#define xPSR_ICI_IT_1_Msk (0x3FUL << xPSR_ICI_IT_1_Pos) /*!< xPSR: ICI/IT part 1 Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
+ */ +typedef struct +{ + __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[24U]; + __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[24U]; + __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[24U]; + __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[24U]; + __IOM uint32_t IABR[8U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[56U]; + __IOM uint8_t IP[240U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED5[644U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHP[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ISAR[5U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + uint32_t RESERVED0[5U]; + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define 
SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#if defined (__CM3_REV) && (__CM3_REV < 0x0201U) /* core r2p1 */ +#define SCB_VTOR_TBLBASE_Pos 29U /*!< SCB VTOR: TBLBASE Position */ +#define SCB_VTOR_TBLBASE_Msk (1UL << SCB_VTOR_TBLBASE_Pos) /*!< SCB VTOR: TBLBASE Mask */ + +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x3FFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#else +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#endif + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: 
PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +#define SCB_AIRCR_VECTRESET_Pos 0U /*!< SCB AIRCR: VECTRESET Position */ +#define SCB_AIRCR_VECTRESET_Msk (1UL /*<< SCB_AIRCR_VECTRESET_Pos*/) /*!< SCB AIRCR: VECTRESET Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +#define SCB_CCR_NONBASETHRDENA_Pos 0U /*!< SCB CCR: NONBASETHRDENA Position */ +#define SCB_CCR_NONBASETHRDENA_Msk (1UL /*<< SCB_CCR_NONBASETHRDENA_Pos*/) /*!< SCB CCR: NONBASETHRDENA Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask 
*/ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define 
SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 
1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ +#if defined (__CM3_REV) && (__CM3_REV >= 0x200U) + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +#else + uint32_t RESERVED1[1U]; +#endif +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/* Auxiliary Control Register Definitions */ +#if defined (__CM3_REV) && (__CM3_REV >= 0x200U) +#define SCnSCB_ACTLR_DISOOFP_Pos 9U /*!< ACTLR: DISOOFP Position */ +#define SCnSCB_ACTLR_DISOOFP_Msk (1UL << SCnSCB_ACTLR_DISOOFP_Pos) /*!< ACTLR: DISOOFP Mask */ + +#define SCnSCB_ACTLR_DISFPCA_Pos 8U /*!< ACTLR: DISFPCA Position */ +#define SCnSCB_ACTLR_DISFPCA_Msk (1UL << SCnSCB_ACTLR_DISFPCA_Pos) /*!< ACTLR: DISFPCA Mask */ + +#define SCnSCB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ +#define SCnSCB_ACTLR_DISFOLD_Msk (1UL << SCnSCB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ + +#define SCnSCB_ACTLR_DISDEFWBUF_Pos 1U /*!< ACTLR: DISDEFWBUF Position */ +#define SCnSCB_ACTLR_DISDEFWBUF_Msk (1UL << SCnSCB_ACTLR_DISDEFWBUF_Pos) /*!< ACTLR: DISDEFWBUF Mask */ + +#define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ +#define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ +#endif + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
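   Usage sketch (illustrative addition, not part of the original CMSIS text; assumes a
   hypothetical 100 MHz processor clock and a 1 ms tick period):
   \code
   SysTick->LOAD = (100000000UL / 1000UL) - 1UL;    // reload value for a 1 ms period
   SysTick->VAL  = 0UL;                             // clear the current counter value
   SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk |     // run from the processor clock
                   SysTick_CTRL_TICKINT_Msk   |     // request the SysTick exception on wrap
                   SysTick_CTRL_ENABLE_Msk;         // start the counter
   \endcode
   The SysTick_Config() helper later in this file performs the same sequence and additionally
   sets the SysTick exception priority.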
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
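   Usage sketch (illustrative addition, not part of the original CMSIS text): emitting one byte
   through stimulus port 0, guarded the same way as the ITM_SendChar() helper defined later in
   this file; the ITM and the port must first have been enabled by the debug tool:
   \code
   if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) &&   // ITM enabled
       ((ITM->TER & 1UL) != 0UL))                    // stimulus port 0 enabled
   {
     while (ITM->PORT[0U].u32 == 0UL) { }            // wait until the port can accept data
     ITM->PORT[0U].u8 = (uint8_t)'A';                // write one byte
   }
   \endcode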
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[6U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TraceBusID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TraceBusID_Msk (0x7FUL << ITM_TCR_TraceBusID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPrescale_Pos 8U /*!< ITM TCR: TSPrescale Position */ +#define ITM_TCR_TSPrescale_Msk (3UL << ITM_TCR_TSPrescale_Pos) /*!< ITM TCR: TSPrescale Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define 
ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + __IOM uint32_t MASK0; /*!< Offset: 0x024 (R/W) Mask Register 0 */ + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED0[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + __IOM uint32_t MASK1; /*!< Offset: 0x034 (R/W) Mask Register 1 */ + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + __IOM uint32_t MASK2; /*!< Offset: 0x044 (R/W) Mask Register 2 */ + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + __IOM uint32_t MASK3; /*!< Offset: 0x054 (R/W) Mask Register 3 */ + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define 
DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Mask Register Definitions */ +#define DWT_MASK_MASK_Pos 0U /*!< DWT MASK: MASK Position */ +#define DWT_MASK_MASK_Msk (0x1FUL /*<< DWT_MASK_MASK_Pos*/) /*!< DWT MASK: MASK Mask */ + +/* DWT Comparator Function Register 
Definitions */ +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVADDR1_Pos 16U /*!< DWT FUNCTION: DATAVADDR1 Position */ +#define DWT_FUNCTION_DATAVADDR1_Msk (0xFUL << DWT_FUNCTION_DATAVADDR1_Pos) /*!< DWT FUNCTION: DATAVADDR1 Mask */ + +#define DWT_FUNCTION_DATAVADDR0_Pos 12U /*!< DWT FUNCTION: DATAVADDR0 Position */ +#define DWT_FUNCTION_DATAVADDR0_Msk (0xFUL << DWT_FUNCTION_DATAVADDR0_Pos) /*!< DWT FUNCTION: DATAVADDR0 Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_LNK1ENA_Pos 9U /*!< DWT FUNCTION: LNK1ENA Position */ +#define DWT_FUNCTION_LNK1ENA_Msk (0x1UL << DWT_FUNCTION_LNK1ENA_Pos) /*!< DWT FUNCTION: LNK1ENA Mask */ + +#define DWT_FUNCTION_DATAVMATCH_Pos 8U /*!< DWT FUNCTION: DATAVMATCH Position */ +#define DWT_FUNCTION_DATAVMATCH_Msk (0x1UL << DWT_FUNCTION_DATAVMATCH_Pos) /*!< DWT FUNCTION: DATAVMATCH Mask */ + +#define DWT_FUNCTION_CYCMATCH_Pos 7U /*!< DWT FUNCTION: CYCMATCH Position */ +#define DWT_FUNCTION_CYCMATCH_Msk (0x1UL << DWT_FUNCTION_CYCMATCH_Pos) /*!< DWT FUNCTION: CYCMATCH Mask */ + +#define DWT_FUNCTION_EMITRANGE_Pos 5U /*!< DWT FUNCTION: EMITRANGE Position */ +#define DWT_FUNCTION_EMITRANGE_Msk (0x1UL << DWT_FUNCTION_EMITRANGE_Pos) /*!< DWT FUNCTION: EMITRANGE Mask */ + +#define DWT_FUNCTION_FUNCTION_Pos 0U /*!< DWT FUNCTION: FUNCTION Position */ +#define DWT_FUNCTION_FUNCTION_Msk (0xFUL /*<< DWT_FUNCTION_FUNCTION_Pos*/) /*!< DWT FUNCTION: FUNCTION Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). 
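   Usage sketch (illustrative addition, not part of the original CMSIS text): the TPIU is normally
   programmed by the debug tool, but firmware can also select the SWO protocol and baud rate via
   the SPPR/ACPR fields defined below; the protocol value 2 (NRZ, UART-style SWO) and the 100 MHz
   trace clock with a 2 MHz SWO rate are assumptions for illustration only:
   \code
   TPI->SPPR = 2UL & TPI_SPPR_TXMODE_Msk;                                    // assumed: 2 selects NRZ (UART-style) SWO
   TPI->ACPR = ((100000000UL / 2000000UL) - 1UL) & TPI_ACPR_PRESCALER_Msk;   // SWO baud = trace clock / (prescaler + 1)
   \endcode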
+ */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IM uint32_t FSCR; /*!< Offset: 0x308 (R/ ) Formatter Synchronization Counter Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t FIFO0; /*!< Offset: 0xEEC (R/ ) Integration ETM Data */ + __IM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/ ) ITATBCTR2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) ITATBCTR0 */ + __IM uint32_t FIFO1; /*!< Offset: 0xEFC (R/ ) Integration ITM Data */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) TPIU_DEVID */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) TPIU_DEVTYPE */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration ETM Data Register Definitions (FIFO0) */ +#define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */ +#define TPI_FIFO0_ITM_ATVALID_Msk (0x1UL << TPI_FIFO0_ITM_ATVALID_Pos) 
/*!< TPI FIFO0: ITM_ATVALID Mask */ + +#define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */ +#define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */ + +#define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */ +#define TPI_FIFO0_ETM_ATVALID_Msk (0x1UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ + +#define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */ +#define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */ + +#define TPI_FIFO0_ETM2_Pos 16U /*!< TPI FIFO0: ETM2 Position */ +#define TPI_FIFO0_ETM2_Msk (0xFFUL << TPI_FIFO0_ETM2_Pos) /*!< TPI FIFO0: ETM2 Mask */ + +#define TPI_FIFO0_ETM1_Pos 8U /*!< TPI FIFO0: ETM1 Position */ +#define TPI_FIFO0_ETM1_Msk (0xFFUL << TPI_FIFO0_ETM1_Pos) /*!< TPI FIFO0: ETM1 Mask */ + +#define TPI_FIFO0_ETM0_Pos 0U /*!< TPI FIFO0: ETM0 Position */ +#define TPI_FIFO0_ETM0_Msk (0xFFUL /*<< TPI_FIFO0_ETM0_Pos*/) /*!< TPI FIFO0: ETM0 Mask */ + +/* TPI ITATBCTR2 Register Definitions */ +#define TPI_ITATBCTR2_ATREADY2_Pos 0U /*!< TPI ITATBCTR2: ATREADY2 Position */ +#define TPI_ITATBCTR2_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2_Pos*/) /*!< TPI ITATBCTR2: ATREADY2 Mask */ + +#define TPI_ITATBCTR2_ATREADY1_Pos 0U /*!< TPI ITATBCTR2: ATREADY1 Position */ +#define TPI_ITATBCTR2_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1_Pos*/) /*!< TPI ITATBCTR2: ATREADY1 Mask */ + +/* TPI Integration ITM Data Register Definitions (FIFO1) */ +#define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */ +#define TPI_FIFO1_ITM_ATVALID_Msk (0x1UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ + +#define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */ +#define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */ + +#define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */ +#define TPI_FIFO1_ETM_ATVALID_Msk (0x1UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ + +#define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */ +#define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) /*!< TPI FIFO1: ETM_bytecount Mask */ + +#define TPI_FIFO1_ITM2_Pos 16U /*!< TPI FIFO1: ITM2 Position */ +#define TPI_FIFO1_ITM2_Msk (0xFFUL << TPI_FIFO1_ITM2_Pos) /*!< TPI FIFO1: ITM2 Mask */ + +#define TPI_FIFO1_ITM1_Pos 8U /*!< TPI FIFO1: ITM1 Position */ +#define TPI_FIFO1_ITM1_Msk (0xFFUL << TPI_FIFO1_ITM1_Pos) /*!< TPI FIFO1: ITM1 Mask */ + +#define TPI_FIFO1_ITM0_Pos 0U /*!< TPI FIFO1: ITM0 Position */ +#define TPI_FIFO1_ITM0_Msk (0xFFUL /*<< TPI_FIFO1_ITM0_Pos*/) /*!< TPI FIFO1: ITM0 Mask */ + +/* TPI ITATBCTR0 Register Definitions */ +#define TPI_ITATBCTR0_ATREADY2_Pos 0U /*!< TPI ITATBCTR0: ATREADY2 Position */ +#define TPI_ITATBCTR0_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2_Pos*/) /*!< TPI ITATBCTR0: ATREADY2 Mask */ + +#define TPI_ITATBCTR0_ATREADY1_Pos 0U /*!< TPI ITATBCTR0: ATREADY1 Position */ +#define TPI_ITATBCTR0_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1_Pos*/) /*!< TPI ITATBCTR0: ATREADY1 Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define 
TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_MinBufSz_Pos 6U /*!< TPI DEVID: MinBufSz Position */ +#define TPI_DEVID_MinBufSz_Msk (0x7UL << TPI_DEVID_MinBufSz_Pos) /*!< TPI DEVID: MinBufSz Mask */ + +#define TPI_DEVID_AsynClkIn_Pos 5U /*!< TPI DEVID: AsynClkIn Position */ +#define TPI_DEVID_AsynClkIn_Msk (0x1UL << TPI_DEVID_AsynClkIn_Pos) /*!< TPI DEVID: AsynClkIn Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define TPI_DEVID_NrTraceInput_Msk (0x1FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
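   Usage sketch (illustrative addition, not part of the original CMSIS text): programming one region
   through the RNR/RBAR/RASR fields defined below; the base address, the 32 KB size (SIZE field 14,
   i.e. 2^(14+1) bytes) and the access-permission value 3 (privileged and unprivileged read/write)
   are assumptions for illustration only:
   \code
   MPU->RNR  = 0UL;                                             // select region 0
   MPU->RBAR = 0x20000000UL & MPU_RBAR_ADDR_Msk;                // assumed region base address
   MPU->RASR = (3UL  << MPU_RASR_AP_Pos)   |                    // assumed: AP = 3, full access
               (14UL << MPU_RASR_SIZE_Pos) |                    // 32 KB region
               MPU_RASR_XN_Msk             |                    // no instruction fetches from this region
               MPU_RASR_ENABLE_Msk;                             // enable the region
   MPU->CTRL = MPU_CTRL_PRIVDEFENA_Msk | MPU_CTRL_ENABLE_Msk;   // enable the MPU, keep default map for privileged code
   __DSB();
   __ISB();                                                     // ensure the new settings take effect
   \endcode
   The mpu_armv7.h header included further down wraps these register accesses in helper functions.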
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region RNRber Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Alias 1 Region Base Address Register */ + __IOM uint32_t RASR_A1; /*!< Offset: 0x018 (R/W) MPU Alias 1 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Alias 2 Region Base Address Register */ + __IOM uint32_t RASR_A2; /*!< Offset: 0x020 (R/W) MPU Alias 2 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Alias 3 Region Base Address Register */ + __IOM uint32_t RASR_A3; /*!< Offset: 0x028 (R/W) MPU Alias 3 Region Attribute and Size Register */ +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 5U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0x7FFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/* MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX 
Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug 
DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< CoreDebug DEMCR: 
VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ +#define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ +#define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ +#define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ +#define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ +#define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ +#define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ +#define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE) /*!< Core Debug configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
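   Usage sketch (illustrative addition, not part of the original CMSIS text): typical use of the
   functions in this group together with the _FLD2VAL() macro defined above; USARTx_IRQn stands
   for a device-specific interrupt number and is a placeholder:
   \code
   NVIC_SetPriorityGrouping(3UL);                                  // PRIGROUP = 3: all implemented bits used for preemption on a 4-bit NVIC
   uint32_t grouping = _FLD2VAL(SCB_AIRCR_PRIGROUP, SCB->AIRCR);   // read the same field back with the bit-field helper
   NVIC_SetPriority(USARTx_IRQn, 5UL);                             // placeholder device interrupt
   NVIC_EnableIRQ(USARTx_IRQn);
   (void)grouping;
   \endcode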
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
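   Usage sketch (illustrative addition, not part of the original CMSIS text): negative numbers select
   processor exceptions, non-negative numbers select device interrupts; TIMx_IRQn is a placeholder,
   and the encoded value comes from NVIC_EncodePriority() defined further down:
   \code
   NVIC_SetPriority(SysTick_IRQn, 0x07UL);   // processor exception (negative IRQn value)
   NVIC_SetPriority(TIMx_IRQn,               // placeholder device interrupt
                    NVIC_EncodePriority(NVIC_GetPriorityGrouping(), 1UL, 0UL));
   \endcode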
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHP[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IP[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHP[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + /* ARM Application Note 321 states that the M3 does not require the architectural barrier */ +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv7.h" + +#endif + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. 
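   Usage sketch (illustrative addition, not part of the original CMSIS text): the usual call produces
   a periodic 1 ms interrupt, assuming the device support code maintains the CMSIS SystemCoreClock
   variable:
   \code
   if (SysTick_Config(SystemCoreClock / 1000UL) != 0UL)   // 1 ms tick from the core clock
   {
     // ticks - 1 does not fit into the 24-bit reload register
   }
   \endcode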
+ \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM3_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/Drivers/CMSIS/Include/core_cm33.h b/Drivers/CMSIS/Include/core_cm33.h new file mode 100644 index 0000000..18a2e6f --- /dev/null +++ b/Drivers/CMSIS/Include/core_cm33.h @@ -0,0 +1,3277 @@ +/**************************************************************************//** + * @file core_cm33.h + * @brief CMSIS Cortex-M33 Core Peripheral Access Layer Header File + * @version V5.2.3 + * @date 13. 
October 2021 + ******************************************************************************/ +/* + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM33_H_GENERIC +#define __CORE_CM33_H_GENERIC + +#include <stdint.h> + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M33 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM33 definitions */ +#define __CM33_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM33_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM33_CMSIS_VERSION ((__CM33_CMSIS_VERSION_MAIN << 16U) | \ + __CM33_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (33U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define 
__DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined (__TI_VFP_SUPPORT__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM33_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM33_H_DEPENDANT +#define __CORE_CM33_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM33_REV + #define __CM33_REV 0x0000U + #warning "__CM33_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M33 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t _reserved1:28; /*!< bit: 4..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
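+             \note   Access is normally through the NVIC instance pointer defined later in
+                     this header; a minimal sketch (IRQ number 5 is an arbitrary example):
+           \code
+           NVIC->ISER[5U >> 5UL] = (uint32_t)(1UL << (5U & 0x1FUL));   // enable interrupt 5
+           NVIC->IPR[5U]         = 0x80U;                              // mid-range priority byte
+           \endcode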
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t 
RESERVED7[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + uint32_t RESERVED4[15U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB 
ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: 
SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define 
SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR 
Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB 
CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CPn_Pos 0U /*!< SCB NSACR: CPn Position */ +#define SCB_NSACR_CPn_Msk (1UL /*<< SCB_NSACR_CPn_Pos*/) /*!< SCB NSACR: CPn Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask 
*/ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
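+             \note   A minimal polling sketch using the bit definitions below (assuming the
+                     SysTick instance pointer defined later in this header; CORE_CLOCK_HZ is a
+                     hypothetical placeholder for the core clock frequency):
+           \code
+           SysTick->LOAD = (CORE_CLOCK_HZ / 1000U) - 1U;                   // ~1 ms period
+           SysTick->VAL  = 0U;                                             // clear current value
+           SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | SysTick_CTRL_ENABLE_Msk;
+           while ((SysTick->CTRL & SysTick_CTRL_COUNTFLAG_Msk) == 0U) {}   // wait one period
+           \endcode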
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
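+             \note   A minimal sketch for writing to a stimulus port other than port 0 (port 1
+                     is an arbitrary example; the ITM instance pointer and the TCR/TER bit
+                     definitions used here appear later in this header):
+           \code
+           if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) &&   // ITM enabled by the debugger
+               ((ITM->TER & (1UL << 1U))        != 0UL))     // stimulus port 1 enabled
+           {
+             while (ITM->PORT[1U].u32 == 0UL) {}             // wait until the port FIFO is ready
+             ITM->PORT[1U].u8 = (uint8_t)'x';                // emit one byte on port 1
+           }
+           \endcode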
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) ITM Device Architecture Register */ + uint32_t RESERVED6[4U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (0x1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (0x1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << 
ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator 
Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ + uint32_t RESERVED32[934U]; + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ + uint32_t RESERVED33[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (0x1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << 
DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT 
FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x1UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t ITFTTD0; /*!< Offset: 0xEEC (R/ ) Integration Test FIFO Test Data 0 Register */ + __IOM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/W) Integration Test ATB Control Register 2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) Integration Test ATB Control Register 0 */ + __IM uint32_t ITFTTD1; /*!< Offset: 0xEFC (R/ ) Integration Test FIFO Test Data 1 Register */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg 
Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration Test FIFO Test Data 0 Register Definitions */ +#define TPI_ITFTTD0_ATB_IF2_ATVALID_Pos 29U /*!< TPI ITFTTD0: ATB Interface 2 ATVALIDPosition */ +#define TPI_ITFTTD0_ATB_IF2_ATVALID_Msk (0x3UL << TPI_ITFTTD0_ATB_IF2_ATVALID_Pos) /*!< TPI ITFTTD0: ATB Interface 2 ATVALID Mask */ + +#define TPI_ITFTTD0_ATB_IF2_bytecount_Pos 27U /*!< TPI ITFTTD0: ATB Interface 2 byte count Position */ +#define TPI_ITFTTD0_ATB_IF2_bytecount_Msk (0x3UL << TPI_ITFTTD0_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 2 byte count Mask */ + +#define TPI_ITFTTD0_ATB_IF1_ATVALID_Pos 26U /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Position */ +#define TPI_ITFTTD0_ATB_IF1_ATVALID_Msk (0x3UL << TPI_ITFTTD0_ATB_IF1_ATVALID_Pos) /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Mask */ + +#define TPI_ITFTTD0_ATB_IF1_bytecount_Pos 24U /*!< TPI ITFTTD0: ATB Interface 1 byte count Position */ +#define TPI_ITFTTD0_ATB_IF1_bytecount_Msk (0x3UL << TPI_ITFTTD0_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 1 byte countt Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data2_Pos 16U /*!< TPI ITFTTD0: ATB Interface 1 data2 Position */ +#define TPI_ITFTTD0_ATB_IF1_data2_Msk (0xFFUL << TPI_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPI ITFTTD0: ATB Interface 1 data2 Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data1_Pos 8U /*!< TPI ITFTTD0: ATB Interface 1 data1 Position */ +#define TPI_ITFTTD0_ATB_IF1_data1_Msk (0xFFUL << TPI_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPI ITFTTD0: ATB Interface 1 data1 Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data0_Pos 0U /*!< TPI ITFTTD0: ATB Interface 1 data0 Position */ +#define TPI_ITFTTD0_ATB_IF1_data0_Msk (0xFFUL /*<< TPI_ITFTTD0_ATB_IF1_data0_Pos*/) /*!< TPI ITFTTD0: ATB Interface 1 data0 Mask */ + +/* TPI Integration Test ATB Control Register 2 Register Definitions */ +#define TPI_ITATBCTR2_AFVALID2S_Pos 1U /*!< TPI ITATBCTR2: AFVALID2S Position */ +#define TPI_ITATBCTR2_AFVALID2S_Msk (0x1UL << TPI_ITATBCTR2_AFVALID2S_Pos) /*!< TPI ITATBCTR2: AFVALID2SS Mask */ + +#define TPI_ITATBCTR2_AFVALID1S_Pos 1U /*!< TPI ITATBCTR2: AFVALID1S Position */ +#define TPI_ITATBCTR2_AFVALID1S_Msk (0x1UL << TPI_ITATBCTR2_AFVALID1S_Pos) /*!< TPI ITATBCTR2: AFVALID1SS Mask */ + +#define TPI_ITATBCTR2_ATREADY2S_Pos 0U /*!< TPI ITATBCTR2: ATREADY2S Position */ +#define TPI_ITATBCTR2_ATREADY2S_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2S_Pos*/) /*!< TPI ITATBCTR2: ATREADY2S Mask */ + +#define TPI_ITATBCTR2_ATREADY1S_Pos 0U /*!< TPI ITATBCTR2: ATREADY1S Position */ +#define TPI_ITATBCTR2_ATREADY1S_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1S_Pos*/) /*!< TPI ITATBCTR2: ATREADY1S Mask */ + +/* TPI Integration Test FIFO Test Data 1 Register Definitions */ +#define 
TPI_ITFTTD1_ATB_IF2_ATVALID_Pos 29U /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Position */ +#define TPI_ITFTTD1_ATB_IF2_ATVALID_Msk (0x3UL << TPI_ITFTTD1_ATB_IF2_ATVALID_Pos) /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Mask */ + +#define TPI_ITFTTD1_ATB_IF2_bytecount_Pos 27U /*!< TPI ITFTTD1: ATB Interface 2 byte count Position */ +#define TPI_ITFTTD1_ATB_IF2_bytecount_Msk (0x3UL << TPI_ITFTTD1_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 2 byte count Mask */ + +#define TPI_ITFTTD1_ATB_IF1_ATVALID_Pos 26U /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Position */ +#define TPI_ITFTTD1_ATB_IF1_ATVALID_Msk (0x3UL << TPI_ITFTTD1_ATB_IF1_ATVALID_Pos) /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Mask */ + +#define TPI_ITFTTD1_ATB_IF1_bytecount_Pos 24U /*!< TPI ITFTTD1: ATB Interface 1 byte count Position */ +#define TPI_ITFTTD1_ATB_IF1_bytecount_Msk (0x3UL << TPI_ITFTTD1_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 1 byte countt Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data2_Pos 16U /*!< TPI ITFTTD1: ATB Interface 2 data2 Position */ +#define TPI_ITFTTD1_ATB_IF2_data2_Msk (0xFFUL << TPI_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPI ITFTTD1: ATB Interface 2 data2 Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data1_Pos 8U /*!< TPI ITFTTD1: ATB Interface 2 data1 Position */ +#define TPI_ITFTTD1_ATB_IF2_data1_Msk (0xFFUL << TPI_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPI ITFTTD1: ATB Interface 2 data1 Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data0_Pos 0U /*!< TPI ITFTTD1: ATB Interface 2 data0 Position */ +#define TPI_ITFTTD1_ATB_IF2_data0_Msk (0xFFUL /*<< TPI_ITFTTD1_ATB_IF2_data0_Pos*/) /*!< TPI ITFTTD1: ATB Interface 2 data0 Mask */ + +/* TPI Integration Test ATB Control Register 0 Definitions */ +#define TPI_ITATBCTR0_AFVALID2S_Pos 1U /*!< TPI ITATBCTR0: AFVALID2S Position */ +#define TPI_ITATBCTR0_AFVALID2S_Msk (0x1UL << TPI_ITATBCTR0_AFVALID2S_Pos) /*!< TPI ITATBCTR0: AFVALID2SS Mask */ + +#define TPI_ITATBCTR0_AFVALID1S_Pos 1U /*!< TPI ITATBCTR0: AFVALID1S Position */ +#define TPI_ITATBCTR0_AFVALID1S_Msk (0x1UL << TPI_ITATBCTR0_AFVALID1S_Pos) /*!< TPI ITATBCTR0: AFVALID1SS Mask */ + +#define TPI_ITATBCTR0_ATREADY2S_Pos 0U /*!< TPI ITATBCTR0: ATREADY2S Position */ +#define TPI_ITATBCTR0_ATREADY2S_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2S_Pos*/) /*!< TPI ITATBCTR0: ATREADY2S Mask */ + +#define TPI_ITATBCTR0_ATREADY1S_Pos 0U /*!< TPI ITATBCTR0: ATREADY1S Position */ +#define TPI_ITATBCTR0_ATREADY1S_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1S_Pos*/) /*!< TPI ITATBCTR0: ATREADY1S Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFOSZ Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFOSZ Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define 
TPI_DEVID_NrTraceInput_Msk (0x3FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ 
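For orientation, the MPU_TYPE and MPU_CTRL definitions above are normally consumed along the following lines. This is an illustrative sketch only, not part of the committed header; it assumes the device header (which maps the MPU pointer defined later in this file) is already included and that the usual CMSIS __DSB()/__ISB() intrinsics are available.

/* Enable the MPU, keeping the default memory map as a background map for
 * privileged code (PRIVDEFENA). Individual regions would be programmed via
 * RNR/RBAR/RLAR before this call.
 */
static void mpu_enable_with_default_map(void)
{
  /* Bail out if this device implements no MPU data regions. */
  if (((MPU->TYPE & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos) == 0U)
  {
    return;
  }

  MPU->CTRL = MPU_CTRL_PRIVDEFENA_Msk | MPU_CTRL_ENABLE_Msk;

  __DSB();  /* ensure following memory accesses see the new MPU settings */
  __ISB();  /* flush the pipeline so fetched instructions use them too   */
}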
+#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Disable Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/* Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define 
SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege 
level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/* Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FP_rounding_modes_Pos 28U /*!< MVFR0: FP rounding modes bits Position */ +#define FPU_MVFR0_FP_rounding_modes_Msk (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos) /*!< MVFR0: FP rounding modes bits Mask */ + +#define FPU_MVFR0_Short_vectors_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_Short_vectors_Msk (0xFUL << FPU_MVFR0_Short_vectors_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_Square_root_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_Square_root_Msk (0xFUL << FPU_MVFR0_Square_root_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_Divide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_Divide_Msk (0xFUL << FPU_MVFR0_Divide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FP_excep_trapping_Pos 12U /*!< MVFR0: FP exception trapping bits Position */ +#define FPU_MVFR0_FP_excep_trapping_Msk (0xFUL << FPU_MVFR0_FP_excep_trapping_Pos) /*!< MVFR0: FP exception trapping bits Mask */ + +#define FPU_MVFR0_Double_precision_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_Double_precision_Msk (0xFUL << FPU_MVFR0_Double_precision_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_Single_precision_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_Single_precision_Msk (0xFUL << FPU_MVFR0_Single_precision_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_A_SIMD_registers_Pos 0U /*!< MVFR0: A_SIMD registers bits Position */ +#define FPU_MVFR0_A_SIMD_registers_Msk (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/) /*!< MVFR0: A_SIMD registers bits Mask */ + +/* Media and VFP Feature Register 1 Definitions */ +#define FPU_MVFR1_FP_fused_MAC_Pos 28U /*!< MVFR1: FP fused MAC bits Position */ +#define FPU_MVFR1_FP_fused_MAC_Msk (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos) /*!< MVFR1: FP fused MAC bits Mask */ + +#define FPU_MVFR1_FP_HPFP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FP_HPFP_Msk (0xFUL << FPU_MVFR1_FP_HPFP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_D_NaN_mode_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_D_NaN_mode_Msk (0xFUL << FPU_MVFR1_D_NaN_mode_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + 
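As a usage sketch for the FPCCR and MVFR bit definitions above (illustrative only, not part of the header; it assumes the device header that maps FPU to FPU_BASE, defined later in this file, has been included):

/* A non-zero Double_precision field in MVFR0 means the FPU implements
 * double-precision arithmetic.
 */
static int fpu_has_double_precision(void)
{
  return ((FPU->MVFR0 & FPU_MVFR0_Double_precision_Msk)
          >> FPU_MVFR0_Double_precision_Pos) != 0U;
}

/* Enable automatic, lazy floating-point context save on exception entry. */
static void fpu_enable_lazy_stacking(void)
{
  FPU->FPCCR |= (FPU_FPCCR_ASPEN_Msk | FPU_FPCCR_LSPEN_Msk);
}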
+#define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/* Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: FPMisc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: FPMisc bits Mask */ + +/*@} end of group CMSIS_FPU */ + +/* CoreDebug is deprecated. replaced by DCB (Debug Control Block) */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief \deprecated Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< \deprecated CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< \deprecated CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< \deprecated CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< \deprecated CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< \deprecated CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< \deprecated CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< \deprecated CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U 
/*!< \deprecated CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< \deprecated CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< \deprecated CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< \deprecated CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< \deprecated CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< \deprecated CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< \deprecated CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< \deprecated CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< \deprecated CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< \deprecated CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< \deprecated CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< \deprecated CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< \deprecated CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< \deprecated CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< \deprecated CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< \deprecated CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< \deprecated CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< \deprecated CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Position */ 
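The TRCENA bit defined above gates the DWT unit declared earlier in this file, which is why cycle-counting code always pairs the two. A minimal sketch (illustrative only; assumes the device header is included so CoreDebug and DWT resolve to the base addresses defined further below):

/* Start the DWT cycle counter and read it back. The counter is 32 bits wide
 * and wraps silently, so callers should subtract two samples rather than
 * rely on an absolute value.
 */
static void cyccnt_start(void)
{
  CoreDebug->DEMCR |= CoreDebug_DEMCR_TRCENA_Msk;  /* power up the DWT/ITM blocks */
  DWT->CYCCNT       = 0U;                          /* clear the counter           */
  DWT->CTRL        |= DWT_CTRL_CYCCNTENA_Msk;      /* start counting core cycles  */
}

static uint32_t cyccnt_read(void)
{
  return DWT->CYCCNT;
}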
+#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< \deprecated CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< \deprecated CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< \deprecated CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< \deprecated CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< \deprecated CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). 
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (0x1UL << DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define 
DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (0x1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (0x1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (0x1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (0x1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (0x1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (0x1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (0x1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (0x1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (0x1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (0x1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (0x1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (0x1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (0x1UL 
<< DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (0x1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type 
Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + #define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + + #define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ + #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base 
Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< \deprecated Core Debug Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCnSCB_NS ((SCnSCB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< \deprecated Core Debug configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_register_aliases Backwards Compatibility Aliases + \brief Register alias definitions for backwards compatibility. + @{ + */ +#define ID_ADR (ID_AFR) /*!< SCB Auxiliary Feature Register */ +/*@} */ + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
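+           A typical bring-up sequence, shown as a minimal sketch (TIM2_IRQn stands for
+           any hypothetical device interrupt defined in the device header):
+
+             NVIC_SetPriority(TIM2_IRQn, 5U);   // choose a priority before enabling
+             NVIC_EnableIRQ(TIM2_IRQn);         // enable the interrupt in the NVIC
+             // ... later, if no longer needed:
+             NVIC_DisableIRQ(TIM2_IRQn);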
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. 
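+             For example, on a device with four implemented priority bits
+             (__NVIC_PRIO_BITS == 4), the following sketch selects PRIGROUP = 3, which
+             uses all four implemented bits as preemption priority and leaves no subpriority:
+
+               NVIC_SetPriorityGrouping(3U);   // 16 preemption levels, 0 subpriority bits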
+ */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. 
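+             For example (TIM2_IRQn again standing for a hypothetical device interrupt):
+
+               NVIC_SetPendingIRQ(TIM2_IRQn);          // pend the interrupt from software
+               if (NVIC_GetPendingIRQ(TIM2_IRQn) != 0U)
+               {
+                 NVIC_ClearPendingIRQ(TIM2_IRQn);      // discard the pending request
+               }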
+ \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
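+             A minimal sketch (TIM2_IRQn is a hypothetical device interrupt):
+
+               NVIC_SetPriority(SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); // processor exception (negative IRQn): lowest priority
+               NVIC_SetPriority(TIM2_IRQn, 2U);                                 // device specific interrupt (IRQn >= 0)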
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). 
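+             One possible secure-side sketch that mirrors the Secure priority grouping to
+             the Non-secure NVIC and reads it back (callable from Secure state only):
+
+               TZ_NVIC_SetPriorityGrouping_NS(NVIC_GetPriorityGrouping());
+               uint32_t prigroup_ns = TZ_NVIC_GetPriorityGrouping_NS();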
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). 
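+             Typically called after the SAU regions have been programmed from Secure state;
+             a minimal sketch with purely hypothetical addresses:
+
+               SAU->RNR  = 0U;                    // select SAU region 0
+               SAU->RBAR = 0x00200000U;           // hypothetical Non-secure region base address
+               SAU->RLAR = 0x003FFFE0U | 1U;      // hypothetical region limit, region enabled
+               TZ_SAU_Enable();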
+ */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. 
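+           For example, a 1 ms tick can be requested with SysTick_Config() (described below)
+           using the CMSIS variable SystemCoreClock, assuming the device's system file keeps
+           that variable up to date:
+
+             if (SysTick_Config(SystemCoreClock / 1000U) != 0U)
+             {
+               // reload value does not fit into the 24-bit SysTick counter
+               while (1) { }
+             }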
+ @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. 
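+             A common retargeting sketch routes standard output through this function; the
+             _write hook shown is newlib/GCC specific and purely illustrative:
+
+               int _write(int file, char *ptr, int len)
+               {
+                 (void)file;
+                 for (int i = 0; i < len; i++)
+                 {
+                   ITM_SendChar((uint32_t)ptr[i]);
+                 }
+                 return len;
+               }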
+ */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM33_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/Drivers/CMSIS/Include/core_cm35p.h b/Drivers/CMSIS/Include/core_cm35p.h new file mode 100644 index 0000000..3843d95 --- /dev/null +++ b/Drivers/CMSIS/Include/core_cm35p.h @@ -0,0 +1,3277 @@ +/**************************************************************************//** + * @file core_cm35p.h + * @brief CMSIS Cortex-M35P Core Peripheral Access Layer Header File + * @version V1.1.3 + * @date 13. October 2021 + ******************************************************************************/ +/* + * Copyright (c) 2018-2021 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM35P_H_GENERIC +#define __CORE_CM35P_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.<br>
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.<br>
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M35P + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM35P definitions */ +#define __CM35P_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM35P_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM35P_CMSIS_VERSION ((__CM35P_CMSIS_VERSION_MAIN << 16U) | \ + __CM35P_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (35U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + 
#define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined (__TI_VFP_SUPPORT__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM35P_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM35P_H_DEPENDANT +#define __CORE_CM35P_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM35P_REV + #define __CM35P_REV 0x0000U + #warning "__CM35P_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M35P */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t _reserved1:28; /*!< bit: 4..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
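+           Registers are accessed through the NVIC pointer defined further below; for
+           example, privileged software can pend an interrupt via the Software Trigger
+           Interrupt Register (TIM2_IRQn being a hypothetical device interrupt):
+
+             NVIC->STIR = (uint32_t)TIM2_IRQn;   // pend the interrupt from software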
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t 
RESERVED7[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + uint32_t RESERVED4[15U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB 
ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: 
SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define 
SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR 
Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB 
CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CPn_Pos 0U /*!< SCB NSACR: CPn Position */ +#define SCB_NSACR_CPn_Msk (1UL /*<< SCB_NSACR_CPn_Pos*/) /*!< SCB NSACR: CPn Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask 
*/ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
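The SysTick register block and bit-field masks defined below are normally combined into a periodic tick setup. A minimal sketch, assuming the standard CMSIS SysTick instance macro declared elsewhere in this header and a tick count supplied by the caller (illustrative only, not part of the committed file):

// Hedged sketch: start SysTick with a periodic interrupt.
// Assumes the CMSIS SysTick instance macro and interrupt priority setup exist elsewhere.
static inline uint32_t systick_start(uint32_t ticks)
{
  if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk)
  {
    return 1UL;                                    // reload value must fit in the 24-bit RELOAD field
  }
  SysTick->LOAD = ticks - 1UL;                     // program the reload value
  SysTick->VAL  = 0UL;                             // clear the current counter
  SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk |     // count the processor clock
                  SysTick_CTRL_TICKINT_Msk   |     // request the SysTick exception on wrap
                  SysTick_CTRL_ENABLE_Msk;         // start the counter
  return 0UL;
}

Called as systick_start(SystemCoreClock / 1000U) this would request a 1 ms tick, assuming SystemCoreClock is maintained by the device support code.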
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
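The stimulus-port union and the TER/TCR enable bits defined below are typically used for blocking character output over the trace interface. A hedged sketch, assuming the CMSIS ITM instance macro and __NOP() intrinsic provided elsewhere in this header set, and that a debugger has already configured and enabled tracing:

// Hedged sketch: blocking write of one byte to ITM stimulus port 0.
static inline uint32_t itm_send_char(uint32_t ch)
{
  if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) &&  // ITM enabled
      ((ITM->TER & 1UL) != 0UL))                   // stimulus port 0 enabled
  {
    while (ITM->PORT[0U].u32 == 0UL)               // wait until the port FIFO can accept data
    {
      __NOP();
    }
    ITM->PORT[0U].u8 = (uint8_t)ch;                // write the byte
  }
  return ch;
}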
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) ITM Device Architecture Register */ + uint32_t RESERVED6[4U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (0x1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (0x1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << 
ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator 
Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ + uint32_t RESERVED32[934U]; + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ + uint32_t RESERVED33[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (0x1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << 
DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT 
FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x1UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t ITFTTD0; /*!< Offset: 0xEEC (R/ ) Integration Test FIFO Test Data 0 Register */ + __IOM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/W) Integration Test ATB Control Register 2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) Integration Test ATB Control Register 0 */ + __IM uint32_t ITFTTD1; /*!< Offset: 0xEFC (R/ ) Integration Test FIFO Test Data 1 Register */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg 
Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration Test FIFO Test Data 0 Register Definitions */ +#define TPI_ITFTTD0_ATB_IF2_ATVALID_Pos 29U /*!< TPI ITFTTD0: ATB Interface 2 ATVALIDPosition */ +#define TPI_ITFTTD0_ATB_IF2_ATVALID_Msk (0x3UL << TPI_ITFTTD0_ATB_IF2_ATVALID_Pos) /*!< TPI ITFTTD0: ATB Interface 2 ATVALID Mask */ + +#define TPI_ITFTTD0_ATB_IF2_bytecount_Pos 27U /*!< TPI ITFTTD0: ATB Interface 2 byte count Position */ +#define TPI_ITFTTD0_ATB_IF2_bytecount_Msk (0x3UL << TPI_ITFTTD0_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 2 byte count Mask */ + +#define TPI_ITFTTD0_ATB_IF1_ATVALID_Pos 26U /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Position */ +#define TPI_ITFTTD0_ATB_IF1_ATVALID_Msk (0x3UL << TPI_ITFTTD0_ATB_IF1_ATVALID_Pos) /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Mask */ + +#define TPI_ITFTTD0_ATB_IF1_bytecount_Pos 24U /*!< TPI ITFTTD0: ATB Interface 1 byte count Position */ +#define TPI_ITFTTD0_ATB_IF1_bytecount_Msk (0x3UL << TPI_ITFTTD0_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 1 byte countt Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data2_Pos 16U /*!< TPI ITFTTD0: ATB Interface 1 data2 Position */ +#define TPI_ITFTTD0_ATB_IF1_data2_Msk (0xFFUL << TPI_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPI ITFTTD0: ATB Interface 1 data2 Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data1_Pos 8U /*!< TPI ITFTTD0: ATB Interface 1 data1 Position */ +#define TPI_ITFTTD0_ATB_IF1_data1_Msk (0xFFUL << TPI_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPI ITFTTD0: ATB Interface 1 data1 Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data0_Pos 0U /*!< TPI ITFTTD0: ATB Interface 1 data0 Position */ +#define TPI_ITFTTD0_ATB_IF1_data0_Msk (0xFFUL /*<< TPI_ITFTTD0_ATB_IF1_data0_Pos*/) /*!< TPI ITFTTD0: ATB Interface 1 data0 Mask */ + +/* TPI Integration Test ATB Control Register 2 Register Definitions */ +#define TPI_ITATBCTR2_AFVALID2S_Pos 1U /*!< TPI ITATBCTR2: AFVALID2S Position */ +#define TPI_ITATBCTR2_AFVALID2S_Msk (0x1UL << TPI_ITATBCTR2_AFVALID2S_Pos) /*!< TPI ITATBCTR2: AFVALID2SS Mask */ + +#define TPI_ITATBCTR2_AFVALID1S_Pos 1U /*!< TPI ITATBCTR2: AFVALID1S Position */ +#define TPI_ITATBCTR2_AFVALID1S_Msk (0x1UL << TPI_ITATBCTR2_AFVALID1S_Pos) /*!< TPI ITATBCTR2: AFVALID1SS Mask */ + +#define TPI_ITATBCTR2_ATREADY2S_Pos 0U /*!< TPI ITATBCTR2: ATREADY2S Position */ +#define TPI_ITATBCTR2_ATREADY2S_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2S_Pos*/) /*!< TPI ITATBCTR2: ATREADY2S Mask */ + +#define TPI_ITATBCTR2_ATREADY1S_Pos 0U /*!< TPI ITATBCTR2: ATREADY1S Position */ +#define TPI_ITATBCTR2_ATREADY1S_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1S_Pos*/) /*!< TPI ITATBCTR2: ATREADY1S Mask */ + +/* TPI Integration Test FIFO Test Data 1 Register Definitions */ +#define 
TPI_ITFTTD1_ATB_IF2_ATVALID_Pos 29U /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Position */ +#define TPI_ITFTTD1_ATB_IF2_ATVALID_Msk (0x3UL << TPI_ITFTTD1_ATB_IF2_ATVALID_Pos) /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Mask */ + +#define TPI_ITFTTD1_ATB_IF2_bytecount_Pos 27U /*!< TPI ITFTTD1: ATB Interface 2 byte count Position */ +#define TPI_ITFTTD1_ATB_IF2_bytecount_Msk (0x3UL << TPI_ITFTTD1_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 2 byte count Mask */ + +#define TPI_ITFTTD1_ATB_IF1_ATVALID_Pos 26U /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Position */ +#define TPI_ITFTTD1_ATB_IF1_ATVALID_Msk (0x3UL << TPI_ITFTTD1_ATB_IF1_ATVALID_Pos) /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Mask */ + +#define TPI_ITFTTD1_ATB_IF1_bytecount_Pos 24U /*!< TPI ITFTTD1: ATB Interface 1 byte count Position */ +#define TPI_ITFTTD1_ATB_IF1_bytecount_Msk (0x3UL << TPI_ITFTTD1_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 1 byte countt Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data2_Pos 16U /*!< TPI ITFTTD1: ATB Interface 2 data2 Position */ +#define TPI_ITFTTD1_ATB_IF2_data2_Msk (0xFFUL << TPI_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPI ITFTTD1: ATB Interface 2 data2 Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data1_Pos 8U /*!< TPI ITFTTD1: ATB Interface 2 data1 Position */ +#define TPI_ITFTTD1_ATB_IF2_data1_Msk (0xFFUL << TPI_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPI ITFTTD1: ATB Interface 2 data1 Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data0_Pos 0U /*!< TPI ITFTTD1: ATB Interface 2 data0 Position */ +#define TPI_ITFTTD1_ATB_IF2_data0_Msk (0xFFUL /*<< TPI_ITFTTD1_ATB_IF2_data0_Pos*/) /*!< TPI ITFTTD1: ATB Interface 2 data0 Mask */ + +/* TPI Integration Test ATB Control Register 0 Definitions */ +#define TPI_ITATBCTR0_AFVALID2S_Pos 1U /*!< TPI ITATBCTR0: AFVALID2S Position */ +#define TPI_ITATBCTR0_AFVALID2S_Msk (0x1UL << TPI_ITATBCTR0_AFVALID2S_Pos) /*!< TPI ITATBCTR0: AFVALID2SS Mask */ + +#define TPI_ITATBCTR0_AFVALID1S_Pos 1U /*!< TPI ITATBCTR0: AFVALID1S Position */ +#define TPI_ITATBCTR0_AFVALID1S_Msk (0x1UL << TPI_ITATBCTR0_AFVALID1S_Pos) /*!< TPI ITATBCTR0: AFVALID1SS Mask */ + +#define TPI_ITATBCTR0_ATREADY2S_Pos 0U /*!< TPI ITATBCTR0: ATREADY2S Position */ +#define TPI_ITATBCTR0_ATREADY2S_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2S_Pos*/) /*!< TPI ITATBCTR0: ATREADY2S Mask */ + +#define TPI_ITATBCTR0_ATREADY1S_Pos 0U /*!< TPI ITATBCTR0: ATREADY1S Position */ +#define TPI_ITATBCTR0_ATREADY1S_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1S_Pos*/) /*!< TPI ITATBCTR0: ATREADY1S Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFOSZ Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFOSZ Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define 
TPI_DEVID_NrTraceInput_Msk (0x3FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ 
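The MPU RBAR and RLAR field encodings being defined at this point (the remaining RBAR, RLAR and MAIR masks follow just below) are usually combined as in this sketch. It assumes the CMSIS MPU instance macro and the __DSB()/__ISB() barrier intrinsics from this header set, 32-byte-aligned addresses, and already-encoded SH/AP/XN/attribute-index values from the caller (illustrative only):

// Hedged sketch: program and enable one ARMv8-M MPU region.
static inline void mpu_set_region(uint32_t rnr, uint32_t base, uint32_t limit,
                                  uint32_t sh, uint32_t ap, uint32_t xn,
                                  uint32_t attr_idx)
{
  MPU->CTRL = 0UL;                                                   // disable the MPU while reprogramming
  MPU->RNR  = rnr & MPU_RNR_REGION_Msk;                              // select the region
  MPU->RBAR = (base & MPU_RBAR_BASE_Msk) |
              ((sh << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) |
              ((ap << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk) |
              (xn & MPU_RBAR_XN_Msk);
  MPU->RLAR = (limit & MPU_RLAR_LIMIT_Msk) |
              ((attr_idx << MPU_RLAR_AttrIndx_Pos) & MPU_RLAR_AttrIndx_Msk) |
              MPU_RLAR_EN_Msk;                                       // enable this region
  MPU->CTRL = MPU_CTRL_PRIVDEFENA_Msk | MPU_CTRL_ENABLE_Msk;         // enable MPU, keep default map for privileged code
  __DSB();                                                           // ensure the new settings are committed
  __ISB();                                                           // before the next instruction fetch
}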
+#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Disable Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
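The SAU region registers and masks defined below are typically programmed from the Secure state as in this sketch, assuming the CMSIS SAU instance macro, that SAU regions are present (__SAUREGION_PRESENT == 1), and 32-byte-aligned addresses (illustrative only):

// Hedged sketch: mark one address range Non-secure via the SAU.
static inline void sau_set_ns_region(uint32_t rnr, uint32_t start, uint32_t end)
{
  SAU->RNR  = rnr & SAU_RNR_REGION_Msk;                // select the region
  SAU->RBAR = start & SAU_RBAR_BADDR_Msk;              // base address, bits [31:5]
  SAU->RLAR = (end & SAU_RLAR_LADDR_Msk) |             // limit address, bits [31:5]
              SAU_RLAR_ENABLE_Msk;                     // region enabled, not Non-secure-callable
  SAU->CTRL |= SAU_CTRL_ENABLE_Msk;                    // enable the SAU
}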
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/* Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define 
SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege 
level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/* Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FP_rounding_modes_Pos 28U /*!< MVFR0: FP rounding modes bits Position */ +#define FPU_MVFR0_FP_rounding_modes_Msk (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos) /*!< MVFR0: FP rounding modes bits Mask */ + +#define FPU_MVFR0_Short_vectors_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_Short_vectors_Msk (0xFUL << FPU_MVFR0_Short_vectors_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_Square_root_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_Square_root_Msk (0xFUL << FPU_MVFR0_Square_root_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_Divide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_Divide_Msk (0xFUL << FPU_MVFR0_Divide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FP_excep_trapping_Pos 12U /*!< MVFR0: FP exception trapping bits Position */ +#define FPU_MVFR0_FP_excep_trapping_Msk (0xFUL << FPU_MVFR0_FP_excep_trapping_Pos) /*!< MVFR0: FP exception trapping bits Mask */ + +#define FPU_MVFR0_Double_precision_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_Double_precision_Msk (0xFUL << FPU_MVFR0_Double_precision_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_Single_precision_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_Single_precision_Msk (0xFUL << FPU_MVFR0_Single_precision_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_A_SIMD_registers_Pos 0U /*!< MVFR0: A_SIMD registers bits Position */ +#define FPU_MVFR0_A_SIMD_registers_Msk (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/) /*!< MVFR0: A_SIMD registers bits Mask */ + +/* Media and VFP Feature Register 1 Definitions */ +#define FPU_MVFR1_FP_fused_MAC_Pos 28U /*!< MVFR1: FP fused MAC bits Position */ +#define FPU_MVFR1_FP_fused_MAC_Msk (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos) /*!< MVFR1: FP fused MAC bits Mask */ + +#define FPU_MVFR1_FP_HPFP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FP_HPFP_Msk (0xFUL << FPU_MVFR1_FP_HPFP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_D_NaN_mode_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_D_NaN_mode_Msk (0xFUL << FPU_MVFR1_D_NaN_mode_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + 
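The FPCCR and FPDSCR bit definitions above are normally used together with the FPU register block declared further down in this header. A minimal illustrative sketch (the function name is hypothetical, and it assumes the FPU has already been enabled through CPACR by the startup code):

void fpu_default_mode_example(void)
{
  /* Enable automatic (ASPEN) and lazy (LSPEN) floating-point context preservation. */
  FPU->FPCCR |= (FPU_FPCCR_ASPEN_Msk | FPU_FPCCR_LSPEN_Msk);

  /* Default FP state for new contexts: default NaN, flush-to-zero,
     round-to-nearest (RMode = 0). */
  FPU->FPDSCR = (FPU->FPDSCR & ~FPU_FPDSCR_RMode_Msk)
              | FPU_FPDSCR_DN_Msk
              | FPU_FPDSCR_FZ_Msk;
}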
+#define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/* Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: FPMisc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: FPMisc bits Mask */ + +/*@} end of group CMSIS_FPU */ + +/* CoreDebug is deprecated. replaced by DCB (Debug Control Block) */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief \deprecated Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< \deprecated CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< \deprecated CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< \deprecated CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< \deprecated CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< \deprecated CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< \deprecated CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< \deprecated CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U 
/*!< \deprecated CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< \deprecated CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< \deprecated CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< \deprecated CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< \deprecated CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< \deprecated CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< \deprecated CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< \deprecated CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< \deprecated CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< \deprecated CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< \deprecated CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< \deprecated CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< \deprecated CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< \deprecated CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< \deprecated CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< \deprecated CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< \deprecated CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< \deprecated CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Position */ 
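The deprecated CoreDebug symbols defined here are still widely used, for example to set TRCENA before touching the DWT cycle counter; DCB_DEMCR_TRCENA_Msk further below is the non-deprecated equivalent. An illustrative sketch (function name hypothetical; relies on the DWT definitions from earlier in this header):

static inline void cycle_counter_enable_example(void)
{
  CoreDebug->DEMCR |= CoreDebug_DEMCR_TRCENA_Msk;  /* enable the DWT/ITM trace logic */
  DWT->CYCCNT      = 0U;                           /* reset the cycle counter        */
  DWT->CTRL       |= DWT_CTRL_CYCCNTENA_Msk;       /* start counting CPU cycles      */
}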
+#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< \deprecated CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< \deprecated CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< \deprecated CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< \deprecated CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< \deprecated CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). 
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (0x1UL << DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define 
DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (0x1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (0x1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (0x1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (0x1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (0x1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (0x1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (0x1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (0x1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (0x1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (0x1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (0x1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (0x1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (0x1UL 
<< DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (0x1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type 
Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + #define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + + #define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ + #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base 
Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< \deprecated Core Debug Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCnSCB_NS ((SCnSCB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< \deprecated Core Debug configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_register_aliases Backwards Compatibility Aliases + \brief Register alias definitions for backwards compatibility. + @{ + */ +#define ID_ADR (ID_AFR) /*!< SCB Auxiliary Feature Register */ +/*@} */ + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. 
+ */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. 
+ \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). 
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). 
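/* Editor's note: illustrative usage sketch, not part of the original CMSIS header.
   A minimal run-time check of the FPU configuration using SCB_GetFPUType(). */
void report_fpu(void)
{
  switch (SCB_GetFPUType())
  {
    case 2U:  /* double + single precision FPU present        */  break;
    case 1U:  /* single precision FPU only                    */  break;
    default:  /* no FPU: keep floating point out of this code */  break;
  }
}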
+ */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. 
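/* Editor's note: illustrative usage sketch, not part of the original CMSIS header.
   Marking one address range as non-secure callable and switching the SAU on with
   TZ_SAU_Enable(). The region number, base and limit addresses are hypothetical;
   SAU->RNR/RBAR/RLAR and the SAU_RBAR_*/SAU_RLAR_* masks are assumed to be the
   Armv8-M SAU region definitions provided elsewhere in this header. */
void configure_sau(void)
{
  SAU->RNR  = 0U;                                          /* select region 0     */
  SAU->RBAR = (0x10000000UL & SAU_RBAR_BADDR_Msk);         /* region base address */
  SAU->RLAR = (0x1003FFE0UL & SAU_RLAR_LADDR_Msk) |
              SAU_RLAR_NSC_Msk | SAU_RLAR_ENABLE_Msk;      /* NSC, region enabled */
  TZ_SAU_Enable();                                         /* SAU on              */
}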
+ @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. 
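/* Editor's note: illustrative usage sketch, not part of the original CMSIS header.
   A 1 ms system tick using SysTick_Config(). SystemCoreClock is the core clock
   variable normally provided by the device's system_<device>.c; the error path
   here is application specific. */
extern uint32_t SystemCoreClock;

void start_1ms_tick(void)
{
  if (SysTick_Config(SystemCoreClock / 1000U) != 0U)
  {
    /* reload value does not fit the 24-bit LOAD register */
    for (;;) { __NOP(); }
  }
}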
+ */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM35P_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/Drivers/CMSIS/Include/core_cm4.h b/Drivers/CMSIS/Include/core_cm4.h new file mode 100644 index 0000000..e21cd14 --- /dev/null +++ b/Drivers/CMSIS/Include/core_cm4.h @@ -0,0 +1,2129 @@ +/**************************************************************************//** + * @file core_cm4.h + * @brief CMSIS Cortex-M4 Core Peripheral Access Layer Header File + * @version V5.1.2 + * @date 04. June 2021 + ******************************************************************************/ +/* + * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_CM4_H_GENERIC +#define __CORE_CM4_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M4 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM4 definitions */ +#define __CM4_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM4_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM4_CMSIS_VERSION ((__CM4_CMSIS_VERSION_MAIN << 16U) | \ + __CM4_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (4U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM4_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM4_H_DEPENDANT +#define __CORE_CM4_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined 
__CHECK_DEVICE_DEFINES + #ifndef __CM4_REV + #define __CM4_REV 0x0000U + #warning "__CM4_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M4 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). 
+ */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:1; /*!< bit: 9 Reserved */ + uint32_t ICI_IT_1:6; /*!< bit: 10..15 ICI/IT part 1 */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit */ + uint32_t ICI_IT_2:2; /*!< bit: 25..26 ICI/IT part 2 */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_ICI_IT_2_Pos 25U /*!< xPSR: ICI/IT part 2 Position */ +#define xPSR_ICI_IT_2_Msk (3UL << xPSR_ICI_IT_2_Pos) /*!< xPSR: ICI/IT part 2 Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ICI_IT_1_Pos 10U /*!< xPSR: ICI/IT part 1 Position */ +#define xPSR_ICI_IT_1_Msk (0x3FUL << xPSR_ICI_IT_1_Pos) /*!< xPSR: ICI/IT part 1 Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t FPCA:1; /*!< bit: 2 FP extension active flag */ + uint32_t _reserved0:29; /*!< bit: 3..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
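/* Editor's note: illustrative usage sketch, not part of the original CMSIS header.
   Decoding the core status/control registers through the union types above.
   __get_CONTROL() and __get_xPSR() are the CMSIS compiler intrinsics from
   cmsis_compiler.h. */
void inspect_core_state(void)
{
  CONTROL_Type ctrl;
  xPSR_Type    psr;

  ctrl.w = __get_CONTROL();
  psr.w  = __get_xPSR();

  uint32_t using_psp    = ctrl.b.SPSEL;   /* 1: thread mode runs on PSP           */
  uint32_t unprivileged = ctrl.b.nPRIV;   /* 1: thread mode is unprivileged       */
  uint32_t exception    = psr.b.ISR;      /* 0: thread mode, else exception number */

  (void)using_psp; (void)unprivileged; (void)exception;
}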
+ */ +typedef struct +{ + __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[24U]; + __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[24U]; + __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[24U]; + __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[24U]; + __IOM uint32_t IABR[8U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[56U]; + __IOM uint8_t IP[240U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED5[644U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHP[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ISAR[5U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + uint32_t RESERVED0[5U]; + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define 
SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define 
SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +#define SCB_AIRCR_VECTRESET_Pos 0U /*!< SCB AIRCR: VECTRESET Position */ +#define SCB_AIRCR_VECTRESET_Msk (1UL /*<< SCB_AIRCR_VECTRESET_Pos*/) /*!< SCB AIRCR: VECTRESET Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +#define SCB_CCR_NONBASETHRDENA_Pos 0U /*!< SCB CCR: NONBASETHRDENA Position */ +#define SCB_CCR_NONBASETHRDENA_Msk (1UL /*<< SCB_CCR_NONBASETHRDENA_Pos*/) /*!< SCB CCR: NONBASETHRDENA Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define 
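/* Editor's note: illustrative usage sketch, not part of the original CMSIS header.
   Enabling the dedicated fault handlers and the optional usage-fault traps with
   the SHCSR and CCR bit definitions above (a matching handler sketch follows the
   fault status definitions below). */
void enable_fault_handlers(void)
{
  SCB->SHCSR |= SCB_SHCSR_USGFAULTENA_Msk |
                SCB_SHCSR_BUSFAULTENA_Msk |
                SCB_SHCSR_MEMFAULTENA_Msk;       /* escalate to dedicated handlers   */
  SCB->CCR   |= SCB_CCR_DIV_0_TRP_Msk |
                SCB_CCR_UNALIGN_TRP_Msk;         /* trap divide-by-zero / unaligned  */
}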
SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ 
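/* Editor's note: illustrative usage sketch, not part of the original CMSIS header.
   Inspecting the configurable fault status from a MemManage handler using the
   CFSR definitions above. The CFSR status bits are write-one-to-clear; recovery
   policy is application specific. */
void MemManage_Handler(void)
{
  uint32_t cfsr = SCB->CFSR;

  if ((cfsr & SCB_CFSR_MMARVALID_Msk) != 0UL)
  {
    uint32_t fault_address = SCB->MMFAR;   /* faulting data address          */
    (void)fault_address;
  }
  SCB->CFSR = cfsr;                        /* clear the latched status bits  */
  for (;;) { __NOP(); }                    /* application-specific recovery  */
}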
+#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 
1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/* Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_DISOOFP_Pos 9U /*!< ACTLR: DISOOFP Position */ +#define SCnSCB_ACTLR_DISOOFP_Msk (1UL << SCnSCB_ACTLR_DISOOFP_Pos) /*!< ACTLR: DISOOFP Mask */ + +#define SCnSCB_ACTLR_DISFPCA_Pos 8U /*!< ACTLR: DISFPCA Position */ +#define SCnSCB_ACTLR_DISFPCA_Msk (1UL << SCnSCB_ACTLR_DISFPCA_Pos) /*!< ACTLR: DISFPCA Mask */ + +#define SCnSCB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ +#define SCnSCB_ACTLR_DISFOLD_Msk (1UL << SCnSCB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ + +#define SCnSCB_ACTLR_DISDEFWBUF_Pos 1U /*!< ACTLR: DISDEFWBUF Position */ +#define SCnSCB_ACTLR_DISDEFWBUF_Msk (1UL << SCnSCB_ACTLR_DISDEFWBUF_Pos) /*!< ACTLR: DISDEFWBUF Mask */ + +#define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ +#define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
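/* Editor's note: illustrative usage sketch, not part of the original CMSIS header.
   A simple blocking delay built directly on the SysTick registers above, clocked
   from the core clock. 'ticks' must be at least 1 and fit the 24-bit reload range;
   this sketch assumes SysTick is not already in use by the application. */
void delay_ticks(uint32_t ticks)
{
  SysTick->LOAD = ticks - 1U;
  SysTick->VAL  = 0U;                                   /* clear counter and COUNTFLAG */
  SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk |
                  SysTick_CTRL_ENABLE_Msk;              /* count, no interrupt         */
  while ((SysTick->CTRL & SysTick_CTRL_COUNTFLAG_Msk) == 0UL)
  {
    /* busy wait until the counter has wrapped once */
  }
  SysTick->CTRL = 0UL;                                  /* stop the timer              */
}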
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[6U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TraceBusID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TraceBusID_Msk (0x7FUL << ITM_TCR_TraceBusID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPrescale_Pos 8U /*!< ITM TCR: TSPrescale Position */ +#define ITM_TCR_TSPrescale_Msk (3UL << ITM_TCR_TSPrescale_Pos) /*!< ITM TCR: TSPrescale Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define 
ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + __IOM uint32_t MASK0; /*!< Offset: 0x024 (R/W) Mask Register 0 */ + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED0[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + __IOM uint32_t MASK1; /*!< Offset: 0x034 (R/W) Mask Register 1 */ + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + __IOM uint32_t MASK2; /*!< Offset: 0x044 (R/W) Mask Register 2 */ + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + __IOM uint32_t MASK3; /*!< Offset: 0x054 (R/W) Mask Register 3 */ + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define 
DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Mask Register Definitions */ +#define DWT_MASK_MASK_Pos 0U /*!< DWT MASK: MASK Position */ +#define DWT_MASK_MASK_Msk (0x1FUL /*<< DWT_MASK_MASK_Pos*/) /*!< DWT MASK: MASK Mask */ + +/* DWT Comparator Function Register 
Definitions */ +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVADDR1_Pos 16U /*!< DWT FUNCTION: DATAVADDR1 Position */ +#define DWT_FUNCTION_DATAVADDR1_Msk (0xFUL << DWT_FUNCTION_DATAVADDR1_Pos) /*!< DWT FUNCTION: DATAVADDR1 Mask */ + +#define DWT_FUNCTION_DATAVADDR0_Pos 12U /*!< DWT FUNCTION: DATAVADDR0 Position */ +#define DWT_FUNCTION_DATAVADDR0_Msk (0xFUL << DWT_FUNCTION_DATAVADDR0_Pos) /*!< DWT FUNCTION: DATAVADDR0 Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_LNK1ENA_Pos 9U /*!< DWT FUNCTION: LNK1ENA Position */ +#define DWT_FUNCTION_LNK1ENA_Msk (0x1UL << DWT_FUNCTION_LNK1ENA_Pos) /*!< DWT FUNCTION: LNK1ENA Mask */ + +#define DWT_FUNCTION_DATAVMATCH_Pos 8U /*!< DWT FUNCTION: DATAVMATCH Position */ +#define DWT_FUNCTION_DATAVMATCH_Msk (0x1UL << DWT_FUNCTION_DATAVMATCH_Pos) /*!< DWT FUNCTION: DATAVMATCH Mask */ + +#define DWT_FUNCTION_CYCMATCH_Pos 7U /*!< DWT FUNCTION: CYCMATCH Position */ +#define DWT_FUNCTION_CYCMATCH_Msk (0x1UL << DWT_FUNCTION_CYCMATCH_Pos) /*!< DWT FUNCTION: CYCMATCH Mask */ + +#define DWT_FUNCTION_EMITRANGE_Pos 5U /*!< DWT FUNCTION: EMITRANGE Position */ +#define DWT_FUNCTION_EMITRANGE_Msk (0x1UL << DWT_FUNCTION_EMITRANGE_Pos) /*!< DWT FUNCTION: EMITRANGE Mask */ + +#define DWT_FUNCTION_FUNCTION_Pos 0U /*!< DWT FUNCTION: FUNCTION Position */ +#define DWT_FUNCTION_FUNCTION_Msk (0xFUL /*<< DWT_FUNCTION_FUNCTION_Pos*/) /*!< DWT FUNCTION: FUNCTION Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). 
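+ \details The TPIU is normally programmed by the debug probe rather than by application code. For
+          reference, a hand-configured single-wire output (SWO) setup might look like the sketch
+          below; the 168 MHz trace clock and 2 MHz SWO rate are illustrative values only, not
+          anything mandated by this header:
+ \code
+   TPI->SPPR = 2UL;                               // TXMODE = 2: SWO with NRZ (UART-like) encoding
+   TPI->ACPR = (168000000UL / 2000000UL) - 1UL;   // SWO clock = trace clock / (ACPR + 1)
+ \endcode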
+ */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IM uint32_t FSCR; /*!< Offset: 0x308 (R/ ) Formatter Synchronization Counter Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t FIFO0; /*!< Offset: 0xEEC (R/ ) Integration ETM Data */ + __IM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/ ) ITATBCTR2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) ITATBCTR0 */ + __IM uint32_t FIFO1; /*!< Offset: 0xEFC (R/ ) Integration ITM Data */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) TPIU_DEVID */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) TPIU_DEVTYPE */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration ETM Data Register Definitions (FIFO0) */ +#define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */ +#define TPI_FIFO0_ITM_ATVALID_Msk (0x1UL << TPI_FIFO0_ITM_ATVALID_Pos) 
/*!< TPI FIFO0: ITM_ATVALID Mask */ + +#define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */ +#define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */ + +#define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */ +#define TPI_FIFO0_ETM_ATVALID_Msk (0x1UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ + +#define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */ +#define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */ + +#define TPI_FIFO0_ETM2_Pos 16U /*!< TPI FIFO0: ETM2 Position */ +#define TPI_FIFO0_ETM2_Msk (0xFFUL << TPI_FIFO0_ETM2_Pos) /*!< TPI FIFO0: ETM2 Mask */ + +#define TPI_FIFO0_ETM1_Pos 8U /*!< TPI FIFO0: ETM1 Position */ +#define TPI_FIFO0_ETM1_Msk (0xFFUL << TPI_FIFO0_ETM1_Pos) /*!< TPI FIFO0: ETM1 Mask */ + +#define TPI_FIFO0_ETM0_Pos 0U /*!< TPI FIFO0: ETM0 Position */ +#define TPI_FIFO0_ETM0_Msk (0xFFUL /*<< TPI_FIFO0_ETM0_Pos*/) /*!< TPI FIFO0: ETM0 Mask */ + +/* TPI ITATBCTR2 Register Definitions */ +#define TPI_ITATBCTR2_ATREADY2_Pos 0U /*!< TPI ITATBCTR2: ATREADY2 Position */ +#define TPI_ITATBCTR2_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2_Pos*/) /*!< TPI ITATBCTR2: ATREADY2 Mask */ + +#define TPI_ITATBCTR2_ATREADY1_Pos 0U /*!< TPI ITATBCTR2: ATREADY1 Position */ +#define TPI_ITATBCTR2_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1_Pos*/) /*!< TPI ITATBCTR2: ATREADY1 Mask */ + +/* TPI Integration ITM Data Register Definitions (FIFO1) */ +#define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */ +#define TPI_FIFO1_ITM_ATVALID_Msk (0x1UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ + +#define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */ +#define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */ + +#define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */ +#define TPI_FIFO1_ETM_ATVALID_Msk (0x1UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ + +#define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */ +#define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) /*!< TPI FIFO1: ETM_bytecount Mask */ + +#define TPI_FIFO1_ITM2_Pos 16U /*!< TPI FIFO1: ITM2 Position */ +#define TPI_FIFO1_ITM2_Msk (0xFFUL << TPI_FIFO1_ITM2_Pos) /*!< TPI FIFO1: ITM2 Mask */ + +#define TPI_FIFO1_ITM1_Pos 8U /*!< TPI FIFO1: ITM1 Position */ +#define TPI_FIFO1_ITM1_Msk (0xFFUL << TPI_FIFO1_ITM1_Pos) /*!< TPI FIFO1: ITM1 Mask */ + +#define TPI_FIFO1_ITM0_Pos 0U /*!< TPI FIFO1: ITM0 Position */ +#define TPI_FIFO1_ITM0_Msk (0xFFUL /*<< TPI_FIFO1_ITM0_Pos*/) /*!< TPI FIFO1: ITM0 Mask */ + +/* TPI ITATBCTR0 Register Definitions */ +#define TPI_ITATBCTR0_ATREADY2_Pos 0U /*!< TPI ITATBCTR0: ATREADY2 Position */ +#define TPI_ITATBCTR0_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2_Pos*/) /*!< TPI ITATBCTR0: ATREADY2 Mask */ + +#define TPI_ITATBCTR0_ATREADY1_Pos 0U /*!< TPI ITATBCTR0: ATREADY1 Position */ +#define TPI_ITATBCTR0_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1_Pos*/) /*!< TPI ITATBCTR0: ATREADY1 Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define 
TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_MinBufSz_Pos 6U /*!< TPI DEVID: MinBufSz Position */ +#define TPI_DEVID_MinBufSz_Msk (0x7UL << TPI_DEVID_MinBufSz_Pos) /*!< TPI DEVID: MinBufSz Mask */ + +#define TPI_DEVID_AsynClkIn_Pos 5U /*!< TPI DEVID: AsynClkIn Position */ +#define TPI_DEVID_AsynClkIn_Msk (0x1UL << TPI_DEVID_AsynClkIn_Pos) /*!< TPI DEVID: AsynClkIn Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define TPI_DEVID_NrTraceInput_Msk (0x1FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
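+ \details A region is typically programmed through RNR/RBAR/RASR, or with the ARM_MPU_* helpers from
+          mpu_armv7.h, which this header includes when an MPU is present. A minimal sketch for one
+          32 KB, full-access, execute-never region follows; the base address, region number and
+          attribute choices are illustrative only:
+ \code
+   MPU->CTRL = 0UL;                                            // disable the MPU while reconfiguring
+   MPU->RNR  = 0UL;                                            // select region 0
+   MPU->RBAR = 0x20000000UL;                                   // region base, aligned to the region size
+   MPU->RASR = (1UL  << MPU_RASR_XN_Pos)   |                   // execute never
+               (3UL  << MPU_RASR_AP_Pos)   |                   // AP = 0b011: read/write at any privilege level
+               (1UL  << MPU_RASR_C_Pos)    |                   // normal memory, cacheable
+               (14UL << MPU_RASR_SIZE_Pos) |                   // size = 2^(14+1) bytes = 32 KB
+               MPU_RASR_ENABLE_Msk;                            // enable this region
+   MPU->CTRL = MPU_CTRL_PRIVDEFENA_Msk | MPU_CTRL_ENABLE_Msk;  // enable MPU, keep default map for privileged code
+   __DSB(); __ISB();                                           // make sure the new settings are in effect
+ \endcode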
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region RNRber Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Alias 1 Region Base Address Register */ + __IOM uint32_t RASR_A1; /*!< Offset: 0x018 (R/W) MPU Alias 1 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Alias 2 Region Base Address Register */ + __IOM uint32_t RASR_A2; /*!< Offset: 0x020 (R/W) MPU Alias 2 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Alias 3 Region Base Address Register */ + __IOM uint32_t RASR_A3; /*!< Offset: 0x028 (R/W) MPU Alias 3 Region Attribute and Size Register */ +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 5U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0x7FFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/* MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX 
Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif /* defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and FP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and FP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and FP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< 
FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/* Media and FP Feature Register 0 Definitions */ +#define FPU_MVFR0_FP_rounding_modes_Pos 28U /*!< MVFR0: FP rounding modes bits Position */ +#define FPU_MVFR0_FP_rounding_modes_Msk (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos) /*!< MVFR0: FP rounding modes bits Mask */ + +#define FPU_MVFR0_Short_vectors_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_Short_vectors_Msk (0xFUL << FPU_MVFR0_Short_vectors_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_Square_root_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_Square_root_Msk (0xFUL << FPU_MVFR0_Square_root_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_Divide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_Divide_Msk (0xFUL << FPU_MVFR0_Divide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FP_excep_trapping_Pos 12U /*!< MVFR0: FP exception trapping bits Position */ +#define FPU_MVFR0_FP_excep_trapping_Msk (0xFUL << FPU_MVFR0_FP_excep_trapping_Pos) /*!< MVFR0: FP exception trapping bits Mask */ + +#define FPU_MVFR0_Double_precision_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_Double_precision_Msk (0xFUL << FPU_MVFR0_Double_precision_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_Single_precision_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_Single_precision_Msk (0xFUL << FPU_MVFR0_Single_precision_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_A_SIMD_registers_Pos 0U /*!< MVFR0: A_SIMD registers bits Position */ +#define FPU_MVFR0_A_SIMD_registers_Msk (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/) /*!< MVFR0: A_SIMD registers bits Mask */ + +/* Media and FP Feature Register 1 Definitions */ +#define FPU_MVFR1_FP_fused_MAC_Pos 28U /*!< MVFR1: FP fused MAC bits Position */ +#define FPU_MVFR1_FP_fused_MAC_Msk (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos) /*!< MVFR1: FP fused MAC bits Mask */ + +#define FPU_MVFR1_FP_HPFP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FP_HPFP_Msk (0xFUL << FPU_MVFR1_FP_HPFP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_D_NaN_mode_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_D_NaN_mode_Msk (0xFUL << FPU_MVFR1_D_NaN_mode_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + +#define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/* Media and FP Feature Register 2 Definitions */ + +#define FPU_MVFR2_VFP_Misc_Pos 4U /*!< MVFR2: VFP Misc bits Position */ +#define FPU_MVFR2_VFP_Misc_Msk (0xFUL << FPU_MVFR2_VFP_Misc_Pos) /*!< MVFR2: VFP Misc bits Mask */ + +/*@} end of group 
CMSIS_FPU */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< 
CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. 
This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ +#define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ +#define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ +#define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ +#define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ +#define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ +#define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ +#define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE) /*!< Core Debug configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +#define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ +#define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
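+          A typical bring-up sequence for a device interrupt is sketched below; TIM2_IRQn stands in
+          for whatever device-specific IRQ number the device header actually defines:
+ \code
+   NVIC_SetPriorityGrouping(3UL);                        // PRIGROUP = 3: all implemented bits are preempt priority on a 4-bit NVIC
+   uint32_t prio = NVIC_EncodePriority(3UL, 1UL, 0UL);   // preempt priority 1, sub-priority 0
+   NVIC_SetPriority(TIM2_IRQn, prio);                    // TIM2_IRQn is a placeholder device IRQ
+   NVIC_EnableIRQ(TIM2_IRQn);
+ \endcode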
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ +#define EXC_RETURN_HANDLER_FPU (0xFFFFFFE1UL) /* return to Handler mode, uses MSP after return, restore floating-point state */ +#define EXC_RETURN_THREAD_MSP_FPU (0xFFFFFFE9UL) /* return to Thread mode, uses MSP after return, restore floating-point state */ +#define EXC_RETURN_THREAD_PSP_FPU (0xFFFFFFEDUL) /* return to Thread mode, uses PSP after return, restore floating-point state */ + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. 
+ \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHP[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IP[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHP[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + /* ARM Application Note 321 states that the M4 does not require the architectural barrier */ +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv7.h" + +#endif + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. 
+ Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. 
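+ \details ITM_RxBuffer itself is not defined in this header; the application (or debugger support
+          code) has to provide it for the receive path to work. A simple polled echo loop might
+          look like:
+ \code
+   volatile int32_t ITM_RxBuffer = ITM_RXBUFFER_EMPTY;   // storage written by the debugger
+
+   void echo_loop(void)                                  // illustrative helper, not part of CMSIS
+   {
+     for (;;)
+     {
+       if (ITM_CheckChar() != 0)                         // character delivered by the debugger?
+       {
+         ITM_SendChar((uint32_t)ITM_ReceiveChar());      // echo it back over ITM stimulus port 0
+       }
+     }
+   }
+ \endcode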
+ */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM4_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/Drivers/CMSIS/Include/core_cm55.h b/Drivers/CMSIS/Include/core_cm55.h new file mode 100644 index 0000000..faa30ce --- /dev/null +++ b/Drivers/CMSIS/Include/core_cm55.h @@ -0,0 +1,4817 @@ +/**************************************************************************//** + * @file core_cm55.h + * @brief CMSIS Cortex-M55 Core Peripheral Access Layer Header File + * @version V1.2.4 + * @date 21. April 2022 + ******************************************************************************/ +/* + * Copyright (c) 2018-2022 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM55_H_GENERIC +#define __CORE_CM55_H_GENERIC + +#include <stdint.h> + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M55 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM55 definitions */ +#define __CM55_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM55_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM55_CMSIS_VERSION ((__CM55_CMSIS_VERSION_MAIN << 16U) | \ + __CM55_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (55U) /*!< Cortex-M Core */ + +#if defined ( __CC_ARM ) + #error Legacy Arm Compiler does not support Armv8.1-M target architecture. +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + 
#define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM55_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM55_H_DEPENDANT +#define __CORE_CM55_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM55_REV + #define __CM55_REV 0x0000U + #warning "__CM55_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #if __FPU_PRESENT != 0U + #ifndef __FPU_DP + #define __FPU_DP 0U + #warning "__FPU_DP not defined in device header file; using default!" + #endif + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __ICACHE_PRESENT + #define __ICACHE_PRESENT 0U + #warning "__ICACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DCACHE_PRESENT + #define __DCACHE_PRESENT 0U + #warning "__DCACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __PMU_PRESENT + #define __PMU_PRESENT 0U + #warning "__PMU_PRESENT not defined in device header file; using default!" + #endif + + #if __PMU_PRESENT != 0U + #ifndef __PMU_NUM_EVENTCNT + #define __PMU_NUM_EVENTCNT 8U + #warning "__PMU_NUM_EVENTCNT not defined in device header file; using default!" + #elif (__PMU_NUM_EVENTCNT > 8 || __PMU_NUM_EVENTCNT < 2) + #error "__PMU_NUM_EVENTCNT is out of range in device header file!" */ + #endif + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M55 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core EWIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core PMU Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
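As a minimal usage sketch of the status-register union types above: the IPSR union pairs with the __get_IPSR() intrinsic from cmsis_compiler.h to tell Thread mode from Handler mode. The helper name is hypothetical.

/* Return non-zero when executing inside an exception handler:
   IPSR.ISR holds the active exception number, 0 in Thread mode. */
static inline uint32_t in_handler_mode(void)
{
  IPSR_Type ipsr;
  ipsr.w = __get_IPSR();        /* read the Interrupt Program Status Register */
  return (uint32_t)(ipsr.b.ISR != 0U);
}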
8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t _reserved1:28; /*!< bit: 4..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t 
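A minimal sketch of driving the NVIC registers defined above; the NVIC instance macro and the device IRQ numbers are normally supplied later in this header and in the device header, so they are passed in here, and the helper name is illustrative.

/* Enable interrupt 'irqn' and program its priority.
   Only the upper __NVIC_PRIO_BITS of each 8-bit IPR entry are implemented,
   so the priority value is shifted into the most-significant bits. */
static inline void nvic_enable_and_prioritize(NVIC_Type *nvic, uint32_t irqn, uint32_t prio)
{
  nvic->IPR[irqn]        = (uint8_t)((prio << (8U - __NVIC_PRIO_BITS)) & 0xFFUL);
  nvic->ISER[irqn >> 5U] = (1UL << (irqn & 0x1FUL));    /* one enable bit per interrupt */
}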
RESERVED7[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + __IOM uint32_t RFSR; /*!< Offset: 0x204 (R/W) RAS Fault Status Register */ + uint32_t RESERVED4[14U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position 
*/ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_IESB_Pos 5U /*!< SCB AIRCR: Implicit ESB Enable Position */ +#define SCB_AIRCR_IESB_Msk (1UL << SCB_AIRCR_IESB_Pos) /*!< SCB AIRCR: Implicit ESB Enable Mask */ + +#define SCB_AIRCR_DIT_Pos 4U /*!< SCB AIRCR: Data Independent Timing Position */ +#define SCB_AIRCR_DIT_Msk (1UL << SCB_AIRCR_DIT_Pos) /*!< SCB AIRCR: Data Independent Timing Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define 
SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_TRD_Pos 20U /*!< SCB CCR: TRD Position */ +#define SCB_CCR_TRD_Msk (1UL << SCB_CCR_TRD_Pos) /*!< SCB CCR: TRD Mask */ + +#define SCB_CCR_LOB_Pos 19U /*!< SCB CCR: LOB Position */ +#define SCB_CCR_LOB_Msk (1UL << SCB_CCR_LOB_Pos) /*!< SCB CCR: LOB Mask */ + +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: 
MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status 
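A small fault-triage sketch built only on the CFSR byte-lane masks defined just above; the SCB instance macro is defined further down in this header (outside this excerpt), so the pointer is a parameter, and the helper and its return codes are illustrative.

/* Classify the fault recorded in the Configurable Fault Status Register. */
static inline uint32_t cfsr_fault_class(const SCB_Type *scb)
{
  uint32_t cfsr = scb->CFSR;
  if ((cfsr & SCB_CFSR_MEMFAULTSR_Msk) != 0UL) { return 1U; }  /* MemManage fault */
  if ((cfsr & SCB_CFSR_BUSFAULTSR_Msk) != 0UL) { return 2U; }  /* BusFault        */
  if ((cfsr & SCB_CFSR_USGFAULTSR_Msk) != 0UL) { return 3U; }  /* UsageFault      */
  return 0U;                                                   /* nothing latched */
}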
Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos 
(SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_PMU_Pos 5U /*!< SCB DFSR: PMU Position */ +#define SCB_DFSR_PMU_Msk (1UL << SCB_DFSR_PMU_Pos) /*!< SCB DFSR: PMU Mask */ + +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CP7_Pos 7U /*!< SCB NSACR: CP7 Position */ +#define SCB_NSACR_CP7_Msk (1UL << SCB_NSACR_CP7_Pos) /*!< SCB NSACR: CP7 Mask */ + +#define SCB_NSACR_CP6_Pos 6U /*!< SCB NSACR: CP6 Position */ +#define SCB_NSACR_CP6_Msk (1UL << SCB_NSACR_CP6_Pos) /*!< SCB NSACR: CP6 Mask */ + +#define SCB_NSACR_CP5_Pos 5U /*!< SCB NSACR: CP5 Position */ +#define SCB_NSACR_CP5_Msk (1UL << SCB_NSACR_CP5_Pos) /*!< SCB NSACR: CP5 Mask */ + +#define SCB_NSACR_CP4_Pos 4U /*!< SCB NSACR: CP4 Position */ +#define SCB_NSACR_CP4_Msk (1UL << SCB_NSACR_CP4_Pos) /*!< SCB NSACR: CP4 Mask */ + +#define SCB_NSACR_CP3_Pos 3U /*!< SCB NSACR: CP3 Position */ +#define SCB_NSACR_CP3_Msk (1UL << SCB_NSACR_CP3_Pos) /*!< SCB NSACR: CP3 Mask */ + +#define SCB_NSACR_CP2_Pos 2U /*!< SCB NSACR: CP2 Position */ +#define SCB_NSACR_CP2_Msk (1UL << SCB_NSACR_CP2_Pos) /*!< SCB NSACR: CP2 Mask */ + +#define 
SCB_NSACR_CP1_Pos 1U /*!< SCB NSACR: CP1 Position */ +#define SCB_NSACR_CP1_Msk (1UL << SCB_NSACR_CP1_Pos) /*!< SCB NSACR: CP1 Mask */ + +#define SCB_NSACR_CP0_Pos 0U /*!< SCB NSACR: CP0 Position */ +#define SCB_NSACR_CP0_Msk (1UL /*<< SCB_NSACR_CP0_Pos*/) /*!< SCB NSACR: CP0 Mask */ + +/* SCB Debug Feature Register 0 Definitions */ +#define SCB_ID_DFR_UDE_Pos 28U /*!< SCB ID_DFR: UDE Position */ +#define SCB_ID_DFR_UDE_Msk (0xFUL << SCB_ID_DFR_UDE_Pos) /*!< SCB ID_DFR: UDE Mask */ + +#define SCB_ID_DFR_MProfDbg_Pos 20U /*!< SCB ID_DFR: MProfDbg Position */ +#define SCB_ID_DFR_MProfDbg_Msk (0xFUL << SCB_ID_DFR_MProfDbg_Pos) /*!< SCB ID_DFR: MProfDbg Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< 
SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB RAS Fault Status Register Definitions */ +#define SCB_RFSR_V_Pos 31U /*!< SCB RFSR: V Position */ +#define SCB_RFSR_V_Msk (1UL << SCB_RFSR_V_Pos) /*!< SCB RFSR: V Mask */ + +#define SCB_RFSR_IS_Pos 16U /*!< SCB RFSR: IS Position */ +#define SCB_RFSR_IS_Msk (0x7FFFUL << SCB_RFSR_IS_Pos) /*!< SCB RFSR: IS Mask */ + +#define SCB_RFSR_UET_Pos 0U /*!< SCB RFSR: UET Position */ +#define SCB_RFSR_UET_Msk (3UL /*<< SCB_RFSR_UET_Pos*/) /*!< SCB RFSR: UET Mask */ + +/* SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ICB Implementation Control Block register (ICB) + \brief Type definitions for the Implementation Control Block Register + @{ + */ + +/** + \brief Structure type to access the Implementation Control Block (ICB). 
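A sketch of a whole-D-cache clean-and-invalidate by set/way, in the spirit of the cache-maintenance helpers CMSIS normally provides further down in this header; it uses only the CCSIDR and DCCISW fields defined above, with the SCB pointer passed in because the instance macro is outside this excerpt.

static inline void dcache_clean_invalidate_all(SCB_Type *scb)
{
  scb->CSSELR = 0U;                          /* select the Level 1 data cache */
  __DSB();
  uint32_t ccsidr = scb->CCSIDR;
  uint32_t sets   = (ccsidr & SCB_CCSIDR_NUMSETS_Msk)       >> SCB_CCSIDR_NUMSETS_Pos;
  uint32_t ways   = (ccsidr & SCB_CCSIDR_ASSOCIATIVITY_Msk) >> SCB_CCSIDR_ASSOCIATIVITY_Pos;
  do {
    uint32_t w = ways;
    do {
      /* clean and invalidate one cache line, addressed by set and way */
      scb->DCCISW = ((sets << SCB_DCCISW_SET_Pos) & SCB_DCCISW_SET_Msk) |
                    ((w    << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk);
    } while (w-- != 0U);
  } while (sets-- != 0U);
  __DSB();
  __ISB();
}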
+ */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} ICB_Type; + +/* Auxiliary Control Register Definitions */ +#define ICB_ACTLR_DISCRITAXIRUW_Pos 27U /*!< ACTLR: DISCRITAXIRUW Position */ +#define ICB_ACTLR_DISCRITAXIRUW_Msk (1UL << ICB_ACTLR_DISCRITAXIRUW_Pos) /*!< ACTLR: DISCRITAXIRUW Mask */ + +#define ICB_ACTLR_DISDI_Pos 16U /*!< ACTLR: DISDI Position */ +#define ICB_ACTLR_DISDI_Msk (3UL << ICB_ACTLR_DISDI_Pos) /*!< ACTLR: DISDI Mask */ + +#define ICB_ACTLR_DISCRITAXIRUR_Pos 15U /*!< ACTLR: DISCRITAXIRUR Position */ +#define ICB_ACTLR_DISCRITAXIRUR_Msk (1UL << ICB_ACTLR_DISCRITAXIRUR_Pos) /*!< ACTLR: DISCRITAXIRUR Mask */ + +#define ICB_ACTLR_EVENTBUSEN_Pos 14U /*!< ACTLR: EVENTBUSEN Position */ +#define ICB_ACTLR_EVENTBUSEN_Msk (1UL << ICB_ACTLR_EVENTBUSEN_Pos) /*!< ACTLR: EVENTBUSEN Mask */ + +#define ICB_ACTLR_EVENTBUSEN_S_Pos 13U /*!< ACTLR: EVENTBUSEN_S Position */ +#define ICB_ACTLR_EVENTBUSEN_S_Msk (1UL << ICB_ACTLR_EVENTBUSEN_S_Pos) /*!< ACTLR: EVENTBUSEN_S Mask */ + +#define ICB_ACTLR_DISITMATBFLUSH_Pos 12U /*!< ACTLR: DISITMATBFLUSH Position */ +#define ICB_ACTLR_DISITMATBFLUSH_Msk (1UL << ICB_ACTLR_DISITMATBFLUSH_Pos) /*!< ACTLR: DISITMATBFLUSH Mask */ + +#define ICB_ACTLR_DISNWAMODE_Pos 11U /*!< ACTLR: DISNWAMODE Position */ +#define ICB_ACTLR_DISNWAMODE_Msk (1UL << ICB_ACTLR_DISNWAMODE_Pos) /*!< ACTLR: DISNWAMODE Mask */ + +#define ICB_ACTLR_FPEXCODIS_Pos 10U /*!< ACTLR: FPEXCODIS Position */ +#define ICB_ACTLR_FPEXCODIS_Msk (1UL << ICB_ACTLR_FPEXCODIS_Pos) /*!< ACTLR: FPEXCODIS Mask */ + +#define ICB_ACTLR_DISOLAP_Pos 7U /*!< ACTLR: DISOLAP Position */ +#define ICB_ACTLR_DISOLAP_Msk (1UL << ICB_ACTLR_DISOLAP_Pos) /*!< ACTLR: DISOLAP Mask */ + +#define ICB_ACTLR_DISOLAPS_Pos 6U /*!< ACTLR: DISOLAPS Position */ +#define ICB_ACTLR_DISOLAPS_Msk (1UL << ICB_ACTLR_DISOLAPS_Pos) /*!< ACTLR: DISOLAPS Mask */ + +#define ICB_ACTLR_DISLOBR_Pos 5U /*!< ACTLR: DISLOBR Position */ +#define ICB_ACTLR_DISLOBR_Msk (1UL << ICB_ACTLR_DISLOBR_Pos) /*!< ACTLR: DISLOBR Mask */ + +#define ICB_ACTLR_DISLO_Pos 4U /*!< ACTLR: DISLO Position */ +#define ICB_ACTLR_DISLO_Msk (1UL << ICB_ACTLR_DISLO_Pos) /*!< ACTLR: DISLO Mask */ + +#define ICB_ACTLR_DISLOLEP_Pos 3U /*!< ACTLR: DISLOLEP Position */ +#define ICB_ACTLR_DISLOLEP_Msk (1UL << ICB_ACTLR_DISLOLEP_Pos) /*!< ACTLR: DISLOLEP Mask */ + +#define ICB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ +#define ICB_ACTLR_DISFOLD_Msk (1UL << ICB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ + +/* Interrupt Controller Type Register Definitions */ +#define ICB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define ICB_ICTR_INTLINESNUM_Msk (0xFUL /*<< ICB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_ICB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
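A minimal configuration sketch for the SysTick registers above; this is a hypothetical helper, and CMSIS normally provides an equivalent SysTick_Config() further down in this header that additionally programs the exception priority.

/* Start SysTick as a periodic down-counter with interrupt, clocked from the core clock.
   'ticks' must fit in the 24-bit RELOAD field. */
static inline int32_t systick_start(SysTick_Type *systick, uint32_t ticks)
{
  if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) { return -1; }   /* reload value too large */
  systick->LOAD = ticks - 1UL;                                  /* count from ticks-1 down to 0 */
  systick->VAL  = 0UL;                                          /* clear current value and COUNTFLAG */
  systick->CTRL = SysTick_CTRL_CLKSOURCE_Msk |
                  SysTick_CTRL_TICKINT_Msk   |
                  SysTick_CTRL_ENABLE_Msk;
  return 0;
}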
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) ITM Device Architecture Register */ + uint32_t RESERVED6[3U]; + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) ITM Device Type Register */ + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (0x1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (0x1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: 
STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + 
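A blocking character-output sketch over ITM stimulus port 0, modeled on the usual ITM_SendChar() pattern; the ITM instance macro lives outside this excerpt, so a pointer parameter is used and the helper name is illustrative.

static inline void itm_putc(ITM_Type *itm, char ch)
{
  /* Only transmit when the ITM and stimulus port 0 are enabled. */
  if (((itm->TCR & ITM_TCR_ITMENA_Msk) != 0UL) &&
      ((itm->TER & 1UL)                != 0UL))
  {
    /* a stimulus port reads back non-zero when its FIFO can accept data */
    while (itm->PORT[0U].u32 == 0UL) { __NOP(); }
    itm->PORT[0U].u8 = (uint8_t)ch;          /* 8-bit write emits one character */
  }
}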
__IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ + uint32_t RESERVED32[934U]; + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ + uint32_t RESERVED33[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (0x1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ 
+#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define 
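A short sketch that uses the DWT cycle-counter fields above for cycle-accurate timing; it assumes trace has already been enabled through DEMCR.TRCENA in the debug control block, which is defined elsewhere in this header and not shown in this excerpt.

static inline void dwt_cyccnt_start(DWT_Type *dwt)
{
  dwt->CYCCNT = 0U;                       /* reset the cycle counter      */
  dwt->CTRL  |= DWT_CTRL_CYCCNTENA_Msk;   /* start counting core cycles   */
}

static inline uint32_t dwt_cyccnt_read(const DWT_Type *dwt)
{
  return dwt->CYCCNT;                     /* wraps modulo 2^32            */
}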
DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x1UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup MemSysCtl_Type Memory System Control Registers (IMPLEMENTATION DEFINED) + \brief Type definitions for the Memory System Control Registers (MEMSYSCTL) + @{ + */ + +/** + \brief Structure type to access the Memory System Control Registers (MEMSYSCTL). + */ +typedef struct +{ + __IOM uint32_t MSCR; /*!< Offset: 0x000 (R/W) Memory System Control Register */ + __IOM uint32_t PFCR; /*!< Offset: 0x004 (R/W) Prefetcher Control Register */ + uint32_t RESERVED1[2U]; + __IOM uint32_t ITCMCR; /*!< Offset: 0x010 (R/W) ITCM Control Register */ + __IOM uint32_t DTCMCR; /*!< Offset: 0x014 (R/W) DTCM Control Register */ + __IOM uint32_t PAHBCR; /*!< Offset: 0x018 (R/W) P-AHB Control Register */ + uint32_t RESERVED2[313U]; + __IOM uint32_t ITGU_CTRL; /*!< Offset: 0x500 (R/W) ITGU Control Register */ + __IOM uint32_t ITGU_CFG; /*!< Offset: 0x504 (R/W) ITGU Configuration Register */ + uint32_t RESERVED3[2U]; + __IOM uint32_t ITGU_LUT[16U]; /*!< Offset: 0x510 (R/W) ITGU Look Up Table Register */ + uint32_t RESERVED4[44U]; + __IOM uint32_t DTGU_CTRL; /*!< Offset: 0x600 (R/W) DTGU Control Registers */ + __IOM uint32_t DTGU_CFG; /*!< Offset: 0x604 (R/W) DTGU Configuration Register */ + uint32_t RESERVED5[2U]; + __IOM uint32_t DTGU_LUT[16U]; /*!< Offset: 0x610 (R/W) DTGU Look Up Table Register */ +} MemSysCtl_Type; + +/* MEMSYSCTL Memory System Control Register (MSCR) Register Definitions */ +#define MEMSYSCTL_MSCR_CPWRDN_Pos 17U /*!< MEMSYSCTL MSCR: CPWRDN Position */ +#define MEMSYSCTL_MSCR_CPWRDN_Msk (0x1UL << MEMSYSCTL_MSCR_CPWRDN_Pos) /*!< MEMSYSCTL MSCR: CPWRDN Mask */ + +#define MEMSYSCTL_MSCR_DCCLEAN_Pos 16U /*!< MEMSYSCTL MSCR: DCCLEAN Position */ +#define MEMSYSCTL_MSCR_DCCLEAN_Msk (0x1UL << MEMSYSCTL_MSCR_DCCLEAN_Pos) /*!< MEMSYSCTL MSCR: DCCLEAN Mask */ + +#define MEMSYSCTL_MSCR_ICACTIVE_Pos 13U /*!< MEMSYSCTL MSCR: ICACTIVE Position */ +#define MEMSYSCTL_MSCR_ICACTIVE_Msk (0x1UL << MEMSYSCTL_MSCR_ICACTIVE_Pos) /*!< MEMSYSCTL MSCR: ICACTIVE Mask */ + +#define MEMSYSCTL_MSCR_DCACTIVE_Pos 12U /*!< MEMSYSCTL MSCR: DCACTIVE Position */ +#define MEMSYSCTL_MSCR_DCACTIVE_Msk (0x1UL << MEMSYSCTL_MSCR_DCACTIVE_Pos) /*!< MEMSYSCTL MSCR: DCACTIVE Mask */ + +#define MEMSYSCTL_MSCR_TECCCHKDIS_Pos 4U /*!< MEMSYSCTL MSCR: TECCCHKDIS Position */ +#define MEMSYSCTL_MSCR_TECCCHKDIS_Msk (0x1UL << MEMSYSCTL_MSCR_TECCCHKDIS_Pos) /*!< MEMSYSCTL MSCR: TECCCHKDIS Mask */ + +#define MEMSYSCTL_MSCR_EVECCFAULT_Pos 3U /*!< MEMSYSCTL MSCR: EVECCFAULT Position */ +#define MEMSYSCTL_MSCR_EVECCFAULT_Msk (0x1UL << MEMSYSCTL_MSCR_EVECCFAULT_Pos) /*!< MEMSYSCTL MSCR: EVECCFAULT Mask */ + +#define MEMSYSCTL_MSCR_FORCEWT_Pos 2U /*!< MEMSYSCTL MSCR: FORCEWT Position */ +#define MEMSYSCTL_MSCR_FORCEWT_Msk (0x1UL << MEMSYSCTL_MSCR_FORCEWT_Pos) /*!< MEMSYSCTL MSCR: FORCEWT Mask */ + +#define MEMSYSCTL_MSCR_ECCEN_Pos 1U /*!< MEMSYSCTL MSCR: ECCEN Position */ +#define MEMSYSCTL_MSCR_ECCEN_Msk (0x1UL << MEMSYSCTL_MSCR_ECCEN_Pos) 
/*!< MEMSYSCTL MSCR: ECCEN Mask */ + +/* MEMSYSCTL Prefetcher Control Register (PFCR) Register Definitions */ +#define MEMSYSCTL_PFCR_MAX_OS_Pos 7U /*!< MEMSYSCTL PFCR: MAX_OS Position */ +#define MEMSYSCTL_PFCR_MAX_OS_Msk (0x7UL << MEMSYSCTL_PFCR_MAX_OS_Pos) /*!< MEMSYSCTL PFCR: MAX_OS Mask */ + +#define MEMSYSCTL_PFCR_MAX_LA_Pos 4U /*!< MEMSYSCTL PFCR: MAX_LA Position */ +#define MEMSYSCTL_PFCR_MAX_LA_Msk (0x7UL << MEMSYSCTL_PFCR_MAX_LA_Pos) /*!< MEMSYSCTL PFCR: MAX_LA Mask */ + +#define MEMSYSCTL_PFCR_MIN_LA_Pos 1U /*!< MEMSYSCTL PFCR: MIN_LA Position */ +#define MEMSYSCTL_PFCR_MIN_LA_Msk (0x7UL << MEMSYSCTL_PFCR_MIN_LA_Pos) /*!< MEMSYSCTL PFCR: MIN_LA Mask */ + +#define MEMSYSCTL_PFCR_ENABLE_Pos 0U /*!< MEMSYSCTL PFCR: ENABLE Position */ +#define MEMSYSCTL_PFCR_ENABLE_Msk (0x1UL /*<< MEMSYSCTL_PFCR_ENABLE_Pos*/) /*!< MEMSYSCTL PFCR: ENABLE Mask */ + +/* MEMSYSCTL ITCM Control Register (ITCMCR) Register Definitions */ +#define MEMSYSCTL_ITCMCR_SZ_Pos 3U /*!< MEMSYSCTL ITCMCR: SZ Position */ +#define MEMSYSCTL_ITCMCR_SZ_Msk (0xFUL << MEMSYSCTL_ITCMCR_SZ_Pos) /*!< MEMSYSCTL ITCMCR: SZ Mask */ + +#define MEMSYSCTL_ITCMCR_EN_Pos 0U /*!< MEMSYSCTL ITCMCR: EN Position */ +#define MEMSYSCTL_ITCMCR_EN_Msk (0x1UL /*<< MEMSYSCTL_ITCMCR_EN_Pos*/) /*!< MEMSYSCTL ITCMCR: EN Mask */ + +/* MEMSYSCTL DTCM Control Register (DTCMCR) Register Definitions */ +#define MEMSYSCTL_DTCMCR_SZ_Pos 3U /*!< MEMSYSCTL DTCMCR: SZ Position */ +#define MEMSYSCTL_DTCMCR_SZ_Msk (0xFUL << MEMSYSCTL_DTCMCR_SZ_Pos) /*!< MEMSYSCTL DTCMCR: SZ Mask */ + +#define MEMSYSCTL_DTCMCR_EN_Pos 0U /*!< MEMSYSCTL DTCMCR: EN Position */ +#define MEMSYSCTL_DTCMCR_EN_Msk (0x1UL /*<< MEMSYSCTL_DTCMCR_EN_Pos*/) /*!< MEMSYSCTL DTCMCR: EN Mask */ + +/* MEMSYSCTL P-AHB Control Register (PAHBCR) Register Definitions */ +#define MEMSYSCTL_PAHBCR_SZ_Pos 1U /*!< MEMSYSCTL PAHBCR: SZ Position */ +#define MEMSYSCTL_PAHBCR_SZ_Msk (0x7UL << MEMSYSCTL_PAHBCR_SZ_Pos) /*!< MEMSYSCTL PAHBCR: SZ Mask */ + +#define MEMSYSCTL_PAHBCR_EN_Pos 0U /*!< MEMSYSCTL PAHBCR: EN Position */ +#define MEMSYSCTL_PAHBCR_EN_Msk (0x1UL /*<< MEMSYSCTL_PAHBCR_EN_Pos*/) /*!< MEMSYSCTL PAHBCR: EN Mask */ + +/* MEMSYSCTL ITGU Control Register (ITGU_CTRL) Register Definitions */ +#define MEMSYSCTL_ITGU_CTRL_DEREN_Pos 1U /*!< MEMSYSCTL ITGU_CTRL: DEREN Position */ +#define MEMSYSCTL_ITGU_CTRL_DEREN_Msk (0x1UL << MEMSYSCTL_ITGU_CTRL_DEREN_Pos) /*!< MEMSYSCTL ITGU_CTRL: DEREN Mask */ + +#define MEMSYSCTL_ITGU_CTRL_DBFEN_Pos 0U /*!< MEMSYSCTL ITGU_CTRL: DBFEN Position */ +#define MEMSYSCTL_ITGU_CTRL_DBFEN_Msk (0x1UL /*<< MEMSYSCTL_ITGU_CTRL_DBFEN_Pos*/) /*!< MEMSYSCTL ITGU_CTRL: DBFEN Mask */ + +/* MEMSYSCTL ITGU Configuration Register (ITGU_CFG) Register Definitions */ +#define MEMSYSCTL_ITGU_CFG_PRESENT_Pos 31U /*!< MEMSYSCTL ITGU_CFG: PRESENT Position */ +#define MEMSYSCTL_ITGU_CFG_PRESENT_Msk (0x1UL << MEMSYSCTL_ITGU_CFG_PRESENT_Pos) /*!< MEMSYSCTL ITGU_CFG: PRESENT Mask */ + +#define MEMSYSCTL_ITGU_CFG_NUMBLKS_Pos 8U /*!< MEMSYSCTL ITGU_CFG: NUMBLKS Position */ +#define MEMSYSCTL_ITGU_CFG_NUMBLKS_Msk (0xFUL << MEMSYSCTL_ITGU_CFG_NUMBLKS_Pos) /*!< MEMSYSCTL ITGU_CFG: NUMBLKS Mask */ + +#define MEMSYSCTL_ITGU_CFG_BLKSZ_Pos 0U /*!< MEMSYSCTL ITGU_CFG: BLKSZ Position */ +#define MEMSYSCTL_ITGU_CFG_BLKSZ_Msk (0xFUL /*<< MEMSYSCTL_ITGU_CFG_BLKSZ_Pos*/) /*!< MEMSYSCTL ITGU_CFG: BLKSZ Mask */ + +/* MEMSYSCTL DTGU Control Registers (DTGU_CTRL) Register Definitions */ +#define MEMSYSCTL_DTGU_CTRL_DEREN_Pos 1U /*!< MEMSYSCTL DTGU_CTRL: DEREN Position */ +#define MEMSYSCTL_DTGU_CTRL_DEREN_Msk 
(0x1UL << MEMSYSCTL_DTGU_CTRL_DEREN_Pos) /*!< MEMSYSCTL DTGU_CTRL: DEREN Mask */ + +#define MEMSYSCTL_DTGU_CTRL_DBFEN_Pos 0U /*!< MEMSYSCTL DTGU_CTRL: DBFEN Position */ +#define MEMSYSCTL_DTGU_CTRL_DBFEN_Msk (0x1UL /*<< MEMSYSCTL_DTGU_CTRL_DBFEN_Pos*/) /*!< MEMSYSCTL DTGU_CTRL: DBFEN Mask */ + +/* MEMSYSCTL DTGU Configuration Register (DTGU_CFG) Register Definitions */ +#define MEMSYSCTL_DTGU_CFG_PRESENT_Pos 31U /*!< MEMSYSCTL DTGU_CFG: PRESENT Position */ +#define MEMSYSCTL_DTGU_CFG_PRESENT_Msk (0x1UL << MEMSYSCTL_DTGU_CFG_PRESENT_Pos) /*!< MEMSYSCTL DTGU_CFG: PRESENT Mask */ + +#define MEMSYSCTL_DTGU_CFG_NUMBLKS_Pos 8U /*!< MEMSYSCTL DTGU_CFG: NUMBLKS Position */ +#define MEMSYSCTL_DTGU_CFG_NUMBLKS_Msk (0xFUL << MEMSYSCTL_DTGU_CFG_NUMBLKS_Pos) /*!< MEMSYSCTL DTGU_CFG: NUMBLKS Mask */ + +#define MEMSYSCTL_DTGU_CFG_BLKSZ_Pos 0U /*!< MEMSYSCTL DTGU_CFG: BLKSZ Position */ +#define MEMSYSCTL_DTGU_CFG_BLKSZ_Msk (0xFUL /*<< MEMSYSCTL_DTGU_CFG_BLKSZ_Pos*/) /*!< MEMSYSCTL DTGU_CFG: BLKSZ Mask */ + + +/*@}*/ /* end of group MemSysCtl_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup PwrModCtl_Type Power Mode Control Registers + \brief Type definitions for the Power Mode Control Registers (PWRMODCTL) + @{ + */ + +/** + \brief Structure type to access the Power Mode Control Registers (PWRMODCTL). + */ +typedef struct +{ + __IOM uint32_t CPDLPSTATE; /*!< Offset: 0x000 (R/W) Core Power Domain Low Power State Register */ + __IOM uint32_t DPDLPSTATE; /*!< Offset: 0x004 (R/W) Debug Power Domain Low Power State Register */ +} PwrModCtl_Type; + +/* PWRMODCTL Core Power Domain Low Power State (CPDLPSTATE) Register Definitions */ +#define PWRMODCTL_CPDLPSTATE_RLPSTATE_Pos 8U /*!< PWRMODCTL CPDLPSTATE: RLPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_RLPSTATE_Msk (0x3UL << PWRMODCTL_CPDLPSTATE_RLPSTATE_Pos) /*!< PWRMODCTL CPDLPSTATE: RLPSTATE Mask */ + +#define PWRMODCTL_CPDLPSTATE_ELPSTATE_Pos 4U /*!< PWRMODCTL CPDLPSTATE: ELPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_ELPSTATE_Msk (0x3UL << PWRMODCTL_CPDLPSTATE_ELPSTATE_Pos) /*!< PWRMODCTL CPDLPSTATE: ELPSTATE Mask */ + +#define PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos 0U /*!< PWRMODCTL CPDLPSTATE: CLPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_CLPSTATE_Msk (0x3UL /*<< PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos*/) /*!< PWRMODCTL CPDLPSTATE: CLPSTATE Mask */ + +/* PWRMODCTL Debug Power Domain Low Power State (DPDLPSTATE) Register Definitions */ +#define PWRMODCTL_DPDLPSTATE_DLPSTATE_Pos 0U /*!< PWRMODCTL DPDLPSTATE: DLPSTATE Position */ +#define PWRMODCTL_DPDLPSTATE_DLPSTATE_Msk (0x3UL /*<< PWRMODCTL_DPDLPSTATE_DLPSTATE_Pos*/) /*!< PWRMODCTL DPDLPSTATE: DLPSTATE Mask */ + +/*@}*/ /* end of group PwrModCtl_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup EWIC_Type External Wakeup Interrupt Controller Registers + \brief Type definitions for the External Wakeup Interrupt Controller Registers (EWIC) + @{ + */ + +/** + \brief Structure type to access the External Wakeup Interrupt Controller Registers (EWIC). 
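/* Usage sketch (illustrative only): the MEMSYSCTL and PWRMODCTL fields above are normally driven with the usual CMSIS read-modify-write pattern built from the _Pos/_Msk macros. The two helpers below assume the surrounding CMSIS header context (uint32_t, the __IOM qualifier, MemSysCtl_Type and PwrModCtl_Type as defined above); the instance pointers are passed in by the caller because the base addresses of these blocks are implementation defined and not declared here. */
static inline void MemSysCtl_EnableCaches(MemSysCtl_Type *memsysctl)
{
  /* allow I- and D-cache activity in the memory system; the architectural cache enables in SCB->CCR are separate */
  memsysctl->MSCR |= (MEMSYSCTL_MSCR_ICACTIVE_Msk | MEMSYSCTL_MSCR_DCACTIVE_Msk);
}

static inline void PwrModCtl_SetCoreLPState(PwrModCtl_Type *pwrmodctl, uint32_t clpstate)
{
  uint32_t reg = pwrmodctl->CPDLPSTATE;
  reg &= ~PWRMODCTL_CPDLPSTATE_CLPSTATE_Msk;                                   /* clear the 2-bit CLPSTATE field */
  reg |= (clpstate << PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos) &
          PWRMODCTL_CPDLPSTATE_CLPSTATE_Msk;                                   /* insert the requested state     */
  pwrmodctl->CPDLPSTATE = reg;                                                 /* commit the low-power setting   */
}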
+ */ +typedef struct +{ + __OM uint32_t EVENTSPR; /*!< Offset: 0x000 ( /W) Event Set Pending Register */ + uint32_t RESERVED0[31U]; + __IM uint32_t EVENTMASKA; /*!< Offset: 0x080 (R/W) Event Mask A Register */ + __IM uint32_t EVENTMASK[15]; /*!< Offset: 0x084 (R/W) Event Mask Register */ +} EWIC_Type; + +/* EWIC External Wakeup Interrupt Controller (EVENTSPR) Register Definitions */ +#define EWIC_EVENTSPR_EDBGREQ_Pos 2U /*!< EWIC EVENTSPR: EDBGREQ Position */ +#define EWIC_EVENTSPR_EDBGREQ_Msk (0x1UL << EWIC_EVENTSPR_EDBGREQ_Pos) /*!< EWIC EVENTSPR: EDBGREQ Mask */ + +#define EWIC_EVENTSPR_NMI_Pos 1U /*!< EWIC EVENTSPR: NMI Position */ +#define EWIC_EVENTSPR_NMI_Msk (0x1UL << EWIC_EVENTSPR_NMI_Pos) /*!< EWIC EVENTSPR: NMI Mask */ + +#define EWIC_EVENTSPR_EVENT_Pos 0U /*!< EWIC EVENTSPR: EVENT Position */ +#define EWIC_EVENTSPR_EVENT_Msk (0x1UL /*<< EWIC_EVENTSPR_EVENT_Pos*/) /*!< EWIC EVENTSPR: EVENT Mask */ + +/* EWIC External Wakeup Interrupt Controller (EVENTMASKA) Register Definitions */ +#define EWIC_EVENTMASKA_EDBGREQ_Pos 2U /*!< EWIC EVENTMASKA: EDBGREQ Position */ +#define EWIC_EVENTMASKA_EDBGREQ_Msk (0x1UL << EWIC_EVENTMASKA_EDBGREQ_Pos) /*!< EWIC EVENTMASKA: EDBGREQ Mask */ + +#define EWIC_EVENTMASKA_NMI_Pos 1U /*!< EWIC EVENTMASKA: NMI Position */ +#define EWIC_EVENTMASKA_NMI_Msk (0x1UL << EWIC_EVENTMASKA_NMI_Pos) /*!< EWIC EVENTMASKA: NMI Mask */ + +#define EWIC_EVENTMASKA_EVENT_Pos 0U /*!< EWIC EVENTMASKA: EVENT Position */ +#define EWIC_EVENTMASKA_EVENT_Msk (0x1UL /*<< EWIC_EVENTMASKA_EVENT_Pos*/) /*!< EWIC EVENTMASKA: EVENT Mask */ + +/* EWIC External Wakeup Interrupt Controller (EVENTMASK) Register Definitions */ +#define EWIC_EVENTMASK_IRQ_Pos 0U /*!< EWIC EVENTMASKA: IRQ Position */ +#define EWIC_EVENTMASK_IRQ_Msk (0xFFFFFFFFUL /*<< EWIC_EVENTMASKA_IRQ_Pos*/) /*!< EWIC EVENTMASKA: IRQ Mask */ + +/*@}*/ /* end of group EWIC_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup ErrBnk_Type Error Banking Registers (IMPLEMENTATION DEFINED) + \brief Type definitions for the Error Banking Registers (ERRBNK) + @{ + */ + +/** + \brief Structure type to access the Error Banking Registers (ERRBNK). 
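/* Usage sketch (illustrative only): EVENTSPR is declared write-only (__OM) and EVENTMASKA read-only (__IM) above, so a typical access pattern is to pend a wake-up event by writing its mask into EVENTSPR and to inspect EVENTMASKA through the _Pos/_Msk macros. The helpers assume the surrounding CMSIS header context; the EWIC instance pointer is a parameter because the block's base address is implementation defined. */
static inline void EWIC_PendNMIEvent(EWIC_Type *ewic)
{
  ewic->EVENTSPR = EWIC_EVENTSPR_NMI_Msk;                        /* flag the NMI wake-up event as pending */
}

static inline uint32_t EWIC_ReadEDBGREQMask(const EWIC_Type *ewic)
{
  /* returns the current EVENTMASKA setting for the external debug request event (0 or 1) */
  return (ewic->EVENTMASKA & EWIC_EVENTMASKA_EDBGREQ_Msk) >> EWIC_EVENTMASKA_EDBGREQ_Pos;
}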
+ */ +typedef struct +{ + __IOM uint32_t IEBR0; /*!< Offset: 0x000 (R/W) Instruction Cache Error Bank Register 0 */ + __IOM uint32_t IEBR1; /*!< Offset: 0x004 (R/W) Instruction Cache Error Bank Register 1 */ + uint32_t RESERVED0[2U]; + __IOM uint32_t DEBR0; /*!< Offset: 0x010 (R/W) Data Cache Error Bank Register 0 */ + __IOM uint32_t DEBR1; /*!< Offset: 0x014 (R/W) Data Cache Error Bank Register 1 */ + uint32_t RESERVED1[2U]; + __IOM uint32_t TEBR0; /*!< Offset: 0x020 (R/W) TCM Error Bank Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t TEBR1; /*!< Offset: 0x028 (R/W) TCM Error Bank Register 1 */ +} ErrBnk_Type; + +/* ERRBNK Instruction Cache Error Bank Register 0 (IEBR0) Register Definitions */ +#define ERRBNK_IEBR0_SWDEF_Pos 30U /*!< ERRBNK IEBR0: SWDEF Position */ +#define ERRBNK_IEBR0_SWDEF_Msk (0x3UL << ERRBNK_IEBR0_SWDEF_Pos) /*!< ERRBNK IEBR0: SWDEF Mask */ + +#define ERRBNK_IEBR0_BANK_Pos 16U /*!< ERRBNK IEBR0: BANK Position */ +#define ERRBNK_IEBR0_BANK_Msk (0x1UL << ERRBNK_IEBR0_BANK_Pos) /*!< ERRBNK IEBR0: BANK Mask */ + +#define ERRBNK_IEBR0_LOCATION_Pos 2U /*!< ERRBNK IEBR0: LOCATION Position */ +#define ERRBNK_IEBR0_LOCATION_Msk (0x3FFFUL << ERRBNK_IEBR0_LOCATION_Pos) /*!< ERRBNK IEBR0: LOCATION Mask */ + +#define ERRBNK_IEBR0_LOCKED_Pos 1U /*!< ERRBNK IEBR0: LOCKED Position */ +#define ERRBNK_IEBR0_LOCKED_Msk (0x1UL << ERRBNK_IEBR0_LOCKED_Pos) /*!< ERRBNK IEBR0: LOCKED Mask */ + +#define ERRBNK_IEBR0_VALID_Pos 0U /*!< ERRBNK IEBR0: VALID Position */ +#define ERRBNK_IEBR0_VALID_Msk (0x1UL << /*ERRBNK_IEBR0_VALID_Pos*/) /*!< ERRBNK IEBR0: VALID Mask */ + +/* ERRBNK Instruction Cache Error Bank Register 1 (IEBR1) Register Definitions */ +#define ERRBNK_IEBR1_SWDEF_Pos 30U /*!< ERRBNK IEBR1: SWDEF Position */ +#define ERRBNK_IEBR1_SWDEF_Msk (0x3UL << ERRBNK_IEBR1_SWDEF_Pos) /*!< ERRBNK IEBR1: SWDEF Mask */ + +#define ERRBNK_IEBR1_BANK_Pos 16U /*!< ERRBNK IEBR1: BANK Position */ +#define ERRBNK_IEBR1_BANK_Msk (0x1UL << ERRBNK_IEBR1_BANK_Pos) /*!< ERRBNK IEBR1: BANK Mask */ + +#define ERRBNK_IEBR1_LOCATION_Pos 2U /*!< ERRBNK IEBR1: LOCATION Position */ +#define ERRBNK_IEBR1_LOCATION_Msk (0x3FFFUL << ERRBNK_IEBR1_LOCATION_Pos) /*!< ERRBNK IEBR1: LOCATION Mask */ + +#define ERRBNK_IEBR1_LOCKED_Pos 1U /*!< ERRBNK IEBR1: LOCKED Position */ +#define ERRBNK_IEBR1_LOCKED_Msk (0x1UL << ERRBNK_IEBR1_LOCKED_Pos) /*!< ERRBNK IEBR1: LOCKED Mask */ + +#define ERRBNK_IEBR1_VALID_Pos 0U /*!< ERRBNK IEBR1: VALID Position */ +#define ERRBNK_IEBR1_VALID_Msk (0x1UL << /*ERRBNK_IEBR1_VALID_Pos*/) /*!< ERRBNK IEBR1: VALID Mask */ + +/* ERRBNK Data Cache Error Bank Register 0 (DEBR0) Register Definitions */ +#define ERRBNK_DEBR0_SWDEF_Pos 30U /*!< ERRBNK DEBR0: SWDEF Position */ +#define ERRBNK_DEBR0_SWDEF_Msk (0x3UL << ERRBNK_DEBR0_SWDEF_Pos) /*!< ERRBNK DEBR0: SWDEF Mask */ + +#define ERRBNK_DEBR0_TYPE_Pos 17U /*!< ERRBNK DEBR0: TYPE Position */ +#define ERRBNK_DEBR0_TYPE_Msk (0x1UL << ERRBNK_DEBR0_TYPE_Pos) /*!< ERRBNK DEBR0: TYPE Mask */ + +#define ERRBNK_DEBR0_BANK_Pos 16U /*!< ERRBNK DEBR0: BANK Position */ +#define ERRBNK_DEBR0_BANK_Msk (0x1UL << ERRBNK_DEBR0_BANK_Pos) /*!< ERRBNK DEBR0: BANK Mask */ + +#define ERRBNK_DEBR0_LOCATION_Pos 2U /*!< ERRBNK DEBR0: LOCATION Position */ +#define ERRBNK_DEBR0_LOCATION_Msk (0x3FFFUL << ERRBNK_DEBR0_LOCATION_Pos) /*!< ERRBNK DEBR0: LOCATION Mask */ + +#define ERRBNK_DEBR0_LOCKED_Pos 1U /*!< ERRBNK DEBR0: LOCKED Position */ +#define ERRBNK_DEBR0_LOCKED_Msk (0x1UL << ERRBNK_DEBR0_LOCKED_Pos) /*!< ERRBNK DEBR0: LOCKED Mask */ + +#define 
ERRBNK_DEBR0_VALID_Pos 0U /*!< ERRBNK DEBR0: VALID Position */ +#define ERRBNK_DEBR0_VALID_Msk (0x1UL << /*ERRBNK_DEBR0_VALID_Pos*/) /*!< ERRBNK DEBR0: VALID Mask */ + +/* ERRBNK Data Cache Error Bank Register 1 (DEBR1) Register Definitions */ +#define ERRBNK_DEBR1_SWDEF_Pos 30U /*!< ERRBNK DEBR1: SWDEF Position */ +#define ERRBNK_DEBR1_SWDEF_Msk (0x3UL << ERRBNK_DEBR1_SWDEF_Pos) /*!< ERRBNK DEBR1: SWDEF Mask */ + +#define ERRBNK_DEBR1_TYPE_Pos 17U /*!< ERRBNK DEBR1: TYPE Position */ +#define ERRBNK_DEBR1_TYPE_Msk (0x1UL << ERRBNK_DEBR1_TYPE_Pos) /*!< ERRBNK DEBR1: TYPE Mask */ + +#define ERRBNK_DEBR1_BANK_Pos 16U /*!< ERRBNK DEBR1: BANK Position */ +#define ERRBNK_DEBR1_BANK_Msk (0x1UL << ERRBNK_DEBR1_BANK_Pos) /*!< ERRBNK DEBR1: BANK Mask */ + +#define ERRBNK_DEBR1_LOCATION_Pos 2U /*!< ERRBNK DEBR1: LOCATION Position */ +#define ERRBNK_DEBR1_LOCATION_Msk (0x3FFFUL << ERRBNK_DEBR1_LOCATION_Pos) /*!< ERRBNK DEBR1: LOCATION Mask */ + +#define ERRBNK_DEBR1_LOCKED_Pos 1U /*!< ERRBNK DEBR1: LOCKED Position */ +#define ERRBNK_DEBR1_LOCKED_Msk (0x1UL << ERRBNK_DEBR1_LOCKED_Pos) /*!< ERRBNK DEBR1: LOCKED Mask */ + +#define ERRBNK_DEBR1_VALID_Pos 0U /*!< ERRBNK DEBR1: VALID Position */ +#define ERRBNK_DEBR1_VALID_Msk (0x1UL << /*ERRBNK_DEBR1_VALID_Pos*/) /*!< ERRBNK DEBR1: VALID Mask */ + +/* ERRBNK TCM Error Bank Register 0 (TEBR0) Register Definitions */ +#define ERRBNK_TEBR0_SWDEF_Pos 30U /*!< ERRBNK TEBR0: SWDEF Position */ +#define ERRBNK_TEBR0_SWDEF_Msk (0x3UL << ERRBNK_TEBR0_SWDEF_Pos) /*!< ERRBNK TEBR0: SWDEF Mask */ + +#define ERRBNK_TEBR0_POISON_Pos 28U /*!< ERRBNK TEBR0: POISON Position */ +#define ERRBNK_TEBR0_POISON_Msk (0x1UL << ERRBNK_TEBR0_POISON_Pos) /*!< ERRBNK TEBR0: POISON Mask */ + +#define ERRBNK_TEBR0_TYPE_Pos 27U /*!< ERRBNK TEBR0: TYPE Position */ +#define ERRBNK_TEBR0_TYPE_Msk (0x1UL << ERRBNK_TEBR0_TYPE_Pos) /*!< ERRBNK TEBR0: TYPE Mask */ + +#define ERRBNK_TEBR0_BANK_Pos 24U /*!< ERRBNK TEBR0: BANK Position */ +#define ERRBNK_TEBR0_BANK_Msk (0x3UL << ERRBNK_TEBR0_BANK_Pos) /*!< ERRBNK TEBR0: BANK Mask */ + +#define ERRBNK_TEBR0_LOCATION_Pos 2U /*!< ERRBNK TEBR0: LOCATION Position */ +#define ERRBNK_TEBR0_LOCATION_Msk (0x3FFFFFUL << ERRBNK_TEBR0_LOCATION_Pos) /*!< ERRBNK TEBR0: LOCATION Mask */ + +#define ERRBNK_TEBR0_LOCKED_Pos 1U /*!< ERRBNK TEBR0: LOCKED Position */ +#define ERRBNK_TEBR0_LOCKED_Msk (0x1UL << ERRBNK_TEBR0_LOCKED_Pos) /*!< ERRBNK TEBR0: LOCKED Mask */ + +#define ERRBNK_TEBR0_VALID_Pos 0U /*!< ERRBNK TEBR0: VALID Position */ +#define ERRBNK_TEBR0_VALID_Msk (0x1UL << /*ERRBNK_TEBR0_VALID_Pos*/) /*!< ERRBNK TEBR0: VALID Mask */ + +/* ERRBNK TCM Error Bank Register 1 (TEBR1) Register Definitions */ +#define ERRBNK_TEBR1_SWDEF_Pos 30U /*!< ERRBNK TEBR1: SWDEF Position */ +#define ERRBNK_TEBR1_SWDEF_Msk (0x3UL << ERRBNK_TEBR1_SWDEF_Pos) /*!< ERRBNK TEBR1: SWDEF Mask */ + +#define ERRBNK_TEBR1_POISON_Pos 28U /*!< ERRBNK TEBR1: POISON Position */ +#define ERRBNK_TEBR1_POISON_Msk (0x1UL << ERRBNK_TEBR1_POISON_Pos) /*!< ERRBNK TEBR1: POISON Mask */ + +#define ERRBNK_TEBR1_TYPE_Pos 27U /*!< ERRBNK TEBR1: TYPE Position */ +#define ERRBNK_TEBR1_TYPE_Msk (0x1UL << ERRBNK_TEBR1_TYPE_Pos) /*!< ERRBNK TEBR1: TYPE Mask */ + +#define ERRBNK_TEBR1_BANK_Pos 24U /*!< ERRBNK TEBR1: BANK Position */ +#define ERRBNK_TEBR1_BANK_Msk (0x3UL << ERRBNK_TEBR1_BANK_Pos) /*!< ERRBNK TEBR1: BANK Mask */ + +#define ERRBNK_TEBR1_LOCATION_Pos 2U /*!< ERRBNK TEBR1: LOCATION Position */ +#define ERRBNK_TEBR1_LOCATION_Msk (0x3FFFFFUL << ERRBNK_TEBR1_LOCATION_Pos) /*!< ERRBNK TEBR1: LOCATION 
Mask */ + +#define ERRBNK_TEBR1_LOCKED_Pos 1U /*!< ERRBNK TEBR1: LOCKED Position */ +#define ERRBNK_TEBR1_LOCKED_Msk (0x1UL << ERRBNK_TEBR1_LOCKED_Pos) /*!< ERRBNK TEBR1: LOCKED Mask */ + +#define ERRBNK_TEBR1_VALID_Pos 0U /*!< ERRBNK TEBR1: VALID Position */ +#define ERRBNK_TEBR1_VALID_Msk (0x1UL << /*ERRBNK_TEBR1_VALID_Pos*/) /*!< ERRBNK TEBR1: VALID Mask */ + +/*@}*/ /* end of group ErrBnk_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup PrcCfgInf_Type Processor Configuration Information Registers (IMPLEMENTATION DEFINED) + \brief Type definitions for the Processor Configuration Information Registerss (PRCCFGINF) + @{ + */ + +/** + \brief Structure type to access the Processor Configuration Information Registerss (PRCCFGINF). + */ +typedef struct +{ + __OM uint32_t CFGINFOSEL; /*!< Offset: 0x000 ( /W) Processor Configuration Information Selection Register */ + __IM uint32_t CFGINFORD; /*!< Offset: 0x004 (R/ ) Processor Configuration Information Read Data Register */ +} PrcCfgInf_Type; + +/* PRCCFGINF Processor Configuration Information Selection Register (CFGINFOSEL) Definitions */ + +/* PRCCFGINF Processor Configuration Information Read Data Register (CFGINFORD) Definitions */ + +/*@}*/ /* end of group PrcCfgInf_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup STL_Type Software Test Library Observation Registers + \brief Type definitions for the Software Test Library Observation Registerss (STL) + @{ + */ + +/** + \brief Structure type to access the Software Test Library Observation Registerss (STL). + */ +typedef struct +{ + __IM uint32_t STLNVICPENDOR; /*!< Offset: 0x000 (R/ ) NVIC Pending Priority Tree Register */ + __IM uint32_t STLNVICACTVOR; /*!< Offset: 0x004 (R/ ) NVIC Active Priority Tree Register */ + uint32_t RESERVED0[2U]; + __OM uint32_t STLIDMPUSR; /*!< Offset: 0x010 ( /W) MPU Sanple Register */ + __IM uint32_t STLIMPUOR; /*!< Offset: 0x014 (R/ ) MPU Region Hit Register */ + __IM uint32_t STLD0MPUOR; /*!< Offset: 0x018 (R/ ) MPU Memory Attributes Register 0 */ + __IM uint32_t STLD1MPUOR; /*!< Offset: 0x01C (R/ ) MPU Memory Attributes Register 1 */ + +} STL_Type; + +/* STL Software Test Library Observation Register (STLNVICPENDOR) Definitions */ +#define STL_STLNVICPENDOR_VALID_Pos 18U /*!< STL STLNVICPENDOR: VALID Position */ +#define STL_STLNVICPENDOR_VALID_Msk (0x1UL << STL_STLNVICPENDOR_VALID_Pos) /*!< STL STLNVICPENDOR: VALID Mask */ + +#define STL_STLNVICPENDOR_TARGET_Pos 17U /*!< STL STLNVICPENDOR: TARGET Position */ +#define STL_STLNVICPENDOR_TARGET_Msk (0x1UL << STL_STLNVICPENDOR_TARGET_Pos) /*!< STL STLNVICPENDOR: TARGET Mask */ + +#define STL_STLNVICPENDOR_PRIORITY_Pos 9U /*!< STL STLNVICPENDOR: PRIORITY Position */ +#define STL_STLNVICPENDOR_PRIORITY_Msk (0xFFUL << STL_STLNVICPENDOR_PRIORITY_Pos) /*!< STL STLNVICPENDOR: PRIORITY Mask */ + +#define STL_STLNVICPENDOR_INTNUM_Pos 0U /*!< STL STLNVICPENDOR: INTNUM Position */ +#define STL_STLNVICPENDOR_INTNUM_Msk (0x1FFUL /*<< STL_STLNVICPENDOR_INTNUM_Pos*/) /*!< STL STLNVICPENDOR: INTNUM Mask */ + +/* STL Software Test Library Observation Register (STLNVICACTVOR) Definitions */ +#define STL_STLNVICACTVOR_VALID_Pos 18U /*!< STL STLNVICACTVOR: VALID Position */ +#define STL_STLNVICACTVOR_VALID_Msk (0x1UL << STL_STLNVICACTVOR_VALID_Pos) /*!< STL STLNVICACTVOR: VALID Mask */ + +#define STL_STLNVICACTVOR_TARGET_Pos 17U /*!< STL STLNVICACTVOR: TARGET Position */ +#define STL_STLNVICACTVOR_TARGET_Msk (0x1UL << STL_STLNVICACTVOR_TARGET_Pos) /*!< STL STLNVICACTVOR: TARGET Mask */ + +#define 
STL_STLNVICACTVOR_PRIORITY_Pos 9U /*!< STL STLNVICACTVOR: PRIORITY Position */ +#define STL_STLNVICACTVOR_PRIORITY_Msk (0xFFUL << STL_STLNVICACTVOR_PRIORITY_Pos) /*!< STL STLNVICACTVOR: PRIORITY Mask */ + +#define STL_STLNVICACTVOR_INTNUM_Pos 0U /*!< STL STLNVICACTVOR: INTNUM Position */ +#define STL_STLNVICACTVOR_INTNUM_Msk (0x1FFUL /*<< STL_STLNVICACTVOR_INTNUM_Pos*/) /*!< STL STLNVICACTVOR: INTNUM Mask */ + +/* STL Software Test Library Observation Register (STLIDMPUSR) Definitions */ +#define STL_STLIDMPUSR_ADDR_Pos 5U /*!< STL STLIDMPUSR: ADDR Position */ +#define STL_STLIDMPUSR_ADDR_Msk (0x7FFFFFFUL << STL_STLIDMPUSR_ADDR_Pos) /*!< STL STLIDMPUSR: ADDR Mask */ + +#define STL_STLIDMPUSR_INSTR_Pos 2U /*!< STL STLIDMPUSR: INSTR Position */ +#define STL_STLIDMPUSR_INSTR_Msk (0x1UL << STL_STLIDMPUSR_INSTR_Pos) /*!< STL STLIDMPUSR: INSTR Mask */ + +#define STL_STLIDMPUSR_DATA_Pos 1U /*!< STL STLIDMPUSR: DATA Position */ +#define STL_STLIDMPUSR_DATA_Msk (0x1UL << STL_STLIDMPUSR_DATA_Pos) /*!< STL STLIDMPUSR: DATA Mask */ + +/* STL Software Test Library Observation Register (STLIMPUOR) Definitions */ +#define STL_STLIMPUOR_HITREGION_Pos 9U /*!< STL STLIMPUOR: HITREGION Position */ +#define STL_STLIMPUOR_HITREGION_Msk (0xFFUL << STL_STLIMPUOR_HITREGION_Pos) /*!< STL STLIMPUOR: HITREGION Mask */ + +#define STL_STLIMPUOR_ATTR_Pos 0U /*!< STL STLIMPUOR: ATTR Position */ +#define STL_STLIMPUOR_ATTR_Msk (0x1FFUL /*<< STL_STLIMPUOR_ATTR_Pos*/) /*!< STL STLIMPUOR: ATTR Mask */ + +/* STL Software Test Library Observation Register (STLD0MPUOR) Definitions */ +#define STL_STLD0MPUOR_HITREGION_Pos 9U /*!< STL STLD0MPUOR: HITREGION Position */ +#define STL_STLD0MPUOR_HITREGION_Msk (0xFFUL << STL_STLD0MPUOR_HITREGION_Pos) /*!< STL STLD0MPUOR: HITREGION Mask */ + +#define STL_STLD0MPUOR_ATTR_Pos 0U /*!< STL STLD0MPUOR: ATTR Position */ +#define STL_STLD0MPUOR_ATTR_Msk (0x1FFUL /*<< STL_STLD0MPUOR_ATTR_Pos*/) /*!< STL STLD0MPUOR: ATTR Mask */ + +/* STL Software Test Library Observation Register (STLD1MPUOR) Definitions */ +#define STL_STLD1MPUOR_HITREGION_Pos 9U /*!< STL STLD1MPUOR: HITREGION Position */ +#define STL_STLD1MPUOR_HITREGION_Msk (0xFFUL << STL_STLD1MPUOR_HITREGION_Pos) /*!< STL STLD1MPUOR: HITREGION Mask */ + +#define STL_STLD1MPUOR_ATTR_Pos 0U /*!< STL STLD1MPUOR: ATTR Position */ +#define STL_STLD1MPUOR_ATTR_Msk (0x1FFUL /*<< STL_STLD1MPUOR_ATTR_Pos*/) /*!< STL STLD1MPUOR: ATTR Mask */ + +/*@}*/ /* end of group STL_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). 
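/* Usage sketch (illustrative only): STLIDMPUSR is write-only and STLIMPUOR read-only above, so an MPU lookup is observed by writing the sample address together with the DATA (or INSTR) select bit and then decoding the hit region with the _Pos/_Msk macros. Exactly when the lookup result becomes valid is implementation specific; this sketch simply assumes it can be read back on the following access and relies only on the surrounding CMSIS header context. */
static inline uint32_t STL_LookupDataRegion(STL_Type *stl, uint32_t addr)
{
  stl->STLIDMPUSR = (addr & STL_STLIDMPUSR_ADDR_Msk) | STL_STLIDMPUSR_DATA_Msk;          /* request a data-side sample   */
  return (stl->STLIMPUOR & STL_STLIMPUOR_HITREGION_Msk) >> STL_STLIMPUOR_HITREGION_Pos;  /* MPU region that hit, if any  */
}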
+ */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Sizes Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Sizes Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[809U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) Software Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) Software Lock Status Register */ + uint32_t RESERVED4[4U]; + __IM uint32_t TYPE; /*!< Offset: 0xFC8 (R/ ) Device Identifier Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_SWOSCALER_Pos 0U /*!< TPI ACPR: SWOSCALER Position */ +#define TPI_ACPR_SWOSCALER_Msk (0xFFFFUL /*<< TPI_ACPR_SWOSCALER_Pos*/) /*!< TPI ACPR: SWOSCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFmt_Pos 0U /*!< TPI FFCR: EnFmt Position */ +#define TPI_FFCR_EnFmt_Msk (0x3UL << /*TPI_FFCR_EnFmt_Pos*/) /*!< TPI FFCR: EnFmt Mask */ + +/* TPI Periodic Synchronization Control Register Definitions */ +#define TPI_PSCR_PSCount_Pos 0U /*!< TPI PSCR: PSCount Position */ +#define TPI_PSCR_PSCount_Msk (0x1FUL /*<< TPI_PSCR_PSCount_Pos*/) /*!< TPI PSCR: TPSCount Mask */ + +/* TPI Software Lock Status Register Definitions */ +#define TPI_LSR_nTT_Pos 1U /*!< TPI LSR: Not thirty-two bit. Position */ +#define TPI_LSR_nTT_Msk (0x1UL << TPI_LSR_nTT_Pos) /*!< TPI LSR: Not thirty-two bit. 
Mask */ + +#define TPI_LSR_SLK_Pos 1U /*!< TPI LSR: Software Lock status Position */ +#define TPI_LSR_SLK_Msk (0x1UL << TPI_LSR_SLK_Pos) /*!< TPI LSR: Software Lock status Mask */ + +#define TPI_LSR_SLI_Pos 0U /*!< TPI LSR: Software Lock implemented Position */ +#define TPI_LSR_SLI_Msk (0x1UL /*<< TPI_LSR_SLI_Pos*/) /*!< TPI LSR: Software Lock implemented Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFO depth Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFO depth Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + +#if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_PMU Performance Monitoring Unit (PMU) + \brief Type definitions for the Performance Monitoring Unit (PMU) + @{ + */ + +/** + \brief Structure type to access the Performance Monitoring Unit (PMU). 
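/* Usage sketch (illustrative only): a common use of the TPI definitions above is to route SWO in NRZ (UART-like) mode and derive its bit rate from the trace reference clock via the ACPR prescaler. The NRZ protocol value 2U and the CoreSight unlock key 0xC5ACCE55UL are conventional CoreSight values rather than something defined in this header, and the reference-clock frequency comes from the device's clock tree, so treat this as a sketch that assumes the surrounding CMSIS header context. */
static inline void TPI_SetupSWO(TPI_Type *tpi, uint32_t ref_clk_hz, uint32_t baud)
{
  tpi->LAR  = 0xC5ACCE55UL;                                            /* unlock, if the software lock is implemented (see LSR SLI) */
  tpi->SPPR = (2UL << TPI_SPPR_TXMODE_Pos) & TPI_SPPR_TXMODE_Msk;      /* select the SWO NRZ (UART) pin protocol                    */
  tpi->ACPR = ((ref_clk_hz / baud) - 1UL) & TPI_ACPR_SWOSCALER_Msk;    /* SWO clock = reference clock / (SWOSCALER + 1)             */
}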
+ */ +typedef struct +{ + __IOM uint32_t EVCNTR[__PMU_NUM_EVENTCNT]; /*!< Offset: 0x0 (R/W) PMU Event Counter Registers */ +#if __PMU_NUM_EVENTCNT<31 + uint32_t RESERVED0[31U-__PMU_NUM_EVENTCNT]; +#endif + __IOM uint32_t CCNTR; /*!< Offset: 0x7C (R/W) PMU Cycle Counter Register */ + uint32_t RESERVED1[224]; + __IOM uint32_t EVTYPER[__PMU_NUM_EVENTCNT]; /*!< Offset: 0x400 (R/W) PMU Event Type and Filter Registers */ +#if __PMU_NUM_EVENTCNT<31 + uint32_t RESERVED2[31U-__PMU_NUM_EVENTCNT]; +#endif + __IOM uint32_t CCFILTR; /*!< Offset: 0x47C (R/W) PMU Cycle Counter Filter Register */ + uint32_t RESERVED3[480]; + __IOM uint32_t CNTENSET; /*!< Offset: 0xC00 (R/W) PMU Count Enable Set Register */ + uint32_t RESERVED4[7]; + __IOM uint32_t CNTENCLR; /*!< Offset: 0xC20 (R/W) PMU Count Enable Clear Register */ + uint32_t RESERVED5[7]; + __IOM uint32_t INTENSET; /*!< Offset: 0xC40 (R/W) PMU Interrupt Enable Set Register */ + uint32_t RESERVED6[7]; + __IOM uint32_t INTENCLR; /*!< Offset: 0xC60 (R/W) PMU Interrupt Enable Clear Register */ + uint32_t RESERVED7[7]; + __IOM uint32_t OVSCLR; /*!< Offset: 0xC80 (R/W) PMU Overflow Flag Status Clear Register */ + uint32_t RESERVED8[7]; + __IOM uint32_t SWINC; /*!< Offset: 0xCA0 (R/W) PMU Software Increment Register */ + uint32_t RESERVED9[7]; + __IOM uint32_t OVSSET; /*!< Offset: 0xCC0 (R/W) PMU Overflow Flag Status Set Register */ + uint32_t RESERVED10[79]; + __IOM uint32_t TYPE; /*!< Offset: 0xE00 (R/W) PMU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0xE04 (R/W) PMU Control Register */ + uint32_t RESERVED11[108]; + __IOM uint32_t AUTHSTATUS; /*!< Offset: 0xFB8 (R/W) PMU Authentication Status Register */ + __IOM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/W) PMU Device Architecture Register */ + uint32_t RESERVED12[3]; + __IOM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/W) PMU Device Type Register */ + __IOM uint32_t PIDR4; /*!< Offset: 0xFD0 (R/W) PMU Peripheral Identification Register 4 */ + uint32_t RESERVED13[3]; + __IOM uint32_t PIDR0; /*!< Offset: 0xFE0 (R/W) PMU Peripheral Identification Register 0 */ + __IOM uint32_t PIDR1; /*!< Offset: 0xFE4 (R/W) PMU Peripheral Identification Register 1 */ + __IOM uint32_t PIDR2; /*!< Offset: 0xFE8 (R/W) PMU Peripheral Identification Register 2 */ + __IOM uint32_t PIDR3; /*!< Offset: 0xFEC (R/W) PMU Peripheral Identification Register 3 */ + __IOM uint32_t CIDR0; /*!< Offset: 0xFF0 (R/W) PMU Component Identification Register 0 */ + __IOM uint32_t CIDR1; /*!< Offset: 0xFF4 (R/W) PMU Component Identification Register 1 */ + __IOM uint32_t CIDR2; /*!< Offset: 0xFF8 (R/W) PMU Component Identification Register 2 */ + __IOM uint32_t CIDR3; /*!< Offset: 0xFFC (R/W) PMU Component Identification Register 3 */ +} PMU_Type; + +/** \brief PMU Event Counter Registers (0-30) Definitions */ + +#define PMU_EVCNTR_CNT_Pos 0U /*!< PMU EVCNTR: Counter Position */ +#define PMU_EVCNTR_CNT_Msk (0xFFFFUL /*<< PMU_EVCNTRx_CNT_Pos*/) /*!< PMU EVCNTR: Counter Mask */ + +/** \brief PMU Event Type and Filter Registers (0-30) Definitions */ + +#define PMU_EVTYPER_EVENTTOCNT_Pos 0U /*!< PMU EVTYPER: Event to Count Position */ +#define PMU_EVTYPER_EVENTTOCNT_Msk (0xFFFFUL /*<< EVTYPERx_EVENTTOCNT_Pos*/) /*!< PMU EVTYPER: Event to Count Mask */ + +/** \brief PMU Count Enable Set Register Definitions */ + +#define PMU_CNTENSET_CNT0_ENABLE_Pos 0U /*!< PMU CNTENSET: Event Counter 0 Enable Set Position */ +#define PMU_CNTENSET_CNT0_ENABLE_Msk (1UL /*<< PMU_CNTENSET_CNT0_ENABLE_Pos*/) /*!< PMU CNTENSET: Event Counter 0 Enable Set Mask */ + +#define 
PMU_CNTENSET_CNT1_ENABLE_Pos 1U /*!< PMU CNTENSET: Event Counter 1 Enable Set Position */ +#define PMU_CNTENSET_CNT1_ENABLE_Msk (1UL << PMU_CNTENSET_CNT1_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 1 Enable Set Mask */ + +#define PMU_CNTENSET_CNT2_ENABLE_Pos 2U /*!< PMU CNTENSET: Event Counter 2 Enable Set Position */ +#define PMU_CNTENSET_CNT2_ENABLE_Msk (1UL << PMU_CNTENSET_CNT2_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 2 Enable Set Mask */ + +#define PMU_CNTENSET_CNT3_ENABLE_Pos 3U /*!< PMU CNTENSET: Event Counter 3 Enable Set Position */ +#define PMU_CNTENSET_CNT3_ENABLE_Msk (1UL << PMU_CNTENSET_CNT3_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 3 Enable Set Mask */ + +#define PMU_CNTENSET_CNT4_ENABLE_Pos 4U /*!< PMU CNTENSET: Event Counter 4 Enable Set Position */ +#define PMU_CNTENSET_CNT4_ENABLE_Msk (1UL << PMU_CNTENSET_CNT4_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 4 Enable Set Mask */ + +#define PMU_CNTENSET_CNT5_ENABLE_Pos 5U /*!< PMU CNTENSET: Event Counter 5 Enable Set Position */ +#define PMU_CNTENSET_CNT5_ENABLE_Msk (1UL << PMU_CNTENSET_CNT5_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 5 Enable Set Mask */ + +#define PMU_CNTENSET_CNT6_ENABLE_Pos 6U /*!< PMU CNTENSET: Event Counter 6 Enable Set Position */ +#define PMU_CNTENSET_CNT6_ENABLE_Msk (1UL << PMU_CNTENSET_CNT6_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 6 Enable Set Mask */ + +#define PMU_CNTENSET_CNT7_ENABLE_Pos 7U /*!< PMU CNTENSET: Event Counter 7 Enable Set Position */ +#define PMU_CNTENSET_CNT7_ENABLE_Msk (1UL << PMU_CNTENSET_CNT7_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 7 Enable Set Mask */ + +#define PMU_CNTENSET_CNT8_ENABLE_Pos 8U /*!< PMU CNTENSET: Event Counter 8 Enable Set Position */ +#define PMU_CNTENSET_CNT8_ENABLE_Msk (1UL << PMU_CNTENSET_CNT8_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 8 Enable Set Mask */ + +#define PMU_CNTENSET_CNT9_ENABLE_Pos 9U /*!< PMU CNTENSET: Event Counter 9 Enable Set Position */ +#define PMU_CNTENSET_CNT9_ENABLE_Msk (1UL << PMU_CNTENSET_CNT9_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 9 Enable Set Mask */ + +#define PMU_CNTENSET_CNT10_ENABLE_Pos 10U /*!< PMU CNTENSET: Event Counter 10 Enable Set Position */ +#define PMU_CNTENSET_CNT10_ENABLE_Msk (1UL << PMU_CNTENSET_CNT10_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 10 Enable Set Mask */ + +#define PMU_CNTENSET_CNT11_ENABLE_Pos 11U /*!< PMU CNTENSET: Event Counter 11 Enable Set Position */ +#define PMU_CNTENSET_CNT11_ENABLE_Msk (1UL << PMU_CNTENSET_CNT11_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 11 Enable Set Mask */ + +#define PMU_CNTENSET_CNT12_ENABLE_Pos 12U /*!< PMU CNTENSET: Event Counter 12 Enable Set Position */ +#define PMU_CNTENSET_CNT12_ENABLE_Msk (1UL << PMU_CNTENSET_CNT12_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 12 Enable Set Mask */ + +#define PMU_CNTENSET_CNT13_ENABLE_Pos 13U /*!< PMU CNTENSET: Event Counter 13 Enable Set Position */ +#define PMU_CNTENSET_CNT13_ENABLE_Msk (1UL << PMU_CNTENSET_CNT13_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 13 Enable Set Mask */ + +#define PMU_CNTENSET_CNT14_ENABLE_Pos 14U /*!< PMU CNTENSET: Event Counter 14 Enable Set Position */ +#define PMU_CNTENSET_CNT14_ENABLE_Msk (1UL << PMU_CNTENSET_CNT14_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 14 Enable Set Mask */ + +#define PMU_CNTENSET_CNT15_ENABLE_Pos 15U /*!< PMU CNTENSET: Event Counter 15 Enable Set Position */ +#define PMU_CNTENSET_CNT15_ENABLE_Msk (1UL << PMU_CNTENSET_CNT15_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 15 Enable Set Mask */ + +#define PMU_CNTENSET_CNT16_ENABLE_Pos 16U /*!< PMU CNTENSET: 
Event Counter 16 Enable Set Position */ +#define PMU_CNTENSET_CNT16_ENABLE_Msk (1UL << PMU_CNTENSET_CNT16_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 16 Enable Set Mask */ + +#define PMU_CNTENSET_CNT17_ENABLE_Pos 17U /*!< PMU CNTENSET: Event Counter 17 Enable Set Position */ +#define PMU_CNTENSET_CNT17_ENABLE_Msk (1UL << PMU_CNTENSET_CNT17_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 17 Enable Set Mask */ + +#define PMU_CNTENSET_CNT18_ENABLE_Pos 18U /*!< PMU CNTENSET: Event Counter 18 Enable Set Position */ +#define PMU_CNTENSET_CNT18_ENABLE_Msk (1UL << PMU_CNTENSET_CNT18_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 18 Enable Set Mask */ + +#define PMU_CNTENSET_CNT19_ENABLE_Pos 19U /*!< PMU CNTENSET: Event Counter 19 Enable Set Position */ +#define PMU_CNTENSET_CNT19_ENABLE_Msk (1UL << PMU_CNTENSET_CNT19_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 19 Enable Set Mask */ + +#define PMU_CNTENSET_CNT20_ENABLE_Pos 20U /*!< PMU CNTENSET: Event Counter 20 Enable Set Position */ +#define PMU_CNTENSET_CNT20_ENABLE_Msk (1UL << PMU_CNTENSET_CNT20_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 20 Enable Set Mask */ + +#define PMU_CNTENSET_CNT21_ENABLE_Pos 21U /*!< PMU CNTENSET: Event Counter 21 Enable Set Position */ +#define PMU_CNTENSET_CNT21_ENABLE_Msk (1UL << PMU_CNTENSET_CNT21_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 21 Enable Set Mask */ + +#define PMU_CNTENSET_CNT22_ENABLE_Pos 22U /*!< PMU CNTENSET: Event Counter 22 Enable Set Position */ +#define PMU_CNTENSET_CNT22_ENABLE_Msk (1UL << PMU_CNTENSET_CNT22_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 22 Enable Set Mask */ + +#define PMU_CNTENSET_CNT23_ENABLE_Pos 23U /*!< PMU CNTENSET: Event Counter 23 Enable Set Position */ +#define PMU_CNTENSET_CNT23_ENABLE_Msk (1UL << PMU_CNTENSET_CNT23_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 23 Enable Set Mask */ + +#define PMU_CNTENSET_CNT24_ENABLE_Pos 24U /*!< PMU CNTENSET: Event Counter 24 Enable Set Position */ +#define PMU_CNTENSET_CNT24_ENABLE_Msk (1UL << PMU_CNTENSET_CNT24_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 24 Enable Set Mask */ + +#define PMU_CNTENSET_CNT25_ENABLE_Pos 25U /*!< PMU CNTENSET: Event Counter 25 Enable Set Position */ +#define PMU_CNTENSET_CNT25_ENABLE_Msk (1UL << PMU_CNTENSET_CNT25_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 25 Enable Set Mask */ + +#define PMU_CNTENSET_CNT26_ENABLE_Pos 26U /*!< PMU CNTENSET: Event Counter 26 Enable Set Position */ +#define PMU_CNTENSET_CNT26_ENABLE_Msk (1UL << PMU_CNTENSET_CNT26_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 26 Enable Set Mask */ + +#define PMU_CNTENSET_CNT27_ENABLE_Pos 27U /*!< PMU CNTENSET: Event Counter 27 Enable Set Position */ +#define PMU_CNTENSET_CNT27_ENABLE_Msk (1UL << PMU_CNTENSET_CNT27_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 27 Enable Set Mask */ + +#define PMU_CNTENSET_CNT28_ENABLE_Pos 28U /*!< PMU CNTENSET: Event Counter 28 Enable Set Position */ +#define PMU_CNTENSET_CNT28_ENABLE_Msk (1UL << PMU_CNTENSET_CNT28_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 28 Enable Set Mask */ + +#define PMU_CNTENSET_CNT29_ENABLE_Pos 29U /*!< PMU CNTENSET: Event Counter 29 Enable Set Position */ +#define PMU_CNTENSET_CNT29_ENABLE_Msk (1UL << PMU_CNTENSET_CNT29_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 29 Enable Set Mask */ + +#define PMU_CNTENSET_CNT30_ENABLE_Pos 30U /*!< PMU CNTENSET: Event Counter 30 Enable Set Position */ +#define PMU_CNTENSET_CNT30_ENABLE_Msk (1UL << PMU_CNTENSET_CNT30_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 30 Enable Set Mask */ + +#define PMU_CNTENSET_CCNTR_ENABLE_Pos 31U /*!< PMU CNTENSET: 
Cycle Counter Enable Set Position */ +#define PMU_CNTENSET_CCNTR_ENABLE_Msk (1UL << PMU_CNTENSET_CCNTR_ENABLE_Pos) /*!< PMU CNTENSET: Cycle Counter Enable Set Mask */ + +/** \brief PMU Count Enable Clear Register Definitions */ + +#define PMU_CNTENCLR_CNT0_ENABLE_Pos 0U /*!< PMU CNTENCLR: Event Counter 0 Enable Clear Position */ +#define PMU_CNTENCLR_CNT0_ENABLE_Msk (1UL /*<< PMU_CNTENCLR_CNT0_ENABLE_Pos*/) /*!< PMU CNTENCLR: Event Counter 0 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT1_ENABLE_Pos 1U /*!< PMU CNTENCLR: Event Counter 1 Enable Clear Position */ +#define PMU_CNTENCLR_CNT1_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT1_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 1 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT2_ENABLE_Pos 2U /*!< PMU CNTENCLR: Event Counter 2 Enable Clear Position */ +#define PMU_CNTENCLR_CNT2_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT2_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 2 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT3_ENABLE_Pos 3U /*!< PMU CNTENCLR: Event Counter 3 Enable Clear Position */ +#define PMU_CNTENCLR_CNT3_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT3_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 3 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT4_ENABLE_Pos 4U /*!< PMU CNTENCLR: Event Counter 4 Enable Clear Position */ +#define PMU_CNTENCLR_CNT4_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT4_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 4 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT5_ENABLE_Pos 5U /*!< PMU CNTENCLR: Event Counter 5 Enable Clear Position */ +#define PMU_CNTENCLR_CNT5_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT5_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 5 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT6_ENABLE_Pos 6U /*!< PMU CNTENCLR: Event Counter 6 Enable Clear Position */ +#define PMU_CNTENCLR_CNT6_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT6_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 6 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT7_ENABLE_Pos 7U /*!< PMU CNTENCLR: Event Counter 7 Enable Clear Position */ +#define PMU_CNTENCLR_CNT7_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT7_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 7 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT8_ENABLE_Pos 8U /*!< PMU CNTENCLR: Event Counter 8 Enable Clear Position */ +#define PMU_CNTENCLR_CNT8_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT8_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 8 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT9_ENABLE_Pos 9U /*!< PMU CNTENCLR: Event Counter 9 Enable Clear Position */ +#define PMU_CNTENCLR_CNT9_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT9_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 9 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT10_ENABLE_Pos 10U /*!< PMU CNTENCLR: Event Counter 10 Enable Clear Position */ +#define PMU_CNTENCLR_CNT10_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT10_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 10 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT11_ENABLE_Pos 11U /*!< PMU CNTENCLR: Event Counter 11 Enable Clear Position */ +#define PMU_CNTENCLR_CNT11_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT11_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 11 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT12_ENABLE_Pos 12U /*!< PMU CNTENCLR: Event Counter 12 Enable Clear Position */ +#define PMU_CNTENCLR_CNT12_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT12_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 12 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT13_ENABLE_Pos 13U /*!< PMU CNTENCLR: Event Counter 13 Enable Clear Position */ +#define PMU_CNTENCLR_CNT13_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT13_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 13 Enable Clear Mask */ + +#define
PMU_CNTENCLR_CNT14_ENABLE_Pos 14U /*!< PMU CNTENCLR: Event Counter 14 Enable Clear Position */ +#define PMU_CNTENCLR_CNT14_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT14_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 14 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT15_ENABLE_Pos 15U /*!< PMU CNTENCLR: Event Counter 15 Enable Clear Position */ +#define PMU_CNTENCLR_CNT15_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT15_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 15 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT16_ENABLE_Pos 16U /*!< PMU CNTENCLR: Event Counter 16 Enable Clear Position */ +#define PMU_CNTENCLR_CNT16_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT16_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 16 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT17_ENABLE_Pos 17U /*!< PMU CNTENCLR: Event Counter 17 Enable Clear Position */ +#define PMU_CNTENCLR_CNT17_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT17_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 17 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT18_ENABLE_Pos 18U /*!< PMU CNTENCLR: Event Counter 18 Enable Clear Position */ +#define PMU_CNTENCLR_CNT18_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT18_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 18 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT19_ENABLE_Pos 19U /*!< PMU CNTENCLR: Event Counter 19 Enable Clear Position */ +#define PMU_CNTENCLR_CNT19_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT19_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 19 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT20_ENABLE_Pos 20U /*!< PMU CNTENCLR: Event Counter 20 Enable Clear Position */ +#define PMU_CNTENCLR_CNT20_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT20_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 20 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT21_ENABLE_Pos 21U /*!< PMU CNTENCLR: Event Counter 21 Enable Clear Position */ +#define PMU_CNTENCLR_CNT21_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT21_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 21 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT22_ENABLE_Pos 22U /*!< PMU CNTENCLR: Event Counter 22 Enable Clear Position */ +#define PMU_CNTENCLR_CNT22_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT22_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 22 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT23_ENABLE_Pos 23U /*!< PMU CNTENCLR: Event Counter 23 Enable Clear Position */ +#define PMU_CNTENCLR_CNT23_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT23_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 23 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT24_ENABLE_Pos 24U /*!< PMU CNTENCLR: Event Counter 24 Enable Clear Position */ +#define PMU_CNTENCLR_CNT24_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT24_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 24 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT25_ENABLE_Pos 25U /*!< PMU CNTENCLR: Event Counter 25 Enable Clear Position */ +#define PMU_CNTENCLR_CNT25_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT25_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 25 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT26_ENABLE_Pos 26U /*!< PMU CNTENCLR: Event Counter 26 Enable Clear Position */ +#define PMU_CNTENCLR_CNT26_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT26_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 26 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT27_ENABLE_Pos 27U /*!< PMU CNTENCLR: Event Counter 27 Enable Clear Position */ +#define PMU_CNTENCLR_CNT27_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT27_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 27 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT28_ENABLE_Pos 28U /*!< PMU CNTENCLR: Event Counter 28 Enable Clear Position */ +#define PMU_CNTENCLR_CNT28_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT28_ENABLE_Pos) /*!< 
PMU CNTENCLR: Event Counter 28 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT29_ENABLE_Pos 29U /*!< PMU CNTENCLR: Event Counter 29 Enable Clear Position */ +#define PMU_CNTENCLR_CNT29_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT29_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 29 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT30_ENABLE_Pos 30U /*!< PMU CNTENCLR: Event Counter 30 Enable Clear Position */ +#define PMU_CNTENCLR_CNT30_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT30_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 30 Enable Clear Mask */ + +#define PMU_CNTENCLR_CCNTR_ENABLE_Pos 31U /*!< PMU CNTENCLR: Cycle Counter Enable Clear Position */ +#define PMU_CNTENCLR_CCNTR_ENABLE_Msk (1UL << PMU_CNTENCLR_CCNTR_ENABLE_Pos) /*!< PMU CNTENCLR: Cycle Counter Enable Clear Mask */ + +/** \brief PMU Interrupt Enable Set Register Definitions */ + +#define PMU_INTENSET_CNT0_ENABLE_Pos 0U /*!< PMU INTENSET: Event Counter 0 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT0_ENABLE_Msk (1UL /*<< PMU_INTENSET_CNT0_ENABLE_Pos*/) /*!< PMU INTENSET: Event Counter 0 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT1_ENABLE_Pos 1U /*!< PMU INTENSET: Event Counter 1 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT1_ENABLE_Msk (1UL << PMU_INTENSET_CNT1_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 1 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT2_ENABLE_Pos 2U /*!< PMU INTENSET: Event Counter 2 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT2_ENABLE_Msk (1UL << PMU_INTENSET_CNT2_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 2 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT3_ENABLE_Pos 3U /*!< PMU INTENSET: Event Counter 3 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT3_ENABLE_Msk (1UL << PMU_INTENSET_CNT3_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 3 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT4_ENABLE_Pos 4U /*!< PMU INTENSET: Event Counter 4 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT4_ENABLE_Msk (1UL << PMU_INTENSET_CNT4_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 4 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT5_ENABLE_Pos 5U /*!< PMU INTENSET: Event Counter 5 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT5_ENABLE_Msk (1UL << PMU_INTENSET_CNT5_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 5 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT6_ENABLE_Pos 6U /*!< PMU INTENSET: Event Counter 6 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT6_ENABLE_Msk (1UL << PMU_INTENSET_CNT6_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 6 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT7_ENABLE_Pos 7U /*!< PMU INTENSET: Event Counter 7 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT7_ENABLE_Msk (1UL << PMU_INTENSET_CNT7_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 7 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT8_ENABLE_Pos 8U /*!< PMU INTENSET: Event Counter 8 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT8_ENABLE_Msk (1UL << PMU_INTENSET_CNT8_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 8 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT9_ENABLE_Pos 9U /*!< PMU INTENSET: Event Counter 9 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT9_ENABLE_Msk (1UL << PMU_INTENSET_CNT9_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 9 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT10_ENABLE_Pos 10U /*!< PMU INTENSET: Event Counter 10 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT10_ENABLE_Msk (1UL << PMU_INTENSET_CNT10_ENABLE_Pos) /*!< PMU 
INTENSET: Event Counter 10 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT11_ENABLE_Pos 11U /*!< PMU INTENSET: Event Counter 11 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT11_ENABLE_Msk (1UL << PMU_INTENSET_CNT11_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 11 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT12_ENABLE_Pos 12U /*!< PMU INTENSET: Event Counter 12 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT12_ENABLE_Msk (1UL << PMU_INTENSET_CNT12_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 12 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT13_ENABLE_Pos 13U /*!< PMU INTENSET: Event Counter 13 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT13_ENABLE_Msk (1UL << PMU_INTENSET_CNT13_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 13 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT14_ENABLE_Pos 14U /*!< PMU INTENSET: Event Counter 14 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT14_ENABLE_Msk (1UL << PMU_INTENSET_CNT14_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 14 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT15_ENABLE_Pos 15U /*!< PMU INTENSET: Event Counter 15 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT15_ENABLE_Msk (1UL << PMU_INTENSET_CNT15_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 15 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT16_ENABLE_Pos 16U /*!< PMU INTENSET: Event Counter 16 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT16_ENABLE_Msk (1UL << PMU_INTENSET_CNT16_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 16 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT17_ENABLE_Pos 17U /*!< PMU INTENSET: Event Counter 17 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT17_ENABLE_Msk (1UL << PMU_INTENSET_CNT17_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 17 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT18_ENABLE_Pos 18U /*!< PMU INTENSET: Event Counter 18 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT18_ENABLE_Msk (1UL << PMU_INTENSET_CNT18_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 18 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT19_ENABLE_Pos 19U /*!< PMU INTENSET: Event Counter 19 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT19_ENABLE_Msk (1UL << PMU_INTENSET_CNT19_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 19 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT20_ENABLE_Pos 20U /*!< PMU INTENSET: Event Counter 20 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT20_ENABLE_Msk (1UL << PMU_INTENSET_CNT20_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 20 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT21_ENABLE_Pos 21U /*!< PMU INTENSET: Event Counter 21 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT21_ENABLE_Msk (1UL << PMU_INTENSET_CNT21_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 21 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT22_ENABLE_Pos 22U /*!< PMU INTENSET: Event Counter 22 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT22_ENABLE_Msk (1UL << PMU_INTENSET_CNT22_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 22 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT23_ENABLE_Pos 23U /*!< PMU INTENSET: Event Counter 23 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT23_ENABLE_Msk (1UL << PMU_INTENSET_CNT23_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 23 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT24_ENABLE_Pos 24U /*!< PMU INTENSET: Event Counter 24 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT24_ENABLE_Msk (1UL 
<< PMU_INTENSET_CNT24_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 24 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT25_ENABLE_Pos 25U /*!< PMU INTENSET: Event Counter 25 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT25_ENABLE_Msk (1UL << PMU_INTENSET_CNT25_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 25 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT26_ENABLE_Pos 26U /*!< PMU INTENSET: Event Counter 26 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT26_ENABLE_Msk (1UL << PMU_INTENSET_CNT26_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 26 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT27_ENABLE_Pos 27U /*!< PMU INTENSET: Event Counter 27 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT27_ENABLE_Msk (1UL << PMU_INTENSET_CNT27_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 27 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT28_ENABLE_Pos 28U /*!< PMU INTENSET: Event Counter 28 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT28_ENABLE_Msk (1UL << PMU_INTENSET_CNT28_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 28 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT29_ENABLE_Pos 29U /*!< PMU INTENSET: Event Counter 29 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT29_ENABLE_Msk (1UL << PMU_INTENSET_CNT29_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 29 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT30_ENABLE_Pos 30U /*!< PMU INTENSET: Event Counter 30 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT30_ENABLE_Msk (1UL << PMU_INTENSET_CNT30_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 30 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CYCCNT_ENABLE_Pos 31U /*!< PMU INTENSET: Cycle Counter Interrupt Enable Set Position */ +#define PMU_INTENSET_CYCCNT_ENABLE_Msk (1UL << PMU_INTENSET_CYCCNT_ENABLE_Pos) /*!< PMU INTENSET: Cycle Counter Interrupt Enable Set Mask */ + +/** \brief PMU Interrupt Enable Clear Register Definitions */ + +#define PMU_INTENCLR_CNT0_ENABLE_Pos 0U /*!< PMU INTENCLR: Event Counter 0 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT0_ENABLE_Msk (1UL /*<< PMU_INTENCLR_CNT0_ENABLE_Pos*/) /*!< PMU INTENCLR: Event Counter 0 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT1_ENABLE_Pos 1U /*!< PMU INTENCLR: Event Counter 1 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT1_ENABLE_Msk (1UL << PMU_INTENCLR_CNT1_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 1 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT2_ENABLE_Pos 2U /*!< PMU INTENCLR: Event Counter 2 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT2_ENABLE_Msk (1UL << PMU_INTENCLR_CNT2_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 2 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT3_ENABLE_Pos 3U /*!< PMU INTENCLR: Event Counter 3 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT3_ENABLE_Msk (1UL << PMU_INTENCLR_CNT3_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 3 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT4_ENABLE_Pos 4U /*!< PMU INTENCLR: Event Counter 4 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT4_ENABLE_Msk (1UL << PMU_INTENCLR_CNT4_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 4 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT5_ENABLE_Pos 5U /*!< PMU INTENCLR: Event Counter 5 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT5_ENABLE_Msk (1UL << PMU_INTENCLR_CNT5_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 5 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT6_ENABLE_Pos 6U /*!< PMU INTENCLR:
Event Counter 6 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT6_ENABLE_Msk (1UL << PMU_INTENCLR_CNT6_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 6 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT7_ENABLE_Pos 7U /*!< PMU INTENCLR: Event Counter 7 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT7_ENABLE_Msk (1UL << PMU_INTENCLR_CNT7_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 7 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT8_ENABLE_Pos 8U /*!< PMU INTENCLR: Event Counter 8 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT8_ENABLE_Msk (1UL << PMU_INTENCLR_CNT8_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 8 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT9_ENABLE_Pos 9U /*!< PMU INTENCLR: Event Counter 9 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT9_ENABLE_Msk (1UL << PMU_INTENCLR_CNT9_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 9 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT10_ENABLE_Pos 10U /*!< PMU INTENCLR: Event Counter 10 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT10_ENABLE_Msk (1UL << PMU_INTENCLR_CNT10_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 10 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT11_ENABLE_Pos 11U /*!< PMU INTENCLR: Event Counter 11 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT11_ENABLE_Msk (1UL << PMU_INTENCLR_CNT11_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 11 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT12_ENABLE_Pos 12U /*!< PMU INTENCLR: Event Counter 12 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT12_ENABLE_Msk (1UL << PMU_INTENCLR_CNT12_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 12 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT13_ENABLE_Pos 13U /*!< PMU INTENCLR: Event Counter 13 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT13_ENABLE_Msk (1UL << PMU_INTENCLR_CNT13_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 13 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT14_ENABLE_Pos 14U /*!< PMU INTENCLR: Event Counter 14 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT14_ENABLE_Msk (1UL << PMU_INTENCLR_CNT14_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 14 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT15_ENABLE_Pos 15U /*!< PMU INTENCLR: Event Counter 15 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT15_ENABLE_Msk (1UL << PMU_INTENCLR_CNT15_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 15 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT16_ENABLE_Pos 16U /*!< PMU INTENCLR: Event Counter 16 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT16_ENABLE_Msk (1UL << PMU_INTENCLR_CNT16_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 16 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT17_ENABLE_Pos 17U /*!< PMU INTENCLR: Event Counter 17 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT17_ENABLE_Msk (1UL << PMU_INTENCLR_CNT17_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 17 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT18_ENABLE_Pos 18U /*!< PMU INTENCLR: Event Counter 18 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT18_ENABLE_Msk (1UL << PMU_INTENCLR_CNT18_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 18 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT19_ENABLE_Pos 19U /*!< PMU INTENCLR: Event Counter 19 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT19_ENABLE_Msk (1UL << PMU_INTENCLR_CNT19_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 19 Interrupt Enable 
Clear Mask */ + +#define PMU_INTENCLR_CNT20_ENABLE_Pos 20U /*!< PMU INTENCLR: Event Counter 20 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT20_ENABLE_Msk (1UL << PMU_INTENCLR_CNT20_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 20 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT21_ENABLE_Pos 21U /*!< PMU INTENCLR: Event Counter 21 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT21_ENABLE_Msk (1UL << PMU_INTENCLR_CNT21_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 21 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT22_ENABLE_Pos 22U /*!< PMU INTENCLR: Event Counter 22 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT22_ENABLE_Msk (1UL << PMU_INTENCLR_CNT22_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 22 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT23_ENABLE_Pos 23U /*!< PMU INTENCLR: Event Counter 23 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT23_ENABLE_Msk (1UL << PMU_INTENCLR_CNT23_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 23 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT24_ENABLE_Pos 24U /*!< PMU INTENCLR: Event Counter 24 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT24_ENABLE_Msk (1UL << PMU_INTENCLR_CNT24_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 24 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT25_ENABLE_Pos 25U /*!< PMU INTENCLR: Event Counter 25 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT25_ENABLE_Msk (1UL << PMU_INTENCLR_CNT25_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 25 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT26_ENABLE_Pos 26U /*!< PMU INTENCLR: Event Counter 26 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT26_ENABLE_Msk (1UL << PMU_INTENCLR_CNT26_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 26 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT27_ENABLE_Pos 27U /*!< PMU INTENCLR: Event Counter 27 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT27_ENABLE_Msk (1UL << PMU_INTENCLR_CNT27_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 27 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT28_ENABLE_Pos 28U /*!< PMU INTENCLR: Event Counter 28 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT28_ENABLE_Msk (1UL << PMU_INTENCLR_CNT28_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 28 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT29_ENABLE_Pos 29U /*!< PMU INTENCLR: Event Counter 29 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT29_ENABLE_Msk (1UL << PMU_INTENCLR_CNT29_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 29 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT30_ENABLE_Pos 30U /*!< PMU INTENCLR: Event Counter 30 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT30_ENABLE_Msk (1UL << PMU_INTENCLR_CNT30_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 30 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CYCCNT_ENABLE_Pos 31U /*!< PMU INTENCLR: Cycle Counter Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CYCCNT_ENABLE_Msk (1UL << PMU_INTENCLR_CYCCNT_ENABLE_Pos) /*!< PMU INTENCLR: Cycle Counter Interrupt Enable Clear Mask */ + +/** \brief PMU Overflow Flag Status Set Register Definitions */ + +#define PMU_OVSSET_CNT0_STATUS_Pos 0U /*!< PMU OVSSET: Event Counter 0 Overflow Set Position */ +#define PMU_OVSSET_CNT0_STATUS_Msk (1UL /*<< PMU_OVSSET_CNT0_STATUS_Pos*/) /*!< PMU OVSSET: Event Counter 0 Overflow Set Mask */ + +#define PMU_OVSSET_CNT1_STATUS_Pos 1U /*!< PMU OVSSET: Event Counter 1 Overflow Set Position */ +#define 
PMU_OVSSET_CNT1_STATUS_Msk (1UL << PMU_OVSSET_CNT1_STATUS_Pos) /*!< PMU OVSSET: Event Counter 1 Overflow Set Mask */ + +#define PMU_OVSSET_CNT2_STATUS_Pos 2U /*!< PMU OVSSET: Event Counter 2 Overflow Set Position */ +#define PMU_OVSSET_CNT2_STATUS_Msk (1UL << PMU_OVSSET_CNT2_STATUS_Pos) /*!< PMU OVSSET: Event Counter 2 Overflow Set Mask */ + +#define PMU_OVSSET_CNT3_STATUS_Pos 3U /*!< PMU OVSSET: Event Counter 3 Overflow Set Position */ +#define PMU_OVSSET_CNT3_STATUS_Msk (1UL << PMU_OVSSET_CNT3_STATUS_Pos) /*!< PMU OVSSET: Event Counter 3 Overflow Set Mask */ + +#define PMU_OVSSET_CNT4_STATUS_Pos 4U /*!< PMU OVSSET: Event Counter 4 Overflow Set Position */ +#define PMU_OVSSET_CNT4_STATUS_Msk (1UL << PMU_OVSSET_CNT4_STATUS_Pos) /*!< PMU OVSSET: Event Counter 4 Overflow Set Mask */ + +#define PMU_OVSSET_CNT5_STATUS_Pos 5U /*!< PMU OVSSET: Event Counter 5 Overflow Set Position */ +#define PMU_OVSSET_CNT5_STATUS_Msk (1UL << PMU_OVSSET_CNT5_STATUS_Pos) /*!< PMU OVSSET: Event Counter 5 Overflow Set Mask */ + +#define PMU_OVSSET_CNT6_STATUS_Pos 6U /*!< PMU OVSSET: Event Counter 6 Overflow Set Position */ +#define PMU_OVSSET_CNT6_STATUS_Msk (1UL << PMU_OVSSET_CNT6_STATUS_Pos) /*!< PMU OVSSET: Event Counter 6 Overflow Set Mask */ + +#define PMU_OVSSET_CNT7_STATUS_Pos 7U /*!< PMU OVSSET: Event Counter 7 Overflow Set Position */ +#define PMU_OVSSET_CNT7_STATUS_Msk (1UL << PMU_OVSSET_CNT7_STATUS_Pos) /*!< PMU OVSSET: Event Counter 7 Overflow Set Mask */ + +#define PMU_OVSSET_CNT8_STATUS_Pos 8U /*!< PMU OVSSET: Event Counter 8 Overflow Set Position */ +#define PMU_OVSSET_CNT8_STATUS_Msk (1UL << PMU_OVSSET_CNT8_STATUS_Pos) /*!< PMU OVSSET: Event Counter 8 Overflow Set Mask */ + +#define PMU_OVSSET_CNT9_STATUS_Pos 9U /*!< PMU OVSSET: Event Counter 9 Overflow Set Position */ +#define PMU_OVSSET_CNT9_STATUS_Msk (1UL << PMU_OVSSET_CNT9_STATUS_Pos) /*!< PMU OVSSET: Event Counter 9 Overflow Set Mask */ + +#define PMU_OVSSET_CNT10_STATUS_Pos 10U /*!< PMU OVSSET: Event Counter 10 Overflow Set Position */ +#define PMU_OVSSET_CNT10_STATUS_Msk (1UL << PMU_OVSSET_CNT10_STATUS_Pos) /*!< PMU OVSSET: Event Counter 10 Overflow Set Mask */ + +#define PMU_OVSSET_CNT11_STATUS_Pos 11U /*!< PMU OVSSET: Event Counter 11 Overflow Set Position */ +#define PMU_OVSSET_CNT11_STATUS_Msk (1UL << PMU_OVSSET_CNT11_STATUS_Pos) /*!< PMU OVSSET: Event Counter 11 Overflow Set Mask */ + +#define PMU_OVSSET_CNT12_STATUS_Pos 12U /*!< PMU OVSSET: Event Counter 12 Overflow Set Position */ +#define PMU_OVSSET_CNT12_STATUS_Msk (1UL << PMU_OVSSET_CNT12_STATUS_Pos) /*!< PMU OVSSET: Event Counter 12 Overflow Set Mask */ + +#define PMU_OVSSET_CNT13_STATUS_Pos 13U /*!< PMU OVSSET: Event Counter 13 Overflow Set Position */ +#define PMU_OVSSET_CNT13_STATUS_Msk (1UL << PMU_OVSSET_CNT13_STATUS_Pos) /*!< PMU OVSSET: Event Counter 13 Overflow Set Mask */ + +#define PMU_OVSSET_CNT14_STATUS_Pos 14U /*!< PMU OVSSET: Event Counter 14 Overflow Set Position */ +#define PMU_OVSSET_CNT14_STATUS_Msk (1UL << PMU_OVSSET_CNT14_STATUS_Pos) /*!< PMU OVSSET: Event Counter 14 Overflow Set Mask */ + +#define PMU_OVSSET_CNT15_STATUS_Pos 15U /*!< PMU OVSSET: Event Counter 15 Overflow Set Position */ +#define PMU_OVSSET_CNT15_STATUS_Msk (1UL << PMU_OVSSET_CNT15_STATUS_Pos) /*!< PMU OVSSET: Event Counter 15 Overflow Set Mask */ + +#define PMU_OVSSET_CNT16_STATUS_Pos 16U /*!< PMU OVSSET: Event Counter 16 Overflow Set Position */ +#define PMU_OVSSET_CNT16_STATUS_Msk (1UL << PMU_OVSSET_CNT16_STATUS_Pos) /*!< PMU OVSSET: Event Counter 16 Overflow Set Mask */ + +#define 
PMU_OVSSET_CNT17_STATUS_Pos 17U /*!< PMU OVSSET: Event Counter 17 Overflow Set Position */ +#define PMU_OVSSET_CNT17_STATUS_Msk (1UL << PMU_OVSSET_CNT17_STATUS_Pos) /*!< PMU OVSSET: Event Counter 17 Overflow Set Mask */ + +#define PMU_OVSSET_CNT18_STATUS_Pos 18U /*!< PMU OVSSET: Event Counter 18 Overflow Set Position */ +#define PMU_OVSSET_CNT18_STATUS_Msk (1UL << PMU_OVSSET_CNT18_STATUS_Pos) /*!< PMU OVSSET: Event Counter 18 Overflow Set Mask */ + +#define PMU_OVSSET_CNT19_STATUS_Pos 19U /*!< PMU OVSSET: Event Counter 19 Overflow Set Position */ +#define PMU_OVSSET_CNT19_STATUS_Msk (1UL << PMU_OVSSET_CNT19_STATUS_Pos) /*!< PMU OVSSET: Event Counter 19 Overflow Set Mask */ + +#define PMU_OVSSET_CNT20_STATUS_Pos 20U /*!< PMU OVSSET: Event Counter 20 Overflow Set Position */ +#define PMU_OVSSET_CNT20_STATUS_Msk (1UL << PMU_OVSSET_CNT20_STATUS_Pos) /*!< PMU OVSSET: Event Counter 20 Overflow Set Mask */ + +#define PMU_OVSSET_CNT21_STATUS_Pos 21U /*!< PMU OVSSET: Event Counter 21 Overflow Set Position */ +#define PMU_OVSSET_CNT21_STATUS_Msk (1UL << PMU_OVSSET_CNT21_STATUS_Pos) /*!< PMU OVSSET: Event Counter 21 Overflow Set Mask */ + +#define PMU_OVSSET_CNT22_STATUS_Pos 22U /*!< PMU OVSSET: Event Counter 22 Overflow Set Position */ +#define PMU_OVSSET_CNT22_STATUS_Msk (1UL << PMU_OVSSET_CNT22_STATUS_Pos) /*!< PMU OVSSET: Event Counter 22 Overflow Set Mask */ + +#define PMU_OVSSET_CNT23_STATUS_Pos 23U /*!< PMU OVSSET: Event Counter 23 Overflow Set Position */ +#define PMU_OVSSET_CNT23_STATUS_Msk (1UL << PMU_OVSSET_CNT23_STATUS_Pos) /*!< PMU OVSSET: Event Counter 23 Overflow Set Mask */ + +#define PMU_OVSSET_CNT24_STATUS_Pos 24U /*!< PMU OVSSET: Event Counter 24 Overflow Set Position */ +#define PMU_OVSSET_CNT24_STATUS_Msk (1UL << PMU_OVSSET_CNT24_STATUS_Pos) /*!< PMU OVSSET: Event Counter 24 Overflow Set Mask */ + +#define PMU_OVSSET_CNT25_STATUS_Pos 25U /*!< PMU OVSSET: Event Counter 25 Overflow Set Position */ +#define PMU_OVSSET_CNT25_STATUS_Msk (1UL << PMU_OVSSET_CNT25_STATUS_Pos) /*!< PMU OVSSET: Event Counter 25 Overflow Set Mask */ + +#define PMU_OVSSET_CNT26_STATUS_Pos 26U /*!< PMU OVSSET: Event Counter 26 Overflow Set Position */ +#define PMU_OVSSET_CNT26_STATUS_Msk (1UL << PMU_OVSSET_CNT26_STATUS_Pos) /*!< PMU OVSSET: Event Counter 26 Overflow Set Mask */ + +#define PMU_OVSSET_CNT27_STATUS_Pos 27U /*!< PMU OVSSET: Event Counter 27 Overflow Set Position */ +#define PMU_OVSSET_CNT27_STATUS_Msk (1UL << PMU_OVSSET_CNT27_STATUS_Pos) /*!< PMU OVSSET: Event Counter 27 Overflow Set Mask */ + +#define PMU_OVSSET_CNT28_STATUS_Pos 28U /*!< PMU OVSSET: Event Counter 28 Overflow Set Position */ +#define PMU_OVSSET_CNT28_STATUS_Msk (1UL << PMU_OVSSET_CNT28_STATUS_Pos) /*!< PMU OVSSET: Event Counter 28 Overflow Set Mask */ + +#define PMU_OVSSET_CNT29_STATUS_Pos 29U /*!< PMU OVSSET: Event Counter 29 Overflow Set Position */ +#define PMU_OVSSET_CNT29_STATUS_Msk (1UL << PMU_OVSSET_CNT29_STATUS_Pos) /*!< PMU OVSSET: Event Counter 29 Overflow Set Mask */ + +#define PMU_OVSSET_CNT30_STATUS_Pos 30U /*!< PMU OVSSET: Event Counter 30 Overflow Set Position */ +#define PMU_OVSSET_CNT30_STATUS_Msk (1UL << PMU_OVSSET_CNT30_STATUS_Pos) /*!< PMU OVSSET: Event Counter 30 Overflow Set Mask */ + +#define PMU_OVSSET_CYCCNT_STATUS_Pos 31U /*!< PMU OVSSET: Cycle Counter Overflow Set Position */ +#define PMU_OVSSET_CYCCNT_STATUS_Msk (1UL << PMU_OVSSET_CYCCNT_STATUS_Pos) /*!< PMU OVSSET: Cycle Counter Overflow Set Mask */ + +/** \brief PMU Overflow Flag Status Clear Register Definitions */ + +#define 
PMU_OVSCLR_CNT0_STATUS_Pos 0U /*!< PMU OVSCLR: Event Counter 0 Overflow Clear Position */ +#define PMU_OVSCLR_CNT0_STATUS_Msk (1UL /*<< PMU_OVSCLR_CNT0_STATUS_Pos*/) /*!< PMU OVSCLR: Event Counter 0 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT1_STATUS_Pos 1U /*!< PMU OVSCLR: Event Counter 1 Overflow Clear Position */ +#define PMU_OVSCLR_CNT1_STATUS_Msk (1UL << PMU_OVSCLR_CNT1_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 1 Overflow Clear */ + +#define PMU_OVSCLR_CNT2_STATUS_Pos 2U /*!< PMU OVSCLR: Event Counter 2 Overflow Clear Position */ +#define PMU_OVSCLR_CNT2_STATUS_Msk (1UL << PMU_OVSCLR_CNT2_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 2 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT3_STATUS_Pos 3U /*!< PMU OVSCLR: Event Counter 3 Overflow Clear Position */ +#define PMU_OVSCLR_CNT3_STATUS_Msk (1UL << PMU_OVSCLR_CNT3_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 3 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT4_STATUS_Pos 4U /*!< PMU OVSCLR: Event Counter 4 Overflow Clear Position */ +#define PMU_OVSCLR_CNT4_STATUS_Msk (1UL << PMU_OVSCLR_CNT4_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 4 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT5_STATUS_Pos 5U /*!< PMU OVSCLR: Event Counter 5 Overflow Clear Position */ +#define PMU_OVSCLR_CNT5_STATUS_Msk (1UL << PMU_OVSCLR_CNT5_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 5 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT6_STATUS_Pos 6U /*!< PMU OVSCLR: Event Counter 6 Overflow Clear Position */ +#define PMU_OVSCLR_CNT6_STATUS_Msk (1UL << PMU_OVSCLR_CNT6_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 6 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT7_STATUS_Pos 7U /*!< PMU OVSCLR: Event Counter 7 Overflow Clear Position */ +#define PMU_OVSCLR_CNT7_STATUS_Msk (1UL << PMU_OVSCLR_CNT7_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 7 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT8_STATUS_Pos 8U /*!< PMU OVSCLR: Event Counter 8 Overflow Clear Position */ +#define PMU_OVSCLR_CNT8_STATUS_Msk (1UL << PMU_OVSCLR_CNT8_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 8 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT9_STATUS_Pos 9U /*!< PMU OVSCLR: Event Counter 9 Overflow Clear Position */ +#define PMU_OVSCLR_CNT9_STATUS_Msk (1UL << PMU_OVSCLR_CNT9_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 9 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT10_STATUS_Pos 10U /*!< PMU OVSCLR: Event Counter 10 Overflow Clear Position */ +#define PMU_OVSCLR_CNT10_STATUS_Msk (1UL << PMU_OVSCLR_CNT10_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 10 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT11_STATUS_Pos 11U /*!< PMU OVSCLR: Event Counter 11 Overflow Clear Position */ +#define PMU_OVSCLR_CNT11_STATUS_Msk (1UL << PMU_OVSCLR_CNT11_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 11 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT12_STATUS_Pos 12U /*!< PMU OVSCLR: Event Counter 12 Overflow Clear Position */ +#define PMU_OVSCLR_CNT12_STATUS_Msk (1UL << PMU_OVSCLR_CNT12_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 12 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT13_STATUS_Pos 13U /*!< PMU OVSCLR: Event Counter 13 Overflow Clear Position */ +#define PMU_OVSCLR_CNT13_STATUS_Msk (1UL << PMU_OVSCLR_CNT13_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 13 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT14_STATUS_Pos 14U /*!< PMU OVSCLR: Event Counter 14 Overflow Clear Position */ +#define PMU_OVSCLR_CNT14_STATUS_Msk (1UL << PMU_OVSCLR_CNT14_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 14 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT15_STATUS_Pos 15U /*!< PMU OVSCLR: Event Counter 15 Overflow Clear Position 
*/ +#define PMU_OVSCLR_CNT15_STATUS_Msk (1UL << PMU_OVSCLR_CNT15_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 15 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT16_STATUS_Pos 16U /*!< PMU OVSCLR: Event Counter 16 Overflow Clear Position */ +#define PMU_OVSCLR_CNT16_STATUS_Msk (1UL << PMU_OVSCLR_CNT16_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 16 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT17_STATUS_Pos 17U /*!< PMU OVSCLR: Event Counter 17 Overflow Clear Position */ +#define PMU_OVSCLR_CNT17_STATUS_Msk (1UL << PMU_OVSCLR_CNT17_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 17 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT18_STATUS_Pos 18U /*!< PMU OVSCLR: Event Counter 18 Overflow Clear Position */ +#define PMU_OVSCLR_CNT18_STATUS_Msk (1UL << PMU_OVSCLR_CNT18_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 18 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT19_STATUS_Pos 19U /*!< PMU OVSCLR: Event Counter 19 Overflow Clear Position */ +#define PMU_OVSCLR_CNT19_STATUS_Msk (1UL << PMU_OVSCLR_CNT19_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 19 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT20_STATUS_Pos 20U /*!< PMU OVSCLR: Event Counter 20 Overflow Clear Position */ +#define PMU_OVSCLR_CNT20_STATUS_Msk (1UL << PMU_OVSCLR_CNT20_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 20 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT21_STATUS_Pos 21U /*!< PMU OVSCLR: Event Counter 21 Overflow Clear Position */ +#define PMU_OVSCLR_CNT21_STATUS_Msk (1UL << PMU_OVSCLR_CNT21_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 21 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT22_STATUS_Pos 22U /*!< PMU OVSCLR: Event Counter 22 Overflow Clear Position */ +#define PMU_OVSCLR_CNT22_STATUS_Msk (1UL << PMU_OVSCLR_CNT22_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 22 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT23_STATUS_Pos 23U /*!< PMU OVSCLR: Event Counter 23 Overflow Clear Position */ +#define PMU_OVSCLR_CNT23_STATUS_Msk (1UL << PMU_OVSCLR_CNT23_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 23 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT24_STATUS_Pos 24U /*!< PMU OVSCLR: Event Counter 24 Overflow Clear Position */ +#define PMU_OVSCLR_CNT24_STATUS_Msk (1UL << PMU_OVSCLR_CNT24_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 24 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT25_STATUS_Pos 25U /*!< PMU OVSCLR: Event Counter 25 Overflow Clear Position */ +#define PMU_OVSCLR_CNT25_STATUS_Msk (1UL << PMU_OVSCLR_CNT25_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 25 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT26_STATUS_Pos 26U /*!< PMU OVSCLR: Event Counter 26 Overflow Clear Position */ +#define PMU_OVSCLR_CNT26_STATUS_Msk (1UL << PMU_OVSCLR_CNT26_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 26 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT27_STATUS_Pos 27U /*!< PMU OVSCLR: Event Counter 27 Overflow Clear Position */ +#define PMU_OVSCLR_CNT27_STATUS_Msk (1UL << PMU_OVSCLR_CNT27_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 27 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT28_STATUS_Pos 28U /*!< PMU OVSCLR: Event Counter 28 Overflow Clear Position */ +#define PMU_OVSCLR_CNT28_STATUS_Msk (1UL << PMU_OVSCLR_CNT28_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 28 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT29_STATUS_Pos 29U /*!< PMU OVSCLR: Event Counter 29 Overflow Clear Position */ +#define PMU_OVSCLR_CNT29_STATUS_Msk (1UL << PMU_OVSCLR_CNT29_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 29 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT30_STATUS_Pos 30U /*!< PMU OVSCLR: Event Counter 30 Overflow Clear Position */ +#define 
PMU_OVSCLR_CNT30_STATUS_Msk (1UL << PMU_OVSCLR_CNT30_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 30 Overflow Clear Mask */ + +#define PMU_OVSCLR_CYCCNT_STATUS_Pos 31U /*!< PMU OVSCLR: Cycle Counter Overflow Clear Position */ +#define PMU_OVSCLR_CYCCNT_STATUS_Msk (1UL << PMU_OVSCLR_CYCCNT_STATUS_Pos) /*!< PMU OVSCLR: Cycle Counter Overflow Clear Mask */ + +/** \brief PMU Software Increment Counter */ + +#define PMU_SWINC_CNT0_Pos 0U /*!< PMU SWINC: Event Counter 0 Software Increment Position */ +#define PMU_SWINC_CNT0_Msk (1UL /*<< PMU_SWINC_CNT0_Pos */) /*!< PMU SWINC: Event Counter 0 Software Increment Mask */ + +#define PMU_SWINC_CNT1_Pos 1U /*!< PMU SWINC: Event Counter 1 Software Increment Position */ +#define PMU_SWINC_CNT1_Msk (1UL << PMU_SWINC_CNT1_Pos) /*!< PMU SWINC: Event Counter 1 Software Increment Mask */ + +#define PMU_SWINC_CNT2_Pos 2U /*!< PMU SWINC: Event Counter 2 Software Increment Position */ +#define PMU_SWINC_CNT2_Msk (1UL << PMU_SWINC_CNT2_Pos) /*!< PMU SWINC: Event Counter 2 Software Increment Mask */ + +#define PMU_SWINC_CNT3_Pos 3U /*!< PMU SWINC: Event Counter 3 Software Increment Position */ +#define PMU_SWINC_CNT3_Msk (1UL << PMU_SWINC_CNT3_Pos) /*!< PMU SWINC: Event Counter 3 Software Increment Mask */ + +#define PMU_SWINC_CNT4_Pos 4U /*!< PMU SWINC: Event Counter 4 Software Increment Position */ +#define PMU_SWINC_CNT4_Msk (1UL << PMU_SWINC_CNT4_Pos) /*!< PMU SWINC: Event Counter 4 Software Increment Mask */ + +#define PMU_SWINC_CNT5_Pos 5U /*!< PMU SWINC: Event Counter 5 Software Increment Position */ +#define PMU_SWINC_CNT5_Msk (1UL << PMU_SWINC_CNT5_Pos) /*!< PMU SWINC: Event Counter 5 Software Increment Mask */ + +#define PMU_SWINC_CNT6_Pos 6U /*!< PMU SWINC: Event Counter 6 Software Increment Position */ +#define PMU_SWINC_CNT6_Msk (1UL << PMU_SWINC_CNT6_Pos) /*!< PMU SWINC: Event Counter 6 Software Increment Mask */ + +#define PMU_SWINC_CNT7_Pos 7U /*!< PMU SWINC: Event Counter 7 Software Increment Position */ +#define PMU_SWINC_CNT7_Msk (1UL << PMU_SWINC_CNT7_Pos) /*!< PMU SWINC: Event Counter 7 Software Increment Mask */ + +#define PMU_SWINC_CNT8_Pos 8U /*!< PMU SWINC: Event Counter 8 Software Increment Position */ +#define PMU_SWINC_CNT8_Msk (1UL << PMU_SWINC_CNT8_Pos) /*!< PMU SWINC: Event Counter 8 Software Increment Mask */ + +#define PMU_SWINC_CNT9_Pos 9U /*!< PMU SWINC: Event Counter 9 Software Increment Position */ +#define PMU_SWINC_CNT9_Msk (1UL << PMU_SWINC_CNT9_Pos) /*!< PMU SWINC: Event Counter 9 Software Increment Mask */ + +#define PMU_SWINC_CNT10_Pos 10U /*!< PMU SWINC: Event Counter 10 Software Increment Position */ +#define PMU_SWINC_CNT10_Msk (1UL << PMU_SWINC_CNT10_Pos) /*!< PMU SWINC: Event Counter 10 Software Increment Mask */ + +#define PMU_SWINC_CNT11_Pos 11U /*!< PMU SWINC: Event Counter 11 Software Increment Position */ +#define PMU_SWINC_CNT11_Msk (1UL << PMU_SWINC_CNT11_Pos) /*!< PMU SWINC: Event Counter 11 Software Increment Mask */ + +#define PMU_SWINC_CNT12_Pos 12U /*!< PMU SWINC: Event Counter 12 Software Increment Position */ +#define PMU_SWINC_CNT12_Msk (1UL << PMU_SWINC_CNT12_Pos) /*!< PMU SWINC: Event Counter 12 Software Increment Mask */ + +#define PMU_SWINC_CNT13_Pos 13U /*!< PMU SWINC: Event Counter 13 Software Increment Position */ +#define PMU_SWINC_CNT13_Msk (1UL << PMU_SWINC_CNT13_Pos) /*!< PMU SWINC: Event Counter 13 Software Increment Mask */ + +#define PMU_SWINC_CNT14_Pos 14U /*!< PMU SWINC: Event Counter 14 Software Increment Position */ +#define PMU_SWINC_CNT14_Msk (1UL << PMU_SWINC_CNT14_Pos) /*!< PMU 
SWINC: Event Counter 14 Software Increment Mask */ + +#define PMU_SWINC_CNT15_Pos 15U /*!< PMU SWINC: Event Counter 15 Software Increment Position */ +#define PMU_SWINC_CNT15_Msk (1UL << PMU_SWINC_CNT15_Pos) /*!< PMU SWINC: Event Counter 15 Software Increment Mask */ + +#define PMU_SWINC_CNT16_Pos 16U /*!< PMU SWINC: Event Counter 16 Software Increment Position */ +#define PMU_SWINC_CNT16_Msk (1UL << PMU_SWINC_CNT16_Pos) /*!< PMU SWINC: Event Counter 16 Software Increment Mask */ + +#define PMU_SWINC_CNT17_Pos 17U /*!< PMU SWINC: Event Counter 17 Software Increment Position */ +#define PMU_SWINC_CNT17_Msk (1UL << PMU_SWINC_CNT17_Pos) /*!< PMU SWINC: Event Counter 17 Software Increment Mask */ + +#define PMU_SWINC_CNT18_Pos 18U /*!< PMU SWINC: Event Counter 18 Software Increment Position */ +#define PMU_SWINC_CNT18_Msk (1UL << PMU_SWINC_CNT18_Pos) /*!< PMU SWINC: Event Counter 18 Software Increment Mask */ + +#define PMU_SWINC_CNT19_Pos 19U /*!< PMU SWINC: Event Counter 19 Software Increment Position */ +#define PMU_SWINC_CNT19_Msk (1UL << PMU_SWINC_CNT19_Pos) /*!< PMU SWINC: Event Counter 19 Software Increment Mask */ + +#define PMU_SWINC_CNT20_Pos 20U /*!< PMU SWINC: Event Counter 20 Software Increment Position */ +#define PMU_SWINC_CNT20_Msk (1UL << PMU_SWINC_CNT20_Pos) /*!< PMU SWINC: Event Counter 20 Software Increment Mask */ + +#define PMU_SWINC_CNT21_Pos 21U /*!< PMU SWINC: Event Counter 21 Software Increment Position */ +#define PMU_SWINC_CNT21_Msk (1UL << PMU_SWINC_CNT21_Pos) /*!< PMU SWINC: Event Counter 21 Software Increment Mask */ + +#define PMU_SWINC_CNT22_Pos 22U /*!< PMU SWINC: Event Counter 22 Software Increment Position */ +#define PMU_SWINC_CNT22_Msk (1UL << PMU_SWINC_CNT22_Pos) /*!< PMU SWINC: Event Counter 22 Software Increment Mask */ + +#define PMU_SWINC_CNT23_Pos 23U /*!< PMU SWINC: Event Counter 23 Software Increment Position */ +#define PMU_SWINC_CNT23_Msk (1UL << PMU_SWINC_CNT23_Pos) /*!< PMU SWINC: Event Counter 23 Software Increment Mask */ + +#define PMU_SWINC_CNT24_Pos 24U /*!< PMU SWINC: Event Counter 24 Software Increment Position */ +#define PMU_SWINC_CNT24_Msk (1UL << PMU_SWINC_CNT24_Pos) /*!< PMU SWINC: Event Counter 24 Software Increment Mask */ + +#define PMU_SWINC_CNT25_Pos 25U /*!< PMU SWINC: Event Counter 25 Software Increment Position */ +#define PMU_SWINC_CNT25_Msk (1UL << PMU_SWINC_CNT25_Pos) /*!< PMU SWINC: Event Counter 25 Software Increment Mask */ + +#define PMU_SWINC_CNT26_Pos 26U /*!< PMU SWINC: Event Counter 26 Software Increment Position */ +#define PMU_SWINC_CNT26_Msk (1UL << PMU_SWINC_CNT26_Pos) /*!< PMU SWINC: Event Counter 26 Software Increment Mask */ + +#define PMU_SWINC_CNT27_Pos 27U /*!< PMU SWINC: Event Counter 27 Software Increment Position */ +#define PMU_SWINC_CNT27_Msk (1UL << PMU_SWINC_CNT27_Pos) /*!< PMU SWINC: Event Counter 27 Software Increment Mask */ + +#define PMU_SWINC_CNT28_Pos 28U /*!< PMU SWINC: Event Counter 28 Software Increment Position */ +#define PMU_SWINC_CNT28_Msk (1UL << PMU_SWINC_CNT28_Pos) /*!< PMU SWINC: Event Counter 28 Software Increment Mask */ + +#define PMU_SWINC_CNT29_Pos 29U /*!< PMU SWINC: Event Counter 29 Software Increment Position */ +#define PMU_SWINC_CNT29_Msk (1UL << PMU_SWINC_CNT29_Pos) /*!< PMU SWINC: Event Counter 29 Software Increment Mask */ + +#define PMU_SWINC_CNT30_Pos 30U /*!< PMU SWINC: Event Counter 30 Software Increment Position */ +#define PMU_SWINC_CNT30_Msk (1UL << PMU_SWINC_CNT30_Pos) /*!< PMU SWINC: Event Counter 30 Software Increment Mask */ + +/** \brief PMU Control 
Register Definitions */ + +#define PMU_CTRL_ENABLE_Pos 0U /*!< PMU CTRL: ENABLE Position */ +#define PMU_CTRL_ENABLE_Msk (1UL /*<< PMU_CTRL_ENABLE_Pos*/) /*!< PMU CTRL: ENABLE Mask */ + +#define PMU_CTRL_EVENTCNT_RESET_Pos 1U /*!< PMU CTRL: Event Counter Reset Position */ +#define PMU_CTRL_EVENTCNT_RESET_Msk (1UL << PMU_CTRL_EVENTCNT_RESET_Pos) /*!< PMU CTRL: Event Counter Reset Mask */ + +#define PMU_CTRL_CYCCNT_RESET_Pos 2U /*!< PMU CTRL: Cycle Counter Reset Position */ +#define PMU_CTRL_CYCCNT_RESET_Msk (1UL << PMU_CTRL_CYCCNT_RESET_Pos) /*!< PMU CTRL: Cycle Counter Reset Mask */ + +#define PMU_CTRL_CYCCNT_DISABLE_Pos 5U /*!< PMU CTRL: Disable Cycle Counter Position */ +#define PMU_CTRL_CYCCNT_DISABLE_Msk (1UL << PMU_CTRL_CYCCNT_DISABLE_Pos) /*!< PMU CTRL: Disable Cycle Counter Mask */ + +#define PMU_CTRL_FRZ_ON_OV_Pos 9U /*!< PMU CTRL: Freeze-on-overflow Position */ +#define PMU_CTRL_FRZ_ON_OV_Msk (1UL << PMU_CTRL_FRZ_ON_OV_Pos) /*!< PMU CTRL: Freeze-on-overflow Mask */ + +#define PMU_CTRL_TRACE_ON_OV_Pos 11U /*!< PMU CTRL: Trace-on-overflow Position */ +#define PMU_CTRL_TRACE_ON_OV_Msk (1UL << PMU_CTRL_TRACE_ON_OV_Pos) /*!< PMU CTRL: Trace-on-overflow Mask */ + +/** \brief PMU Type Register Definitions */ + +#define PMU_TYPE_NUM_CNTS_Pos 0U /*!< PMU TYPE: Number of Counters Position */ +#define PMU_TYPE_NUM_CNTS_Msk (0xFFUL /*<< PMU_TYPE_NUM_CNTS_Pos*/) /*!< PMU TYPE: Number of Counters Mask */ + +#define PMU_TYPE_SIZE_CNTS_Pos 8U /*!< PMU TYPE: Size of Counters Position */ +#define PMU_TYPE_SIZE_CNTS_Msk (0x3FUL << PMU_TYPE_SIZE_CNTS_Pos) /*!< PMU TYPE: Size of Counters Mask */ + +#define PMU_TYPE_CYCCNT_PRESENT_Pos 14U /*!< PMU TYPE: Cycle Counter Present Position */ +#define PMU_TYPE_CYCCNT_PRESENT_Msk (1UL << PMU_TYPE_CYCCNT_PRESENT_Pos) /*!< PMU TYPE: Cycle Counter Present Mask */ + +#define PMU_TYPE_FRZ_OV_SUPPORT_Pos 21U /*!< PMU TYPE: Freeze-on-overflow Support Position */ +#define PMU_TYPE_FRZ_OV_SUPPORT_Msk (1UL << PMU_TYPE_FRZ_OV_SUPPORT_Pos) /*!< PMU TYPE: Freeze-on-overflow Support Mask */ + +#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos 23U /*!< PMU TYPE: Trace-on-overflow Support Position */ +#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Msk (1UL << PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos) /*!< PMU TYPE: Trace-on-overflow Support Mask */ + +/** \brief PMU Authentication Status Register Definitions */ + +#define PMU_AUTHSTATUS_NSID_Pos 0U /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Position */ +#define PMU_AUTHSTATUS_NSID_Msk (0x3UL /*<< PMU_AUTHSTATUS_NSID_Pos*/) /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSNID_Pos 2U /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_NSNID_Msk (0x3UL << PMU_AUTHSTATUS_NSNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SID_Pos 4U /*!< PMU AUTHSTATUS: Secure Invasive Debug Position */ +#define PMU_AUTHSTATUS_SID_Msk (0x3UL << PMU_AUTHSTATUS_SID_Pos) /*!< PMU AUTHSTATUS: Secure Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SNID_Pos 6U /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_SNID_Msk (0x3UL << PMU_AUTHSTATUS_SNID_Pos) /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSUID_Pos 16U /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Position */ +#define PMU_AUTHSTATUS_NSUID_Msk (0x3UL << PMU_AUTHSTATUS_NSUID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSUNID_Pos 18U /*!< PMU AUTHSTATUS: Non-secure
Unprivileged Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_NSUNID_Msk (0x3UL << PMU_AUTHSTATUS_NSUNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SUID_Pos 20U /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Position */ +#define PMU_AUTHSTATUS_SUID_Msk (0x3UL << PMU_AUTHSTATUS_SUID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SUNID_Pos 22U /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_SUNID_Msk (0x3UL << PMU_AUTHSTATUS_SUNID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Mask */ + + +/*@} end of group CMSIS_PMU */ +#endif
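+/* Illustrative usage sketch (not part of the upstream CMSIS register definitions):
+   how the PMU _Pos/_Msk pairs above are typically combined, assuming the PMU
+   instance pointer and the CTRL/TYPE/OVSCLR members of PMU_Type provided
+   elsewhere in this header.
+
+     PMU->OVSCLR = PMU_OVSCLR_CYCCNT_STATUS_Msk;                    // clear a pending cycle counter overflow flag
+     PMU->CTRL  |= PMU_CTRL_CYCCNT_RESET_Msk | PMU_CTRL_ENABLE_Msk; // reset the cycle counter and start counting
+     uint32_t num_counters = (PMU->TYPE & PMU_TYPE_NUM_CNTS_Msk) >> PMU_TYPE_NUM_CNTS_Pos; // number of event counters
+*/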
 + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<<
MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_PXN_Pos 4U /*!< MPU RLAR: PXN Position */ +#define MPU_RLAR_PXN_Msk (1UL << MPU_RLAR_PXN_Pos) /*!< MPU RLAR: PXN Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Disable Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/* Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define 
SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege 
level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +#define FPU_FPDSCR_FZ16_Pos 19U /*!< FPDSCR: FZ16 bit Position */ +#define FPU_FPDSCR_FZ16_Msk (1UL << FPU_FPDSCR_FZ16_Pos) /*!< FPDSCR: FZ16 bit Mask */ + +#define FPU_FPDSCR_LTPSIZE_Pos 16U /*!< FPDSCR: LTPSIZE bit Position */ +#define FPU_FPDSCR_LTPSIZE_Msk (7UL << FPU_FPDSCR_LTPSIZE_Pos) /*!< FPDSCR: LTPSIZE bit Mask */ + +/* Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FPRound_Pos 28U /*!< MVFR0: FPRound bits Position */ +#define FPU_MVFR0_FPRound_Msk (0xFUL << FPU_MVFR0_FPRound_Pos) /*!< MVFR0: FPRound bits Mask */ + +#define FPU_MVFR0_FPSqrt_Pos 20U /*!< MVFR0: FPSqrt bits Position */ +#define FPU_MVFR0_FPSqrt_Msk (0xFUL << FPU_MVFR0_FPSqrt_Pos) /*!< MVFR0: FPSqrt bits Mask */ + +#define FPU_MVFR0_FPDivide_Pos 16U /*!< MVFR0: FPDivide bits Position */ +#define FPU_MVFR0_FPDivide_Msk (0xFUL << FPU_MVFR0_FPDivide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FPDP_Pos 8U /*!< MVFR0: FPDP bits Position */ +#define FPU_MVFR0_FPDP_Msk (0xFUL << FPU_MVFR0_FPDP_Pos) /*!< MVFR0: FPDP bits Mask */ + +#define FPU_MVFR0_FPSP_Pos 4U /*!< MVFR0: FPSP bits Position */ +#define FPU_MVFR0_FPSP_Msk (0xFUL << FPU_MVFR0_FPSP_Pos) /*!< MVFR0: FPSP bits Mask */ + +#define FPU_MVFR0_SIMDReg_Pos 0U /*!< MVFR0: SIMDReg bits Position */ +#define FPU_MVFR0_SIMDReg_Msk (0xFUL /*<< FPU_MVFR0_SIMDReg_Pos*/) /*!< MVFR0: SIMDReg bits Mask */ + +/* Media and VFP Feature Register 1 Definitions */ +#define FPU_MVFR1_FMAC_Pos 28U /*!< MVFR1: FMAC bits Position */ +#define FPU_MVFR1_FMAC_Msk (0xFUL << FPU_MVFR1_FMAC_Pos) /*!< MVFR1: FMAC bits Mask */ + +#define FPU_MVFR1_FPHP_Pos 24U /*!< MVFR1: FPHP bits Position */ +#define FPU_MVFR1_FPHP_Msk (0xFUL << FPU_MVFR1_FPHP_Pos) /*!< MVFR1: FPHP bits Mask */ + +#define FPU_MVFR1_FP16_Pos 20U /*!< MVFR1: FP16 bits Position */ +#define FPU_MVFR1_FP16_Msk (0xFUL << FPU_MVFR1_FP16_Pos) /*!< MVFR1: FP16 bits Mask */ + +#define FPU_MVFR1_MVE_Pos 8U /*!< MVFR1: MVE bits Position */ +#define FPU_MVFR1_MVE_Msk (0xFUL << FPU_MVFR1_MVE_Pos) /*!< MVFR1: MVE bits Mask */ + +#define FPU_MVFR1_FPDNaN_Pos 4U /*!< MVFR1: FPDNaN bits Position */ +#define FPU_MVFR1_FPDNaN_Msk (0xFUL << FPU_MVFR1_FPDNaN_Pos) /*!< MVFR1: FPDNaN bits Mask */ + +#define FPU_MVFR1_FPFtZ_Pos 0U /*!< MVFR1: FPFtZ bits Position */ +#define FPU_MVFR1_FPFtZ_Msk (0xFUL 
/*<< FPU_MVFR1_FPFtZ_Pos*/) /*!< MVFR1: FPFtZ bits Mask */ + +/* Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: FPMisc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: FPMisc bits Mask */ + +/*@} end of group CMSIS_FPU */ + +/* CoreDebug is deprecated. replaced by DCB (Debug Control Block) */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief \deprecated Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + __OM uint32_t DSCEMCR; /*!< Offset: 0x010 ( /W) Debug Set Clear Exception and Monitor Control Register */ + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< \deprecated CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< \deprecated CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_FPD_Pos 23U /*!< \deprecated CoreDebug DHCSR: S_FPD Position */ +#define CoreDebug_DHCSR_S_FPD_Msk (1UL << CoreDebug_DHCSR_S_FPD_Pos) /*!< \deprecated CoreDebug DHCSR: S_FPD Mask */ + +#define CoreDebug_DHCSR_S_SUIDE_Pos 22U /*!< \deprecated CoreDebug DHCSR: S_SUIDE Position */ +#define CoreDebug_DHCSR_S_SUIDE_Msk (1UL << CoreDebug_DHCSR_S_SUIDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_SUIDE Mask */ + +#define CoreDebug_DHCSR_S_NSUIDE_Pos 21U /*!< \deprecated CoreDebug DHCSR: S_NSUIDE Position */ +#define CoreDebug_DHCSR_S_NSUIDE_Msk (1UL << CoreDebug_DHCSR_S_NSUIDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_NSUIDE Mask */ + +#define CoreDebug_DHCSR_S_SDE_Pos 20U /*!< \deprecated CoreDebug DHCSR: S_SDE Position */ +#define CoreDebug_DHCSR_S_SDE_Msk (1UL << CoreDebug_DHCSR_S_SDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_SDE Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< \deprecated CoreDebug DHCSR: S_SLEEP Position */ +#define 
CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< \deprecated CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< \deprecated CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< \deprecated CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< \deprecated CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_PMOV_Pos 6U /*!< \deprecated CoreDebug DHCSR: C_PMOV Position */ +#define CoreDebug_DHCSR_C_PMOV_Msk (1UL << CoreDebug_DHCSR_C_PMOV_Pos) /*!< \deprecated CoreDebug DHCSR: C_PMOV Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< \deprecated CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< \deprecated CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< \deprecated CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< \deprecated CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< \deprecated CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< \deprecated CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< \deprecated CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< \deprecated CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< \deprecated CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< \deprecated CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< \deprecated CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< \deprecated CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< \deprecated CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< \deprecated CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< \deprecated CoreDebug 
DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< \deprecated CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< \deprecated CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< \deprecated CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Set Clear Exception and Monitor Control Register Definitions */ +#define CoreDebug_DSCEMCR_CLR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_REQ, Position */ +#define CoreDebug_DSCEMCR_CLR_MON_REQ_Msk (1UL << CoreDebug_DSCEMCR_CLR_MON_REQ_Pos) /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_REQ, Mask */ + +#define CoreDebug_DSCEMCR_CLR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_PEND, Position */ +#define CoreDebug_DSCEMCR_CLR_MON_PEND_Msk (1UL << CoreDebug_DSCEMCR_CLR_MON_PEND_Pos) /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_PEND, Mask */ + +#define CoreDebug_DSCEMCR_SET_MON_REQ_Pos 3U /*!< \deprecated CoreDebug DSCEMCR: SET_MON_REQ, Position */ +#define CoreDebug_DSCEMCR_SET_MON_REQ_Msk (1UL << CoreDebug_DSCEMCR_SET_MON_REQ_Pos) /*!< \deprecated CoreDebug DSCEMCR: SET_MON_REQ, Mask */ + +#define CoreDebug_DSCEMCR_SET_MON_PEND_Pos 1U /*!< \deprecated CoreDebug DSCEMCR: SET_MON_PEND, Position */ +#define CoreDebug_DSCEMCR_SET_MON_PEND_Msk (1UL << CoreDebug_DSCEMCR_SET_MON_PEND_Pos) /*!< \deprecated CoreDebug DSCEMCR: SET_MON_PEND, Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_UIDEN_Pos 10U /*!< \deprecated CoreDebug DAUTHCTRL: UIDEN, Position */ +#define CoreDebug_DAUTHCTRL_UIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_UIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: UIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_UIDAPEN_Pos 9U /*!< \deprecated CoreDebug DAUTHCTRL: UIDAPEN, Position */ +#define CoreDebug_DAUTHCTRL_UIDAPEN_Msk (1UL << CoreDebug_DAUTHCTRL_UIDAPEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: UIDAPEN, Mask */ + +#define 
CoreDebug_DAUTHCTRL_FSDMA_Pos 8U /*!< \deprecated CoreDebug DAUTHCTRL: FSDMA, Position */ +#define CoreDebug_DAUTHCTRL_FSDMA_Msk (1UL << CoreDebug_DAUTHCTRL_FSDMA_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: FSDMA, Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< \deprecated CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< \deprecated CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< \deprecated CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< \deprecated CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). 
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + __OM uint32_t DSCEMCR; /*!< Offset: 0x010 ( /W) Debug Set Clear Exception and Monitor Control Register */ + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_FPD_Pos 23U /*!< DCB DHCSR: Floating-point registers Debuggable Position */ +#define DCB_DHCSR_S_FPD_Msk (0x1UL << DCB_DHCSR_S_FPD_Pos) /*!< DCB DHCSR: Floating-point registers Debuggable Mask */ + +#define DCB_DHCSR_S_SUIDE_Pos 22U /*!< DCB DHCSR: Secure unprivileged halting debug enabled Position */ +#define DCB_DHCSR_S_SUIDE_Msk (0x1UL << DCB_DHCSR_S_SUIDE_Pos) /*!< DCB DHCSR: Secure unprivileged halting debug enabled Mask */ + +#define DCB_DHCSR_S_NSUIDE_Pos 21U /*!< DCB DHCSR: Non-secure unprivileged halting debug enabled Position */ +#define DCB_DHCSR_S_NSUIDE_Msk (0x1UL << DCB_DHCSR_S_NSUIDE_Pos) /*!< DCB DHCSR: Non-secure unprivileged halting debug enabled Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_PMOV_Pos 6U /*!< DCB DHCSR: Halt on PMU overflow control Position */ +#define DCB_DHCSR_C_PMOV_Msk (0x1UL << DCB_DHCSR_C_PMOV_Pos) /*!< DCB DHCSR: Halt on PMU overflow control Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (0x1UL << 
DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (0x1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (0x1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (0x1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (0x1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (0x1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (0x1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (0x1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (0x1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask 
*/ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (0x1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (0x1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (0x1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (0x1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (0x1UL << DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (0x1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DSCEMCR, Debug Set Clear Exception and Monitor Control Register Definitions */ +#define DCB_DSCEMCR_CLR_MON_REQ_Pos 19U /*!< DCB DSCEMCR: Clear monitor request Position */ +#define DCB_DSCEMCR_CLR_MON_REQ_Msk (0x1UL << DCB_DSCEMCR_CLR_MON_REQ_Pos) /*!< DCB DSCEMCR: Clear monitor request Mask */ + +#define DCB_DSCEMCR_CLR_MON_PEND_Pos 17U /*!< DCB DSCEMCR: Clear monitor pend Position */ +#define DCB_DSCEMCR_CLR_MON_PEND_Msk (0x1UL << DCB_DSCEMCR_CLR_MON_PEND_Pos) /*!< DCB DSCEMCR: Clear monitor pend Mask */ + +#define DCB_DSCEMCR_SET_MON_REQ_Pos 3U /*!< DCB DSCEMCR: Set monitor request Position */ +#define DCB_DSCEMCR_SET_MON_REQ_Msk (0x1UL << DCB_DSCEMCR_SET_MON_REQ_Pos) /*!< DCB DSCEMCR: Set monitor request Mask */ + +#define DCB_DSCEMCR_SET_MON_PEND_Pos 1U /*!< DCB DSCEMCR: Set monitor pend Position */ +#define DCB_DSCEMCR_SET_MON_PEND_Msk (0x1UL << DCB_DSCEMCR_SET_MON_PEND_Pos) /*!< DCB DSCEMCR: Set monitor pend Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_UIDEN_Pos 10U /*!< DCB DAUTHCTRL: Unprivileged Invasive Debug Enable Position */ +#define DCB_DAUTHCTRL_UIDEN_Msk (0x1UL << DCB_DAUTHCTRL_UIDEN_Pos) /*!< DCB DAUTHCTRL: Unprivileged Invasive Debug Enable Mask */ + +#define DCB_DAUTHCTRL_UIDAPEN_Pos 9U /*!< DCB DAUTHCTRL: Unprivileged Invasive DAP Access Enable Position */ +#define DCB_DAUTHCTRL_UIDAPEN_Msk (0x1UL << DCB_DAUTHCTRL_UIDAPEN_Pos) /*!< DCB DAUTHCTRL: Unprivileged Invasive DAP Access Enable Mask */ + +#define DCB_DAUTHCTRL_FSDMA_Pos 8U /*!< DCB DAUTHCTRL: Force Secure DebugMonitor Allowed Position */ +#define DCB_DAUTHCTRL_FSDMA_Msk (0x1UL << DCB_DAUTHCTRL_FSDMA_Pos) /*!< DCB DAUTHCTRL: Force Secure DebugMonitor Allowed Mask */ + +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< 
DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
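  \note    Editor-added usage sketch (not part of the original CMSIS source); it assumes the DIB
           pointer and the _FLD2VAL helper defined further below in this header.
  \code
    uint32_t sid = _FLD2VAL(DIB_DAUTHSTATUS_SID, DIB->DAUTHSTATUS);  // 2-bit Secure Invasive Debug field
    // the field encoding itself is defined in the Arm architecture reference manual
  \endcode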
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SUNID_Pos 22U /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_SUNID_Msk (0x3UL << DIB_DAUTHSTATUS_SUNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_SUID_Pos 20U /*!< DIB DAUTHSTATUS: Secure Unprivileged Invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_SUID_Msk (0x3UL << DIB_DAUTHSTATUS_SUID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_NSUNID_Pos 18U /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Allo Position */ +#define DIB_DAUTHSTATUS_NSUNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSUNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Allo Mask */ + +#define DIB_DAUTHSTATUS_NSUID_Pos 16U /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_NSUID_Msk (0x3UL << DIB_DAUTHSTATUS_NSUID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << 
DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as a uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit field value. + \param[in] field Name of the register bit field. + \param[in] value Value of the register. This parameter is interpreted as a uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. 
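  \note    Editor-added usage sketch (not part of the original CMSIS source): the peripheral
           pointers defined in this group are typically combined with the _VAL2FLD/_FLD2VAL
           helpers above; SCB_AIRCR_PRIGROUP and SysTick_LOAD_RELOAD are field names defined
           earlier in this header.
  \code
    uint32_t prigroup = _FLD2VAL(SCB_AIRCR_PRIGROUP, SCB->AIRCR);    // extract the PRIGROUP field
    SysTick->LOAD     = _VAL2FLD(SysTick_LOAD_RELOAD, 0xFFFFUL);     // build a RELOAD field value
  \endcode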
+ @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define MEMSYSCTL_BASE (0xE001E000UL) /*!< Memory System Control Base Address */ + #define ERRBNK_BASE (0xE001E100UL) /*!< Error Banking Base Address */ + #define PWRMODCTL_BASE (0xE001E300UL) /*!< Power Mode Control Base Address */ + #define EWIC_BASE (0xE001E400UL) /*!< External Wakeup Interrupt Controller Base Address */ + #define PRCCFGINF_BASE (0xE001E700UL) /*!< Processor Configuration Information Base Address */ + #define STL_BASE (0xE001E800UL) /*!< Software Test Library Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + #define ICB ((ICB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define MEMSYSCTL ((MemSysCtl_Type *) MEMSYSCTL_BASE ) /*!< Memory System Control configuration struct */ + #define ERRBNK ((ErrBnk_Type *) ERRBNK_BASE ) /*!< Error Banking configuration struct */ + #define PWRMODCTL ((PwrModCtl_Type *) PWRMODCTL_BASE ) /*!< Power Mode Control configuration struct */ + #define EWIC ((EWIC_Type *) EWIC_BASE ) /*!< EWIC configuration struct */ + #define PRCCFGINF ((PrcCfgInf_Type *) PRCCFGINF_BASE ) /*!< Processor Configuration Information configuration struct */ + #define STL ((STL_Type *) STL_BASE ) /*!< Software Test Library configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) + #define PMU_BASE (0xE0003000UL) /*!< PMU Base Address */ + #define PMU ((PMU_Type *) PMU_BASE ) /*!< PMU configuration struct */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + + #define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ + #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + 
#define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< \deprecated Core Debug Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define ICB_NS ((ICB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< \deprecated Core Debug configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_register_aliases Backwards Compatibility Aliases + \brief Register alias definitions for backwards compatibility. 
+ @{ + */ +#define ID_ADR (ID_AFR) /*!< SCB Auxiliary Feature Register */ + +/* 'SCnSCB' is deprecated and replaced by 'ICB' */ +typedef ICB_Type SCnSCB_Type; + +/* Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_DISCRITAXIRUW_Pos (ICB_ACTLR_DISCRITAXIRUW_Pos) +#define SCnSCB_ACTLR_DISCRITAXIRUW_Msk (ICB_ACTLR_DISCRITAXIRUW_Msk) + +#define SCnSCB_ACTLR_DISDI_Pos (ICB_ACTLR_DISDI_Pos) +#define SCnSCB_ACTLR_DISDI_Msk (ICB_ACTLR_DISDI_Msk) + +#define SCnSCB_ACTLR_DISCRITAXIRUR_Pos (ICB_ACTLR_DISCRITAXIRUR_Pos) +#define SCnSCB_ACTLR_DISCRITAXIRUR_Msk (ICB_ACTLR_DISCRITAXIRUR_Msk) + +#define SCnSCB_ACTLR_EVENTBUSEN_Pos (ICB_ACTLR_EVENTBUSEN_Pos) +#define SCnSCB_ACTLR_EVENTBUSEN_Msk (ICB_ACTLR_EVENTBUSEN_Msk) + +#define SCnSCB_ACTLR_EVENTBUSEN_S_Pos (ICB_ACTLR_EVENTBUSEN_S_Pos) +#define SCnSCB_ACTLR_EVENTBUSEN_S_Msk (ICB_ACTLR_EVENTBUSEN_S_Msk) + +#define SCnSCB_ACTLR_DISITMATBFLUSH_Pos (ICB_ACTLR_DISITMATBFLUSH_Pos) +#define SCnSCB_ACTLR_DISITMATBFLUSH_Msk (ICB_ACTLR_DISITMATBFLUSH_Msk) + +#define SCnSCB_ACTLR_DISNWAMODE_Pos (ICB_ACTLR_DISNWAMODE_Pos) +#define SCnSCB_ACTLR_DISNWAMODE_Msk (ICB_ACTLR_DISNWAMODE_Msk) + +#define SCnSCB_ACTLR_FPEXCODIS_Pos (ICB_ACTLR_FPEXCODIS_Pos) +#define SCnSCB_ACTLR_FPEXCODIS_Msk (ICB_ACTLR_FPEXCODIS_Msk) + +#define SCnSCB_ACTLR_DISOLAP_Pos (ICB_ACTLR_DISOLAP_Pos) +#define SCnSCB_ACTLR_DISOLAP_Msk (ICB_ACTLR_DISOLAP_Msk) + +#define SCnSCB_ACTLR_DISOLAPS_Pos (ICB_ACTLR_DISOLAPS_Pos) +#define SCnSCB_ACTLR_DISOLAPS_Msk (ICB_ACTLR_DISOLAPS_Msk) + +#define SCnSCB_ACTLR_DISLOBR_Pos (ICB_ACTLR_DISLOBR_Pos) +#define SCnSCB_ACTLR_DISLOBR_Msk (ICB_ACTLR_DISLOBR_Msk) + +#define SCnSCB_ACTLR_DISLO_Pos (ICB_ACTLR_DISLO_Pos) +#define SCnSCB_ACTLR_DISLO_Msk (ICB_ACTLR_DISLO_Msk) + +#define SCnSCB_ACTLR_DISLOLEP_Pos (ICB_ACTLR_DISLOLEP_Pos) +#define SCnSCB_ACTLR_DISLOLEP_Msk (ICB_ACTLR_DISLOLEP_Msk) + +#define SCnSCB_ACTLR_DISFOLD_Pos (ICB_ACTLR_DISFOLD_Pos) +#define SCnSCB_ACTLR_DISFOLD_Msk (ICB_ACTLR_DISFOLD_Msk) + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos (ICB_ICTR_INTLINESNUM_Pos) +#define SCnSCB_ICTR_INTLINESNUM_Msk (ICB_ICTR_INTLINESNUM_Msk) + +#define SCnSCB (ICB) +#define SCnSCB_NS (ICB_NS) + +/*@} */ + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
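  \note    Editor-added usage sketch (not part of the original CMSIS source); UART0_IRQn stands in
           for an interrupt number provided by the device header.
  \code
    NVIC_SetPriorityGrouping(3U);        // optional: choose a preempt/sub priority split
    NVIC_SetPriority(UART0_IRQn, 5U);
    NVIC_EnableIRQ(UART0_IRQn);
    // peripheral runs with the interrupt enabled
    NVIC_DisableIRQ(UART0_IRQn);
  \endcode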
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. 
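  \note    Editor-added usage sketch (not part of the original CMSIS source); UART0_IRQn stands in
           for a device interrupt. The grouping value chosen here also feeds \ref NVIC_EncodePriority.
  \code
    NVIC_SetPriorityGrouping(5U);
    NVIC_SetPriority(UART0_IRQn, NVIC_EncodePriority(5U, 1U, 0U));   // preempt level 1, sub-priority 0
  \endcode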
+ */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. 
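  \note    Editor-added usage sketch (not part of the original CMSIS source); SWI0_IRQn stands in
           for a device interrupt used as a software-triggered interrupt.
  \code
    NVIC_SetPendingIRQ(SWI0_IRQn);                // raise the request from software
    if (NVIC_GetPendingIRQ(SWI0_IRQn) != 0U)
    {
      NVIC_ClearPendingIRQ(SWI0_IRQn);            // withdraw it before it is taken
    }
  \endcode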
+ \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
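  \note    Editor-added usage sketch (not part of the original CMSIS source): processor exceptions
           use the negative interrupt numbers from the device header, e.g. SysTick_IRQn or PendSV_IRQn.
  \code
    NVIC_SetPriority(SysTick_IRQn, 2U);
    NVIC_SetPriority(PendSV_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL);  // lowest implemented priority
  \endcode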
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in the SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must have been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from the interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). 
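  \note    Editor-added usage sketch (not part of the original CMSIS source), only meaningful when
           TrustZone is implemented and the code runs in the Secure state; UART0_IRQn stands in for
           a device interrupt assigned to the Non-secure world.
  \code
    TZ_NVIC_SetPriorityGrouping_NS(3U);
    TZ_NVIC_SetPriority_NS(UART0_IRQn, 4U);
    TZ_NVIC_EnableIRQ_NS(UART0_IRQn);
  \endcode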
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## PMU functions and events #################################### */ + +#if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) + +#include "pmu_armv8.h" + +/** + \brief Cortex-M55 PMU events + \note Architectural PMU events can be found in pmu_armv8.h +*/ + +#define ARMCM55_PMU_ECC_ERR 0xC000 /*!< Any ECC error */ +#define ARMCM55_PMU_ECC_ERR_FATAL 0xC001 /*!< Any fatal ECC error */ +#define ARMCM55_PMU_ECC_ERR_DCACHE 0xC010 /*!< Any ECC error in the data cache */ +#define ARMCM55_PMU_ECC_ERR_ICACHE 0xC011 /*!< Any ECC error in the instruction cache */ +#define ARMCM55_PMU_ECC_ERR_FATAL_DCACHE 0xC012 /*!< Any fatal ECC error in the data cache */ +#define ARMCM55_PMU_ECC_ERR_FATAL_ICACHE 0xC013 /*!< Any fatal ECC error in the instruction cache*/ +#define ARMCM55_PMU_ECC_ERR_DTCM 0xC020 /*!< Any ECC error in the DTCM */ +#define ARMCM55_PMU_ECC_ERR_ITCM 0xC021 /*!< Any ECC error in the ITCM */ +#define ARMCM55_PMU_ECC_ERR_FATAL_DTCM 0xC022 /*!< Any fatal ECC error in the DTCM */ +#define ARMCM55_PMU_ECC_ERR_FATAL_ITCM 0xC023 /*!< Any fatal ECC error in the ITCM */ +#define ARMCM55_PMU_PF_LINEFILL 0xC100 /*!< A prefetcher starts a line-fill */ +#define ARMCM55_PMU_PF_CANCEL 0xC101 /*!< A prefetcher stops prefetching */ +#define 
ARMCM55_PMU_PF_DROP_LINEFILL 0xC102 /*!< A linefill triggered by a prefetcher has been dropped because of lack of buffering */ +#define ARMCM55_PMU_NWAMODE_ENTER 0xC200 /*!< No write-allocate mode entry */ +#define ARMCM55_PMU_NWAMODE 0xC201 /*!< Write-allocate store is not allocated into the data cache due to no-write-allocate mode */ +#define ARMCM55_PMU_SAHB_ACCESS 0xC300 /*!< Read or write access on the S-AHB interface to the TCM */ +#define ARMCM55_PMU_PAHB_ACCESS 0xC301 /*!< Read or write access to the P-AHB write interface */ +#define ARMCM55_PMU_AXI_WRITE_ACCESS 0xC302 /*!< Any beat access to M-AXI write interface */ +#define ARMCM55_PMU_AXI_READ_ACCESS 0xC303 /*!< Any beat access to M-AXI read interface */ +#define ARMCM55_PMU_DOSTIMEOUT_DOUBLE 0xC400 /*!< Denial of Service timeout has fired twice and caused buffers to drain to allow forward progress */ +#define ARMCM55_PMU_DOSTIMEOUT_TRIPLE 0xC401 /*!< Denial of Service timeout has fired three times and blocked the LSU to force forward progress */ + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + +/* ########################## MVE functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_MveFunctions MVE Functions + \brief Function that provides MVE type. + @{ + */ + +/** + \brief get MVE type + \details returns the MVE type + \returns + - \b 0: No Vector Extension (MVE) + - \b 1: Integer Vector Extension (MVE-I) + - \b 2: Floating-point Vector Extension (MVE-F) + */ +__STATIC_INLINE uint32_t SCB_GetMVEType(void) +{ + const uint32_t mvfr1 = FPU->MVFR1; + if ((mvfr1 & FPU_MVFR1_MVE_Msk) == (0x2U << FPU_MVFR1_MVE_Pos)) + { + return 2U; + } + else if ((mvfr1 & FPU_MVFR1_MVE_Msk) == (0x1U << FPU_MVFR1_MVE_Pos)) + { + return 1U; + } + else + { + return 0U; + } +} + + +/*@} end of CMSIS_Core_MveFunctions */ + + +/* ########################## Cache functions #################################### */ + +#if ((defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)) || \ + (defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U))) +#include "cachel1_armv7.h" +#endif + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). 
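  \note    Editor-added usage sketch (not part of the original CMSIS source), valid only for Secure
           builds with CMSE: the SAU is typically disabled while its regions are reprogrammed and
           re-enabled afterwards.
  \code
    TZ_SAU_Disable();
    // SAU region setup would go here (e.g. via the partition header or SAU->RNR, RBAR, RLAR)
    TZ_SAU_Enable();
  \endcode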
+ */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details Writes to the Debug Authentication Control register. + \param [in] value Value to be written. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details Writes to the non-secure Debug Authentication Control register when in secure state. + \param [in] value Value to be written + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DIBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System Tick Timer. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. 
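  \note    Editor-added usage sketch (not part of the original CMSIS source): a 1 ms tick, assuming
           the device startup code maintains the usual SystemCoreClock variable.
  \code
    if (SysTick_Config(SystemCoreClock / 1000U) != 0U)
    {
      // reload value does not fit in 24 bits: handle the error
    }
  \endcode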
+ \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. 
+ \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM55_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/Drivers/CMSIS/Include/core_cm7.h b/Drivers/CMSIS/Include/core_cm7.h new file mode 100644 index 0000000..010506e --- /dev/null +++ b/Drivers/CMSIS/Include/core_cm7.h @@ -0,0 +1,2366 @@ +/**************************************************************************//** + * @file core_cm7.h + * @brief CMSIS Cortex-M7 Core Peripheral Access Layer Header File + * @version V5.1.6 + * @date 04. June 2021 + ******************************************************************************/ +/* + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_CM7_H_GENERIC +#define __CORE_CM7_H_GENERIC + +#include <stdint.h> + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M7 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM7 definitions */ +#define __CM7_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM7_CMSIS_VERSION_SUB ( __CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM7_CMSIS_VERSION ((__CM7_CMSIS_VERSION_MAIN << 16U) | \ + __CM7_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (7U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM7_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM7_H_DEPENDANT +#define __CORE_CM7_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined 
__CHECK_DEVICE_DEFINES + #ifndef __CM7_REV + #define __CM7_REV 0x0000U + #warning "__CM7_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __ICACHE_PRESENT + #define __ICACHE_PRESENT 0U + #warning "__ICACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DCACHE_PRESENT + #define __DCACHE_PRESENT 0U + #warning "__DCACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DTCM_PRESENT + #define __DTCM_PRESENT 0U + #warning "__DTCM_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M7 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). 
+ */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:1; /*!< bit: 9 Reserved */ + uint32_t ICI_IT_1:6; /*!< bit: 10..15 ICI/IT part 1 */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit */ + uint32_t ICI_IT_2:2; /*!< bit: 25..26 ICI/IT part 2 */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_ICI_IT_2_Pos 25U /*!< xPSR: ICI/IT part 2 Position */ +#define xPSR_ICI_IT_2_Msk (3UL << xPSR_ICI_IT_2_Pos) /*!< xPSR: ICI/IT part 2 Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ICI_IT_1_Pos 10U /*!< xPSR: ICI/IT part 1 Position */ +#define xPSR_ICI_IT_1_Msk (0x3FUL << xPSR_ICI_IT_1_Pos) /*!< xPSR: ICI/IT part 1 Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t FPCA:1; /*!< bit: 2 FP extension active flag */ + uint32_t _reserved0:29; /*!< bit: 3..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
+ */ +typedef struct +{ + __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[24U]; + __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[24U]; + __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[24U]; + __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[24U]; + __IOM uint32_t IABR[8U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[56U]; + __IOM uint8_t IP[240U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED5[644U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[5U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + uint32_t RESERVED0[1U]; + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + uint32_t RESERVED3[93U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + uint32_t RESERVED4[15U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP 
Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ + uint32_t RESERVED7[5U]; + __IOM uint32_t ITCMCR; /*!< Offset: 0x290 (R/W) Instruction Tightly-Coupled Memory Control Register */ + __IOM uint32_t DTCMCR; /*!< Offset: 0x294 (R/W) Data Tightly-Coupled Memory Control Registers */ + __IOM uint32_t AHBPCR; /*!< Offset: 0x298 (R/W) AHBP Control Register */ + __IOM uint32_t CACR; /*!< Offset: 0x29C (R/W) L1 Cache Control Register */ + __IOM uint32_t AHBSCR; /*!< Offset: 0x2A0 (R/W) AHB Slave Control Register */ + uint32_t RESERVED8[1U]; + __IOM uint32_t ABFSR; /*!< Offset: 0x2A8 (R/W) Auxiliary Bus Fault Status Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define 
SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +#define SCB_AIRCR_VECTRESET_Pos 0U /*!< SCB AIRCR: VECTRESET Position */ +#define SCB_AIRCR_VECTRESET_Msk (1UL /*<< SCB_AIRCR_VECTRESET_Pos*/) /*!< SCB AIRCR: VECTRESET Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: Branch prediction enable bit Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: Branch prediction enable bit Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: Instruction cache enable bit Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: Instruction cache enable bit Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: Cache enable bit Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB 
CCR: Cache enable bit Mask */ + +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +#define SCB_CCR_NONBASETHRDENA_Pos 0U /*!< SCB CCR: NONBASETHRDENA Position */ +#define SCB_CCR_NONBASETHRDENA_Msk (1UL /*<< SCB_CCR_NONBASETHRDENA_Pos*/) /*!< SCB CCR: NONBASETHRDENA Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) 
/*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define 
SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define 
SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/* Instruction Tightly-Coupled Memory Control Register Definitions */ 
+#define SCB_ITCMCR_SZ_Pos 3U /*!< SCB ITCMCR: SZ Position */ +#define SCB_ITCMCR_SZ_Msk (0xFUL << SCB_ITCMCR_SZ_Pos) /*!< SCB ITCMCR: SZ Mask */ + +#define SCB_ITCMCR_RETEN_Pos 2U /*!< SCB ITCMCR: RETEN Position */ +#define SCB_ITCMCR_RETEN_Msk (1UL << SCB_ITCMCR_RETEN_Pos) /*!< SCB ITCMCR: RETEN Mask */ + +#define SCB_ITCMCR_RMW_Pos 1U /*!< SCB ITCMCR: RMW Position */ +#define SCB_ITCMCR_RMW_Msk (1UL << SCB_ITCMCR_RMW_Pos) /*!< SCB ITCMCR: RMW Mask */ + +#define SCB_ITCMCR_EN_Pos 0U /*!< SCB ITCMCR: EN Position */ +#define SCB_ITCMCR_EN_Msk (1UL /*<< SCB_ITCMCR_EN_Pos*/) /*!< SCB ITCMCR: EN Mask */ + +/* Data Tightly-Coupled Memory Control Register Definitions */ +#define SCB_DTCMCR_SZ_Pos 3U /*!< SCB DTCMCR: SZ Position */ +#define SCB_DTCMCR_SZ_Msk (0xFUL << SCB_DTCMCR_SZ_Pos) /*!< SCB DTCMCR: SZ Mask */ + +#define SCB_DTCMCR_RETEN_Pos 2U /*!< SCB DTCMCR: RETEN Position */ +#define SCB_DTCMCR_RETEN_Msk (1UL << SCB_DTCMCR_RETEN_Pos) /*!< SCB DTCMCR: RETEN Mask */ + +#define SCB_DTCMCR_RMW_Pos 1U /*!< SCB DTCMCR: RMW Position */ +#define SCB_DTCMCR_RMW_Msk (1UL << SCB_DTCMCR_RMW_Pos) /*!< SCB DTCMCR: RMW Mask */ + +#define SCB_DTCMCR_EN_Pos 0U /*!< SCB DTCMCR: EN Position */ +#define SCB_DTCMCR_EN_Msk (1UL /*<< SCB_DTCMCR_EN_Pos*/) /*!< SCB DTCMCR: EN Mask */ + +/* AHBP Control Register Definitions */ +#define SCB_AHBPCR_SZ_Pos 1U /*!< SCB AHBPCR: SZ Position */ +#define SCB_AHBPCR_SZ_Msk (7UL << SCB_AHBPCR_SZ_Pos) /*!< SCB AHBPCR: SZ Mask */ + +#define SCB_AHBPCR_EN_Pos 0U /*!< SCB AHBPCR: EN Position */ +#define SCB_AHBPCR_EN_Msk (1UL /*<< SCB_AHBPCR_EN_Pos*/) /*!< SCB AHBPCR: EN Mask */ + +/* L1 Cache Control Register Definitions */ +#define SCB_CACR_FORCEWT_Pos 2U /*!< SCB CACR: FORCEWT Position */ +#define SCB_CACR_FORCEWT_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: FORCEWT Mask */ + +#define SCB_CACR_ECCEN_Pos 1U /*!< \deprecated SCB CACR: ECCEN Position */ +#define SCB_CACR_ECCEN_Msk (1UL << SCB_CACR_ECCEN_Pos) /*!< \deprecated SCB CACR: ECCEN Mask */ + +#define SCB_CACR_ECCDIS_Pos 1U /*!< SCB CACR: ECCDIS Position */ +#define SCB_CACR_ECCDIS_Msk (1UL << SCB_CACR_ECCDIS_Pos) /*!< SCB CACR: ECCDIS Mask */ + +#define SCB_CACR_SIWT_Pos 0U /*!< SCB CACR: SIWT Position */ +#define SCB_CACR_SIWT_Msk (1UL /*<< SCB_CACR_SIWT_Pos*/) /*!< SCB CACR: SIWT Mask */ + +/* AHBS Control Register Definitions */ +#define SCB_AHBSCR_INITCOUNT_Pos 11U /*!< SCB AHBSCR: INITCOUNT Position */ +#define SCB_AHBSCR_INITCOUNT_Msk (0x1FUL << SCB_AHBSCR_INITCOUNT_Pos) /*!< SCB AHBSCR: INITCOUNT Mask */ + +#define SCB_AHBSCR_TPRI_Pos 2U /*!< SCB AHBSCR: TPRI Position */ +#define SCB_AHBSCR_TPRI_Msk (0x1FFUL << SCB_AHBSCR_TPRI_Pos) /*!< SCB AHBSCR: TPRI Mask */ + +#define SCB_AHBSCR_CTL_Pos 0U /*!< SCB AHBSCR: CTL Position*/ +#define SCB_AHBSCR_CTL_Msk (3UL /*<< SCB_AHBSCR_CTL_Pos*/) /*!< SCB AHBSCR: CTL Mask */ + +/* Auxiliary Bus Fault Status Register Definitions */ +#define SCB_ABFSR_AXIMTYPE_Pos 8U /*!< SCB ABFSR: AXIMTYPE Position*/ +#define SCB_ABFSR_AXIMTYPE_Msk (3UL << SCB_ABFSR_AXIMTYPE_Pos) /*!< SCB ABFSR: AXIMTYPE Mask */ + +#define SCB_ABFSR_EPPB_Pos 4U /*!< SCB ABFSR: EPPB Position*/ +#define SCB_ABFSR_EPPB_Msk (1UL << SCB_ABFSR_EPPB_Pos) /*!< SCB ABFSR: EPPB Mask */ + +#define SCB_ABFSR_AXIM_Pos 3U /*!< SCB ABFSR: AXIM Position*/ +#define SCB_ABFSR_AXIM_Msk (1UL << SCB_ABFSR_AXIM_Pos) /*!< SCB ABFSR: AXIM Mask */ + +#define SCB_ABFSR_AHBP_Pos 2U /*!< SCB ABFSR: AHBP Position*/ +#define SCB_ABFSR_AHBP_Msk (1UL << SCB_ABFSR_AHBP_Pos) /*!< SCB ABFSR: AHBP Mask */ + +#define SCB_ABFSR_DTCM_Pos 1U 
/*!< SCB ABFSR: DTCM Position*/ +#define SCB_ABFSR_DTCM_Msk (1UL << SCB_ABFSR_DTCM_Pos) /*!< SCB ABFSR: DTCM Mask */ + +#define SCB_ABFSR_ITCM_Pos 0U /*!< SCB ABFSR: ITCM Position*/ +#define SCB_ABFSR_ITCM_Msk (1UL /*<< SCB_ABFSR_ITCM_Pos*/) /*!< SCB ABFSR: ITCM Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/* Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_DISDYNADD_Pos 26U /*!< ACTLR: DISDYNADD Position */ +#define SCnSCB_ACTLR_DISDYNADD_Msk (1UL << SCnSCB_ACTLR_DISDYNADD_Pos) /*!< ACTLR: DISDYNADD Mask */ + +#define SCnSCB_ACTLR_DISISSCH1_Pos 21U /*!< ACTLR: DISISSCH1 Position */ +#define SCnSCB_ACTLR_DISISSCH1_Msk (0x1FUL << SCnSCB_ACTLR_DISISSCH1_Pos) /*!< ACTLR: DISISSCH1 Mask */ + +#define SCnSCB_ACTLR_DISDI_Pos 16U /*!< ACTLR: DISDI Position */ +#define SCnSCB_ACTLR_DISDI_Msk (0x1FUL << SCnSCB_ACTLR_DISDI_Pos) /*!< ACTLR: DISDI Mask */ + +#define SCnSCB_ACTLR_DISCRITAXIRUR_Pos 15U /*!< ACTLR: DISCRITAXIRUR Position */ +#define SCnSCB_ACTLR_DISCRITAXIRUR_Msk (1UL << SCnSCB_ACTLR_DISCRITAXIRUR_Pos) /*!< ACTLR: DISCRITAXIRUR Mask */ + +#define SCnSCB_ACTLR_DISBTACALLOC_Pos 14U /*!< ACTLR: DISBTACALLOC Position */ +#define SCnSCB_ACTLR_DISBTACALLOC_Msk (1UL << SCnSCB_ACTLR_DISBTACALLOC_Pos) /*!< ACTLR: DISBTACALLOC Mask */ + +#define SCnSCB_ACTLR_DISBTACREAD_Pos 13U /*!< ACTLR: DISBTACREAD Position */ +#define SCnSCB_ACTLR_DISBTACREAD_Msk (1UL << SCnSCB_ACTLR_DISBTACREAD_Pos) /*!< ACTLR: DISBTACREAD Mask */ + +#define SCnSCB_ACTLR_DISITMATBFLUSH_Pos 12U /*!< ACTLR: DISITMATBFLUSH Position */ +#define SCnSCB_ACTLR_DISITMATBFLUSH_Msk (1UL << SCnSCB_ACTLR_DISITMATBFLUSH_Pos) /*!< ACTLR: DISITMATBFLUSH Mask */ + +#define SCnSCB_ACTLR_DISRAMODE_Pos 11U /*!< ACTLR: DISRAMODE Position */ +#define SCnSCB_ACTLR_DISRAMODE_Msk (1UL << SCnSCB_ACTLR_DISRAMODE_Pos) /*!< ACTLR: DISRAMODE Mask */ + +#define SCnSCB_ACTLR_FPEXCODIS_Pos 10U /*!< ACTLR: FPEXCODIS Position */ +#define SCnSCB_ACTLR_FPEXCODIS_Msk (1UL << SCnSCB_ACTLR_FPEXCODIS_Pos) /*!< ACTLR: FPEXCODIS Mask */ + +#define SCnSCB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ +#define SCnSCB_ACTLR_DISFOLD_Msk (1UL << SCnSCB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ + +#define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ +#define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[6U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TraceBusID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TraceBusID_Msk (0x7FUL << ITM_TCR_TraceBusID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPrescale_Pos 8U /*!< ITM TCR: TSPrescale Position */ +#define ITM_TCR_TSPrescale_Msk (3UL << ITM_TCR_TSPrescale_Pos) /*!< ITM TCR: TSPrescale Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define 
ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + __IOM uint32_t MASK0; /*!< Offset: 0x024 (R/W) Mask Register 0 */ + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED0[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + __IOM uint32_t MASK1; /*!< Offset: 0x034 (R/W) Mask Register 1 */ + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + __IOM uint32_t MASK2; /*!< Offset: 0x044 (R/W) Mask Register 2 */ + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + __IOM uint32_t MASK3; /*!< Offset: 0x054 (R/W) Mask Register 3 */ + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED3[981U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( W) Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define 
DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Mask Register Definitions */ +#define 
DWT_MASK_MASK_Pos 0U /*!< DWT MASK: MASK Position */ +#define DWT_MASK_MASK_Msk (0x1FUL /*<< DWT_MASK_MASK_Pos*/) /*!< DWT MASK: MASK Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVADDR1_Pos 16U /*!< DWT FUNCTION: DATAVADDR1 Position */ +#define DWT_FUNCTION_DATAVADDR1_Msk (0xFUL << DWT_FUNCTION_DATAVADDR1_Pos) /*!< DWT FUNCTION: DATAVADDR1 Mask */ + +#define DWT_FUNCTION_DATAVADDR0_Pos 12U /*!< DWT FUNCTION: DATAVADDR0 Position */ +#define DWT_FUNCTION_DATAVADDR0_Msk (0xFUL << DWT_FUNCTION_DATAVADDR0_Pos) /*!< DWT FUNCTION: DATAVADDR0 Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_LNK1ENA_Pos 9U /*!< DWT FUNCTION: LNK1ENA Position */ +#define DWT_FUNCTION_LNK1ENA_Msk (0x1UL << DWT_FUNCTION_LNK1ENA_Pos) /*!< DWT FUNCTION: LNK1ENA Mask */ + +#define DWT_FUNCTION_DATAVMATCH_Pos 8U /*!< DWT FUNCTION: DATAVMATCH Position */ +#define DWT_FUNCTION_DATAVMATCH_Msk (0x1UL << DWT_FUNCTION_DATAVMATCH_Pos) /*!< DWT FUNCTION: DATAVMATCH Mask */ + +#define DWT_FUNCTION_CYCMATCH_Pos 7U /*!< DWT FUNCTION: CYCMATCH Position */ +#define DWT_FUNCTION_CYCMATCH_Msk (0x1UL << DWT_FUNCTION_CYCMATCH_Pos) /*!< DWT FUNCTION: CYCMATCH Mask */ + +#define DWT_FUNCTION_EMITRANGE_Pos 5U /*!< DWT FUNCTION: EMITRANGE Position */ +#define DWT_FUNCTION_EMITRANGE_Msk (0x1UL << DWT_FUNCTION_EMITRANGE_Pos) /*!< DWT FUNCTION: EMITRANGE Mask */ + +#define DWT_FUNCTION_FUNCTION_Pos 0U /*!< DWT FUNCTION: FUNCTION Position */ +#define DWT_FUNCTION_FUNCTION_Msk (0xFUL /*<< DWT_FUNCTION_FUNCTION_Pos*/) /*!< DWT FUNCTION: FUNCTION Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). 
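Before the TPI register map is completed below, a short usage sketch for the DWT block whose registers were just defined: the most common pattern is free-running cycle counting, which needs only the TRCENA bit in CoreDebug->DEMCR plus two DWT registers. This is a hedged sketch, not part of the commit; the function names are illustrative, CYCCNT is assumed to be implemented on the target (DWT_CTRL_NOCYCCNT reads 0), and some devices additionally require unlocking via DWT->LAR.

    /* Hedged sketch: start the DWT cycle counter and measure elapsed core cycles.
       Include the device header (which pulls in this core header) in real code. */
    static inline void dwt_cyccnt_start(void)
    {
      CoreDebug->DEMCR |= CoreDebug_DEMCR_TRCENA_Msk;   /* enable the DWT/ITM trace blocks */
      /* DWT->LAR = 0xC5ACCE55UL;  assumption: only needed on parts with a locked DWT */
      DWT->CYCCNT = 0U;                                 /* reset the counter */
      DWT->CTRL  |= DWT_CTRL_CYCCNTENA_Msk;             /* start counting core clock cycles */
    }

    static inline uint32_t dwt_cycles_since(uint32_t start)
    {
      return DWT->CYCCNT - start;                       /* unsigned subtraction stays wrap-safe */
    }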
+ */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IM uint32_t FSCR; /*!< Offset: 0x308 (R/ ) Formatter Synchronization Counter Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t FIFO0; /*!< Offset: 0xEEC (R/ ) Integration ETM Data */ + __IM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/ ) ITATBCTR2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) ITATBCTR0 */ + __IM uint32_t FIFO1; /*!< Offset: 0xEFC (R/ ) Integration ITM Data */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) TPIU_DEVID */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) TPIU_DEVTYPE */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration ETM Data Register Definitions (FIFO0) */ +#define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */ +#define TPI_FIFO0_ITM_ATVALID_Msk (0x1UL << TPI_FIFO0_ITM_ATVALID_Pos) 
/*!< TPI FIFO0: ITM_ATVALID Mask */ + +#define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */ +#define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */ + +#define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */ +#define TPI_FIFO0_ETM_ATVALID_Msk (0x1UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ + +#define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */ +#define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */ + +#define TPI_FIFO0_ETM2_Pos 16U /*!< TPI FIFO0: ETM2 Position */ +#define TPI_FIFO0_ETM2_Msk (0xFFUL << TPI_FIFO0_ETM2_Pos) /*!< TPI FIFO0: ETM2 Mask */ + +#define TPI_FIFO0_ETM1_Pos 8U /*!< TPI FIFO0: ETM1 Position */ +#define TPI_FIFO0_ETM1_Msk (0xFFUL << TPI_FIFO0_ETM1_Pos) /*!< TPI FIFO0: ETM1 Mask */ + +#define TPI_FIFO0_ETM0_Pos 0U /*!< TPI FIFO0: ETM0 Position */ +#define TPI_FIFO0_ETM0_Msk (0xFFUL /*<< TPI_FIFO0_ETM0_Pos*/) /*!< TPI FIFO0: ETM0 Mask */ + +/* TPI ITATBCTR2 Register Definitions */ +#define TPI_ITATBCTR2_ATREADY2_Pos 0U /*!< TPI ITATBCTR2: ATREADY2 Position */ +#define TPI_ITATBCTR2_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2_Pos*/) /*!< TPI ITATBCTR2: ATREADY2 Mask */ + +#define TPI_ITATBCTR2_ATREADY1_Pos 0U /*!< TPI ITATBCTR2: ATREADY1 Position */ +#define TPI_ITATBCTR2_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1_Pos*/) /*!< TPI ITATBCTR2: ATREADY1 Mask */ + +/* TPI Integration ITM Data Register Definitions (FIFO1) */ +#define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */ +#define TPI_FIFO1_ITM_ATVALID_Msk (0x1UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ + +#define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */ +#define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */ + +#define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */ +#define TPI_FIFO1_ETM_ATVALID_Msk (0x1UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ + +#define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */ +#define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) /*!< TPI FIFO1: ETM_bytecount Mask */ + +#define TPI_FIFO1_ITM2_Pos 16U /*!< TPI FIFO1: ITM2 Position */ +#define TPI_FIFO1_ITM2_Msk (0xFFUL << TPI_FIFO1_ITM2_Pos) /*!< TPI FIFO1: ITM2 Mask */ + +#define TPI_FIFO1_ITM1_Pos 8U /*!< TPI FIFO1: ITM1 Position */ +#define TPI_FIFO1_ITM1_Msk (0xFFUL << TPI_FIFO1_ITM1_Pos) /*!< TPI FIFO1: ITM1 Mask */ + +#define TPI_FIFO1_ITM0_Pos 0U /*!< TPI FIFO1: ITM0 Position */ +#define TPI_FIFO1_ITM0_Msk (0xFFUL /*<< TPI_FIFO1_ITM0_Pos*/) /*!< TPI FIFO1: ITM0 Mask */ + +/* TPI ITATBCTR0 Register Definitions */ +#define TPI_ITATBCTR0_ATREADY2_Pos 0U /*!< TPI ITATBCTR0: ATREADY2 Position */ +#define TPI_ITATBCTR0_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2_Pos*/) /*!< TPI ITATBCTR0: ATREADY2 Mask */ + +#define TPI_ITATBCTR0_ATREADY1_Pos 0U /*!< TPI ITATBCTR0: ATREADY1 Position */ +#define TPI_ITATBCTR0_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1_Pos*/) /*!< TPI ITATBCTR0: ATREADY1 Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define 
TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_MinBufSz_Pos 6U /*!< TPI DEVID: MinBufSz Position */ +#define TPI_DEVID_MinBufSz_Msk (0x7UL << TPI_DEVID_MinBufSz_Pos) /*!< TPI DEVID: MinBufSz Mask */ + +#define TPI_DEVID_AsynClkIn_Pos 5U /*!< TPI DEVID: AsynClkIn Position */ +#define TPI_DEVID_AsynClkIn_Msk (0x1UL << TPI_DEVID_AsynClkIn_Pos) /*!< TPI DEVID: AsynClkIn Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define TPI_DEVID_NrTraceInput_Msk (0x1FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
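To show how the RNR, RBAR and RASR fields of the MPU register block defined below combine, here is a hedged sketch of programming a single region. The region number, base address, size and attributes are illustrative values only, not anything taken from this commit; the same result can also be achieved with the ARM_MPU_* helpers from mpu_armv7.h, which this header includes further down.

    /* Hedged sketch: map region 0 as 32 KB of normal, write-back, read/write SRAM,
       then enable the MPU with the default memory map as privileged background. */
    void mpu_region0_example(void)
    {
      __DMB();
      MPU->CTRL = 0U;                                            /* disable MPU while reconfiguring */
      MPU->RNR  = 0U;                                            /* select region 0 */
      MPU->RBAR = 0x20000000UL & MPU_RBAR_ADDR_Msk;              /* example base, aligned to 32 KB */
      MPU->RASR = (0x3UL << MPU_RASR_AP_Pos)  |                  /* AP = 011: full access */
                  MPU_RASR_C_Msk | MPU_RASR_B_Msk |              /* TEX=000, C=1, B=1: normal, write-back */
                  (14UL << MPU_RASR_SIZE_Pos) |                  /* size = 2^(14+1) = 32 KB */
                  MPU_RASR_ENABLE_Msk;                           /* enable the region */
      MPU->CTRL = MPU_CTRL_PRIVDEFENA_Msk | MPU_CTRL_ENABLE_Msk; /* enable MPU, keep default map */
      __DSB();
      __ISB();
    }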
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region RNRber Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Alias 1 Region Base Address Register */ + __IOM uint32_t RASR_A1; /*!< Offset: 0x018 (R/W) MPU Alias 1 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Alias 2 Region Base Address Register */ + __IOM uint32_t RASR_A2; /*!< Offset: 0x020 (R/W) MPU Alias 2 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Alias 3 Region Base Address Register */ + __IOM uint32_t RASR_A3; /*!< Offset: 0x028 (R/W) MPU Alias 3 Region Attribute and Size Register */ +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 5U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0x7FFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/* MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX 
Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif /* defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and FP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and FP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and FP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< 
FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/* Media and FP Feature Register 0 Definitions */ +#define FPU_MVFR0_FP_rounding_modes_Pos 28U /*!< MVFR0: FP rounding modes bits Position */ +#define FPU_MVFR0_FP_rounding_modes_Msk (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos) /*!< MVFR0: FP rounding modes bits Mask */ + +#define FPU_MVFR0_Short_vectors_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_Short_vectors_Msk (0xFUL << FPU_MVFR0_Short_vectors_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_Square_root_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_Square_root_Msk (0xFUL << FPU_MVFR0_Square_root_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_Divide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_Divide_Msk (0xFUL << FPU_MVFR0_Divide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FP_excep_trapping_Pos 12U /*!< MVFR0: FP exception trapping bits Position */ +#define FPU_MVFR0_FP_excep_trapping_Msk (0xFUL << FPU_MVFR0_FP_excep_trapping_Pos) /*!< MVFR0: FP exception trapping bits Mask */ + +#define FPU_MVFR0_Double_precision_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_Double_precision_Msk (0xFUL << FPU_MVFR0_Double_precision_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_Single_precision_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_Single_precision_Msk (0xFUL << FPU_MVFR0_Single_precision_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_A_SIMD_registers_Pos 0U /*!< MVFR0: A_SIMD registers bits Position */ +#define FPU_MVFR0_A_SIMD_registers_Msk (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/) /*!< MVFR0: A_SIMD registers bits Mask */ + +/* Media and FP Feature Register 1 Definitions */ +#define FPU_MVFR1_FP_fused_MAC_Pos 28U /*!< MVFR1: FP fused MAC bits Position */ +#define FPU_MVFR1_FP_fused_MAC_Msk (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos) /*!< MVFR1: FP fused MAC bits Mask */ + +#define FPU_MVFR1_FP_HPFP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FP_HPFP_Msk (0xFUL << FPU_MVFR1_FP_HPFP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_D_NaN_mode_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_D_NaN_mode_Msk (0xFUL << FPU_MVFR1_D_NaN_mode_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + +#define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/* Media and FP Feature Register 2 Definitions */ + +#define FPU_MVFR2_VFP_Misc_Pos 4U /*!< MVFR2: VFP Misc bits Position */ +#define FPU_MVFR2_VFP_Misc_Msk (0xFUL << FPU_MVFR2_VFP_Misc_Pos) /*!< MVFR2: VFP Misc bits Mask */ + +/*@} end of group 
CMSIS_FPU */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< 
CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. 
This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ +#define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ +#define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ +#define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ +#define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ +#define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ +#define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ +#define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE) /*!< Core Debug configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +#define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ +#define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
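As a usage sketch for the NVIC functions mapped below: a typical bring-up sets the priority grouping once, encodes a priority for the chosen grouping, then assigns and enables the interrupt. This is illustrative only; TIM2_IRQn stands in for any device-specific IRQ number from the device header and is an assumption, not something defined in this file.

    /* Hedged sketch: typical NVIC setup for one peripheral interrupt. */
    void nvic_setup_example(void)
    {
      NVIC_SetPriorityGrouping(3U);                     /* on a 4-bit device: 4 preempt bits, 0 sub bits */
      uint32_t prio = NVIC_EncodePriority(3U, 1U, 0U);  /* preempt priority 1, sub-priority 0 */
      NVIC_SetPriority(TIM2_IRQn, prio);                /* TIM2_IRQn: example IRQ from the device header */
      NVIC_EnableIRQ(TIM2_IRQn);
    }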
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ +#define EXC_RETURN_HANDLER_FPU (0xFFFFFFE1UL) /* return to Handler mode, uses MSP after return, restore floating-point state */ +#define EXC_RETURN_THREAD_MSP_FPU (0xFFFFFFE9UL) /* return to Thread mode, uses MSP after return, restore floating-point state */ +#define EXC_RETURN_THREAD_PSP_FPU (0xFFFFFFEDUL) /* return to Thread mode, uses PSP after return, restore floating-point state */ + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. 
+ \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IP[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv7.h" + +#endif + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. 
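A short sketch of how the FPU helper defined just below is typically used together with the coprocessor enable sequence. The CP10/CP11 access bits live in SCB->CPACR (bits 20..23); the function name is illustrative, and the #if guard simply mirrors the __FPU_USED convention established earlier in this header.

    /* Hedged sketch: grant full access to CP10/CP11 before executing any
       floating-point instruction, then branch on the reported FPU type. */
    void fpu_bringup_example(void)
    {
    #if defined (__FPU_USED) && (__FPU_USED == 1U)
      SCB->CPACR |= (3UL << 20U) | (3UL << 22U);   /* CP10 and CP11: full access */
      __DSB();
      __ISB();
    #endif
      if (SCB_GetFPUType() == 0U)
      {
        /* no FPU implemented: keep to integer or software floating-point paths */
      }
    }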
+ @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = SCB->MVFR0; + if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + +/*@} end of CMSIS_Core_FpuFunctions */ + + +/* ########################## Cache functions #################################### */ + +#if ((defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)) || \ + (defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U))) +#include "cachel1_armv7.h" +#endif + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. 
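The usual way to consume ITM_SendChar, defined directly below, is to retarget the C library's low-level character output to it so that printf() appears on the SWO/ITM console. The hook name differs per toolchain (__io_putchar, fputc, _write, ...), so the one used in this hedged sketch is an assumption; the debugger must also have set CoreDebug->DEMCR TRCENA and enabled stimulus port 0 for any output to appear.

    /* Hedged sketch: route printf() output to ITM stimulus port 0.
       __io_putchar is the hook used by some STM32 toolchains; adapt to yours. */
    int __io_putchar(int ch)
    {
      return (int)ITM_SendChar((uint32_t)ch);   /* returns immediately if no debugger has claimed the port */
    }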
+ */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM7_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/Drivers/CMSIS/Include/core_cm85.h b/Drivers/CMSIS/Include/core_cm85.h new file mode 100644 index 0000000..6046311 --- /dev/null +++ b/Drivers/CMSIS/Include/core_cm85.h @@ -0,0 +1,4672 @@ +/**************************************************************************//** + * @file core_cm85.h + * @brief CMSIS Cortex-M85 Core Peripheral Access Layer Header File + * @version V1.0.4 + * @date 21. April 2022 + ******************************************************************************/ +/* + * Copyright (c) 2022 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM85_H_GENERIC +#define __CORE_CM85_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M85 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM85 definitions */ + +#define __CORTEX_M (85U) /*!< Cortex-M Core */ + +#if defined ( __CC_ARM ) + #error Legacy Arm Compiler does not support Armv8.1-M target architecture. +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM85_H_GENERIC */ + +#ifndef 
__CMSIS_GENERIC + +#ifndef __CORE_CM85_H_DEPENDANT +#define __CORE_CM85_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM85_REV + #define __CM85_REV 0x0001U + #warning "__CM85_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #if __FPU_PRESENT != 0U + #ifndef __FPU_DP + #define __FPU_DP 0U + #warning "__FPU_DP not defined in device header file; using default!" + #endif + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __ICACHE_PRESENT + #define __ICACHE_PRESENT 0U + #warning "__ICACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DCACHE_PRESENT + #define __DCACHE_PRESENT 0U + #warning "__DCACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __PMU_PRESENT + #define __PMU_PRESENT 0U + #warning "__PMU_PRESENT not defined in device header file; using default!" + #endif + + #if __PMU_PRESENT != 0U + #ifndef __PMU_NUM_EVENTCNT + #define __PMU_NUM_EVENTCNT 8U + #warning "__PMU_NUM_EVENTCNT not defined in device header file; using default!" + #elif (__PMU_NUM_EVENTCNT > 8 || __PMU_NUM_EVENTCNT < 2) + #error "__PMU_NUM_EVENTCNT is out of range in device header file!" */ + #endif + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M85 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core EWIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core PMU Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:1; /*!< bit: 20 Reserved */ + uint32_t B:1; /*!< bit: 21 BTI active (read 0) */ + uint32_t _reserved2:2; /*!< bit: 22..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_B_Pos 21U /*!< xPSR: B Position */ +#define xPSR_B_Msk (1UL << xPSR_B_Pos) /*!< xPSR: B Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). 
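+
+         A minimal sketch of decoding this register through the union below, assuming the
+         __get_CONTROL() intrinsic made available via "cmsis_compiler.h":
+         \code
+           CONTROL_Type ctrl;
+           ctrl.w = __get_CONTROL();      // read the current CONTROL value
+           if (ctrl.b.SPSEL != 0U)
+           {
+             // Thread mode is currently running on the process stack pointer (PSP)
+           }
+         \endcode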
+ */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t BTI_EN:1; /*!< bit: 4 Privileged branch target identification enable */ + uint32_t UBTI_EN:1; /*!< bit: 5 Unprivileged branch target identification enable */ + uint32_t PAC_EN:1; /*!< bit: 6 Privileged pointer authentication enable */ + uint32_t UPAC_EN:1; /*!< bit: 7 Unprivileged pointer authentication enable */ + uint32_t _reserved1:24; /*!< bit: 8..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_UPAC_EN_Pos 7U /*!< CONTROL: UPAC_EN Position */ +#define CONTROL_UPAC_EN_Msk (1UL << CONTROL_UPAC_EN_Pos) /*!< CONTROL: UPAC_EN Mask */ + +#define CONTROL_PAC_EN_Pos 6U /*!< CONTROL: PAC_EN Position */ +#define CONTROL_PAC_EN_Msk (1UL << CONTROL_PAC_EN_Pos) /*!< CONTROL: PAC_EN Mask */ + +#define CONTROL_UBTI_EN_Pos 5U /*!< CONTROL: UBTI_EN Position */ +#define CONTROL_UBTI_EN_Msk (1UL << CONTROL_UBTI_EN_Pos) /*!< CONTROL: UBTI_EN Mask */ + +#define CONTROL_BTI_EN_Pos 4U /*!< CONTROL: BTI_EN Position */ +#define CONTROL_BTI_EN_Msk (1UL << CONTROL_BTI_EN_Pos) /*!< CONTROL: BTI_EN Mask */ + +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
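+
+         A minimal sketch of enabling one interrupt line directly through ISER, assuming
+         'irq' holds a device-specific IRQn_Type value; the NVIC_EnableIRQ() helper
+         provided further down in this header wraps the same access:
+         \code
+           NVIC->ISER[((uint32_t)irq) >> 5UL] = (uint32_t)(1UL << (((uint32_t)irq) & 0x1FUL));
+         \endcode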
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t 
RESERVED7[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + __IOM uint32_t RFSR; /*!< Offset: 0x204 (R/W) RAS Fault Status Register */ + uint32_t RESERVED4[14U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position 
*/ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_IESB_Pos 5U /*!< SCB AIRCR: Implicit ESB Enable Position */ +#define SCB_AIRCR_IESB_Msk (1UL << SCB_AIRCR_IESB_Pos) /*!< SCB AIRCR: Implicit ESB Enable Mask */ + +#define SCB_AIRCR_DIT_Pos 4U /*!< SCB AIRCR: Data Independent Timing Position */ +#define SCB_AIRCR_DIT_Msk (1UL << SCB_AIRCR_DIT_Pos) /*!< SCB AIRCR: Data Independent Timing Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define 
SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_TRD_Pos 20U /*!< SCB CCR: TRD Position */ +#define SCB_CCR_TRD_Msk (1UL << SCB_CCR_TRD_Pos) /*!< SCB CCR: TRD Mask */ + +#define SCB_CCR_LOB_Pos 19U /*!< SCB CCR: LOB Position */ +#define SCB_CCR_LOB_Msk (1UL << SCB_CCR_LOB_Pos) /*!< SCB CCR: LOB Mask */ + +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: 
MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status 
Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos 
(SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_PMU_Pos 5U /*!< SCB DFSR: PMU Position */ +#define SCB_DFSR_PMU_Msk (1UL << SCB_DFSR_PMU_Pos) /*!< SCB DFSR: PMU Mask */ + +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CP7_Pos 7U /*!< SCB NSACR: CP7 Position */ +#define SCB_NSACR_CP7_Msk (1UL << SCB_NSACR_CP7_Pos) /*!< SCB NSACR: CP7 Mask */ + +#define SCB_NSACR_CP6_Pos 6U /*!< SCB NSACR: CP6 Position */ +#define SCB_NSACR_CP6_Msk (1UL << SCB_NSACR_CP6_Pos) /*!< SCB NSACR: CP6 Mask */ + +#define SCB_NSACR_CP5_Pos 5U /*!< SCB NSACR: CP5 Position */ +#define SCB_NSACR_CP5_Msk (1UL << SCB_NSACR_CP5_Pos) /*!< SCB NSACR: CP5 Mask */ + +#define SCB_NSACR_CP4_Pos 4U /*!< SCB NSACR: CP4 Position */ +#define SCB_NSACR_CP4_Msk (1UL << SCB_NSACR_CP4_Pos) /*!< SCB NSACR: CP4 Mask */ + +#define SCB_NSACR_CP3_Pos 3U /*!< SCB NSACR: CP3 Position */ +#define SCB_NSACR_CP3_Msk (1UL << SCB_NSACR_CP3_Pos) /*!< SCB NSACR: CP3 Mask */ + +#define SCB_NSACR_CP2_Pos 2U /*!< SCB NSACR: CP2 Position */ +#define SCB_NSACR_CP2_Msk (1UL << SCB_NSACR_CP2_Pos) /*!< SCB NSACR: CP2 Mask */ + +#define 
SCB_NSACR_CP1_Pos 1U /*!< SCB NSACR: CP1 Position */ +#define SCB_NSACR_CP1_Msk (1UL << SCB_NSACR_CP1_Pos) /*!< SCB NSACR: CP1 Mask */ + +#define SCB_NSACR_CP0_Pos 0U /*!< SCB NSACR: CP0 Position */ +#define SCB_NSACR_CP0_Msk (1UL /*<< SCB_NSACR_CP0_Pos*/) /*!< SCB NSACR: CP0 Mask */ + +/* SCB Debug Feature Register 0 Definitions */ +#define SCB_ID_DFR_UDE_Pos 28U /*!< SCB ID_DFR: UDE Position */ +#define SCB_ID_DFR_UDE_Msk (0xFUL << SCB_ID_DFR_UDE_Pos) /*!< SCB ID_DFR: UDE Mask */ + +#define SCB_ID_DFR_MProfDbg_Pos 20U /*!< SCB ID_DFR: MProfDbg Position */ +#define SCB_ID_DFR_MProfDbg_Msk (0xFUL << SCB_ID_DFR_MProfDbg_Pos) /*!< SCB ID_DFR: MProfDbg Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< 
SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB RAS Fault Status Register Definitions */ +#define SCB_RFSR_V_Pos 31U /*!< SCB RFSR: V Position */ +#define SCB_RFSR_V_Msk (1UL << SCB_RFSR_V_Pos) /*!< SCB RFSR: V Mask */ + +#define SCB_RFSR_IS_Pos 16U /*!< SCB RFSR: IS Position */ +#define SCB_RFSR_IS_Msk (0x7FFFUL << SCB_RFSR_IS_Pos) /*!< SCB RFSR: IS Mask */ + +#define SCB_RFSR_UET_Pos 0U /*!< SCB RFSR: UET Position */ +#define SCB_RFSR_UET_Msk (3UL /*<< SCB_RFSR_UET_Pos*/) /*!< SCB RFSR: UET Mask */ + +/* SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ICB Implementation Control Block register (ICB) + \brief Type definitions for the Implementation Control Block Register + @{ + */ + +/** + \brief Structure type to access the Implementation Control Block (ICB). 
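+
+         A minimal sketch of querying the interrupt controller type, assuming the ICB
+         instance pointer defined later in this header; ICTR.INTLINESNUM encodes the
+         number of supported interrupt lines in groups of 32:
+         \code
+           uint32_t max_irq_lines = 32U * ((ICB->ICTR & ICB_ICTR_INTLINESNUM_Msk) + 1U);
+         \endcode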
+ */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} ICB_Type; + +/* Auxiliary Control Register Definitions */ +#define ICB_ACTLR_DISCRITAXIRUW_Pos 27U /*!< ACTLR: DISCRITAXIRUW Position */ +#define ICB_ACTLR_DISCRITAXIRUW_Msk (1UL << ICB_ACTLR_DISCRITAXIRUW_Pos) /*!< ACTLR: DISCRITAXIRUW Mask */ + +#define ICB_ACTLR_DISCRITAXIRUR_Pos 15U /*!< ACTLR: DISCRITAXIRUR Position */ +#define ICB_ACTLR_DISCRITAXIRUR_Msk (1UL << ICB_ACTLR_DISCRITAXIRUR_Pos) /*!< ACTLR: DISCRITAXIRUR Mask */ + +#define ICB_ACTLR_EVENTBUSEN_Pos 14U /*!< ACTLR: EVENTBUSEN Position */ +#define ICB_ACTLR_EVENTBUSEN_Msk (1UL << ICB_ACTLR_EVENTBUSEN_Pos) /*!< ACTLR: EVENTBUSEN Mask */ + +#define ICB_ACTLR_EVENTBUSEN_S_Pos 13U /*!< ACTLR: EVENTBUSEN_S Position */ +#define ICB_ACTLR_EVENTBUSEN_S_Msk (1UL << ICB_ACTLR_EVENTBUSEN_S_Pos) /*!< ACTLR: EVENTBUSEN_S Mask */ + +#define ICB_ACTLR_DISITMATBFLUSH_Pos 12U /*!< ACTLR: DISITMATBFLUSH Position */ +#define ICB_ACTLR_DISITMATBFLUSH_Msk (1UL << ICB_ACTLR_DISITMATBFLUSH_Pos) /*!< ACTLR: DISITMATBFLUSH Mask */ + +#define ICB_ACTLR_DISNWAMODE_Pos 11U /*!< ACTLR: DISNWAMODE Position */ +#define ICB_ACTLR_DISNWAMODE_Msk (1UL << ICB_ACTLR_DISNWAMODE_Pos) /*!< ACTLR: DISNWAMODE Mask */ + +#define ICB_ACTLR_FPEXCODIS_Pos 10U /*!< ACTLR: FPEXCODIS Position */ +#define ICB_ACTLR_FPEXCODIS_Msk (1UL << ICB_ACTLR_FPEXCODIS_Pos) /*!< ACTLR: FPEXCODIS Mask */ + +/* Interrupt Controller Type Register Definitions */ +#define ICB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define ICB_ICTR_INTLINESNUM_Msk (0xFUL /*<< ICB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_ICB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
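+
+         A minimal configuration sketch for a 1 ms tick, assuming the SystemCoreClock
+         variable from the device's system file; the SysTick_Config() helper defined
+         later in this header performs the equivalent sequence and also sets the
+         exception priority:
+         \code
+           SysTick->LOAD = (SystemCoreClock / 1000U) - 1U;  // reload value for a 1 ms period
+           SysTick->VAL  = 0U;                              // clear the current count
+           SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk |     // use the processor clock
+                           SysTick_CTRL_TICKINT_Msk   |     // enable the SysTick exception
+                           SysTick_CTRL_ENABLE_Msk;         // start the counter
+         \endcode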
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
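+
+         A minimal sketch of writing one byte to stimulus port 0, following the same
+         polling sequence as the ITM_SendChar() helper defined later in this header
+         (assumes the debugger has enabled the ITM and stimulus port 0):
+         \code
+           if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) &&  // ITM enabled
+               ((ITM->TER & 1UL)                != 0UL))    // stimulus port 0 enabled
+           {
+             while (ITM->PORT[0U].u32 == 0UL)               // wait for FIFOREADY
+             {
+               __NOP();
+             }
+             ITM->PORT[0U].u8 = (uint8_t)'A';               // write one character
+           }
+         \endcode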
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) ITM Device Architecture Register */ + uint32_t RESERVED6[3U]; + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) ITM Device Type Register */ + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (0x1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (0x1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: 
STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + 
__IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ + uint32_t RESERVED32[934U]; + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ + uint32_t RESERVED33[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (0x1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ 
+#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define 
DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x1UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup MemSysCtl_Type Memory System Control Registers (IMPLEMENTATION DEFINED) + \brief Type definitions for the Memory System Control Registers (MEMSYSCTL) + @{ + */ + +/** + \brief Structure type to access the Memory System Control Registers (MEMSYSCTL). + */ +typedef struct +{ + __IOM uint32_t MSCR; /*!< Offset: 0x000 (R/W) Memory System Control Register */ + __IOM uint32_t PFCR; /*!< Offset: 0x004 (R/W) Prefetcher Control Register */ + uint32_t RESERVED1[2U]; + __IOM uint32_t ITCMCR; /*!< Offset: 0x010 (R/W) ITCM Control Register */ + __IOM uint32_t DTCMCR; /*!< Offset: 0x014 (R/W) DTCM Control Register */ + __IOM uint32_t PAHBCR; /*!< Offset: 0x018 (R/W) P-AHB Control Register */ + uint32_t RESERVED2[313U]; + __IOM uint32_t ITGU_CTRL; /*!< Offset: 0x500 (R/W) ITGU Control Register */ + __IOM uint32_t ITGU_CFG; /*!< Offset: 0x504 (R/W) ITGU Configuration Register */ + uint32_t RESERVED3[2U]; + __IOM uint32_t ITGU_LUT[16U]; /*!< Offset: 0x510 (R/W) ITGU Look Up Table Register */ + uint32_t RESERVED4[44U]; + __IOM uint32_t DTGU_CTRL; /*!< Offset: 0x600 (R/W) DTGU Control Registers */ + __IOM uint32_t DTGU_CFG; /*!< Offset: 0x604 (R/W) DTGU Configuration Register */ + uint32_t RESERVED5[2U]; + __IOM uint32_t DTGU_LUT[16U]; /*!< Offset: 0x610 (R/W) DTGU Look Up Table Register */ +} MemSysCtl_Type; + +/* MEMSYSCTL Memory System Control Register (MSCR) Register Definitions */ +#define MEMSYSCTL_MSCR_CPWRDN_Pos 17U /*!< MEMSYSCTL MSCR: CPWRDN Position */ +#define MEMSYSCTL_MSCR_CPWRDN_Msk (0x1UL << MEMSYSCTL_MSCR_CPWRDN_Pos) /*!< MEMSYSCTL MSCR: CPWRDN Mask */ + +#define MEMSYSCTL_MSCR_DCCLEAN_Pos 16U /*!< MEMSYSCTL MSCR: DCCLEAN Position */ +#define MEMSYSCTL_MSCR_DCCLEAN_Msk (0x1UL << MEMSYSCTL_MSCR_DCCLEAN_Pos) /*!< MEMSYSCTL MSCR: DCCLEAN Mask */ + +#define MEMSYSCTL_MSCR_ICACTIVE_Pos 13U /*!< MEMSYSCTL MSCR: ICACTIVE Position */ +#define MEMSYSCTL_MSCR_ICACTIVE_Msk (0x1UL << MEMSYSCTL_MSCR_ICACTIVE_Pos) /*!< MEMSYSCTL MSCR: ICACTIVE Mask */ + +#define MEMSYSCTL_MSCR_DCACTIVE_Pos 12U /*!< MEMSYSCTL MSCR: DCACTIVE Position */ +#define MEMSYSCTL_MSCR_DCACTIVE_Msk (0x1UL << MEMSYSCTL_MSCR_DCACTIVE_Pos) /*!< MEMSYSCTL MSCR: DCACTIVE Mask */ + +#define MEMSYSCTL_MSCR_EVECCFAULT_Pos 3U /*!< MEMSYSCTL MSCR: EVECCFAULT Position */ +#define MEMSYSCTL_MSCR_EVECCFAULT_Msk (0x1UL << MEMSYSCTL_MSCR_EVECCFAULT_Pos) /*!< MEMSYSCTL MSCR: EVECCFAULT Mask */ + +#define MEMSYSCTL_MSCR_FORCEWT_Pos 2U /*!< MEMSYSCTL MSCR: FORCEWT Position */ +#define MEMSYSCTL_MSCR_FORCEWT_Msk (0x1UL << MEMSYSCTL_MSCR_FORCEWT_Pos) /*!< MEMSYSCTL MSCR: FORCEWT Mask */ + +#define MEMSYSCTL_MSCR_ECCEN_Pos 1U /*!< MEMSYSCTL MSCR: ECCEN Position */ +#define MEMSYSCTL_MSCR_ECCEN_Msk (0x1UL << MEMSYSCTL_MSCR_ECCEN_Pos) /*!< MEMSYSCTL MSCR: ECCEN Mask */ + +/* MEMSYSCTL Prefetcher Control Register (PFCR) Register Definitions */ +#define MEMSYSCTL_PFCR_DIS_NLP_Pos 7U /*!< MEMSYSCTL PFCR: DIS_NLP Position */ +#define 
MEMSYSCTL_PFCR_DIS_NLP_Msk (0x1UL << MEMSYSCTL_PFCR_DIS_NLP_Pos) /*!< MEMSYSCTL PFCR: DIS_NLP Mask */ + +#define MEMSYSCTL_PFCR_ENABLE_Pos 0U /*!< MEMSYSCTL PFCR: ENABLE Position */ +#define MEMSYSCTL_PFCR_ENABLE_Msk (0x1UL /*<< MEMSYSCTL_PFCR_ENABLE_Pos*/) /*!< MEMSYSCTL PFCR: ENABLE Mask */ + +/* MEMSYSCTL ITCM Control Register (ITCMCR) Register Definitions */ +#define MEMSYSCTL_ITCMCR_SZ_Pos 3U /*!< MEMSYSCTL ITCMCR: SZ Position */ +#define MEMSYSCTL_ITCMCR_SZ_Msk (0xFUL << MEMSYSCTL_ITCMCR_SZ_Pos) /*!< MEMSYSCTL ITCMCR: SZ Mask */ + +#define MEMSYSCTL_ITCMCR_EN_Pos 0U /*!< MEMSYSCTL ITCMCR: EN Position */ +#define MEMSYSCTL_ITCMCR_EN_Msk (0x1UL /*<< MEMSYSCTL_ITCMCR_EN_Pos*/) /*!< MEMSYSCTL ITCMCR: EN Mask */ + +/* MEMSYSCTL DTCM Control Register (DTCMCR) Register Definitions */ +#define MEMSYSCTL_DTCMCR_SZ_Pos 3U /*!< MEMSYSCTL DTCMCR: SZ Position */ +#define MEMSYSCTL_DTCMCR_SZ_Msk (0xFUL << MEMSYSCTL_DTCMCR_SZ_Pos) /*!< MEMSYSCTL DTCMCR: SZ Mask */ + +#define MEMSYSCTL_DTCMCR_EN_Pos 0U /*!< MEMSYSCTL DTCMCR: EN Position */ +#define MEMSYSCTL_DTCMCR_EN_Msk (0x1UL /*<< MEMSYSCTL_DTCMCR_EN_Pos*/) /*!< MEMSYSCTL DTCMCR: EN Mask */ + +/* MEMSYSCTL P-AHB Control Register (PAHBCR) Register Definitions */ +#define MEMSYSCTL_PAHBCR_SZ_Pos 1U /*!< MEMSYSCTL PAHBCR: SZ Position */ +#define MEMSYSCTL_PAHBCR_SZ_Msk (0x7UL << MEMSYSCTL_PAHBCR_SZ_Pos) /*!< MEMSYSCTL PAHBCR: SZ Mask */ + +#define MEMSYSCTL_PAHBCR_EN_Pos 0U /*!< MEMSYSCTL PAHBCR: EN Position */ +#define MEMSYSCTL_PAHBCR_EN_Msk (0x1UL /*<< MEMSYSCTL_PAHBCR_EN_Pos*/) /*!< MEMSYSCTL PAHBCR: EN Mask */ + +/* MEMSYSCTL ITGU Control Register (ITGU_CTRL) Register Definitions */ +#define MEMSYSCTL_ITGU_CTRL_DEREN_Pos 1U /*!< MEMSYSCTL ITGU_CTRL: DEREN Position */ +#define MEMSYSCTL_ITGU_CTRL_DEREN_Msk (0x1UL << MEMSYSCTL_ITGU_CTRL_DEREN_Pos) /*!< MEMSYSCTL ITGU_CTRL: DEREN Mask */ + +#define MEMSYSCTL_ITGU_CTRL_DBFEN_Pos 0U /*!< MEMSYSCTL ITGU_CTRL: DBFEN Position */ +#define MEMSYSCTL_ITGU_CTRL_DBFEN_Msk (0x1UL /*<< MEMSYSCTL_ITGU_CTRL_DBFEN_Pos*/) /*!< MEMSYSCTL ITGU_CTRL: DBFEN Mask */ + +/* MEMSYSCTL ITGU Configuration Register (ITGU_CFG) Register Definitions */ +#define MEMSYSCTL_ITGU_CFG_PRESENT_Pos 31U /*!< MEMSYSCTL ITGU_CFG: PRESENT Position */ +#define MEMSYSCTL_ITGU_CFG_PRESENT_Msk (0x1UL << MEMSYSCTL_ITGU_CFG_PRESENT_Pos) /*!< MEMSYSCTL ITGU_CFG: PRESENT Mask */ + +#define MEMSYSCTL_ITGU_CFG_NUMBLKS_Pos 8U /*!< MEMSYSCTL ITGU_CFG: NUMBLKS Position */ +#define MEMSYSCTL_ITGU_CFG_NUMBLKS_Msk (0xFUL << MEMSYSCTL_ITGU_CFG_NUMBLKS_Pos) /*!< MEMSYSCTL ITGU_CFG: NUMBLKS Mask */ + +#define MEMSYSCTL_ITGU_CFG_BLKSZ_Pos 0U /*!< MEMSYSCTL ITGU_CFG: BLKSZ Position */ +#define MEMSYSCTL_ITGU_CFG_BLKSZ_Msk (0xFUL /*<< MEMSYSCTL_ITGU_CFG_BLKSZ_Pos*/) /*!< MEMSYSCTL ITGU_CFG: BLKSZ Mask */ + +/* MEMSYSCTL DTGU Control Registers (DTGU_CTRL) Register Definitions */ +#define MEMSYSCTL_DTGU_CTRL_DEREN_Pos 1U /*!< MEMSYSCTL DTGU_CTRL: DEREN Position */ +#define MEMSYSCTL_DTGU_CTRL_DEREN_Msk (0x1UL << MEMSYSCTL_DTGU_CTRL_DEREN_Pos) /*!< MEMSYSCTL DTGU_CTRL: DEREN Mask */ + +#define MEMSYSCTL_DTGU_CTRL_DBFEN_Pos 0U /*!< MEMSYSCTL DTGU_CTRL: DBFEN Position */ +#define MEMSYSCTL_DTGU_CTRL_DBFEN_Msk (0x1UL /*<< MEMSYSCTL_DTGU_CTRL_DBFEN_Pos*/) /*!< MEMSYSCTL DTGU_CTRL: DBFEN Mask */ + +/* MEMSYSCTL DTGU Configuration Register (DTGU_CFG) Register Definitions */ +#define MEMSYSCTL_DTGU_CFG_PRESENT_Pos 31U /*!< MEMSYSCTL DTGU_CFG: PRESENT Position */ +#define MEMSYSCTL_DTGU_CFG_PRESENT_Msk (0x1UL << MEMSYSCTL_DTGU_CFG_PRESENT_Pos) /*!< MEMSYSCTL DTGU_CFG: 
PRESENT Mask */
+
+#define MEMSYSCTL_DTGU_CFG_NUMBLKS_Pos      8U                                      /*!< MEMSYSCTL DTGU_CFG: NUMBLKS Position */
+#define MEMSYSCTL_DTGU_CFG_NUMBLKS_Msk     (0xFUL << MEMSYSCTL_DTGU_CFG_NUMBLKS_Pos)    /*!< MEMSYSCTL DTGU_CFG: NUMBLKS Mask */
+
+#define MEMSYSCTL_DTGU_CFG_BLKSZ_Pos        0U                                      /*!< MEMSYSCTL DTGU_CFG: BLKSZ Position */
+#define MEMSYSCTL_DTGU_CFG_BLKSZ_Msk       (0xFUL /*<< MEMSYSCTL_DTGU_CFG_BLKSZ_Pos*/)    /*!< MEMSYSCTL DTGU_CFG: BLKSZ Mask */
+
+
+/*@}*/ /* end of group MemSysCtl_Type */
+
+
+/**
+  \ingroup    CMSIS_core_register
+  \defgroup   PwrModCtl_Type     Power Mode Control Registers
+  \brief      Type definitions for the Power Mode Control Registers (PWRMODCTL)
+  @{
+ */
+
+/**
+  \brief  Structure type to access the Power Mode Control Registers (PWRMODCTL).
+ */
+typedef struct
+{
+  __IOM uint32_t CPDLPSTATE;    /*!< Offset: 0x000 (R/W)  Core Power Domain Low Power State Register */
+  __IOM uint32_t DPDLPSTATE;    /*!< Offset: 0x004 (R/W)  Debug Power Domain Low Power State Register */
+} PwrModCtl_Type;
+
+/* PWRMODCTL Core Power Domain Low Power State (CPDLPSTATE) Register Definitions */
+#define PWRMODCTL_CPDLPSTATE_RLPSTATE_Pos   8U                                      /*!< PWRMODCTL CPDLPSTATE: RLPSTATE Position */
+#define PWRMODCTL_CPDLPSTATE_RLPSTATE_Msk  (0x3UL << PWRMODCTL_CPDLPSTATE_RLPSTATE_Pos)    /*!< PWRMODCTL CPDLPSTATE: RLPSTATE Mask */
+
+#define PWRMODCTL_CPDLPSTATE_ELPSTATE_Pos   4U                                      /*!< PWRMODCTL CPDLPSTATE: ELPSTATE Position */
+#define PWRMODCTL_CPDLPSTATE_ELPSTATE_Msk  (0x3UL << PWRMODCTL_CPDLPSTATE_ELPSTATE_Pos)    /*!< PWRMODCTL CPDLPSTATE: ELPSTATE Mask */
+
+#define PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos   0U                                      /*!< PWRMODCTL CPDLPSTATE: CLPSTATE Position */
+#define PWRMODCTL_CPDLPSTATE_CLPSTATE_Msk  (0x3UL /*<< PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos*/)    /*!< PWRMODCTL CPDLPSTATE: CLPSTATE Mask */
+
+/* PWRMODCTL Debug Power Domain Low Power State (DPDLPSTATE) Register Definitions */
+#define PWRMODCTL_DPDLPSTATE_DLPSTATE_Pos   0U                                      /*!< PWRMODCTL DPDLPSTATE: DLPSTATE Position */
+#define PWRMODCTL_DPDLPSTATE_DLPSTATE_Msk  (0x3UL /*<< PWRMODCTL_DPDLPSTATE_DLPSTATE_Pos*/)    /*!< PWRMODCTL DPDLPSTATE: DLPSTATE Mask */
+
+/*@}*/ /* end of group PwrModCtl_Type */
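/*
 * Illustrative usage sketch (editorial addition, not part of the CMSIS header):
 * the PWRMODCTL block defined above lets software state how deeply the core power
 * domain may be powered down before executing WFI. Only the field-access pattern
 * with the CLPSTATE position/mask macros is shown; the PWRMODCTL instance macro
 * and the legal LPSTATE encodings are implementation defined (see the core TRM),
 * so both are assumptions here.
 */
static inline void pwrmodctl_set_core_lpstate(uint32_t clpstate)
{
  uint32_t cpdlpstate = PWRMODCTL->CPDLPSTATE;          /* assumed instance macro */

  cpdlpstate &= ~PWRMODCTL_CPDLPSTATE_CLPSTATE_Msk;     /* clear the core LPSTATE field */
  cpdlpstate |= (clpstate << PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos)
                 & PWRMODCTL_CPDLPSTATE_CLPSTATE_Msk;   /* insert the requested encoding */

  PWRMODCTL->CPDLPSTATE = cpdlpstate;                   /* apply before entering sleep */
}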
+
+
+/**
+  \ingroup    CMSIS_core_register
+  \defgroup   EWIC_Type     External Wakeup Interrupt Controller Registers
+  \brief      Type definitions for the External Wakeup Interrupt Controller Registers (EWIC)
+  @{
+ */
+
+/**
+  \brief  Structure type to access the External Wakeup Interrupt Controller Registers (EWIC).
+ */
+typedef struct
+{
+  __OM  uint32_t EVENTSPR;          /*!< Offset: 0x000 ( /W)  Event Set Pending Register */
+        uint32_t RESERVED0[31U];
+  __IM  uint32_t EVENTMASKA;        /*!< Offset: 0x080 (R/W)  Event Mask A Register */
+  __IM  uint32_t EVENTMASK[15];     /*!< Offset: 0x084 (R/W)  Event Mask Register */
+} EWIC_Type;
+
+/* EWIC External Wakeup Interrupt Controller (EVENTSPR) Register Definitions */
+#define EWIC_EVENTSPR_EDBGREQ_Pos    2U                                      /*!< EWIC EVENTSPR: EDBGREQ Position */
+#define EWIC_EVENTSPR_EDBGREQ_Msk   (0x1UL << EWIC_EVENTSPR_EDBGREQ_Pos)     /*!< EWIC EVENTSPR: EDBGREQ Mask */
+
+#define EWIC_EVENTSPR_NMI_Pos        1U                                      /*!< EWIC EVENTSPR: NMI Position */
+#define EWIC_EVENTSPR_NMI_Msk       (0x1UL << EWIC_EVENTSPR_NMI_Pos)         /*!< EWIC EVENTSPR: NMI Mask */
+
+#define EWIC_EVENTSPR_EVENT_Pos      0U                                      /*!< EWIC EVENTSPR: EVENT Position */
+#define EWIC_EVENTSPR_EVENT_Msk     (0x1UL /*<< EWIC_EVENTSPR_EVENT_Pos*/)   /*!< EWIC EVENTSPR: EVENT Mask */
+
+/* EWIC External Wakeup Interrupt Controller (EVENTMASKA) Register Definitions */
+#define EWIC_EVENTMASKA_EDBGREQ_Pos  2U                                      /*!< EWIC EVENTMASKA: EDBGREQ Position */
+#define EWIC_EVENTMASKA_EDBGREQ_Msk (0x1UL << EWIC_EVENTMASKA_EDBGREQ_Pos)   /*!< EWIC EVENTMASKA: EDBGREQ Mask */
+
+#define EWIC_EVENTMASKA_NMI_Pos      1U                                      /*!< EWIC EVENTMASKA: NMI Position */
+#define EWIC_EVENTMASKA_NMI_Msk     (0x1UL << EWIC_EVENTMASKA_NMI_Pos)       /*!< EWIC EVENTMASKA: NMI Mask */
+
+#define EWIC_EVENTMASKA_EVENT_Pos    0U                                      /*!< EWIC EVENTMASKA: EVENT Position */
+#define EWIC_EVENTMASKA_EVENT_Msk   (0x1UL /*<< EWIC_EVENTMASKA_EVENT_Pos*/) /*!< EWIC EVENTMASKA: EVENT Mask */
+
+/* EWIC External Wakeup Interrupt Controller (EVENTMASK) Register Definitions */
+#define EWIC_EVENTMASK_IRQ_Pos       0U                                      /*!< EWIC EVENTMASK: IRQ Position */
+#define EWIC_EVENTMASK_IRQ_Msk      (0xFFFFFFFFUL /*<< EWIC_EVENTMASK_IRQ_Pos*/)    /*!< EWIC EVENTMASK: IRQ Mask */
+
+/*@}*/ /* end of group EWIC_Type */
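/*
 * Illustrative usage sketch (editorial addition, not part of the CMSIS header):
 * the EWIC mirrors which interrupts are allowed to wake the core from its deepest
 * sleep states. Since EVENTMASKA/EVENTMASK are declared read-only here, a safe use
 * is simply to check whether a given IRQ is currently an unmasked wake-up source.
 * Both the EWIC instance macro and the IRQ-to-bit mapping used below
 * (EVENTMASK[n >> 5], bit n & 31) are assumptions; the block is implementation
 * defined.
 */
static inline uint32_t ewic_irq_is_wakeup_source(uint32_t irqn)
{
  return (EWIC->EVENTMASK[irqn >> 5U] >> (irqn & 0x1FUL)) & 1UL;
}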
+
+
+/**
+  \ingroup    CMSIS_core_register
+  \defgroup   ErrBnk_Type     Error Banking Registers (IMPLEMENTATION DEFINED)
+  \brief      Type definitions for the Error Banking Registers (ERRBNK)
+  @{
+ */
+
+/**
+  \brief  Structure type to access the Error Banking Registers (ERRBNK).
+ */
+typedef struct
+{
+  __IOM uint32_t IEBR0;    /*!< Offset: 0x000 (R/W)  Instruction Cache Error Bank Register 0 */
+  __IOM uint32_t IEBR1;    /*!< Offset: 0x004 (R/W)  Instruction Cache Error Bank Register 1 */
+        uint32_t RESERVED0[2U];
+  __IOM uint32_t DEBR0;    /*!< Offset: 0x010 (R/W)  Data Cache Error Bank Register 0 */
+  __IOM uint32_t DEBR1;    /*!< Offset: 0x014 (R/W)  Data Cache Error Bank Register 1 */
+        uint32_t RESERVED1[2U];
+  __IOM uint32_t TEBR0;    /*!< Offset: 0x020 (R/W)  TCM Error Bank Register 0 */
+        uint32_t RESERVED2[1U];
+  __IOM uint32_t TEBR1;    /*!< Offset: 0x028 (R/W)  TCM Error Bank Register 1 */
+} ErrBnk_Type;
+
+/* ERRBNK Instruction Cache Error Bank Register 0 (IEBR0) Register Definitions */
+#define ERRBNK_IEBR0_SWDEF_Pos      30U                                      /*!< ERRBNK IEBR0: SWDEF Position */
+#define ERRBNK_IEBR0_SWDEF_Msk     (0x3UL << ERRBNK_IEBR0_SWDEF_Pos)         /*!< ERRBNK IEBR0: SWDEF Mask */
+
+#define ERRBNK_IEBR0_BANK_Pos       16U                                      /*!< ERRBNK IEBR0: BANK Position */
+#define ERRBNK_IEBR0_BANK_Msk      (0x1UL << ERRBNK_IEBR0_BANK_Pos)          /*!< ERRBNK IEBR0: BANK Mask */
+
+#define ERRBNK_IEBR0_LOCATION_Pos    2U                                      /*!< ERRBNK IEBR0: LOCATION Position */
+#define ERRBNK_IEBR0_LOCATION_Msk  (0x3FFFUL << ERRBNK_IEBR0_LOCATION_Pos)   /*!< ERRBNK IEBR0: LOCATION Mask */
+
+#define ERRBNK_IEBR0_LOCKED_Pos      1U                                      /*!< ERRBNK IEBR0: LOCKED Position */
+#define ERRBNK_IEBR0_LOCKED_Msk    (0x1UL << ERRBNK_IEBR0_LOCKED_Pos)        /*!< ERRBNK IEBR0: LOCKED Mask */
+
+#define ERRBNK_IEBR0_VALID_Pos       0U                                      /*!< ERRBNK IEBR0: VALID Position */
+#define ERRBNK_IEBR0_VALID_Msk     (0x1UL /*<< ERRBNK_IEBR0_VALID_Pos*/)     /*!< ERRBNK IEBR0: VALID Mask */
+
+/* ERRBNK Instruction Cache Error Bank Register 1 (IEBR1) Register Definitions */
+#define ERRBNK_IEBR1_SWDEF_Pos      30U                                      /*!< ERRBNK IEBR1: SWDEF Position */
+#define ERRBNK_IEBR1_SWDEF_Msk     (0x3UL << ERRBNK_IEBR1_SWDEF_Pos)         /*!< ERRBNK IEBR1: SWDEF Mask */
+
+#define ERRBNK_IEBR1_BANK_Pos       16U                                      /*!< ERRBNK IEBR1: BANK Position */
+#define ERRBNK_IEBR1_BANK_Msk      (0x1UL << ERRBNK_IEBR1_BANK_Pos)          /*!< ERRBNK IEBR1: BANK Mask */
+
+#define ERRBNK_IEBR1_LOCATION_Pos    2U                                      /*!< ERRBNK IEBR1: LOCATION Position */
+#define ERRBNK_IEBR1_LOCATION_Msk  (0x3FFFUL << ERRBNK_IEBR1_LOCATION_Pos)   /*!< ERRBNK IEBR1: LOCATION Mask */
+
+#define ERRBNK_IEBR1_LOCKED_Pos      1U                                      /*!< ERRBNK IEBR1: LOCKED Position */
+#define ERRBNK_IEBR1_LOCKED_Msk    (0x1UL << ERRBNK_IEBR1_LOCKED_Pos)        /*!< ERRBNK IEBR1: LOCKED Mask */
+
+#define ERRBNK_IEBR1_VALID_Pos       0U                                      /*!< ERRBNK IEBR1: VALID Position */
+#define ERRBNK_IEBR1_VALID_Msk     (0x1UL /*<< ERRBNK_IEBR1_VALID_Pos*/)     /*!< ERRBNK IEBR1: VALID Mask */
+
+/* ERRBNK Data Cache Error Bank Register 0 (DEBR0) Register Definitions */
+#define ERRBNK_DEBR0_SWDEF_Pos      30U                                      /*!< ERRBNK DEBR0: SWDEF Position */
+#define ERRBNK_DEBR0_SWDEF_Msk     (0x3UL << ERRBNK_DEBR0_SWDEF_Pos)         /*!< ERRBNK DEBR0: SWDEF Mask */
+
+#define ERRBNK_DEBR0_TYPE_Pos       17U                                      /*!< ERRBNK DEBR0: TYPE Position */
+#define ERRBNK_DEBR0_TYPE_Msk      (0x1UL << ERRBNK_DEBR0_TYPE_Pos)          /*!< ERRBNK DEBR0: TYPE Mask */
+
+#define ERRBNK_DEBR0_BANK_Pos       16U                                      /*!< ERRBNK DEBR0: BANK Position */
+#define ERRBNK_DEBR0_BANK_Msk      (0x1UL << ERRBNK_DEBR0_BANK_Pos)          /*!< ERRBNK DEBR0: BANK Mask */
+
+#define ERRBNK_DEBR0_LOCATION_Pos    2U                                      /*!< ERRBNK DEBR0: LOCATION Position */
+#define ERRBNK_DEBR0_LOCATION_Msk  (0x3FFFUL << ERRBNK_DEBR0_LOCATION_Pos)   /*!< ERRBNK DEBR0: LOCATION Mask */
+
+#define ERRBNK_DEBR0_LOCKED_Pos      1U                                      /*!< ERRBNK DEBR0: LOCKED Position */
+#define ERRBNK_DEBR0_LOCKED_Msk    (0x1UL << ERRBNK_DEBR0_LOCKED_Pos)        /*!< ERRBNK DEBR0: LOCKED Mask */
+
+#define ERRBNK_DEBR0_VALID_Pos       0U                                      /*!< ERRBNK DEBR0: VALID Position */
+#define ERRBNK_DEBR0_VALID_Msk     (0x1UL /*<< ERRBNK_DEBR0_VALID_Pos*/)     /*!< ERRBNK DEBR0: VALID Mask */
+
+/* ERRBNK Data Cache Error Bank Register 1 (DEBR1) Register Definitions */
+#define ERRBNK_DEBR1_SWDEF_Pos      30U                                      /*!< ERRBNK DEBR1: SWDEF Position */
+#define ERRBNK_DEBR1_SWDEF_Msk     (0x3UL << ERRBNK_DEBR1_SWDEF_Pos)         /*!< ERRBNK DEBR1: SWDEF Mask */
+
+#define ERRBNK_DEBR1_TYPE_Pos       17U                                      /*!< ERRBNK DEBR1: TYPE Position */
+#define ERRBNK_DEBR1_TYPE_Msk      (0x1UL << ERRBNK_DEBR1_TYPE_Pos)          /*!< ERRBNK DEBR1: TYPE Mask */
+
+#define ERRBNK_DEBR1_BANK_Pos       16U                                      /*!< ERRBNK DEBR1: BANK Position */
+#define ERRBNK_DEBR1_BANK_Msk      (0x1UL << ERRBNK_DEBR1_BANK_Pos)          /*!< ERRBNK DEBR1: BANK Mask */
+
+#define ERRBNK_DEBR1_LOCATION_Pos    2U                                      /*!< ERRBNK DEBR1: LOCATION Position */
+#define ERRBNK_DEBR1_LOCATION_Msk  (0x3FFFUL << ERRBNK_DEBR1_LOCATION_Pos)   /*!< ERRBNK DEBR1: LOCATION Mask */
+
+#define ERRBNK_DEBR1_LOCKED_Pos      1U                                      /*!< ERRBNK DEBR1: LOCKED Position */
+#define ERRBNK_DEBR1_LOCKED_Msk    (0x1UL << ERRBNK_DEBR1_LOCKED_Pos)        /*!< ERRBNK DEBR1: LOCKED Mask */
+
+#define ERRBNK_DEBR1_VALID_Pos       0U                                      /*!< ERRBNK DEBR1: VALID Position */
+#define ERRBNK_DEBR1_VALID_Msk     (0x1UL /*<< ERRBNK_DEBR1_VALID_Pos*/)     /*!< ERRBNK DEBR1: VALID Mask */
+
+/* ERRBNK TCM Error Bank Register 0 (TEBR0) Register Definitions */
+#define ERRBNK_TEBR0_SWDEF_Pos      30U                                      /*!< ERRBNK TEBR0: SWDEF Position */
+#define ERRBNK_TEBR0_SWDEF_Msk     (0x3UL << ERRBNK_TEBR0_SWDEF_Pos)         /*!< ERRBNK TEBR0: SWDEF Mask */
+
+#define ERRBNK_TEBR0_POISON_Pos     28U                                      /*!< ERRBNK TEBR0: POISON Position */
+#define ERRBNK_TEBR0_POISON_Msk    (0x1UL << ERRBNK_TEBR0_POISON_Pos)        /*!< ERRBNK TEBR0: POISON Mask */
+
+#define ERRBNK_TEBR0_TYPE_Pos       27U                                      /*!< ERRBNK TEBR0: TYPE Position */
+#define ERRBNK_TEBR0_TYPE_Msk      (0x1UL << ERRBNK_TEBR0_TYPE_Pos)          /*!< ERRBNK TEBR0: TYPE Mask */
+
+#define ERRBNK_TEBR0_BANK_Pos       24U                                      /*!< ERRBNK TEBR0: BANK Position */
+#define ERRBNK_TEBR0_BANK_Msk      (0x3UL << ERRBNK_TEBR0_BANK_Pos)          /*!< ERRBNK TEBR0: BANK Mask */
+
+#define ERRBNK_TEBR0_LOCATION_Pos    2U                                      /*!< ERRBNK TEBR0: LOCATION Position */
+#define ERRBNK_TEBR0_LOCATION_Msk  (0x3FFFFFUL << ERRBNK_TEBR0_LOCATION_Pos) /*!< ERRBNK TEBR0: LOCATION Mask */
+
+#define ERRBNK_TEBR0_LOCKED_Pos      1U                                      /*!< ERRBNK TEBR0: LOCKED Position */
+#define ERRBNK_TEBR0_LOCKED_Msk    (0x1UL << ERRBNK_TEBR0_LOCKED_Pos)        /*!< ERRBNK TEBR0: LOCKED Mask */
+
+#define ERRBNK_TEBR0_VALID_Pos       0U                                      /*!< ERRBNK TEBR0: VALID Position */
+#define ERRBNK_TEBR0_VALID_Msk     (0x1UL /*<< ERRBNK_TEBR0_VALID_Pos*/)     /*!< ERRBNK TEBR0: VALID Mask */
+
+/* ERRBNK TCM Error Bank Register 1 (TEBR1) Register Definitions */
+#define ERRBNK_TEBR1_SWDEF_Pos      30U                                      /*!< ERRBNK TEBR1: SWDEF Position */
+#define ERRBNK_TEBR1_SWDEF_Msk     (0x3UL << ERRBNK_TEBR1_SWDEF_Pos)         /*!< ERRBNK TEBR1: SWDEF Mask */
+
+#define ERRBNK_TEBR1_POISON_Pos     28U                                      /*!< ERRBNK TEBR1: POISON Position */
+#define ERRBNK_TEBR1_POISON_Msk    (0x1UL << ERRBNK_TEBR1_POISON_Pos)        /*!< ERRBNK TEBR1: POISON Mask */
+
+#define ERRBNK_TEBR1_TYPE_Pos       27U                                      /*!< ERRBNK TEBR1: TYPE Position */
+#define ERRBNK_TEBR1_TYPE_Msk      (0x1UL << ERRBNK_TEBR1_TYPE_Pos)          /*!< ERRBNK TEBR1: TYPE Mask */
+
+#define ERRBNK_TEBR1_BANK_Pos       24U                                      /*!< ERRBNK TEBR1: BANK Position */
+#define ERRBNK_TEBR1_BANK_Msk      (0x3UL << ERRBNK_TEBR1_BANK_Pos)          /*!< ERRBNK TEBR1: BANK Mask */
+
+#define ERRBNK_TEBR1_LOCATION_Pos    2U                                      /*!< ERRBNK TEBR1: LOCATION Position */
+#define ERRBNK_TEBR1_LOCATION_Msk  (0x3FFFFFUL << ERRBNK_TEBR1_LOCATION_Pos) /*!< ERRBNK TEBR1: LOCATION Mask */
+
+#define ERRBNK_TEBR1_LOCKED_Pos      1U                                      /*!< ERRBNK TEBR1: LOCKED Position */
+#define ERRBNK_TEBR1_LOCKED_Msk    (0x1UL << ERRBNK_TEBR1_LOCKED_Pos)        /*!< ERRBNK TEBR1: LOCKED Mask */
+
+#define ERRBNK_TEBR1_VALID_Pos       0U                                      /*!< ERRBNK TEBR1: VALID Position */
+#define ERRBNK_TEBR1_VALID_Msk     (0x1UL /*<< ERRBNK_TEBR1_VALID_Pos*/)     /*!< ERRBNK TEBR1: VALID Mask */
+
+/*@}*/ /* end of group ErrBnk_Type */
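/*
 * Illustrative usage sketch (editorial addition, not part of the CMSIS header):
 * the ERRBNK registers above latch where an ECC error was detected. The helper
 * below shows the intended use of the _Pos/_Msk pairs to decode a TCM error bank
 * record; the ERRBNK instance macro is an assumption and the register contents
 * are implementation defined.
 */
typedef struct
{
  uint32_t valid;       /* 1 when the bank holds a captured error */
  uint32_t bank;        /* TCM bank that reported the error */
  uint32_t location;    /* failing location within that bank */
} tcm_err_record_t;

static inline tcm_err_record_t errbnk_read_tebr0(void)
{
  uint32_t tebr0 = ERRBNK->TEBR0;    /* assumed instance macro */
  tcm_err_record_t rec;

  rec.valid    = (tebr0 & ERRBNK_TEBR0_VALID_Msk)    >> ERRBNK_TEBR0_VALID_Pos;
  rec.bank     = (tebr0 & ERRBNK_TEBR0_BANK_Msk)     >> ERRBNK_TEBR0_BANK_Pos;
  rec.location = (tebr0 & ERRBNK_TEBR0_LOCATION_Msk) >> ERRBNK_TEBR0_LOCATION_Pos;
  return rec;
}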
+
+
+/**
+  \ingroup    CMSIS_core_register
+  \defgroup   PrcCfgInf_Type     Processor Configuration Information Registers (IMPLEMENTATION DEFINED)
+  \brief      Type definitions for the Processor Configuration Information Registers (PRCCFGINF)
+  @{
+ */
+
+/**
+  \brief  Structure type to access the Processor Configuration Information Registers (PRCCFGINF).
+ */
+typedef struct
+{
+  __OM  uint32_t CFGINFOSEL;    /*!< Offset: 0x000 ( /W)  Processor Configuration Information Selection Register */
+  __IM  uint32_t CFGINFORD;     /*!< Offset: 0x004 (R/ )  Processor Configuration Information Read Data Register */
+} PrcCfgInf_Type;
+
+/* PRCCFGINF Processor Configuration Information Selection Register (CFGINFOSEL) Definitions */
+
+/* PRCCFGINF Processor Configuration Information Read Data Register (CFGINFORD) Definitions */
+
+/*@}*/ /* end of group PrcCfgInf_Type */
+
+
+/**
+  \ingroup    CMSIS_core_register
+  \defgroup   CMSIS_TPI     Trace Port Interface (TPI)
+  \brief      Type definitions for the Trace Port Interface (TPI)
+  @{
+ */
+
+/**
+  \brief  Structure type to access the Trace Port Interface Register (TPI).
+ */
+typedef struct
+{
+  __IM  uint32_t SSPSR;      /*!< Offset: 0x000 (R/ )  Supported Parallel Port Sizes Register */
+  __IOM uint32_t CSPSR;      /*!< Offset: 0x004 (R/W)  Current Parallel Port Sizes Register */
+        uint32_t RESERVED0[2U];
+  __IOM uint32_t ACPR;       /*!< Offset: 0x010 (R/W)  Asynchronous Clock Prescaler Register */
+        uint32_t RESERVED1[55U];
+  __IOM uint32_t SPPR;       /*!< Offset: 0x0F0 (R/W)  Selected Pin Protocol Register */
+        uint32_t RESERVED2[131U];
+  __IM  uint32_t FFSR;       /*!< Offset: 0x300 (R/ )  Formatter and Flush Status Register */
+  __IOM uint32_t FFCR;       /*!< Offset: 0x304 (R/W)  Formatter and Flush Control Register */
+  __IOM uint32_t PSCR;       /*!< Offset: 0x308 (R/W)  Periodic Synchronization Control Register */
+        uint32_t RESERVED3[809U];
+  __OM  uint32_t LAR;        /*!< Offset: 0xFB0 ( /W)  Software Lock Access Register */
+  __IM  uint32_t LSR;        /*!< Offset: 0xFB4 (R/ )  Software Lock Status Register */
+        uint32_t RESERVED4[4U];
+  __IM  uint32_t TYPE;       /*!< Offset: 0xFC8 (R/ )  Device Identifier Register */
+  __IM  uint32_t DEVTYPE;    /*!< Offset: 0xFCC (R/ )  Device Type Register */
+} TPI_Type;
+
+/* TPI Asynchronous Clock Prescaler Register Definitions */
+#define TPI_ACPR_SWOSCALER_Pos       0U                                      /*!< TPI ACPR: SWOSCALER Position */
+#define TPI_ACPR_SWOSCALER_Msk      (0xFFFFUL /*<< TPI_ACPR_SWOSCALER_Pos*/) /*!< TPI ACPR: SWOSCALER Mask */
+
+/* TPI Selected Pin Protocol Register Definitions */
+#define TPI_SPPR_TXMODE_Pos          0U                                      /*!< TPI SPPR: TXMODE Position */
+#define TPI_SPPR_TXMODE_Msk         (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/)       /*!< TPI SPPR: TXMODE Mask */
+
+/* TPI Formatter and Flush Status Register Definitions */
+#define TPI_FFSR_FtNonStop_Pos       3U                                      /*!< TPI FFSR: FtNonStop Position */
+#define TPI_FFSR_FtNonStop_Msk      (0x1UL << TPI_FFSR_FtNonStop_Pos)        /*!< TPI FFSR: FtNonStop Mask */
+
+#define TPI_FFSR_TCPresent_Pos       2U                                      /*!< TPI FFSR: TCPresent Position */
+#define TPI_FFSR_TCPresent_Msk      (0x1UL << TPI_FFSR_TCPresent_Pos)        /*!< TPI FFSR: TCPresent Mask */
+
+#define TPI_FFSR_FtStopped_Pos       1U                                      /*!< TPI FFSR: FtStopped Position */
+#define TPI_FFSR_FtStopped_Msk      (0x1UL << TPI_FFSR_FtStopped_Pos)        /*!< TPI FFSR: FtStopped Mask */
+
+#define TPI_FFSR_FlInProg_Pos        0U                                      /*!< TPI FFSR: FlInProg Position */
+#define TPI_FFSR_FlInProg_Msk       (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/)     /*!< TPI FFSR: FlInProg Mask */
+
+/* TPI Formatter and Flush Control Register Definitions */
+#define TPI_FFCR_TrigIn_Pos          8U                                      /*!< TPI FFCR: TrigIn Position */
+#define TPI_FFCR_TrigIn_Msk         (0x1UL << TPI_FFCR_TrigIn_Pos)           /*!< TPI FFCR: TrigIn Mask */
+
+#define TPI_FFCR_FOnMan_Pos          6U                                      /*!< TPI FFCR: FOnMan Position */
+#define TPI_FFCR_FOnMan_Msk         (0x1UL << TPI_FFCR_FOnMan_Pos)           /*!< TPI FFCR: FOnMan Mask */
+
+#define TPI_FFCR_EnFmt_Pos           0U                                      /*!< TPI FFCR: EnFmt Position */
+#define TPI_FFCR_EnFmt_Msk          (0x3UL /*<< TPI_FFCR_EnFmt_Pos*/)        /*!< TPI FFCR: EnFmt Mask */
+
+/* TPI Periodic Synchronization Control Register Definitions */
+#define TPI_PSCR_PSCount_Pos         0U                                      /*!< TPI PSCR: PSCount Position */
+#define TPI_PSCR_PSCount_Msk        (0x1FUL /*<< TPI_PSCR_PSCount_Pos*/)     /*!< TPI PSCR: PSCount Mask */
+
+/* TPI Software Lock Status Register Definitions */
+#define TPI_LSR_nTT_Pos              1U                                      /*!< TPI LSR: Not thirty-two bit. Position */
+#define TPI_LSR_nTT_Msk             (0x1UL << TPI_LSR_nTT_Pos)               /*!< TPI LSR: Not thirty-two bit. Mask */
+
+#define TPI_LSR_SLK_Pos              1U                                      /*!< TPI LSR: Software Lock status Position */
+#define TPI_LSR_SLK_Msk             (0x1UL << TPI_LSR_SLK_Pos)               /*!< TPI LSR: Software Lock status Mask */
+
+#define TPI_LSR_SLI_Pos              0U                                      /*!< TPI LSR: Software Lock implemented Position */
+#define TPI_LSR_SLI_Msk             (0x1UL /*<< TPI_LSR_SLI_Pos*/)           /*!< TPI LSR: Software Lock implemented Mask */
+
+/* TPI DEVID Register Definitions */
+#define TPI_DEVID_NRZVALID_Pos      11U                                      /*!< TPI DEVID: NRZVALID Position */
+#define TPI_DEVID_NRZVALID_Msk      (0x1UL << TPI_DEVID_NRZVALID_Pos)        /*!< TPI DEVID: NRZVALID Mask */
+
+#define TPI_DEVID_MANCVALID_Pos     10U                                      /*!< TPI DEVID: MANCVALID Position */
+#define TPI_DEVID_MANCVALID_Msk     (0x1UL << TPI_DEVID_MANCVALID_Pos)       /*!< TPI DEVID: MANCVALID Mask */
+
+#define TPI_DEVID_PTINVALID_Pos      9U                                      /*!< TPI DEVID: PTINVALID Position */
+#define TPI_DEVID_PTINVALID_Msk     (0x1UL << TPI_DEVID_PTINVALID_Pos)       /*!< TPI DEVID: PTINVALID Mask */
+
+#define TPI_DEVID_FIFOSZ_Pos         6U                                      /*!< TPI DEVID: FIFO depth Position */
+#define TPI_DEVID_FIFOSZ_Msk        (0x7UL << TPI_DEVID_FIFOSZ_Pos)          /*!< TPI DEVID: FIFO depth Mask */
+
+/* TPI DEVTYPE Register Definitions */
+#define TPI_DEVTYPE_SubType_Pos      4U                                      /*!< TPI DEVTYPE: SubType Position */
+#define TPI_DEVTYPE_SubType_Msk     (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/)   /*!< TPI DEVTYPE: SubType Mask */
+
+#define TPI_DEVTYPE_MajorType_Pos    0U                                      /*!< TPI DEVTYPE: MajorType Position */
+#define TPI_DEVTYPE_MajorType_Msk   (0xFUL << TPI_DEVTYPE_MajorType_Pos)     /*!< TPI DEVTYPE: MajorType Mask */
+
+/*@}*/ /* end of group CMSIS_TPI */
+
+#if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U)
+/**
+  \ingroup    CMSIS_core_register
+  \defgroup   CMSIS_PMU     Performance Monitoring Unit (PMU)
+  \brief      Type definitions for the Performance Monitoring Unit (PMU)
+  @{
+ */
+
+/**
+  \brief  Structure type to access the Performance Monitoring Unit (PMU).
+ */ +typedef struct +{ + __IOM uint32_t EVCNTR[__PMU_NUM_EVENTCNT]; /*!< Offset: 0x0 (R/W) PMU Event Counter Registers */ +#if __PMU_NUM_EVENTCNT<31 + uint32_t RESERVED0[31U-__PMU_NUM_EVENTCNT]; +#endif + __IOM uint32_t CCNTR; /*!< Offset: 0x7C (R/W) PMU Cycle Counter Register */ + uint32_t RESERVED1[224]; + __IOM uint32_t EVTYPER[__PMU_NUM_EVENTCNT]; /*!< Offset: 0x400 (R/W) PMU Event Type and Filter Registers */ +#if __PMU_NUM_EVENTCNT<31 + uint32_t RESERVED2[31U-__PMU_NUM_EVENTCNT]; +#endif + __IOM uint32_t CCFILTR; /*!< Offset: 0x47C (R/W) PMU Cycle Counter Filter Register */ + uint32_t RESERVED3[480]; + __IOM uint32_t CNTENSET; /*!< Offset: 0xC00 (R/W) PMU Count Enable Set Register */ + uint32_t RESERVED4[7]; + __IOM uint32_t CNTENCLR; /*!< Offset: 0xC20 (R/W) PMU Count Enable Clear Register */ + uint32_t RESERVED5[7]; + __IOM uint32_t INTENSET; /*!< Offset: 0xC40 (R/W) PMU Interrupt Enable Set Register */ + uint32_t RESERVED6[7]; + __IOM uint32_t INTENCLR; /*!< Offset: 0xC60 (R/W) PMU Interrupt Enable Clear Register */ + uint32_t RESERVED7[7]; + __IOM uint32_t OVSCLR; /*!< Offset: 0xC80 (R/W) PMU Overflow Flag Status Clear Register */ + uint32_t RESERVED8[7]; + __IOM uint32_t SWINC; /*!< Offset: 0xCA0 (R/W) PMU Software Increment Register */ + uint32_t RESERVED9[7]; + __IOM uint32_t OVSSET; /*!< Offset: 0xCC0 (R/W) PMU Overflow Flag Status Set Register */ + uint32_t RESERVED10[79]; + __IOM uint32_t TYPE; /*!< Offset: 0xE00 (R/W) PMU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0xE04 (R/W) PMU Control Register */ + uint32_t RESERVED11[108]; + __IOM uint32_t AUTHSTATUS; /*!< Offset: 0xFB8 (R/W) PMU Authentication Status Register */ + __IOM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/W) PMU Device Architecture Register */ + uint32_t RESERVED12[3]; + __IOM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/W) PMU Device Type Register */ + __IOM uint32_t PIDR4; /*!< Offset: 0xFD0 (R/W) PMU Peripheral Identification Register 4 */ + uint32_t RESERVED13[3]; + __IOM uint32_t PIDR0; /*!< Offset: 0xFE0 (R/W) PMU Peripheral Identification Register 0 */ + __IOM uint32_t PIDR1; /*!< Offset: 0xFE4 (R/W) PMU Peripheral Identification Register 1 */ + __IOM uint32_t PIDR2; /*!< Offset: 0xFE8 (R/W) PMU Peripheral Identification Register 2 */ + __IOM uint32_t PIDR3; /*!< Offset: 0xFEC (R/W) PMU Peripheral Identification Register 3 */ + __IOM uint32_t CIDR0; /*!< Offset: 0xFF0 (R/W) PMU Component Identification Register 0 */ + __IOM uint32_t CIDR1; /*!< Offset: 0xFF4 (R/W) PMU Component Identification Register 1 */ + __IOM uint32_t CIDR2; /*!< Offset: 0xFF8 (R/W) PMU Component Identification Register 2 */ + __IOM uint32_t CIDR3; /*!< Offset: 0xFFC (R/W) PMU Component Identification Register 3 */ +} PMU_Type; + +/** \brief PMU Event Counter Registers (0-30) Definitions */ + +#define PMU_EVCNTR_CNT_Pos 0U /*!< PMU EVCNTR: Counter Position */ +#define PMU_EVCNTR_CNT_Msk (0xFFFFUL /*<< PMU_EVCNTRx_CNT_Pos*/) /*!< PMU EVCNTR: Counter Mask */ + +/** \brief PMU Event Type and Filter Registers (0-30) Definitions */ + +#define PMU_EVTYPER_EVENTTOCNT_Pos 0U /*!< PMU EVTYPER: Event to Count Position */ +#define PMU_EVTYPER_EVENTTOCNT_Msk (0xFFFFUL /*<< EVTYPERx_EVENTTOCNT_Pos*/) /*!< PMU EVTYPER: Event to Count Mask */ + +/** \brief PMU Count Enable Set Register Definitions */ + +#define PMU_CNTENSET_CNT0_ENABLE_Pos 0U /*!< PMU CNTENSET: Event Counter 0 Enable Set Position */ +#define PMU_CNTENSET_CNT0_ENABLE_Msk (1UL /*<< PMU_CNTENSET_CNT0_ENABLE_Pos*/) /*!< PMU CNTENSET: Event Counter 0 Enable Set Mask */ + +#define 
PMU_CNTENSET_CNT1_ENABLE_Pos 1U /*!< PMU CNTENSET: Event Counter 1 Enable Set Position */ +#define PMU_CNTENSET_CNT1_ENABLE_Msk (1UL << PMU_CNTENSET_CNT1_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 1 Enable Set Mask */ + +#define PMU_CNTENSET_CNT2_ENABLE_Pos 2U /*!< PMU CNTENSET: Event Counter 2 Enable Set Position */ +#define PMU_CNTENSET_CNT2_ENABLE_Msk (1UL << PMU_CNTENSET_CNT2_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 2 Enable Set Mask */ + +#define PMU_CNTENSET_CNT3_ENABLE_Pos 3U /*!< PMU CNTENSET: Event Counter 3 Enable Set Position */ +#define PMU_CNTENSET_CNT3_ENABLE_Msk (1UL << PMU_CNTENSET_CNT3_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 3 Enable Set Mask */ + +#define PMU_CNTENSET_CNT4_ENABLE_Pos 4U /*!< PMU CNTENSET: Event Counter 4 Enable Set Position */ +#define PMU_CNTENSET_CNT4_ENABLE_Msk (1UL << PMU_CNTENSET_CNT4_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 4 Enable Set Mask */ + +#define PMU_CNTENSET_CNT5_ENABLE_Pos 5U /*!< PMU CNTENSET: Event Counter 5 Enable Set Position */ +#define PMU_CNTENSET_CNT5_ENABLE_Msk (1UL << PMU_CNTENSET_CNT5_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 5 Enable Set Mask */ + +#define PMU_CNTENSET_CNT6_ENABLE_Pos 6U /*!< PMU CNTENSET: Event Counter 6 Enable Set Position */ +#define PMU_CNTENSET_CNT6_ENABLE_Msk (1UL << PMU_CNTENSET_CNT6_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 6 Enable Set Mask */ + +#define PMU_CNTENSET_CNT7_ENABLE_Pos 7U /*!< PMU CNTENSET: Event Counter 7 Enable Set Position */ +#define PMU_CNTENSET_CNT7_ENABLE_Msk (1UL << PMU_CNTENSET_CNT7_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 7 Enable Set Mask */ + +#define PMU_CNTENSET_CNT8_ENABLE_Pos 8U /*!< PMU CNTENSET: Event Counter 8 Enable Set Position */ +#define PMU_CNTENSET_CNT8_ENABLE_Msk (1UL << PMU_CNTENSET_CNT8_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 8 Enable Set Mask */ + +#define PMU_CNTENSET_CNT9_ENABLE_Pos 9U /*!< PMU CNTENSET: Event Counter 9 Enable Set Position */ +#define PMU_CNTENSET_CNT9_ENABLE_Msk (1UL << PMU_CNTENSET_CNT9_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 9 Enable Set Mask */ + +#define PMU_CNTENSET_CNT10_ENABLE_Pos 10U /*!< PMU CNTENSET: Event Counter 10 Enable Set Position */ +#define PMU_CNTENSET_CNT10_ENABLE_Msk (1UL << PMU_CNTENSET_CNT10_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 10 Enable Set Mask */ + +#define PMU_CNTENSET_CNT11_ENABLE_Pos 11U /*!< PMU CNTENSET: Event Counter 11 Enable Set Position */ +#define PMU_CNTENSET_CNT11_ENABLE_Msk (1UL << PMU_CNTENSET_CNT11_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 11 Enable Set Mask */ + +#define PMU_CNTENSET_CNT12_ENABLE_Pos 12U /*!< PMU CNTENSET: Event Counter 12 Enable Set Position */ +#define PMU_CNTENSET_CNT12_ENABLE_Msk (1UL << PMU_CNTENSET_CNT12_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 12 Enable Set Mask */ + +#define PMU_CNTENSET_CNT13_ENABLE_Pos 13U /*!< PMU CNTENSET: Event Counter 13 Enable Set Position */ +#define PMU_CNTENSET_CNT13_ENABLE_Msk (1UL << PMU_CNTENSET_CNT13_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 13 Enable Set Mask */ + +#define PMU_CNTENSET_CNT14_ENABLE_Pos 14U /*!< PMU CNTENSET: Event Counter 14 Enable Set Position */ +#define PMU_CNTENSET_CNT14_ENABLE_Msk (1UL << PMU_CNTENSET_CNT14_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 14 Enable Set Mask */ + +#define PMU_CNTENSET_CNT15_ENABLE_Pos 15U /*!< PMU CNTENSET: Event Counter 15 Enable Set Position */ +#define PMU_CNTENSET_CNT15_ENABLE_Msk (1UL << PMU_CNTENSET_CNT15_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 15 Enable Set Mask */ + +#define PMU_CNTENSET_CNT16_ENABLE_Pos 16U /*!< PMU CNTENSET: 
Event Counter 16 Enable Set Position */ +#define PMU_CNTENSET_CNT16_ENABLE_Msk (1UL << PMU_CNTENSET_CNT16_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 16 Enable Set Mask */ + +#define PMU_CNTENSET_CNT17_ENABLE_Pos 17U /*!< PMU CNTENSET: Event Counter 17 Enable Set Position */ +#define PMU_CNTENSET_CNT17_ENABLE_Msk (1UL << PMU_CNTENSET_CNT17_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 17 Enable Set Mask */ + +#define PMU_CNTENSET_CNT18_ENABLE_Pos 18U /*!< PMU CNTENSET: Event Counter 18 Enable Set Position */ +#define PMU_CNTENSET_CNT18_ENABLE_Msk (1UL << PMU_CNTENSET_CNT18_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 18 Enable Set Mask */ + +#define PMU_CNTENSET_CNT19_ENABLE_Pos 19U /*!< PMU CNTENSET: Event Counter 19 Enable Set Position */ +#define PMU_CNTENSET_CNT19_ENABLE_Msk (1UL << PMU_CNTENSET_CNT19_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 19 Enable Set Mask */ + +#define PMU_CNTENSET_CNT20_ENABLE_Pos 20U /*!< PMU CNTENSET: Event Counter 20 Enable Set Position */ +#define PMU_CNTENSET_CNT20_ENABLE_Msk (1UL << PMU_CNTENSET_CNT20_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 20 Enable Set Mask */ + +#define PMU_CNTENSET_CNT21_ENABLE_Pos 21U /*!< PMU CNTENSET: Event Counter 21 Enable Set Position */ +#define PMU_CNTENSET_CNT21_ENABLE_Msk (1UL << PMU_CNTENSET_CNT21_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 21 Enable Set Mask */ + +#define PMU_CNTENSET_CNT22_ENABLE_Pos 22U /*!< PMU CNTENSET: Event Counter 22 Enable Set Position */ +#define PMU_CNTENSET_CNT22_ENABLE_Msk (1UL << PMU_CNTENSET_CNT22_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 22 Enable Set Mask */ + +#define PMU_CNTENSET_CNT23_ENABLE_Pos 23U /*!< PMU CNTENSET: Event Counter 23 Enable Set Position */ +#define PMU_CNTENSET_CNT23_ENABLE_Msk (1UL << PMU_CNTENSET_CNT23_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 23 Enable Set Mask */ + +#define PMU_CNTENSET_CNT24_ENABLE_Pos 24U /*!< PMU CNTENSET: Event Counter 24 Enable Set Position */ +#define PMU_CNTENSET_CNT24_ENABLE_Msk (1UL << PMU_CNTENSET_CNT24_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 24 Enable Set Mask */ + +#define PMU_CNTENSET_CNT25_ENABLE_Pos 25U /*!< PMU CNTENSET: Event Counter 25 Enable Set Position */ +#define PMU_CNTENSET_CNT25_ENABLE_Msk (1UL << PMU_CNTENSET_CNT25_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 25 Enable Set Mask */ + +#define PMU_CNTENSET_CNT26_ENABLE_Pos 26U /*!< PMU CNTENSET: Event Counter 26 Enable Set Position */ +#define PMU_CNTENSET_CNT26_ENABLE_Msk (1UL << PMU_CNTENSET_CNT26_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 26 Enable Set Mask */ + +#define PMU_CNTENSET_CNT27_ENABLE_Pos 27U /*!< PMU CNTENSET: Event Counter 27 Enable Set Position */ +#define PMU_CNTENSET_CNT27_ENABLE_Msk (1UL << PMU_CNTENSET_CNT27_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 27 Enable Set Mask */ + +#define PMU_CNTENSET_CNT28_ENABLE_Pos 28U /*!< PMU CNTENSET: Event Counter 28 Enable Set Position */ +#define PMU_CNTENSET_CNT28_ENABLE_Msk (1UL << PMU_CNTENSET_CNT28_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 28 Enable Set Mask */ + +#define PMU_CNTENSET_CNT29_ENABLE_Pos 29U /*!< PMU CNTENSET: Event Counter 29 Enable Set Position */ +#define PMU_CNTENSET_CNT29_ENABLE_Msk (1UL << PMU_CNTENSET_CNT29_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 29 Enable Set Mask */ + +#define PMU_CNTENSET_CNT30_ENABLE_Pos 30U /*!< PMU CNTENSET: Event Counter 30 Enable Set Position */ +#define PMU_CNTENSET_CNT30_ENABLE_Msk (1UL << PMU_CNTENSET_CNT30_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 30 Enable Set Mask */ + +#define PMU_CNTENSET_CCNTR_ENABLE_Pos 31U /*!< PMU CNTENSET: 
Cycle Counter Enable Set Position */
+#define PMU_CNTENSET_CCNTR_ENABLE_Msk            (1UL << PMU_CNTENSET_CCNTR_ENABLE_Pos)     /*!< PMU CNTENSET: Cycle Counter Enable Set Mask */
+
+/** \brief PMU Count Enable Clear Register Definitions */
+
+#define PMU_CNTENCLR_CNT0_ENABLE_Pos              0U                                         /*!< PMU CNTENCLR: Event Counter 0 Enable Clear Position */
+#define PMU_CNTENCLR_CNT0_ENABLE_Msk             (1UL /*<< PMU_CNTENCLR_CNT0_ENABLE_Pos*/)   /*!< PMU CNTENCLR: Event Counter 0 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT1_ENABLE_Pos              1U                                         /*!< PMU CNTENCLR: Event Counter 1 Enable Clear Position */
+#define PMU_CNTENCLR_CNT1_ENABLE_Msk             (1UL << PMU_CNTENCLR_CNT1_ENABLE_Pos)       /*!< PMU CNTENCLR: Event Counter 1 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT2_ENABLE_Pos              2U                                         /*!< PMU CNTENCLR: Event Counter 2 Enable Clear Position */
+#define PMU_CNTENCLR_CNT2_ENABLE_Msk             (1UL << PMU_CNTENCLR_CNT2_ENABLE_Pos)       /*!< PMU CNTENCLR: Event Counter 2 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT3_ENABLE_Pos              3U                                         /*!< PMU CNTENCLR: Event Counter 3 Enable Clear Position */
+#define PMU_CNTENCLR_CNT3_ENABLE_Msk             (1UL << PMU_CNTENCLR_CNT3_ENABLE_Pos)       /*!< PMU CNTENCLR: Event Counter 3 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT4_ENABLE_Pos              4U                                         /*!< PMU CNTENCLR: Event Counter 4 Enable Clear Position */
+#define PMU_CNTENCLR_CNT4_ENABLE_Msk             (1UL << PMU_CNTENCLR_CNT4_ENABLE_Pos)       /*!< PMU CNTENCLR: Event Counter 4 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT5_ENABLE_Pos              5U                                         /*!< PMU CNTENCLR: Event Counter 5 Enable Clear Position */
+#define PMU_CNTENCLR_CNT5_ENABLE_Msk             (1UL << PMU_CNTENCLR_CNT5_ENABLE_Pos)       /*!< PMU CNTENCLR: Event Counter 5 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT6_ENABLE_Pos              6U                                         /*!< PMU CNTENCLR: Event Counter 6 Enable Clear Position */
+#define PMU_CNTENCLR_CNT6_ENABLE_Msk             (1UL << PMU_CNTENCLR_CNT6_ENABLE_Pos)       /*!< PMU CNTENCLR: Event Counter 6 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT7_ENABLE_Pos              7U                                         /*!< PMU CNTENCLR: Event Counter 7 Enable Clear Position */
+#define PMU_CNTENCLR_CNT7_ENABLE_Msk             (1UL << PMU_CNTENCLR_CNT7_ENABLE_Pos)       /*!< PMU CNTENCLR: Event Counter 7 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT8_ENABLE_Pos              8U                                         /*!< PMU CNTENCLR: Event Counter 8 Enable Clear Position */
+#define PMU_CNTENCLR_CNT8_ENABLE_Msk             (1UL << PMU_CNTENCLR_CNT8_ENABLE_Pos)       /*!< PMU CNTENCLR: Event Counter 8 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT9_ENABLE_Pos              9U                                         /*!< PMU CNTENCLR: Event Counter 9 Enable Clear Position */
+#define PMU_CNTENCLR_CNT9_ENABLE_Msk             (1UL << PMU_CNTENCLR_CNT9_ENABLE_Pos)       /*!< PMU CNTENCLR: Event Counter 9 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT10_ENABLE_Pos             10U                                        /*!< PMU CNTENCLR: Event Counter 10 Enable Clear Position */
+#define PMU_CNTENCLR_CNT10_ENABLE_Msk            (1UL << PMU_CNTENCLR_CNT10_ENABLE_Pos)      /*!< PMU CNTENCLR: Event Counter 10 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT11_ENABLE_Pos             11U                                        /*!< PMU CNTENCLR: Event Counter 11 Enable Clear Position */
+#define PMU_CNTENCLR_CNT11_ENABLE_Msk            (1UL << PMU_CNTENCLR_CNT11_ENABLE_Pos)      /*!< PMU CNTENCLR: Event Counter 11 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT12_ENABLE_Pos             12U                                        /*!< PMU CNTENCLR: Event Counter 12 Enable Clear Position */
+#define PMU_CNTENCLR_CNT12_ENABLE_Msk            (1UL << PMU_CNTENCLR_CNT12_ENABLE_Pos)      /*!< PMU CNTENCLR: Event Counter 12 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT13_ENABLE_Pos             13U                                        /*!< PMU CNTENCLR: Event Counter 13 Enable Clear Position */
+#define PMU_CNTENCLR_CNT13_ENABLE_Msk            (1UL << PMU_CNTENCLR_CNT13_ENABLE_Pos)      /*!< PMU CNTENCLR: Event Counter 13 Enable Clear Mask */
+
+#define 
PMU_CNTENCLR_CNT14_ENABLE_Pos 14U /*!< PMU CNTENCLR: Event Counter 14 Enable Clear Position */ +#define PMU_CNTENCLR_CNT14_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT14_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 14 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT15_ENABLE_Pos 15U /*!< PMU CNTENCLR: Event Counter 15 Enable Clear Position */ +#define PMU_CNTENCLR_CNT15_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT15_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 15 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT16_ENABLE_Pos 16U /*!< PMU CNTENCLR: Event Counter 16 Enable Clear Position */ +#define PMU_CNTENCLR_CNT16_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT16_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 16 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT17_ENABLE_Pos 17U /*!< PMU CNTENCLR: Event Counter 17 Enable Clear Position */ +#define PMU_CNTENCLR_CNT17_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT17_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 17 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT18_ENABLE_Pos 18U /*!< PMU CNTENCLR: Event Counter 18 Enable Clear Position */ +#define PMU_CNTENCLR_CNT18_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT18_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 18 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT19_ENABLE_Pos 19U /*!< PMU CNTENCLR: Event Counter 19 Enable Clear Position */ +#define PMU_CNTENCLR_CNT19_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT19_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 19 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT20_ENABLE_Pos 20U /*!< PMU CNTENCLR: Event Counter 20 Enable Clear Position */ +#define PMU_CNTENCLR_CNT20_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT20_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 20 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT21_ENABLE_Pos 21U /*!< PMU CNTENCLR: Event Counter 21 Enable Clear Position */ +#define PMU_CNTENCLR_CNT21_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT21_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 21 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT22_ENABLE_Pos 22U /*!< PMU CNTENCLR: Event Counter 22 Enable Clear Position */ +#define PMU_CNTENCLR_CNT22_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT22_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 22 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT23_ENABLE_Pos 23U /*!< PMU CNTENCLR: Event Counter 23 Enable Clear Position */ +#define PMU_CNTENCLR_CNT23_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT23_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 23 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT24_ENABLE_Pos 24U /*!< PMU CNTENCLR: Event Counter 24 Enable Clear Position */ +#define PMU_CNTENCLR_CNT24_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT24_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 24 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT25_ENABLE_Pos 25U /*!< PMU CNTENCLR: Event Counter 25 Enable Clear Position */ +#define PMU_CNTENCLR_CNT25_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT25_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 25 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT26_ENABLE_Pos 26U /*!< PMU CNTENCLR: Event Counter 26 Enable Clear Position */ +#define PMU_CNTENCLR_CNT26_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT26_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 26 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT27_ENABLE_Pos 27U /*!< PMU CNTENCLR: Event Counter 27 Enable Clear Position */ +#define PMU_CNTENCLR_CNT27_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT27_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 27 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT28_ENABLE_Pos 28U /*!< PMU CNTENCLR: Event Counter 28 Enable Clear Position */ +#define PMU_CNTENCLR_CNT28_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT28_ENABLE_Pos) /*!< 
PMU CNTENCLR: Event Counter 28 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT29_ENABLE_Pos 29U /*!< PMU CNTENCLR: Event Counter 29 Enable Clear Position */ +#define PMU_CNTENCLR_CNT29_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT29_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 29 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT30_ENABLE_Pos 30U /*!< PMU CNTENCLR: Event Counter 30 Enable Clear Position */ +#define PMU_CNTENCLR_CNT30_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT30_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 30 Enable Clear Mask */ + +#define PMU_CNTENCLR_CCNTR_ENABLE_Pos 31U /*!< PMU CNTENCLR: Cycle Counter Enable Clear Position */ +#define PMU_CNTENCLR_CCNTR_ENABLE_Msk (1UL << PMU_CNTENCLR_CCNTR_ENABLE_Pos) /*!< PMU CNTENCLR: Cycle Counter Enable Clear Mask */ + +/** \brief PMU Interrupt Enable Set Register Definitions */ + +#define PMU_INTENSET_CNT0_ENABLE_Pos 0U /*!< PMU INTENSET: Event Counter 0 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT0_ENABLE_Msk (1UL /*<< PMU_INTENSET_CNT0_ENABLE_Pos*/) /*!< PMU INTENSET: Event Counter 0 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT1_ENABLE_Pos 1U /*!< PMU INTENSET: Event Counter 1 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT1_ENABLE_Msk (1UL << PMU_INTENSET_CNT1_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 1 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT2_ENABLE_Pos 2U /*!< PMU INTENSET: Event Counter 2 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT2_ENABLE_Msk (1UL << PMU_INTENSET_CNT2_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 2 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT3_ENABLE_Pos 3U /*!< PMU INTENSET: Event Counter 3 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT3_ENABLE_Msk (1UL << PMU_INTENSET_CNT3_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 3 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT4_ENABLE_Pos 4U /*!< PMU INTENSET: Event Counter 4 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT4_ENABLE_Msk (1UL << PMU_INTENSET_CNT4_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 4 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT5_ENABLE_Pos 5U /*!< PMU INTENSET: Event Counter 5 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT5_ENABLE_Msk (1UL << PMU_INTENSET_CNT5_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 5 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT6_ENABLE_Pos 6U /*!< PMU INTENSET: Event Counter 6 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT6_ENABLE_Msk (1UL << PMU_INTENSET_CNT6_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 6 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT7_ENABLE_Pos 7U /*!< PMU INTENSET: Event Counter 7 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT7_ENABLE_Msk (1UL << PMU_INTENSET_CNT7_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 7 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT8_ENABLE_Pos 8U /*!< PMU INTENSET: Event Counter 8 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT8_ENABLE_Msk (1UL << PMU_INTENSET_CNT8_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 8 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT9_ENABLE_Pos 9U /*!< PMU INTENSET: Event Counter 9 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT9_ENABLE_Msk (1UL << PMU_INTENSET_CNT9_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 9 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT10_ENABLE_Pos 10U /*!< PMU INTENSET: Event Counter 10 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT10_ENABLE_Msk (1UL << PMU_INTENSET_CNT10_ENABLE_Pos) /*!< PMU 
INTENSET: Event Counter 10 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT11_ENABLE_Pos 11U /*!< PMU INTENSET: Event Counter 11 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT11_ENABLE_Msk (1UL << PMU_INTENSET_CNT11_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 11 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT12_ENABLE_Pos 12U /*!< PMU INTENSET: Event Counter 12 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT12_ENABLE_Msk (1UL << PMU_INTENSET_CNT12_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 12 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT13_ENABLE_Pos 13U /*!< PMU INTENSET: Event Counter 13 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT13_ENABLE_Msk (1UL << PMU_INTENSET_CNT13_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 13 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT14_ENABLE_Pos 14U /*!< PMU INTENSET: Event Counter 14 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT14_ENABLE_Msk (1UL << PMU_INTENSET_CNT14_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 14 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT15_ENABLE_Pos 15U /*!< PMU INTENSET: Event Counter 15 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT15_ENABLE_Msk (1UL << PMU_INTENSET_CNT15_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 15 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT16_ENABLE_Pos 16U /*!< PMU INTENSET: Event Counter 16 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT16_ENABLE_Msk (1UL << PMU_INTENSET_CNT16_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 16 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT17_ENABLE_Pos 17U /*!< PMU INTENSET: Event Counter 17 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT17_ENABLE_Msk (1UL << PMU_INTENSET_CNT17_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 17 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT18_ENABLE_Pos 18U /*!< PMU INTENSET: Event Counter 18 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT18_ENABLE_Msk (1UL << PMU_INTENSET_CNT18_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 18 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT19_ENABLE_Pos 19U /*!< PMU INTENSET: Event Counter 19 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT19_ENABLE_Msk (1UL << PMU_INTENSET_CNT19_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 19 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT20_ENABLE_Pos 20U /*!< PMU INTENSET: Event Counter 20 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT20_ENABLE_Msk (1UL << PMU_INTENSET_CNT20_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 20 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT21_ENABLE_Pos 21U /*!< PMU INTENSET: Event Counter 21 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT21_ENABLE_Msk (1UL << PMU_INTENSET_CNT21_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 21 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT22_ENABLE_Pos 22U /*!< PMU INTENSET: Event Counter 22 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT22_ENABLE_Msk (1UL << PMU_INTENSET_CNT22_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 22 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT23_ENABLE_Pos 23U /*!< PMU INTENSET: Event Counter 23 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT23_ENABLE_Msk (1UL << PMU_INTENSET_CNT23_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 23 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT24_ENABLE_Pos 24U /*!< PMU INTENSET: Event Counter 24 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT24_ENABLE_Msk (1UL 
<< PMU_INTENSET_CNT24_ENABLE_Pos)     /*!< PMU INTENSET: Event Counter 24 Interrupt Enable Set Mask */
+
+#define PMU_INTENSET_CNT25_ENABLE_Pos            25U                                        /*!< PMU INTENSET: Event Counter 25 Interrupt Enable Set Position */
+#define PMU_INTENSET_CNT25_ENABLE_Msk            (1UL << PMU_INTENSET_CNT25_ENABLE_Pos)      /*!< PMU INTENSET: Event Counter 25 Interrupt Enable Set Mask */
+
+#define PMU_INTENSET_CNT26_ENABLE_Pos            26U                                        /*!< PMU INTENSET: Event Counter 26 Interrupt Enable Set Position */
+#define PMU_INTENSET_CNT26_ENABLE_Msk            (1UL << PMU_INTENSET_CNT26_ENABLE_Pos)      /*!< PMU INTENSET: Event Counter 26 Interrupt Enable Set Mask */
+
+#define PMU_INTENSET_CNT27_ENABLE_Pos            27U                                        /*!< PMU INTENSET: Event Counter 27 Interrupt Enable Set Position */
+#define PMU_INTENSET_CNT27_ENABLE_Msk            (1UL << PMU_INTENSET_CNT27_ENABLE_Pos)      /*!< PMU INTENSET: Event Counter 27 Interrupt Enable Set Mask */
+
+#define PMU_INTENSET_CNT28_ENABLE_Pos            28U                                        /*!< PMU INTENSET: Event Counter 28 Interrupt Enable Set Position */
+#define PMU_INTENSET_CNT28_ENABLE_Msk            (1UL << PMU_INTENSET_CNT28_ENABLE_Pos)      /*!< PMU INTENSET: Event Counter 28 Interrupt Enable Set Mask */
+
+#define PMU_INTENSET_CNT29_ENABLE_Pos            29U                                        /*!< PMU INTENSET: Event Counter 29 Interrupt Enable Set Position */
+#define PMU_INTENSET_CNT29_ENABLE_Msk            (1UL << PMU_INTENSET_CNT29_ENABLE_Pos)      /*!< PMU INTENSET: Event Counter 29 Interrupt Enable Set Mask */
+
+#define PMU_INTENSET_CNT30_ENABLE_Pos            30U                                        /*!< PMU INTENSET: Event Counter 30 Interrupt Enable Set Position */
+#define PMU_INTENSET_CNT30_ENABLE_Msk            (1UL << PMU_INTENSET_CNT30_ENABLE_Pos)      /*!< PMU INTENSET: Event Counter 30 Interrupt Enable Set Mask */
+
+#define PMU_INTENSET_CYCCNT_ENABLE_Pos           31U                                        /*!< PMU INTENSET: Cycle Counter Interrupt Enable Set Position */
+#define PMU_INTENSET_CYCCNT_ENABLE_Msk           (1UL << PMU_INTENSET_CYCCNT_ENABLE_Pos)     /*!< PMU INTENSET: Cycle Counter Interrupt Enable Set Mask */
+
+/** \brief PMU Interrupt Enable Clear Register Definitions */
+
+#define PMU_INTENCLR_CNT0_ENABLE_Pos              0U                                        /*!< PMU INTENCLR: Event Counter 0 Interrupt Enable Clear Position */
+#define PMU_INTENCLR_CNT0_ENABLE_Msk             (1UL /*<< PMU_INTENCLR_CNT0_ENABLE_Pos*/)   /*!< PMU INTENCLR: Event Counter 0 Interrupt Enable Clear Mask */
+
+#define PMU_INTENCLR_CNT1_ENABLE_Pos              1U                                        /*!< PMU INTENCLR: Event Counter 1 Interrupt Enable Clear Position */
+#define PMU_INTENCLR_CNT1_ENABLE_Msk             (1UL << PMU_INTENCLR_CNT1_ENABLE_Pos)       /*!< PMU INTENCLR: Event Counter 1 Interrupt Enable Clear Mask */
+
+#define PMU_INTENCLR_CNT2_ENABLE_Pos              2U                                        /*!< PMU INTENCLR: Event Counter 2 Interrupt Enable Clear Position */
+#define PMU_INTENCLR_CNT2_ENABLE_Msk             (1UL << PMU_INTENCLR_CNT2_ENABLE_Pos)       /*!< PMU INTENCLR: Event Counter 2 Interrupt Enable Clear Mask */
+
+#define PMU_INTENCLR_CNT3_ENABLE_Pos              3U                                        /*!< PMU INTENCLR: Event Counter 3 Interrupt Enable Clear Position */
+#define PMU_INTENCLR_CNT3_ENABLE_Msk             (1UL << PMU_INTENCLR_CNT3_ENABLE_Pos)       /*!< PMU INTENCLR: Event Counter 3 Interrupt Enable Clear Mask */
+
+#define PMU_INTENCLR_CNT4_ENABLE_Pos              4U                                        /*!< PMU INTENCLR: Event Counter 4 Interrupt Enable Clear Position */
+#define PMU_INTENCLR_CNT4_ENABLE_Msk             (1UL << PMU_INTENCLR_CNT4_ENABLE_Pos)       /*!< PMU INTENCLR: Event Counter 4 Interrupt Enable Clear Mask */
+
+#define PMU_INTENCLR_CNT5_ENABLE_Pos              5U                                        /*!< PMU INTENCLR: Event Counter 5 Interrupt Enable Clear Position */
+#define PMU_INTENCLR_CNT5_ENABLE_Msk             (1UL << PMU_INTENCLR_CNT5_ENABLE_Pos)       /*!< PMU INTENCLR: Event Counter 5 Interrupt Enable Clear Mask */
+
+#define PMU_INTENCLR_CNT6_ENABLE_Pos              6U                                        /*!< PMU INTENCLR: 
Event Counter 6 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT6_ENABLE_Msk (1UL << PMU_INTENCLR_CNT6_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 6 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT7_ENABLE_Pos 7U /*!< PMU INTENCLR: Event Counter 7 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT7_ENABLE_Msk (1UL << PMU_INTENCLR_CNT7_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 7 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT8_ENABLE_Pos 8U /*!< PMU INTENCLR: Event Counter 8 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT8_ENABLE_Msk (1UL << PMU_INTENCLR_CNT8_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 8 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT9_ENABLE_Pos 9U /*!< PMU INTENCLR: Event Counter 9 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT9_ENABLE_Msk (1UL << PMU_INTENCLR_CNT9_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 9 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT10_ENABLE_Pos 10U /*!< PMU INTENCLR: Event Counter 10 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT10_ENABLE_Msk (1UL << PMU_INTENCLR_CNT10_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 10 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT11_ENABLE_Pos 11U /*!< PMU INTENCLR: Event Counter 11 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT11_ENABLE_Msk (1UL << PMU_INTENCLR_CNT11_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 11 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT12_ENABLE_Pos 12U /*!< PMU INTENCLR: Event Counter 12 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT12_ENABLE_Msk (1UL << PMU_INTENCLR_CNT12_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 12 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT13_ENABLE_Pos 13U /*!< PMU INTENCLR: Event Counter 13 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT13_ENABLE_Msk (1UL << PMU_INTENCLR_CNT13_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 13 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT14_ENABLE_Pos 14U /*!< PMU INTENCLR: Event Counter 14 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT14_ENABLE_Msk (1UL << PMU_INTENCLR_CNT14_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 14 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT15_ENABLE_Pos 15U /*!< PMU INTENCLR: Event Counter 15 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT15_ENABLE_Msk (1UL << PMU_INTENCLR_CNT15_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 15 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT16_ENABLE_Pos 16U /*!< PMU INTENCLR: Event Counter 16 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT16_ENABLE_Msk (1UL << PMU_INTENCLR_CNT16_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 16 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT17_ENABLE_Pos 17U /*!< PMU INTENCLR: Event Counter 17 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT17_ENABLE_Msk (1UL << PMU_INTENCLR_CNT17_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 17 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT18_ENABLE_Pos 18U /*!< PMU INTENCLR: Event Counter 18 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT18_ENABLE_Msk (1UL << PMU_INTENCLR_CNT18_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 18 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT19_ENABLE_Pos 19U /*!< PMU INTENCLR: Event Counter 19 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT19_ENABLE_Msk (1UL << PMU_INTENCLR_CNT19_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 19 Interrupt Enable 
Clear Mask */ + +#define PMU_INTENCLR_CNT20_ENABLE_Pos 20U /*!< PMU INTENCLR: Event Counter 20 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT20_ENABLE_Msk (1UL << PMU_INTENCLR_CNT20_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 20 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT21_ENABLE_Pos 21U /*!< PMU INTENCLR: Event Counter 21 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT21_ENABLE_Msk (1UL << PMU_INTENCLR_CNT21_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 21 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT22_ENABLE_Pos 22U /*!< PMU INTENCLR: Event Counter 22 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT22_ENABLE_Msk (1UL << PMU_INTENCLR_CNT22_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 22 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT23_ENABLE_Pos 23U /*!< PMU INTENCLR: Event Counter 23 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT23_ENABLE_Msk (1UL << PMU_INTENCLR_CNT23_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 23 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT24_ENABLE_Pos 24U /*!< PMU INTENCLR: Event Counter 24 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT24_ENABLE_Msk (1UL << PMU_INTENCLR_CNT24_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 24 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT25_ENABLE_Pos 25U /*!< PMU INTENCLR: Event Counter 25 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT25_ENABLE_Msk (1UL << PMU_INTENCLR_CNT25_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 25 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT26_ENABLE_Pos 26U /*!< PMU INTENCLR: Event Counter 26 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT26_ENABLE_Msk (1UL << PMU_INTENCLR_CNT26_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 26 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT27_ENABLE_Pos 27U /*!< PMU INTENCLR: Event Counter 27 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT27_ENABLE_Msk (1UL << PMU_INTENCLR_CNT27_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 27 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT28_ENABLE_Pos 28U /*!< PMU INTENCLR: Event Counter 28 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT28_ENABLE_Msk (1UL << PMU_INTENCLR_CNT28_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 28 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT29_ENABLE_Pos 29U /*!< PMU INTENCLR: Event Counter 29 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT29_ENABLE_Msk (1UL << PMU_INTENCLR_CNT29_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 29 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT30_ENABLE_Pos 30U /*!< PMU INTENCLR: Event Counter 30 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT30_ENABLE_Msk (1UL << PMU_INTENCLR_CNT30_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 30 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CYCCNT_ENABLE_Pos 31U /*!< PMU INTENCLR: Cycle Counter Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CYCCNT_ENABLE_Msk (1UL << PMU_INTENCLR_CYCCNT_ENABLE_Pos) /*!< PMU INTENCLR: Cycle Counter Interrupt Enable Clear Mask */ + +/** \brief PMU Overflow Flag Status Set Register Definitions */ + +#define PMU_OVSSET_CNT0_STATUS_Pos 0U /*!< PMU OVSSET: Event Counter 0 Overflow Set Position */ +#define PMU_OVSSET_CNT0_STATUS_Msk (1UL /*<< PMU_OVSSET_CNT0_STATUS_Pos*/) /*!< PMU OVSSET: Event Counter 0 Overflow Set Mask */ + +#define PMU_OVSSET_CNT1_STATUS_Pos 1U /*!< PMU OVSSET: Event Counter 1 Overflow Set Position */ +#define 
PMU_OVSSET_CNT1_STATUS_Msk (1UL << PMU_OVSSET_CNT1_STATUS_Pos) /*!< PMU OVSSET: Event Counter 1 Overflow Set Mask */ + +#define PMU_OVSSET_CNT2_STATUS_Pos 2U /*!< PMU OVSSET: Event Counter 2 Overflow Set Position */ +#define PMU_OVSSET_CNT2_STATUS_Msk (1UL << PMU_OVSSET_CNT2_STATUS_Pos) /*!< PMU OVSSET: Event Counter 2 Overflow Set Mask */ + +#define PMU_OVSSET_CNT3_STATUS_Pos 3U /*!< PMU OVSSET: Event Counter 3 Overflow Set Position */ +#define PMU_OVSSET_CNT3_STATUS_Msk (1UL << PMU_OVSSET_CNT3_STATUS_Pos) /*!< PMU OVSSET: Event Counter 3 Overflow Set Mask */ + +#define PMU_OVSSET_CNT4_STATUS_Pos 4U /*!< PMU OVSSET: Event Counter 4 Overflow Set Position */ +#define PMU_OVSSET_CNT4_STATUS_Msk (1UL << PMU_OVSSET_CNT4_STATUS_Pos) /*!< PMU OVSSET: Event Counter 4 Overflow Set Mask */ + +#define PMU_OVSSET_CNT5_STATUS_Pos 5U /*!< PMU OVSSET: Event Counter 5 Overflow Set Position */ +#define PMU_OVSSET_CNT5_STATUS_Msk (1UL << PMU_OVSSET_CNT5_STATUS_Pos) /*!< PMU OVSSET: Event Counter 5 Overflow Set Mask */ + +#define PMU_OVSSET_CNT6_STATUS_Pos 6U /*!< PMU OVSSET: Event Counter 6 Overflow Set Position */ +#define PMU_OVSSET_CNT6_STATUS_Msk (1UL << PMU_OVSSET_CNT6_STATUS_Pos) /*!< PMU OVSSET: Event Counter 6 Overflow Set Mask */ + +#define PMU_OVSSET_CNT7_STATUS_Pos 7U /*!< PMU OVSSET: Event Counter 7 Overflow Set Position */ +#define PMU_OVSSET_CNT7_STATUS_Msk (1UL << PMU_OVSSET_CNT7_STATUS_Pos) /*!< PMU OVSSET: Event Counter 7 Overflow Set Mask */ + +#define PMU_OVSSET_CNT8_STATUS_Pos 8U /*!< PMU OVSSET: Event Counter 8 Overflow Set Position */ +#define PMU_OVSSET_CNT8_STATUS_Msk (1UL << PMU_OVSSET_CNT8_STATUS_Pos) /*!< PMU OVSSET: Event Counter 8 Overflow Set Mask */ + +#define PMU_OVSSET_CNT9_STATUS_Pos 9U /*!< PMU OVSSET: Event Counter 9 Overflow Set Position */ +#define PMU_OVSSET_CNT9_STATUS_Msk (1UL << PMU_OVSSET_CNT9_STATUS_Pos) /*!< PMU OVSSET: Event Counter 9 Overflow Set Mask */ + +#define PMU_OVSSET_CNT10_STATUS_Pos 10U /*!< PMU OVSSET: Event Counter 10 Overflow Set Position */ +#define PMU_OVSSET_CNT10_STATUS_Msk (1UL << PMU_OVSSET_CNT10_STATUS_Pos) /*!< PMU OVSSET: Event Counter 10 Overflow Set Mask */ + +#define PMU_OVSSET_CNT11_STATUS_Pos 11U /*!< PMU OVSSET: Event Counter 11 Overflow Set Position */ +#define PMU_OVSSET_CNT11_STATUS_Msk (1UL << PMU_OVSSET_CNT11_STATUS_Pos) /*!< PMU OVSSET: Event Counter 11 Overflow Set Mask */ + +#define PMU_OVSSET_CNT12_STATUS_Pos 12U /*!< PMU OVSSET: Event Counter 12 Overflow Set Position */ +#define PMU_OVSSET_CNT12_STATUS_Msk (1UL << PMU_OVSSET_CNT12_STATUS_Pos) /*!< PMU OVSSET: Event Counter 12 Overflow Set Mask */ + +#define PMU_OVSSET_CNT13_STATUS_Pos 13U /*!< PMU OVSSET: Event Counter 13 Overflow Set Position */ +#define PMU_OVSSET_CNT13_STATUS_Msk (1UL << PMU_OVSSET_CNT13_STATUS_Pos) /*!< PMU OVSSET: Event Counter 13 Overflow Set Mask */ + +#define PMU_OVSSET_CNT14_STATUS_Pos 14U /*!< PMU OVSSET: Event Counter 14 Overflow Set Position */ +#define PMU_OVSSET_CNT14_STATUS_Msk (1UL << PMU_OVSSET_CNT14_STATUS_Pos) /*!< PMU OVSSET: Event Counter 14 Overflow Set Mask */ + +#define PMU_OVSSET_CNT15_STATUS_Pos 15U /*!< PMU OVSSET: Event Counter 15 Overflow Set Position */ +#define PMU_OVSSET_CNT15_STATUS_Msk (1UL << PMU_OVSSET_CNT15_STATUS_Pos) /*!< PMU OVSSET: Event Counter 15 Overflow Set Mask */ + +#define PMU_OVSSET_CNT16_STATUS_Pos 16U /*!< PMU OVSSET: Event Counter 16 Overflow Set Position */ +#define PMU_OVSSET_CNT16_STATUS_Msk (1UL << PMU_OVSSET_CNT16_STATUS_Pos) /*!< PMU OVSSET: Event Counter 16 Overflow Set Mask */ + +#define 
PMU_OVSSET_CNT17_STATUS_Pos 17U /*!< PMU OVSSET: Event Counter 17 Overflow Set Position */ +#define PMU_OVSSET_CNT17_STATUS_Msk (1UL << PMU_OVSSET_CNT17_STATUS_Pos) /*!< PMU OVSSET: Event Counter 17 Overflow Set Mask */ + +#define PMU_OVSSET_CNT18_STATUS_Pos 18U /*!< PMU OVSSET: Event Counter 18 Overflow Set Position */ +#define PMU_OVSSET_CNT18_STATUS_Msk (1UL << PMU_OVSSET_CNT18_STATUS_Pos) /*!< PMU OVSSET: Event Counter 18 Overflow Set Mask */ + +#define PMU_OVSSET_CNT19_STATUS_Pos 19U /*!< PMU OVSSET: Event Counter 19 Overflow Set Position */ +#define PMU_OVSSET_CNT19_STATUS_Msk (1UL << PMU_OVSSET_CNT19_STATUS_Pos) /*!< PMU OVSSET: Event Counter 19 Overflow Set Mask */ + +#define PMU_OVSSET_CNT20_STATUS_Pos 20U /*!< PMU OVSSET: Event Counter 20 Overflow Set Position */ +#define PMU_OVSSET_CNT20_STATUS_Msk (1UL << PMU_OVSSET_CNT20_STATUS_Pos) /*!< PMU OVSSET: Event Counter 20 Overflow Set Mask */ + +#define PMU_OVSSET_CNT21_STATUS_Pos 21U /*!< PMU OVSSET: Event Counter 21 Overflow Set Position */ +#define PMU_OVSSET_CNT21_STATUS_Msk (1UL << PMU_OVSSET_CNT21_STATUS_Pos) /*!< PMU OVSSET: Event Counter 21 Overflow Set Mask */ + +#define PMU_OVSSET_CNT22_STATUS_Pos 22U /*!< PMU OVSSET: Event Counter 22 Overflow Set Position */ +#define PMU_OVSSET_CNT22_STATUS_Msk (1UL << PMU_OVSSET_CNT22_STATUS_Pos) /*!< PMU OVSSET: Event Counter 22 Overflow Set Mask */ + +#define PMU_OVSSET_CNT23_STATUS_Pos 23U /*!< PMU OVSSET: Event Counter 23 Overflow Set Position */ +#define PMU_OVSSET_CNT23_STATUS_Msk (1UL << PMU_OVSSET_CNT23_STATUS_Pos) /*!< PMU OVSSET: Event Counter 23 Overflow Set Mask */ + +#define PMU_OVSSET_CNT24_STATUS_Pos 24U /*!< PMU OVSSET: Event Counter 24 Overflow Set Position */ +#define PMU_OVSSET_CNT24_STATUS_Msk (1UL << PMU_OVSSET_CNT24_STATUS_Pos) /*!< PMU OVSSET: Event Counter 24 Overflow Set Mask */ + +#define PMU_OVSSET_CNT25_STATUS_Pos 25U /*!< PMU OVSSET: Event Counter 25 Overflow Set Position */ +#define PMU_OVSSET_CNT25_STATUS_Msk (1UL << PMU_OVSSET_CNT25_STATUS_Pos) /*!< PMU OVSSET: Event Counter 25 Overflow Set Mask */ + +#define PMU_OVSSET_CNT26_STATUS_Pos 26U /*!< PMU OVSSET: Event Counter 26 Overflow Set Position */ +#define PMU_OVSSET_CNT26_STATUS_Msk (1UL << PMU_OVSSET_CNT26_STATUS_Pos) /*!< PMU OVSSET: Event Counter 26 Overflow Set Mask */ + +#define PMU_OVSSET_CNT27_STATUS_Pos 27U /*!< PMU OVSSET: Event Counter 27 Overflow Set Position */ +#define PMU_OVSSET_CNT27_STATUS_Msk (1UL << PMU_OVSSET_CNT27_STATUS_Pos) /*!< PMU OVSSET: Event Counter 27 Overflow Set Mask */ + +#define PMU_OVSSET_CNT28_STATUS_Pos 28U /*!< PMU OVSSET: Event Counter 28 Overflow Set Position */ +#define PMU_OVSSET_CNT28_STATUS_Msk (1UL << PMU_OVSSET_CNT28_STATUS_Pos) /*!< PMU OVSSET: Event Counter 28 Overflow Set Mask */ + +#define PMU_OVSSET_CNT29_STATUS_Pos 29U /*!< PMU OVSSET: Event Counter 29 Overflow Set Position */ +#define PMU_OVSSET_CNT29_STATUS_Msk (1UL << PMU_OVSSET_CNT29_STATUS_Pos) /*!< PMU OVSSET: Event Counter 29 Overflow Set Mask */ + +#define PMU_OVSSET_CNT30_STATUS_Pos 30U /*!< PMU OVSSET: Event Counter 30 Overflow Set Position */ +#define PMU_OVSSET_CNT30_STATUS_Msk (1UL << PMU_OVSSET_CNT30_STATUS_Pos) /*!< PMU OVSSET: Event Counter 30 Overflow Set Mask */ + +#define PMU_OVSSET_CYCCNT_STATUS_Pos 31U /*!< PMU OVSSET: Cycle Counter Overflow Set Position */ +#define PMU_OVSSET_CYCCNT_STATUS_Msk (1UL << PMU_OVSSET_CYCCNT_STATUS_Pos) /*!< PMU OVSSET: Cycle Counter Overflow Set Mask */ + +/** \brief PMU Overflow Flag Status Clear Register Definitions */ + +#define 
PMU_OVSCLR_CNT0_STATUS_Pos 0U /*!< PMU OVSCLR: Event Counter 0 Overflow Clear Position */ +#define PMU_OVSCLR_CNT0_STATUS_Msk (1UL /*<< PMU_OVSCLR_CNT0_STATUS_Pos*/) /*!< PMU OVSCLR: Event Counter 0 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT1_STATUS_Pos 1U /*!< PMU OVSCLR: Event Counter 1 Overflow Clear Position */ +#define PMU_OVSCLR_CNT1_STATUS_Msk (1UL << PMU_OVSCLR_CNT1_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 1 Overflow Clear */ + +#define PMU_OVSCLR_CNT2_STATUS_Pos 2U /*!< PMU OVSCLR: Event Counter 2 Overflow Clear Position */ +#define PMU_OVSCLR_CNT2_STATUS_Msk (1UL << PMU_OVSCLR_CNT2_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 2 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT3_STATUS_Pos 3U /*!< PMU OVSCLR: Event Counter 3 Overflow Clear Position */ +#define PMU_OVSCLR_CNT3_STATUS_Msk (1UL << PMU_OVSCLR_CNT3_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 3 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT4_STATUS_Pos 4U /*!< PMU OVSCLR: Event Counter 4 Overflow Clear Position */ +#define PMU_OVSCLR_CNT4_STATUS_Msk (1UL << PMU_OVSCLR_CNT4_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 4 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT5_STATUS_Pos 5U /*!< PMU OVSCLR: Event Counter 5 Overflow Clear Position */ +#define PMU_OVSCLR_CNT5_STATUS_Msk (1UL << PMU_OVSCLR_CNT5_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 5 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT6_STATUS_Pos 6U /*!< PMU OVSCLR: Event Counter 6 Overflow Clear Position */ +#define PMU_OVSCLR_CNT6_STATUS_Msk (1UL << PMU_OVSCLR_CNT6_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 6 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT7_STATUS_Pos 7U /*!< PMU OVSCLR: Event Counter 7 Overflow Clear Position */ +#define PMU_OVSCLR_CNT7_STATUS_Msk (1UL << PMU_OVSCLR_CNT7_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 7 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT8_STATUS_Pos 8U /*!< PMU OVSCLR: Event Counter 8 Overflow Clear Position */ +#define PMU_OVSCLR_CNT8_STATUS_Msk (1UL << PMU_OVSCLR_CNT8_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 8 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT9_STATUS_Pos 9U /*!< PMU OVSCLR: Event Counter 9 Overflow Clear Position */ +#define PMU_OVSCLR_CNT9_STATUS_Msk (1UL << PMU_OVSCLR_CNT9_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 9 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT10_STATUS_Pos 10U /*!< PMU OVSCLR: Event Counter 10 Overflow Clear Position */ +#define PMU_OVSCLR_CNT10_STATUS_Msk (1UL << PMU_OVSCLR_CNT10_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 10 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT11_STATUS_Pos 11U /*!< PMU OVSCLR: Event Counter 11 Overflow Clear Position */ +#define PMU_OVSCLR_CNT11_STATUS_Msk (1UL << PMU_OVSCLR_CNT11_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 11 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT12_STATUS_Pos 12U /*!< PMU OVSCLR: Event Counter 12 Overflow Clear Position */ +#define PMU_OVSCLR_CNT12_STATUS_Msk (1UL << PMU_OVSCLR_CNT12_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 12 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT13_STATUS_Pos 13U /*!< PMU OVSCLR: Event Counter 13 Overflow Clear Position */ +#define PMU_OVSCLR_CNT13_STATUS_Msk (1UL << PMU_OVSCLR_CNT13_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 13 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT14_STATUS_Pos 14U /*!< PMU OVSCLR: Event Counter 14 Overflow Clear Position */ +#define PMU_OVSCLR_CNT14_STATUS_Msk (1UL << PMU_OVSCLR_CNT14_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 14 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT15_STATUS_Pos 15U /*!< PMU OVSCLR: Event Counter 15 Overflow Clear Position 
*/ +#define PMU_OVSCLR_CNT15_STATUS_Msk (1UL << PMU_OVSCLR_CNT15_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 15 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT16_STATUS_Pos 16U /*!< PMU OVSCLR: Event Counter 16 Overflow Clear Position */ +#define PMU_OVSCLR_CNT16_STATUS_Msk (1UL << PMU_OVSCLR_CNT16_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 16 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT17_STATUS_Pos 17U /*!< PMU OVSCLR: Event Counter 17 Overflow Clear Position */ +#define PMU_OVSCLR_CNT17_STATUS_Msk (1UL << PMU_OVSCLR_CNT17_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 17 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT18_STATUS_Pos 18U /*!< PMU OVSCLR: Event Counter 18 Overflow Clear Position */ +#define PMU_OVSCLR_CNT18_STATUS_Msk (1UL << PMU_OVSCLR_CNT18_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 18 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT19_STATUS_Pos 19U /*!< PMU OVSCLR: Event Counter 19 Overflow Clear Position */ +#define PMU_OVSCLR_CNT19_STATUS_Msk (1UL << PMU_OVSCLR_CNT19_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 19 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT20_STATUS_Pos 20U /*!< PMU OVSCLR: Event Counter 20 Overflow Clear Position */ +#define PMU_OVSCLR_CNT20_STATUS_Msk (1UL << PMU_OVSCLR_CNT20_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 20 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT21_STATUS_Pos 21U /*!< PMU OVSCLR: Event Counter 21 Overflow Clear Position */ +#define PMU_OVSCLR_CNT21_STATUS_Msk (1UL << PMU_OVSCLR_CNT21_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 21 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT22_STATUS_Pos 22U /*!< PMU OVSCLR: Event Counter 22 Overflow Clear Position */ +#define PMU_OVSCLR_CNT22_STATUS_Msk (1UL << PMU_OVSCLR_CNT22_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 22 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT23_STATUS_Pos 23U /*!< PMU OVSCLR: Event Counter 23 Overflow Clear Position */ +#define PMU_OVSCLR_CNT23_STATUS_Msk (1UL << PMU_OVSCLR_CNT23_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 23 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT24_STATUS_Pos 24U /*!< PMU OVSCLR: Event Counter 24 Overflow Clear Position */ +#define PMU_OVSCLR_CNT24_STATUS_Msk (1UL << PMU_OVSCLR_CNT24_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 24 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT25_STATUS_Pos 25U /*!< PMU OVSCLR: Event Counter 25 Overflow Clear Position */ +#define PMU_OVSCLR_CNT25_STATUS_Msk (1UL << PMU_OVSCLR_CNT25_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 25 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT26_STATUS_Pos 26U /*!< PMU OVSCLR: Event Counter 26 Overflow Clear Position */ +#define PMU_OVSCLR_CNT26_STATUS_Msk (1UL << PMU_OVSCLR_CNT26_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 26 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT27_STATUS_Pos 27U /*!< PMU OVSCLR: Event Counter 27 Overflow Clear Position */ +#define PMU_OVSCLR_CNT27_STATUS_Msk (1UL << PMU_OVSCLR_CNT27_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 27 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT28_STATUS_Pos 28U /*!< PMU OVSCLR: Event Counter 28 Overflow Clear Position */ +#define PMU_OVSCLR_CNT28_STATUS_Msk (1UL << PMU_OVSCLR_CNT28_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 28 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT29_STATUS_Pos 29U /*!< PMU OVSCLR: Event Counter 29 Overflow Clear Position */ +#define PMU_OVSCLR_CNT29_STATUS_Msk (1UL << PMU_OVSCLR_CNT29_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 29 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT30_STATUS_Pos 30U /*!< PMU OVSCLR: Event Counter 30 Overflow Clear Position */ +#define 
PMU_OVSCLR_CNT30_STATUS_Msk (1UL << PMU_OVSCLR_CNT30_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 30 Overflow Clear Mask */ + +#define PMU_OVSCLR_CYCCNT_STATUS_Pos 31U /*!< PMU OVSCLR: Cycle Counter Overflow Clear Position */ +#define PMU_OVSCLR_CYCCNT_STATUS_Msk (1UL << PMU_OVSCLR_CYCCNT_STATUS_Pos) /*!< PMU OVSCLR: Cycle Counter Overflow Clear Mask */ + +/** \brief PMU Software Increment Counter */ + +#define PMU_SWINC_CNT0_Pos 0U /*!< PMU SWINC: Event Counter 0 Software Increment Position */ +#define PMU_SWINC_CNT0_Msk (1UL /*<< PMU_SWINC_CNT0_Pos */) /*!< PMU SWINC: Event Counter 0 Software Increment Mask */ + +#define PMU_SWINC_CNT1_Pos 1U /*!< PMU SWINC: Event Counter 1 Software Increment Position */ +#define PMU_SWINC_CNT1_Msk (1UL << PMU_SWINC_CNT1_Pos) /*!< PMU SWINC: Event Counter 1 Software Increment Mask */ + +#define PMU_SWINC_CNT2_Pos 2U /*!< PMU SWINC: Event Counter 2 Software Increment Position */ +#define PMU_SWINC_CNT2_Msk (1UL << PMU_SWINC_CNT2_Pos) /*!< PMU SWINC: Event Counter 2 Software Increment Mask */ + +#define PMU_SWINC_CNT3_Pos 3U /*!< PMU SWINC: Event Counter 3 Software Increment Position */ +#define PMU_SWINC_CNT3_Msk (1UL << PMU_SWINC_CNT3_Pos) /*!< PMU SWINC: Event Counter 3 Software Increment Mask */ + +#define PMU_SWINC_CNT4_Pos 4U /*!< PMU SWINC: Event Counter 4 Software Increment Position */ +#define PMU_SWINC_CNT4_Msk (1UL << PMU_SWINC_CNT4_Pos) /*!< PMU SWINC: Event Counter 4 Software Increment Mask */ + +#define PMU_SWINC_CNT5_Pos 5U /*!< PMU SWINC: Event Counter 5 Software Increment Position */ +#define PMU_SWINC_CNT5_Msk (1UL << PMU_SWINC_CNT5_Pos) /*!< PMU SWINC: Event Counter 5 Software Increment Mask */ + +#define PMU_SWINC_CNT6_Pos 6U /*!< PMU SWINC: Event Counter 6 Software Increment Position */ +#define PMU_SWINC_CNT6_Msk (1UL << PMU_SWINC_CNT6_Pos) /*!< PMU SWINC: Event Counter 6 Software Increment Mask */ + +#define PMU_SWINC_CNT7_Pos 7U /*!< PMU SWINC: Event Counter 7 Software Increment Position */ +#define PMU_SWINC_CNT7_Msk (1UL << PMU_SWINC_CNT7_Pos) /*!< PMU SWINC: Event Counter 7 Software Increment Mask */ + +#define PMU_SWINC_CNT8_Pos 8U /*!< PMU SWINC: Event Counter 8 Software Increment Position */ +#define PMU_SWINC_CNT8_Msk (1UL << PMU_SWINC_CNT8_Pos) /*!< PMU SWINC: Event Counter 8 Software Increment Mask */ + +#define PMU_SWINC_CNT9_Pos 9U /*!< PMU SWINC: Event Counter 9 Software Increment Position */ +#define PMU_SWINC_CNT9_Msk (1UL << PMU_SWINC_CNT9_Pos) /*!< PMU SWINC: Event Counter 9 Software Increment Mask */ + +#define PMU_SWINC_CNT10_Pos 10U /*!< PMU SWINC: Event Counter 10 Software Increment Position */ +#define PMU_SWINC_CNT10_Msk (1UL << PMU_SWINC_CNT10_Pos) /*!< PMU SWINC: Event Counter 10 Software Increment Mask */ + +#define PMU_SWINC_CNT11_Pos 11U /*!< PMU SWINC: Event Counter 11 Software Increment Position */ +#define PMU_SWINC_CNT11_Msk (1UL << PMU_SWINC_CNT11_Pos) /*!< PMU SWINC: Event Counter 11 Software Increment Mask */ + +#define PMU_SWINC_CNT12_Pos 12U /*!< PMU SWINC: Event Counter 12 Software Increment Position */ +#define PMU_SWINC_CNT12_Msk (1UL << PMU_SWINC_CNT12_Pos) /*!< PMU SWINC: Event Counter 12 Software Increment Mask */ + +#define PMU_SWINC_CNT13_Pos 13U /*!< PMU SWINC: Event Counter 13 Software Increment Position */ +#define PMU_SWINC_CNT13_Msk (1UL << PMU_SWINC_CNT13_Pos) /*!< PMU SWINC: Event Counter 13 Software Increment Mask */ + +#define PMU_SWINC_CNT14_Pos 14U /*!< PMU SWINC: Event Counter 14 Software Increment Position */ +#define PMU_SWINC_CNT14_Msk (1UL << PMU_SWINC_CNT14_Pos) /*!< PMU 
SWINC: Event Counter 14 Software Increment Mask */ + +#define PMU_SWINC_CNT15_Pos 15U /*!< PMU SWINC: Event Counter 15 Software Increment Position */ +#define PMU_SWINC_CNT15_Msk (1UL << PMU_SWINC_CNT15_Pos) /*!< PMU SWINC: Event Counter 15 Software Increment Mask */ + +#define PMU_SWINC_CNT16_Pos 16U /*!< PMU SWINC: Event Counter 16 Software Increment Position */ +#define PMU_SWINC_CNT16_Msk (1UL << PMU_SWINC_CNT16_Pos) /*!< PMU SWINC: Event Counter 16 Software Increment Mask */ + +#define PMU_SWINC_CNT17_Pos 17U /*!< PMU SWINC: Event Counter 17 Software Increment Position */ +#define PMU_SWINC_CNT17_Msk (1UL << PMU_SWINC_CNT17_Pos) /*!< PMU SWINC: Event Counter 17 Software Increment Mask */ + +#define PMU_SWINC_CNT18_Pos 18U /*!< PMU SWINC: Event Counter 18 Software Increment Position */ +#define PMU_SWINC_CNT18_Msk (1UL << PMU_SWINC_CNT18_Pos) /*!< PMU SWINC: Event Counter 18 Software Increment Mask */ + +#define PMU_SWINC_CNT19_Pos 19U /*!< PMU SWINC: Event Counter 19 Software Increment Position */ +#define PMU_SWINC_CNT19_Msk (1UL << PMU_SWINC_CNT19_Pos) /*!< PMU SWINC: Event Counter 19 Software Increment Mask */ + +#define PMU_SWINC_CNT20_Pos 20U /*!< PMU SWINC: Event Counter 20 Software Increment Position */ +#define PMU_SWINC_CNT20_Msk (1UL << PMU_SWINC_CNT20_Pos) /*!< PMU SWINC: Event Counter 20 Software Increment Mask */ + +#define PMU_SWINC_CNT21_Pos 21U /*!< PMU SWINC: Event Counter 21 Software Increment Position */ +#define PMU_SWINC_CNT21_Msk (1UL << PMU_SWINC_CNT21_Pos) /*!< PMU SWINC: Event Counter 21 Software Increment Mask */ + +#define PMU_SWINC_CNT22_Pos 22U /*!< PMU SWINC: Event Counter 22 Software Increment Position */ +#define PMU_SWINC_CNT22_Msk (1UL << PMU_SWINC_CNT22_Pos) /*!< PMU SWINC: Event Counter 22 Software Increment Mask */ + +#define PMU_SWINC_CNT23_Pos 23U /*!< PMU SWINC: Event Counter 23 Software Increment Position */ +#define PMU_SWINC_CNT23_Msk (1UL << PMU_SWINC_CNT23_Pos) /*!< PMU SWINC: Event Counter 23 Software Increment Mask */ + +#define PMU_SWINC_CNT24_Pos 24U /*!< PMU SWINC: Event Counter 24 Software Increment Position */ +#define PMU_SWINC_CNT24_Msk (1UL << PMU_SWINC_CNT24_Pos) /*!< PMU SWINC: Event Counter 24 Software Increment Mask */ + +#define PMU_SWINC_CNT25_Pos 25U /*!< PMU SWINC: Event Counter 25 Software Increment Position */ +#define PMU_SWINC_CNT25_Msk (1UL << PMU_SWINC_CNT25_Pos) /*!< PMU SWINC: Event Counter 25 Software Increment Mask */ + +#define PMU_SWINC_CNT26_Pos 26U /*!< PMU SWINC: Event Counter 26 Software Increment Position */ +#define PMU_SWINC_CNT26_Msk (1UL << PMU_SWINC_CNT26_Pos) /*!< PMU SWINC: Event Counter 26 Software Increment Mask */ + +#define PMU_SWINC_CNT27_Pos 27U /*!< PMU SWINC: Event Counter 27 Software Increment Position */ +#define PMU_SWINC_CNT27_Msk (1UL << PMU_SWINC_CNT27_Pos) /*!< PMU SWINC: Event Counter 27 Software Increment Mask */ + +#define PMU_SWINC_CNT28_Pos 28U /*!< PMU SWINC: Event Counter 28 Software Increment Position */ +#define PMU_SWINC_CNT28_Msk (1UL << PMU_SWINC_CNT28_Pos) /*!< PMU SWINC: Event Counter 28 Software Increment Mask */ + +#define PMU_SWINC_CNT29_Pos 29U /*!< PMU SWINC: Event Counter 29 Software Increment Position */ +#define PMU_SWINC_CNT29_Msk (1UL << PMU_SWINC_CNT29_Pos) /*!< PMU SWINC: Event Counter 29 Software Increment Mask */ + +#define PMU_SWINC_CNT30_Pos 30U /*!< PMU SWINC: Event Counter 30 Software Increment Position */ +#define PMU_SWINC_CNT30_Msk (1UL << PMU_SWINC_CNT30_Pos) /*!< PMU SWINC: Event Counter 30 Software Increment Mask */ + +/** \brief PMU Control 
Register Definitions */ + +#define PMU_CTRL_ENABLE_Pos 0U /*!< PMU CTRL: ENABLE Position */ +#define PMU_CTRL_ENABLE_Msk (1UL /*<< PMU_CTRL_ENABLE_Pos*/) /*!< PMU CTRL: ENABLE Mask */ + +#define PMU_CTRL_EVENTCNT_RESET_Pos 1U /*!< PMU CTRL: Event Counter Reset Position */ +#define PMU_CTRL_EVENTCNT_RESET_Msk (1UL << PMU_CTRL_EVENTCNT_RESET_Pos) /*!< PMU CTRL: Event Counter Reset Mask */ + +#define PMU_CTRL_CYCCNT_RESET_Pos 2U /*!< PMU CTRL: Cycle Counter Reset Position */ +#define PMU_CTRL_CYCCNT_RESET_Msk (1UL << PMU_CTRL_CYCCNT_RESET_Pos) /*!< PMU CTRL: Cycle Counter Reset Mask */ + +#define PMU_CTRL_CYCCNT_DISABLE_Pos 5U /*!< PMU CTRL: Disable Cycle Counter Position */ +#define PMU_CTRL_CYCCNT_DISABLE_Msk (1UL << PMU_CTRL_CYCCNT_DISABLE_Pos) /*!< PMU CTRL: Disable Cycle Counter Mask */ + +#define PMU_CTRL_FRZ_ON_OV_Pos 9U /*!< PMU CTRL: Freeze-on-overflow Position */ +#define PMU_CTRL_FRZ_ON_OV_Msk (1UL << PMU_CTRL_FRZ_ON_OV_Pos) /*!< PMU CTRL: Freeze-on-overflow Mask */ + +#define PMU_CTRL_TRACE_ON_OV_Pos 11U /*!< PMU CTRL: Trace-on-overflow Position */ +#define PMU_CTRL_TRACE_ON_OV_Msk (1UL << PMU_CTRL_TRACE_ON_OV_Pos) /*!< PMU CTRL: Trace-on-overflow Mask */ + +/** \brief PMU Type Register Definitions */ + +#define PMU_TYPE_NUM_CNTS_Pos 0U /*!< PMU TYPE: Number of Counters Position */ +#define PMU_TYPE_NUM_CNTS_Msk (0xFFUL /*<< PMU_TYPE_NUM_CNTS_Pos*/) /*!< PMU TYPE: Number of Counters Mask */ + +#define PMU_TYPE_SIZE_CNTS_Pos 8U /*!< PMU TYPE: Size of Counters Position */ +#define PMU_TYPE_SIZE_CNTS_Msk (0x3FUL << PMU_TYPE_SIZE_CNTS_Pos) /*!< PMU TYPE: Size of Counters Mask */ + +#define PMU_TYPE_CYCCNT_PRESENT_Pos 14U /*!< PMU TYPE: Cycle Counter Present Position */ +#define PMU_TYPE_CYCCNT_PRESENT_Msk (1UL << PMU_TYPE_CYCCNT_PRESENT_Pos) /*!< PMU TYPE: Cycle Counter Present Mask */ + +#define PMU_TYPE_FRZ_OV_SUPPORT_Pos 21U /*!< PMU TYPE: Freeze-on-overflow Support Position */ +#define PMU_TYPE_FRZ_OV_SUPPORT_Msk (1UL << PMU_TYPE_FRZ_OV_SUPPORT_Pos) /*!< PMU TYPE: Freeze-on-overflow Support Mask */ + +#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos 23U /*!< PMU TYPE: Trace-on-overflow Support Position */ +#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Msk (1UL << PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos) /*!< PMU TYPE: Trace-on-overflow Support Mask */ + +/** \brief PMU Authentication Status Register Definitions */ + +#define PMU_AUTHSTATUS_NSID_Pos 0U /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Position */ +#define PMU_AUTHSTATUS_NSID_Msk (0x3UL /*<< PMU_AUTHSTATUS_NSID_Pos*/) /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSNID_Pos 2U /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_NSNID_Msk (0x3UL << PMU_AUTHSTATUS_NSNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SID_Pos 4U /*!< PMU AUTHSTATUS: Secure Invasive Debug Position */ +#define PMU_AUTHSTATUS_SID_Msk (0x3UL << PMU_AUTHSTATUS_SID_Pos) /*!< PMU AUTHSTATUS: Secure Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SNID_Pos 6U /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_SNID_Msk (0x3UL << PMU_AUTHSTATUS_SNID_Pos) /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSUID_Pos 16U /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Position */ +#define PMU_AUTHSTATUS_NSUID_Msk (0x3UL << PMU_AUTHSTATUS_NSUID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSUNID_Pos 18U /*!< PMU AUTHSTATUS: Non-secure
Unprivileged Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_NSUNID_Msk (0x3UL << PMU_AUTHSTATUS_NSUNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SUID_Pos 20U /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Position */ +#define PMU_AUTHSTATUS_SUID_Msk (0x3UL << PMU_AUTHSTATUS_SUID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SUNID_Pos 22U /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_SUNID_Msk (0x3UL << PMU_AUTHSTATUS_SUNID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Mask */ + + +/*@} end of group CMSIS_PMU */ +#endif + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< 
MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_PXN_Pos 4U /*!< MPU RLAR: PXN Position */ +#define MPU_RLAR_PXN_Msk (1UL << MPU_RLAR_PXN_Pos) /*!< MPU RLAR: PXN Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Disable Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/* Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define 
SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege 
level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +#define FPU_FPDSCR_FZ16_Pos 19U /*!< FPDSCR: FZ16 bit Position */ +#define FPU_FPDSCR_FZ16_Msk (1UL << FPU_FPDSCR_FZ16_Pos) /*!< FPDSCR: FZ16 bit Mask */ + +#define FPU_FPDSCR_LTPSIZE_Pos 16U /*!< FPDSCR: LTPSIZE bit Position */ +#define FPU_FPDSCR_LTPSIZE_Msk (7UL << FPU_FPDSCR_LTPSIZE_Pos) /*!< FPDSCR: LTPSIZE bit Mask */ + +/* Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FPRound_Pos 28U /*!< MVFR0: FPRound bits Position */ +#define FPU_MVFR0_FPRound_Msk (0xFUL << FPU_MVFR0_FPRound_Pos) /*!< MVFR0: FPRound bits Mask */ + +#define FPU_MVFR0_FPSqrt_Pos 20U /*!< MVFR0: FPSqrt bits Position */ +#define FPU_MVFR0_FPSqrt_Msk (0xFUL << FPU_MVFR0_FPSqrt_Pos) /*!< MVFR0: FPSqrt bits Mask */ + +#define FPU_MVFR0_FPDivide_Pos 16U /*!< MVFR0: FPDivide bits Position */ +#define FPU_MVFR0_FPDivide_Msk (0xFUL << FPU_MVFR0_FPDivide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FPDP_Pos 8U /*!< MVFR0: FPDP bits Position */ +#define FPU_MVFR0_FPDP_Msk (0xFUL << FPU_MVFR0_FPDP_Pos) /*!< MVFR0: FPDP bits Mask */ + +#define FPU_MVFR0_FPSP_Pos 4U /*!< MVFR0: FPSP bits Position */ +#define FPU_MVFR0_FPSP_Msk (0xFUL << FPU_MVFR0_FPSP_Pos) /*!< MVFR0: FPSP bits Mask */ + +#define FPU_MVFR0_SIMDReg_Pos 0U /*!< MVFR0: SIMDReg bits Position */ +#define FPU_MVFR0_SIMDReg_Msk (0xFUL /*<< FPU_MVFR0_SIMDReg_Pos*/) /*!< MVFR0: SIMDReg bits Mask */ + +/* Media and VFP Feature Register 1 Definitions */ +#define FPU_MVFR1_FMAC_Pos 28U /*!< MVFR1: FMAC bits Position */ +#define FPU_MVFR1_FMAC_Msk (0xFUL << FPU_MVFR1_FMAC_Pos) /*!< MVFR1: FMAC bits Mask */ + +#define FPU_MVFR1_FPHP_Pos 24U /*!< MVFR1: FPHP bits Position */ +#define FPU_MVFR1_FPHP_Msk (0xFUL << FPU_MVFR1_FPHP_Pos) /*!< MVFR1: FPHP bits Mask */ + +#define FPU_MVFR1_FP16_Pos 20U /*!< MVFR1: FP16 bits Position */ +#define FPU_MVFR1_FP16_Msk (0xFUL << FPU_MVFR1_FP16_Pos) /*!< MVFR1: FP16 bits Mask */ + +#define FPU_MVFR1_MVE_Pos 8U /*!< MVFR1: MVE bits Position */ +#define FPU_MVFR1_MVE_Msk (0xFUL << FPU_MVFR1_MVE_Pos) /*!< MVFR1: MVE bits Mask */ + +#define FPU_MVFR1_FPDNaN_Pos 4U /*!< MVFR1: FPDNaN bits Position */ +#define FPU_MVFR1_FPDNaN_Msk (0xFUL << FPU_MVFR1_FPDNaN_Pos) /*!< MVFR1: FPDNaN bits Mask */ + +#define FPU_MVFR1_FPFtZ_Pos 0U /*!< MVFR1: FPFtZ bits Position */ +#define FPU_MVFR1_FPFtZ_Msk (0xFUL 
/*<< FPU_MVFR1_FPFtZ_Pos*/) /*!< MVFR1: FPFtZ bits Mask */ + +/* Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: FPMisc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: FPMisc bits Mask */ + +/*@} end of group CMSIS_FPU */ + +/* CoreDebug is deprecated. replaced by DCB (Debug Control Block) */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief \deprecated Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + __OM uint32_t DSCEMCR; /*!< Offset: 0x010 ( /W) Debug Set Clear Exception and Monitor Control Register */ + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< \deprecated CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< \deprecated CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_FPD_Pos 23U /*!< \deprecated CoreDebug DHCSR: S_FPD Position */ +#define CoreDebug_DHCSR_S_FPD_Msk (1UL << CoreDebug_DHCSR_S_FPD_Pos) /*!< \deprecated CoreDebug DHCSR: S_FPD Mask */ + +#define CoreDebug_DHCSR_S_SUIDE_Pos 22U /*!< \deprecated CoreDebug DHCSR: S_SUIDE Position */ +#define CoreDebug_DHCSR_S_SUIDE_Msk (1UL << CoreDebug_DHCSR_S_SUIDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_SUIDE Mask */ + +#define CoreDebug_DHCSR_S_NSUIDE_Pos 21U /*!< \deprecated CoreDebug DHCSR: S_NSUIDE Position */ +#define CoreDebug_DHCSR_S_NSUIDE_Msk (1UL << CoreDebug_DHCSR_S_NSUIDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_NSUIDE Mask */ + +#define CoreDebug_DHCSR_S_SDE_Pos 20U /*!< \deprecated CoreDebug DHCSR: S_SDE Position */ +#define CoreDebug_DHCSR_S_SDE_Msk (1UL << CoreDebug_DHCSR_S_SDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_SDE Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< \deprecated CoreDebug DHCSR: S_SLEEP Position */ +#define 
CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< \deprecated CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< \deprecated CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< \deprecated CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< \deprecated CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_PMOV_Pos 6U /*!< \deprecated CoreDebug DHCSR: C_PMOV Position */ +#define CoreDebug_DHCSR_C_PMOV_Msk (1UL << CoreDebug_DHCSR_C_PMOV_Pos) /*!< \deprecated CoreDebug DHCSR: C_PMOV Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< \deprecated CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< \deprecated CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< \deprecated CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< \deprecated CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< \deprecated CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< \deprecated CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< \deprecated CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< \deprecated CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< \deprecated CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< \deprecated CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< \deprecated CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< \deprecated CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< \deprecated CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< \deprecated CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< \deprecated CoreDebug 
DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< \deprecated CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< \deprecated CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< \deprecated CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Set Clear Exception and Monitor Control Register Definitions */ +#define CoreDebug_DSCEMCR_CLR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_REQ, Position */ +#define CoreDebug_DSCEMCR_CLR_MON_REQ_Msk (1UL << CoreDebug_DSCEMCR_CLR_MON_REQ_Pos) /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_REQ, Mask */ + +#define CoreDebug_DSCEMCR_CLR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_PEND, Position */ +#define CoreDebug_DSCEMCR_CLR_MON_PEND_Msk (1UL << CoreDebug_DSCEMCR_CLR_MON_PEND_Pos) /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_PEND, Mask */ + +#define CoreDebug_DSCEMCR_SET_MON_REQ_Pos 3U /*!< \deprecated CoreDebug DSCEMCR: SET_MON_REQ, Position */ +#define CoreDebug_DSCEMCR_SET_MON_REQ_Msk (1UL << CoreDebug_DSCEMCR_SET_MON_REQ_Pos) /*!< \deprecated CoreDebug DSCEMCR: SET_MON_REQ, Mask */ + +#define CoreDebug_DSCEMCR_SET_MON_PEND_Pos 1U /*!< \deprecated CoreDebug DSCEMCR: SET_MON_PEND, Position */ +#define CoreDebug_DSCEMCR_SET_MON_PEND_Msk (1UL << CoreDebug_DSCEMCR_SET_MON_PEND_Pos) /*!< \deprecated CoreDebug DSCEMCR: SET_MON_PEND, Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_UIDEN_Pos 10U /*!< \deprecated CoreDebug DAUTHCTRL: UIDEN, Position */ +#define CoreDebug_DAUTHCTRL_UIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_UIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: UIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_UIDAPEN_Pos 9U /*!< \deprecated CoreDebug DAUTHCTRL: UIDAPEN, Position */ +#define CoreDebug_DAUTHCTRL_UIDAPEN_Msk (1UL << CoreDebug_DAUTHCTRL_UIDAPEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: UIDAPEN, Mask */ + +#define 
CoreDebug_DAUTHCTRL_FSDMA_Pos 8U /*!< \deprecated CoreDebug DAUTHCTRL: FSDMA, Position */ +#define CoreDebug_DAUTHCTRL_FSDMA_Msk (1UL << CoreDebug_DAUTHCTRL_FSDMA_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: FSDMA, Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< \deprecated CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< \deprecated CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< \deprecated CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< \deprecated CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). 
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + __OM uint32_t DSCEMCR; /*!< Offset: 0x010 ( /W) Debug Set Clear Exception and Monitor Control Register */ + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_FPD_Pos 23U /*!< DCB DHCSR: Floating-point registers Debuggable Position */ +#define DCB_DHCSR_S_FPD_Msk (0x1UL << DCB_DHCSR_S_FPD_Pos) /*!< DCB DHCSR: Floating-point registers Debuggable Mask */ + +#define DCB_DHCSR_S_SUIDE_Pos 22U /*!< DCB DHCSR: Secure unprivileged halting debug enabled Position */ +#define DCB_DHCSR_S_SUIDE_Msk (0x1UL << DCB_DHCSR_S_SUIDE_Pos) /*!< DCB DHCSR: Secure unprivileged halting debug enabled Mask */ + +#define DCB_DHCSR_S_NSUIDE_Pos 21U /*!< DCB DHCSR: Non-secure unprivileged halting debug enabled Position */ +#define DCB_DHCSR_S_NSUIDE_Msk (0x1UL << DCB_DHCSR_S_NSUIDE_Pos) /*!< DCB DHCSR: Non-secure unprivileged halting debug enabled Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_PMOV_Pos 6U /*!< DCB DHCSR: Halt on PMU overflow control Position */ +#define DCB_DHCSR_C_PMOV_Msk (0x1UL << DCB_DHCSR_C_PMOV_Pos) /*!< DCB DHCSR: Halt on PMU overflow control Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (0x1UL << 
DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (0x1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (0x1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (0x1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (0x1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (0x1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (0x1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (0x1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (0x1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask 
*/ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (0x1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (0x1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (0x1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (0x1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (0x1UL << DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (0x1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DSCEMCR, Debug Set Clear Exception and Monitor Control Register Definitions */ +#define DCB_DSCEMCR_CLR_MON_REQ_Pos 19U /*!< DCB DSCEMCR: Clear monitor request Position */ +#define DCB_DSCEMCR_CLR_MON_REQ_Msk (0x1UL << DCB_DSCEMCR_CLR_MON_REQ_Pos) /*!< DCB DSCEMCR: Clear monitor request Mask */ + +#define DCB_DSCEMCR_CLR_MON_PEND_Pos 17U /*!< DCB DSCEMCR: Clear monitor pend Position */ +#define DCB_DSCEMCR_CLR_MON_PEND_Msk (0x1UL << DCB_DSCEMCR_CLR_MON_PEND_Pos) /*!< DCB DSCEMCR: Clear monitor pend Mask */ + +#define DCB_DSCEMCR_SET_MON_REQ_Pos 3U /*!< DCB DSCEMCR: Set monitor request Position */ +#define DCB_DSCEMCR_SET_MON_REQ_Msk (0x1UL << DCB_DSCEMCR_SET_MON_REQ_Pos) /*!< DCB DSCEMCR: Set monitor request Mask */ + +#define DCB_DSCEMCR_SET_MON_PEND_Pos 1U /*!< DCB DSCEMCR: Set monitor pend Position */ +#define DCB_DSCEMCR_SET_MON_PEND_Msk (0x1UL << DCB_DSCEMCR_SET_MON_PEND_Pos) /*!< DCB DSCEMCR: Set monitor pend Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_UIDEN_Pos 10U /*!< DCB DAUTHCTRL: Unprivileged Invasive Debug Enable Position */ +#define DCB_DAUTHCTRL_UIDEN_Msk (0x1UL << DCB_DAUTHCTRL_UIDEN_Pos) /*!< DCB DAUTHCTRL: Unprivileged Invasive Debug Enable Mask */ + +#define DCB_DAUTHCTRL_UIDAPEN_Pos 9U /*!< DCB DAUTHCTRL: Unprivileged Invasive DAP Access Enable Position */ +#define DCB_DAUTHCTRL_UIDAPEN_Msk (0x1UL << DCB_DAUTHCTRL_UIDAPEN_Pos) /*!< DCB DAUTHCTRL: Unprivileged Invasive DAP Access Enable Mask */ + +#define DCB_DAUTHCTRL_FSDMA_Pos 8U /*!< DCB DAUTHCTRL: Force Secure DebugMonitor Allowed Position */ +#define DCB_DAUTHCTRL_FSDMA_Msk (0x1UL << DCB_DAUTHCTRL_FSDMA_Pos) /*!< DCB DAUTHCTRL: Force Secure DebugMonitor Allowed Mask */ + +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< 
DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SUNID_Pos 22U /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_SUNID_Msk (0x3UL << DIB_DAUTHSTATUS_SUNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_SUID_Pos 20U /*!< DIB DAUTHSTATUS: Secure Unprivileged Invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_SUID_Msk (0x3UL << DIB_DAUTHSTATUS_SUID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_NSUNID_Pos 18U /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Allo Position */ +#define DIB_DAUTHSTATUS_NSUNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSUNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Allo Mask */ + +#define DIB_DAUTHSTATUS_NSUID_Pos 16U /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_NSUID_Msk (0x3UL << DIB_DAUTHSTATUS_NSUID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << 
DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. 
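The _VAL2FLD/_FLD2VAL helpers above fold a field's xxx_Pos/xxx_Msk pair into one mask-and-shift step. A minimal usage sketch (illustrative only, not from the CMSIS source), here against the SCB->AIRCR PRIGROUP field defined elsewhere in this header:

  static void set_prigroup_example(uint32_t new_group)
  {
    uint32_t aircr = SCB->AIRCR;                                   /* read-modify-write of AIRCR */
    uint32_t old_group = _FLD2VAL(SCB_AIRCR_PRIGROUP, aircr);      /* extract the current field value */
    aircr &= ~(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk);    /* clear write key and field */
    aircr |= _VAL2FLD(SCB_AIRCR_VECTKEY, 0x5FAUL)                  /* insert the write-enable key */
           | _VAL2FLD(SCB_AIRCR_PRIGROUP, new_group);              /* place the new grouping value */
    SCB->AIRCR = aircr;
    (void)old_group;
  }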
+ @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define MEMSYSCTL_BASE (0xE001E000UL) /*!< Memory System Control Base Address */ + #define ERRBNK_BASE (0xE001E100UL) /*!< Error Banking Base Address */ + #define PWRMODCTL_BASE (0xE001E300UL) /*!< Power Mode Control Base Address */ + #define EWIC_BASE (0xE001E400UL) /*!< External Wakeup Interrupt Controller Base Address */ + #define PRCCFGINF_BASE (0xE001E700UL) /*!< Processor Configuration Information Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + #define ICB ((ICB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define MEMSYSCTL ((MemSysCtl_Type *) MEMSYSCTL_BASE ) /*!< Memory System Control configuration struct */ + #define ERRBNK ((ErrBnk_Type *) ERRBNK_BASE ) /*!< Error Banking configuration struct */ + #define PWRMODCTL ((PwrModCtl_Type *) PWRMODCTL_BASE ) /*!< Power Mode Control configuration struct */ + #define EWIC ((EWIC_Type *) EWIC_BASE ) /*!< EWIC configuration struct */ + #define PRCCFGINF ((PrcCfgInf_Type *) PRCCFGINF_BASE ) /*!< Processor Configuration Information configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) + #define PMU_BASE (0xE0003000UL) /*!< PMU Base Address */ + #define PMU ((PMU_Type *) PMU_BASE ) /*!< PMU configuration struct */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + + #define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ + #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< \deprecated Core Debug Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base 
Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define ICB_NS ((ICB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< \deprecated Core Debug configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_register_aliases Backwards Compatibility Aliases + \brief Register alias definitions for backwards compatibility. + @{ + */ + +/*@} */ + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
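Application code normally reaches the core peripherals through the pointer macros above. As one small illustration, the DWT cycle counter can be started through the DCB and DWT pointers; a sketch, assuming the DWT_CTRL_CYCCNTENA_Msk definition from the DWT section of this header:

  static inline void cycle_counter_start(void)
  {
    DCB->DEMCR |= DCB_DEMCR_TRCENA_Msk;     /* power up the trace blocks (DWT, ITM) */
    DWT->CYCCNT = 0U;                       /* clear the free-running cycle counter */
    DWT->CTRL  |= DWT_CTRL_CYCCNTENA_Msk;   /* start counting core clock cycles */
  }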
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. 
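The EXC_RETURN masks above are normally tested against the LR value captured on exception entry. A minimal decode helper might look like the following (illustrative helper, not a CMSIS API):

  typedef struct
  {
    int secure_stack;    /* EXC_RETURN_S:     registers were pushed to the Secure stack          */
    int no_fp_context;   /* EXC_RETURN_FTYPE: 1 = no floating-point context was allocated        */
    int thread_mode;     /* EXC_RETURN_MODE:  return to Thread (1) or Handler (0) mode           */
    int used_psp;        /* EXC_RETURN_SPSEL: frame restored from PSP (1) or MSP (0)             */
  } exc_return_info_t;

  static exc_return_info_t decode_exc_return(uint32_t exc_return)
  {
    exc_return_info_t info;
    info.secure_stack  = (exc_return & EXC_RETURN_S)     != 0UL;
    info.no_fp_context = (exc_return & EXC_RETURN_FTYPE) != 0UL;
    info.thread_mode   = (exc_return & EXC_RETURN_MODE)  != 0UL;
    info.used_psp      = (exc_return & EXC_RETURN_SPSEL) != 0UL;
    return info;
  }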
+ */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. 
+ \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
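A typical bring-up sequence with the enable/pending functions above; TIMx_IRQn is a placeholder for a device-specific interrupt number supplied by the device header (sketch only):

  void timer_irq_setup(IRQn_Type TIMx_IRQn)
  {
    NVIC_ClearPendingIRQ(TIMx_IRQn);          /* drop anything latched before configuration */
    NVIC_EnableIRQ(TIMx_IRQn);                /* unmask the interrupt in the NVIC */

    if (NVIC_GetEnableIRQ(TIMx_IRQn) != 1U)   /* read back: 1 = enabled, 0 = disabled */
    {
      /* enable did not take effect - handle as needed */
    }
  }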
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). 
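Putting the grouping and the encode/decode helpers together, a sketch with a placeholder interrupt name UARTx_IRQn (grouping value 3 gives four preemption bits and no sub-priority bits on an implementation with __NVIC_PRIO_BITS == 4):

  void uart_irq_priority(IRQn_Type UARTx_IRQn)
  {
    NVIC_SetPriorityGrouping(3U);                              /* 4 preempt bits, 0 sub bits for 4-bit priorities */
    uint32_t grouping = NVIC_GetPriorityGrouping();

    uint32_t encoded = NVIC_EncodePriority(grouping, 1U, 0U);  /* preemptive priority 1, sub-priority 0 */
    NVIC_SetPriority(UARTx_IRQn, encoded);

    uint32_t preempt, sub;
    NVIC_DecodePriority(NVIC_GetPriority(UARTx_IRQn), grouping, &preempt, &sub);
    (void)preempt; (void)sub;                                  /* round-trips back to 1 and 0 */
  }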
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. 
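When built with __ARM_FEATURE_CMSE == 3, secure firmware commonly pairs these _NS functions with NVIC_SetTargetState() to hand an interrupt over to the Non-secure world; a sketch with a placeholder NSPERIPH_IRQn:

  void route_irq_to_nonsecure(IRQn_Type NSPERIPH_IRQn)
  {
    NVIC_SetTargetState(NSPERIPH_IRQn);     /* ITNS bit set: interrupt targets Non-secure state */
    TZ_NVIC_EnableIRQ_NS(NSPERIPH_IRQn);    /* unmask it in the Non-secure banked NVIC */
  }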
+ \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## PMU functions and events #################################### */ + +#if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) + +#include "pmu_armv8.h" + +/** + \brief Cortex-M85 PMU events + \note Architectural PMU events can be found in pmu_armv8.h +*/ + +#define ARMCM85_PMU_ECC_ERR 0xC000 /*!< One or more Error Correcting Code (ECC) errors detected */ +#define ARMCM85_PMU_ECC_ERR_MBIT 0xC001 /*!< One or more multi-bit ECC errors detected */ +#define ARMCM85_PMU_ECC_ERR_DCACHE 0xC010 /*!< One or more ECC errors in the data cache */ +#define ARMCM85_PMU_ECC_ERR_ICACHE 0xC011 /*!< One or more ECC errors in the instruction cache */ +#define ARMCM85_PMU_ECC_ERR_MBIT_DCACHE 0xC012 /*!< One or more multi-bit ECC errors in the data cache */ +#define ARMCM85_PMU_ECC_ERR_MBIT_ICACHE 0xC013 /*!< One or more multi-bit ECC errors in the instruction cache */ +#define ARMCM85_PMU_ECC_ERR_DTCM 0xC020 /*!< One or more ECC errors in the Data Tightly Coupled Memory (DTCM) */ +#define ARMCM85_PMU_ECC_ERR_ITCM 0xC021 /*!< One or more ECC errors in the Instruction Tightly Coupled Memory (ITCM) */ +#define ARMCM85_PMU_ECC_ERR_MBIT_DTCM 0xC022 /*!< One or more multi-bit ECC errors in the DTCM */ +#define ARMCM85_PMU_ECC_ERR_MBIT_ITCM 0xC023 /*!< One 
or more multi-bit ECC errors in the ITCM */ +#define ARMCM85_PMU_PF_LINEFILL 0xC100 /*!< The prefetcher starts a line-fill */ +#define ARMCM85_PMU_PF_CANCEL 0xC101 /*!< The prefetcher stops prefetching */ +#define ARMCM85_PMU_PF_DROP_LINEFILL 0xC102 /*!< A linefill triggered by a prefetcher has been dropped because of lack of buffering */ +#define ARMCM85_PMU_NWAMODE_ENTER 0xC200 /*!< No write-allocate mode entry */ +#define ARMCM85_PMU_NWAMODE 0xC201 /*!< Write-allocate store is not allocated into the data cache due to no-write-allocate mode */ +#define ARMCM85_PMU_SAHB_ACCESS 0xC300 /*!< Read or write access on the S-AHB interface to the TCM */ +#define ARMCM85_PMU_PAHB_ACCESS 0xC301 /*!< Read or write access on the P-AHB write interface */ +#define ARMCM85_PMU_AXI_WRITE_ACCESS 0xC302 /*!< Any beat access to M-AXI write interface */ +#define ARMCM85_PMU_AXI_READ_ACCESS 0xC303 /*!< Any beat access to M-AXI read interface */ +#define ARMCM85_PMU_DOSTIMEOUT_DOUBLE 0xC400 /*!< Denial of Service timeout has fired twice and caused buffers to drain to allow forward progress */ +#define ARMCM85_PMU_DOSTIMEOUT_TRIPLE 0xC401 /*!< Denial of Service timeout has fired three times and blocked the LSU to force forward progress */ + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + +/* ########################## MVE functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_MveFunctions MVE Functions + \brief Function that provides MVE type. + @{ + */ + +/** + \brief get MVE type + \details returns the MVE type + \returns + - \b 0: No Vector Extension (MVE) + - \b 1: Integer Vector Extension (MVE-I) + - \b 2: Floating-point Vector Extension (MVE-F) + */ +__STATIC_INLINE uint32_t SCB_GetMVEType(void) +{ + const uint32_t mvfr1 = FPU->MVFR1; + if ((mvfr1 & FPU_MVFR1_MVE_Msk) == (0x2U << FPU_MVFR1_MVE_Pos)) + { + return 2U; + } + else if ((mvfr1 & FPU_MVFR1_MVE_Msk) == (0x1U << FPU_MVFR1_MVE_Pos)) + { + return 1U; + } + else + { + return 0U; + } +} + + +/*@} end of CMSIS_Core_MveFunctions */ + + +/* ########################## Cache functions #################################### */ + +#if ((defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)) || \ + (defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U))) +#include "cachel1_armv7.h" +#endif + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). 
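The two query functions above are typically used to gate optional code paths at run time; a brief sketch:

  void select_math_backend(void)
  {
    uint32_t fpu = SCB_GetFPUType();   /* 0: none, 1: single precision, 2: single + double precision */
    uint32_t mve = SCB_GetMVEType();   /* 0: none, 1: MVE-I, 2: MVE-F */

    if ((fpu >= 1U) || (mve >= 1U))
    {
      /* a vector/floating-point enabled implementation could be selected here */
    }
  }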
+ */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + +/* ################### PAC Key functions ########################### */ + +#if (defined (__ARM_FEATURE_PAUTH) && (__ARM_FEATURE_PAUTH == 1)) +#include "pac_armv81.h" +#endif + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. 
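A small sketch tying DIB_GetAuthStatus() back to the DAUTHSTATUS field macros and the _FLD2VAL helper defined earlier:

  uint32_t secure_invasive_debug_state(void)
  {
    uint32_t status = DIB_GetAuthStatus();
    return _FLD2VAL(DIB_DAUTHSTATUS_SID, status);   /* 2-bit Secure Invasive Debug field, bits [5:4] */
  }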
+ @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. 
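A common use of SysTick_Config() is a 1 ms time base; SystemCoreClock is assumed to be provided by the device's system file (sketch only):

  volatile uint32_t msTicks;               /* incremented by the SysTick interrupt */

  void SysTick_Handler(void)
  {
    msTicks++;
  }

  int systick_init_1ms(void)
  {
    if (SysTick_Config(SystemCoreClock / 1000U) != 0U)
    {
      return -1;                           /* requested reload does not fit the 24-bit counter */
    }
    return 0;
  }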
+ */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM85_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/Drivers/CMSIS/Include/core_sc000.h b/Drivers/CMSIS/Include/core_sc000.h new file mode 100644 index 0000000..dbc755f --- /dev/null +++ b/Drivers/CMSIS/Include/core_sc000.h @@ -0,0 +1,1030 @@ +/**************************************************************************//** + * @file core_sc000.h + * @brief CMSIS SC000 Core Peripheral Access Layer Header File + * @version V5.0.7 + * @date 27. March 2020 + ******************************************************************************/ +/* + * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_SC000_H_GENERIC +#define __CORE_SC000_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup SC000 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS SC000 definitions */ +#define __SC000_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __SC000_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __SC000_CMSIS_VERSION ((__SC000_CMSIS_VERSION_MAIN << 16U) | \ + __SC000_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_SC (000U) /*!< Cortex secure core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_SC000_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_SC000_H_DEPENDANT +#define __CORE_SC000_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __SC000_REV + #define __SC000_REV 0x0000U + #warning "__SC000_REV not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 0U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group SC000 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core MPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t _reserved0:1; /*!< bit: 0 Reserved */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[31U]; + __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[31U]; + __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[31U]; + __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[31U]; + uint32_t RESERVED4[64U]; + __IOM uint32_t IP[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t SHP[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + uint32_t RESERVED1[154U]; + __IOM uint32_t SFCR; /*!< Offset: 0x290 (R/W) Security Features Control Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: 
TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +} SCnSCB_Type; + +/* Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ +#define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region RNRber Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ +} MPU_Type; + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 8U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0xFFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/* MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + 
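/* Editor's illustrative sketch, not part of the upstream CMSIS header: how the RBAR/RASR
   bit definitions in this group (including the SIZE and ENABLE fields defined just below)
   are typically combined to program one region, assuming a device with __MPU_PRESENT == 1.
   The function name, region number, base address and attributes are example values only. */
static void MPU_Region0_Example(void)
{
  /* Write base address, VALID and region number in one RBAR access (no separate RNR write). */
  MPU->RBAR = (0x20000000UL & MPU_RBAR_ADDR_Msk) |            /* 64 KB-aligned SRAM base       */
              MPU_RBAR_VALID_Msk |                            /* REGION field below is valid   */
              (0UL << MPU_RBAR_REGION_Pos);                   /* region 0                      */

  /* SIZE encodes 2^(SIZE+1) bytes: 15 -> 64 KB. AP = 0b011 is full access. */
  MPU->RASR = MPU_RASR_XN_Msk |                               /* no instruction fetches        */
              (0x3UL << MPU_RASR_AP_Pos) |                    /* privileged + unprivileged R/W */
              MPU_RASR_C_Msk | MPU_RASR_S_Msk |               /* cacheable, shareable SRAM     */
              (15UL << MPU_RASR_SIZE_Pos) |                   /* 64 KB                         */
              MPU_RASR_ENABLE_Msk;                            /* enable this region            */

  MPU->CTRL = MPU_CTRL_PRIVDEFENA_Msk | MPU_CTRL_ENABLE_Msk;  /* keep default map for privileged code */
  __DSB();                                                    /* ensure the new settings take effect  */
  __ISB();
}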
+#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief SC000 Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor. + Therefore they are not covered by the SC000 header file. + @{ + */ +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
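   \note Editor's illustrative usage, not part of the upstream comment: with the mappings
         below, application code normally programs an interrupt in two steps, shown here
         for a hypothetical device interrupt named WWDG_IRQn and the 2 priority bits of
         this core:
   \code
   NVIC_SetPriority(WWDG_IRQn, 1U);   // 0 (highest) .. 3 (lowest) when __NVIC_PRIO_BITS == 2
   NVIC_EnableIRQ(WWDG_IRQn);
   \endcode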
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else +/*#define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping not available for SC000 */ +/*#define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping not available for SC000 */ + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ +/*#define NVIC_GetActive __NVIC_GetActive not available for SC000 */ + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. 
+ \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. 
+ \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + /* ARM Application Note 321 states that the M0 and M0+ do not require the architectural barrier - assume SC000 is the same */ +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
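   \note Editor's illustrative usage, not part of the upstream comment: assuming the CMSIS
         SystemCoreClock variable holds the current core clock in Hz, a 1 ms tick is set
         up with:
   \code
   if (SysTick_Config(SystemCoreClock / 1000U) != 0U)
   {
     // reload value does not fit the 24-bit LOAD register - handle the error
   }
   \endcode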
+ */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_SC000_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/Drivers/CMSIS/Include/core_sc300.h b/Drivers/CMSIS/Include/core_sc300.h new file mode 100644 index 0000000..d666210 --- /dev/null +++ b/Drivers/CMSIS/Include/core_sc300.h @@ -0,0 +1,1917 @@ +/**************************************************************************//** + * @file core_sc300.h + * @brief CMSIS SC300 Core Peripheral Access Layer Header File + * @version V5.0.10 + * @date 04. June 2021 + ******************************************************************************/ +/* + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_SC300_H_GENERIC +#define __CORE_SC300_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup SC3000 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS SC300 definitions */ +#define __SC300_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __SC300_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __SC300_CMSIS_VERSION ((__SC300_CMSIS_VERSION_MAIN << 16U) | \ + __SC300_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_SC (300U) /*!< Cortex secure core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_SC300_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_SC300_H_DEPENDANT +#define __CORE_SC300_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __SC300_REV + #define __SC300_REV 0x0000U + #warning "__SC300_REV not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group SC300 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:27; /*!< bit: 0..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:1; /*!< bit: 9 Reserved */ + uint32_t ICI_IT_1:6; /*!< bit: 10..15 ICI/IT part 1 */ + uint32_t _reserved1:8; /*!< bit: 16..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit */ + uint32_t ICI_IT_2:2; /*!< bit: 25..26 ICI/IT part 2 */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_ICI_IT_2_Pos 25U /*!< xPSR: ICI/IT part 2 Position */ +#define xPSR_ICI_IT_2_Msk (3UL << xPSR_ICI_IT_2_Pos) /*!< xPSR: ICI/IT part 2 Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ICI_IT_1_Pos 10U /*!< xPSR: ICI/IT part 1 Position */ +#define xPSR_ICI_IT_1_Msk (0x3FUL << xPSR_ICI_IT_1_Pos) /*!< xPSR: ICI/IT part 1 Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
+ */ +typedef struct +{ + __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[24U]; + __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[24U]; + __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[24U]; + __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[24U]; + __IOM uint32_t IABR[8U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[56U]; + __IOM uint8_t IP[240U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED5[644U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHP[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ISAR[5U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + uint32_t RESERVED0[5U]; + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + uint32_t RESERVED1[129U]; + __IOM uint32_t SFCR; /*!< Offset: 0x290 (R/W) Security Features Control Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ 
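/* Editor's illustrative sketch, not part of the upstream CMSIS header: the CPUID fields are
   unpacked with these position/mask pairs (PARTNO, ARCHITECTURE and REVISION follow just
   below), assuming the SCB instance macro defined later in this header; the function name
   is an example only. */
__STATIC_INLINE uint32_t CPUID_PartNumber_Example(void)
{
  uint32_t cpuid       = SCB->CPUID;                                                        /* read once          */
  uint32_t implementer = (cpuid & SCB_CPUID_IMPLEMENTER_Msk) >> SCB_CPUID_IMPLEMENTER_Pos;  /* 0x41 = Arm         */
  uint32_t variant     = (cpuid & SCB_CPUID_VARIANT_Msk)     >> SCB_CPUID_VARIANT_Pos;      /* major revision rN  */
  uint32_t revision    = (cpuid & SCB_CPUID_REVISION_Msk)    >> SCB_CPUID_REVISION_Pos;     /* minor revision pN  */
  (void)implementer; (void)variant; (void)revision;
  return ((cpuid & SCB_CPUID_PARTNO_Msk) >> SCB_CPUID_PARTNO_Pos);                          /* part number        */
}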
+ +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLBASE_Pos 29U /*!< SCB VTOR: TBLBASE Position */ +#define SCB_VTOR_TBLBASE_Msk (1UL << SCB_VTOR_TBLBASE_Pos) /*!< SCB VTOR: TBLBASE Mask */ + +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x3FFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U 
/*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +#define SCB_AIRCR_VECTRESET_Pos 0U /*!< SCB AIRCR: VECTRESET Position */ +#define SCB_AIRCR_VECTRESET_Msk (1UL /*<< SCB_AIRCR_VECTRESET_Pos*/) /*!< SCB AIRCR: VECTRESET Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +#define SCB_CCR_NONBASETHRDENA_Pos 0U /*!< SCB CCR: NONBASETHRDENA Position */ +#define SCB_CCR_NONBASETHRDENA_Msk (1UL /*<< SCB_CCR_NONBASETHRDENA_Pos*/) /*!< SCB CCR: NONBASETHRDENA Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << 
SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_STKERR_Pos 
(SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + 
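Editor's note: a minimal usage sketch (not part of this commit or of the CMSIS header itself) showing how the SCB_CFSR_* and SCB_HFSR_* masks defined above are commonly used to classify a fault from a HardFault handler. The function name HardFault_Diagnose is hypothetical, and the SCB instance macro it relies on is declared further down in this header.

void HardFault_Diagnose(void)
{
  uint32_t cfsr = SCB->CFSR;                        /* Configurable Fault Status Register */
  uint32_t hfsr = SCB->HFSR;                        /* HardFault Status Register          */

  if ((hfsr & SCB_HFSR_FORCED_Msk) != 0U)           /* escalated configurable fault?      */
  {
    if (((cfsr & SCB_CFSR_PRECISERR_Msk) != 0U) &&
        ((cfsr & SCB_CFSR_BFARVALID_Msk) != 0U))
    {
      uint32_t fault_address = SCB->BFAR;           /* address of the precise bus fault   */
      (void)fault_address;                          /* log/report as the application sees fit */
    }
    if ((cfsr & SCB_CFSR_DIVBYZERO_Msk) != 0U)
    {
      /* usage fault: integer divide by zero (only raised when SCB->CCR DIV_0_TRP is set) */
    }
  }
}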
+#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/* Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ +#define SCnSCB_ACTLR_DISFOLD_Msk (1UL << SCnSCB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ + +#define SCnSCB_ACTLR_DISDEFWBUF_Pos 1U /*!< ACTLR: DISDEFWBUF Position */ +#define SCnSCB_ACTLR_DISDEFWBUF_Msk (1UL << SCnSCB_ACTLR_DISDEFWBUF_Pos) /*!< ACTLR: DISDEFWBUF Mask */ + +#define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ +#define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[6U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TraceBusID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TraceBusID_Msk (0x7FUL << ITM_TCR_TraceBusID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPrescale_Pos 8U /*!< ITM TCR: TSPrescale Position */ +#define ITM_TCR_TSPrescale_Msk (3UL << ITM_TCR_TSPrescale_Pos) /*!< ITM TCR: TSPrescale Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 
0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + __IOM uint32_t MASK0; /*!< Offset: 0x024 (R/W) Mask Register 0 */ + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED0[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + __IOM uint32_t MASK1; /*!< Offset: 0x034 (R/W) Mask Register 1 */ + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + __IOM uint32_t MASK2; /*!< Offset: 0x044 (R/W) Mask Register 2 */ + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + __IOM uint32_t MASK3; /*!< Offset: 0x054 (R/W) Mask Register 3 */ + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL 
<< DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Mask Register Definitions */ +#define DWT_MASK_MASK_Pos 0U /*!< DWT MASK: MASK Position */ +#define DWT_MASK_MASK_Msk (0x1FUL /*<< DWT_MASK_MASK_Pos*/) /*!< DWT MASK: MASK Mask */ + +/* DWT Comparator Function Register Definitions */ +#define 
DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVADDR1_Pos 16U /*!< DWT FUNCTION: DATAVADDR1 Position */ +#define DWT_FUNCTION_DATAVADDR1_Msk (0xFUL << DWT_FUNCTION_DATAVADDR1_Pos) /*!< DWT FUNCTION: DATAVADDR1 Mask */ + +#define DWT_FUNCTION_DATAVADDR0_Pos 12U /*!< DWT FUNCTION: DATAVADDR0 Position */ +#define DWT_FUNCTION_DATAVADDR0_Msk (0xFUL << DWT_FUNCTION_DATAVADDR0_Pos) /*!< DWT FUNCTION: DATAVADDR0 Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_LNK1ENA_Pos 9U /*!< DWT FUNCTION: LNK1ENA Position */ +#define DWT_FUNCTION_LNK1ENA_Msk (0x1UL << DWT_FUNCTION_LNK1ENA_Pos) /*!< DWT FUNCTION: LNK1ENA Mask */ + +#define DWT_FUNCTION_DATAVMATCH_Pos 8U /*!< DWT FUNCTION: DATAVMATCH Position */ +#define DWT_FUNCTION_DATAVMATCH_Msk (0x1UL << DWT_FUNCTION_DATAVMATCH_Pos) /*!< DWT FUNCTION: DATAVMATCH Mask */ + +#define DWT_FUNCTION_CYCMATCH_Pos 7U /*!< DWT FUNCTION: CYCMATCH Position */ +#define DWT_FUNCTION_CYCMATCH_Msk (0x1UL << DWT_FUNCTION_CYCMATCH_Pos) /*!< DWT FUNCTION: CYCMATCH Mask */ + +#define DWT_FUNCTION_EMITRANGE_Pos 5U /*!< DWT FUNCTION: EMITRANGE Position */ +#define DWT_FUNCTION_EMITRANGE_Msk (0x1UL << DWT_FUNCTION_EMITRANGE_Pos) /*!< DWT FUNCTION: EMITRANGE Mask */ + +#define DWT_FUNCTION_FUNCTION_Pos 0U /*!< DWT FUNCTION: FUNCTION Position */ +#define DWT_FUNCTION_FUNCTION_Msk (0xFUL /*<< DWT_FUNCTION_FUNCTION_Pos*/) /*!< DWT FUNCTION: FUNCTION Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). 
+ */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IM uint32_t FSCR; /*!< Offset: 0x308 (R/ ) Formatter Synchronization Counter Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t FIFO0; /*!< Offset: 0xEEC (R/ ) Integration ETM Data */ + __IM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/ ) ITATBCTR2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) ITATBCTR0 */ + __IM uint32_t FIFO1; /*!< Offset: 0xEFC (R/ ) Integration ITM Data */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) TPIU_DEVID */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) TPIU_DEVTYPE */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration ETM Data Register Definitions (FIFO0) */ +#define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */ +#define TPI_FIFO0_ITM_ATVALID_Msk (0x1UL << TPI_FIFO0_ITM_ATVALID_Pos) 
/*!< TPI FIFO0: ITM_ATVALID Mask */ + +#define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */ +#define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */ + +#define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */ +#define TPI_FIFO0_ETM_ATVALID_Msk (0x1UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ + +#define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */ +#define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */ + +#define TPI_FIFO0_ETM2_Pos 16U /*!< TPI FIFO0: ETM2 Position */ +#define TPI_FIFO0_ETM2_Msk (0xFFUL << TPI_FIFO0_ETM2_Pos) /*!< TPI FIFO0: ETM2 Mask */ + +#define TPI_FIFO0_ETM1_Pos 8U /*!< TPI FIFO0: ETM1 Position */ +#define TPI_FIFO0_ETM1_Msk (0xFFUL << TPI_FIFO0_ETM1_Pos) /*!< TPI FIFO0: ETM1 Mask */ + +#define TPI_FIFO0_ETM0_Pos 0U /*!< TPI FIFO0: ETM0 Position */ +#define TPI_FIFO0_ETM0_Msk (0xFFUL /*<< TPI_FIFO0_ETM0_Pos*/) /*!< TPI FIFO0: ETM0 Mask */ + +/* TPI ITATBCTR2 Register Definitions */ +#define TPI_ITATBCTR2_ATREADY2_Pos 0U /*!< TPI ITATBCTR2: ATREADY2 Position */ +#define TPI_ITATBCTR2_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2_Pos*/) /*!< TPI ITATBCTR2: ATREADY2 Mask */ + +#define TPI_ITATBCTR2_ATREADY1_Pos 0U /*!< TPI ITATBCTR2: ATREADY1 Position */ +#define TPI_ITATBCTR2_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1_Pos*/) /*!< TPI ITATBCTR2: ATREADY1 Mask */ + +/* TPI Integration ITM Data Register Definitions (FIFO1) */ +#define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */ +#define TPI_FIFO1_ITM_ATVALID_Msk (0x1UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ + +#define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */ +#define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */ + +#define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */ +#define TPI_FIFO1_ETM_ATVALID_Msk (0x1UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ + +#define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */ +#define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) /*!< TPI FIFO1: ETM_bytecount Mask */ + +#define TPI_FIFO1_ITM2_Pos 16U /*!< TPI FIFO1: ITM2 Position */ +#define TPI_FIFO1_ITM2_Msk (0xFFUL << TPI_FIFO1_ITM2_Pos) /*!< TPI FIFO1: ITM2 Mask */ + +#define TPI_FIFO1_ITM1_Pos 8U /*!< TPI FIFO1: ITM1 Position */ +#define TPI_FIFO1_ITM1_Msk (0xFFUL << TPI_FIFO1_ITM1_Pos) /*!< TPI FIFO1: ITM1 Mask */ + +#define TPI_FIFO1_ITM0_Pos 0U /*!< TPI FIFO1: ITM0 Position */ +#define TPI_FIFO1_ITM0_Msk (0xFFUL /*<< TPI_FIFO1_ITM0_Pos*/) /*!< TPI FIFO1: ITM0 Mask */ + +/* TPI ITATBCTR0 Register Definitions */ +#define TPI_ITATBCTR0_ATREADY2_Pos 0U /*!< TPI ITATBCTR0: ATREADY2 Position */ +#define TPI_ITATBCTR0_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2_Pos*/) /*!< TPI ITATBCTR0: ATREADY2 Mask */ + +#define TPI_ITATBCTR0_ATREADY1_Pos 0U /*!< TPI ITATBCTR0: ATREADY1 Position */ +#define TPI_ITATBCTR0_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1_Pos*/) /*!< TPI ITATBCTR0: ATREADY1 Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define 
TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_MinBufSz_Pos 6U /*!< TPI DEVID: MinBufSz Position */ +#define TPI_DEVID_MinBufSz_Msk (0x7UL << TPI_DEVID_MinBufSz_Pos) /*!< TPI DEVID: MinBufSz Mask */ + +#define TPI_DEVID_AsynClkIn_Pos 5U /*!< TPI DEVID: AsynClkIn Position */ +#define TPI_DEVID_AsynClkIn_Msk (0x1UL << TPI_DEVID_AsynClkIn_Pos) /*!< TPI DEVID: AsynClkIn Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define TPI_DEVID_NrTraceInput_Msk (0x1FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region RNRber Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Alias 1 Region Base Address Register */ + __IOM uint32_t RASR_A1; /*!< Offset: 0x018 (R/W) MPU Alias 1 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Alias 2 Region Base Address Register */ + __IOM uint32_t RASR_A2; /*!< Offset: 0x020 (R/W) MPU Alias 2 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Alias 3 Region Base Address Register */ + __IOM uint32_t RASR_A3; /*!< Offset: 0x028 (R/W) MPU Alias 3 Region Attribute and Size Register */ +} MPU_Type; + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 5U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0x7FFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/* MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX Mask */ + +#define 
MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug DHCSR: C_MASKINTS 
Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< CoreDebug DEMCR: VC_MMERR Mask */ + 
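Editor's note: a short, hedged sketch (not part of this commit) of the familiar cycle-counting idiom built from the CoreDebug_DEMCR_TRCENA_Msk definition above together with DWT_CTRL_CYCCNTENA_Msk and DWT->CYCCNT; the helper name measure_cycles_example is hypothetical, and the CoreDebug/DWT instance macros it uses are defined a little further down in this header.

static inline uint32_t measure_cycles_example(void)
{
  CoreDebug->DEMCR |= CoreDebug_DEMCR_TRCENA_Msk;   /* enable the DWT/ITM trace blocks  */
  DWT->CYCCNT = 0U;                                 /* clear the cycle counter          */
  DWT->CTRL  |= DWT_CTRL_CYCCNTENA_Msk;             /* start counting core clock cycles */

  /* ... code under measurement ... */

  return DWT->CYCCNT;                               /* cycles elapsed since the clear   */
}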
+#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ +#define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ +#define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ +#define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ +#define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ +#define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ +#define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ +#define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE) /*!< Core Debug configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHP[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IP[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHP[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + /* ARM Application Note 321 states that the M3 does not require the architectural barrier */ +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. 
In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_SC300_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/Drivers/CMSIS/Include/core_starmc1.h b/Drivers/CMSIS/Include/core_starmc1.h new file mode 100644 index 0000000..d86c8d3 --- /dev/null +++ b/Drivers/CMSIS/Include/core_starmc1.h @@ -0,0 +1,3592 @@ +/**************************************************************************//** + * @file core_starmc1.h + * @brief CMSIS ArmChina STAR-MC1 Core Peripheral Access Layer Header File + * @version V1.0.2 + * @date 07. April 2022 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. + * Copyright (c) 2018-2022 Arm China. + * All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_STAR_H_GENERIC +#define __CORE_STAR_H_GENERIC + +#include <stdint.h> + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup STAR-MC1 + @{ + */ + +#include "cmsis_version.h" + +/* Macro Define for STAR-MC1 */ +#define __STAR_MC (1U) /*!< STAR-MC Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined (__TI_VFP_SUPPORT__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define 
__FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_STAR_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_STAR_H_DEPENDANT +#define __CORE_STAR_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __STAR_REV + #define __STAR_REV 0x0000U + #warning "__STAR_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __ICACHE_PRESENT + #define __ICACHE_PRESENT 0U + #warning "__ICACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DCACHE_PRESENT + #define __DCACHE_PRESENT 0U + #warning "__DCACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DTCM_PRESENT + #define __DTCM_PRESENT 0U + #warning "__DTCM_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group STAR-MC1 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for STAR-MC1 processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t _reserved1:28; /*!< bit: 4..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
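+             \note    Editorial sketch, not part of the original CMSIS header: interrupt n is controlled by
+                      bit (n % 32) of word (n / 32) in each 16-word array, mirroring what __NVIC_EnableIRQ()
+                      does further below.
+             \code
+                NVIC->ISER[(uint32_t)IRQn >> 5UL] = (uint32_t)(1UL << ((uint32_t)IRQn & 0x1FUL));  // enable IRQn
+             \endcode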
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[5U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + uint32_t RESERVED0[1U]; + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control 
Register */ + uint32_t RESERVED_ADD1[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; + __OM uint32_t STIR; /*!< Offset: F00-D00=0x200 ( /W) Software Triggered Interrupt Register */ + uint32_t RESERVED4[15U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ +} SCB_Type; + +typedef struct +{ + __IOM uint32_t CACR; /*!< Offset: 0x0 (R/W) L1 Cache Control Register */ + __IOM uint32_t ITCMCR; /*!< Offset: 0x10 (R/W) Instruction Tightly-Coupled Memory Control Register */ + __IOM uint32_t DTCMCR; /*!< Offset: 0x14 (R/W) Data Tightly-Coupled Memory Control Registers */ +}EMSS_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: 
PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: 
SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << 
SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define 
SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE 
Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CPn_Pos 0U /*!< SCB NSACR: CPn Position */ +#define SCB_NSACR_CPn_Msk (1UL /*<< SCB_NSACR_CPn_Pos*/) /*!< SCB NSACR: CPn Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +#define SCB_CLIDR_IC_Pos 0U /*!< SCB CLIDR: IC Position */ +#define SCB_CLIDR_IC_Msk (1UL << SCB_CLIDR_IC_Pos) /*!< SCB CLIDR: IC Mask */ + +#define SCB_CLIDR_DC_Pos 1U /*!< SCB CLIDR: DC Position */ +#define SCB_CLIDR_DC_Msk (1UL << SCB_CLIDR_DC_Pos) /*!< SCB CLIDR: DC Mask */ + + + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position 
*/ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB D-Cache line Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_LEVEL_Pos 1U /*!< SCB DCISW: Level Position */ +#define SCB_DCISW_LEVEL_Msk (7UL << SCB_DCISW_LEVEL_Pos) /*!< SCB DCISW: Level Mask */ + +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0xFFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean line by Set-way Register Definitions */ +#define SCB_DCCSW_LEVEL_Pos 1U /*!< SCB DCCSW: Level Position */ +#define SCB_DCCSW_LEVEL_Msk (7UL << SCB_DCCSW_LEVEL_Pos) /*!< SCB DCCSW: Level Mask */ + +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0xFFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_LEVEL_Pos 1U /*!< SCB DCCISW: Level Position */ +#define SCB_DCCISW_LEVEL_Msk (7UL << SCB_DCCISW_LEVEL_Pos) /*!< SCB DCCISW: Level Mask */ + +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0xFFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/* ArmChina: Implementation Defined */ +/* Instruction Tightly-Coupled Memory Control Register 
Definitions */ +#define SCB_ITCMCR_SZ_Pos 3U /*!< SCB ITCMCR: SZ Position */ +#define SCB_ITCMCR_SZ_Msk (0xFUL << SCB_ITCMCR_SZ_Pos) /*!< SCB ITCMCR: SZ Mask */ + +#define SCB_ITCMCR_EN_Pos 0U /*!< SCB ITCMCR: EN Position */ +#define SCB_ITCMCR_EN_Msk (1UL /*<< SCB_ITCMCR_EN_Pos*/) /*!< SCB ITCMCR: EN Mask */ + +/* Data Tightly-Coupled Memory Control Register Definitions */ +#define SCB_DTCMCR_SZ_Pos 3U /*!< SCB DTCMCR: SZ Position */ +#define SCB_DTCMCR_SZ_Msk (0xFUL << SCB_DTCMCR_SZ_Pos) /*!< SCB DTCMCR: SZ Mask */ + +#define SCB_DTCMCR_EN_Pos 0U /*!< SCB DTCMCR: EN Position */ +#define SCB_DTCMCR_EN_Msk (1UL /*<< SCB_DTCMCR_EN_Pos*/) /*!< SCB DTCMCR: EN Mask */ + +/* L1 Cache Control Register Definitions */ +#define SCB_CACR_DCCLEAN_Pos 16U /*!< SCB CACR: DCCLEAN Position */ +#define SCB_CACR_DCCLEAN_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: DCCLEAN Mask */ + +#define SCB_CACR_ICACTIVE_Pos 13U /*!< SCB CACR: ICACTIVE Position */ +#define SCB_CACR_ICACTIVE_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: ICACTIVE Mask */ + +#define SCB_CACR_DCACTIVE_Pos 12U /*!< SCB CACR: DCACTIVE Position */ +#define SCB_CACR_DCACTIVE_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: DCACTIVE Mask */ + +#define SCB_CACR_FORCEWT_Pos 2U /*!< SCB CACR: FORCEWT Position */ +#define SCB_CACR_FORCEWT_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: FORCEWT Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
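+             \note    Editorial sketch, not part of the original CMSIS header: a minimal polled delay of one
+                      reload period, using only the fields and bit masks defined below.
+             \code
+                SysTick->LOAD = 0xFFFFUL;                                              // reload value (must fit in 24 bits)
+                SysTick->VAL  = 0UL;                                                   // clear current value and COUNTFLAG
+                SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | SysTick_CTRL_ENABLE_Msk;  // processor clock, no interrupt
+                while ((SysTick->CTRL & SysTick_CTRL_COUNTFLAG_Msk) == 0UL) { }        // wait until the counter wraps
+                SysTick->CTRL = 0UL;                                                   // stop the timer
+             \endcode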
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
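+             \note    Editorial sketch, not part of the original CMSIS header: ITM_SendChar(), defined later
+                      in this header, writes to PORT[0]; output is only produced when the debugger has
+                      enabled the ITM (TCR) and stimulus port 0 (TER).
+             \code
+                const char *s = "hello\n";
+                while (*s != '\0')
+                {
+                  (void)ITM_SendChar((uint32_t)*s++);   // blocks until the port FIFO is ready
+                }
+             \endcode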
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) ITM Device Architecture Register */ + uint32_t RESERVED6[4U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (0x1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (0x1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << 
ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator 
Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ + uint32_t RESERVED32[934U]; + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ + uint32_t RESERVED33[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (0x1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << 
DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT 
FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x1UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t ITFTTD0; /*!< Offset: 0xEEC (R/ ) Integration Test FIFO Test Data 0 Register */ + __IOM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/W) Integration Test ATB Control Register 2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) Integration Test ATB Control Register 0 */ + __IM uint32_t ITFTTD1; /*!< Offset: 0xEFC (R/ ) Integration Test FIFO Test Data 1 Register */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg 
Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration Test FIFO Test Data 0 Register Definitions */ +#define TPI_ITFTTD0_ATB_IF2_ATVALID_Pos 29U /*!< TPI ITFTTD0: ATB Interface 2 ATVALID Position */ +#define TPI_ITFTTD0_ATB_IF2_ATVALID_Msk (0x3UL << TPI_ITFTTD0_ATB_IF2_ATVALID_Pos) /*!< TPI ITFTTD0: ATB Interface 2 ATVALID Mask */ + +#define TPI_ITFTTD0_ATB_IF2_bytecount_Pos 27U /*!< TPI ITFTTD0: ATB Interface 2 byte count Position */ +#define TPI_ITFTTD0_ATB_IF2_bytecount_Msk (0x3UL << TPI_ITFTTD0_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 2 byte count Mask */ + +#define TPI_ITFTTD0_ATB_IF1_ATVALID_Pos 26U /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Position */ +#define TPI_ITFTTD0_ATB_IF1_ATVALID_Msk (0x3UL << TPI_ITFTTD0_ATB_IF1_ATVALID_Pos) /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Mask */ + +#define TPI_ITFTTD0_ATB_IF1_bytecount_Pos 24U /*!< TPI ITFTTD0: ATB Interface 1 byte count Position */ +#define TPI_ITFTTD0_ATB_IF1_bytecount_Msk (0x3UL << TPI_ITFTTD0_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 1 byte count Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data2_Pos 16U /*!< TPI ITFTTD0: ATB Interface 1 data2 Position */ +#define TPI_ITFTTD0_ATB_IF1_data2_Msk (0xFFUL << TPI_ITFTTD0_ATB_IF1_data2_Pos) /*!< TPI ITFTTD0: ATB Interface 1 data2 Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data1_Pos 8U /*!< TPI ITFTTD0: ATB Interface 1 data1 Position */ +#define TPI_ITFTTD0_ATB_IF1_data1_Msk (0xFFUL << TPI_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPI ITFTTD0: ATB Interface 1 data1 Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data0_Pos 0U /*!< TPI ITFTTD0: ATB Interface 1 data0 Position */ +#define TPI_ITFTTD0_ATB_IF1_data0_Msk (0xFFUL /*<< TPI_ITFTTD0_ATB_IF1_data0_Pos*/) /*!< TPI ITFTTD0: ATB Interface 1 data0 Mask */ + +/* TPI Integration Test ATB Control Register 2 Register Definitions */ +#define TPI_ITATBCTR2_AFVALID2S_Pos 1U /*!< TPI ITATBCTR2: AFVALID2S Position */ +#define TPI_ITATBCTR2_AFVALID2S_Msk (0x1UL << TPI_ITATBCTR2_AFVALID2S_Pos) /*!< TPI ITATBCTR2: AFVALID2S Mask */ + +#define TPI_ITATBCTR2_AFVALID1S_Pos 1U /*!< TPI ITATBCTR2: AFVALID1S Position */ +#define TPI_ITATBCTR2_AFVALID1S_Msk (0x1UL << TPI_ITATBCTR2_AFVALID1S_Pos) /*!< TPI ITATBCTR2: AFVALID1S Mask */ + +#define TPI_ITATBCTR2_ATREADY2S_Pos 0U /*!< TPI ITATBCTR2: ATREADY2S Position */ +#define TPI_ITATBCTR2_ATREADY2S_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2S_Pos*/) /*!< TPI ITATBCTR2: ATREADY2S Mask */ + +#define TPI_ITATBCTR2_ATREADY1S_Pos 0U /*!< TPI ITATBCTR2: ATREADY1S Position */ +#define TPI_ITATBCTR2_ATREADY1S_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1S_Pos*/) /*!< TPI ITATBCTR2: ATREADY1S Mask */ + +/* TPI Integration Test FIFO Test Data 1 Register Definitions */ +#define
TPI_ITFTTD1_ATB_IF2_ATVALID_Pos 29U /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Position */ +#define TPI_ITFTTD1_ATB_IF2_ATVALID_Msk (0x3UL << TPI_ITFTTD1_ATB_IF2_ATVALID_Pos) /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Mask */ + +#define TPI_ITFTTD1_ATB_IF2_bytecount_Pos 27U /*!< TPI ITFTTD1: ATB Interface 2 byte count Position */ +#define TPI_ITFTTD1_ATB_IF2_bytecount_Msk (0x3UL << TPI_ITFTTD1_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 2 byte count Mask */ + +#define TPI_ITFTTD1_ATB_IF1_ATVALID_Pos 26U /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Position */ +#define TPI_ITFTTD1_ATB_IF1_ATVALID_Msk (0x3UL << TPI_ITFTTD1_ATB_IF1_ATVALID_Pos) /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Mask */ + +#define TPI_ITFTTD1_ATB_IF1_bytecount_Pos 24U /*!< TPI ITFTTD1: ATB Interface 1 byte count Position */ +#define TPI_ITFTTD1_ATB_IF1_bytecount_Msk (0x3UL << TPI_ITFTTD1_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 1 byte count Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data2_Pos 16U /*!< TPI ITFTTD1: ATB Interface 2 data2 Position */ +#define TPI_ITFTTD1_ATB_IF2_data2_Msk (0xFFUL << TPI_ITFTTD1_ATB_IF2_data2_Pos) /*!< TPI ITFTTD1: ATB Interface 2 data2 Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data1_Pos 8U /*!< TPI ITFTTD1: ATB Interface 2 data1 Position */ +#define TPI_ITFTTD1_ATB_IF2_data1_Msk (0xFFUL << TPI_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPI ITFTTD1: ATB Interface 2 data1 Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data0_Pos 0U /*!< TPI ITFTTD1: ATB Interface 2 data0 Position */ +#define TPI_ITFTTD1_ATB_IF2_data0_Msk (0xFFUL /*<< TPI_ITFTTD1_ATB_IF2_data0_Pos*/) /*!< TPI ITFTTD1: ATB Interface 2 data0 Mask */ + +/* TPI Integration Test ATB Control Register 0 Definitions */ +#define TPI_ITATBCTR0_AFVALID2S_Pos 1U /*!< TPI ITATBCTR0: AFVALID2S Position */ +#define TPI_ITATBCTR0_AFVALID2S_Msk (0x1UL << TPI_ITATBCTR0_AFVALID2S_Pos) /*!< TPI ITATBCTR0: AFVALID2S Mask */ + +#define TPI_ITATBCTR0_AFVALID1S_Pos 1U /*!< TPI ITATBCTR0: AFVALID1S Position */ +#define TPI_ITATBCTR0_AFVALID1S_Msk (0x1UL << TPI_ITATBCTR0_AFVALID1S_Pos) /*!< TPI ITATBCTR0: AFVALID1S Mask */ + +#define TPI_ITATBCTR0_ATREADY2S_Pos 0U /*!< TPI ITATBCTR0: ATREADY2S Position */ +#define TPI_ITATBCTR0_ATREADY2S_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2S_Pos*/) /*!< TPI ITATBCTR0: ATREADY2S Mask */ + +#define TPI_ITATBCTR0_ATREADY1S_Pos 0U /*!< TPI ITATBCTR0: ATREADY1S Position */ +#define TPI_ITATBCTR0_ATREADY1S_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1S_Pos*/) /*!< TPI ITATBCTR0: ATREADY1S Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFOSZ Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFOSZ Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define
TPI_DEVID_NrTraceInput_Msk (0x3FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ 
+#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Disable Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
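+           A hedged sketch (executed from Secure state) of marking one SRAM range Non-secure,
+           assuming __SAUREGION_PRESENT == 1 and that SAU region 0 and the example addresses
+           below are free to use in the target system:
+           \code
+           SAU->RNR  = 0U;                                   // select SAU region 0
+           SAU->RBAR = 0x20040000UL & SAU_RBAR_BADDR_Msk;    // region base address (32-byte aligned)
+           SAU->RLAR = (0x2007FFE0UL & SAU_RLAR_LADDR_Msk) | // region limit address
+                       SAU_RLAR_ENABLE_Msk;                  // region enabled, not Non-secure callable
+           SAU->CTRL |= SAU_CTRL_ENABLE_Msk;                 // enable the SAU
+           \endcode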
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/* Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define 
SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege 
level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/* Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FP_rounding_modes_Pos 28U /*!< MVFR0: FP rounding modes bits Position */ +#define FPU_MVFR0_FP_rounding_modes_Msk (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos) /*!< MVFR0: FP rounding modes bits Mask */ + +#define FPU_MVFR0_Short_vectors_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_Short_vectors_Msk (0xFUL << FPU_MVFR0_Short_vectors_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_Square_root_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_Square_root_Msk (0xFUL << FPU_MVFR0_Square_root_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_Divide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_Divide_Msk (0xFUL << FPU_MVFR0_Divide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FP_excep_trapping_Pos 12U /*!< MVFR0: FP exception trapping bits Position */ +#define FPU_MVFR0_FP_excep_trapping_Msk (0xFUL << FPU_MVFR0_FP_excep_trapping_Pos) /*!< MVFR0: FP exception trapping bits Mask */ + +#define FPU_MVFR0_Double_precision_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_Double_precision_Msk (0xFUL << FPU_MVFR0_Double_precision_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_Single_precision_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_Single_precision_Msk (0xFUL << FPU_MVFR0_Single_precision_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_A_SIMD_registers_Pos 0U /*!< MVFR0: A_SIMD registers bits Position */ +#define FPU_MVFR0_A_SIMD_registers_Msk (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/) /*!< MVFR0: A_SIMD registers bits Mask */ + +/* Media and VFP Feature Register 1 Definitions */ +#define FPU_MVFR1_FP_fused_MAC_Pos 28U /*!< MVFR1: FP fused MAC bits Position */ +#define FPU_MVFR1_FP_fused_MAC_Msk (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos) /*!< MVFR1: FP fused MAC bits Mask */ + +#define FPU_MVFR1_FP_HPFP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FP_HPFP_Msk (0xFUL << FPU_MVFR1_FP_HPFP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_D_NaN_mode_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_D_NaN_mode_Msk (0xFUL << FPU_MVFR1_D_NaN_mode_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + 
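+/* A hedged sketch of using the MVFR0 fields above to check whether the FPU implements
+   double precision (a non-zero field value indicates support), assuming the standard
+   CMSIS instance name FPU for this register block:
+
+     uint32_t fpdp = (FPU->MVFR0 & FPU_MVFR0_Double_precision_Msk)
+                     >> FPU_MVFR0_Double_precision_Pos;
+     if (fpdp != 0UL)
+     {
+       // double-precision VFP load/store and arithmetic instructions are implemented
+     }
+*/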
+#define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/* Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: FPMisc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: FPMisc bits Mask */ + +/*@} end of group CMSIS_FPU */ + + + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (0x1UL << DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step 
control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (0x1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (0x1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (0x1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (0x1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (0x1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (0x1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (0x1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (0x1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (0x1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define 
DCB_DEMCR_VC_BUSERR_Msk (0x1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (0x1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (0x1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (0x1UL << DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (0x1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
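+           A hedged sketch of removing the CoreSight Software Lock before writing debug registers
+           from software, assuming the lock is implemented (DLSR.SLI reads 1) and that the usual
+           CoreSight access key 0xC5ACCE55 applies:
+           \code
+           if ((DIB->DLSR & DIB_DLSR_SLI_Msk) != 0UL)        // Software Lock implemented?
+           {
+             DIB->DLAR = 0xC5ACCE55UL;                       // write the unlock key
+           }
+           \endcode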
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type 
Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define EMSS_BASE (0xE001E000UL) /*!AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. 
+ \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 
1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. 
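/* Sketch of typical NVIC usage with the helpers above. 'TIMER0_IRQn' is a placeholder
   for whatever device-specific interrupt the device header actually defines; the
   priority values assume they fit in the implemented __NVIC_PRIO_BITS. */
static void example_nvic_setup(void)
{
  __NVIC_SetPriority(TIMER0_IRQn, 2U);      /* device interrupt, priority 2          */
  __NVIC_SetPriority(SysTick_IRQn, 3U);     /* processor exception (negative IRQn)   */
  __NVIC_EnableIRQ(TIMER0_IRQn);

  if (__NVIC_GetEnableIRQ(TIMER0_IRQn) != 0U)
  {
    __NVIC_SetPendingIRQ(TIMER0_IRQn);      /* software-trigger the interrupt        */
  }
}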
Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. 
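/* Sketch: combine priority grouping with an encoded preempt/sub priority
   ('TIMER0_IRQn' is again a placeholder for a real device interrupt). */
static void example_priority_grouping(void)
{
  uint32_t grouping = 5U;   /* e.g. with 4 priority bits: 2 preempt bits, 2 sub bits */
  uint32_t prio;

  __NVIC_SetPriorityGrouping(grouping);
  prio = NVIC_EncodePriority(grouping, 1U, 0U);   /* preempt = 1, sub = 0 */
  __NVIC_SetPriority(TIMER0_IRQn, prio);
}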
+ */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses including + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/** + \brief Software Reset + \details Initiates a system reset request to reset the CPU. + */ +__NO_RETURN __STATIC_INLINE void __SW_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses including + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_BFHFNMINS_Msk) | /* Keep BFHFNMINS unchanged. Use this Reset function in case your case need to keep it */ + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | /* Keep priority group unchanged */ + SCB_AIRCR_SYSRESETREQ_Msk ); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. 
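/* Sketch: secure-side configuration of a non-secure interrupt (CMSE builds only).
   'NS_PERIPH_IRQn' is a placeholder for the device interrupt handed to the
   non-secure world; it is not a CMSIS name. */
static void example_tz_nvic_setup(void)
{
  TZ_NVIC_SetPriorityGrouping_NS(5U);
  TZ_NVIC_EnableIRQ_NS(NS_PERIPH_IRQn);

  if (TZ_NVIC_GetEnableIRQ_NS(NS_PERIPH_IRQn) != 0U)
  {
    TZ_NVIC_SetPendingIRQ_NS(NS_PERIPH_IRQn);   /* pend it from the secure side */
  }
}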
+ */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. 
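/* Sketch: query the FPU configuration at run time with SCB_GetFPUType() and gate
   double-precision work on the result (illustrative decision points only). */
static void example_fpu_check(void)
{
  uint32_t fpu = SCB_GetFPUType();

  if (fpu == 2U)      { /* double + single precision available                */ }
  else if (fpu == 1U) { /* single precision only                              */ }
  else                { /* no FPU: keep to integer / software float code paths */ }
}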
+ */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + +#if ((defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)) || \ + (defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U))) + +/* ########################## Cache functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_CacheFunctions Cache Functions + \brief Functions that configure Instruction and Data cache. + @{ + */ + +/* Cache Size ID Register Macros */ +#define CCSIDR_WAYS(x) (((x) & SCB_CCSIDR_ASSOCIATIVITY_Msk) >> SCB_CCSIDR_ASSOCIATIVITY_Pos) +#define CCSIDR_SETS(x) (((x) & SCB_CCSIDR_NUMSETS_Msk ) >> SCB_CCSIDR_NUMSETS_Pos ) + +#define __SCB_DCACHE_LINE_SIZE 32U /*!< STAR-MC1 cache line size is fixed to 32 bytes (8 words). See also register SCB_CCSIDR */ +#define __SCB_ICACHE_LINE_SIZE 32U /*!< STAR-MC1 cache line size is fixed to 32 bytes (8 words). 
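/* Sketch: read the debug authentication status and decode two of the fields defined
   earlier in this header with _FLD2VAL; assumes the DIB is implemented on the device.
   The packing of the return value is arbitrary and only for illustration. */
static uint32_t example_debug_auth_status(void)
{
  uint32_t status = DIB_GetAuthStatus();

  uint32_t secure_invasive    = _FLD2VAL(DIB_DAUTHSTATUS_SID,  status);
  uint32_t nonsecure_invasive = _FLD2VAL(DIB_DAUTHSTATUS_NSID, status);

  return (secure_invasive << 8) | nonsecure_invasive;
}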
See also register SCB_CCSIDR */ + +/** + \brief Enable I-Cache + \details Turns on I-Cache + */ +__STATIC_FORCEINLINE void SCB_EnableICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + if (SCB->CCR & SCB_CCR_IC_Msk) return; /* return if ICache is already enabled */ + + __DSB(); + __ISB(); + SCB->ICIALLU = 0UL; /* invalidate I-Cache */ + __DSB(); + __ISB(); + SCB->CCR |= (uint32_t)SCB_CCR_IC_Msk; /* enable I-Cache */ + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Disable I-Cache + \details Turns off I-Cache + */ +__STATIC_FORCEINLINE void SCB_DisableICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + __DSB(); + __ISB(); + SCB->CCR &= ~(uint32_t)SCB_CCR_IC_Msk; /* disable I-Cache */ + SCB->ICIALLU = 0UL; /* invalidate I-Cache */ + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Invalidate I-Cache + \details Invalidates I-Cache + */ +__STATIC_FORCEINLINE void SCB_InvalidateICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + __DSB(); + __ISB(); + SCB->ICIALLU = 0UL; + __DSB(); + __ISB(); + #endif +} + + +/** + \brief I-Cache Invalidate by address + \details Invalidates I-Cache for the given address. + I-Cache is invalidated starting from a 32 byte aligned address in 32 byte granularity. + I-Cache memory blocks which are part of given address + given size are invalidated. + \param[in] addr address + \param[in] isize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_InvalidateICache_by_Addr (void *addr, int32_t isize) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + if ( isize > 0 ) { + int32_t op_size = isize + (((uint32_t)addr) & (__SCB_ICACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_ICACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->ICIMVAU = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_ICACHE_LINE_SIZE; + op_size -= __SCB_ICACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + + +/** + \brief Enable D-Cache + \details Turns on D-Cache + */ +__STATIC_FORCEINLINE void SCB_EnableDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + if (SCB->CCR & SCB_CCR_DC_Msk) return; /* return if DCache is already enabled */ + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCISW = (((sets << SCB_DCISW_SET_Pos) & SCB_DCISW_SET_Msk) | + ((ways << SCB_DCISW_WAY_Pos) & SCB_DCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + __DSB(); + + SCB->CCR |= (uint32_t)SCB_CCR_DC_Msk; /* enable D-Cache */ + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Disable D-Cache + \details Turns off D-Cache + */ +__STATIC_FORCEINLINE void SCB_DisableDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + SCB->CCR &= ~(uint32_t)SCB_CCR_DC_Msk; /* disable D-Cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean & invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & SCB_DCCISW_SET_Msk) | + 
((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Invalidate D-Cache + \details Invalidates D-Cache + */ +__STATIC_FORCEINLINE void SCB_InvalidateDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCISW = (((sets << SCB_DCISW_SET_Pos) & SCB_DCISW_SET_Msk) | + ((ways << SCB_DCISW_WAY_Pos) & SCB_DCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Clean D-Cache + \details Cleans D-Cache + */ +__STATIC_FORCEINLINE void SCB_CleanDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCSW = (((sets << SCB_DCCSW_SET_Pos) & SCB_DCCSW_SET_Msk) | + ((ways << SCB_DCCSW_WAY_Pos) & SCB_DCCSW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Clean & Invalidate D-Cache + \details Cleans and Invalidates D-Cache + */ +__STATIC_FORCEINLINE void SCB_CleanInvalidateDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean & invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & SCB_DCCISW_SET_Msk) | + ((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief D-Cache Invalidate by address + \details Invalidates D-Cache for the given address. + D-Cache is invalidated starting from a 32 byte aligned address in 32 byte granularity. + D-Cache memory blocks which are part of given address + given size are invalidated. + \param[in] addr address + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_InvalidateDCache_by_Addr (void *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + if ( dsize > 0 ) { + int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->DCIMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_DCACHE_LINE_SIZE; + op_size -= __SCB_DCACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + + +/** + \brief D-Cache Clean by address + \details Cleans D-Cache for the given address + D-Cache is cleaned starting from a 32 byte aligned address in 32 byte granularity. 
+ D-Cache memory blocks which are part of given address + given size are cleaned. + \param[in] addr address + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_CleanDCache_by_Addr (uint32_t *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + if ( dsize > 0 ) { + int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->DCCMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_DCACHE_LINE_SIZE; + op_size -= __SCB_DCACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + + +/** + \brief D-Cache Clean and Invalidate by address + \details Cleans and invalidates D_Cache for the given address + D-Cache is cleaned and invalidated starting from a 32 byte aligned address in 32 byte granularity. + D-Cache memory blocks which are part of given address + given size are cleaned and invalidated. + \param[in] addr address (aligned to 32-byte boundary) + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_CleanInvalidateDCache_by_Addr (uint32_t *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + if ( dsize > 0 ) { + int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->DCCIMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_DCACHE_LINE_SIZE; + op_size -= __SCB_DCACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + +/*@} end of CMSIS_Core_CacheFunctions */ +#endif + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
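/* Sketch: typical D-cache maintenance around a DMA transfer using the functions above.
   'dma_start_tx' is a placeholder for whatever driver actually programs the DMA
   controller; the buffer is 32-byte aligned to match the cache line size used here. */
#define EX_BUF_SIZE  64U

static uint8_t ex_buf[EX_BUF_SIZE] __ALIGNED(32);

static void example_dma_cache_maintenance(void)
{
  SCB_EnableICache();
  SCB_EnableDCache();

  /* CPU -> peripheral: push dirty lines to RAM before the DMA reads the buffer. */
  SCB_CleanDCache_by_Addr((uint32_t *)ex_buf, (int32_t)EX_BUF_SIZE);
  /* dma_start_tx(ex_buf, EX_BUF_SIZE); */

  /* peripheral -> CPU: drop stale lines before reading what the DMA wrote. */
  SCB_InvalidateDCache_by_Addr(ex_buf, (int32_t)EX_BUF_SIZE);
  /* ... read ex_buf ... */
}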
+ */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. 
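/* Sketch: a 1 ms system tick, assuming the usual CMSIS 'SystemCoreClock' variable is
   maintained by the device's system_<device>.c and that the startup code provides a
   weak SysTick_Handler that this definition overrides. */
static volatile uint32_t ex_tick_count = 0U;

void SysTick_Handler(void)
{
  ex_tick_count++;                            /* one increment per millisecond */
}

static void example_systick_init(void)
{
  if (SysTick_Config(SystemCoreClock / 1000U) != 0U)
  {
    /* reload value does not fit in the 24-bit SysTick counter */
    while (1) { __NOP(); }
  }
}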
+ */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_STAR_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/Drivers/CMSIS/Include/mpu_armv7.h b/Drivers/CMSIS/Include/mpu_armv7.h new file mode 100644 index 0000000..d9eedf8 --- /dev/null +++ b/Drivers/CMSIS/Include/mpu_armv7.h @@ -0,0 +1,275 @@ +/****************************************************************************** + * @file mpu_armv7.h + * @brief CMSIS MPU API for Armv7-M MPU + * @version V5.1.2 + * @date 25. May 2020 + ******************************************************************************/ +/* + * Copyright (c) 2017-2020 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
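/* Sketch: retarget character I/O to the ITM functions above; a debugger with SWO
   tracing enabled sees the output on stimulus port 0. 'retarget_putc'/'retarget_getc'
   are illustrative names, not part of CMSIS. */
static int retarget_putc(int ch)
{
  return (int)ITM_SendChar((uint32_t)ch);
}

static int retarget_getc(void)
{
  while (ITM_CheckChar() == 0)
  {
    /* wait until the debugger writes ITM_RxBuffer */
  }
  return (int)ITM_ReceiveChar();
}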
+ */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef ARM_MPU_ARMV7_H +#define ARM_MPU_ARMV7_H + +#define ARM_MPU_REGION_SIZE_32B ((uint8_t)0x04U) ///!< MPU Region Size 32 Bytes +#define ARM_MPU_REGION_SIZE_64B ((uint8_t)0x05U) ///!< MPU Region Size 64 Bytes +#define ARM_MPU_REGION_SIZE_128B ((uint8_t)0x06U) ///!< MPU Region Size 128 Bytes +#define ARM_MPU_REGION_SIZE_256B ((uint8_t)0x07U) ///!< MPU Region Size 256 Bytes +#define ARM_MPU_REGION_SIZE_512B ((uint8_t)0x08U) ///!< MPU Region Size 512 Bytes +#define ARM_MPU_REGION_SIZE_1KB ((uint8_t)0x09U) ///!< MPU Region Size 1 KByte +#define ARM_MPU_REGION_SIZE_2KB ((uint8_t)0x0AU) ///!< MPU Region Size 2 KBytes +#define ARM_MPU_REGION_SIZE_4KB ((uint8_t)0x0BU) ///!< MPU Region Size 4 KBytes +#define ARM_MPU_REGION_SIZE_8KB ((uint8_t)0x0CU) ///!< MPU Region Size 8 KBytes +#define ARM_MPU_REGION_SIZE_16KB ((uint8_t)0x0DU) ///!< MPU Region Size 16 KBytes +#define ARM_MPU_REGION_SIZE_32KB ((uint8_t)0x0EU) ///!< MPU Region Size 32 KBytes +#define ARM_MPU_REGION_SIZE_64KB ((uint8_t)0x0FU) ///!< MPU Region Size 64 KBytes +#define ARM_MPU_REGION_SIZE_128KB ((uint8_t)0x10U) ///!< MPU Region Size 128 KBytes +#define ARM_MPU_REGION_SIZE_256KB ((uint8_t)0x11U) ///!< MPU Region Size 256 KBytes +#define ARM_MPU_REGION_SIZE_512KB ((uint8_t)0x12U) ///!< MPU Region Size 512 KBytes +#define ARM_MPU_REGION_SIZE_1MB ((uint8_t)0x13U) ///!< MPU Region Size 1 MByte +#define ARM_MPU_REGION_SIZE_2MB ((uint8_t)0x14U) ///!< MPU Region Size 2 MBytes +#define ARM_MPU_REGION_SIZE_4MB ((uint8_t)0x15U) ///!< MPU Region Size 4 MBytes +#define ARM_MPU_REGION_SIZE_8MB ((uint8_t)0x16U) ///!< MPU Region Size 8 MBytes +#define ARM_MPU_REGION_SIZE_16MB ((uint8_t)0x17U) ///!< MPU Region Size 16 MBytes +#define ARM_MPU_REGION_SIZE_32MB ((uint8_t)0x18U) ///!< MPU Region Size 32 MBytes +#define ARM_MPU_REGION_SIZE_64MB ((uint8_t)0x19U) ///!< MPU Region Size 64 MBytes +#define ARM_MPU_REGION_SIZE_128MB ((uint8_t)0x1AU) ///!< MPU Region Size 128 MBytes +#define ARM_MPU_REGION_SIZE_256MB ((uint8_t)0x1BU) ///!< MPU Region Size 256 MBytes +#define ARM_MPU_REGION_SIZE_512MB ((uint8_t)0x1CU) ///!< MPU Region Size 512 MBytes +#define ARM_MPU_REGION_SIZE_1GB ((uint8_t)0x1DU) ///!< MPU Region Size 1 GByte +#define ARM_MPU_REGION_SIZE_2GB ((uint8_t)0x1EU) ///!< MPU Region Size 2 GBytes +#define ARM_MPU_REGION_SIZE_4GB ((uint8_t)0x1FU) ///!< MPU Region Size 4 GBytes + +#define ARM_MPU_AP_NONE 0U ///!< MPU Access Permission no access +#define ARM_MPU_AP_PRIV 1U ///!< MPU Access Permission privileged access only +#define ARM_MPU_AP_URO 2U ///!< MPU Access Permission unprivileged access read-only +#define ARM_MPU_AP_FULL 3U ///!< MPU Access Permission full access +#define ARM_MPU_AP_PRO 5U ///!< MPU Access Permission privileged access read-only +#define ARM_MPU_AP_RO 6U ///!< MPU Access Permission read-only access + +/** MPU Region Base Address Register Value +* +* \param Region The region to be configured, number 0 to 15. +* \param BaseAddress The base address for the region. +*/ +#define ARM_MPU_RBAR(Region, BaseAddress) \ + (((BaseAddress) & MPU_RBAR_ADDR_Msk) | \ + ((Region) & MPU_RBAR_REGION_Msk) | \ + (MPU_RBAR_VALID_Msk)) + +/** +* MPU Memory Access Attributes +* +* \param TypeExtField Type extension field, allows you to configure memory access type, for example strongly ordered, peripheral. 
+* \param IsShareable Region is shareable between multiple bus masters. +* \param IsCacheable Region is cacheable, i.e. its value may be kept in cache. +* \param IsBufferable Region is bufferable, i.e. using write-back caching. Cacheable but non-bufferable regions use write-through policy. +*/ +#define ARM_MPU_ACCESS_(TypeExtField, IsShareable, IsCacheable, IsBufferable) \ + ((((TypeExtField) << MPU_RASR_TEX_Pos) & MPU_RASR_TEX_Msk) | \ + (((IsShareable) << MPU_RASR_S_Pos) & MPU_RASR_S_Msk) | \ + (((IsCacheable) << MPU_RASR_C_Pos) & MPU_RASR_C_Msk) | \ + (((IsBufferable) << MPU_RASR_B_Pos) & MPU_RASR_B_Msk)) + +/** +* MPU Region Attribute and Size Register Value +* +* \param DisableExec Instruction access disable bit, 1= disable instruction fetches. +* \param AccessPermission Data access permissions, allows you to configure read/write access for User and Privileged mode. +* \param AccessAttributes Memory access attribution, see \ref ARM_MPU_ACCESS_. +* \param SubRegionDisable Sub-region disable field. +* \param Size Region size of the region to be configured, for example 4K, 8K. +*/ +#define ARM_MPU_RASR_EX(DisableExec, AccessPermission, AccessAttributes, SubRegionDisable, Size) \ + ((((DisableExec) << MPU_RASR_XN_Pos) & MPU_RASR_XN_Msk) | \ + (((AccessPermission) << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk) | \ + (((AccessAttributes) & (MPU_RASR_TEX_Msk | MPU_RASR_S_Msk | MPU_RASR_C_Msk | MPU_RASR_B_Msk))) | \ + (((SubRegionDisable) << MPU_RASR_SRD_Pos) & MPU_RASR_SRD_Msk) | \ + (((Size) << MPU_RASR_SIZE_Pos) & MPU_RASR_SIZE_Msk) | \ + (((MPU_RASR_ENABLE_Msk)))) + +/** +* MPU Region Attribute and Size Register Value +* +* \param DisableExec Instruction access disable bit, 1= disable instruction fetches. +* \param AccessPermission Data access permissions, allows you to configure read/write access for User and Privileged mode. +* \param TypeExtField Type extension field, allows you to configure memory access type, for example strongly ordered, peripheral. +* \param IsShareable Region is shareable between multiple bus masters. +* \param IsCacheable Region is cacheable, i.e. its value may be kept in cache. +* \param IsBufferable Region is bufferable, i.e. using write-back caching. Cacheable but non-bufferable regions use write-through policy. +* \param SubRegionDisable Sub-region disable field. +* \param Size Region size of the region to be configured, for example 4K, 8K. +*/ +#define ARM_MPU_RASR(DisableExec, AccessPermission, TypeExtField, IsShareable, IsCacheable, IsBufferable, SubRegionDisable, Size) \ + ARM_MPU_RASR_EX(DisableExec, AccessPermission, ARM_MPU_ACCESS_(TypeExtField, IsShareable, IsCacheable, IsBufferable), SubRegionDisable, Size) + +/** +* MPU Memory Access Attribute for strongly ordered memory. +* - TEX: 000b +* - Shareable +* - Non-cacheable +* - Non-bufferable +*/ +#define ARM_MPU_ACCESS_ORDERED ARM_MPU_ACCESS_(0U, 1U, 0U, 0U) + +/** +* MPU Memory Access Attribute for device memory. +* - TEX: 000b (if shareable) or 010b (if non-shareable) +* - Shareable or non-shareable +* - Non-cacheable +* - Bufferable (if shareable) or non-bufferable (if non-shareable) +* +* \param IsShareable Configures the device memory as shareable or non-shareable. +*/ +#define ARM_MPU_ACCESS_DEVICE(IsShareable) ((IsShareable) ? ARM_MPU_ACCESS_(0U, 1U, 0U, 1U) : ARM_MPU_ACCESS_(2U, 0U, 0U, 0U)) + +/** +* MPU Memory Access Attribute for normal memory. 
+* - TEX: 1BBb (reflecting outer cacheability rules) +* - Shareable or non-shareable +* - Cacheable or non-cacheable (reflecting inner cacheability rules) +* - Bufferable or non-bufferable (reflecting inner cacheability rules) +* +* \param OuterCp Configures the outer cache policy. +* \param InnerCp Configures the inner cache policy. +* \param IsShareable Configures the memory as shareable or non-shareable. +*/ +#define ARM_MPU_ACCESS_NORMAL(OuterCp, InnerCp, IsShareable) ARM_MPU_ACCESS_((4U | (OuterCp)), IsShareable, ((InnerCp) >> 1U), ((InnerCp) & 1U)) + +/** +* MPU Memory Access Attribute non-cacheable policy. +*/ +#define ARM_MPU_CACHEP_NOCACHE 0U + +/** +* MPU Memory Access Attribute write-back, write and read allocate policy. +*/ +#define ARM_MPU_CACHEP_WB_WRA 1U + +/** +* MPU Memory Access Attribute write-through, no write allocate policy. +*/ +#define ARM_MPU_CACHEP_WT_NWA 2U + +/** +* MPU Memory Access Attribute write-back, no write allocate policy. +*/ +#define ARM_MPU_CACHEP_WB_NWA 3U + + +/** +* Struct for a single MPU Region +*/ +typedef struct { + uint32_t RBAR; //!< The region base address register value (RBAR) + uint32_t RASR; //!< The region attribute and size register value (RASR) \ref MPU_RASR +} ARM_MPU_Region_t; + +/** Enable the MPU. +* \param MPU_Control Default access permissions for unconfigured regions. +*/ +__STATIC_INLINE void ARM_MPU_Enable(uint32_t MPU_Control) +{ + __DMB(); + MPU->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk; +#ifdef SCB_SHCSR_MEMFAULTENA_Msk + SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk; +#endif + __DSB(); + __ISB(); +} + +/** Disable the MPU. +*/ +__STATIC_INLINE void ARM_MPU_Disable(void) +{ + __DMB(); +#ifdef SCB_SHCSR_MEMFAULTENA_Msk + SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk; +#endif + MPU->CTRL &= ~MPU_CTRL_ENABLE_Msk; + __DSB(); + __ISB(); +} + +/** Clear and disable the given MPU region. +* \param rnr Region number to be cleared. +*/ +__STATIC_INLINE void ARM_MPU_ClrRegion(uint32_t rnr) +{ + MPU->RNR = rnr; + MPU->RASR = 0U; +} + +/** Configure an MPU region. +* \param rbar Value for RBAR register. +* \param rasr Value for RASR register. +*/ +__STATIC_INLINE void ARM_MPU_SetRegion(uint32_t rbar, uint32_t rasr) +{ + MPU->RBAR = rbar; + MPU->RASR = rasr; +} + +/** Configure the given MPU region. +* \param rnr Region number to be configured. +* \param rbar Value for RBAR register. +* \param rasr Value for RASR register. +*/ +__STATIC_INLINE void ARM_MPU_SetRegionEx(uint32_t rnr, uint32_t rbar, uint32_t rasr) +{ + MPU->RNR = rnr; + MPU->RBAR = rbar; + MPU->RASR = rasr; +} + +/** Memcpy with strictly ordered memory access, e.g. used by code in ARM_MPU_Load(). +* \param dst Destination data is copied to. +* \param src Source data is copied from. +* \param len Amount of data words to be copied. +*/ +__STATIC_INLINE void ARM_MPU_OrderedMemcpy(volatile uint32_t* dst, const uint32_t* __RESTRICT src, uint32_t len) +{ + uint32_t i; + for (i = 0U; i < len; ++i) + { + dst[i] = src[i]; + } +} + +/** Load the given number of MPU regions from a table. +* \param table Pointer to the MPU configuration table. +* \param cnt Amount of regions to be configured. 
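/* Sketch of the Armv7-M MPU API above: mark a 64 KB SRAM window at 0x20000000 (the
   usual Cortex-M SRAM base, here only an example address) as normal, non-shareable,
   write-back memory in region 0, then enable the MPU with the default memory map kept
   for privileged code (PRIVDEFENA). */
static void example_mpu_armv7_setup(void)
{
  ARM_MPU_Disable();

  ARM_MPU_SetRegion(
    ARM_MPU_RBAR(0U, 0x20000000U),
    ARM_MPU_RASR(0U,                    /* instruction fetches allowed                 */
                 ARM_MPU_AP_FULL,       /* full access, privileged and unprivileged    */
                 0U, 0U, 1U, 1U,        /* TEX=0, non-shareable, cacheable, bufferable */
                 0x00U,                 /* no sub-regions disabled                     */
                 ARM_MPU_REGION_SIZE_64KB));

  ARM_MPU_Enable(MPU_CTRL_PRIVDEFENA_Msk);
}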
+*/ +__STATIC_INLINE void ARM_MPU_Load(ARM_MPU_Region_t const* table, uint32_t cnt) +{ + const uint32_t rowWordSize = sizeof(ARM_MPU_Region_t)/4U; + while (cnt > MPU_TYPE_RALIASES) { + ARM_MPU_OrderedMemcpy(&(MPU->RBAR), &(table->RBAR), MPU_TYPE_RALIASES*rowWordSize); + table += MPU_TYPE_RALIASES; + cnt -= MPU_TYPE_RALIASES; + } + ARM_MPU_OrderedMemcpy(&(MPU->RBAR), &(table->RBAR), cnt*rowWordSize); +} + +#endif diff --git a/Drivers/CMSIS/Include/mpu_armv8.h b/Drivers/CMSIS/Include/mpu_armv8.h new file mode 100644 index 0000000..3de16ef --- /dev/null +++ b/Drivers/CMSIS/Include/mpu_armv8.h @@ -0,0 +1,352 @@ +/****************************************************************************** + * @file mpu_armv8.h + * @brief CMSIS MPU API for Armv8-M and Armv8.1-M MPU + * @version V5.1.3 + * @date 03. February 2021 + ******************************************************************************/ +/* + * Copyright (c) 2017-2021 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef ARM_MPU_ARMV8_H +#define ARM_MPU_ARMV8_H + +/** \brief Attribute for device memory (outer only) */ +#define ARM_MPU_ATTR_DEVICE ( 0U ) + +/** \brief Attribute for non-cacheable, normal memory */ +#define ARM_MPU_ATTR_NON_CACHEABLE ( 4U ) + +/** \brief Attribute for normal memory (outer and inner) +* \param NT Non-Transient: Set to 1 for non-transient data. +* \param WB Write-Back: Set to 1 to use write-back update policy. +* \param RA Read Allocation: Set to 1 to use cache allocation on read miss. +* \param WA Write Allocation: Set to 1 to use cache allocation on write miss. +*/ +#define ARM_MPU_ATTR_MEMORY_(NT, WB, RA, WA) \ + ((((NT) & 1U) << 3U) | (((WB) & 1U) << 2U) | (((RA) & 1U) << 1U) | ((WA) & 1U)) + +/** \brief Device memory type non Gathering, non Re-ordering, non Early Write Acknowledgement */ +#define ARM_MPU_ATTR_DEVICE_nGnRnE (0U) + +/** \brief Device memory type non Gathering, non Re-ordering, Early Write Acknowledgement */ +#define ARM_MPU_ATTR_DEVICE_nGnRE (1U) + +/** \brief Device memory type non Gathering, Re-ordering, Early Write Acknowledgement */ +#define ARM_MPU_ATTR_DEVICE_nGRE (2U) + +/** \brief Device memory type Gathering, Re-ordering, Early Write Acknowledgement */ +#define ARM_MPU_ATTR_DEVICE_GRE (3U) + +/** \brief Memory Attribute +* \param O Outer memory attributes +* \param I O == ARM_MPU_ATTR_DEVICE: Device memory attributes, else: Inner memory attributes +*/ +#define ARM_MPU_ATTR(O, I) ((((O) & 0xFU) << 4U) | ((((O) & 0xFU) != 0U) ? 
((I) & 0xFU) : (((I) & 0x3U) << 2U))) + +/** \brief Normal memory non-shareable */ +#define ARM_MPU_SH_NON (0U) + +/** \brief Normal memory outer shareable */ +#define ARM_MPU_SH_OUTER (2U) + +/** \brief Normal memory inner shareable */ +#define ARM_MPU_SH_INNER (3U) + +/** \brief Memory access permissions +* \param RO Read-Only: Set to 1 for read-only memory. +* \param NP Non-Privileged: Set to 1 for non-privileged memory. +*/ +#define ARM_MPU_AP_(RO, NP) ((((RO) & 1U) << 1U) | ((NP) & 1U)) + +/** \brief Region Base Address Register value +* \param BASE The base address bits [31:5] of a memory region. The value is zero extended. Effective address gets 32 byte aligned. +* \param SH Defines the Shareability domain for this memory region. +* \param RO Read-Only: Set to 1 for a read-only memory region. +* \param NP Non-Privileged: Set to 1 for a non-privileged memory region. +* \oaram XN eXecute Never: Set to 1 for a non-executable memory region. +*/ +#define ARM_MPU_RBAR(BASE, SH, RO, NP, XN) \ + (((BASE) & MPU_RBAR_BASE_Msk) | \ + (((SH) << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | \ + ((ARM_MPU_AP_(RO, NP) << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk) | \ + (((XN) << MPU_RBAR_XN_Pos) & MPU_RBAR_XN_Msk)) + +/** \brief Region Limit Address Register value +* \param LIMIT The limit address bits [31:5] for this memory region. The value is one extended. +* \param IDX The attribute index to be associated with this memory region. +*/ +#define ARM_MPU_RLAR(LIMIT, IDX) \ + (((LIMIT) & MPU_RLAR_LIMIT_Msk) | \ + (((IDX) << MPU_RLAR_AttrIndx_Pos) & MPU_RLAR_AttrIndx_Msk) | \ + (MPU_RLAR_EN_Msk)) + +#if defined(MPU_RLAR_PXN_Pos) + +/** \brief Region Limit Address Register with PXN value +* \param LIMIT The limit address bits [31:5] for this memory region. The value is one extended. +* \param PXN Privileged execute never. Defines whether code can be executed from this privileged region. +* \param IDX The attribute index to be associated with this memory region. +*/ +#define ARM_MPU_RLAR_PXN(LIMIT, PXN, IDX) \ + (((LIMIT) & MPU_RLAR_LIMIT_Msk) | \ + (((PXN) << MPU_RLAR_PXN_Pos) & MPU_RLAR_PXN_Msk) | \ + (((IDX) << MPU_RLAR_AttrIndx_Pos) & MPU_RLAR_AttrIndx_Msk) | \ + (MPU_RLAR_EN_Msk)) + +#endif + +/** +* Struct for a single MPU Region +*/ +typedef struct { + uint32_t RBAR; /*!< Region Base Address Register value */ + uint32_t RLAR; /*!< Region Limit Address Register value */ +} ARM_MPU_Region_t; + +/** Enable the MPU. +* \param MPU_Control Default access permissions for unconfigured regions. +*/ +__STATIC_INLINE void ARM_MPU_Enable(uint32_t MPU_Control) +{ + __DMB(); + MPU->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk; +#ifdef SCB_SHCSR_MEMFAULTENA_Msk + SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk; +#endif + __DSB(); + __ISB(); +} + +/** Disable the MPU. +*/ +__STATIC_INLINE void ARM_MPU_Disable(void) +{ + __DMB(); +#ifdef SCB_SHCSR_MEMFAULTENA_Msk + SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk; +#endif + MPU->CTRL &= ~MPU_CTRL_ENABLE_Msk; + __DSB(); + __ISB(); +} + +#ifdef MPU_NS +/** Enable the Non-secure MPU. +* \param MPU_Control Default access permissions for unconfigured regions. +*/ +__STATIC_INLINE void ARM_MPU_Enable_NS(uint32_t MPU_Control) +{ + __DMB(); + MPU_NS->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk; +#ifdef SCB_SHCSR_MEMFAULTENA_Msk + SCB_NS->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk; +#endif + __DSB(); + __ISB(); +} + +/** Disable the Non-secure MPU. 
+*/ +__STATIC_INLINE void ARM_MPU_Disable_NS(void) +{ + __DMB(); +#ifdef SCB_SHCSR_MEMFAULTENA_Msk + SCB_NS->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk; +#endif + MPU_NS->CTRL &= ~MPU_CTRL_ENABLE_Msk; + __DSB(); + __ISB(); +} +#endif + +/** Set the memory attribute encoding to the given MPU. +* \param mpu Pointer to the MPU to be configured. +* \param idx The attribute index to be set [0-7] +* \param attr The attribute value to be set. +*/ +__STATIC_INLINE void ARM_MPU_SetMemAttrEx(MPU_Type* mpu, uint8_t idx, uint8_t attr) +{ + const uint8_t reg = idx / 4U; + const uint32_t pos = ((idx % 4U) * 8U); + const uint32_t mask = 0xFFU << pos; + + if (reg >= (sizeof(mpu->MAIR) / sizeof(mpu->MAIR[0]))) { + return; // invalid index + } + + mpu->MAIR[reg] = ((mpu->MAIR[reg] & ~mask) | ((attr << pos) & mask)); +} + +/** Set the memory attribute encoding. +* \param idx The attribute index to be set [0-7] +* \param attr The attribute value to be set. +*/ +__STATIC_INLINE void ARM_MPU_SetMemAttr(uint8_t idx, uint8_t attr) +{ + ARM_MPU_SetMemAttrEx(MPU, idx, attr); +} + +#ifdef MPU_NS +/** Set the memory attribute encoding to the Non-secure MPU. +* \param idx The attribute index to be set [0-7] +* \param attr The attribute value to be set. +*/ +__STATIC_INLINE void ARM_MPU_SetMemAttr_NS(uint8_t idx, uint8_t attr) +{ + ARM_MPU_SetMemAttrEx(MPU_NS, idx, attr); +} +#endif + +/** Clear and disable the given MPU region of the given MPU. +* \param mpu Pointer to MPU to be used. +* \param rnr Region number to be cleared. +*/ +__STATIC_INLINE void ARM_MPU_ClrRegionEx(MPU_Type* mpu, uint32_t rnr) +{ + mpu->RNR = rnr; + mpu->RLAR = 0U; +} + +/** Clear and disable the given MPU region. +* \param rnr Region number to be cleared. +*/ +__STATIC_INLINE void ARM_MPU_ClrRegion(uint32_t rnr) +{ + ARM_MPU_ClrRegionEx(MPU, rnr); +} + +#ifdef MPU_NS +/** Clear and disable the given Non-secure MPU region. +* \param rnr Region number to be cleared. +*/ +__STATIC_INLINE void ARM_MPU_ClrRegion_NS(uint32_t rnr) +{ + ARM_MPU_ClrRegionEx(MPU_NS, rnr); +} +#endif + +/** Configure the given MPU region of the given MPU. +* \param mpu Pointer to MPU to be used. +* \param rnr Region number to be configured. +* \param rbar Value for RBAR register. +* \param rlar Value for RLAR register. +*/ +__STATIC_INLINE void ARM_MPU_SetRegionEx(MPU_Type* mpu, uint32_t rnr, uint32_t rbar, uint32_t rlar) +{ + mpu->RNR = rnr; + mpu->RBAR = rbar; + mpu->RLAR = rlar; +} + +/** Configure the given MPU region. +* \param rnr Region number to be configured. +* \param rbar Value for RBAR register. +* \param rlar Value for RLAR register. +*/ +__STATIC_INLINE void ARM_MPU_SetRegion(uint32_t rnr, uint32_t rbar, uint32_t rlar) +{ + ARM_MPU_SetRegionEx(MPU, rnr, rbar, rlar); +} + +#ifdef MPU_NS +/** Configure the given Non-secure MPU region. +* \param rnr Region number to be configured. +* \param rbar Value for RBAR register. +* \param rlar Value for RLAR register. +*/ +__STATIC_INLINE void ARM_MPU_SetRegion_NS(uint32_t rnr, uint32_t rbar, uint32_t rlar) +{ + ARM_MPU_SetRegionEx(MPU_NS, rnr, rbar, rlar); +} +#endif + +/** Memcpy with strictly ordered memory access, e.g. used by code in ARM_MPU_LoadEx() +* \param dst Destination data is copied to. +* \param src Source data is copied from. +* \param len Amount of data words to be copied. 
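/* Sketch of the Armv8-M flavour of the same idea: define attribute index 0 as normal
   write-back memory, map 0x20000000..0x2000FFFF (example addresses) as read/write,
   unprivileged-accessible, execute-never, then enable the MPU with PRIVDEFENA. */
static void example_mpu_armv8_setup(void)
{
  ARM_MPU_Disable();

  ARM_MPU_SetMemAttr(0U, ARM_MPU_ATTR(ARM_MPU_ATTR_MEMORY_(1U, 1U, 1U, 1U),   /* outer */
                                      ARM_MPU_ATTR_MEMORY_(1U, 1U, 1U, 1U))); /* inner */

  ARM_MPU_SetRegion(0U,
    ARM_MPU_RBAR(0x20000000U, ARM_MPU_SH_NON,
                 0U /* not read-only */, 1U /* non-privileged access */, 1U /* XN */),
    ARM_MPU_RLAR(0x2000FFFFU, 0U /* attribute index 0 */));

  ARM_MPU_Enable(MPU_CTRL_PRIVDEFENA_Msk);
}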
+*/ +__STATIC_INLINE void ARM_MPU_OrderedMemcpy(volatile uint32_t* dst, const uint32_t* __RESTRICT src, uint32_t len) +{ + uint32_t i; + for (i = 0U; i < len; ++i) + { + dst[i] = src[i]; + } +} + +/** Load the given number of MPU regions from a table to the given MPU. +* \param mpu Pointer to the MPU registers to be used. +* \param rnr First region number to be configured. +* \param table Pointer to the MPU configuration table. +* \param cnt Amount of regions to be configured. +*/ +__STATIC_INLINE void ARM_MPU_LoadEx(MPU_Type* mpu, uint32_t rnr, ARM_MPU_Region_t const* table, uint32_t cnt) +{ + const uint32_t rowWordSize = sizeof(ARM_MPU_Region_t)/4U; + if (cnt == 1U) { + mpu->RNR = rnr; + ARM_MPU_OrderedMemcpy(&(mpu->RBAR), &(table->RBAR), rowWordSize); + } else { + uint32_t rnrBase = rnr & ~(MPU_TYPE_RALIASES-1U); + uint32_t rnrOffset = rnr % MPU_TYPE_RALIASES; + + mpu->RNR = rnrBase; + while ((rnrOffset + cnt) > MPU_TYPE_RALIASES) { + uint32_t c = MPU_TYPE_RALIASES - rnrOffset; + ARM_MPU_OrderedMemcpy(&(mpu->RBAR)+(rnrOffset*2U), &(table->RBAR), c*rowWordSize); + table += c; + cnt -= c; + rnrOffset = 0U; + rnrBase += MPU_TYPE_RALIASES; + mpu->RNR = rnrBase; + } + + ARM_MPU_OrderedMemcpy(&(mpu->RBAR)+(rnrOffset*2U), &(table->RBAR), cnt*rowWordSize); + } +} + +/** Load the given number of MPU regions from a table. +* \param rnr First region number to be configured. +* \param table Pointer to the MPU configuration table. +* \param cnt Amount of regions to be configured. +*/ +__STATIC_INLINE void ARM_MPU_Load(uint32_t rnr, ARM_MPU_Region_t const* table, uint32_t cnt) +{ + ARM_MPU_LoadEx(MPU, rnr, table, cnt); +} + +#ifdef MPU_NS +/** Load the given number of MPU regions from a table to the Non-secure MPU. +* \param rnr First region number to be configured. +* \param table Pointer to the MPU configuration table. +* \param cnt Amount of regions to be configured. +*/ +__STATIC_INLINE void ARM_MPU_Load_NS(uint32_t rnr, ARM_MPU_Region_t const* table, uint32_t cnt) +{ + ARM_MPU_LoadEx(MPU_NS, rnr, table, cnt); +} +#endif + +#endif + diff --git a/Drivers/CMSIS/Include/pac_armv81.h b/Drivers/CMSIS/Include/pac_armv81.h new file mode 100644 index 0000000..854b60a --- /dev/null +++ b/Drivers/CMSIS/Include/pac_armv81.h @@ -0,0 +1,206 @@ +/****************************************************************************** + * @file pac_armv81.h + * @brief CMSIS PAC key functions for Armv8.1-M PAC extension + * @version V1.0.0 + * @date 23. March 2022 + ******************************************************************************/ +/* + * Copyright (c) 2022 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef PAC_ARMV81_H +#define PAC_ARMV81_H + + +/* ################### PAC Key functions ########################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_PacKeyFunctions PAC Key functions + \brief Functions that access the PAC keys. + @{ + */ + +#if (defined (__ARM_FEATURE_PAUTH) && (__ARM_FEATURE_PAUTH == 1)) + +/** + \brief read the PAC key used for privileged mode + \details Reads the PAC key stored in the PAC_KEY_P registers. + \param [out] pPacKey 128bit PAC key + */ +__STATIC_FORCEINLINE void __get_PAC_KEY_P (uint32_t* pPacKey) { + __ASM volatile ( + "mrs r1, pac_key_p_0\n" + "str r1,[%0,#0]\n" + "mrs r1, pac_key_p_1\n" + "str r1,[%0,#4]\n" + "mrs r1, pac_key_p_2\n" + "str r1,[%0,#8]\n" + "mrs r1, pac_key_p_3\n" + "str r1,[%0,#12]\n" + : : "r" (pPacKey) : "memory", "r1" + ); +} + +/** + \brief write the PAC key used for privileged mode + \details writes the given PAC key to the PAC_KEY_P registers. + \param [in] pPacKey 128bit PAC key + */ +__STATIC_FORCEINLINE void __set_PAC_KEY_P (uint32_t* pPacKey) { + __ASM volatile ( + "ldr r1,[%0,#0]\n" + "msr pac_key_p_0, r1\n" + "ldr r1,[%0,#4]\n" + "msr pac_key_p_1, r1\n" + "ldr r1,[%0,#8]\n" + "msr pac_key_p_2, r1\n" + "ldr r1,[%0,#12]\n" + "msr pac_key_p_3, r1\n" + : : "r" (pPacKey) : "memory", "r1" + ); +} + +/** + \brief read the PAC key used for unprivileged mode + \details Reads the PAC key stored in the PAC_KEY_U registers. + \param [out] pPacKey 128bit PAC key + */ +__STATIC_FORCEINLINE void __get_PAC_KEY_U (uint32_t* pPacKey) { + __ASM volatile ( + "mrs r1, pac_key_u_0\n" + "str r1,[%0,#0]\n" + "mrs r1, pac_key_u_1\n" + "str r1,[%0,#4]\n" + "mrs r1, pac_key_u_2\n" + "str r1,[%0,#8]\n" + "mrs r1, pac_key_u_3\n" + "str r1,[%0,#12]\n" + : : "r" (pPacKey) : "memory", "r1" + ); +} + +/** + \brief write the PAC key used for unprivileged mode + \details writes the given PAC key to the PAC_KEY_U registers. + \param [in] pPacKey 128bit PAC key + */ +__STATIC_FORCEINLINE void __set_PAC_KEY_U (uint32_t* pPacKey) { + __ASM volatile ( + "ldr r1,[%0,#0]\n" + "msr pac_key_u_0, r1\n" + "ldr r1,[%0,#4]\n" + "msr pac_key_u_1, r1\n" + "ldr r1,[%0,#8]\n" + "msr pac_key_u_2, r1\n" + "ldr r1,[%0,#12]\n" + "msr pac_key_u_3, r1\n" + : : "r" (pPacKey) : "memory", "r1" + ); +} + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) + +/** + \brief read the PAC key used for privileged mode (non-secure) + \details Reads the PAC key stored in the non-secure PAC_KEY_P registers when in secure mode. + \param [out] pPacKey 128bit PAC key + */ +__STATIC_FORCEINLINE void __TZ_get_PAC_KEY_P_NS (uint32_t* pPacKey) { + __ASM volatile ( + "mrs r1, pac_key_p_0_ns\n" + "str r1,[%0,#0]\n" + "mrs r1, pac_key_p_1_ns\n" + "str r1,[%0,#4]\n" + "mrs r1, pac_key_p_2_ns\n" + "str r1,[%0,#8]\n" + "mrs r1, pac_key_p_3_ns\n" + "str r1,[%0,#12]\n" + : : "r" (pPacKey) : "memory", "r1" + ); +} + +/** + \brief write the PAC key used for privileged mode (non-secure) + \details writes the given PAC key to the non-secure PAC_KEY_P registers when in secure mode. 
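+  \note A minimal usage sketch (the buffer name is illustrative only):
+  \code
+  uint32_t key[4];                 // 128-bit key held as four 32-bit words
+  __TZ_get_PAC_KEY_P_NS(key);      // read the current non-secure privileged-mode key
+  __TZ_set_PAC_KEY_P_NS(key);      // write the (possibly updated) key back
+  \endcode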
+ \param [in] pPacKey 128bit PAC key + */ +__STATIC_FORCEINLINE void __TZ_set_PAC_KEY_P_NS (uint32_t* pPacKey) { + __ASM volatile ( + "ldr r1,[%0,#0]\n" + "msr pac_key_p_0_ns, r1\n" + "ldr r1,[%0,#4]\n" + "msr pac_key_p_1_ns, r1\n" + "ldr r1,[%0,#8]\n" + "msr pac_key_p_2_ns, r1\n" + "ldr r1,[%0,#12]\n" + "msr pac_key_p_3_ns, r1\n" + : : "r" (pPacKey) : "memory", "r1" + ); +} + +/** + \brief read the PAC key used for unprivileged mode (non-secure) + \details Reads the PAC key stored in the non-secure PAC_KEY_U registers when in secure mode. + \param [out] pPacKey 128bit PAC key + */ +__STATIC_FORCEINLINE void __TZ_get_PAC_KEY_U_NS (uint32_t* pPacKey) { + __ASM volatile ( + "mrs r1, pac_key_u_0_ns\n" + "str r1,[%0,#0]\n" + "mrs r1, pac_key_u_1_ns\n" + "str r1,[%0,#4]\n" + "mrs r1, pac_key_u_2_ns\n" + "str r1,[%0,#8]\n" + "mrs r1, pac_key_u_3_ns\n" + "str r1,[%0,#12]\n" + : : "r" (pPacKey) : "memory", "r1" + ); +} + +/** + \brief write the PAC key used for unprivileged mode (non-secure) + \details writes the given PAC key to the non-secure PAC_KEY_U registers when in secure mode. + \param [in] pPacKey 128bit PAC key + */ +__STATIC_FORCEINLINE void __TZ_set_PAC_KEY_U_NS (uint32_t* pPacKey) { + __ASM volatile ( + "ldr r1,[%0,#0]\n" + "msr pac_key_u_0_ns, r1\n" + "ldr r1,[%0,#4]\n" + "msr pac_key_u_1_ns, r1\n" + "ldr r1,[%0,#8]\n" + "msr pac_key_u_2_ns, r1\n" + "ldr r1,[%0,#12]\n" + "msr pac_key_u_3_ns, r1\n" + : : "r" (pPacKey) : "memory", "r1" + ); +} + +#endif /* (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) */ + +#endif /* (defined (__ARM_FEATURE_PAUTH) && (__ARM_FEATURE_PAUTH == 1)) */ + +/*@} end of CMSIS_Core_PacKeyFunctions */ + + +#endif /* PAC_ARMV81_H */ diff --git a/Drivers/CMSIS/Include/pmu_armv8.h b/Drivers/CMSIS/Include/pmu_armv8.h new file mode 100644 index 0000000..f8f3d89 --- /dev/null +++ b/Drivers/CMSIS/Include/pmu_armv8.h @@ -0,0 +1,337 @@ +/****************************************************************************** + * @file pmu_armv8.h + * @brief CMSIS PMU API for Armv8.1-M PMU + * @version V1.0.1 + * @date 15. April 2020 + ******************************************************************************/ +/* + * Copyright (c) 2020 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef ARM_PMU_ARMV8_H +#define ARM_PMU_ARMV8_H + +/** + * \brief PMU Events + * \note See the Armv8.1-M Architecture Reference Manual for full details on these PMU events. 
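+ * \note A minimal usage sketch (counter 0 and the cycle-count event are arbitrary illustrative choices), using the functions defined later in this file:
+ * \code
+ * ARM_PMU_Enable();                              // enable the PMU
+ * ARM_PMU_Set_EVTYPER(0U, ARM_PMU_CPU_CYCLES);   // count CPU cycles on event counter 0
+ * ARM_PMU_CNTR_Enable(1UL << 0);                 // start event counter 0
+ * // ... code under measurement ...
+ * uint32_t cycles = ARM_PMU_Get_EVCNTR(0U);      // read back the event count (masked by PMU_EVCNTR_CNT_Msk)
+ * \endcode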
+ * */ + +#define ARM_PMU_SW_INCR 0x0000 /*!< Software update to the PMU_SWINC register, architecturally executed and condition code check pass */ +#define ARM_PMU_L1I_CACHE_REFILL 0x0001 /*!< L1 I-Cache refill */ +#define ARM_PMU_L1D_CACHE_REFILL 0x0003 /*!< L1 D-Cache refill */ +#define ARM_PMU_L1D_CACHE 0x0004 /*!< L1 D-Cache access */ +#define ARM_PMU_LD_RETIRED 0x0006 /*!< Memory-reading instruction architecturally executed and condition code check pass */ +#define ARM_PMU_ST_RETIRED 0x0007 /*!< Memory-writing instruction architecturally executed and condition code check pass */ +#define ARM_PMU_INST_RETIRED 0x0008 /*!< Instruction architecturally executed */ +#define ARM_PMU_EXC_TAKEN 0x0009 /*!< Exception entry */ +#define ARM_PMU_EXC_RETURN 0x000A /*!< Exception return instruction architecturally executed and the condition code check pass */ +#define ARM_PMU_PC_WRITE_RETIRED 0x000C /*!< Software change to the Program Counter (PC). Instruction is architecturally executed and condition code check pass */ +#define ARM_PMU_BR_IMMED_RETIRED 0x000D /*!< Immediate branch architecturally executed */ +#define ARM_PMU_BR_RETURN_RETIRED 0x000E /*!< Function return instruction architecturally executed and the condition code check pass */ +#define ARM_PMU_UNALIGNED_LDST_RETIRED 0x000F /*!< Unaligned memory memory-reading or memory-writing instruction architecturally executed and condition code check pass */ +#define ARM_PMU_BR_MIS_PRED 0x0010 /*!< Mispredicted or not predicted branch speculatively executed */ +#define ARM_PMU_CPU_CYCLES 0x0011 /*!< Cycle */ +#define ARM_PMU_BR_PRED 0x0012 /*!< Predictable branch speculatively executed */ +#define ARM_PMU_MEM_ACCESS 0x0013 /*!< Data memory access */ +#define ARM_PMU_L1I_CACHE 0x0014 /*!< Level 1 instruction cache access */ +#define ARM_PMU_L1D_CACHE_WB 0x0015 /*!< Level 1 data cache write-back */ +#define ARM_PMU_L2D_CACHE 0x0016 /*!< Level 2 data cache access */ +#define ARM_PMU_L2D_CACHE_REFILL 0x0017 /*!< Level 2 data cache refill */ +#define ARM_PMU_L2D_CACHE_WB 0x0018 /*!< Level 2 data cache write-back */ +#define ARM_PMU_BUS_ACCESS 0x0019 /*!< Bus access */ +#define ARM_PMU_MEMORY_ERROR 0x001A /*!< Local memory error */ +#define ARM_PMU_INST_SPEC 0x001B /*!< Instruction speculatively executed */ +#define ARM_PMU_BUS_CYCLES 0x001D /*!< Bus cycles */ +#define ARM_PMU_CHAIN 0x001E /*!< For an odd numbered counter, increment when an overflow occurs on the preceding even-numbered counter on the same PE */ +#define ARM_PMU_L1D_CACHE_ALLOCATE 0x001F /*!< Level 1 data cache allocation without refill */ +#define ARM_PMU_L2D_CACHE_ALLOCATE 0x0020 /*!< Level 2 data cache allocation without refill */ +#define ARM_PMU_BR_RETIRED 0x0021 /*!< Branch instruction architecturally executed */ +#define ARM_PMU_BR_MIS_PRED_RETIRED 0x0022 /*!< Mispredicted branch instruction architecturally executed */ +#define ARM_PMU_STALL_FRONTEND 0x0023 /*!< No operation issued because of the frontend */ +#define ARM_PMU_STALL_BACKEND 0x0024 /*!< No operation issued because of the backend */ +#define ARM_PMU_L2I_CACHE 0x0027 /*!< Level 2 instruction cache access */ +#define ARM_PMU_L2I_CACHE_REFILL 0x0028 /*!< Level 2 instruction cache refill */ +#define ARM_PMU_L3D_CACHE_ALLOCATE 0x0029 /*!< Level 3 data cache allocation without refill */ +#define ARM_PMU_L3D_CACHE_REFILL 0x002A /*!< Level 3 data cache refill */ +#define ARM_PMU_L3D_CACHE 0x002B /*!< Level 3 data cache access */ +#define ARM_PMU_L3D_CACHE_WB 0x002C /*!< Level 3 data cache write-back */ +#define 
ARM_PMU_LL_CACHE_RD 0x0036 /*!< Last level data cache read */ +#define ARM_PMU_LL_CACHE_MISS_RD 0x0037 /*!< Last level data cache read miss */ +#define ARM_PMU_L1D_CACHE_MISS_RD 0x0039 /*!< Level 1 data cache read miss */ +#define ARM_PMU_OP_COMPLETE 0x003A /*!< Operation retired */ +#define ARM_PMU_OP_SPEC 0x003B /*!< Operation speculatively executed */ +#define ARM_PMU_STALL 0x003C /*!< Stall cycle for instruction or operation not sent for execution */ +#define ARM_PMU_STALL_OP_BACKEND 0x003D /*!< Stall cycle for instruction or operation not sent for execution due to pipeline backend */ +#define ARM_PMU_STALL_OP_FRONTEND 0x003E /*!< Stall cycle for instruction or operation not sent for execution due to pipeline frontend */ +#define ARM_PMU_STALL_OP 0x003F /*!< Instruction or operation slots not occupied each cycle */ +#define ARM_PMU_L1D_CACHE_RD 0x0040 /*!< Level 1 data cache read */ +#define ARM_PMU_LE_RETIRED 0x0100 /*!< Loop end instruction executed */ +#define ARM_PMU_LE_SPEC 0x0101 /*!< Loop end instruction speculatively executed */ +#define ARM_PMU_BF_RETIRED 0x0104 /*!< Branch future instruction architecturally executed and condition code check pass */ +#define ARM_PMU_BF_SPEC 0x0105 /*!< Branch future instruction speculatively executed and condition code check pass */ +#define ARM_PMU_LE_CANCEL 0x0108 /*!< Loop end instruction not taken */ +#define ARM_PMU_BF_CANCEL 0x0109 /*!< Branch future instruction not taken */ +#define ARM_PMU_SE_CALL_S 0x0114 /*!< Call to secure function, resulting in Security state change */ +#define ARM_PMU_SE_CALL_NS 0x0115 /*!< Call to non-secure function, resulting in Security state change */ +#define ARM_PMU_DWT_CMPMATCH0 0x0118 /*!< DWT comparator 0 match */ +#define ARM_PMU_DWT_CMPMATCH1 0x0119 /*!< DWT comparator 1 match */ +#define ARM_PMU_DWT_CMPMATCH2 0x011A /*!< DWT comparator 2 match */ +#define ARM_PMU_DWT_CMPMATCH3 0x011B /*!< DWT comparator 3 match */ +#define ARM_PMU_MVE_INST_RETIRED 0x0200 /*!< MVE instruction architecturally executed */ +#define ARM_PMU_MVE_INST_SPEC 0x0201 /*!< MVE instruction speculatively executed */ +#define ARM_PMU_MVE_FP_RETIRED 0x0204 /*!< MVE floating-point instruction architecturally executed */ +#define ARM_PMU_MVE_FP_SPEC 0x0205 /*!< MVE floating-point instruction speculatively executed */ +#define ARM_PMU_MVE_FP_HP_RETIRED 0x0208 /*!< MVE half-precision floating-point instruction architecturally executed */ +#define ARM_PMU_MVE_FP_HP_SPEC 0x0209 /*!< MVE half-precision floating-point instruction speculatively executed */ +#define ARM_PMU_MVE_FP_SP_RETIRED 0x020C /*!< MVE single-precision floating-point instruction architecturally executed */ +#define ARM_PMU_MVE_FP_SP_SPEC 0x020D /*!< MVE single-precision floating-point instruction speculatively executed */ +#define ARM_PMU_MVE_FP_MAC_RETIRED 0x0214 /*!< MVE floating-point multiply or multiply-accumulate instruction architecturally executed */ +#define ARM_PMU_MVE_FP_MAC_SPEC 0x0215 /*!< MVE floating-point multiply or multiply-accumulate instruction speculatively executed */ +#define ARM_PMU_MVE_INT_RETIRED 0x0224 /*!< MVE integer instruction architecturally executed */ +#define ARM_PMU_MVE_INT_SPEC 0x0225 /*!< MVE integer instruction speculatively executed */ +#define ARM_PMU_MVE_INT_MAC_RETIRED 0x0228 /*!< MVE multiply or multiply-accumulate instruction architecturally executed */ +#define ARM_PMU_MVE_INT_MAC_SPEC 0x0229 /*!< MVE multiply or multiply-accumulate instruction speculatively executed */ +#define ARM_PMU_MVE_LDST_RETIRED 0x0238 /*!< MVE load or 
store instruction architecturally executed */ +#define ARM_PMU_MVE_LDST_SPEC 0x0239 /*!< MVE load or store instruction speculatively executed */ +#define ARM_PMU_MVE_LD_RETIRED 0x023C /*!< MVE load instruction architecturally executed */ +#define ARM_PMU_MVE_LD_SPEC 0x023D /*!< MVE load instruction speculatively executed */ +#define ARM_PMU_MVE_ST_RETIRED 0x0240 /*!< MVE store instruction architecturally executed */ +#define ARM_PMU_MVE_ST_SPEC 0x0241 /*!< MVE store instruction speculatively executed */ +#define ARM_PMU_MVE_LDST_CONTIG_RETIRED 0x0244 /*!< MVE contiguous load or store instruction architecturally executed */ +#define ARM_PMU_MVE_LDST_CONTIG_SPEC 0x0245 /*!< MVE contiguous load or store instruction speculatively executed */ +#define ARM_PMU_MVE_LD_CONTIG_RETIRED 0x0248 /*!< MVE contiguous load instruction architecturally executed */ +#define ARM_PMU_MVE_LD_CONTIG_SPEC 0x0249 /*!< MVE contiguous load instruction speculatively executed */ +#define ARM_PMU_MVE_ST_CONTIG_RETIRED 0x024C /*!< MVE contiguous store instruction architecturally executed */ +#define ARM_PMU_MVE_ST_CONTIG_SPEC 0x024D /*!< MVE contiguous store instruction speculatively executed */ +#define ARM_PMU_MVE_LDST_NONCONTIG_RETIRED 0x0250 /*!< MVE non-contiguous load or store instruction architecturally executed */ +#define ARM_PMU_MVE_LDST_NONCONTIG_SPEC 0x0251 /*!< MVE non-contiguous load or store instruction speculatively executed */ +#define ARM_PMU_MVE_LD_NONCONTIG_RETIRED 0x0254 /*!< MVE non-contiguous load instruction architecturally executed */ +#define ARM_PMU_MVE_LD_NONCONTIG_SPEC 0x0255 /*!< MVE non-contiguous load instruction speculatively executed */ +#define ARM_PMU_MVE_ST_NONCONTIG_RETIRED 0x0258 /*!< MVE non-contiguous store instruction architecturally executed */ +#define ARM_PMU_MVE_ST_NONCONTIG_SPEC 0x0259 /*!< MVE non-contiguous store instruction speculatively executed */ +#define ARM_PMU_MVE_LDST_MULTI_RETIRED 0x025C /*!< MVE memory instruction targeting multiple registers architecturally executed */ +#define ARM_PMU_MVE_LDST_MULTI_SPEC 0x025D /*!< MVE memory instruction targeting multiple registers speculatively executed */ +#define ARM_PMU_MVE_LD_MULTI_RETIRED 0x0260 /*!< MVE memory load instruction targeting multiple registers architecturally executed */ +#define ARM_PMU_MVE_LD_MULTI_SPEC 0x0261 /*!< MVE memory load instruction targeting multiple registers speculatively executed */ +#define ARM_PMU_MVE_ST_MULTI_RETIRED 0x0261 /*!< MVE memory store instruction targeting multiple registers architecturally executed */ +#define ARM_PMU_MVE_ST_MULTI_SPEC 0x0265 /*!< MVE memory store instruction targeting multiple registers speculatively executed */ +#define ARM_PMU_MVE_LDST_UNALIGNED_RETIRED 0x028C /*!< MVE unaligned memory load or store instruction architecturally executed */ +#define ARM_PMU_MVE_LDST_UNALIGNED_SPEC 0x028D /*!< MVE unaligned memory load or store instruction speculatively executed */ +#define ARM_PMU_MVE_LD_UNALIGNED_RETIRED 0x0290 /*!< MVE unaligned load instruction architecturally executed */ +#define ARM_PMU_MVE_LD_UNALIGNED_SPEC 0x0291 /*!< MVE unaligned load instruction speculatively executed */ +#define ARM_PMU_MVE_ST_UNALIGNED_RETIRED 0x0294 /*!< MVE unaligned store instruction architecturally executed */ +#define ARM_PMU_MVE_ST_UNALIGNED_SPEC 0x0295 /*!< MVE unaligned store instruction speculatively executed */ +#define ARM_PMU_MVE_LDST_UNALIGNED_NONCONTIG_RETIRED 0x0298 /*!< MVE unaligned noncontiguous load or store instruction architecturally executed */ +#define 
ARM_PMU_MVE_LDST_UNALIGNED_NONCONTIG_SPEC 0x0299 /*!< MVE unaligned noncontiguous load or store instruction speculatively executed */ +#define ARM_PMU_MVE_VREDUCE_RETIRED 0x02A0 /*!< MVE vector reduction instruction architecturally executed */ +#define ARM_PMU_MVE_VREDUCE_SPEC 0x02A1 /*!< MVE vector reduction instruction speculatively executed */ +#define ARM_PMU_MVE_VREDUCE_FP_RETIRED 0x02A4 /*!< MVE floating-point vector reduction instruction architecturally executed */ +#define ARM_PMU_MVE_VREDUCE_FP_SPEC 0x02A5 /*!< MVE floating-point vector reduction instruction speculatively executed */ +#define ARM_PMU_MVE_VREDUCE_INT_RETIRED 0x02A8 /*!< MVE integer vector reduction instruction architecturally executed */ +#define ARM_PMU_MVE_VREDUCE_INT_SPEC 0x02A9 /*!< MVE integer vector reduction instruction speculatively executed */ +#define ARM_PMU_MVE_PRED 0x02B8 /*!< Cycles where one or more predicated beats architecturally executed */ +#define ARM_PMU_MVE_STALL 0x02CC /*!< Stall cycles caused by an MVE instruction */ +#define ARM_PMU_MVE_STALL_RESOURCE 0x02CD /*!< Stall cycles caused by an MVE instruction because of resource conflicts */ +#define ARM_PMU_MVE_STALL_RESOURCE_MEM 0x02CE /*!< Stall cycles caused by an MVE instruction because of memory resource conflicts */ +#define ARM_PMU_MVE_STALL_RESOURCE_FP 0x02CF /*!< Stall cycles caused by an MVE instruction because of floating-point resource conflicts */ +#define ARM_PMU_MVE_STALL_RESOURCE_INT 0x02D0 /*!< Stall cycles caused by an MVE instruction because of integer resource conflicts */ +#define ARM_PMU_MVE_STALL_BREAK 0x02D3 /*!< Stall cycles caused by an MVE chain break */ +#define ARM_PMU_MVE_STALL_DEPENDENCY 0x02D4 /*!< Stall cycles caused by MVE register dependency */ +#define ARM_PMU_ITCM_ACCESS 0x4007 /*!< Instruction TCM access */ +#define ARM_PMU_DTCM_ACCESS 0x4008 /*!< Data TCM access */ +#define ARM_PMU_TRCEXTOUT0 0x4010 /*!< ETM external output 0 */ +#define ARM_PMU_TRCEXTOUT1 0x4011 /*!< ETM external output 1 */ +#define ARM_PMU_TRCEXTOUT2 0x4012 /*!< ETM external output 2 */ +#define ARM_PMU_TRCEXTOUT3 0x4013 /*!< ETM external output 3 */ +#define ARM_PMU_CTI_TRIGOUT4 0x4018 /*!< Cross-trigger Interface output trigger 4 */ +#define ARM_PMU_CTI_TRIGOUT5 0x4019 /*!< Cross-trigger Interface output trigger 5 */ +#define ARM_PMU_CTI_TRIGOUT6 0x401A /*!< Cross-trigger Interface output trigger 6 */ +#define ARM_PMU_CTI_TRIGOUT7 0x401B /*!< Cross-trigger Interface output trigger 7 */ + +/** \brief PMU Functions */ + +__STATIC_INLINE void ARM_PMU_Enable(void); +__STATIC_INLINE void ARM_PMU_Disable(void); + +__STATIC_INLINE void ARM_PMU_Set_EVTYPER(uint32_t num, uint32_t type); + +__STATIC_INLINE void ARM_PMU_CYCCNT_Reset(void); +__STATIC_INLINE void ARM_PMU_EVCNTR_ALL_Reset(void); + +__STATIC_INLINE void ARM_PMU_CNTR_Enable(uint32_t mask); +__STATIC_INLINE void ARM_PMU_CNTR_Disable(uint32_t mask); + +__STATIC_INLINE uint32_t ARM_PMU_Get_CCNTR(void); +__STATIC_INLINE uint32_t ARM_PMU_Get_EVCNTR(uint32_t num); + +__STATIC_INLINE uint32_t ARM_PMU_Get_CNTR_OVS(void); +__STATIC_INLINE void ARM_PMU_Set_CNTR_OVS(uint32_t mask); + +__STATIC_INLINE void ARM_PMU_Set_CNTR_IRQ_Enable(uint32_t mask); +__STATIC_INLINE void ARM_PMU_Set_CNTR_IRQ_Disable(uint32_t mask); + +__STATIC_INLINE void ARM_PMU_CNTR_Increment(uint32_t mask); + +/** + \brief Enable the PMU +*/ +__STATIC_INLINE void ARM_PMU_Enable(void) +{ + PMU->CTRL |= PMU_CTRL_ENABLE_Msk; +} + +/** + \brief Disable the PMU +*/ +__STATIC_INLINE void ARM_PMU_Disable(void) +{ + PMU->CTRL &= 
~PMU_CTRL_ENABLE_Msk; +} + +/** + \brief Set event to count for PMU event counter + \param [in] num Event counter (0-30) to configure + \param [in] type Event to count +*/ +__STATIC_INLINE void ARM_PMU_Set_EVTYPER(uint32_t num, uint32_t type) +{ + PMU->EVTYPER[num] = type; +} + +/** + \brief Reset cycle counter +*/ +__STATIC_INLINE void ARM_PMU_CYCCNT_Reset(void) +{ + PMU->CTRL |= PMU_CTRL_CYCCNT_RESET_Msk; +} + +/** + \brief Reset all event counters +*/ +__STATIC_INLINE void ARM_PMU_EVCNTR_ALL_Reset(void) +{ + PMU->CTRL |= PMU_CTRL_EVENTCNT_RESET_Msk; +} + +/** + \brief Enable counters + \param [in] mask Counters to enable + \note Enables one or more of the following: + - event counters (0-30) + - cycle counter +*/ +__STATIC_INLINE void ARM_PMU_CNTR_Enable(uint32_t mask) +{ + PMU->CNTENSET = mask; +} + +/** + \brief Disable counters + \param [in] mask Counters to disable + \note Disables one or more of the following: + - event counters (0-30) + - cycle counter +*/ +__STATIC_INLINE void ARM_PMU_CNTR_Disable(uint32_t mask) +{ + PMU->CNTENCLR = mask; +} + +/** + \brief Read cycle counter + \return Cycle count +*/ +__STATIC_INLINE uint32_t ARM_PMU_Get_CCNTR(void) +{ + return PMU->CCNTR; +} + +/** + \brief Read event counter + \param [in] num Event counter (0-30) to read + \return Event count +*/ +__STATIC_INLINE uint32_t ARM_PMU_Get_EVCNTR(uint32_t num) +{ + return PMU_EVCNTR_CNT_Msk & PMU->EVCNTR[num]; +} + +/** + \brief Read counter overflow status + \return Counter overflow status bits for the following: + - event counters (0-30) + - cycle counter +*/ +__STATIC_INLINE uint32_t ARM_PMU_Get_CNTR_OVS(void) +{ + return PMU->OVSSET; +} + +/** + \brief Clear counter overflow status + \param [in] mask Counter overflow status bits to clear + \note Clears overflow status bits for one or more of the following: + - event counters (0-30) + - cycle counter +*/ +__STATIC_INLINE void ARM_PMU_Set_CNTR_OVS(uint32_t mask) +{ + PMU->OVSCLR = mask; +} + +/** + \brief Enable counter overflow interrupt request + \param [in] mask Counter overflow interrupt request bits to set + \note Sets overflow interrupt request bits for one or more of the following: + - event counters (0-30) + - cycle counter +*/ +__STATIC_INLINE void ARM_PMU_Set_CNTR_IRQ_Enable(uint32_t mask) +{ + PMU->INTENSET = mask; +} + +/** + \brief Disable counter overflow interrupt request + \param [in] mask Counter overflow interrupt request bits to clear + \note Clears overflow interrupt request bits for one or more of the following: + - event counters (0-30) + - cycle counter +*/ +__STATIC_INLINE void ARM_PMU_Set_CNTR_IRQ_Disable(uint32_t mask) +{ + PMU->INTENCLR = mask; +} + +/** + \brief Software increment event counter + \param [in] mask Counters to increment + \note Software increment bits for one or more event counters (0-30) +*/ +__STATIC_INLINE void ARM_PMU_CNTR_Increment(uint32_t mask) +{ + PMU->SWINC = mask; +} + +#endif diff --git a/Drivers/CMSIS/Include/tz_context.h b/Drivers/CMSIS/Include/tz_context.h new file mode 100644 index 0000000..0d09749 --- /dev/null +++ b/Drivers/CMSIS/Include/tz_context.h @@ -0,0 +1,70 @@ +/****************************************************************************** + * @file tz_context.h + * @brief Context Management for Armv8-M TrustZone + * @version V1.0.1 + * @date 10. January 2018 + ******************************************************************************/ +/* + * Copyright (c) 2017-2018 Arm Limited. All rights reserved.
+ * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef TZ_CONTEXT_H +#define TZ_CONTEXT_H + +#include <stdint.h> + +#ifndef TZ_MODULEID_T +#define TZ_MODULEID_T +/// \details Data type that identifies secure software modules called by a process. +typedef uint32_t TZ_ModuleId_t; +#endif + +/// \details TZ Memory ID identifies an allocated memory slot. +typedef uint32_t TZ_MemoryId_t; + +/// Initialize secure context memory system +/// \return execution status (1: success, 0: error) +uint32_t TZ_InitContextSystem_S (void); + +/// Allocate context memory for calling secure software modules in TrustZone +/// \param[in] module identifies software modules called from non-secure mode +/// \return value != 0 id TrustZone memory slot identifier +/// \return value 0 no memory available or internal error +TZ_MemoryId_t TZ_AllocModuleContext_S (TZ_ModuleId_t module); + +/// Free context memory that was previously allocated with \ref TZ_AllocModuleContext_S +/// \param[in] id TrustZone memory slot identifier +/// \return execution status (1: success, 0: error) +uint32_t TZ_FreeModuleContext_S (TZ_MemoryId_t id); + +/// Load secure context (called on RTOS thread context switch) +/// \param[in] id TrustZone memory slot identifier +/// \return execution status (1: success, 0: error) +uint32_t TZ_LoadContext_S (TZ_MemoryId_t id); + +/// Store secure context (called on RTOS thread context switch) +/// \param[in] id TrustZone memory slot identifier +/// \return execution status (1: success, 0: error) +uint32_t TZ_StoreContext_S (TZ_MemoryId_t id); + +#endif // TZ_CONTEXT_H diff --git a/Drivers/CMSIS/LICENSE.txt b/Drivers/CMSIS/LICENSE.txt new file mode 100644 index 0000000..8dada3e --- /dev/null +++ b/Drivers/CMSIS/LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License.
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/Legacy/stm32_hal_legacy.h b/Drivers/STM32F4xx_HAL_Driver/Inc/Legacy/stm32_hal_legacy.h new file mode 100644 index 0000000..239d149 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/Legacy/stm32_hal_legacy.h @@ -0,0 +1,4359 @@ +/** + ****************************************************************************** + * @file stm32_hal_legacy.h + * @author MCD Application Team + * @brief This file contains aliases definition for the STM32Cube HAL constants + * macros and functions maintained for legacy purpose. + ****************************************************************************** + * @attention + * + * Copyright (c) 2021 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32_HAL_LEGACY +#define STM32_HAL_LEGACY + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup HAL_AES_Aliased_Defines HAL CRYP Aliased Defines maintained for legacy purpose + * @{ + */ +#define AES_FLAG_RDERR CRYP_FLAG_RDERR +#define AES_FLAG_WRERR CRYP_FLAG_WRERR +#define AES_CLEARFLAG_CCF CRYP_CLEARFLAG_CCF +#define AES_CLEARFLAG_RDERR CRYP_CLEARFLAG_RDERR +#define AES_CLEARFLAG_WRERR CRYP_CLEARFLAG_WRERR +#if defined(STM32H7) || defined(STM32MP1) +#define CRYP_DATATYPE_32B CRYP_NO_SWAP +#define CRYP_DATATYPE_16B CRYP_HALFWORD_SWAP +#define CRYP_DATATYPE_8B CRYP_BYTE_SWAP +#define CRYP_DATATYPE_1B CRYP_BIT_SWAP +#endif /* STM32H7 || STM32MP1 */ +/** + * @} + */ + +/** @defgroup HAL_ADC_Aliased_Defines HAL ADC Aliased Defines maintained for legacy purpose + * @{ + */ +#define ADC_RESOLUTION12b ADC_RESOLUTION_12B +#define ADC_RESOLUTION10b ADC_RESOLUTION_10B +#define ADC_RESOLUTION8b ADC_RESOLUTION_8B +#define ADC_RESOLUTION6b ADC_RESOLUTION_6B +#define OVR_DATA_OVERWRITTEN ADC_OVR_DATA_OVERWRITTEN +#define OVR_DATA_PRESERVED ADC_OVR_DATA_PRESERVED +#define EOC_SINGLE_CONV ADC_EOC_SINGLE_CONV +#define EOC_SEQ_CONV ADC_EOC_SEQ_CONV +#define EOC_SINGLE_SEQ_CONV ADC_EOC_SINGLE_SEQ_CONV +#define REGULAR_GROUP ADC_REGULAR_GROUP +#define INJECTED_GROUP ADC_INJECTED_GROUP +#define REGULAR_INJECTED_GROUP ADC_REGULAR_INJECTED_GROUP +#define AWD_EVENT ADC_AWD_EVENT +#define AWD1_EVENT ADC_AWD1_EVENT +#define AWD2_EVENT ADC_AWD2_EVENT +#define AWD3_EVENT ADC_AWD3_EVENT +#define OVR_EVENT ADC_OVR_EVENT +#define JQOVF_EVENT ADC_JQOVF_EVENT +#define ALL_CHANNELS ADC_ALL_CHANNELS +#define REGULAR_CHANNELS ADC_REGULAR_CHANNELS +#define INJECTED_CHANNELS ADC_INJECTED_CHANNELS +#define SYSCFG_FLAG_SENSOR_ADC ADC_FLAG_SENSOR +#define SYSCFG_FLAG_VREF_ADC ADC_FLAG_VREFINT +#define ADC_CLOCKPRESCALER_PCLK_DIV1 ADC_CLOCK_SYNC_PCLK_DIV1 +#define ADC_CLOCKPRESCALER_PCLK_DIV2 ADC_CLOCK_SYNC_PCLK_DIV2 +#define ADC_CLOCKPRESCALER_PCLK_DIV4 ADC_CLOCK_SYNC_PCLK_DIV4 +#define ADC_CLOCKPRESCALER_PCLK_DIV6 ADC_CLOCK_SYNC_PCLK_DIV6 +#define ADC_CLOCKPRESCALER_PCLK_DIV8 ADC_CLOCK_SYNC_PCLK_DIV8 +#define ADC_EXTERNALTRIG0_T6_TRGO ADC_EXTERNALTRIGCONV_T6_TRGO +#define 
ADC_EXTERNALTRIG1_T21_CC2 ADC_EXTERNALTRIGCONV_T21_CC2 +#define ADC_EXTERNALTRIG2_T2_TRGO ADC_EXTERNALTRIGCONV_T2_TRGO +#define ADC_EXTERNALTRIG3_T2_CC4 ADC_EXTERNALTRIGCONV_T2_CC4 +#define ADC_EXTERNALTRIG4_T22_TRGO ADC_EXTERNALTRIGCONV_T22_TRGO +#define ADC_EXTERNALTRIG7_EXT_IT11 ADC_EXTERNALTRIGCONV_EXT_IT11 +#define ADC_CLOCK_ASYNC ADC_CLOCK_ASYNC_DIV1 +#define ADC_EXTERNALTRIG_EDGE_NONE ADC_EXTERNALTRIGCONVEDGE_NONE +#define ADC_EXTERNALTRIG_EDGE_RISING ADC_EXTERNALTRIGCONVEDGE_RISING +#define ADC_EXTERNALTRIG_EDGE_FALLING ADC_EXTERNALTRIGCONVEDGE_FALLING +#define ADC_EXTERNALTRIG_EDGE_RISINGFALLING ADC_EXTERNALTRIGCONVEDGE_RISINGFALLING +#define ADC_SAMPLETIME_2CYCLE_5 ADC_SAMPLETIME_2CYCLES_5 + +#define HAL_ADC_STATE_BUSY_REG HAL_ADC_STATE_REG_BUSY +#define HAL_ADC_STATE_BUSY_INJ HAL_ADC_STATE_INJ_BUSY +#define HAL_ADC_STATE_EOC_REG HAL_ADC_STATE_REG_EOC +#define HAL_ADC_STATE_EOC_INJ HAL_ADC_STATE_INJ_EOC +#define HAL_ADC_STATE_ERROR HAL_ADC_STATE_ERROR_INTERNAL +#define HAL_ADC_STATE_BUSY HAL_ADC_STATE_BUSY_INTERNAL +#define HAL_ADC_STATE_AWD HAL_ADC_STATE_AWD1 + +#if defined(STM32H7) +#define ADC_CHANNEL_VBAT_DIV4 ADC_CHANNEL_VBAT +#endif /* STM32H7 */ + +#if defined(STM32U5) +#define ADC_SAMPLETIME_5CYCLE ADC_SAMPLETIME_5CYCLES +#define ADC_SAMPLETIME_391CYCLES_5 ADC_SAMPLETIME_391CYCLES +#define ADC4_SAMPLETIME_160CYCLES_5 ADC4_SAMPLETIME_814CYCLES_5 +#endif /* STM32U5 */ + +#if defined(STM32H5) +#define ADC_CHANNEL_VCORE ADC_CHANNEL_VDDCORE +#endif /* STM32H5 */ +/** + * @} + */ + +/** @defgroup HAL_CEC_Aliased_Defines HAL CEC Aliased Defines maintained for legacy purpose + * @{ + */ + +#define __HAL_CEC_GET_IT __HAL_CEC_GET_FLAG + +/** + * @} + */ + +/** @defgroup HAL_COMP_Aliased_Defines HAL COMP Aliased Defines maintained for legacy purpose + * @{ + */ +#define COMP_WINDOWMODE_DISABLED COMP_WINDOWMODE_DISABLE +#define COMP_WINDOWMODE_ENABLED COMP_WINDOWMODE_ENABLE +#define COMP_EXTI_LINE_COMP1_EVENT COMP_EXTI_LINE_COMP1 +#define COMP_EXTI_LINE_COMP2_EVENT COMP_EXTI_LINE_COMP2 +#define COMP_EXTI_LINE_COMP3_EVENT COMP_EXTI_LINE_COMP3 +#define COMP_EXTI_LINE_COMP4_EVENT COMP_EXTI_LINE_COMP4 +#define COMP_EXTI_LINE_COMP5_EVENT COMP_EXTI_LINE_COMP5 +#define COMP_EXTI_LINE_COMP6_EVENT COMP_EXTI_LINE_COMP6 +#define COMP_EXTI_LINE_COMP7_EVENT COMP_EXTI_LINE_COMP7 +#if defined(STM32L0) +#define COMP_LPTIMCONNECTION_ENABLED ((uint32_t)0x00000003U) /*!< COMPX output generic naming: connected to LPTIM + input 1 for COMP1, LPTIM input 2 for COMP2 */ +#endif +#define COMP_OUTPUT_COMP6TIM2OCREFCLR COMP_OUTPUT_COMP6_TIM2OCREFCLR +#if defined(STM32F373xC) || defined(STM32F378xx) +#define COMP_OUTPUT_TIM3IC1 COMP_OUTPUT_COMP1_TIM3IC1 +#define COMP_OUTPUT_TIM3OCREFCLR COMP_OUTPUT_COMP1_TIM3OCREFCLR +#endif /* STM32F373xC || STM32F378xx */ + +#if defined(STM32L0) || defined(STM32L4) +#define COMP_WINDOWMODE_ENABLE COMP_WINDOWMODE_COMP1_INPUT_PLUS_COMMON + +#define COMP_NONINVERTINGINPUT_IO1 COMP_INPUT_PLUS_IO1 +#define COMP_NONINVERTINGINPUT_IO2 COMP_INPUT_PLUS_IO2 +#define COMP_NONINVERTINGINPUT_IO3 COMP_INPUT_PLUS_IO3 +#define COMP_NONINVERTINGINPUT_IO4 COMP_INPUT_PLUS_IO4 +#define COMP_NONINVERTINGINPUT_IO5 COMP_INPUT_PLUS_IO5 +#define COMP_NONINVERTINGINPUT_IO6 COMP_INPUT_PLUS_IO6 + +#define COMP_INVERTINGINPUT_1_4VREFINT COMP_INPUT_MINUS_1_4VREFINT +#define COMP_INVERTINGINPUT_1_2VREFINT COMP_INPUT_MINUS_1_2VREFINT +#define COMP_INVERTINGINPUT_3_4VREFINT COMP_INPUT_MINUS_3_4VREFINT +#define COMP_INVERTINGINPUT_VREFINT COMP_INPUT_MINUS_VREFINT +#define COMP_INVERTINGINPUT_DAC1_CH1 
COMP_INPUT_MINUS_DAC1_CH1 +#define COMP_INVERTINGINPUT_DAC1_CH2 COMP_INPUT_MINUS_DAC1_CH2 +#define COMP_INVERTINGINPUT_DAC1 COMP_INPUT_MINUS_DAC1_CH1 +#define COMP_INVERTINGINPUT_DAC2 COMP_INPUT_MINUS_DAC1_CH2 +#define COMP_INVERTINGINPUT_IO1 COMP_INPUT_MINUS_IO1 +#if defined(STM32L0) +/* Issue fixed on STM32L0 COMP driver: only 2 dedicated IO (IO1 and IO2), */ +/* IO2 was wrongly assigned to IO shared with DAC and IO3 was corresponding */ +/* to the second dedicated IO (only for COMP2). */ +#define COMP_INVERTINGINPUT_IO2 COMP_INPUT_MINUS_DAC1_CH2 +#define COMP_INVERTINGINPUT_IO3 COMP_INPUT_MINUS_IO2 +#else +#define COMP_INVERTINGINPUT_IO2 COMP_INPUT_MINUS_IO2 +#define COMP_INVERTINGINPUT_IO3 COMP_INPUT_MINUS_IO3 +#endif +#define COMP_INVERTINGINPUT_IO4 COMP_INPUT_MINUS_IO4 +#define COMP_INVERTINGINPUT_IO5 COMP_INPUT_MINUS_IO5 + +#define COMP_OUTPUTLEVEL_LOW COMP_OUTPUT_LEVEL_LOW +#define COMP_OUTPUTLEVEL_HIGH COMP_OUTPUT_LEVEL_HIGH + +/* Note: Literal "COMP_FLAG_LOCK" kept for legacy purpose. */ +/* To check COMP lock state, use macro "__HAL_COMP_IS_LOCKED()". */ +#if defined(COMP_CSR_LOCK) +#define COMP_FLAG_LOCK COMP_CSR_LOCK +#elif defined(COMP_CSR_COMP1LOCK) +#define COMP_FLAG_LOCK COMP_CSR_COMP1LOCK +#elif defined(COMP_CSR_COMPxLOCK) +#define COMP_FLAG_LOCK COMP_CSR_COMPxLOCK +#endif + +#if defined(STM32L4) +#define COMP_BLANKINGSRCE_TIM1OC5 COMP_BLANKINGSRC_TIM1_OC5_COMP1 +#define COMP_BLANKINGSRCE_TIM2OC3 COMP_BLANKINGSRC_TIM2_OC3_COMP1 +#define COMP_BLANKINGSRCE_TIM3OC3 COMP_BLANKINGSRC_TIM3_OC3_COMP1 +#define COMP_BLANKINGSRCE_TIM3OC4 COMP_BLANKINGSRC_TIM3_OC4_COMP2 +#define COMP_BLANKINGSRCE_TIM8OC5 COMP_BLANKINGSRC_TIM8_OC5_COMP2 +#define COMP_BLANKINGSRCE_TIM15OC1 COMP_BLANKINGSRC_TIM15_OC1_COMP2 +#define COMP_BLANKINGSRCE_NONE COMP_BLANKINGSRC_NONE +#endif + +#if defined(STM32L0) +#define COMP_MODE_HIGHSPEED COMP_POWERMODE_MEDIUMSPEED +#define COMP_MODE_LOWSPEED COMP_POWERMODE_ULTRALOWPOWER +#else +#define COMP_MODE_HIGHSPEED COMP_POWERMODE_HIGHSPEED +#define COMP_MODE_MEDIUMSPEED COMP_POWERMODE_MEDIUMSPEED +#define COMP_MODE_LOWPOWER COMP_POWERMODE_LOWPOWER +#define COMP_MODE_ULTRALOWPOWER COMP_POWERMODE_ULTRALOWPOWER +#endif + +#endif + +#if defined(STM32U5) +#define __HAL_COMP_COMP1_EXTI_CLEAR_RASING_FLAG __HAL_COMP_COMP1_EXTI_CLEAR_RISING_FLAG +#endif + +/** + * @} + */ + +/** @defgroup HAL_CORTEX_Aliased_Defines HAL CORTEX Aliased Defines maintained for legacy purpose + * @{ + */ +#define __HAL_CORTEX_SYSTICKCLK_CONFIG HAL_SYSTICK_CLKSourceConfig +#if defined(STM32U5) +#define MPU_DEVICE_nGnRnE MPU_DEVICE_NGNRNE +#define MPU_DEVICE_nGnRE MPU_DEVICE_NGNRE +#define MPU_DEVICE_nGRE MPU_DEVICE_NGRE +#endif /* STM32U5 */ +/** + * @} + */ + +/** @defgroup CRC_Aliases CRC API aliases + * @{ + */ +#if defined(STM32H5) || defined(STM32C0) +#else +#define HAL_CRC_Input_Data_Reverse HAL_CRCEx_Input_Data_Reverse /*!< Aliased to HAL_CRCEx_Input_Data_Reverse for + inter STM32 series compatibility */ +#define HAL_CRC_Output_Data_Reverse HAL_CRCEx_Output_Data_Reverse /*!< Aliased to HAL_CRCEx_Output_Data_Reverse for + inter STM32 series compatibility */ +#endif +/** + * @} + */ + +/** @defgroup HAL_CRC_Aliased_Defines HAL CRC Aliased Defines maintained for legacy purpose + * @{ + */ + +#define CRC_OUTPUTDATA_INVERSION_DISABLED CRC_OUTPUTDATA_INVERSION_DISABLE +#define CRC_OUTPUTDATA_INVERSION_ENABLED CRC_OUTPUTDATA_INVERSION_ENABLE + +/** + * @} + */ + +/** @defgroup HAL_DAC_Aliased_Defines HAL DAC Aliased Defines maintained for legacy purpose + * @{ + */ + +#define DAC1_CHANNEL_1 
DAC_CHANNEL_1 +#define DAC1_CHANNEL_2 DAC_CHANNEL_2 +#define DAC2_CHANNEL_1 DAC_CHANNEL_1 +#define DAC_WAVE_NONE 0x00000000U +#define DAC_WAVE_NOISE DAC_CR_WAVE1_0 +#define DAC_WAVE_TRIANGLE DAC_CR_WAVE1_1 +#define DAC_WAVEGENERATION_NONE DAC_WAVE_NONE +#define DAC_WAVEGENERATION_NOISE DAC_WAVE_NOISE +#define DAC_WAVEGENERATION_TRIANGLE DAC_WAVE_TRIANGLE + +#if defined(STM32G4) || defined(STM32H7) || defined (STM32U5) +#define DAC_CHIPCONNECT_DISABLE DAC_CHIPCONNECT_EXTERNAL +#define DAC_CHIPCONNECT_ENABLE DAC_CHIPCONNECT_INTERNAL +#endif + +#if defined(STM32U5) +#define DAC_TRIGGER_STOP_LPTIM1_OUT DAC_TRIGGER_STOP_LPTIM1_CH1 +#define DAC_TRIGGER_STOP_LPTIM3_OUT DAC_TRIGGER_STOP_LPTIM3_CH1 +#define DAC_TRIGGER_LPTIM1_OUT DAC_TRIGGER_LPTIM1_CH1 +#define DAC_TRIGGER_LPTIM3_OUT DAC_TRIGGER_LPTIM3_CH1 +#endif + +#if defined(STM32H5) +#define DAC_TRIGGER_LPTIM1_OUT DAC_TRIGGER_LPTIM1_CH1 +#define DAC_TRIGGER_LPTIM2_OUT DAC_TRIGGER_LPTIM2_CH1 +#endif + +#if defined(STM32L1) || defined(STM32L4) || defined(STM32G0) || defined(STM32L5) || defined(STM32H7) || \ + defined(STM32F4) || defined(STM32G4) +#define HAL_DAC_MSP_INIT_CB_ID HAL_DAC_MSPINIT_CB_ID +#define HAL_DAC_MSP_DEINIT_CB_ID HAL_DAC_MSPDEINIT_CB_ID +#endif + +/** + * @} + */ + +/** @defgroup HAL_DMA_Aliased_Defines HAL DMA Aliased Defines maintained for legacy purpose + * @{ + */ +#define HAL_REMAPDMA_ADC_DMA_CH2 DMA_REMAP_ADC_DMA_CH2 +#define HAL_REMAPDMA_USART1_TX_DMA_CH4 DMA_REMAP_USART1_TX_DMA_CH4 +#define HAL_REMAPDMA_USART1_RX_DMA_CH5 DMA_REMAP_USART1_RX_DMA_CH5 +#define HAL_REMAPDMA_TIM16_DMA_CH4 DMA_REMAP_TIM16_DMA_CH4 +#define HAL_REMAPDMA_TIM17_DMA_CH2 DMA_REMAP_TIM17_DMA_CH2 +#define HAL_REMAPDMA_USART3_DMA_CH32 DMA_REMAP_USART3_DMA_CH32 +#define HAL_REMAPDMA_TIM16_DMA_CH6 DMA_REMAP_TIM16_DMA_CH6 +#define HAL_REMAPDMA_TIM17_DMA_CH7 DMA_REMAP_TIM17_DMA_CH7 +#define HAL_REMAPDMA_SPI2_DMA_CH67 DMA_REMAP_SPI2_DMA_CH67 +#define HAL_REMAPDMA_USART2_DMA_CH67 DMA_REMAP_USART2_DMA_CH67 +#define HAL_REMAPDMA_I2C1_DMA_CH76 DMA_REMAP_I2C1_DMA_CH76 +#define HAL_REMAPDMA_TIM1_DMA_CH6 DMA_REMAP_TIM1_DMA_CH6 +#define HAL_REMAPDMA_TIM2_DMA_CH7 DMA_REMAP_TIM2_DMA_CH7 +#define HAL_REMAPDMA_TIM3_DMA_CH6 DMA_REMAP_TIM3_DMA_CH6 + +#define IS_HAL_REMAPDMA IS_DMA_REMAP +#define __HAL_REMAPDMA_CHANNEL_ENABLE __HAL_DMA_REMAP_CHANNEL_ENABLE +#define __HAL_REMAPDMA_CHANNEL_DISABLE __HAL_DMA_REMAP_CHANNEL_DISABLE + +#if defined(STM32L4) + +#define HAL_DMAMUX1_REQUEST_GEN_EXTI0 HAL_DMAMUX1_REQ_GEN_EXTI0 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI1 HAL_DMAMUX1_REQ_GEN_EXTI1 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI2 HAL_DMAMUX1_REQ_GEN_EXTI2 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI3 HAL_DMAMUX1_REQ_GEN_EXTI3 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI4 HAL_DMAMUX1_REQ_GEN_EXTI4 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI5 HAL_DMAMUX1_REQ_GEN_EXTI5 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI6 HAL_DMAMUX1_REQ_GEN_EXTI6 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI7 HAL_DMAMUX1_REQ_GEN_EXTI7 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI8 HAL_DMAMUX1_REQ_GEN_EXTI8 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI9 HAL_DMAMUX1_REQ_GEN_EXTI9 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI10 HAL_DMAMUX1_REQ_GEN_EXTI10 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI11 HAL_DMAMUX1_REQ_GEN_EXTI11 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI12 HAL_DMAMUX1_REQ_GEN_EXTI12 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI13 HAL_DMAMUX1_REQ_GEN_EXTI13 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI14 HAL_DMAMUX1_REQ_GEN_EXTI14 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI15 HAL_DMAMUX1_REQ_GEN_EXTI15 +#define HAL_DMAMUX1_REQUEST_GEN_DMAMUX1_CH0_EVT HAL_DMAMUX1_REQ_GEN_DMAMUX1_CH0_EVT 
+#define HAL_DMAMUX1_REQUEST_GEN_DMAMUX1_CH1_EVT HAL_DMAMUX1_REQ_GEN_DMAMUX1_CH1_EVT +#define HAL_DMAMUX1_REQUEST_GEN_DMAMUX1_CH2_EVT HAL_DMAMUX1_REQ_GEN_DMAMUX1_CH2_EVT +#define HAL_DMAMUX1_REQUEST_GEN_DMAMUX1_CH3_EVT HAL_DMAMUX1_REQ_GEN_DMAMUX1_CH3_EVT +#define HAL_DMAMUX1_REQUEST_GEN_LPTIM1_OUT HAL_DMAMUX1_REQ_GEN_LPTIM1_OUT +#define HAL_DMAMUX1_REQUEST_GEN_LPTIM2_OUT HAL_DMAMUX1_REQ_GEN_LPTIM2_OUT +#define HAL_DMAMUX1_REQUEST_GEN_DSI_TE HAL_DMAMUX1_REQ_GEN_DSI_TE +#define HAL_DMAMUX1_REQUEST_GEN_DSI_EOT HAL_DMAMUX1_REQ_GEN_DSI_EOT +#define HAL_DMAMUX1_REQUEST_GEN_DMA2D_EOT HAL_DMAMUX1_REQ_GEN_DMA2D_EOT +#define HAL_DMAMUX1_REQUEST_GEN_LTDC_IT HAL_DMAMUX1_REQ_GEN_LTDC_IT + +#define HAL_DMAMUX_REQUEST_GEN_NO_EVENT HAL_DMAMUX_REQ_GEN_NO_EVENT +#define HAL_DMAMUX_REQUEST_GEN_RISING HAL_DMAMUX_REQ_GEN_RISING +#define HAL_DMAMUX_REQUEST_GEN_FALLING HAL_DMAMUX_REQ_GEN_FALLING +#define HAL_DMAMUX_REQUEST_GEN_RISING_FALLING HAL_DMAMUX_REQ_GEN_RISING_FALLING + +#if defined(STM32L4R5xx) || defined(STM32L4R9xx) || defined(STM32L4R9xx) || defined(STM32L4S5xx) || \ + defined(STM32L4S7xx) || defined(STM32L4S9xx) +#define DMA_REQUEST_DCMI_PSSI DMA_REQUEST_DCMI +#endif + +#endif /* STM32L4 */ + +#if defined(STM32G0) +#define DMA_REQUEST_DAC1_CHANNEL1 DMA_REQUEST_DAC1_CH1 +#define DMA_REQUEST_DAC1_CHANNEL2 DMA_REQUEST_DAC1_CH2 +#define DMA_REQUEST_TIM16_TRIG_COM DMA_REQUEST_TIM16_COM +#define DMA_REQUEST_TIM17_TRIG_COM DMA_REQUEST_TIM17_COM + +#define LL_DMAMUX_REQ_TIM16_TRIG_COM LL_DMAMUX_REQ_TIM16_COM +#define LL_DMAMUX_REQ_TIM17_TRIG_COM LL_DMAMUX_REQ_TIM17_COM +#endif + +#if defined(STM32H7) + +#define DMA_REQUEST_DAC1 DMA_REQUEST_DAC1_CH1 +#define DMA_REQUEST_DAC2 DMA_REQUEST_DAC1_CH2 + +#define BDMA_REQUEST_LP_UART1_RX BDMA_REQUEST_LPUART1_RX +#define BDMA_REQUEST_LP_UART1_TX BDMA_REQUEST_LPUART1_TX + +#define HAL_DMAMUX1_REQUEST_GEN_DMAMUX1_CH0_EVT HAL_DMAMUX1_REQ_GEN_DMAMUX1_CH0_EVT +#define HAL_DMAMUX1_REQUEST_GEN_DMAMUX1_CH1_EVT HAL_DMAMUX1_REQ_GEN_DMAMUX1_CH1_EVT +#define HAL_DMAMUX1_REQUEST_GEN_DMAMUX1_CH2_EVT HAL_DMAMUX1_REQ_GEN_DMAMUX1_CH2_EVT +#define HAL_DMAMUX1_REQUEST_GEN_LPTIM1_OUT HAL_DMAMUX1_REQ_GEN_LPTIM1_OUT +#define HAL_DMAMUX1_REQUEST_GEN_LPTIM2_OUT HAL_DMAMUX1_REQ_GEN_LPTIM2_OUT +#define HAL_DMAMUX1_REQUEST_GEN_LPTIM3_OUT HAL_DMAMUX1_REQ_GEN_LPTIM3_OUT +#define HAL_DMAMUX1_REQUEST_GEN_EXTI0 HAL_DMAMUX1_REQ_GEN_EXTI0 +#define HAL_DMAMUX1_REQUEST_GEN_TIM12_TRGO HAL_DMAMUX1_REQ_GEN_TIM12_TRGO + +#define HAL_DMAMUX2_REQUEST_GEN_DMAMUX2_CH0_EVT HAL_DMAMUX2_REQ_GEN_DMAMUX2_CH0_EVT +#define HAL_DMAMUX2_REQUEST_GEN_DMAMUX2_CH1_EVT HAL_DMAMUX2_REQ_GEN_DMAMUX2_CH1_EVT +#define HAL_DMAMUX2_REQUEST_GEN_DMAMUX2_CH2_EVT HAL_DMAMUX2_REQ_GEN_DMAMUX2_CH2_EVT +#define HAL_DMAMUX2_REQUEST_GEN_DMAMUX2_CH3_EVT HAL_DMAMUX2_REQ_GEN_DMAMUX2_CH3_EVT +#define HAL_DMAMUX2_REQUEST_GEN_DMAMUX2_CH4_EVT HAL_DMAMUX2_REQ_GEN_DMAMUX2_CH4_EVT +#define HAL_DMAMUX2_REQUEST_GEN_DMAMUX2_CH5_EVT HAL_DMAMUX2_REQ_GEN_DMAMUX2_CH5_EVT +#define HAL_DMAMUX2_REQUEST_GEN_DMAMUX2_CH6_EVT HAL_DMAMUX2_REQ_GEN_DMAMUX2_CH6_EVT +#define HAL_DMAMUX2_REQUEST_GEN_LPUART1_RX_WKUP HAL_DMAMUX2_REQ_GEN_LPUART1_RX_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_LPUART1_TX_WKUP HAL_DMAMUX2_REQ_GEN_LPUART1_TX_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_LPTIM2_WKUP HAL_DMAMUX2_REQ_GEN_LPTIM2_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_LPTIM2_OUT HAL_DMAMUX2_REQ_GEN_LPTIM2_OUT +#define HAL_DMAMUX2_REQUEST_GEN_LPTIM3_WKUP HAL_DMAMUX2_REQ_GEN_LPTIM3_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_LPTIM3_OUT HAL_DMAMUX2_REQ_GEN_LPTIM3_OUT +#define 
HAL_DMAMUX2_REQUEST_GEN_LPTIM4_WKUP HAL_DMAMUX2_REQ_GEN_LPTIM4_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_LPTIM5_WKUP HAL_DMAMUX2_REQ_GEN_LPTIM5_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_I2C4_WKUP HAL_DMAMUX2_REQ_GEN_I2C4_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_SPI6_WKUP HAL_DMAMUX2_REQ_GEN_SPI6_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_COMP1_OUT HAL_DMAMUX2_REQ_GEN_COMP1_OUT +#define HAL_DMAMUX2_REQUEST_GEN_COMP2_OUT HAL_DMAMUX2_REQ_GEN_COMP2_OUT +#define HAL_DMAMUX2_REQUEST_GEN_RTC_WKUP HAL_DMAMUX2_REQ_GEN_RTC_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_EXTI0 HAL_DMAMUX2_REQ_GEN_EXTI0 +#define HAL_DMAMUX2_REQUEST_GEN_EXTI2 HAL_DMAMUX2_REQ_GEN_EXTI2 +#define HAL_DMAMUX2_REQUEST_GEN_I2C4_IT_EVT HAL_DMAMUX2_REQ_GEN_I2C4_IT_EVT +#define HAL_DMAMUX2_REQUEST_GEN_SPI6_IT HAL_DMAMUX2_REQ_GEN_SPI6_IT +#define HAL_DMAMUX2_REQUEST_GEN_LPUART1_TX_IT HAL_DMAMUX2_REQ_GEN_LPUART1_TX_IT +#define HAL_DMAMUX2_REQUEST_GEN_LPUART1_RX_IT HAL_DMAMUX2_REQ_GEN_LPUART1_RX_IT +#define HAL_DMAMUX2_REQUEST_GEN_ADC3_IT HAL_DMAMUX2_REQ_GEN_ADC3_IT +#define HAL_DMAMUX2_REQUEST_GEN_ADC3_AWD1_OUT HAL_DMAMUX2_REQ_GEN_ADC3_AWD1_OUT +#define HAL_DMAMUX2_REQUEST_GEN_BDMA_CH0_IT HAL_DMAMUX2_REQ_GEN_BDMA_CH0_IT +#define HAL_DMAMUX2_REQUEST_GEN_BDMA_CH1_IT HAL_DMAMUX2_REQ_GEN_BDMA_CH1_IT + +#define HAL_DMAMUX_REQUEST_GEN_NO_EVENT HAL_DMAMUX_REQ_GEN_NO_EVENT +#define HAL_DMAMUX_REQUEST_GEN_RISING HAL_DMAMUX_REQ_GEN_RISING +#define HAL_DMAMUX_REQUEST_GEN_FALLING HAL_DMAMUX_REQ_GEN_FALLING +#define HAL_DMAMUX_REQUEST_GEN_RISING_FALLING HAL_DMAMUX_REQ_GEN_RISING_FALLING + +#define DFSDM_FILTER_EXT_TRIG_LPTIM1 DFSDM_FILTER_EXT_TRIG_LPTIM1_OUT +#define DFSDM_FILTER_EXT_TRIG_LPTIM2 DFSDM_FILTER_EXT_TRIG_LPTIM2_OUT +#define DFSDM_FILTER_EXT_TRIG_LPTIM3 DFSDM_FILTER_EXT_TRIG_LPTIM3_OUT + +#define DAC_TRIGGER_LP1_OUT DAC_TRIGGER_LPTIM1_OUT +#define DAC_TRIGGER_LP2_OUT DAC_TRIGGER_LPTIM2_OUT + +#endif /* STM32H7 */ + +#if defined(STM32U5) +#define GPDMA1_REQUEST_DCMI GPDMA1_REQUEST_DCMI_PSSI +#endif /* STM32U5 */ +/** + * @} + */ + +/** @defgroup HAL_FLASH_Aliased_Defines HAL FLASH Aliased Defines maintained for legacy purpose + * @{ + */ + +#define TYPEPROGRAM_BYTE FLASH_TYPEPROGRAM_BYTE +#define TYPEPROGRAM_HALFWORD FLASH_TYPEPROGRAM_HALFWORD +#define TYPEPROGRAM_WORD FLASH_TYPEPROGRAM_WORD +#define TYPEPROGRAM_DOUBLEWORD FLASH_TYPEPROGRAM_DOUBLEWORD +#define TYPEERASE_SECTORS FLASH_TYPEERASE_SECTORS +#define TYPEERASE_PAGES FLASH_TYPEERASE_PAGES +#define TYPEERASE_PAGEERASE FLASH_TYPEERASE_PAGES +#define TYPEERASE_MASSERASE FLASH_TYPEERASE_MASSERASE +#define WRPSTATE_DISABLE OB_WRPSTATE_DISABLE +#define WRPSTATE_ENABLE OB_WRPSTATE_ENABLE +#define HAL_FLASH_TIMEOUT_VALUE FLASH_TIMEOUT_VALUE +#define OBEX_PCROP OPTIONBYTE_PCROP +#define OBEX_BOOTCONFIG OPTIONBYTE_BOOTCONFIG +#define PCROPSTATE_DISABLE OB_PCROP_STATE_DISABLE +#define PCROPSTATE_ENABLE OB_PCROP_STATE_ENABLE +#define TYPEERASEDATA_BYTE FLASH_TYPEERASEDATA_BYTE +#define TYPEERASEDATA_HALFWORD FLASH_TYPEERASEDATA_HALFWORD +#define TYPEERASEDATA_WORD FLASH_TYPEERASEDATA_WORD +#define TYPEPROGRAMDATA_BYTE FLASH_TYPEPROGRAMDATA_BYTE +#define TYPEPROGRAMDATA_HALFWORD FLASH_TYPEPROGRAMDATA_HALFWORD +#define TYPEPROGRAMDATA_WORD FLASH_TYPEPROGRAMDATA_WORD +#define TYPEPROGRAMDATA_FASTBYTE FLASH_TYPEPROGRAMDATA_FASTBYTE +#define TYPEPROGRAMDATA_FASTHALFWORD FLASH_TYPEPROGRAMDATA_FASTHALFWORD +#define TYPEPROGRAMDATA_FASTWORD FLASH_TYPEPROGRAMDATA_FASTWORD +#define PAGESIZE FLASH_PAGE_SIZE +#define TYPEPROGRAM_FASTBYTE FLASH_TYPEPROGRAM_BYTE +#define TYPEPROGRAM_FASTHALFWORD FLASH_TYPEPROGRAM_HALFWORD 
+#define TYPEPROGRAM_FASTWORD FLASH_TYPEPROGRAM_WORD +#define VOLTAGE_RANGE_1 FLASH_VOLTAGE_RANGE_1 +#define VOLTAGE_RANGE_2 FLASH_VOLTAGE_RANGE_2 +#define VOLTAGE_RANGE_3 FLASH_VOLTAGE_RANGE_3 +#define VOLTAGE_RANGE_4 FLASH_VOLTAGE_RANGE_4 +#define TYPEPROGRAM_FAST FLASH_TYPEPROGRAM_FAST +#define TYPEPROGRAM_FAST_AND_LAST FLASH_TYPEPROGRAM_FAST_AND_LAST +#define WRPAREA_BANK1_AREAA OB_WRPAREA_BANK1_AREAA +#define WRPAREA_BANK1_AREAB OB_WRPAREA_BANK1_AREAB +#define WRPAREA_BANK2_AREAA OB_WRPAREA_BANK2_AREAA +#define WRPAREA_BANK2_AREAB OB_WRPAREA_BANK2_AREAB +#define IWDG_STDBY_FREEZE OB_IWDG_STDBY_FREEZE +#define IWDG_STDBY_ACTIVE OB_IWDG_STDBY_RUN +#define IWDG_STOP_FREEZE OB_IWDG_STOP_FREEZE +#define IWDG_STOP_ACTIVE OB_IWDG_STOP_RUN +#define FLASH_ERROR_NONE HAL_FLASH_ERROR_NONE +#define FLASH_ERROR_RD HAL_FLASH_ERROR_RD +#define FLASH_ERROR_PG HAL_FLASH_ERROR_PROG +#define FLASH_ERROR_PGP HAL_FLASH_ERROR_PGS +#define FLASH_ERROR_WRP HAL_FLASH_ERROR_WRP +#define FLASH_ERROR_OPTV HAL_FLASH_ERROR_OPTV +#define FLASH_ERROR_OPTVUSR HAL_FLASH_ERROR_OPTVUSR +#define FLASH_ERROR_PROG HAL_FLASH_ERROR_PROG +#define FLASH_ERROR_OP HAL_FLASH_ERROR_OPERATION +#define FLASH_ERROR_PGA HAL_FLASH_ERROR_PGA +#define FLASH_ERROR_SIZE HAL_FLASH_ERROR_SIZE +#define FLASH_ERROR_SIZ HAL_FLASH_ERROR_SIZE +#define FLASH_ERROR_PGS HAL_FLASH_ERROR_PGS +#define FLASH_ERROR_MIS HAL_FLASH_ERROR_MIS +#define FLASH_ERROR_FAST HAL_FLASH_ERROR_FAST +#define FLASH_ERROR_FWWERR HAL_FLASH_ERROR_FWWERR +#define FLASH_ERROR_NOTZERO HAL_FLASH_ERROR_NOTZERO +#define FLASH_ERROR_OPERATION HAL_FLASH_ERROR_OPERATION +#define FLASH_ERROR_ERS HAL_FLASH_ERROR_ERS +#define OB_WDG_SW OB_IWDG_SW +#define OB_WDG_HW OB_IWDG_HW +#define OB_SDADC12_VDD_MONITOR_SET OB_SDACD_VDD_MONITOR_SET +#define OB_SDADC12_VDD_MONITOR_RESET OB_SDACD_VDD_MONITOR_RESET +#define OB_RAM_PARITY_CHECK_SET OB_SRAM_PARITY_SET +#define OB_RAM_PARITY_CHECK_RESET OB_SRAM_PARITY_RESET +#define IS_OB_SDADC12_VDD_MONITOR IS_OB_SDACD_VDD_MONITOR +#define OB_RDP_LEVEL0 OB_RDP_LEVEL_0 +#define OB_RDP_LEVEL1 OB_RDP_LEVEL_1 +#define OB_RDP_LEVEL2 OB_RDP_LEVEL_2 +#if defined(STM32G0) || defined(STM32C0) +#define OB_BOOT_LOCK_DISABLE OB_BOOT_ENTRY_FORCED_NONE +#define OB_BOOT_LOCK_ENABLE OB_BOOT_ENTRY_FORCED_FLASH +#else +#define OB_BOOT_ENTRY_FORCED_NONE OB_BOOT_LOCK_DISABLE +#define OB_BOOT_ENTRY_FORCED_FLASH OB_BOOT_LOCK_ENABLE +#endif +#if defined(STM32H7) +#define FLASH_FLAG_SNECCE_BANK1RR FLASH_FLAG_SNECCERR_BANK1 +#define FLASH_FLAG_DBECCE_BANK1RR FLASH_FLAG_DBECCERR_BANK1 +#define FLASH_FLAG_STRBER_BANK1R FLASH_FLAG_STRBERR_BANK1 +#define FLASH_FLAG_SNECCE_BANK2RR FLASH_FLAG_SNECCERR_BANK2 +#define FLASH_FLAG_DBECCE_BANK2RR FLASH_FLAG_DBECCERR_BANK2 +#define FLASH_FLAG_STRBER_BANK2R FLASH_FLAG_STRBERR_BANK2 +#define FLASH_FLAG_WDW FLASH_FLAG_WBNE +#define OB_WRP_SECTOR_All OB_WRP_SECTOR_ALL +#endif /* STM32H7 */ +#if defined(STM32U5) +#define OB_USER_nRST_STOP OB_USER_NRST_STOP +#define OB_USER_nRST_STDBY OB_USER_NRST_STDBY +#define OB_USER_nRST_SHDW OB_USER_NRST_SHDW +#define OB_USER_nSWBOOT0 OB_USER_NSWBOOT0 +#define OB_USER_nBOOT0 OB_USER_NBOOT0 +#define OB_nBOOT0_RESET OB_NBOOT0_RESET +#define OB_nBOOT0_SET OB_NBOOT0_SET +#define OB_USER_SRAM134_RST OB_USER_SRAM_RST +#define OB_SRAM134_RST_ERASE OB_SRAM_RST_ERASE +#define OB_SRAM134_RST_NOT_ERASE OB_SRAM_RST_NOT_ERASE +#endif /* STM32U5 */ +#if defined(STM32U0) +#define OB_USER_nRST_STOP OB_USER_NRST_STOP +#define OB_USER_nRST_STDBY OB_USER_NRST_STDBY +#define OB_USER_nRST_SHDW OB_USER_NRST_SHDW +#define 
OB_USER_nBOOT_SEL OB_USER_NBOOT_SEL +#define OB_USER_nBOOT0 OB_USER_NBOOT0 +#define OB_USER_nBOOT1 OB_USER_NBOOT1 +#define OB_nBOOT0_RESET OB_NBOOT0_RESET +#define OB_nBOOT0_SET OB_NBOOT0_SET +#endif /* STM32U0 */ + +/** + * @} + */ + +/** @defgroup HAL_JPEG_Aliased_Macros HAL JPEG Aliased Macros maintained for legacy purpose + * @{ + */ + +#if defined(STM32H7) +#define __HAL_RCC_JPEG_CLK_ENABLE __HAL_RCC_JPGDECEN_CLK_ENABLE +#define __HAL_RCC_JPEG_CLK_DISABLE __HAL_RCC_JPGDECEN_CLK_DISABLE +#define __HAL_RCC_JPEG_FORCE_RESET __HAL_RCC_JPGDECRST_FORCE_RESET +#define __HAL_RCC_JPEG_RELEASE_RESET __HAL_RCC_JPGDECRST_RELEASE_RESET +#define __HAL_RCC_JPEG_CLK_SLEEP_ENABLE __HAL_RCC_JPGDEC_CLK_SLEEP_ENABLE +#define __HAL_RCC_JPEG_CLK_SLEEP_DISABLE __HAL_RCC_JPGDEC_CLK_SLEEP_DISABLE +#endif /* STM32H7 */ + +/** + * @} + */ + +/** @defgroup HAL_SYSCFG_Aliased_Defines HAL SYSCFG Aliased Defines maintained for legacy purpose + * @{ + */ + +#define HAL_SYSCFG_FASTMODEPLUS_I2C_PA9 I2C_FASTMODEPLUS_PA9 +#define HAL_SYSCFG_FASTMODEPLUS_I2C_PA10 I2C_FASTMODEPLUS_PA10 +#define HAL_SYSCFG_FASTMODEPLUS_I2C_PB6 I2C_FASTMODEPLUS_PB6 +#define HAL_SYSCFG_FASTMODEPLUS_I2C_PB7 I2C_FASTMODEPLUS_PB7 +#define HAL_SYSCFG_FASTMODEPLUS_I2C_PB8 I2C_FASTMODEPLUS_PB8 +#define HAL_SYSCFG_FASTMODEPLUS_I2C_PB9 I2C_FASTMODEPLUS_PB9 +#define HAL_SYSCFG_FASTMODEPLUS_I2C1 I2C_FASTMODEPLUS_I2C1 +#define HAL_SYSCFG_FASTMODEPLUS_I2C2 I2C_FASTMODEPLUS_I2C2 +#define HAL_SYSCFG_FASTMODEPLUS_I2C3 I2C_FASTMODEPLUS_I2C3 +#if defined(STM32G4) + +#define HAL_SYSCFG_EnableIOAnalogSwitchBooster HAL_SYSCFG_EnableIOSwitchBooster +#define HAL_SYSCFG_DisableIOAnalogSwitchBooster HAL_SYSCFG_DisableIOSwitchBooster +#define HAL_SYSCFG_EnableIOAnalogSwitchVDD HAL_SYSCFG_EnableIOSwitchVDD +#define HAL_SYSCFG_DisableIOAnalogSwitchVDD HAL_SYSCFG_DisableIOSwitchVDD +#endif /* STM32G4 */ + +#if defined(STM32H5) +#define SYSCFG_IT_FPU_IOC SBS_IT_FPU_IOC +#define SYSCFG_IT_FPU_DZC SBS_IT_FPU_DZC +#define SYSCFG_IT_FPU_UFC SBS_IT_FPU_UFC +#define SYSCFG_IT_FPU_OFC SBS_IT_FPU_OFC +#define SYSCFG_IT_FPU_IDC SBS_IT_FPU_IDC +#define SYSCFG_IT_FPU_IXC SBS_IT_FPU_IXC + +#define SYSCFG_BREAK_FLASH_ECC SBS_BREAK_FLASH_ECC +#define SYSCFG_BREAK_PVD SBS_BREAK_PVD +#define SYSCFG_BREAK_SRAM_ECC SBS_BREAK_SRAM_ECC +#define SYSCFG_BREAK_LOCKUP SBS_BREAK_LOCKUP + +#define SYSCFG_VREFBUF_VOLTAGE_SCALE0 VREFBUF_VOLTAGE_SCALE0 +#define SYSCFG_VREFBUF_VOLTAGE_SCALE1 VREFBUF_VOLTAGE_SCALE1 +#define SYSCFG_VREFBUF_VOLTAGE_SCALE2 VREFBUF_VOLTAGE_SCALE2 +#define SYSCFG_VREFBUF_VOLTAGE_SCALE3 VREFBUF_VOLTAGE_SCALE3 + +#define SYSCFG_VREFBUF_HIGH_IMPEDANCE_DISABLE VREFBUF_HIGH_IMPEDANCE_DISABLE +#define SYSCFG_VREFBUF_HIGH_IMPEDANCE_ENABLE VREFBUF_HIGH_IMPEDANCE_ENABLE + +#define SYSCFG_FASTMODEPLUS_PB6 SBS_FASTMODEPLUS_PB6 +#define SYSCFG_FASTMODEPLUS_PB7 SBS_FASTMODEPLUS_PB7 +#define SYSCFG_FASTMODEPLUS_PB8 SBS_FASTMODEPLUS_PB8 +#define SYSCFG_FASTMODEPLUS_PB9 SBS_FASTMODEPLUS_PB9 + +#define SYSCFG_ETH_MII SBS_ETH_MII +#define SYSCFG_ETH_RMII SBS_ETH_RMII +#define IS_SYSCFG_ETHERNET_CONFIG IS_SBS_ETHERNET_CONFIG + +#define SYSCFG_MEMORIES_ERASE_FLAG_IPMEE SBS_MEMORIES_ERASE_FLAG_IPMEE +#define SYSCFG_MEMORIES_ERASE_FLAG_MCLR SBS_MEMORIES_ERASE_FLAG_MCLR +#define IS_SYSCFG_MEMORIES_ERASE_FLAG IS_SBS_MEMORIES_ERASE_FLAG + +#define IS_SYSCFG_CODE_CONFIG IS_SBS_CODE_CONFIG + +#define SYSCFG_MPU_NSEC SBS_MPU_NSEC +#define SYSCFG_VTOR_NSEC SBS_VTOR_NSEC +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +#define SYSCFG_SAU SBS_SAU +#define SYSCFG_MPU_SEC SBS_MPU_SEC 
+#define SYSCFG_VTOR_AIRCR_SEC SBS_VTOR_AIRCR_SEC +#define SYSCFG_LOCK_ALL SBS_LOCK_ALL +#else +#define SYSCFG_LOCK_ALL SBS_LOCK_ALL +#endif /* __ARM_FEATURE_CMSE */ + +#define SYSCFG_CLK SBS_CLK +#define SYSCFG_CLASSB SBS_CLASSB +#define SYSCFG_FPU SBS_FPU +#define SYSCFG_ALL SBS_ALL + +#define SYSCFG_SEC SBS_SEC +#define SYSCFG_NSEC SBS_NSEC + +#define __HAL_SYSCFG_FPU_INTERRUPT_ENABLE __HAL_SBS_FPU_INTERRUPT_ENABLE +#define __HAL_SYSCFG_FPU_INTERRUPT_DISABLE __HAL_SBS_FPU_INTERRUPT_DISABLE + +#define __HAL_SYSCFG_BREAK_ECC_LOCK __HAL_SBS_BREAK_ECC_LOCK +#define __HAL_SYSCFG_BREAK_LOCKUP_LOCK __HAL_SBS_BREAK_LOCKUP_LOCK +#define __HAL_SYSCFG_BREAK_PVD_LOCK __HAL_SBS_BREAK_PVD_LOCK +#define __HAL_SYSCFG_BREAK_SRAM_ECC_LOCK __HAL_SBS_BREAK_SRAM_ECC_LOCK + +#define __HAL_SYSCFG_FASTMODEPLUS_ENABLE __HAL_SBS_FASTMODEPLUS_ENABLE +#define __HAL_SYSCFG_FASTMODEPLUS_DISABLE __HAL_SBS_FASTMODEPLUS_DISABLE + +#define __HAL_SYSCFG_GET_MEMORIES_ERASE_STATUS __HAL_SBS_GET_MEMORIES_ERASE_STATUS +#define __HAL_SYSCFG_CLEAR_MEMORIES_ERASE_STATUS __HAL_SBS_CLEAR_MEMORIES_ERASE_STATUS + +#define IS_SYSCFG_FPU_INTERRUPT IS_SBS_FPU_INTERRUPT +#define IS_SYSCFG_BREAK_CONFIG IS_SBS_BREAK_CONFIG +#define IS_SYSCFG_VREFBUF_VOLTAGE_SCALE IS_VREFBUF_VOLTAGE_SCALE +#define IS_SYSCFG_VREFBUF_HIGH_IMPEDANCE IS_VREFBUF_HIGH_IMPEDANCE +#define IS_SYSCFG_VREFBUF_TRIMMING IS_VREFBUF_TRIMMING +#define IS_SYSCFG_FASTMODEPLUS IS_SBS_FASTMODEPLUS +#define IS_SYSCFG_ITEMS_ATTRIBUTES IS_SBS_ITEMS_ATTRIBUTES +#define IS_SYSCFG_ATTRIBUTES IS_SBS_ATTRIBUTES +#define IS_SYSCFG_LOCK_ITEMS IS_SBS_LOCK_ITEMS + +#define HAL_SYSCFG_VREFBUF_VoltageScalingConfig HAL_VREFBUF_VoltageScalingConfig +#define HAL_SYSCFG_VREFBUF_HighImpedanceConfig HAL_VREFBUF_HighImpedanceConfig +#define HAL_SYSCFG_VREFBUF_TrimmingConfig HAL_VREFBUF_TrimmingConfig +#define HAL_SYSCFG_EnableVREFBUF HAL_EnableVREFBUF +#define HAL_SYSCFG_DisableVREFBUF HAL_DisableVREFBUF + +#define HAL_SYSCFG_EnableIOAnalogSwitchBooster HAL_SBS_EnableIOAnalogSwitchBooster +#define HAL_SYSCFG_DisableIOAnalogSwitchBooster HAL_SBS_DisableIOAnalogSwitchBooster +#define HAL_SYSCFG_ETHInterfaceSelect HAL_SBS_ETHInterfaceSelect + +#define HAL_SYSCFG_Lock HAL_SBS_Lock +#define HAL_SYSCFG_GetLock HAL_SBS_GetLock + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +#define HAL_SYSCFG_ConfigAttributes HAL_SBS_ConfigAttributes +#define HAL_SYSCFG_GetConfigAttributes HAL_SBS_GetConfigAttributes +#endif /* __ARM_FEATURE_CMSE */ + +#endif /* STM32H5 */ + + +/** + * @} + */ + + +/** @defgroup LL_FMC_Aliased_Defines LL FMC Aliased Defines maintained for compatibility purpose + * @{ + */ +#if defined(STM32L4) || defined(STM32F7) || defined(STM32H7) || defined(STM32G4) +#define FMC_NAND_PCC_WAIT_FEATURE_DISABLE FMC_NAND_WAIT_FEATURE_DISABLE +#define FMC_NAND_PCC_WAIT_FEATURE_ENABLE FMC_NAND_WAIT_FEATURE_ENABLE +#define FMC_NAND_PCC_MEM_BUS_WIDTH_8 FMC_NAND_MEM_BUS_WIDTH_8 +#define FMC_NAND_PCC_MEM_BUS_WIDTH_16 FMC_NAND_MEM_BUS_WIDTH_16 +#elif defined(STM32F1) || defined(STM32F2) || defined(STM32F3) || defined(STM32F4) +#define FMC_NAND_WAIT_FEATURE_DISABLE FMC_NAND_PCC_WAIT_FEATURE_DISABLE +#define FMC_NAND_WAIT_FEATURE_ENABLE FMC_NAND_PCC_WAIT_FEATURE_ENABLE +#define FMC_NAND_MEM_BUS_WIDTH_8 FMC_NAND_PCC_MEM_BUS_WIDTH_8 +#define FMC_NAND_MEM_BUS_WIDTH_16 FMC_NAND_PCC_MEM_BUS_WIDTH_16 +#endif +/** + * @} + */ + +/** @defgroup LL_FSMC_Aliased_Defines LL FSMC Aliased Defines maintained for legacy purpose + * @{ + */ + +#define FSMC_NORSRAM_TYPEDEF FSMC_NORSRAM_TypeDef +#define 
FSMC_NORSRAM_EXTENDED_TYPEDEF FSMC_NORSRAM_EXTENDED_TypeDef +/** + * @} + */ + +/** @defgroup HAL_GPIO_Aliased_Macros HAL GPIO Aliased Macros maintained for legacy purpose + * @{ + */ +#define GET_GPIO_SOURCE GPIO_GET_INDEX +#define GET_GPIO_INDEX GPIO_GET_INDEX + +#if defined(STM32F4) +#define GPIO_AF12_SDMMC GPIO_AF12_SDIO +#define GPIO_AF12_SDMMC1 GPIO_AF12_SDIO +#endif + +#if defined(STM32F7) +#define GPIO_AF12_SDIO GPIO_AF12_SDMMC1 +#define GPIO_AF12_SDMMC GPIO_AF12_SDMMC1 +#endif + +#if defined(STM32L4) +#define GPIO_AF12_SDIO GPIO_AF12_SDMMC1 +#define GPIO_AF12_SDMMC GPIO_AF12_SDMMC1 +#endif + +#if defined(STM32H7) +#define GPIO_AF7_SDIO1 GPIO_AF7_SDMMC1 +#define GPIO_AF8_SDIO1 GPIO_AF8_SDMMC1 +#define GPIO_AF12_SDIO1 GPIO_AF12_SDMMC1 +#define GPIO_AF9_SDIO2 GPIO_AF9_SDMMC2 +#define GPIO_AF10_SDIO2 GPIO_AF10_SDMMC2 +#define GPIO_AF11_SDIO2 GPIO_AF11_SDMMC2 + +#if defined (STM32H743xx) || defined (STM32H753xx) || defined (STM32H750xx) || defined (STM32H742xx) || \ + defined (STM32H745xx) || defined (STM32H755xx) || defined (STM32H747xx) || defined (STM32H757xx) +#define GPIO_AF10_OTG2_HS GPIO_AF10_OTG2_FS +#define GPIO_AF10_OTG1_FS GPIO_AF10_OTG1_HS +#define GPIO_AF12_OTG2_FS GPIO_AF12_OTG1_FS +#endif /*STM32H743xx || STM32H753xx || STM32H750xx || STM32H742xx || STM32H745xx || STM32H755xx || STM32H747xx || \ + STM32H757xx */ +#endif /* STM32H7 */ + +#define GPIO_AF0_LPTIM GPIO_AF0_LPTIM1 +#define GPIO_AF1_LPTIM GPIO_AF1_LPTIM1 +#define GPIO_AF2_LPTIM GPIO_AF2_LPTIM1 + +#if defined(STM32L0) || defined(STM32L4) || defined(STM32F4) || defined(STM32F2) || defined(STM32F7) || \ + defined(STM32G4) || defined(STM32H7) || defined(STM32WB) || defined(STM32U5) +#define GPIO_SPEED_LOW GPIO_SPEED_FREQ_LOW +#define GPIO_SPEED_MEDIUM GPIO_SPEED_FREQ_MEDIUM +#define GPIO_SPEED_FAST GPIO_SPEED_FREQ_HIGH +#define GPIO_SPEED_HIGH GPIO_SPEED_FREQ_VERY_HIGH +#endif /* STM32L0 || STM32L4 || STM32F4 || STM32F2 || STM32F7 || STM32G4 || STM32H7 || STM32WB || STM32U5*/ + +#if defined(STM32L1) +#define GPIO_SPEED_VERY_LOW GPIO_SPEED_FREQ_LOW +#define GPIO_SPEED_LOW GPIO_SPEED_FREQ_MEDIUM +#define GPIO_SPEED_MEDIUM GPIO_SPEED_FREQ_HIGH +#define GPIO_SPEED_HIGH GPIO_SPEED_FREQ_VERY_HIGH +#endif /* STM32L1 */ + +#if defined(STM32F0) || defined(STM32F3) || defined(STM32F1) +#define GPIO_SPEED_LOW GPIO_SPEED_FREQ_LOW +#define GPIO_SPEED_MEDIUM GPIO_SPEED_FREQ_MEDIUM +#define GPIO_SPEED_HIGH GPIO_SPEED_FREQ_HIGH +#endif /* STM32F0 || STM32F3 || STM32F1 */ + +#define GPIO_AF6_DFSDM GPIO_AF6_DFSDM1 + +#if defined(STM32U5) || defined(STM32H5) +#define GPIO_AF0_RTC_50Hz GPIO_AF0_RTC_50HZ +#endif /* STM32U5 || STM32H5 */ +#if defined(STM32U5) +#define GPIO_AF0_S2DSTOP GPIO_AF0_SRDSTOP +#define GPIO_AF11_LPGPIO GPIO_AF11_LPGPIO1 +#endif /* STM32U5 */ +/** + * @} + */ + +/** @defgroup HAL_GTZC_Aliased_Defines HAL GTZC Aliased Defines maintained for legacy purpose + * @{ + */ +#if defined(STM32U5) +#define GTZC_PERIPH_DCMI GTZC_PERIPH_DCMI_PSSI +#define GTZC_PERIPH_LTDC GTZC_PERIPH_LTDCUSB +#endif /* STM32U5 */ +#if defined(STM32H5) +#define GTZC_PERIPH_DAC12 GTZC_PERIPH_DAC1 +#define GTZC_PERIPH_ADC12 GTZC_PERIPH_ADC +#define GTZC_PERIPH_USBFS GTZC_PERIPH_USB +#endif /* STM32H5 */ +#if defined(STM32H5) || defined(STM32U5) +#define GTZC_MCPBB_NB_VCTR_REG_MAX GTZC_MPCBB_NB_VCTR_REG_MAX +#define GTZC_MCPBB_NB_LCK_VCTR_REG_MAX GTZC_MPCBB_NB_LCK_VCTR_REG_MAX +#define GTZC_MCPBB_SUPERBLOCK_UNLOCKED GTZC_MPCBB_SUPERBLOCK_UNLOCKED +#define GTZC_MCPBB_SUPERBLOCK_LOCKED GTZC_MPCBB_SUPERBLOCK_LOCKED +#define GTZC_MCPBB_BLOCK_NSEC 
GTZC_MPCBB_BLOCK_NSEC +#define GTZC_MCPBB_BLOCK_SEC GTZC_MPCBB_BLOCK_SEC +#define GTZC_MCPBB_BLOCK_NPRIV GTZC_MPCBB_BLOCK_NPRIV +#define GTZC_MCPBB_BLOCK_PRIV GTZC_MPCBB_BLOCK_PRIV +#define GTZC_MCPBB_LOCK_OFF GTZC_MPCBB_LOCK_OFF +#define GTZC_MCPBB_LOCK_ON GTZC_MPCBB_LOCK_ON +#endif /* STM32H5 || STM32U5 */ +/** + * @} + */ + +/** @defgroup HAL_HRTIM_Aliased_Macros HAL HRTIM Aliased Macros maintained for legacy purpose + * @{ + */ +#define HRTIM_TIMDELAYEDPROTECTION_DISABLED HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_DISABLED +#define HRTIM_TIMDELAYEDPROTECTION_DELAYEDOUT1_EEV68 HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_DELAYEDOUT1_EEV6 +#define HRTIM_TIMDELAYEDPROTECTION_DELAYEDOUT2_EEV68 HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_DELAYEDOUT2_EEV6 +#define HRTIM_TIMDELAYEDPROTECTION_DELAYEDBOTH_EEV68 HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_DELAYEDBOTH_EEV6 +#define HRTIM_TIMDELAYEDPROTECTION_BALANCED_EEV68 HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_BALANCED_EEV6 +#define HRTIM_TIMDELAYEDPROTECTION_DELAYEDOUT1_DEEV79 HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_DELAYEDOUT1_DEEV7 +#define HRTIM_TIMDELAYEDPROTECTION_DELAYEDOUT2_DEEV79 HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_DELAYEDOUT2_DEEV7 +#define HRTIM_TIMDELAYEDPROTECTION_DELAYEDBOTH_EEV79 HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_DELAYEDBOTH_EEV7 +#define HRTIM_TIMDELAYEDPROTECTION_BALANCED_EEV79 HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_BALANCED_EEV7 + +#define __HAL_HRTIM_SetCounter __HAL_HRTIM_SETCOUNTER +#define __HAL_HRTIM_GetCounter __HAL_HRTIM_GETCOUNTER +#define __HAL_HRTIM_SetPeriod __HAL_HRTIM_SETPERIOD +#define __HAL_HRTIM_GetPeriod __HAL_HRTIM_GETPERIOD +#define __HAL_HRTIM_SetClockPrescaler __HAL_HRTIM_SETCLOCKPRESCALER +#define __HAL_HRTIM_GetClockPrescaler __HAL_HRTIM_GETCLOCKPRESCALER +#define __HAL_HRTIM_SetCompare __HAL_HRTIM_SETCOMPARE +#define __HAL_HRTIM_GetCompare __HAL_HRTIM_GETCOMPARE + +#if defined(STM32G4) +#define HAL_HRTIM_ExternalEventCounterConfig HAL_HRTIM_ExtEventCounterConfig +#define HAL_HRTIM_ExternalEventCounterEnable HAL_HRTIM_ExtEventCounterEnable +#define HAL_HRTIM_ExternalEventCounterDisable HAL_HRTIM_ExtEventCounterDisable +#define HAL_HRTIM_ExternalEventCounterReset HAL_HRTIM_ExtEventCounterReset +#define HRTIM_TIMEEVENT_A HRTIM_EVENTCOUNTER_A +#define HRTIM_TIMEEVENT_B HRTIM_EVENTCOUNTER_B +#define HRTIM_TIMEEVENTRESETMODE_UNCONDITIONAL HRTIM_EVENTCOUNTER_RSTMODE_UNCONDITIONAL +#define HRTIM_TIMEEVENTRESETMODE_CONDITIONAL HRTIM_EVENTCOUNTER_RSTMODE_CONDITIONAL +#endif /* STM32G4 */ + +#if defined(STM32H7) +#define HRTIM_OUTPUTSET_TIMAEV1_TIMBCMP1 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTSET_TIMAEV2_TIMBCMP2 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTSET_TIMAEV3_TIMCCMP2 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTSET_TIMAEV4_TIMCCMP3 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTSET_TIMAEV5_TIMDCMP1 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTSET_TIMAEV6_TIMDCMP2 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTSET_TIMAEV7_TIMECMP3 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTSET_TIMAEV8_TIMECMP4 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTSET_TIMAEV9_TIMFCMP4 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTSET_TIMBEV1_TIMACMP1 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTSET_TIMBEV2_TIMACMP2 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTSET_TIMBEV3_TIMCCMP3 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTSET_TIMBEV4_TIMCCMP4 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTSET_TIMBEV5_TIMDCMP3 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTSET_TIMBEV6_TIMDCMP4 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTSET_TIMBEV7_TIMECMP1 HRTIM_OUTPUTSET_TIMEV_7 
+#define HRTIM_OUTPUTSET_TIMBEV8_TIMECMP2 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTSET_TIMBEV9_TIMFCMP3 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTSET_TIMCEV1_TIMACMP1 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTSET_TIMCEV2_TIMACMP2 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTSET_TIMCEV3_TIMBCMP2 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTSET_TIMCEV4_TIMBCMP3 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTSET_TIMCEV5_TIMDCMP2 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTSET_TIMCEV6_TIMDCMP4 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTSET_TIMCEV7_TIMECMP3 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTSET_TIMCEV8_TIMECMP4 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTSET_TIMCEV9_TIMFCMP2 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTSET_TIMDEV1_TIMACMP1 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTSET_TIMDEV2_TIMACMP4 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTSET_TIMDEV3_TIMBCMP2 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTSET_TIMDEV4_TIMBCMP4 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTSET_TIMDEV5_TIMCCMP4 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTSET_TIMDEV6_TIMECMP1 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTSET_TIMDEV7_TIMECMP4 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTSET_TIMDEV8_TIMFCMP1 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTSET_TIMDEV9_TIMFCMP3 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTSET_TIMEEV1_TIMACMP4 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTSET_TIMEEV2_TIMBCMP3 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTSET_TIMEEV3_TIMBCMP4 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTSET_TIMEEV4_TIMCCMP1 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTSET_TIMEEV5_TIMDCMP2 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTSET_TIMEEV6_TIMDCMP1 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTSET_TIMEEV7_TIMDCMP2 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTSET_TIMEEV8_TIMFCMP3 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTSET_TIMEEV9_TIMFCMP4 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTSET_TIMFEV1_TIMACMP3 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTSET_TIMFEV2_TIMBCMP1 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTSET_TIMFEV3_TIMBCMP4 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTSET_TIMFEV4_TIMCCMP1 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTSET_TIMFEV5_TIMCCMP4 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTSET_TIMFEV6_TIMDCMP3 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTSET_TIMFEV7_TIMDCMP4 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTSET_TIMFEV8_TIMECMP2 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTSET_TIMFEV9_TIMECMP3 HRTIM_OUTPUTSET_TIMEV_9 + +#define HRTIM_OUTPUTRESET_TIMAEV1_TIMBCMP1 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTRESET_TIMAEV2_TIMBCMP2 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTRESET_TIMAEV3_TIMCCMP2 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTRESET_TIMAEV4_TIMCCMP3 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTRESET_TIMAEV5_TIMDCMP1 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTRESET_TIMAEV6_TIMDCMP2 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTRESET_TIMAEV7_TIMECMP3 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTRESET_TIMAEV8_TIMECMP4 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTRESET_TIMAEV9_TIMFCMP4 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTRESET_TIMBEV1_TIMACMP1 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTRESET_TIMBEV2_TIMACMP2 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTRESET_TIMBEV3_TIMCCMP3 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTRESET_TIMBEV4_TIMCCMP4 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTRESET_TIMBEV5_TIMDCMP3 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTRESET_TIMBEV6_TIMDCMP4 HRTIM_OUTPUTSET_TIMEV_6 +#define 
HRTIM_OUTPUTRESET_TIMBEV7_TIMECMP1 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTRESET_TIMBEV8_TIMECMP2 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTRESET_TIMBEV9_TIMFCMP3 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTRESET_TIMCEV1_TIMACMP1 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTRESET_TIMCEV2_TIMACMP2 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTRESET_TIMCEV3_TIMBCMP2 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTRESET_TIMCEV4_TIMBCMP3 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTRESET_TIMCEV5_TIMDCMP2 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTRESET_TIMCEV6_TIMDCMP4 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTRESET_TIMCEV7_TIMECMP3 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTRESET_TIMCEV8_TIMECMP4 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTRESET_TIMCEV9_TIMFCMP2 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTRESET_TIMDEV1_TIMACMP1 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTRESET_TIMDEV2_TIMACMP4 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTRESET_TIMDEV3_TIMBCMP2 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTRESET_TIMDEV4_TIMBCMP4 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTRESET_TIMDEV5_TIMCCMP4 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTRESET_TIMDEV6_TIMECMP1 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTRESET_TIMDEV7_TIMECMP4 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTRESET_TIMDEV8_TIMFCMP1 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTRESET_TIMDEV9_TIMFCMP3 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTRESET_TIMEEV1_TIMACMP4 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTRESET_TIMEEV2_TIMBCMP3 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTRESET_TIMEEV3_TIMBCMP4 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTRESET_TIMEEV4_TIMCCMP1 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTRESET_TIMEEV5_TIMDCMP2 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTRESET_TIMEEV6_TIMDCMP1 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTRESET_TIMEEV7_TIMDCMP2 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTRESET_TIMEEV8_TIMFCMP3 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTRESET_TIMEEV9_TIMFCMP4 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTRESET_TIMFEV1_TIMACMP3 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTRESET_TIMFEV2_TIMBCMP1 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTRESET_TIMFEV3_TIMBCMP4 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTRESET_TIMFEV4_TIMCCMP1 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTRESET_TIMFEV5_TIMCCMP4 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTRESET_TIMFEV6_TIMDCMP3 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTRESET_TIMFEV7_TIMDCMP4 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTRESET_TIMFEV8_TIMECMP2 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTRESET_TIMFEV9_TIMECMP3 HRTIM_OUTPUTSET_TIMEV_9 +#endif /* STM32H7 */ + +#if defined(STM32F3) +/** @brief Constants defining available sources associated to external events. 
+ */ +#define HRTIM_EVENTSRC_1 (0x00000000U) +#define HRTIM_EVENTSRC_2 (HRTIM_EECR1_EE1SRC_0) +#define HRTIM_EVENTSRC_3 (HRTIM_EECR1_EE1SRC_1) +#define HRTIM_EVENTSRC_4 (HRTIM_EECR1_EE1SRC_1 | HRTIM_EECR1_EE1SRC_0) + +/** @brief Constants defining the DLL calibration periods (in micro seconds) + */ +#define HRTIM_CALIBRATIONRATE_7300 0x00000000U +#define HRTIM_CALIBRATIONRATE_910 (HRTIM_DLLCR_CALRTE_0) +#define HRTIM_CALIBRATIONRATE_114 (HRTIM_DLLCR_CALRTE_1) +#define HRTIM_CALIBRATIONRATE_14 (HRTIM_DLLCR_CALRTE_1 | HRTIM_DLLCR_CALRTE_0) + +#endif /* STM32F3 */ +/** + * @} + */ + +/** @defgroup HAL_I2C_Aliased_Defines HAL I2C Aliased Defines maintained for legacy purpose + * @{ + */ +#define I2C_DUALADDRESS_DISABLED I2C_DUALADDRESS_DISABLE +#define I2C_DUALADDRESS_ENABLED I2C_DUALADDRESS_ENABLE +#define I2C_GENERALCALL_DISABLED I2C_GENERALCALL_DISABLE +#define I2C_GENERALCALL_ENABLED I2C_GENERALCALL_ENABLE +#define I2C_NOSTRETCH_DISABLED I2C_NOSTRETCH_DISABLE +#define I2C_NOSTRETCH_ENABLED I2C_NOSTRETCH_ENABLE +#define I2C_ANALOGFILTER_ENABLED I2C_ANALOGFILTER_ENABLE +#define I2C_ANALOGFILTER_DISABLED I2C_ANALOGFILTER_DISABLE +#if defined(STM32F0) || defined(STM32F1) || defined(STM32F3) || defined(STM32G0) || defined(STM32L4) || \ + defined(STM32L1) || defined(STM32F7) +#define HAL_I2C_STATE_MEM_BUSY_TX HAL_I2C_STATE_BUSY_TX +#define HAL_I2C_STATE_MEM_BUSY_RX HAL_I2C_STATE_BUSY_RX +#define HAL_I2C_STATE_MASTER_BUSY_TX HAL_I2C_STATE_BUSY_TX +#define HAL_I2C_STATE_MASTER_BUSY_RX HAL_I2C_STATE_BUSY_RX +#define HAL_I2C_STATE_SLAVE_BUSY_TX HAL_I2C_STATE_BUSY_TX +#define HAL_I2C_STATE_SLAVE_BUSY_RX HAL_I2C_STATE_BUSY_RX +#endif +/** + * @} + */ + +/** @defgroup HAL_IRDA_Aliased_Defines HAL IRDA Aliased Defines maintained for legacy purpose + * @{ + */ +#define IRDA_ONE_BIT_SAMPLE_DISABLED IRDA_ONE_BIT_SAMPLE_DISABLE +#define IRDA_ONE_BIT_SAMPLE_ENABLED IRDA_ONE_BIT_SAMPLE_ENABLE + +/** + * @} + */ + +/** @defgroup HAL_IWDG_Aliased_Defines HAL IWDG Aliased Defines maintained for legacy purpose + * @{ + */ +#define KR_KEY_RELOAD IWDG_KEY_RELOAD +#define KR_KEY_ENABLE IWDG_KEY_ENABLE +#define KR_KEY_EWA IWDG_KEY_WRITE_ACCESS_ENABLE +#define KR_KEY_DWA IWDG_KEY_WRITE_ACCESS_DISABLE +/** + * @} + */ + +/** @defgroup HAL_LPTIM_Aliased_Defines HAL LPTIM Aliased Defines maintained for legacy purpose + * @{ + */ + +#define LPTIM_CLOCKSAMPLETIME_DIRECTTRANSISTION LPTIM_CLOCKSAMPLETIME_DIRECTTRANSITION +#define LPTIM_CLOCKSAMPLETIME_2TRANSISTIONS LPTIM_CLOCKSAMPLETIME_2TRANSITIONS +#define LPTIM_CLOCKSAMPLETIME_4TRANSISTIONS LPTIM_CLOCKSAMPLETIME_4TRANSITIONS +#define LPTIM_CLOCKSAMPLETIME_8TRANSISTIONS LPTIM_CLOCKSAMPLETIME_8TRANSITIONS + +#define LPTIM_CLOCKPOLARITY_RISINGEDGE LPTIM_CLOCKPOLARITY_RISING +#define LPTIM_CLOCKPOLARITY_FALLINGEDGE LPTIM_CLOCKPOLARITY_FALLING +#define LPTIM_CLOCKPOLARITY_BOTHEDGES LPTIM_CLOCKPOLARITY_RISING_FALLING + +#define LPTIM_TRIGSAMPLETIME_DIRECTTRANSISTION LPTIM_TRIGSAMPLETIME_DIRECTTRANSITION +#define LPTIM_TRIGSAMPLETIME_2TRANSISTIONS LPTIM_TRIGSAMPLETIME_2TRANSITIONS +#define LPTIM_TRIGSAMPLETIME_4TRANSISTIONS LPTIM_TRIGSAMPLETIME_4TRANSITIONS +#define LPTIM_TRIGSAMPLETIME_8TRANSISTIONS LPTIM_TRIGSAMPLETIME_8TRANSITIONS + +/* The following 3 definition have also been present in a temporary version of lptim.h */ +/* They need to be renamed also to the right name, just in case */ +#define LPTIM_TRIGSAMPLETIME_2TRANSITION LPTIM_TRIGSAMPLETIME_2TRANSITIONS +#define LPTIM_TRIGSAMPLETIME_4TRANSITION LPTIM_TRIGSAMPLETIME_4TRANSITIONS +#define 
LPTIM_TRIGSAMPLETIME_8TRANSITION LPTIM_TRIGSAMPLETIME_8TRANSITIONS + + +/** @defgroup HAL_LPTIM_Aliased_Defines HAL LPTIM Aliased Defines maintained for legacy purpose + * @{ + */ +#define HAL_LPTIM_ReadCompare HAL_LPTIM_ReadCapturedValue +/** + * @} + */ + +#if defined(STM32U5) +#define LPTIM_ISR_CC1 LPTIM_ISR_CC1IF +#define LPTIM_ISR_CC2 LPTIM_ISR_CC2IF +#define LPTIM_CHANNEL_ALL 0x00000000U +#endif /* STM32U5 */ +/** + * @} + */ + +/** @defgroup HAL_NAND_Aliased_Defines HAL NAND Aliased Defines maintained for legacy purpose + * @{ + */ +#define HAL_NAND_Read_Page HAL_NAND_Read_Page_8b +#define HAL_NAND_Write_Page HAL_NAND_Write_Page_8b +#define HAL_NAND_Read_SpareArea HAL_NAND_Read_SpareArea_8b +#define HAL_NAND_Write_SpareArea HAL_NAND_Write_SpareArea_8b + +#define NAND_AddressTypedef NAND_AddressTypeDef + +#define __ARRAY_ADDRESS ARRAY_ADDRESS +#define __ADDR_1st_CYCLE ADDR_1ST_CYCLE +#define __ADDR_2nd_CYCLE ADDR_2ND_CYCLE +#define __ADDR_3rd_CYCLE ADDR_3RD_CYCLE +#define __ADDR_4th_CYCLE ADDR_4TH_CYCLE +/** + * @} + */ + +/** @defgroup HAL_NOR_Aliased_Defines HAL NOR Aliased Defines maintained for legacy purpose + * @{ + */ +#define NOR_StatusTypedef HAL_NOR_StatusTypeDef +#define NOR_SUCCESS HAL_NOR_STATUS_SUCCESS +#define NOR_ONGOING HAL_NOR_STATUS_ONGOING +#define NOR_ERROR HAL_NOR_STATUS_ERROR +#define NOR_TIMEOUT HAL_NOR_STATUS_TIMEOUT + +#define __NOR_WRITE NOR_WRITE +#define __NOR_ADDR_SHIFT NOR_ADDR_SHIFT +/** + * @} + */ + +/** @defgroup HAL_OPAMP_Aliased_Defines HAL OPAMP Aliased Defines maintained for legacy purpose + * @{ + */ + +#define OPAMP_NONINVERTINGINPUT_VP0 OPAMP_NONINVERTINGINPUT_IO0 +#define OPAMP_NONINVERTINGINPUT_VP1 OPAMP_NONINVERTINGINPUT_IO1 +#define OPAMP_NONINVERTINGINPUT_VP2 OPAMP_NONINVERTINGINPUT_IO2 +#define OPAMP_NONINVERTINGINPUT_VP3 OPAMP_NONINVERTINGINPUT_IO3 + +#define OPAMP_SEC_NONINVERTINGINPUT_VP0 OPAMP_SEC_NONINVERTINGINPUT_IO0 +#define OPAMP_SEC_NONINVERTINGINPUT_VP1 OPAMP_SEC_NONINVERTINGINPUT_IO1 +#define OPAMP_SEC_NONINVERTINGINPUT_VP2 OPAMP_SEC_NONINVERTINGINPUT_IO2 +#define OPAMP_SEC_NONINVERTINGINPUT_VP3 OPAMP_SEC_NONINVERTINGINPUT_IO3 + +#define OPAMP_INVERTINGINPUT_VM0 OPAMP_INVERTINGINPUT_IO0 +#define OPAMP_INVERTINGINPUT_VM1 OPAMP_INVERTINGINPUT_IO1 + +#define IOPAMP_INVERTINGINPUT_VM0 OPAMP_INVERTINGINPUT_IO0 +#define IOPAMP_INVERTINGINPUT_VM1 OPAMP_INVERTINGINPUT_IO1 + +#define OPAMP_SEC_INVERTINGINPUT_VM0 OPAMP_SEC_INVERTINGINPUT_IO0 +#define OPAMP_SEC_INVERTINGINPUT_VM1 OPAMP_SEC_INVERTINGINPUT_IO1 + +#define OPAMP_INVERTINGINPUT_VINM OPAMP_SEC_INVERTINGINPUT_IO1 + +#define OPAMP_PGACONNECT_NO OPAMP_PGA_CONNECT_INVERTINGINPUT_NO +#define OPAMP_PGACONNECT_VM0 OPAMP_PGA_CONNECT_INVERTINGINPUT_IO0 +#define OPAMP_PGACONNECT_VM1 OPAMP_PGA_CONNECT_INVERTINGINPUT_IO1 + +#if defined(STM32L1) || defined(STM32L4) || defined(STM32L5) || defined(STM32H7) || defined(STM32G4) || defined(STM32U5) +#define HAL_OPAMP_MSP_INIT_CB_ID HAL_OPAMP_MSPINIT_CB_ID +#define HAL_OPAMP_MSP_DEINIT_CB_ID HAL_OPAMP_MSPDEINIT_CB_ID +#endif + +#if defined(STM32L4) || defined(STM32L5) +#define OPAMP_POWERMODE_NORMAL OPAMP_POWERMODE_NORMALPOWER +#elif defined(STM32G4) +#define OPAMP_POWERMODE_NORMAL OPAMP_POWERMODE_NORMALSPEED +#endif + +/** + * @} + */ + +/** @defgroup HAL_I2S_Aliased_Defines HAL I2S Aliased Defines maintained for legacy purpose + * @{ + */ +#define I2S_STANDARD_PHILLIPS I2S_STANDARD_PHILIPS + +#if defined(STM32H7) +#define I2S_IT_TXE I2S_IT_TXP +#define I2S_IT_RXNE I2S_IT_RXP + +#define I2S_FLAG_TXE I2S_FLAG_TXP +#define I2S_FLAG_RXNE 
I2S_FLAG_RXP +#endif + +#if defined(STM32F7) +#define I2S_CLOCK_SYSCLK I2S_CLOCK_PLL +#endif +/** + * @} + */ + +/** @defgroup HAL_PCCARD_Aliased_Defines HAL PCCARD Aliased Defines maintained for legacy purpose + * @{ + */ + +/* Compact Flash-ATA registers description */ +#define CF_DATA ATA_DATA +#define CF_SECTOR_COUNT ATA_SECTOR_COUNT +#define CF_SECTOR_NUMBER ATA_SECTOR_NUMBER +#define CF_CYLINDER_LOW ATA_CYLINDER_LOW +#define CF_CYLINDER_HIGH ATA_CYLINDER_HIGH +#define CF_CARD_HEAD ATA_CARD_HEAD +#define CF_STATUS_CMD ATA_STATUS_CMD +#define CF_STATUS_CMD_ALTERNATE ATA_STATUS_CMD_ALTERNATE +#define CF_COMMON_DATA_AREA ATA_COMMON_DATA_AREA + +/* Compact Flash-ATA commands */ +#define CF_READ_SECTOR_CMD ATA_READ_SECTOR_CMD +#define CF_WRITE_SECTOR_CMD ATA_WRITE_SECTOR_CMD +#define CF_ERASE_SECTOR_CMD ATA_ERASE_SECTOR_CMD +#define CF_IDENTIFY_CMD ATA_IDENTIFY_CMD + +#define PCCARD_StatusTypedef HAL_PCCARD_StatusTypeDef +#define PCCARD_SUCCESS HAL_PCCARD_STATUS_SUCCESS +#define PCCARD_ONGOING HAL_PCCARD_STATUS_ONGOING +#define PCCARD_ERROR HAL_PCCARD_STATUS_ERROR +#define PCCARD_TIMEOUT HAL_PCCARD_STATUS_TIMEOUT +/** + * @} + */ + +/** @defgroup HAL_RTC_Aliased_Defines HAL RTC Aliased Defines maintained for legacy purpose + * @{ + */ + +#define FORMAT_BIN RTC_FORMAT_BIN +#define FORMAT_BCD RTC_FORMAT_BCD + +#define RTC_ALARMSUBSECONDMASK_None RTC_ALARMSUBSECONDMASK_NONE +#define RTC_TAMPERERASEBACKUP_DISABLED RTC_TAMPER_ERASE_BACKUP_DISABLE +#define RTC_TAMPERMASK_FLAG_DISABLED RTC_TAMPERMASK_FLAG_DISABLE +#define RTC_TAMPERMASK_FLAG_ENABLED RTC_TAMPERMASK_FLAG_ENABLE + +#define RTC_MASKTAMPERFLAG_DISABLED RTC_TAMPERMASK_FLAG_DISABLE +#define RTC_MASKTAMPERFLAG_ENABLED RTC_TAMPERMASK_FLAG_ENABLE +#define RTC_TAMPERERASEBACKUP_ENABLED RTC_TAMPER_ERASE_BACKUP_ENABLE +#define RTC_TAMPER1_2_INTERRUPT RTC_ALL_TAMPER_INTERRUPT +#define RTC_TAMPER1_2_3_INTERRUPT RTC_ALL_TAMPER_INTERRUPT + +#define RTC_TIMESTAMPPIN_PC13 RTC_TIMESTAMPPIN_DEFAULT +#define RTC_TIMESTAMPPIN_PA0 RTC_TIMESTAMPPIN_POS1 +#define RTC_TIMESTAMPPIN_PI8 RTC_TIMESTAMPPIN_POS1 +#define RTC_TIMESTAMPPIN_PC1 RTC_TIMESTAMPPIN_POS2 + +#define RTC_OUTPUT_REMAP_PC13 RTC_OUTPUT_REMAP_NONE +#define RTC_OUTPUT_REMAP_PB14 RTC_OUTPUT_REMAP_POS1 +#define RTC_OUTPUT_REMAP_PB2 RTC_OUTPUT_REMAP_POS1 + +#define RTC_TAMPERPIN_PC13 RTC_TAMPERPIN_DEFAULT +#define RTC_TAMPERPIN_PA0 RTC_TAMPERPIN_POS1 +#define RTC_TAMPERPIN_PI8 RTC_TAMPERPIN_POS1 + +#if defined(STM32H5) || defined(STM32H7RS) +#define TAMP_SECRETDEVICE_ERASE_NONE TAMP_DEVICESECRETS_ERASE_NONE +#define TAMP_SECRETDEVICE_ERASE_BKP_SRAM TAMP_DEVICESECRETS_ERASE_BKPSRAM +#endif /* STM32H5 || STM32H7RS */ + +#if defined(STM32WBA) +#define TAMP_SECRETDEVICE_ERASE_NONE TAMP_DEVICESECRETS_ERASE_NONE +#define TAMP_SECRETDEVICE_ERASE_SRAM2 TAMP_DEVICESECRETS_ERASE_SRAM2 +#define TAMP_SECRETDEVICE_ERASE_RHUK TAMP_DEVICESECRETS_ERASE_RHUK +#define TAMP_SECRETDEVICE_ERASE_ICACHE TAMP_DEVICESECRETS_ERASE_ICACHE +#define TAMP_SECRETDEVICE_ERASE_SAES_AES_HASH TAMP_DEVICESECRETS_ERASE_SAES_AES_HASH +#define TAMP_SECRETDEVICE_ERASE_PKA_SRAM TAMP_DEVICESECRETS_ERASE_PKA_SRAM +#define TAMP_SECRETDEVICE_ERASE_ALL TAMP_DEVICESECRETS_ERASE_ALL +#endif /* STM32WBA */ + +#if defined(STM32H5) || defined(STM32WBA) || defined(STM32H7RS) +#define TAMP_SECRETDEVICE_ERASE_DISABLE TAMP_DEVICESECRETS_ERASE_NONE +#define TAMP_SECRETDEVICE_ERASE_ENABLE TAMP_SECRETDEVICE_ERASE_ALL +#endif /* STM32H5 || STM32WBA || STM32H7RS */ + +#if defined(STM32F7) +#define RTC_TAMPCR_TAMPXE RTC_TAMPER_ENABLE_BITS_MASK +#define 
RTC_TAMPCR_TAMPXIE RTC_TAMPER_IT_ENABLE_BITS_MASK +#endif /* STM32F7 */ + +#if defined(STM32H7) +#define RTC_TAMPCR_TAMPXE RTC_TAMPER_X +#define RTC_TAMPCR_TAMPXIE RTC_TAMPER_X_INTERRUPT +#endif /* STM32H7 */ + +#if defined(STM32F7) || defined(STM32H7) || defined(STM32L0) +#define RTC_TAMPER1_INTERRUPT RTC_IT_TAMP1 +#define RTC_TAMPER2_INTERRUPT RTC_IT_TAMP2 +#define RTC_TAMPER3_INTERRUPT RTC_IT_TAMP3 +#define RTC_ALL_TAMPER_INTERRUPT RTC_IT_TAMP +#endif /* STM32F7 || STM32H7 || STM32L0 */ + +/** + * @} + */ + + +/** @defgroup HAL_SMARTCARD_Aliased_Defines HAL SMARTCARD Aliased Defines maintained for legacy purpose + * @{ + */ +#define SMARTCARD_NACK_ENABLED SMARTCARD_NACK_ENABLE +#define SMARTCARD_NACK_DISABLED SMARTCARD_NACK_DISABLE + +#define SMARTCARD_ONEBIT_SAMPLING_DISABLED SMARTCARD_ONE_BIT_SAMPLE_DISABLE +#define SMARTCARD_ONEBIT_SAMPLING_ENABLED SMARTCARD_ONE_BIT_SAMPLE_ENABLE +#define SMARTCARD_ONEBIT_SAMPLING_DISABLE SMARTCARD_ONE_BIT_SAMPLE_DISABLE +#define SMARTCARD_ONEBIT_SAMPLING_ENABLE SMARTCARD_ONE_BIT_SAMPLE_ENABLE + +#define SMARTCARD_TIMEOUT_DISABLED SMARTCARD_TIMEOUT_DISABLE +#define SMARTCARD_TIMEOUT_ENABLED SMARTCARD_TIMEOUT_ENABLE + +#define SMARTCARD_LASTBIT_DISABLED SMARTCARD_LASTBIT_DISABLE +#define SMARTCARD_LASTBIT_ENABLED SMARTCARD_LASTBIT_ENABLE +/** + * @} + */ + + +/** @defgroup HAL_SMBUS_Aliased_Defines HAL SMBUS Aliased Defines maintained for legacy purpose + * @{ + */ +#define SMBUS_DUALADDRESS_DISABLED SMBUS_DUALADDRESS_DISABLE +#define SMBUS_DUALADDRESS_ENABLED SMBUS_DUALADDRESS_ENABLE +#define SMBUS_GENERALCALL_DISABLED SMBUS_GENERALCALL_DISABLE +#define SMBUS_GENERALCALL_ENABLED SMBUS_GENERALCALL_ENABLE +#define SMBUS_NOSTRETCH_DISABLED SMBUS_NOSTRETCH_DISABLE +#define SMBUS_NOSTRETCH_ENABLED SMBUS_NOSTRETCH_ENABLE +#define SMBUS_ANALOGFILTER_ENABLED SMBUS_ANALOGFILTER_ENABLE +#define SMBUS_ANALOGFILTER_DISABLED SMBUS_ANALOGFILTER_DISABLE +#define SMBUS_PEC_DISABLED SMBUS_PEC_DISABLE +#define SMBUS_PEC_ENABLED SMBUS_PEC_ENABLE +#define HAL_SMBUS_STATE_SLAVE_LISTEN HAL_SMBUS_STATE_LISTEN +/** + * @} + */ + +/** @defgroup HAL_SPI_Aliased_Defines HAL SPI Aliased Defines maintained for legacy purpose + * @{ + */ +#define SPI_TIMODE_DISABLED SPI_TIMODE_DISABLE +#define SPI_TIMODE_ENABLED SPI_TIMODE_ENABLE + +#define SPI_CRCCALCULATION_DISABLED SPI_CRCCALCULATION_DISABLE +#define SPI_CRCCALCULATION_ENABLED SPI_CRCCALCULATION_ENABLE + +#define SPI_NSS_PULSE_DISABLED SPI_NSS_PULSE_DISABLE +#define SPI_NSS_PULSE_ENABLED SPI_NSS_PULSE_ENABLE + +#if defined(STM32H7) + +#define SPI_FLAG_TXE SPI_FLAG_TXP +#define SPI_FLAG_RXNE SPI_FLAG_RXP + +#define SPI_IT_TXE SPI_IT_TXP +#define SPI_IT_RXNE SPI_IT_RXP + +#define SPI_FRLVL_EMPTY SPI_RX_FIFO_0PACKET +#define SPI_FRLVL_QUARTER_FULL SPI_RX_FIFO_1PACKET +#define SPI_FRLVL_HALF_FULL SPI_RX_FIFO_2PACKET +#define SPI_FRLVL_FULL SPI_RX_FIFO_3PACKET + +#endif /* STM32H7 */ + +/** + * @} + */ + +/** @defgroup HAL_TIM_Aliased_Defines HAL TIM Aliased Defines maintained for legacy purpose + * @{ + */ +#define CCER_CCxE_MASK TIM_CCER_CCxE_MASK +#define CCER_CCxNE_MASK TIM_CCER_CCxNE_MASK + +#define TIM_DMABase_CR1 TIM_DMABASE_CR1 +#define TIM_DMABase_CR2 TIM_DMABASE_CR2 +#define TIM_DMABase_SMCR TIM_DMABASE_SMCR +#define TIM_DMABase_DIER TIM_DMABASE_DIER +#define TIM_DMABase_SR TIM_DMABASE_SR +#define TIM_DMABase_EGR TIM_DMABASE_EGR +#define TIM_DMABase_CCMR1 TIM_DMABASE_CCMR1 +#define TIM_DMABase_CCMR2 TIM_DMABASE_CCMR2 +#define TIM_DMABase_CCER TIM_DMABASE_CCER +#define TIM_DMABase_CNT TIM_DMABASE_CNT +#define 
TIM_DMABase_PSC TIM_DMABASE_PSC +#define TIM_DMABase_ARR TIM_DMABASE_ARR +#define TIM_DMABase_RCR TIM_DMABASE_RCR +#define TIM_DMABase_CCR1 TIM_DMABASE_CCR1 +#define TIM_DMABase_CCR2 TIM_DMABASE_CCR2 +#define TIM_DMABase_CCR3 TIM_DMABASE_CCR3 +#define TIM_DMABase_CCR4 TIM_DMABASE_CCR4 +#define TIM_DMABase_BDTR TIM_DMABASE_BDTR +#define TIM_DMABase_DCR TIM_DMABASE_DCR +#define TIM_DMABase_DMAR TIM_DMABASE_DMAR +#define TIM_DMABase_OR1 TIM_DMABASE_OR1 +#define TIM_DMABase_CCMR3 TIM_DMABASE_CCMR3 +#define TIM_DMABase_CCR5 TIM_DMABASE_CCR5 +#define TIM_DMABase_CCR6 TIM_DMABASE_CCR6 +#define TIM_DMABase_OR2 TIM_DMABASE_OR2 +#define TIM_DMABase_OR3 TIM_DMABASE_OR3 +#define TIM_DMABase_OR TIM_DMABASE_OR + +#define TIM_EventSource_Update TIM_EVENTSOURCE_UPDATE +#define TIM_EventSource_CC1 TIM_EVENTSOURCE_CC1 +#define TIM_EventSource_CC2 TIM_EVENTSOURCE_CC2 +#define TIM_EventSource_CC3 TIM_EVENTSOURCE_CC3 +#define TIM_EventSource_CC4 TIM_EVENTSOURCE_CC4 +#define TIM_EventSource_COM TIM_EVENTSOURCE_COM +#define TIM_EventSource_Trigger TIM_EVENTSOURCE_TRIGGER +#define TIM_EventSource_Break TIM_EVENTSOURCE_BREAK +#define TIM_EventSource_Break2 TIM_EVENTSOURCE_BREAK2 + +#define TIM_DMABurstLength_1Transfer TIM_DMABURSTLENGTH_1TRANSFER +#define TIM_DMABurstLength_2Transfers TIM_DMABURSTLENGTH_2TRANSFERS +#define TIM_DMABurstLength_3Transfers TIM_DMABURSTLENGTH_3TRANSFERS +#define TIM_DMABurstLength_4Transfers TIM_DMABURSTLENGTH_4TRANSFERS +#define TIM_DMABurstLength_5Transfers TIM_DMABURSTLENGTH_5TRANSFERS +#define TIM_DMABurstLength_6Transfers TIM_DMABURSTLENGTH_6TRANSFERS +#define TIM_DMABurstLength_7Transfers TIM_DMABURSTLENGTH_7TRANSFERS +#define TIM_DMABurstLength_8Transfers TIM_DMABURSTLENGTH_8TRANSFERS +#define TIM_DMABurstLength_9Transfers TIM_DMABURSTLENGTH_9TRANSFERS +#define TIM_DMABurstLength_10Transfers TIM_DMABURSTLENGTH_10TRANSFERS +#define TIM_DMABurstLength_11Transfers TIM_DMABURSTLENGTH_11TRANSFERS +#define TIM_DMABurstLength_12Transfers TIM_DMABURSTLENGTH_12TRANSFERS +#define TIM_DMABurstLength_13Transfers TIM_DMABURSTLENGTH_13TRANSFERS +#define TIM_DMABurstLength_14Transfers TIM_DMABURSTLENGTH_14TRANSFERS +#define TIM_DMABurstLength_15Transfers TIM_DMABURSTLENGTH_15TRANSFERS +#define TIM_DMABurstLength_16Transfers TIM_DMABURSTLENGTH_16TRANSFERS +#define TIM_DMABurstLength_17Transfers TIM_DMABURSTLENGTH_17TRANSFERS +#define TIM_DMABurstLength_18Transfers TIM_DMABURSTLENGTH_18TRANSFERS + +#if defined(STM32L0) +#define TIM22_TI1_GPIO1 TIM22_TI1_GPIO +#define TIM22_TI1_GPIO2 TIM22_TI1_GPIO +#endif + +#if defined(STM32F3) +#define IS_TIM_HALL_INTERFACE_INSTANCE IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE +#endif + +#if defined(STM32H7) +#define TIM_TIM1_ETR_COMP1_OUT TIM_TIM1_ETR_COMP1 +#define TIM_TIM1_ETR_COMP2_OUT TIM_TIM1_ETR_COMP2 +#define TIM_TIM8_ETR_COMP1_OUT TIM_TIM8_ETR_COMP1 +#define TIM_TIM8_ETR_COMP2_OUT TIM_TIM8_ETR_COMP2 +#define TIM_TIM2_ETR_COMP1_OUT TIM_TIM2_ETR_COMP1 +#define TIM_TIM2_ETR_COMP2_OUT TIM_TIM2_ETR_COMP2 +#define TIM_TIM3_ETR_COMP1_OUT TIM_TIM3_ETR_COMP1 +#define TIM_TIM1_TI1_COMP1_OUT TIM_TIM1_TI1_COMP1 +#define TIM_TIM8_TI1_COMP2_OUT TIM_TIM8_TI1_COMP2 +#define TIM_TIM2_TI4_COMP1_OUT TIM_TIM2_TI4_COMP1 +#define TIM_TIM2_TI4_COMP2_OUT TIM_TIM2_TI4_COMP2 +#define TIM_TIM2_TI4_COMP1COMP2_OUT TIM_TIM2_TI4_COMP1_COMP2 +#define TIM_TIM3_TI1_COMP1_OUT TIM_TIM3_TI1_COMP1 +#define TIM_TIM3_TI1_COMP2_OUT TIM_TIM3_TI1_COMP2 +#define TIM_TIM3_TI1_COMP1COMP2_OUT TIM_TIM3_TI1_COMP1_COMP2 +#endif + +#if defined(STM32U5) +#define OCREF_CLEAR_SELECT_Pos OCREF_CLEAR_SELECT_POS 
+#define OCREF_CLEAR_SELECT_Msk OCREF_CLEAR_SELECT_MSK +#endif +/** + * @} + */ + +/** @defgroup HAL_TSC_Aliased_Defines HAL TSC Aliased Defines maintained for legacy purpose + * @{ + */ +#define TSC_SYNC_POL_FALL TSC_SYNC_POLARITY_FALLING +#define TSC_SYNC_POL_RISE_HIGH TSC_SYNC_POLARITY_RISING +/** + * @} + */ + +/** @defgroup HAL_UART_Aliased_Defines HAL UART Aliased Defines maintained for legacy purpose + * @{ + */ +#define UART_ONEBIT_SAMPLING_DISABLED UART_ONE_BIT_SAMPLE_DISABLE +#define UART_ONEBIT_SAMPLING_ENABLED UART_ONE_BIT_SAMPLE_ENABLE +#define UART_ONE_BIT_SAMPLE_DISABLED UART_ONE_BIT_SAMPLE_DISABLE +#define UART_ONE_BIT_SAMPLE_ENABLED UART_ONE_BIT_SAMPLE_ENABLE + +#define __HAL_UART_ONEBIT_ENABLE __HAL_UART_ONE_BIT_SAMPLE_ENABLE +#define __HAL_UART_ONEBIT_DISABLE __HAL_UART_ONE_BIT_SAMPLE_DISABLE + +#define __DIV_SAMPLING16 UART_DIV_SAMPLING16 +#define __DIVMANT_SAMPLING16 UART_DIVMANT_SAMPLING16 +#define __DIVFRAQ_SAMPLING16 UART_DIVFRAQ_SAMPLING16 +#define __UART_BRR_SAMPLING16 UART_BRR_SAMPLING16 + +#define __DIV_SAMPLING8 UART_DIV_SAMPLING8 +#define __DIVMANT_SAMPLING8 UART_DIVMANT_SAMPLING8 +#define __DIVFRAQ_SAMPLING8 UART_DIVFRAQ_SAMPLING8 +#define __UART_BRR_SAMPLING8 UART_BRR_SAMPLING8 + +#define __DIV_LPUART UART_DIV_LPUART + +#define UART_WAKEUPMETHODE_IDLELINE UART_WAKEUPMETHOD_IDLELINE +#define UART_WAKEUPMETHODE_ADDRESSMARK UART_WAKEUPMETHOD_ADDRESSMARK + +/** + * @} + */ + + +/** @defgroup HAL_USART_Aliased_Defines HAL USART Aliased Defines maintained for legacy purpose + * @{ + */ + +#define USART_CLOCK_DISABLED USART_CLOCK_DISABLE +#define USART_CLOCK_ENABLED USART_CLOCK_ENABLE + +#define USARTNACK_ENABLED USART_NACK_ENABLE +#define USARTNACK_DISABLED USART_NACK_DISABLE +/** + * @} + */ + +/** @defgroup HAL_WWDG_Aliased_Defines HAL WWDG Aliased Defines maintained for legacy purpose + * @{ + */ +#define CFR_BASE WWDG_CFR_BASE + +/** + * @} + */ + +/** @defgroup HAL_CAN_Aliased_Defines HAL CAN Aliased Defines maintained for legacy purpose + * @{ + */ +#define CAN_FilterFIFO0 CAN_FILTER_FIFO0 +#define CAN_FilterFIFO1 CAN_FILTER_FIFO1 +#define CAN_IT_RQCP0 CAN_IT_TME +#define CAN_IT_RQCP1 CAN_IT_TME +#define CAN_IT_RQCP2 CAN_IT_TME +#define INAK_TIMEOUT CAN_TIMEOUT_VALUE +#define SLAK_TIMEOUT CAN_TIMEOUT_VALUE +#define CAN_TXSTATUS_FAILED ((uint8_t)0x00U) +#define CAN_TXSTATUS_OK ((uint8_t)0x01U) +#define CAN_TXSTATUS_PENDING ((uint8_t)0x02U) + +/** + * @} + */ + +/** @defgroup HAL_ETH_Aliased_Defines HAL ETH Aliased Defines maintained for legacy purpose + * @{ + */ + +#define VLAN_TAG ETH_VLAN_TAG +#define MIN_ETH_PAYLOAD ETH_MIN_ETH_PAYLOAD +#define MAX_ETH_PAYLOAD ETH_MAX_ETH_PAYLOAD +#define JUMBO_FRAME_PAYLOAD ETH_JUMBO_FRAME_PAYLOAD +#define MACMIIAR_CR_MASK ETH_MACMIIAR_CR_MASK +#define MACCR_CLEAR_MASK ETH_MACCR_CLEAR_MASK +#define MACFCR_CLEAR_MASK ETH_MACFCR_CLEAR_MASK +#define DMAOMR_CLEAR_MASK ETH_DMAOMR_CLEAR_MASK + +#define ETH_MMCCR 0x00000100U +#define ETH_MMCRIR 0x00000104U +#define ETH_MMCTIR 0x00000108U +#define ETH_MMCRIMR 0x0000010CU +#define ETH_MMCTIMR 0x00000110U +#define ETH_MMCTGFSCCR 0x0000014CU +#define ETH_MMCTGFMSCCR 0x00000150U +#define ETH_MMCTGFCR 0x00000168U +#define ETH_MMCRFCECR 0x00000194U +#define ETH_MMCRFAECR 0x00000198U +#define ETH_MMCRGUFCR 0x000001C4U + +#define ETH_MAC_TXFIFO_FULL 0x02000000U /* Tx FIFO full */ +#define ETH_MAC_TXFIFONOT_EMPTY 0x01000000U /* Tx FIFO not empty */ +#define ETH_MAC_TXFIFO_WRITE_ACTIVE 0x00400000U /* Tx FIFO write active */ +#define ETH_MAC_TXFIFO_IDLE 0x00000000U /* Tx FIFO read status: 
Idle */ +#define ETH_MAC_TXFIFO_READ 0x00100000U /* Tx FIFO read status: Read (transferring data to + the MAC transmitter) */ +#define ETH_MAC_TXFIFO_WAITING 0x00200000U /* Tx FIFO read status: Waiting for TxStatus from + MAC transmitter */ +#define ETH_MAC_TXFIFO_WRITING 0x00300000U /* Tx FIFO read status: Writing the received TxStatus + or flushing the TxFIFO */ +#define ETH_MAC_TRANSMISSION_PAUSE 0x00080000U /* MAC transmitter in pause */ +#define ETH_MAC_TRANSMITFRAMECONTROLLER_IDLE 0x00000000U /* MAC transmit frame controller: Idle */ +#define ETH_MAC_TRANSMITFRAMECONTROLLER_WAITING 0x00020000U /* MAC transmit frame controller: Waiting for Status + of previous frame or IFG/backoff period to be over */ +#define ETH_MAC_TRANSMITFRAMECONTROLLER_GENRATING_PCF 0x00040000U /* MAC transmit frame controller: Generating and + transmitting a Pause control frame (in full duplex mode) */ +#define ETH_MAC_TRANSMITFRAMECONTROLLER_TRANSFERRING 0x00060000U /* MAC transmit frame controller: Transferring input + frame for transmission */ +#define ETH_MAC_MII_TRANSMIT_ACTIVE 0x00010000U /* MAC MII transmit engine active */ +#define ETH_MAC_RXFIFO_EMPTY 0x00000000U /* Rx FIFO fill level: empty */ +#define ETH_MAC_RXFIFO_BELOW_THRESHOLD 0x00000100U /* Rx FIFO fill level: fill-level below flow-control + de-activate threshold */ +#define ETH_MAC_RXFIFO_ABOVE_THRESHOLD 0x00000200U /* Rx FIFO fill level: fill-level above flow-control + activate threshold */ +#define ETH_MAC_RXFIFO_FULL 0x00000300U /* Rx FIFO fill level: full */ +#if defined(STM32F1) +#else +#define ETH_MAC_READCONTROLLER_IDLE 0x00000000U /* Rx FIFO read controller IDLE state */ +#define ETH_MAC_READCONTROLLER_READING_DATA 0x00000020U /* Rx FIFO read controller Reading frame data */ +#define ETH_MAC_READCONTROLLER_READING_STATUS 0x00000040U /* Rx FIFO read controller Reading frame status + (or time-stamp) */ +#endif +#define ETH_MAC_READCONTROLLER_FLUSHING 0x00000060U /* Rx FIFO read controller Flushing the frame data and + status */ +#define ETH_MAC_RXFIFO_WRITE_ACTIVE 0x00000010U /* Rx FIFO write controller active */ +#define ETH_MAC_SMALL_FIFO_NOTACTIVE 0x00000000U /* MAC small FIFO read / write controllers not active */ +#define ETH_MAC_SMALL_FIFO_READ_ACTIVE 0x00000002U /* MAC small FIFO read controller active */ +#define ETH_MAC_SMALL_FIFO_WRITE_ACTIVE 0x00000004U /* MAC small FIFO write controller active */ +#define ETH_MAC_SMALL_FIFO_RW_ACTIVE 0x00000006U /* MAC small FIFO read / write controllers active */ +#define ETH_MAC_MII_RECEIVE_PROTOCOL_ACTIVE 0x00000001U /* MAC MII receive protocol engine active */ + +#define ETH_TxPacketConfig ETH_TxPacketConfigTypeDef /* Transmit Packet Configuration structure definition */ + +/** + * @} + */ + +/** @defgroup HAL_DCMI_Aliased_Defines HAL DCMI Aliased Defines maintained for legacy purpose + * @{ + */ +#define HAL_DCMI_ERROR_OVF HAL_DCMI_ERROR_OVR +#define DCMI_IT_OVF DCMI_IT_OVR +#define DCMI_FLAG_OVFRI DCMI_FLAG_OVRRI +#define DCMI_FLAG_OVFMI DCMI_FLAG_OVRMI + +#define HAL_DCMI_ConfigCROP HAL_DCMI_ConfigCrop +#define HAL_DCMI_EnableCROP HAL_DCMI_EnableCrop +#define HAL_DCMI_DisableCROP HAL_DCMI_DisableCrop + +/** + * @} + */ + +#if defined(STM32L4) || defined(STM32F7) || defined(STM32F427xx) || defined(STM32F437xx) \ + || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) \ + || defined(STM32H7) +/** @defgroup HAL_DMA2D_Aliased_Defines HAL DMA2D Aliased Defines maintained for legacy purpose + * @{ + */ +#define DMA2D_ARGB8888 DMA2D_OUTPUT_ARGB8888 
+#define DMA2D_RGB888 DMA2D_OUTPUT_RGB888 +#define DMA2D_RGB565 DMA2D_OUTPUT_RGB565 +#define DMA2D_ARGB1555 DMA2D_OUTPUT_ARGB1555 +#define DMA2D_ARGB4444 DMA2D_OUTPUT_ARGB4444 + +#define CM_ARGB8888 DMA2D_INPUT_ARGB8888 +#define CM_RGB888 DMA2D_INPUT_RGB888 +#define CM_RGB565 DMA2D_INPUT_RGB565 +#define CM_ARGB1555 DMA2D_INPUT_ARGB1555 +#define CM_ARGB4444 DMA2D_INPUT_ARGB4444 +#define CM_L8 DMA2D_INPUT_L8 +#define CM_AL44 DMA2D_INPUT_AL44 +#define CM_AL88 DMA2D_INPUT_AL88 +#define CM_L4 DMA2D_INPUT_L4 +#define CM_A8 DMA2D_INPUT_A8 +#define CM_A4 DMA2D_INPUT_A4 +/** + * @} + */ +#endif /* STM32L4 || STM32F7 || STM32F4 || STM32H7 */ + +#if defined(STM32L4) || defined(STM32F7) || defined(STM32F427xx) || defined(STM32F437xx) \ + || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) \ + || defined(STM32H7) || defined(STM32U5) +/** @defgroup DMA2D_Aliases DMA2D API Aliases + * @{ + */ +#define HAL_DMA2D_DisableCLUT HAL_DMA2D_CLUTLoading_Abort /*!< Aliased to HAL_DMA2D_CLUTLoading_Abort + for compatibility with legacy code */ +/** + * @} + */ + +#endif /* STM32L4 || STM32F7 || STM32F4 || STM32H7 || STM32U5 */ + +/** @defgroup HAL_PPP_Aliased_Defines HAL PPP Aliased Defines maintained for legacy purpose + * @{ + */ + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup HAL_CRYP_Aliased_Functions HAL CRYP Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_CRYP_ComputationCpltCallback HAL_CRYPEx_ComputationCpltCallback +/** + * @} + */ + +/** @defgroup HAL_DCACHE_Aliased_Functions HAL DCACHE Aliased Functions maintained for legacy purpose + * @{ + */ + +#if defined(STM32U5) +#define HAL_DCACHE_CleanInvalidateByAddr HAL_DCACHE_CleanInvalidByAddr +#define HAL_DCACHE_CleanInvalidateByAddr_IT HAL_DCACHE_CleanInvalidByAddr_IT +#endif /* STM32U5 */ + +/** + * @} + */ + +#if !defined(STM32F2) +/** @defgroup HASH_alias HASH API alias + * @{ + */ +#define HAL_HASHEx_IRQHandler HAL_HASH_IRQHandler /*!< Redirection for compatibility with legacy code */ +/** + * + * @} + */ +#endif /* STM32F2 */ +/** @defgroup HAL_HASH_Aliased_Functions HAL HASH Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_HASH_STATETypeDef HAL_HASH_StateTypeDef +#define HAL_HASHPhaseTypeDef HAL_HASH_PhaseTypeDef +#define HAL_HMAC_MD5_Finish HAL_HASH_MD5_Finish +#define HAL_HMAC_SHA1_Finish HAL_HASH_SHA1_Finish +#define HAL_HMAC_SHA224_Finish HAL_HASH_SHA224_Finish +#define HAL_HMAC_SHA256_Finish HAL_HASH_SHA256_Finish + +/*HASH Algorithm Selection*/ + +#define HASH_AlgoSelection_SHA1 HASH_ALGOSELECTION_SHA1 +#define HASH_AlgoSelection_SHA224 HASH_ALGOSELECTION_SHA224 +#define HASH_AlgoSelection_SHA256 HASH_ALGOSELECTION_SHA256 +#define HASH_AlgoSelection_MD5 HASH_ALGOSELECTION_MD5 + +#define HASH_AlgoMode_HASH HASH_ALGOMODE_HASH +#define HASH_AlgoMode_HMAC HASH_ALGOMODE_HMAC + +#define HASH_HMACKeyType_ShortKey HASH_HMAC_KEYTYPE_SHORTKEY +#define HASH_HMACKeyType_LongKey HASH_HMAC_KEYTYPE_LONGKEY + +#if defined(STM32L4) || defined(STM32L5) || defined(STM32F2) || defined(STM32F4) || defined(STM32F7) || defined(STM32H7) + +#define HAL_HASH_MD5_Accumulate HAL_HASH_MD5_Accmlt +#define HAL_HASH_MD5_Accumulate_End HAL_HASH_MD5_Accmlt_End +#define HAL_HASH_MD5_Accumulate_IT HAL_HASH_MD5_Accmlt_IT +#define HAL_HASH_MD5_Accumulate_End_IT HAL_HASH_MD5_Accmlt_End_IT + +#define HAL_HASH_SHA1_Accumulate HAL_HASH_SHA1_Accmlt +#define HAL_HASH_SHA1_Accumulate_End HAL_HASH_SHA1_Accmlt_End +#define 
HAL_HASH_SHA1_Accumulate_IT HAL_HASH_SHA1_Accmlt_IT +#define HAL_HASH_SHA1_Accumulate_End_IT HAL_HASH_SHA1_Accmlt_End_IT + +#define HAL_HASHEx_SHA224_Accumulate HAL_HASHEx_SHA224_Accmlt +#define HAL_HASHEx_SHA224_Accumulate_End HAL_HASHEx_SHA224_Accmlt_End +#define HAL_HASHEx_SHA224_Accumulate_IT HAL_HASHEx_SHA224_Accmlt_IT +#define HAL_HASHEx_SHA224_Accumulate_End_IT HAL_HASHEx_SHA224_Accmlt_End_IT + +#define HAL_HASHEx_SHA256_Accumulate HAL_HASHEx_SHA256_Accmlt +#define HAL_HASHEx_SHA256_Accumulate_End HAL_HASHEx_SHA256_Accmlt_End +#define HAL_HASHEx_SHA256_Accumulate_IT HAL_HASHEx_SHA256_Accmlt_IT +#define HAL_HASHEx_SHA256_Accumulate_End_IT HAL_HASHEx_SHA256_Accmlt_End_IT + +#endif /* STM32L4 || STM32L5 || STM32F2 || STM32F4 || STM32F7 || STM32H7 */ +/** + * @} + */ + +/** @defgroup HAL_Aliased_Functions HAL Generic Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_EnableDBGSleepMode HAL_DBGMCU_EnableDBGSleepMode +#define HAL_DisableDBGSleepMode HAL_DBGMCU_DisableDBGSleepMode +#define HAL_EnableDBGStopMode HAL_DBGMCU_EnableDBGStopMode +#define HAL_DisableDBGStopMode HAL_DBGMCU_DisableDBGStopMode +#define HAL_EnableDBGStandbyMode HAL_DBGMCU_EnableDBGStandbyMode +#define HAL_DisableDBGStandbyMode HAL_DBGMCU_DisableDBGStandbyMode +#define HAL_DBG_LowPowerConfig(Periph, cmd) (((cmd\ + )==ENABLE)? HAL_DBGMCU_DBG_EnableLowPowerConfig(Periph) : \ + HAL_DBGMCU_DBG_DisableLowPowerConfig(Periph)) +#define HAL_VREFINT_OutputSelect HAL_SYSCFG_VREFINT_OutputSelect +#define HAL_Lock_Cmd(cmd) (((cmd)==ENABLE) ? HAL_SYSCFG_Enable_Lock_VREFINT() : HAL_SYSCFG_Disable_Lock_VREFINT()) +#if defined(STM32L0) +#else +#define HAL_VREFINT_Cmd(cmd) (((cmd)==ENABLE)? HAL_SYSCFG_EnableVREFINT() : HAL_SYSCFG_DisableVREFINT()) +#endif +#define HAL_ADC_EnableBuffer_Cmd(cmd) (((cmd)==ENABLE) ? HAL_ADCEx_EnableVREFINT() : HAL_ADCEx_DisableVREFINT()) +#define HAL_ADC_EnableBufferSensor_Cmd(cmd) (((cmd\ + )==ENABLE) ? 
HAL_ADCEx_EnableVREFINTTempSensor() : \ + HAL_ADCEx_DisableVREFINTTempSensor()) +#if defined(STM32H7A3xx) || defined(STM32H7B3xx) || defined(STM32H7B0xx) || defined(STM32H7A3xxQ) || \ + defined(STM32H7B3xxQ) || defined(STM32H7B0xxQ) +#define HAL_EnableSRDomainDBGStopMode HAL_EnableDomain3DBGStopMode +#define HAL_DisableSRDomainDBGStopMode HAL_DisableDomain3DBGStopMode +#define HAL_EnableSRDomainDBGStandbyMode HAL_EnableDomain3DBGStandbyMode +#define HAL_DisableSRDomainDBGStandbyMode HAL_DisableDomain3DBGStandbyMode +#endif /* STM32H7A3xx || STM32H7B3xx || STM32H7B0xx || STM32H7A3xxQ || STM32H7B3xxQ || STM32H7B0xxQ */ + +/** + * @} + */ + +/** @defgroup HAL_FLASH_Aliased_Functions HAL FLASH Aliased Functions maintained for legacy purpose + * @{ + */ +#define FLASH_HalfPageProgram HAL_FLASHEx_HalfPageProgram +#define FLASH_EnableRunPowerDown HAL_FLASHEx_EnableRunPowerDown +#define FLASH_DisableRunPowerDown HAL_FLASHEx_DisableRunPowerDown +#define HAL_DATA_EEPROMEx_Unlock HAL_FLASHEx_DATAEEPROM_Unlock +#define HAL_DATA_EEPROMEx_Lock HAL_FLASHEx_DATAEEPROM_Lock +#define HAL_DATA_EEPROMEx_Erase HAL_FLASHEx_DATAEEPROM_Erase +#define HAL_DATA_EEPROMEx_Program HAL_FLASHEx_DATAEEPROM_Program + +/** + * @} + */ + +/** @defgroup HAL_I2C_Aliased_Functions HAL I2C Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_I2CEx_AnalogFilter_Config HAL_I2CEx_ConfigAnalogFilter +#define HAL_I2CEx_DigitalFilter_Config HAL_I2CEx_ConfigDigitalFilter +#define HAL_FMPI2CEx_AnalogFilter_Config HAL_FMPI2CEx_ConfigAnalogFilter +#define HAL_FMPI2CEx_DigitalFilter_Config HAL_FMPI2CEx_ConfigDigitalFilter + +#define HAL_I2CFastModePlusConfig(SYSCFG_I2CFastModePlus, cmd) (((cmd) == ENABLE)? \ + HAL_I2CEx_EnableFastModePlus(SYSCFG_I2CFastModePlus): \ + HAL_I2CEx_DisableFastModePlus(SYSCFG_I2CFastModePlus)) + +#if defined(STM32H7) || defined(STM32WB) || defined(STM32G0) || defined(STM32F0) || defined(STM32F1) || \ + defined(STM32F2) || defined(STM32F3) || defined(STM32F4) || defined(STM32F7) || defined(STM32L0) || \ + defined(STM32L4) || defined(STM32L5) || defined(STM32G4) || defined(STM32L1) +#define HAL_I2C_Master_Sequential_Transmit_IT HAL_I2C_Master_Seq_Transmit_IT +#define HAL_I2C_Master_Sequential_Receive_IT HAL_I2C_Master_Seq_Receive_IT +#define HAL_I2C_Slave_Sequential_Transmit_IT HAL_I2C_Slave_Seq_Transmit_IT +#define HAL_I2C_Slave_Sequential_Receive_IT HAL_I2C_Slave_Seq_Receive_IT +#endif /* STM32H7 || STM32WB || STM32G0 || STM32F0 || STM32F1 || STM32F2 || STM32F3 || STM32F4 || STM32F7 || STM32L0 || + STM32L4 || STM32L5 || STM32G4 || STM32L1 */ +#if defined(STM32H7) || defined(STM32WB) || defined(STM32G0) || defined(STM32F4) || defined(STM32F7) || \ + defined(STM32L0) || defined(STM32L4) || defined(STM32L5) || defined(STM32G4)|| defined(STM32L1) +#define HAL_I2C_Master_Sequential_Transmit_DMA HAL_I2C_Master_Seq_Transmit_DMA +#define HAL_I2C_Master_Sequential_Receive_DMA HAL_I2C_Master_Seq_Receive_DMA +#define HAL_I2C_Slave_Sequential_Transmit_DMA HAL_I2C_Slave_Seq_Transmit_DMA +#define HAL_I2C_Slave_Sequential_Receive_DMA HAL_I2C_Slave_Seq_Receive_DMA +#endif /* STM32H7 || STM32WB || STM32G0 || STM32F4 || STM32F7 || STM32L0 || STM32L4 || STM32L5 || STM32G4 || STM32L1 */ + +#if defined(STM32F4) +#define HAL_FMPI2C_Master_Sequential_Transmit_IT HAL_FMPI2C_Master_Seq_Transmit_IT +#define HAL_FMPI2C_Master_Sequential_Receive_IT HAL_FMPI2C_Master_Seq_Receive_IT +#define HAL_FMPI2C_Slave_Sequential_Transmit_IT HAL_FMPI2C_Slave_Seq_Transmit_IT +#define HAL_FMPI2C_Slave_Sequential_Receive_IT 
HAL_FMPI2C_Slave_Seq_Receive_IT +#define HAL_FMPI2C_Master_Sequential_Transmit_DMA HAL_FMPI2C_Master_Seq_Transmit_DMA +#define HAL_FMPI2C_Master_Sequential_Receive_DMA HAL_FMPI2C_Master_Seq_Receive_DMA +#define HAL_FMPI2C_Slave_Sequential_Transmit_DMA HAL_FMPI2C_Slave_Seq_Transmit_DMA +#define HAL_FMPI2C_Slave_Sequential_Receive_DMA HAL_FMPI2C_Slave_Seq_Receive_DMA +#endif /* STM32F4 */ +/** + * @} + */ + +/** @defgroup HAL_PWR_Aliased HAL PWR Aliased maintained for legacy purpose + * @{ + */ + +#if defined(STM32G0) +#define HAL_PWR_ConfigPVD HAL_PWREx_ConfigPVD +#define HAL_PWR_EnablePVD HAL_PWREx_EnablePVD +#define HAL_PWR_DisablePVD HAL_PWREx_DisablePVD +#define HAL_PWR_PVD_IRQHandler HAL_PWREx_PVD_IRQHandler +#endif +#define HAL_PWR_PVDConfig HAL_PWR_ConfigPVD +#define HAL_PWR_DisableBkUpReg HAL_PWREx_DisableBkUpReg +#define HAL_PWR_DisableFlashPowerDown HAL_PWREx_DisableFlashPowerDown +#define HAL_PWR_DisableVddio2Monitor HAL_PWREx_DisableVddio2Monitor +#define HAL_PWR_EnableBkUpReg HAL_PWREx_EnableBkUpReg +#define HAL_PWR_EnableFlashPowerDown HAL_PWREx_EnableFlashPowerDown +#define HAL_PWR_EnableVddio2Monitor HAL_PWREx_EnableVddio2Monitor +#define HAL_PWR_PVD_PVM_IRQHandler HAL_PWREx_PVD_PVM_IRQHandler +#define HAL_PWR_PVDLevelConfig HAL_PWR_ConfigPVD +#define HAL_PWR_Vddio2Monitor_IRQHandler HAL_PWREx_Vddio2Monitor_IRQHandler +#define HAL_PWR_Vddio2MonitorCallback HAL_PWREx_Vddio2MonitorCallback +#define HAL_PWREx_ActivateOverDrive HAL_PWREx_EnableOverDrive +#define HAL_PWREx_DeactivateOverDrive HAL_PWREx_DisableOverDrive +#define HAL_PWREx_DisableSDADCAnalog HAL_PWREx_DisableSDADC +#define HAL_PWREx_EnableSDADCAnalog HAL_PWREx_EnableSDADC +#define HAL_PWREx_PVMConfig HAL_PWREx_ConfigPVM + +#define PWR_MODE_NORMAL PWR_PVD_MODE_NORMAL +#define PWR_MODE_IT_RISING PWR_PVD_MODE_IT_RISING +#define PWR_MODE_IT_FALLING PWR_PVD_MODE_IT_FALLING +#define PWR_MODE_IT_RISING_FALLING PWR_PVD_MODE_IT_RISING_FALLING +#define PWR_MODE_EVENT_RISING PWR_PVD_MODE_EVENT_RISING +#define PWR_MODE_EVENT_FALLING PWR_PVD_MODE_EVENT_FALLING +#define PWR_MODE_EVENT_RISING_FALLING PWR_PVD_MODE_EVENT_RISING_FALLING + +#define CR_OFFSET_BB PWR_CR_OFFSET_BB +#define CSR_OFFSET_BB PWR_CSR_OFFSET_BB +#define PMODE_BIT_NUMBER VOS_BIT_NUMBER +#define CR_PMODE_BB CR_VOS_BB + +#define DBP_BitNumber DBP_BIT_NUMBER +#define PVDE_BitNumber PVDE_BIT_NUMBER +#define PMODE_BitNumber PMODE_BIT_NUMBER +#define EWUP_BitNumber EWUP_BIT_NUMBER +#define FPDS_BitNumber FPDS_BIT_NUMBER +#define ODEN_BitNumber ODEN_BIT_NUMBER +#define ODSWEN_BitNumber ODSWEN_BIT_NUMBER +#define MRLVDS_BitNumber MRLVDS_BIT_NUMBER +#define LPLVDS_BitNumber LPLVDS_BIT_NUMBER +#define BRE_BitNumber BRE_BIT_NUMBER + +#define PWR_MODE_EVT PWR_PVD_MODE_NORMAL + +#if defined (STM32U5) +#define PWR_SRAM1_PAGE1_STOP_RETENTION PWR_SRAM1_PAGE1_STOP +#define PWR_SRAM1_PAGE2_STOP_RETENTION PWR_SRAM1_PAGE2_STOP +#define PWR_SRAM1_PAGE3_STOP_RETENTION PWR_SRAM1_PAGE3_STOP +#define PWR_SRAM1_PAGE4_STOP_RETENTION PWR_SRAM1_PAGE4_STOP +#define PWR_SRAM1_PAGE5_STOP_RETENTION PWR_SRAM1_PAGE5_STOP +#define PWR_SRAM1_PAGE6_STOP_RETENTION PWR_SRAM1_PAGE6_STOP +#define PWR_SRAM1_PAGE7_STOP_RETENTION PWR_SRAM1_PAGE7_STOP +#define PWR_SRAM1_PAGE8_STOP_RETENTION PWR_SRAM1_PAGE8_STOP +#define PWR_SRAM1_PAGE9_STOP_RETENTION PWR_SRAM1_PAGE9_STOP +#define PWR_SRAM1_PAGE10_STOP_RETENTION PWR_SRAM1_PAGE10_STOP +#define PWR_SRAM1_PAGE11_STOP_RETENTION PWR_SRAM1_PAGE11_STOP +#define PWR_SRAM1_PAGE12_STOP_RETENTION PWR_SRAM1_PAGE12_STOP +#define PWR_SRAM1_FULL_STOP_RETENTION 
PWR_SRAM1_FULL_STOP + +#define PWR_SRAM2_PAGE1_STOP_RETENTION PWR_SRAM2_PAGE1_STOP +#define PWR_SRAM2_PAGE2_STOP_RETENTION PWR_SRAM2_PAGE2_STOP +#define PWR_SRAM2_FULL_STOP_RETENTION PWR_SRAM2_FULL_STOP + +#define PWR_SRAM3_PAGE1_STOP_RETENTION PWR_SRAM3_PAGE1_STOP +#define PWR_SRAM3_PAGE2_STOP_RETENTION PWR_SRAM3_PAGE2_STOP +#define PWR_SRAM3_PAGE3_STOP_RETENTION PWR_SRAM3_PAGE3_STOP +#define PWR_SRAM3_PAGE4_STOP_RETENTION PWR_SRAM3_PAGE4_STOP +#define PWR_SRAM3_PAGE5_STOP_RETENTION PWR_SRAM3_PAGE5_STOP +#define PWR_SRAM3_PAGE6_STOP_RETENTION PWR_SRAM3_PAGE6_STOP +#define PWR_SRAM3_PAGE7_STOP_RETENTION PWR_SRAM3_PAGE7_STOP +#define PWR_SRAM3_PAGE8_STOP_RETENTION PWR_SRAM3_PAGE8_STOP +#define PWR_SRAM3_PAGE9_STOP_RETENTION PWR_SRAM3_PAGE9_STOP +#define PWR_SRAM3_PAGE10_STOP_RETENTION PWR_SRAM3_PAGE10_STOP +#define PWR_SRAM3_PAGE11_STOP_RETENTION PWR_SRAM3_PAGE11_STOP +#define PWR_SRAM3_PAGE12_STOP_RETENTION PWR_SRAM3_PAGE12_STOP +#define PWR_SRAM3_PAGE13_STOP_RETENTION PWR_SRAM3_PAGE13_STOP +#define PWR_SRAM3_FULL_STOP_RETENTION PWR_SRAM3_FULL_STOP + +#define PWR_SRAM4_FULL_STOP_RETENTION PWR_SRAM4_FULL_STOP + +#define PWR_SRAM5_PAGE1_STOP_RETENTION PWR_SRAM5_PAGE1_STOP +#define PWR_SRAM5_PAGE2_STOP_RETENTION PWR_SRAM5_PAGE2_STOP +#define PWR_SRAM5_PAGE3_STOP_RETENTION PWR_SRAM5_PAGE3_STOP +#define PWR_SRAM5_PAGE4_STOP_RETENTION PWR_SRAM5_PAGE4_STOP +#define PWR_SRAM5_PAGE5_STOP_RETENTION PWR_SRAM5_PAGE5_STOP +#define PWR_SRAM5_PAGE6_STOP_RETENTION PWR_SRAM5_PAGE6_STOP +#define PWR_SRAM5_PAGE7_STOP_RETENTION PWR_SRAM5_PAGE7_STOP +#define PWR_SRAM5_PAGE8_STOP_RETENTION PWR_SRAM5_PAGE8_STOP +#define PWR_SRAM5_PAGE9_STOP_RETENTION PWR_SRAM5_PAGE9_STOP +#define PWR_SRAM5_PAGE10_STOP_RETENTION PWR_SRAM5_PAGE10_STOP +#define PWR_SRAM5_PAGE11_STOP_RETENTION PWR_SRAM5_PAGE11_STOP +#define PWR_SRAM5_PAGE12_STOP_RETENTION PWR_SRAM5_PAGE12_STOP +#define PWR_SRAM5_PAGE13_STOP_RETENTION PWR_SRAM5_PAGE13_STOP +#define PWR_SRAM5_FULL_STOP_RETENTION PWR_SRAM5_FULL_STOP + +#define PWR_SRAM6_PAGE1_STOP_RETENTION PWR_SRAM6_PAGE1_STOP +#define PWR_SRAM6_PAGE2_STOP_RETENTION PWR_SRAM6_PAGE2_STOP +#define PWR_SRAM6_PAGE3_STOP_RETENTION PWR_SRAM6_PAGE3_STOP +#define PWR_SRAM6_PAGE4_STOP_RETENTION PWR_SRAM6_PAGE4_STOP +#define PWR_SRAM6_PAGE5_STOP_RETENTION PWR_SRAM6_PAGE5_STOP +#define PWR_SRAM6_PAGE6_STOP_RETENTION PWR_SRAM6_PAGE6_STOP +#define PWR_SRAM6_PAGE7_STOP_RETENTION PWR_SRAM6_PAGE7_STOP +#define PWR_SRAM6_PAGE8_STOP_RETENTION PWR_SRAM6_PAGE8_STOP +#define PWR_SRAM6_FULL_STOP_RETENTION PWR_SRAM6_FULL_STOP + + +#define PWR_ICACHE_FULL_STOP_RETENTION PWR_ICACHE_FULL_STOP +#define PWR_DCACHE1_FULL_STOP_RETENTION PWR_DCACHE1_FULL_STOP +#define PWR_DCACHE2_FULL_STOP_RETENTION PWR_DCACHE2_FULL_STOP +#define PWR_DMA2DRAM_FULL_STOP_RETENTION PWR_DMA2DRAM_FULL_STOP +#define PWR_PERIPHRAM_FULL_STOP_RETENTION PWR_PERIPHRAM_FULL_STOP +#define PWR_PKA32RAM_FULL_STOP_RETENTION PWR_PKA32RAM_FULL_STOP +#define PWR_GRAPHICPRAM_FULL_STOP_RETENTION PWR_GRAPHICPRAM_FULL_STOP +#define PWR_DSIRAM_FULL_STOP_RETENTION PWR_DSIRAM_FULL_STOP +#define PWR_JPEGRAM_FULL_STOP_RETENTION PWR_JPEGRAM_FULL_STOP + + +#define PWR_SRAM2_PAGE1_STANDBY_RETENTION PWR_SRAM2_PAGE1_STANDBY +#define PWR_SRAM2_PAGE2_STANDBY_RETENTION PWR_SRAM2_PAGE2_STANDBY +#define PWR_SRAM2_FULL_STANDBY_RETENTION PWR_SRAM2_FULL_STANDBY + +#define PWR_SRAM1_FULL_RUN_RETENTION PWR_SRAM1_FULL_RUN +#define PWR_SRAM2_FULL_RUN_RETENTION PWR_SRAM2_FULL_RUN +#define PWR_SRAM3_FULL_RUN_RETENTION PWR_SRAM3_FULL_RUN +#define PWR_SRAM4_FULL_RUN_RETENTION PWR_SRAM4_FULL_RUN 
+#define PWR_SRAM5_FULL_RUN_RETENTION PWR_SRAM5_FULL_RUN +#define PWR_SRAM6_FULL_RUN_RETENTION PWR_SRAM6_FULL_RUN + +#define PWR_ALL_RAM_RUN_RETENTION_MASK PWR_ALL_RAM_RUN_MASK +#endif + +/** + * @} + */ + +/** @defgroup HAL_RTC_Aliased_Functions HAL RTC Aliased Functions maintained for legacy purpose + * @{ + */ +#if defined(STM32H5) || defined(STM32WBA) || defined(STM32H7RS) +#define HAL_RTCEx_SetBoothardwareKey HAL_RTCEx_LockBootHardwareKey +#define HAL_RTCEx_BKUPBlock_Enable HAL_RTCEx_BKUPBlock +#define HAL_RTCEx_BKUPBlock_Disable HAL_RTCEx_BKUPUnblock +#define HAL_RTCEx_Erase_SecretDev_Conf HAL_RTCEx_ConfigEraseDeviceSecrets +#endif /* STM32H5 || STM32WBA || STM32H7RS */ + +/** + * @} + */ + +/** @defgroup HAL_SMBUS_Aliased_Functions HAL SMBUS Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_SMBUS_Slave_Listen_IT HAL_SMBUS_EnableListen_IT +#define HAL_SMBUS_SlaveAddrCallback HAL_SMBUS_AddrCallback +#define HAL_SMBUS_SlaveListenCpltCallback HAL_SMBUS_ListenCpltCallback +/** + * @} + */ + +/** @defgroup HAL_SPI_Aliased_Functions HAL SPI Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_SPI_FlushRxFifo HAL_SPIEx_FlushRxFifo +/** + * @} + */ + +/** @defgroup HAL_TIM_Aliased_Functions HAL TIM Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_TIM_DMADelayPulseCplt TIM_DMADelayPulseCplt +#define HAL_TIM_DMAError TIM_DMAError +#define HAL_TIM_DMACaptureCplt TIM_DMACaptureCplt +#define HAL_TIMEx_DMACommutationCplt TIMEx_DMACommutationCplt +#if defined(STM32H7) || defined(STM32G0) || defined(STM32F0) || defined(STM32F1) || defined(STM32F2) || \ + defined(STM32F3) || defined(STM32F4) || defined(STM32F7) || defined(STM32L0) || defined(STM32L4) +#define HAL_TIM_SlaveConfigSynchronization HAL_TIM_SlaveConfigSynchro +#define HAL_TIM_SlaveConfigSynchronization_IT HAL_TIM_SlaveConfigSynchro_IT +#define HAL_TIMEx_CommutationCallback HAL_TIMEx_CommutCallback +#define HAL_TIMEx_ConfigCommutationEvent HAL_TIMEx_ConfigCommutEvent +#define HAL_TIMEx_ConfigCommutationEvent_IT HAL_TIMEx_ConfigCommutEvent_IT +#define HAL_TIMEx_ConfigCommutationEvent_DMA HAL_TIMEx_ConfigCommutEvent_DMA +#endif /* STM32H7 || STM32G0 || STM32F0 || STM32F1 || STM32F2 || STM32F3 || STM32F4 || STM32F7 || STM32L0 */ +/** + * @} + */ + +/** @defgroup HAL_UART_Aliased_Functions HAL UART Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_UART_WakeupCallback HAL_UARTEx_WakeupCallback +/** + * @} + */ + +/** @defgroup HAL_LTDC_Aliased_Functions HAL LTDC Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_LTDC_LineEvenCallback HAL_LTDC_LineEventCallback +#define HAL_LTDC_Relaod HAL_LTDC_Reload +#define HAL_LTDC_StructInitFromVideoConfig HAL_LTDCEx_StructInitFromVideoConfig +#define HAL_LTDC_StructInitFromAdaptedCommandConfig HAL_LTDCEx_StructInitFromAdaptedCommandConfig +/** + * @} + */ + + +/** @defgroup HAL_PPP_Aliased_Functions HAL PPP Aliased Functions maintained for legacy purpose + * @{ + */ + +/** + * @} + */ + +/* Exported macros ------------------------------------------------------------*/ + +/** @defgroup HAL_AES_Aliased_Macros HAL CRYP Aliased Macros maintained for legacy purpose + * @{ + */ +#define AES_IT_CC CRYP_IT_CC +#define AES_IT_ERR CRYP_IT_ERR +#define AES_FLAG_CCF CRYP_FLAG_CCF +/** + * @} + */ + +/** @defgroup HAL_Aliased_Macros HAL Generic Aliased Macros maintained for legacy purpose + * @{ + */ +#define __HAL_GET_BOOT_MODE __HAL_SYSCFG_GET_BOOT_MODE +#define __HAL_REMAPMEMORY_FLASH 
__HAL_SYSCFG_REMAPMEMORY_FLASH +#define __HAL_REMAPMEMORY_SYSTEMFLASH __HAL_SYSCFG_REMAPMEMORY_SYSTEMFLASH +#define __HAL_REMAPMEMORY_SRAM __HAL_SYSCFG_REMAPMEMORY_SRAM +#define __HAL_REMAPMEMORY_FMC __HAL_SYSCFG_REMAPMEMORY_FMC +#define __HAL_REMAPMEMORY_FMC_SDRAM __HAL_SYSCFG_REMAPMEMORY_FMC_SDRAM +#define __HAL_REMAPMEMORY_FSMC __HAL_SYSCFG_REMAPMEMORY_FSMC +#define __HAL_REMAPMEMORY_QUADSPI __HAL_SYSCFG_REMAPMEMORY_QUADSPI +#define __HAL_FMC_BANK __HAL_SYSCFG_FMC_BANK +#define __HAL_GET_FLAG __HAL_SYSCFG_GET_FLAG +#define __HAL_CLEAR_FLAG __HAL_SYSCFG_CLEAR_FLAG +#define __HAL_VREFINT_OUT_ENABLE __HAL_SYSCFG_VREFINT_OUT_ENABLE +#define __HAL_VREFINT_OUT_DISABLE __HAL_SYSCFG_VREFINT_OUT_DISABLE +#define __HAL_SYSCFG_SRAM2_WRP_ENABLE __HAL_SYSCFG_SRAM2_WRP_0_31_ENABLE + +#define SYSCFG_FLAG_VREF_READY SYSCFG_FLAG_VREFINT_READY +#define SYSCFG_FLAG_RC48 RCC_FLAG_HSI48 +#define IS_SYSCFG_FASTMODEPLUS_CONFIG IS_I2C_FASTMODEPLUS +#define UFB_MODE_BitNumber UFB_MODE_BIT_NUMBER +#define CMP_PD_BitNumber CMP_PD_BIT_NUMBER + +/** + * @} + */ + + +/** @defgroup HAL_ADC_Aliased_Macros HAL ADC Aliased Macros maintained for legacy purpose + * @{ + */ +#define __ADC_ENABLE __HAL_ADC_ENABLE +#define __ADC_DISABLE __HAL_ADC_DISABLE +#define __HAL_ADC_ENABLING_CONDITIONS ADC_ENABLING_CONDITIONS +#define __HAL_ADC_DISABLING_CONDITIONS ADC_DISABLING_CONDITIONS +#define __HAL_ADC_IS_ENABLED ADC_IS_ENABLE +#define __ADC_IS_ENABLED ADC_IS_ENABLE +#define __HAL_ADC_IS_SOFTWARE_START_REGULAR ADC_IS_SOFTWARE_START_REGULAR +#define __HAL_ADC_IS_SOFTWARE_START_INJECTED ADC_IS_SOFTWARE_START_INJECTED +#define __HAL_ADC_IS_CONVERSION_ONGOING_REGULAR_INJECTED ADC_IS_CONVERSION_ONGOING_REGULAR_INJECTED +#define __HAL_ADC_IS_CONVERSION_ONGOING_REGULAR ADC_IS_CONVERSION_ONGOING_REGULAR +#define __HAL_ADC_IS_CONVERSION_ONGOING_INJECTED ADC_IS_CONVERSION_ONGOING_INJECTED +#define __HAL_ADC_IS_CONVERSION_ONGOING ADC_IS_CONVERSION_ONGOING +#define __HAL_ADC_CLEAR_ERRORCODE ADC_CLEAR_ERRORCODE + +#define __HAL_ADC_GET_RESOLUTION ADC_GET_RESOLUTION +#define __HAL_ADC_JSQR_RK ADC_JSQR_RK +#define __HAL_ADC_CFGR_AWD1CH ADC_CFGR_AWD1CH_SHIFT +#define __HAL_ADC_CFGR_AWD23CR ADC_CFGR_AWD23CR +#define __HAL_ADC_CFGR_INJECT_AUTO_CONVERSION ADC_CFGR_INJECT_AUTO_CONVERSION +#define __HAL_ADC_CFGR_INJECT_CONTEXT_QUEUE ADC_CFGR_INJECT_CONTEXT_QUEUE +#define __HAL_ADC_CFGR_INJECT_DISCCONTINUOUS ADC_CFGR_INJECT_DISCCONTINUOUS +#define __HAL_ADC_CFGR_REG_DISCCONTINUOUS ADC_CFGR_REG_DISCCONTINUOUS +#define __HAL_ADC_CFGR_DISCONTINUOUS_NUM ADC_CFGR_DISCONTINUOUS_NUM +#define __HAL_ADC_CFGR_AUTOWAIT ADC_CFGR_AUTOWAIT +#define __HAL_ADC_CFGR_CONTINUOUS ADC_CFGR_CONTINUOUS +#define __HAL_ADC_CFGR_OVERRUN ADC_CFGR_OVERRUN +#define __HAL_ADC_CFGR_DMACONTREQ ADC_CFGR_DMACONTREQ +#define __HAL_ADC_CFGR_EXTSEL ADC_CFGR_EXTSEL_SET +#define __HAL_ADC_JSQR_JEXTSEL ADC_JSQR_JEXTSEL_SET +#define __HAL_ADC_OFR_CHANNEL ADC_OFR_CHANNEL +#define __HAL_ADC_DIFSEL_CHANNEL ADC_DIFSEL_CHANNEL +#define __HAL_ADC_CALFACT_DIFF_SET ADC_CALFACT_DIFF_SET +#define __HAL_ADC_CALFACT_DIFF_GET ADC_CALFACT_DIFF_GET +#define __HAL_ADC_TRX_HIGHTHRESHOLD ADC_TRX_HIGHTHRESHOLD + +#define __HAL_ADC_OFFSET_SHIFT_RESOLUTION ADC_OFFSET_SHIFT_RESOLUTION +#define __HAL_ADC_AWD1THRESHOLD_SHIFT_RESOLUTION ADC_AWD1THRESHOLD_SHIFT_RESOLUTION +#define __HAL_ADC_AWD23THRESHOLD_SHIFT_RESOLUTION ADC_AWD23THRESHOLD_SHIFT_RESOLUTION +#define __HAL_ADC_COMMON_REGISTER ADC_COMMON_REGISTER +#define __HAL_ADC_COMMON_CCR_MULTI ADC_COMMON_CCR_MULTI +#define __HAL_ADC_MULTIMODE_IS_ENABLED 
ADC_MULTIMODE_IS_ENABLE +#define __ADC_MULTIMODE_IS_ENABLED ADC_MULTIMODE_IS_ENABLE +#define __HAL_ADC_NONMULTIMODE_OR_MULTIMODEMASTER ADC_NONMULTIMODE_OR_MULTIMODEMASTER +#define __HAL_ADC_COMMON_ADC_OTHER ADC_COMMON_ADC_OTHER +#define __HAL_ADC_MULTI_SLAVE ADC_MULTI_SLAVE + +#define __HAL_ADC_SQR1_L ADC_SQR1_L_SHIFT +#define __HAL_ADC_JSQR_JL ADC_JSQR_JL_SHIFT +#define __HAL_ADC_JSQR_RK_JL ADC_JSQR_RK_JL +#define __HAL_ADC_CR1_DISCONTINUOUS_NUM ADC_CR1_DISCONTINUOUS_NUM +#define __HAL_ADC_CR1_SCAN ADC_CR1_SCAN_SET +#define __HAL_ADC_CONVCYCLES_MAX_RANGE ADC_CONVCYCLES_MAX_RANGE +#define __HAL_ADC_CLOCK_PRESCALER_RANGE ADC_CLOCK_PRESCALER_RANGE +#define __HAL_ADC_GET_CLOCK_PRESCALER ADC_GET_CLOCK_PRESCALER + +#define __HAL_ADC_SQR1 ADC_SQR1 +#define __HAL_ADC_SMPR1 ADC_SMPR1 +#define __HAL_ADC_SMPR2 ADC_SMPR2 +#define __HAL_ADC_SQR3_RK ADC_SQR3_RK +#define __HAL_ADC_SQR2_RK ADC_SQR2_RK +#define __HAL_ADC_SQR1_RK ADC_SQR1_RK +#define __HAL_ADC_CR2_CONTINUOUS ADC_CR2_CONTINUOUS +#define __HAL_ADC_CR1_DISCONTINUOUS ADC_CR1_DISCONTINUOUS +#define __HAL_ADC_CR1_SCANCONV ADC_CR1_SCANCONV +#define __HAL_ADC_CR2_EOCSelection ADC_CR2_EOCSelection +#define __HAL_ADC_CR2_DMAContReq ADC_CR2_DMAContReq +#define __HAL_ADC_JSQR ADC_JSQR + +#define __HAL_ADC_CHSELR_CHANNEL ADC_CHSELR_CHANNEL +#define __HAL_ADC_CFGR1_REG_DISCCONTINUOUS ADC_CFGR1_REG_DISCCONTINUOUS +#define __HAL_ADC_CFGR1_AUTOOFF ADC_CFGR1_AUTOOFF +#define __HAL_ADC_CFGR1_AUTOWAIT ADC_CFGR1_AUTOWAIT +#define __HAL_ADC_CFGR1_CONTINUOUS ADC_CFGR1_CONTINUOUS +#define __HAL_ADC_CFGR1_OVERRUN ADC_CFGR1_OVERRUN +#define __HAL_ADC_CFGR1_SCANDIR ADC_CFGR1_SCANDIR +#define __HAL_ADC_CFGR1_DMACONTREQ ADC_CFGR1_DMACONTREQ + +/** + * @} + */ + +/** @defgroup HAL_DAC_Aliased_Macros HAL DAC Aliased Macros maintained for legacy purpose + * @{ + */ +#define __HAL_DHR12R1_ALIGNEMENT DAC_DHR12R1_ALIGNMENT +#define __HAL_DHR12R2_ALIGNEMENT DAC_DHR12R2_ALIGNMENT +#define __HAL_DHR12RD_ALIGNEMENT DAC_DHR12RD_ALIGNMENT +#define IS_DAC_GENERATE_WAVE IS_DAC_WAVE + +/** + * @} + */ + +/** @defgroup HAL_DBGMCU_Aliased_Macros HAL DBGMCU Aliased Macros maintained for legacy purpose + * @{ + */ +#define __HAL_FREEZE_TIM1_DBGMCU __HAL_DBGMCU_FREEZE_TIM1 +#define __HAL_UNFREEZE_TIM1_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM1 +#define __HAL_FREEZE_TIM2_DBGMCU __HAL_DBGMCU_FREEZE_TIM2 +#define __HAL_UNFREEZE_TIM2_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM2 +#define __HAL_FREEZE_TIM3_DBGMCU __HAL_DBGMCU_FREEZE_TIM3 +#define __HAL_UNFREEZE_TIM3_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM3 +#define __HAL_FREEZE_TIM4_DBGMCU __HAL_DBGMCU_FREEZE_TIM4 +#define __HAL_UNFREEZE_TIM4_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM4 +#define __HAL_FREEZE_TIM5_DBGMCU __HAL_DBGMCU_FREEZE_TIM5 +#define __HAL_UNFREEZE_TIM5_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM5 +#define __HAL_FREEZE_TIM6_DBGMCU __HAL_DBGMCU_FREEZE_TIM6 +#define __HAL_UNFREEZE_TIM6_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM6 +#define __HAL_FREEZE_TIM7_DBGMCU __HAL_DBGMCU_FREEZE_TIM7 +#define __HAL_UNFREEZE_TIM7_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM7 +#define __HAL_FREEZE_TIM8_DBGMCU __HAL_DBGMCU_FREEZE_TIM8 +#define __HAL_UNFREEZE_TIM8_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM8 + +#define __HAL_FREEZE_TIM9_DBGMCU __HAL_DBGMCU_FREEZE_TIM9 +#define __HAL_UNFREEZE_TIM9_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM9 +#define __HAL_FREEZE_TIM10_DBGMCU __HAL_DBGMCU_FREEZE_TIM10 +#define __HAL_UNFREEZE_TIM10_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM10 +#define __HAL_FREEZE_TIM11_DBGMCU __HAL_DBGMCU_FREEZE_TIM11 +#define __HAL_UNFREEZE_TIM11_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM11 +#define __HAL_FREEZE_TIM12_DBGMCU 
__HAL_DBGMCU_FREEZE_TIM12 +#define __HAL_UNFREEZE_TIM12_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM12 +#define __HAL_FREEZE_TIM13_DBGMCU __HAL_DBGMCU_FREEZE_TIM13 +#define __HAL_UNFREEZE_TIM13_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM13 +#define __HAL_FREEZE_TIM14_DBGMCU __HAL_DBGMCU_FREEZE_TIM14 +#define __HAL_UNFREEZE_TIM14_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM14 +#define __HAL_FREEZE_CAN2_DBGMCU __HAL_DBGMCU_FREEZE_CAN2 +#define __HAL_UNFREEZE_CAN2_DBGMCU __HAL_DBGMCU_UNFREEZE_CAN2 + + +#define __HAL_FREEZE_TIM15_DBGMCU __HAL_DBGMCU_FREEZE_TIM15 +#define __HAL_UNFREEZE_TIM15_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM15 +#define __HAL_FREEZE_TIM16_DBGMCU __HAL_DBGMCU_FREEZE_TIM16 +#define __HAL_UNFREEZE_TIM16_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM16 +#define __HAL_FREEZE_TIM17_DBGMCU __HAL_DBGMCU_FREEZE_TIM17 +#define __HAL_UNFREEZE_TIM17_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM17 +#define __HAL_FREEZE_RTC_DBGMCU __HAL_DBGMCU_FREEZE_RTC +#define __HAL_UNFREEZE_RTC_DBGMCU __HAL_DBGMCU_UNFREEZE_RTC +#if defined(STM32H7) +#define __HAL_FREEZE_WWDG_DBGMCU __HAL_DBGMCU_FREEZE_WWDG1 +#define __HAL_UNFREEZE_WWDG_DBGMCU __HAL_DBGMCU_UnFreeze_WWDG1 +#define __HAL_FREEZE_IWDG_DBGMCU __HAL_DBGMCU_FREEZE_IWDG1 +#define __HAL_UNFREEZE_IWDG_DBGMCU __HAL_DBGMCU_UnFreeze_IWDG1 +#else +#define __HAL_FREEZE_WWDG_DBGMCU __HAL_DBGMCU_FREEZE_WWDG +#define __HAL_UNFREEZE_WWDG_DBGMCU __HAL_DBGMCU_UNFREEZE_WWDG +#define __HAL_FREEZE_IWDG_DBGMCU __HAL_DBGMCU_FREEZE_IWDG +#define __HAL_UNFREEZE_IWDG_DBGMCU __HAL_DBGMCU_UNFREEZE_IWDG +#endif /* STM32H7 */ +#define __HAL_FREEZE_I2C1_TIMEOUT_DBGMCU __HAL_DBGMCU_FREEZE_I2C1_TIMEOUT +#define __HAL_UNFREEZE_I2C1_TIMEOUT_DBGMCU __HAL_DBGMCU_UNFREEZE_I2C1_TIMEOUT +#define __HAL_FREEZE_I2C2_TIMEOUT_DBGMCU __HAL_DBGMCU_FREEZE_I2C2_TIMEOUT +#define __HAL_UNFREEZE_I2C2_TIMEOUT_DBGMCU __HAL_DBGMCU_UNFREEZE_I2C2_TIMEOUT +#define __HAL_FREEZE_I2C3_TIMEOUT_DBGMCU __HAL_DBGMCU_FREEZE_I2C3_TIMEOUT +#define __HAL_UNFREEZE_I2C3_TIMEOUT_DBGMCU __HAL_DBGMCU_UNFREEZE_I2C3_TIMEOUT +#define __HAL_FREEZE_CAN1_DBGMCU __HAL_DBGMCU_FREEZE_CAN1 +#define __HAL_UNFREEZE_CAN1_DBGMCU __HAL_DBGMCU_UNFREEZE_CAN1 +#define __HAL_FREEZE_LPTIM1_DBGMCU __HAL_DBGMCU_FREEZE_LPTIM1 +#define __HAL_UNFREEZE_LPTIM1_DBGMCU __HAL_DBGMCU_UNFREEZE_LPTIM1 +#define __HAL_FREEZE_LPTIM2_DBGMCU __HAL_DBGMCU_FREEZE_LPTIM2 +#define __HAL_UNFREEZE_LPTIM2_DBGMCU __HAL_DBGMCU_UNFREEZE_LPTIM2 + +/** + * @} + */ + +/** @defgroup HAL_COMP_Aliased_Macros HAL COMP Aliased Macros maintained for legacy purpose + * @{ + */ +#if defined(STM32F3) +#define COMP_START __HAL_COMP_ENABLE +#define COMP_STOP __HAL_COMP_DISABLE +#define COMP_LOCK __HAL_COMP_LOCK + +#if defined(STM32F301x8) || defined(STM32F302x8) || defined(STM32F318xx) || defined(STM32F303x8) || \ + defined(STM32F334x8) || defined(STM32F328xx) +#define __HAL_COMP_EXTI_RISING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_RISING_EDGE() : \ + __HAL_COMP_COMP6_EXTI_ENABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_RISING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_DISABLE_RISING_EDGE() : \ + __HAL_COMP_COMP6_EXTI_DISABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? 
__HAL_COMP_COMP4_EXTI_ENABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP6_EXTI_ENABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_DISABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP6_EXTI_DISABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_ENABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_IT() : \ + __HAL_COMP_COMP6_EXTI_ENABLE_IT()) +#define __HAL_COMP_EXTI_DISABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_DISABLE_IT() : \ + __HAL_COMP_COMP6_EXTI_DISABLE_IT()) +#define __HAL_COMP_EXTI_GET_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_GET_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_GET_FLAG() : \ + __HAL_COMP_COMP6_EXTI_GET_FLAG()) +#define __HAL_COMP_EXTI_CLEAR_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_CLEAR_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_CLEAR_FLAG() : \ + __HAL_COMP_COMP6_EXTI_CLEAR_FLAG()) +#endif +#if defined(STM32F302xE) || defined(STM32F302xC) +#define __HAL_COMP_EXTI_RISING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_RISING_EDGE() : \ + __HAL_COMP_COMP6_EXTI_ENABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_RISING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_DISABLE_RISING_EDGE() : \ + __HAL_COMP_COMP6_EXTI_DISABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP6_EXTI_ENABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_DISABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP6_EXTI_DISABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_ENABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_IT() : \ + __HAL_COMP_COMP6_EXTI_ENABLE_IT()) +#define __HAL_COMP_EXTI_DISABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? 
__HAL_COMP_COMP4_EXTI_DISABLE_IT() : \ + __HAL_COMP_COMP6_EXTI_DISABLE_IT()) +#define __HAL_COMP_EXTI_GET_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_GET_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_GET_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_GET_FLAG() : \ + __HAL_COMP_COMP6_EXTI_GET_FLAG()) +#define __HAL_COMP_EXTI_CLEAR_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_CLEAR_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_CLEAR_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_CLEAR_FLAG() : \ + __HAL_COMP_COMP6_EXTI_CLEAR_FLAG()) +#endif +#if defined(STM32F303xE) || defined(STM32F398xx) || defined(STM32F303xC) || defined(STM32F358xx) +#define __HAL_COMP_EXTI_RISING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_ENABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_ENABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP6) ? __HAL_COMP_COMP6_EXTI_ENABLE_RISING_EDGE() : \ + __HAL_COMP_COMP7_EXTI_ENABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_RISING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_DISABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_DISABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_DISABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP6) ? __HAL_COMP_COMP6_EXTI_DISABLE_RISING_EDGE() : \ + __HAL_COMP_COMP7_EXTI_DISABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_ENABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_ENABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP6) ? __HAL_COMP_COMP6_EXTI_ENABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP7_EXTI_ENABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_DISABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_DISABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_DISABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP6) ? 
__HAL_COMP_COMP6_EXTI_DISABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP7_EXTI_DISABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_ENABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_ENABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_ENABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP6) ? __HAL_COMP_COMP6_EXTI_ENABLE_IT() : \ + __HAL_COMP_COMP7_EXTI_ENABLE_IT()) +#define __HAL_COMP_EXTI_DISABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_DISABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_DISABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_DISABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP6) ? __HAL_COMP_COMP6_EXTI_DISABLE_IT() : \ + __HAL_COMP_COMP7_EXTI_DISABLE_IT()) +#define __HAL_COMP_EXTI_GET_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_GET_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_GET_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_GET_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_GET_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_GET_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP6) ? __HAL_COMP_COMP6_EXTI_GET_FLAG() : \ + __HAL_COMP_COMP7_EXTI_GET_FLAG()) +#define __HAL_COMP_EXTI_CLEAR_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_CLEAR_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_CLEAR_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_CLEAR_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_CLEAR_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_CLEAR_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP6) ? __HAL_COMP_COMP6_EXTI_CLEAR_FLAG() : \ + __HAL_COMP_COMP7_EXTI_CLEAR_FLAG()) +#endif +#if defined(STM32F373xC) ||defined(STM32F378xx) +#define __HAL_COMP_EXTI_RISING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_RISING_EDGE() : \ + __HAL_COMP_COMP2_EXTI_ENABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_RISING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_RISING_EDGE() : \ + __HAL_COMP_COMP2_EXTI_DISABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP2_EXTI_ENABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP2_EXTI_DISABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_ENABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_IT() : \ + __HAL_COMP_COMP2_EXTI_ENABLE_IT()) +#define __HAL_COMP_EXTI_DISABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? 
__HAL_COMP_COMP1_EXTI_DISABLE_IT() : \ + __HAL_COMP_COMP2_EXTI_DISABLE_IT()) +#define __HAL_COMP_EXTI_GET_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_GET_FLAG() : \ + __HAL_COMP_COMP2_EXTI_GET_FLAG()) +#define __HAL_COMP_EXTI_CLEAR_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_CLEAR_FLAG() : \ + __HAL_COMP_COMP2_EXTI_CLEAR_FLAG()) +#endif +#else +#define __HAL_COMP_EXTI_RISING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_RISING_EDGE() : \ + __HAL_COMP_COMP2_EXTI_ENABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_RISING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_RISING_EDGE() : \ + __HAL_COMP_COMP2_EXTI_DISABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP2_EXTI_ENABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP2_EXTI_DISABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_ENABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_IT() : \ + __HAL_COMP_COMP2_EXTI_ENABLE_IT()) +#define __HAL_COMP_EXTI_DISABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_IT() : \ + __HAL_COMP_COMP2_EXTI_DISABLE_IT()) +#define __HAL_COMP_EXTI_GET_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_GET_FLAG() : \ + __HAL_COMP_COMP2_EXTI_GET_FLAG()) +#define __HAL_COMP_EXTI_CLEAR_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_CLEAR_FLAG() : \ + __HAL_COMP_COMP2_EXTI_CLEAR_FLAG()) +#endif + +#define __HAL_COMP_GET_EXTI_LINE COMP_GET_EXTI_LINE + +#if defined(STM32L0) || defined(STM32L4) +/* Note: On these STM32 families, the only argument of this macro */ +/* is COMP_FLAG_LOCK. */ +/* This macro is replaced by __HAL_COMP_IS_LOCKED with only HAL handle */ +/* argument. 
*/ +#define __HAL_COMP_GET_FLAG(__HANDLE__, __FLAG__) (__HAL_COMP_IS_LOCKED(__HANDLE__)) +#endif +/** + * @} + */ + +#if defined(STM32L0) || defined(STM32L4) +/** @defgroup HAL_COMP_Aliased_Functions HAL COMP Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_COMP_Start_IT HAL_COMP_Start /* Function considered as legacy as EXTI event or IT configuration is + done into HAL_COMP_Init() */ +#define HAL_COMP_Stop_IT HAL_COMP_Stop /* Function considered as legacy as EXTI event or IT configuration is + done into HAL_COMP_Init() */ +/** + * @} + */ +#endif + +/** @defgroup HAL_DAC_Aliased_Macros HAL DAC Aliased Macros maintained for legacy purpose + * @{ + */ + +#define IS_DAC_WAVE(WAVE) (((WAVE) == DAC_WAVE_NONE) || \ + ((WAVE) == DAC_WAVE_NOISE)|| \ + ((WAVE) == DAC_WAVE_TRIANGLE)) + +/** + * @} + */ + +/** @defgroup HAL_FLASH_Aliased_Macros HAL FLASH Aliased Macros maintained for legacy purpose + * @{ + */ + +#define IS_WRPAREA IS_OB_WRPAREA +#define IS_TYPEPROGRAM IS_FLASH_TYPEPROGRAM +#define IS_TYPEPROGRAMFLASH IS_FLASH_TYPEPROGRAM +#define IS_TYPEERASE IS_FLASH_TYPEERASE +#define IS_NBSECTORS IS_FLASH_NBSECTORS +#define IS_OB_WDG_SOURCE IS_OB_IWDG_SOURCE + +/** + * @} + */ + +/** @defgroup HAL_I2C_Aliased_Macros HAL I2C Aliased Macros maintained for legacy purpose + * @{ + */ + +#define __HAL_I2C_RESET_CR2 I2C_RESET_CR2 +#define __HAL_I2C_GENERATE_START I2C_GENERATE_START +#if defined(STM32F1) +#define __HAL_I2C_FREQ_RANGE I2C_FREQRANGE +#else +#define __HAL_I2C_FREQ_RANGE I2C_FREQ_RANGE +#endif /* STM32F1 */ +#define __HAL_I2C_RISE_TIME I2C_RISE_TIME +#define __HAL_I2C_SPEED_STANDARD I2C_SPEED_STANDARD +#define __HAL_I2C_SPEED_FAST I2C_SPEED_FAST +#define __HAL_I2C_SPEED I2C_SPEED +#define __HAL_I2C_7BIT_ADD_WRITE I2C_7BIT_ADD_WRITE +#define __HAL_I2C_7BIT_ADD_READ I2C_7BIT_ADD_READ +#define __HAL_I2C_10BIT_ADDRESS I2C_10BIT_ADDRESS +#define __HAL_I2C_10BIT_HEADER_WRITE I2C_10BIT_HEADER_WRITE +#define __HAL_I2C_10BIT_HEADER_READ I2C_10BIT_HEADER_READ +#define __HAL_I2C_MEM_ADD_MSB I2C_MEM_ADD_MSB +#define __HAL_I2C_MEM_ADD_LSB I2C_MEM_ADD_LSB +#define __HAL_I2C_FREQRANGE I2C_FREQRANGE +/** + * @} + */ + +/** @defgroup HAL_I2S_Aliased_Macros HAL I2S Aliased Macros maintained for legacy purpose + * @{ + */ + +#define IS_I2S_INSTANCE IS_I2S_ALL_INSTANCE +#define IS_I2S_INSTANCE_EXT IS_I2S_ALL_INSTANCE_EXT + +#if defined(STM32H7) +#define __HAL_I2S_CLEAR_FREFLAG __HAL_I2S_CLEAR_TIFREFLAG +#endif + +/** + * @} + */ + +/** @defgroup HAL_IRDA_Aliased_Macros HAL IRDA Aliased Macros maintained for legacy purpose + * @{ + */ + +#define __IRDA_DISABLE __HAL_IRDA_DISABLE +#define __IRDA_ENABLE __HAL_IRDA_ENABLE + +#define __HAL_IRDA_GETCLOCKSOURCE IRDA_GETCLOCKSOURCE +#define __HAL_IRDA_MASK_COMPUTATION IRDA_MASK_COMPUTATION +#define __IRDA_GETCLOCKSOURCE IRDA_GETCLOCKSOURCE +#define __IRDA_MASK_COMPUTATION IRDA_MASK_COMPUTATION + +#define IS_IRDA_ONEBIT_SAMPLE IS_IRDA_ONE_BIT_SAMPLE + + +/** + * @} + */ + + +/** @defgroup HAL_IWDG_Aliased_Macros HAL IWDG Aliased Macros maintained for legacy purpose + * @{ + */ +#define __HAL_IWDG_ENABLE_WRITE_ACCESS IWDG_ENABLE_WRITE_ACCESS +#define __HAL_IWDG_DISABLE_WRITE_ACCESS IWDG_DISABLE_WRITE_ACCESS +/** + * @} + */ + + +/** @defgroup HAL_LPTIM_Aliased_Macros HAL LPTIM Aliased Macros maintained for legacy purpose + * @{ + */ + +#define __HAL_LPTIM_ENABLE_INTERRUPT __HAL_LPTIM_ENABLE_IT +#define __HAL_LPTIM_DISABLE_INTERRUPT __HAL_LPTIM_DISABLE_IT +#define __HAL_LPTIM_GET_ITSTATUS __HAL_LPTIM_GET_IT_SOURCE + +/** + * @} + */ + + +/** 
@defgroup HAL_OPAMP_Aliased_Macros HAL OPAMP Aliased Macros maintained for legacy purpose + * @{ + */ +#define __OPAMP_CSR_OPAXPD OPAMP_CSR_OPAXPD +#define __OPAMP_CSR_S3SELX OPAMP_CSR_S3SELX +#define __OPAMP_CSR_S4SELX OPAMP_CSR_S4SELX +#define __OPAMP_CSR_S5SELX OPAMP_CSR_S5SELX +#define __OPAMP_CSR_S6SELX OPAMP_CSR_S6SELX +#define __OPAMP_CSR_OPAXCAL_L OPAMP_CSR_OPAXCAL_L +#define __OPAMP_CSR_OPAXCAL_H OPAMP_CSR_OPAXCAL_H +#define __OPAMP_CSR_OPAXLPM OPAMP_CSR_OPAXLPM +#define __OPAMP_CSR_ALL_SWITCHES OPAMP_CSR_ALL_SWITCHES +#define __OPAMP_CSR_ANAWSELX OPAMP_CSR_ANAWSELX +#define __OPAMP_CSR_OPAXCALOUT OPAMP_CSR_OPAXCALOUT +#define __OPAMP_OFFSET_TRIM_BITSPOSITION OPAMP_OFFSET_TRIM_BITSPOSITION +#define __OPAMP_OFFSET_TRIM_SET OPAMP_OFFSET_TRIM_SET + +/** + * @} + */ + + +/** @defgroup HAL_PWR_Aliased_Macros HAL PWR Aliased Macros maintained for legacy purpose + * @{ + */ +#define __HAL_PVD_EVENT_DISABLE __HAL_PWR_PVD_EXTI_DISABLE_EVENT +#define __HAL_PVD_EVENT_ENABLE __HAL_PWR_PVD_EXTI_ENABLE_EVENT +#define __HAL_PVD_EXTI_FALLINGTRIGGER_DISABLE __HAL_PWR_PVD_EXTI_DISABLE_FALLING_EDGE +#define __HAL_PVD_EXTI_FALLINGTRIGGER_ENABLE __HAL_PWR_PVD_EXTI_ENABLE_FALLING_EDGE +#define __HAL_PVD_EXTI_RISINGTRIGGER_DISABLE __HAL_PWR_PVD_EXTI_DISABLE_RISING_EDGE +#define __HAL_PVD_EXTI_RISINGTRIGGER_ENABLE __HAL_PWR_PVD_EXTI_ENABLE_RISING_EDGE +#define __HAL_PVM_EVENT_DISABLE __HAL_PWR_PVM_EVENT_DISABLE +#define __HAL_PVM_EVENT_ENABLE __HAL_PWR_PVM_EVENT_ENABLE +#define __HAL_PVM_EXTI_FALLINGTRIGGER_DISABLE __HAL_PWR_PVM_EXTI_FALLINGTRIGGER_DISABLE +#define __HAL_PVM_EXTI_FALLINGTRIGGER_ENABLE __HAL_PWR_PVM_EXTI_FALLINGTRIGGER_ENABLE +#define __HAL_PVM_EXTI_RISINGTRIGGER_DISABLE __HAL_PWR_PVM_EXTI_RISINGTRIGGER_DISABLE +#define __HAL_PVM_EXTI_RISINGTRIGGER_ENABLE __HAL_PWR_PVM_EXTI_RISINGTRIGGER_ENABLE +#define __HAL_PWR_INTERNALWAKEUP_DISABLE HAL_PWREx_DisableInternalWakeUpLine +#define __HAL_PWR_INTERNALWAKEUP_ENABLE HAL_PWREx_EnableInternalWakeUpLine +#define __HAL_PWR_PULL_UP_DOWN_CONFIG_DISABLE HAL_PWREx_DisablePullUpPullDownConfig +#define __HAL_PWR_PULL_UP_DOWN_CONFIG_ENABLE HAL_PWREx_EnablePullUpPullDownConfig +#define __HAL_PWR_PVD_EXTI_CLEAR_EGDE_TRIGGER() do { __HAL_PWR_PVD_EXTI_DISABLE_RISING_EDGE(); \ + __HAL_PWR_PVD_EXTI_DISABLE_FALLING_EDGE(); \ + } while(0) +#define __HAL_PWR_PVD_EXTI_EVENT_DISABLE __HAL_PWR_PVD_EXTI_DISABLE_EVENT +#define __HAL_PWR_PVD_EXTI_EVENT_ENABLE __HAL_PWR_PVD_EXTI_ENABLE_EVENT +#define __HAL_PWR_PVD_EXTI_FALLINGTRIGGER_DISABLE __HAL_PWR_PVD_EXTI_DISABLE_FALLING_EDGE +#define __HAL_PWR_PVD_EXTI_FALLINGTRIGGER_ENABLE __HAL_PWR_PVD_EXTI_ENABLE_FALLING_EDGE +#define __HAL_PWR_PVD_EXTI_RISINGTRIGGER_DISABLE __HAL_PWR_PVD_EXTI_DISABLE_RISING_EDGE +#define __HAL_PWR_PVD_EXTI_RISINGTRIGGER_ENABLE __HAL_PWR_PVD_EXTI_ENABLE_RISING_EDGE +#define __HAL_PWR_PVD_EXTI_SET_FALLING_EGDE_TRIGGER __HAL_PWR_PVD_EXTI_ENABLE_FALLING_EDGE +#define __HAL_PWR_PVD_EXTI_SET_RISING_EDGE_TRIGGER __HAL_PWR_PVD_EXTI_ENABLE_RISING_EDGE +#define __HAL_PWR_PVM_DISABLE() do { HAL_PWREx_DisablePVM1();HAL_PWREx_DisablePVM2(); \ + HAL_PWREx_DisablePVM3();HAL_PWREx_DisablePVM4(); \ + } while(0) +#define __HAL_PWR_PVM_ENABLE() do { HAL_PWREx_EnablePVM1();HAL_PWREx_EnablePVM2(); \ + HAL_PWREx_EnablePVM3();HAL_PWREx_EnablePVM4(); \ + } while(0) +#define __HAL_PWR_SRAM2CONTENT_PRESERVE_DISABLE HAL_PWREx_DisableSRAM2ContentRetention +#define __HAL_PWR_SRAM2CONTENT_PRESERVE_ENABLE HAL_PWREx_EnableSRAM2ContentRetention +#define __HAL_PWR_VDDIO2_DISABLE HAL_PWREx_DisableVddIO2 +#define 
__HAL_PWR_VDDIO2_ENABLE HAL_PWREx_EnableVddIO2 +#define __HAL_PWR_VDDIO2_EXTI_CLEAR_EGDE_TRIGGER __HAL_PWR_VDDIO2_EXTI_DISABLE_FALLING_EDGE +#define __HAL_PWR_VDDIO2_EXTI_SET_FALLING_EGDE_TRIGGER __HAL_PWR_VDDIO2_EXTI_ENABLE_FALLING_EDGE +#define __HAL_PWR_VDDUSB_DISABLE HAL_PWREx_DisableVddUSB +#define __HAL_PWR_VDDUSB_ENABLE HAL_PWREx_EnableVddUSB + +#if defined (STM32F4) +#define __HAL_PVD_EXTI_ENABLE_IT(PWR_EXTI_LINE_PVD) __HAL_PWR_PVD_EXTI_ENABLE_IT() +#define __HAL_PVD_EXTI_DISABLE_IT(PWR_EXTI_LINE_PVD) __HAL_PWR_PVD_EXTI_DISABLE_IT() +#define __HAL_PVD_EXTI_GET_FLAG(PWR_EXTI_LINE_PVD) __HAL_PWR_PVD_EXTI_GET_FLAG() +#define __HAL_PVD_EXTI_CLEAR_FLAG(PWR_EXTI_LINE_PVD) __HAL_PWR_PVD_EXTI_CLEAR_FLAG() +#define __HAL_PVD_EXTI_GENERATE_SWIT(PWR_EXTI_LINE_PVD) __HAL_PWR_PVD_EXTI_GENERATE_SWIT() +#else +#define __HAL_PVD_EXTI_CLEAR_FLAG __HAL_PWR_PVD_EXTI_CLEAR_FLAG +#define __HAL_PVD_EXTI_DISABLE_IT __HAL_PWR_PVD_EXTI_DISABLE_IT +#define __HAL_PVD_EXTI_ENABLE_IT __HAL_PWR_PVD_EXTI_ENABLE_IT +#define __HAL_PVD_EXTI_GENERATE_SWIT __HAL_PWR_PVD_EXTI_GENERATE_SWIT +#define __HAL_PVD_EXTI_GET_FLAG __HAL_PWR_PVD_EXTI_GET_FLAG +#endif /* STM32F4 */ +/** + * @} + */ + + +/** @defgroup HAL_RCC_Aliased HAL RCC Aliased maintained for legacy purpose + * @{ + */ + +#define RCC_StopWakeUpClock_MSI RCC_STOP_WAKEUPCLOCK_MSI +#define RCC_StopWakeUpClock_HSI RCC_STOP_WAKEUPCLOCK_HSI + +#define HAL_RCC_CCSCallback HAL_RCC_CSSCallback +#define HAL_RC48_EnableBuffer_Cmd(cmd) (((cmd)==ENABLE) ? \ + HAL_RCCEx_EnableHSI48_VREFINT() : HAL_RCCEx_DisableHSI48_VREFINT()) + +#define __ADC_CLK_DISABLE __HAL_RCC_ADC_CLK_DISABLE +#define __ADC_CLK_ENABLE __HAL_RCC_ADC_CLK_ENABLE +#define __ADC_CLK_SLEEP_DISABLE __HAL_RCC_ADC_CLK_SLEEP_DISABLE +#define __ADC_CLK_SLEEP_ENABLE __HAL_RCC_ADC_CLK_SLEEP_ENABLE +#define __ADC_FORCE_RESET __HAL_RCC_ADC_FORCE_RESET +#define __ADC_RELEASE_RESET __HAL_RCC_ADC_RELEASE_RESET +#define __ADC1_CLK_DISABLE __HAL_RCC_ADC1_CLK_DISABLE +#define __ADC1_CLK_ENABLE __HAL_RCC_ADC1_CLK_ENABLE +#define __ADC1_FORCE_RESET __HAL_RCC_ADC1_FORCE_RESET +#define __ADC1_RELEASE_RESET __HAL_RCC_ADC1_RELEASE_RESET +#define __ADC1_CLK_SLEEP_ENABLE __HAL_RCC_ADC1_CLK_SLEEP_ENABLE +#define __ADC1_CLK_SLEEP_DISABLE __HAL_RCC_ADC1_CLK_SLEEP_DISABLE +#define __ADC2_CLK_DISABLE __HAL_RCC_ADC2_CLK_DISABLE +#define __ADC2_CLK_ENABLE __HAL_RCC_ADC2_CLK_ENABLE +#define __ADC2_FORCE_RESET __HAL_RCC_ADC2_FORCE_RESET +#define __ADC2_RELEASE_RESET __HAL_RCC_ADC2_RELEASE_RESET +#define __ADC3_CLK_DISABLE __HAL_RCC_ADC3_CLK_DISABLE +#define __ADC3_CLK_ENABLE __HAL_RCC_ADC3_CLK_ENABLE +#define __ADC3_FORCE_RESET __HAL_RCC_ADC3_FORCE_RESET +#define __ADC3_RELEASE_RESET __HAL_RCC_ADC3_RELEASE_RESET +#define __AES_CLK_DISABLE __HAL_RCC_AES_CLK_DISABLE +#define __AES_CLK_ENABLE __HAL_RCC_AES_CLK_ENABLE +#define __AES_CLK_SLEEP_DISABLE __HAL_RCC_AES_CLK_SLEEP_DISABLE +#define __AES_CLK_SLEEP_ENABLE __HAL_RCC_AES_CLK_SLEEP_ENABLE +#define __AES_FORCE_RESET __HAL_RCC_AES_FORCE_RESET +#define __AES_RELEASE_RESET __HAL_RCC_AES_RELEASE_RESET +#define __CRYP_CLK_SLEEP_ENABLE __HAL_RCC_CRYP_CLK_SLEEP_ENABLE +#define __CRYP_CLK_SLEEP_DISABLE __HAL_RCC_CRYP_CLK_SLEEP_DISABLE +#define __CRYP_CLK_ENABLE __HAL_RCC_CRYP_CLK_ENABLE +#define __CRYP_CLK_DISABLE __HAL_RCC_CRYP_CLK_DISABLE +#define __CRYP_FORCE_RESET __HAL_RCC_CRYP_FORCE_RESET +#define __CRYP_RELEASE_RESET __HAL_RCC_CRYP_RELEASE_RESET +#define __AFIO_CLK_DISABLE __HAL_RCC_AFIO_CLK_DISABLE +#define __AFIO_CLK_ENABLE __HAL_RCC_AFIO_CLK_ENABLE +#define __AFIO_FORCE_RESET 
__HAL_RCC_AFIO_FORCE_RESET +#define __AFIO_RELEASE_RESET __HAL_RCC_AFIO_RELEASE_RESET +#define __AHB_FORCE_RESET __HAL_RCC_AHB_FORCE_RESET +#define __AHB_RELEASE_RESET __HAL_RCC_AHB_RELEASE_RESET +#define __AHB1_FORCE_RESET __HAL_RCC_AHB1_FORCE_RESET +#define __AHB1_RELEASE_RESET __HAL_RCC_AHB1_RELEASE_RESET +#define __AHB2_FORCE_RESET __HAL_RCC_AHB2_FORCE_RESET +#define __AHB2_RELEASE_RESET __HAL_RCC_AHB2_RELEASE_RESET +#define __AHB3_FORCE_RESET __HAL_RCC_AHB3_FORCE_RESET +#define __AHB3_RELEASE_RESET __HAL_RCC_AHB3_RELEASE_RESET +#define __APB1_FORCE_RESET __HAL_RCC_APB1_FORCE_RESET +#define __APB1_RELEASE_RESET __HAL_RCC_APB1_RELEASE_RESET +#define __APB2_FORCE_RESET __HAL_RCC_APB2_FORCE_RESET +#define __APB2_RELEASE_RESET __HAL_RCC_APB2_RELEASE_RESET +#if defined(STM32C0) +#define __HAL_RCC_APB1_FORCE_RESET __HAL_RCC_APB1_GRP1_FORCE_RESET +#define __HAL_RCC_APB1_RELEASE_RESET __HAL_RCC_APB1_GRP1_RELEASE_RESET +#define __HAL_RCC_APB2_FORCE_RESET __HAL_RCC_APB1_GRP2_FORCE_RESET +#define __HAL_RCC_APB2_RELEASE_RESET __HAL_RCC_APB1_GRP2_RELEASE_RESET +#endif /* STM32C0 */ +#define __BKP_CLK_DISABLE __HAL_RCC_BKP_CLK_DISABLE +#define __BKP_CLK_ENABLE __HAL_RCC_BKP_CLK_ENABLE +#define __BKP_FORCE_RESET __HAL_RCC_BKP_FORCE_RESET +#define __BKP_RELEASE_RESET __HAL_RCC_BKP_RELEASE_RESET +#define __CAN1_CLK_DISABLE __HAL_RCC_CAN1_CLK_DISABLE +#define __CAN1_CLK_ENABLE __HAL_RCC_CAN1_CLK_ENABLE +#define __CAN1_CLK_SLEEP_DISABLE __HAL_RCC_CAN1_CLK_SLEEP_DISABLE +#define __CAN1_CLK_SLEEP_ENABLE __HAL_RCC_CAN1_CLK_SLEEP_ENABLE +#define __CAN1_FORCE_RESET __HAL_RCC_CAN1_FORCE_RESET +#define __CAN1_RELEASE_RESET __HAL_RCC_CAN1_RELEASE_RESET +#define __CAN_CLK_DISABLE __HAL_RCC_CAN1_CLK_DISABLE +#define __CAN_CLK_ENABLE __HAL_RCC_CAN1_CLK_ENABLE +#define __CAN_FORCE_RESET __HAL_RCC_CAN1_FORCE_RESET +#define __CAN_RELEASE_RESET __HAL_RCC_CAN1_RELEASE_RESET +#define __CAN2_CLK_DISABLE __HAL_RCC_CAN2_CLK_DISABLE +#define __CAN2_CLK_ENABLE __HAL_RCC_CAN2_CLK_ENABLE +#define __CAN2_FORCE_RESET __HAL_RCC_CAN2_FORCE_RESET +#define __CAN2_RELEASE_RESET __HAL_RCC_CAN2_RELEASE_RESET +#define __CEC_CLK_DISABLE __HAL_RCC_CEC_CLK_DISABLE +#define __CEC_CLK_ENABLE __HAL_RCC_CEC_CLK_ENABLE +#define __COMP_CLK_DISABLE __HAL_RCC_COMP_CLK_DISABLE +#define __COMP_CLK_ENABLE __HAL_RCC_COMP_CLK_ENABLE +#define __COMP_FORCE_RESET __HAL_RCC_COMP_FORCE_RESET +#define __COMP_RELEASE_RESET __HAL_RCC_COMP_RELEASE_RESET +#define __COMP_CLK_SLEEP_ENABLE __HAL_RCC_COMP_CLK_SLEEP_ENABLE +#define __COMP_CLK_SLEEP_DISABLE __HAL_RCC_COMP_CLK_SLEEP_DISABLE +#define __CEC_FORCE_RESET __HAL_RCC_CEC_FORCE_RESET +#define __CEC_RELEASE_RESET __HAL_RCC_CEC_RELEASE_RESET +#define __CRC_CLK_DISABLE __HAL_RCC_CRC_CLK_DISABLE +#define __CRC_CLK_ENABLE __HAL_RCC_CRC_CLK_ENABLE +#define __CRC_CLK_SLEEP_DISABLE __HAL_RCC_CRC_CLK_SLEEP_DISABLE +#define __CRC_CLK_SLEEP_ENABLE __HAL_RCC_CRC_CLK_SLEEP_ENABLE +#define __CRC_FORCE_RESET __HAL_RCC_CRC_FORCE_RESET +#define __CRC_RELEASE_RESET __HAL_RCC_CRC_RELEASE_RESET +#define __DAC_CLK_DISABLE __HAL_RCC_DAC_CLK_DISABLE +#define __DAC_CLK_ENABLE __HAL_RCC_DAC_CLK_ENABLE +#define __DAC_FORCE_RESET __HAL_RCC_DAC_FORCE_RESET +#define __DAC_RELEASE_RESET __HAL_RCC_DAC_RELEASE_RESET +#define __DAC1_CLK_DISABLE __HAL_RCC_DAC1_CLK_DISABLE +#define __DAC1_CLK_ENABLE __HAL_RCC_DAC1_CLK_ENABLE +#define __DAC1_CLK_SLEEP_DISABLE __HAL_RCC_DAC1_CLK_SLEEP_DISABLE +#define __DAC1_CLK_SLEEP_ENABLE __HAL_RCC_DAC1_CLK_SLEEP_ENABLE +#define __DAC1_FORCE_RESET __HAL_RCC_DAC1_FORCE_RESET +#define __DAC1_RELEASE_RESET 
__HAL_RCC_DAC1_RELEASE_RESET +#define __DBGMCU_CLK_ENABLE __HAL_RCC_DBGMCU_CLK_ENABLE +#define __DBGMCU_CLK_DISABLE __HAL_RCC_DBGMCU_CLK_DISABLE +#define __DBGMCU_FORCE_RESET __HAL_RCC_DBGMCU_FORCE_RESET +#define __DBGMCU_RELEASE_RESET __HAL_RCC_DBGMCU_RELEASE_RESET +#define __DFSDM_CLK_DISABLE __HAL_RCC_DFSDM_CLK_DISABLE +#define __DFSDM_CLK_ENABLE __HAL_RCC_DFSDM_CLK_ENABLE +#define __DFSDM_CLK_SLEEP_DISABLE __HAL_RCC_DFSDM_CLK_SLEEP_DISABLE +#define __DFSDM_CLK_SLEEP_ENABLE __HAL_RCC_DFSDM_CLK_SLEEP_ENABLE +#define __DFSDM_FORCE_RESET __HAL_RCC_DFSDM_FORCE_RESET +#define __DFSDM_RELEASE_RESET __HAL_RCC_DFSDM_RELEASE_RESET +#define __DMA1_CLK_DISABLE __HAL_RCC_DMA1_CLK_DISABLE +#define __DMA1_CLK_ENABLE __HAL_RCC_DMA1_CLK_ENABLE +#define __DMA1_CLK_SLEEP_DISABLE __HAL_RCC_DMA1_CLK_SLEEP_DISABLE +#define __DMA1_CLK_SLEEP_ENABLE __HAL_RCC_DMA1_CLK_SLEEP_ENABLE +#define __DMA1_FORCE_RESET __HAL_RCC_DMA1_FORCE_RESET +#define __DMA1_RELEASE_RESET __HAL_RCC_DMA1_RELEASE_RESET +#define __DMA2_CLK_DISABLE __HAL_RCC_DMA2_CLK_DISABLE +#define __DMA2_CLK_ENABLE __HAL_RCC_DMA2_CLK_ENABLE +#define __DMA2_CLK_SLEEP_DISABLE __HAL_RCC_DMA2_CLK_SLEEP_DISABLE +#define __DMA2_CLK_SLEEP_ENABLE __HAL_RCC_DMA2_CLK_SLEEP_ENABLE +#define __DMA2_FORCE_RESET __HAL_RCC_DMA2_FORCE_RESET +#define __DMA2_RELEASE_RESET __HAL_RCC_DMA2_RELEASE_RESET +#define __ETHMAC_CLK_DISABLE __HAL_RCC_ETHMAC_CLK_DISABLE +#define __ETHMAC_CLK_ENABLE __HAL_RCC_ETHMAC_CLK_ENABLE +#define __ETHMAC_FORCE_RESET __HAL_RCC_ETHMAC_FORCE_RESET +#define __ETHMAC_RELEASE_RESET __HAL_RCC_ETHMAC_RELEASE_RESET +#define __ETHMACRX_CLK_DISABLE __HAL_RCC_ETHMACRX_CLK_DISABLE +#define __ETHMACRX_CLK_ENABLE __HAL_RCC_ETHMACRX_CLK_ENABLE +#define __ETHMACTX_CLK_DISABLE __HAL_RCC_ETHMACTX_CLK_DISABLE +#define __ETHMACTX_CLK_ENABLE __HAL_RCC_ETHMACTX_CLK_ENABLE +#define __FIREWALL_CLK_DISABLE __HAL_RCC_FIREWALL_CLK_DISABLE +#define __FIREWALL_CLK_ENABLE __HAL_RCC_FIREWALL_CLK_ENABLE +#define __FLASH_CLK_DISABLE __HAL_RCC_FLASH_CLK_DISABLE +#define __FLASH_CLK_ENABLE __HAL_RCC_FLASH_CLK_ENABLE +#define __FLASH_CLK_SLEEP_DISABLE __HAL_RCC_FLASH_CLK_SLEEP_DISABLE +#define __FLASH_CLK_SLEEP_ENABLE __HAL_RCC_FLASH_CLK_SLEEP_ENABLE +#define __FLASH_FORCE_RESET __HAL_RCC_FLASH_FORCE_RESET +#define __FLASH_RELEASE_RESET __HAL_RCC_FLASH_RELEASE_RESET +#define __FLITF_CLK_DISABLE __HAL_RCC_FLITF_CLK_DISABLE +#define __FLITF_CLK_ENABLE __HAL_RCC_FLITF_CLK_ENABLE +#define __FLITF_FORCE_RESET __HAL_RCC_FLITF_FORCE_RESET +#define __FLITF_RELEASE_RESET __HAL_RCC_FLITF_RELEASE_RESET +#define __FLITF_CLK_SLEEP_ENABLE __HAL_RCC_FLITF_CLK_SLEEP_ENABLE +#define __FLITF_CLK_SLEEP_DISABLE __HAL_RCC_FLITF_CLK_SLEEP_DISABLE +#define __FMC_CLK_DISABLE __HAL_RCC_FMC_CLK_DISABLE +#define __FMC_CLK_ENABLE __HAL_RCC_FMC_CLK_ENABLE +#define __FMC_CLK_SLEEP_DISABLE __HAL_RCC_FMC_CLK_SLEEP_DISABLE +#define __FMC_CLK_SLEEP_ENABLE __HAL_RCC_FMC_CLK_SLEEP_ENABLE +#define __FMC_FORCE_RESET __HAL_RCC_FMC_FORCE_RESET +#define __FMC_RELEASE_RESET __HAL_RCC_FMC_RELEASE_RESET +#define __FSMC_CLK_DISABLE __HAL_RCC_FSMC_CLK_DISABLE +#define __FSMC_CLK_ENABLE __HAL_RCC_FSMC_CLK_ENABLE +#define __GPIOA_CLK_DISABLE __HAL_RCC_GPIOA_CLK_DISABLE +#define __GPIOA_CLK_ENABLE __HAL_RCC_GPIOA_CLK_ENABLE +#define __GPIOA_CLK_SLEEP_DISABLE __HAL_RCC_GPIOA_CLK_SLEEP_DISABLE +#define __GPIOA_CLK_SLEEP_ENABLE __HAL_RCC_GPIOA_CLK_SLEEP_ENABLE +#define __GPIOA_FORCE_RESET __HAL_RCC_GPIOA_FORCE_RESET +#define __GPIOA_RELEASE_RESET __HAL_RCC_GPIOA_RELEASE_RESET +#define __GPIOB_CLK_DISABLE 
__HAL_RCC_GPIOB_CLK_DISABLE +#define __GPIOB_CLK_ENABLE __HAL_RCC_GPIOB_CLK_ENABLE +#define __GPIOB_CLK_SLEEP_DISABLE __HAL_RCC_GPIOB_CLK_SLEEP_DISABLE +#define __GPIOB_CLK_SLEEP_ENABLE __HAL_RCC_GPIOB_CLK_SLEEP_ENABLE +#define __GPIOB_FORCE_RESET __HAL_RCC_GPIOB_FORCE_RESET +#define __GPIOB_RELEASE_RESET __HAL_RCC_GPIOB_RELEASE_RESET +#define __GPIOC_CLK_DISABLE __HAL_RCC_GPIOC_CLK_DISABLE +#define __GPIOC_CLK_ENABLE __HAL_RCC_GPIOC_CLK_ENABLE +#define __GPIOC_CLK_SLEEP_DISABLE __HAL_RCC_GPIOC_CLK_SLEEP_DISABLE +#define __GPIOC_CLK_SLEEP_ENABLE __HAL_RCC_GPIOC_CLK_SLEEP_ENABLE +#define __GPIOC_FORCE_RESET __HAL_RCC_GPIOC_FORCE_RESET +#define __GPIOC_RELEASE_RESET __HAL_RCC_GPIOC_RELEASE_RESET +#define __GPIOD_CLK_DISABLE __HAL_RCC_GPIOD_CLK_DISABLE +#define __GPIOD_CLK_ENABLE __HAL_RCC_GPIOD_CLK_ENABLE +#define __GPIOD_CLK_SLEEP_DISABLE __HAL_RCC_GPIOD_CLK_SLEEP_DISABLE +#define __GPIOD_CLK_SLEEP_ENABLE __HAL_RCC_GPIOD_CLK_SLEEP_ENABLE +#define __GPIOD_FORCE_RESET __HAL_RCC_GPIOD_FORCE_RESET +#define __GPIOD_RELEASE_RESET __HAL_RCC_GPIOD_RELEASE_RESET +#define __GPIOE_CLK_DISABLE __HAL_RCC_GPIOE_CLK_DISABLE +#define __GPIOE_CLK_ENABLE __HAL_RCC_GPIOE_CLK_ENABLE +#define __GPIOE_CLK_SLEEP_DISABLE __HAL_RCC_GPIOE_CLK_SLEEP_DISABLE +#define __GPIOE_CLK_SLEEP_ENABLE __HAL_RCC_GPIOE_CLK_SLEEP_ENABLE +#define __GPIOE_FORCE_RESET __HAL_RCC_GPIOE_FORCE_RESET +#define __GPIOE_RELEASE_RESET __HAL_RCC_GPIOE_RELEASE_RESET +#define __GPIOF_CLK_DISABLE __HAL_RCC_GPIOF_CLK_DISABLE +#define __GPIOF_CLK_ENABLE __HAL_RCC_GPIOF_CLK_ENABLE +#define __GPIOF_CLK_SLEEP_DISABLE __HAL_RCC_GPIOF_CLK_SLEEP_DISABLE +#define __GPIOF_CLK_SLEEP_ENABLE __HAL_RCC_GPIOF_CLK_SLEEP_ENABLE +#define __GPIOF_FORCE_RESET __HAL_RCC_GPIOF_FORCE_RESET +#define __GPIOF_RELEASE_RESET __HAL_RCC_GPIOF_RELEASE_RESET +#define __GPIOG_CLK_DISABLE __HAL_RCC_GPIOG_CLK_DISABLE +#define __GPIOG_CLK_ENABLE __HAL_RCC_GPIOG_CLK_ENABLE +#define __GPIOG_CLK_SLEEP_DISABLE __HAL_RCC_GPIOG_CLK_SLEEP_DISABLE +#define __GPIOG_CLK_SLEEP_ENABLE __HAL_RCC_GPIOG_CLK_SLEEP_ENABLE +#define __GPIOG_FORCE_RESET __HAL_RCC_GPIOG_FORCE_RESET +#define __GPIOG_RELEASE_RESET __HAL_RCC_GPIOG_RELEASE_RESET +#define __GPIOH_CLK_DISABLE __HAL_RCC_GPIOH_CLK_DISABLE +#define __GPIOH_CLK_ENABLE __HAL_RCC_GPIOH_CLK_ENABLE +#define __GPIOH_CLK_SLEEP_DISABLE __HAL_RCC_GPIOH_CLK_SLEEP_DISABLE +#define __GPIOH_CLK_SLEEP_ENABLE __HAL_RCC_GPIOH_CLK_SLEEP_ENABLE +#define __GPIOH_FORCE_RESET __HAL_RCC_GPIOH_FORCE_RESET +#define __GPIOH_RELEASE_RESET __HAL_RCC_GPIOH_RELEASE_RESET +#define __I2C1_CLK_DISABLE __HAL_RCC_I2C1_CLK_DISABLE +#define __I2C1_CLK_ENABLE __HAL_RCC_I2C1_CLK_ENABLE +#define __I2C1_CLK_SLEEP_DISABLE __HAL_RCC_I2C1_CLK_SLEEP_DISABLE +#define __I2C1_CLK_SLEEP_ENABLE __HAL_RCC_I2C1_CLK_SLEEP_ENABLE +#define __I2C1_FORCE_RESET __HAL_RCC_I2C1_FORCE_RESET +#define __I2C1_RELEASE_RESET __HAL_RCC_I2C1_RELEASE_RESET +#define __I2C2_CLK_DISABLE __HAL_RCC_I2C2_CLK_DISABLE +#define __I2C2_CLK_ENABLE __HAL_RCC_I2C2_CLK_ENABLE +#define __I2C2_CLK_SLEEP_DISABLE __HAL_RCC_I2C2_CLK_SLEEP_DISABLE +#define __I2C2_CLK_SLEEP_ENABLE __HAL_RCC_I2C2_CLK_SLEEP_ENABLE +#define __I2C2_FORCE_RESET __HAL_RCC_I2C2_FORCE_RESET +#define __I2C2_RELEASE_RESET __HAL_RCC_I2C2_RELEASE_RESET +#define __I2C3_CLK_DISABLE __HAL_RCC_I2C3_CLK_DISABLE +#define __I2C3_CLK_ENABLE __HAL_RCC_I2C3_CLK_ENABLE +#define __I2C3_CLK_SLEEP_DISABLE __HAL_RCC_I2C3_CLK_SLEEP_DISABLE +#define __I2C3_CLK_SLEEP_ENABLE __HAL_RCC_I2C3_CLK_SLEEP_ENABLE +#define __I2C3_FORCE_RESET __HAL_RCC_I2C3_FORCE_RESET +#define 
__I2C3_RELEASE_RESET __HAL_RCC_I2C3_RELEASE_RESET +#define __LCD_CLK_DISABLE __HAL_RCC_LCD_CLK_DISABLE +#define __LCD_CLK_ENABLE __HAL_RCC_LCD_CLK_ENABLE +#define __LCD_CLK_SLEEP_DISABLE __HAL_RCC_LCD_CLK_SLEEP_DISABLE +#define __LCD_CLK_SLEEP_ENABLE __HAL_RCC_LCD_CLK_SLEEP_ENABLE +#define __LCD_FORCE_RESET __HAL_RCC_LCD_FORCE_RESET +#define __LCD_RELEASE_RESET __HAL_RCC_LCD_RELEASE_RESET +#define __LPTIM1_CLK_DISABLE __HAL_RCC_LPTIM1_CLK_DISABLE +#define __LPTIM1_CLK_ENABLE __HAL_RCC_LPTIM1_CLK_ENABLE +#define __LPTIM1_CLK_SLEEP_DISABLE __HAL_RCC_LPTIM1_CLK_SLEEP_DISABLE +#define __LPTIM1_CLK_SLEEP_ENABLE __HAL_RCC_LPTIM1_CLK_SLEEP_ENABLE +#define __LPTIM1_FORCE_RESET __HAL_RCC_LPTIM1_FORCE_RESET +#define __LPTIM1_RELEASE_RESET __HAL_RCC_LPTIM1_RELEASE_RESET +#define __LPTIM2_CLK_DISABLE __HAL_RCC_LPTIM2_CLK_DISABLE +#define __LPTIM2_CLK_ENABLE __HAL_RCC_LPTIM2_CLK_ENABLE +#define __LPTIM2_CLK_SLEEP_DISABLE __HAL_RCC_LPTIM2_CLK_SLEEP_DISABLE +#define __LPTIM2_CLK_SLEEP_ENABLE __HAL_RCC_LPTIM2_CLK_SLEEP_ENABLE +#define __LPTIM2_FORCE_RESET __HAL_RCC_LPTIM2_FORCE_RESET +#define __LPTIM2_RELEASE_RESET __HAL_RCC_LPTIM2_RELEASE_RESET +#define __LPUART1_CLK_DISABLE __HAL_RCC_LPUART1_CLK_DISABLE +#define __LPUART1_CLK_ENABLE __HAL_RCC_LPUART1_CLK_ENABLE +#define __LPUART1_CLK_SLEEP_DISABLE __HAL_RCC_LPUART1_CLK_SLEEP_DISABLE +#define __LPUART1_CLK_SLEEP_ENABLE __HAL_RCC_LPUART1_CLK_SLEEP_ENABLE +#define __LPUART1_FORCE_RESET __HAL_RCC_LPUART1_FORCE_RESET +#define __LPUART1_RELEASE_RESET __HAL_RCC_LPUART1_RELEASE_RESET +#define __OPAMP_CLK_DISABLE __HAL_RCC_OPAMP_CLK_DISABLE +#define __OPAMP_CLK_ENABLE __HAL_RCC_OPAMP_CLK_ENABLE +#define __OPAMP_CLK_SLEEP_DISABLE __HAL_RCC_OPAMP_CLK_SLEEP_DISABLE +#define __OPAMP_CLK_SLEEP_ENABLE __HAL_RCC_OPAMP_CLK_SLEEP_ENABLE +#define __OPAMP_FORCE_RESET __HAL_RCC_OPAMP_FORCE_RESET +#define __OPAMP_RELEASE_RESET __HAL_RCC_OPAMP_RELEASE_RESET +#define __OTGFS_CLK_DISABLE __HAL_RCC_OTGFS_CLK_DISABLE +#define __OTGFS_CLK_ENABLE __HAL_RCC_OTGFS_CLK_ENABLE +#define __OTGFS_CLK_SLEEP_DISABLE __HAL_RCC_OTGFS_CLK_SLEEP_DISABLE +#define __OTGFS_CLK_SLEEP_ENABLE __HAL_RCC_OTGFS_CLK_SLEEP_ENABLE +#define __OTGFS_FORCE_RESET __HAL_RCC_OTGFS_FORCE_RESET +#define __OTGFS_RELEASE_RESET __HAL_RCC_OTGFS_RELEASE_RESET +#define __PWR_CLK_DISABLE __HAL_RCC_PWR_CLK_DISABLE +#define __PWR_CLK_ENABLE __HAL_RCC_PWR_CLK_ENABLE +#define __PWR_CLK_SLEEP_DISABLE __HAL_RCC_PWR_CLK_SLEEP_DISABLE +#define __PWR_CLK_SLEEP_ENABLE __HAL_RCC_PWR_CLK_SLEEP_ENABLE +#define __PWR_FORCE_RESET __HAL_RCC_PWR_FORCE_RESET +#define __PWR_RELEASE_RESET __HAL_RCC_PWR_RELEASE_RESET +#define __QSPI_CLK_DISABLE __HAL_RCC_QSPI_CLK_DISABLE +#define __QSPI_CLK_ENABLE __HAL_RCC_QSPI_CLK_ENABLE +#define __QSPI_CLK_SLEEP_DISABLE __HAL_RCC_QSPI_CLK_SLEEP_DISABLE +#define __QSPI_CLK_SLEEP_ENABLE __HAL_RCC_QSPI_CLK_SLEEP_ENABLE +#define __QSPI_FORCE_RESET __HAL_RCC_QSPI_FORCE_RESET +#define __QSPI_RELEASE_RESET __HAL_RCC_QSPI_RELEASE_RESET + +#if defined(STM32WB) +#define __HAL_RCC_QSPI_CLK_DISABLE __HAL_RCC_QUADSPI_CLK_DISABLE +#define __HAL_RCC_QSPI_CLK_ENABLE __HAL_RCC_QUADSPI_CLK_ENABLE +#define __HAL_RCC_QSPI_CLK_SLEEP_DISABLE __HAL_RCC_QUADSPI_CLK_SLEEP_DISABLE +#define __HAL_RCC_QSPI_CLK_SLEEP_ENABLE __HAL_RCC_QUADSPI_CLK_SLEEP_ENABLE +#define __HAL_RCC_QSPI_FORCE_RESET __HAL_RCC_QUADSPI_FORCE_RESET +#define __HAL_RCC_QSPI_RELEASE_RESET __HAL_RCC_QUADSPI_RELEASE_RESET +#define __HAL_RCC_QSPI_IS_CLK_ENABLED __HAL_RCC_QUADSPI_IS_CLK_ENABLED +#define __HAL_RCC_QSPI_IS_CLK_DISABLED 
__HAL_RCC_QUADSPI_IS_CLK_DISABLED +#define __HAL_RCC_QSPI_IS_CLK_SLEEP_ENABLED __HAL_RCC_QUADSPI_IS_CLK_SLEEP_ENABLED +#define __HAL_RCC_QSPI_IS_CLK_SLEEP_DISABLED __HAL_RCC_QUADSPI_IS_CLK_SLEEP_DISABLED +#define QSPI_IRQHandler QUADSPI_IRQHandler +#endif /* __HAL_RCC_QUADSPI_CLK_ENABLE */ + +#define __RNG_CLK_DISABLE __HAL_RCC_RNG_CLK_DISABLE +#define __RNG_CLK_ENABLE __HAL_RCC_RNG_CLK_ENABLE +#define __RNG_CLK_SLEEP_DISABLE __HAL_RCC_RNG_CLK_SLEEP_DISABLE +#define __RNG_CLK_SLEEP_ENABLE __HAL_RCC_RNG_CLK_SLEEP_ENABLE +#define __RNG_FORCE_RESET __HAL_RCC_RNG_FORCE_RESET +#define __RNG_RELEASE_RESET __HAL_RCC_RNG_RELEASE_RESET +#define __SAI1_CLK_DISABLE __HAL_RCC_SAI1_CLK_DISABLE +#define __SAI1_CLK_ENABLE __HAL_RCC_SAI1_CLK_ENABLE +#define __SAI1_CLK_SLEEP_DISABLE __HAL_RCC_SAI1_CLK_SLEEP_DISABLE +#define __SAI1_CLK_SLEEP_ENABLE __HAL_RCC_SAI1_CLK_SLEEP_ENABLE +#define __SAI1_FORCE_RESET __HAL_RCC_SAI1_FORCE_RESET +#define __SAI1_RELEASE_RESET __HAL_RCC_SAI1_RELEASE_RESET +#define __SAI2_CLK_DISABLE __HAL_RCC_SAI2_CLK_DISABLE +#define __SAI2_CLK_ENABLE __HAL_RCC_SAI2_CLK_ENABLE +#define __SAI2_CLK_SLEEP_DISABLE __HAL_RCC_SAI2_CLK_SLEEP_DISABLE +#define __SAI2_CLK_SLEEP_ENABLE __HAL_RCC_SAI2_CLK_SLEEP_ENABLE +#define __SAI2_FORCE_RESET __HAL_RCC_SAI2_FORCE_RESET +#define __SAI2_RELEASE_RESET __HAL_RCC_SAI2_RELEASE_RESET +#define __SDIO_CLK_DISABLE __HAL_RCC_SDIO_CLK_DISABLE +#define __SDIO_CLK_ENABLE __HAL_RCC_SDIO_CLK_ENABLE +#define __SDMMC_CLK_DISABLE __HAL_RCC_SDMMC_CLK_DISABLE +#define __SDMMC_CLK_ENABLE __HAL_RCC_SDMMC_CLK_ENABLE +#define __SDMMC_CLK_SLEEP_DISABLE __HAL_RCC_SDMMC_CLK_SLEEP_DISABLE +#define __SDMMC_CLK_SLEEP_ENABLE __HAL_RCC_SDMMC_CLK_SLEEP_ENABLE +#define __SDMMC_FORCE_RESET __HAL_RCC_SDMMC_FORCE_RESET +#define __SDMMC_RELEASE_RESET __HAL_RCC_SDMMC_RELEASE_RESET +#define __SPI1_CLK_DISABLE __HAL_RCC_SPI1_CLK_DISABLE +#define __SPI1_CLK_ENABLE __HAL_RCC_SPI1_CLK_ENABLE +#define __SPI1_CLK_SLEEP_DISABLE __HAL_RCC_SPI1_CLK_SLEEP_DISABLE +#define __SPI1_CLK_SLEEP_ENABLE __HAL_RCC_SPI1_CLK_SLEEP_ENABLE +#define __SPI1_FORCE_RESET __HAL_RCC_SPI1_FORCE_RESET +#define __SPI1_RELEASE_RESET __HAL_RCC_SPI1_RELEASE_RESET +#define __SPI2_CLK_DISABLE __HAL_RCC_SPI2_CLK_DISABLE +#define __SPI2_CLK_ENABLE __HAL_RCC_SPI2_CLK_ENABLE +#define __SPI2_CLK_SLEEP_DISABLE __HAL_RCC_SPI2_CLK_SLEEP_DISABLE +#define __SPI2_CLK_SLEEP_ENABLE __HAL_RCC_SPI2_CLK_SLEEP_ENABLE +#define __SPI2_FORCE_RESET __HAL_RCC_SPI2_FORCE_RESET +#define __SPI2_RELEASE_RESET __HAL_RCC_SPI2_RELEASE_RESET +#define __SPI3_CLK_DISABLE __HAL_RCC_SPI3_CLK_DISABLE +#define __SPI3_CLK_ENABLE __HAL_RCC_SPI3_CLK_ENABLE +#define __SPI3_CLK_SLEEP_DISABLE __HAL_RCC_SPI3_CLK_SLEEP_DISABLE +#define __SPI3_CLK_SLEEP_ENABLE __HAL_RCC_SPI3_CLK_SLEEP_ENABLE +#define __SPI3_FORCE_RESET __HAL_RCC_SPI3_FORCE_RESET +#define __SPI3_RELEASE_RESET __HAL_RCC_SPI3_RELEASE_RESET +#define __SRAM_CLK_DISABLE __HAL_RCC_SRAM_CLK_DISABLE +#define __SRAM_CLK_ENABLE __HAL_RCC_SRAM_CLK_ENABLE +#define __SRAM1_CLK_SLEEP_DISABLE __HAL_RCC_SRAM1_CLK_SLEEP_DISABLE +#define __SRAM1_CLK_SLEEP_ENABLE __HAL_RCC_SRAM1_CLK_SLEEP_ENABLE +#define __SRAM2_CLK_SLEEP_DISABLE __HAL_RCC_SRAM2_CLK_SLEEP_DISABLE +#define __SRAM2_CLK_SLEEP_ENABLE __HAL_RCC_SRAM2_CLK_SLEEP_ENABLE +#define __SWPMI1_CLK_DISABLE __HAL_RCC_SWPMI1_CLK_DISABLE +#define __SWPMI1_CLK_ENABLE __HAL_RCC_SWPMI1_CLK_ENABLE +#define __SWPMI1_CLK_SLEEP_DISABLE __HAL_RCC_SWPMI1_CLK_SLEEP_DISABLE +#define __SWPMI1_CLK_SLEEP_ENABLE __HAL_RCC_SWPMI1_CLK_SLEEP_ENABLE +#define __SWPMI1_FORCE_RESET 
__HAL_RCC_SWPMI1_FORCE_RESET +#define __SWPMI1_RELEASE_RESET __HAL_RCC_SWPMI1_RELEASE_RESET +#define __SYSCFG_CLK_DISABLE __HAL_RCC_SYSCFG_CLK_DISABLE +#define __SYSCFG_CLK_ENABLE __HAL_RCC_SYSCFG_CLK_ENABLE +#define __SYSCFG_CLK_SLEEP_DISABLE __HAL_RCC_SYSCFG_CLK_SLEEP_DISABLE +#define __SYSCFG_CLK_SLEEP_ENABLE __HAL_RCC_SYSCFG_CLK_SLEEP_ENABLE +#define __SYSCFG_FORCE_RESET __HAL_RCC_SYSCFG_FORCE_RESET +#define __SYSCFG_RELEASE_RESET __HAL_RCC_SYSCFG_RELEASE_RESET +#define __TIM1_CLK_DISABLE __HAL_RCC_TIM1_CLK_DISABLE +#define __TIM1_CLK_ENABLE __HAL_RCC_TIM1_CLK_ENABLE +#define __TIM1_CLK_SLEEP_DISABLE __HAL_RCC_TIM1_CLK_SLEEP_DISABLE +#define __TIM1_CLK_SLEEP_ENABLE __HAL_RCC_TIM1_CLK_SLEEP_ENABLE +#define __TIM1_FORCE_RESET __HAL_RCC_TIM1_FORCE_RESET +#define __TIM1_RELEASE_RESET __HAL_RCC_TIM1_RELEASE_RESET +#define __TIM10_CLK_DISABLE __HAL_RCC_TIM10_CLK_DISABLE +#define __TIM10_CLK_ENABLE __HAL_RCC_TIM10_CLK_ENABLE +#define __TIM10_FORCE_RESET __HAL_RCC_TIM10_FORCE_RESET +#define __TIM10_RELEASE_RESET __HAL_RCC_TIM10_RELEASE_RESET +#define __TIM11_CLK_DISABLE __HAL_RCC_TIM11_CLK_DISABLE +#define __TIM11_CLK_ENABLE __HAL_RCC_TIM11_CLK_ENABLE +#define __TIM11_FORCE_RESET __HAL_RCC_TIM11_FORCE_RESET +#define __TIM11_RELEASE_RESET __HAL_RCC_TIM11_RELEASE_RESET +#define __TIM12_CLK_DISABLE __HAL_RCC_TIM12_CLK_DISABLE +#define __TIM12_CLK_ENABLE __HAL_RCC_TIM12_CLK_ENABLE +#define __TIM12_FORCE_RESET __HAL_RCC_TIM12_FORCE_RESET +#define __TIM12_RELEASE_RESET __HAL_RCC_TIM12_RELEASE_RESET +#define __TIM13_CLK_DISABLE __HAL_RCC_TIM13_CLK_DISABLE +#define __TIM13_CLK_ENABLE __HAL_RCC_TIM13_CLK_ENABLE +#define __TIM13_FORCE_RESET __HAL_RCC_TIM13_FORCE_RESET +#define __TIM13_RELEASE_RESET __HAL_RCC_TIM13_RELEASE_RESET +#define __TIM14_CLK_DISABLE __HAL_RCC_TIM14_CLK_DISABLE +#define __TIM14_CLK_ENABLE __HAL_RCC_TIM14_CLK_ENABLE +#define __TIM14_FORCE_RESET __HAL_RCC_TIM14_FORCE_RESET +#define __TIM14_RELEASE_RESET __HAL_RCC_TIM14_RELEASE_RESET +#define __TIM15_CLK_DISABLE __HAL_RCC_TIM15_CLK_DISABLE +#define __TIM15_CLK_ENABLE __HAL_RCC_TIM15_CLK_ENABLE +#define __TIM15_CLK_SLEEP_DISABLE __HAL_RCC_TIM15_CLK_SLEEP_DISABLE +#define __TIM15_CLK_SLEEP_ENABLE __HAL_RCC_TIM15_CLK_SLEEP_ENABLE +#define __TIM15_FORCE_RESET __HAL_RCC_TIM15_FORCE_RESET +#define __TIM15_RELEASE_RESET __HAL_RCC_TIM15_RELEASE_RESET +#define __TIM16_CLK_DISABLE __HAL_RCC_TIM16_CLK_DISABLE +#define __TIM16_CLK_ENABLE __HAL_RCC_TIM16_CLK_ENABLE +#define __TIM16_CLK_SLEEP_DISABLE __HAL_RCC_TIM16_CLK_SLEEP_DISABLE +#define __TIM16_CLK_SLEEP_ENABLE __HAL_RCC_TIM16_CLK_SLEEP_ENABLE +#define __TIM16_FORCE_RESET __HAL_RCC_TIM16_FORCE_RESET +#define __TIM16_RELEASE_RESET __HAL_RCC_TIM16_RELEASE_RESET +#define __TIM17_CLK_DISABLE __HAL_RCC_TIM17_CLK_DISABLE +#define __TIM17_CLK_ENABLE __HAL_RCC_TIM17_CLK_ENABLE +#define __TIM17_CLK_SLEEP_DISABLE __HAL_RCC_TIM17_CLK_SLEEP_DISABLE +#define __TIM17_CLK_SLEEP_ENABLE __HAL_RCC_TIM17_CLK_SLEEP_ENABLE +#define __TIM17_FORCE_RESET __HAL_RCC_TIM17_FORCE_RESET +#define __TIM17_RELEASE_RESET __HAL_RCC_TIM17_RELEASE_RESET +#define __TIM2_CLK_DISABLE __HAL_RCC_TIM2_CLK_DISABLE +#define __TIM2_CLK_ENABLE __HAL_RCC_TIM2_CLK_ENABLE +#define __TIM2_CLK_SLEEP_DISABLE __HAL_RCC_TIM2_CLK_SLEEP_DISABLE +#define __TIM2_CLK_SLEEP_ENABLE __HAL_RCC_TIM2_CLK_SLEEP_ENABLE +#define __TIM2_FORCE_RESET __HAL_RCC_TIM2_FORCE_RESET +#define __TIM2_RELEASE_RESET __HAL_RCC_TIM2_RELEASE_RESET +#define __TIM3_CLK_DISABLE __HAL_RCC_TIM3_CLK_DISABLE +#define __TIM3_CLK_ENABLE __HAL_RCC_TIM3_CLK_ENABLE +#define 
__TIM3_CLK_SLEEP_DISABLE __HAL_RCC_TIM3_CLK_SLEEP_DISABLE +#define __TIM3_CLK_SLEEP_ENABLE __HAL_RCC_TIM3_CLK_SLEEP_ENABLE +#define __TIM3_FORCE_RESET __HAL_RCC_TIM3_FORCE_RESET +#define __TIM3_RELEASE_RESET __HAL_RCC_TIM3_RELEASE_RESET +#define __TIM4_CLK_DISABLE __HAL_RCC_TIM4_CLK_DISABLE +#define __TIM4_CLK_ENABLE __HAL_RCC_TIM4_CLK_ENABLE +#define __TIM4_CLK_SLEEP_DISABLE __HAL_RCC_TIM4_CLK_SLEEP_DISABLE +#define __TIM4_CLK_SLEEP_ENABLE __HAL_RCC_TIM4_CLK_SLEEP_ENABLE +#define __TIM4_FORCE_RESET __HAL_RCC_TIM4_FORCE_RESET +#define __TIM4_RELEASE_RESET __HAL_RCC_TIM4_RELEASE_RESET +#define __TIM5_CLK_DISABLE __HAL_RCC_TIM5_CLK_DISABLE +#define __TIM5_CLK_ENABLE __HAL_RCC_TIM5_CLK_ENABLE +#define __TIM5_CLK_SLEEP_DISABLE __HAL_RCC_TIM5_CLK_SLEEP_DISABLE +#define __TIM5_CLK_SLEEP_ENABLE __HAL_RCC_TIM5_CLK_SLEEP_ENABLE +#define __TIM5_FORCE_RESET __HAL_RCC_TIM5_FORCE_RESET +#define __TIM5_RELEASE_RESET __HAL_RCC_TIM5_RELEASE_RESET +#define __TIM6_CLK_DISABLE __HAL_RCC_TIM6_CLK_DISABLE +#define __TIM6_CLK_ENABLE __HAL_RCC_TIM6_CLK_ENABLE +#define __TIM6_CLK_SLEEP_DISABLE __HAL_RCC_TIM6_CLK_SLEEP_DISABLE +#define __TIM6_CLK_SLEEP_ENABLE __HAL_RCC_TIM6_CLK_SLEEP_ENABLE +#define __TIM6_FORCE_RESET __HAL_RCC_TIM6_FORCE_RESET +#define __TIM6_RELEASE_RESET __HAL_RCC_TIM6_RELEASE_RESET +#define __TIM7_CLK_DISABLE __HAL_RCC_TIM7_CLK_DISABLE +#define __TIM7_CLK_ENABLE __HAL_RCC_TIM7_CLK_ENABLE +#define __TIM7_CLK_SLEEP_DISABLE __HAL_RCC_TIM7_CLK_SLEEP_DISABLE +#define __TIM7_CLK_SLEEP_ENABLE __HAL_RCC_TIM7_CLK_SLEEP_ENABLE +#define __TIM7_FORCE_RESET __HAL_RCC_TIM7_FORCE_RESET +#define __TIM7_RELEASE_RESET __HAL_RCC_TIM7_RELEASE_RESET +#define __TIM8_CLK_DISABLE __HAL_RCC_TIM8_CLK_DISABLE +#define __TIM8_CLK_ENABLE __HAL_RCC_TIM8_CLK_ENABLE +#define __TIM8_CLK_SLEEP_DISABLE __HAL_RCC_TIM8_CLK_SLEEP_DISABLE +#define __TIM8_CLK_SLEEP_ENABLE __HAL_RCC_TIM8_CLK_SLEEP_ENABLE +#define __TIM8_FORCE_RESET __HAL_RCC_TIM8_FORCE_RESET +#define __TIM8_RELEASE_RESET __HAL_RCC_TIM8_RELEASE_RESET +#define __TIM9_CLK_DISABLE __HAL_RCC_TIM9_CLK_DISABLE +#define __TIM9_CLK_ENABLE __HAL_RCC_TIM9_CLK_ENABLE +#define __TIM9_FORCE_RESET __HAL_RCC_TIM9_FORCE_RESET +#define __TIM9_RELEASE_RESET __HAL_RCC_TIM9_RELEASE_RESET +#define __TSC_CLK_DISABLE __HAL_RCC_TSC_CLK_DISABLE +#define __TSC_CLK_ENABLE __HAL_RCC_TSC_CLK_ENABLE +#define __TSC_CLK_SLEEP_DISABLE __HAL_RCC_TSC_CLK_SLEEP_DISABLE +#define __TSC_CLK_SLEEP_ENABLE __HAL_RCC_TSC_CLK_SLEEP_ENABLE +#define __TSC_FORCE_RESET __HAL_RCC_TSC_FORCE_RESET +#define __TSC_RELEASE_RESET __HAL_RCC_TSC_RELEASE_RESET +#define __UART4_CLK_DISABLE __HAL_RCC_UART4_CLK_DISABLE +#define __UART4_CLK_ENABLE __HAL_RCC_UART4_CLK_ENABLE +#define __UART4_CLK_SLEEP_DISABLE __HAL_RCC_UART4_CLK_SLEEP_DISABLE +#define __UART4_CLK_SLEEP_ENABLE __HAL_RCC_UART4_CLK_SLEEP_ENABLE +#define __UART4_FORCE_RESET __HAL_RCC_UART4_FORCE_RESET +#define __UART4_RELEASE_RESET __HAL_RCC_UART4_RELEASE_RESET +#define __UART5_CLK_DISABLE __HAL_RCC_UART5_CLK_DISABLE +#define __UART5_CLK_ENABLE __HAL_RCC_UART5_CLK_ENABLE +#define __UART5_CLK_SLEEP_DISABLE __HAL_RCC_UART5_CLK_SLEEP_DISABLE +#define __UART5_CLK_SLEEP_ENABLE __HAL_RCC_UART5_CLK_SLEEP_ENABLE +#define __UART5_FORCE_RESET __HAL_RCC_UART5_FORCE_RESET +#define __UART5_RELEASE_RESET __HAL_RCC_UART5_RELEASE_RESET +#define __USART1_CLK_DISABLE __HAL_RCC_USART1_CLK_DISABLE +#define __USART1_CLK_ENABLE __HAL_RCC_USART1_CLK_ENABLE +#define __USART1_CLK_SLEEP_DISABLE __HAL_RCC_USART1_CLK_SLEEP_DISABLE +#define __USART1_CLK_SLEEP_ENABLE 
__HAL_RCC_USART1_CLK_SLEEP_ENABLE +#define __USART1_FORCE_RESET __HAL_RCC_USART1_FORCE_RESET +#define __USART1_RELEASE_RESET __HAL_RCC_USART1_RELEASE_RESET +#define __USART2_CLK_DISABLE __HAL_RCC_USART2_CLK_DISABLE +#define __USART2_CLK_ENABLE __HAL_RCC_USART2_CLK_ENABLE +#define __USART2_CLK_SLEEP_DISABLE __HAL_RCC_USART2_CLK_SLEEP_DISABLE +#define __USART2_CLK_SLEEP_ENABLE __HAL_RCC_USART2_CLK_SLEEP_ENABLE +#define __USART2_FORCE_RESET __HAL_RCC_USART2_FORCE_RESET +#define __USART2_RELEASE_RESET __HAL_RCC_USART2_RELEASE_RESET +#define __USART3_CLK_DISABLE __HAL_RCC_USART3_CLK_DISABLE +#define __USART3_CLK_ENABLE __HAL_RCC_USART3_CLK_ENABLE +#define __USART3_CLK_SLEEP_DISABLE __HAL_RCC_USART3_CLK_SLEEP_DISABLE +#define __USART3_CLK_SLEEP_ENABLE __HAL_RCC_USART3_CLK_SLEEP_ENABLE +#define __USART3_FORCE_RESET __HAL_RCC_USART3_FORCE_RESET +#define __USART3_RELEASE_RESET __HAL_RCC_USART3_RELEASE_RESET +#define __USART4_CLK_DISABLE __HAL_RCC_UART4_CLK_DISABLE +#define __USART4_CLK_ENABLE __HAL_RCC_UART4_CLK_ENABLE +#define __USART4_CLK_SLEEP_ENABLE __HAL_RCC_UART4_CLK_SLEEP_ENABLE +#define __USART4_CLK_SLEEP_DISABLE __HAL_RCC_UART4_CLK_SLEEP_DISABLE +#define __USART4_FORCE_RESET __HAL_RCC_UART4_FORCE_RESET +#define __USART4_RELEASE_RESET __HAL_RCC_UART4_RELEASE_RESET +#define __USART5_CLK_DISABLE __HAL_RCC_UART5_CLK_DISABLE +#define __USART5_CLK_ENABLE __HAL_RCC_UART5_CLK_ENABLE +#define __USART5_CLK_SLEEP_ENABLE __HAL_RCC_UART5_CLK_SLEEP_ENABLE +#define __USART5_CLK_SLEEP_DISABLE __HAL_RCC_UART5_CLK_SLEEP_DISABLE +#define __USART5_FORCE_RESET __HAL_RCC_UART5_FORCE_RESET +#define __USART5_RELEASE_RESET __HAL_RCC_UART5_RELEASE_RESET +#define __USART7_CLK_DISABLE __HAL_RCC_UART7_CLK_DISABLE +#define __USART7_CLK_ENABLE __HAL_RCC_UART7_CLK_ENABLE +#define __USART7_FORCE_RESET __HAL_RCC_UART7_FORCE_RESET +#define __USART7_RELEASE_RESET __HAL_RCC_UART7_RELEASE_RESET +#define __USART8_CLK_DISABLE __HAL_RCC_UART8_CLK_DISABLE +#define __USART8_CLK_ENABLE __HAL_RCC_UART8_CLK_ENABLE +#define __USART8_FORCE_RESET __HAL_RCC_UART8_FORCE_RESET +#define __USART8_RELEASE_RESET __HAL_RCC_UART8_RELEASE_RESET +#define __USB_CLK_DISABLE __HAL_RCC_USB_CLK_DISABLE +#define __USB_CLK_ENABLE __HAL_RCC_USB_CLK_ENABLE +#define __USB_FORCE_RESET __HAL_RCC_USB_FORCE_RESET +#define __USB_CLK_SLEEP_ENABLE __HAL_RCC_USB_CLK_SLEEP_ENABLE +#define __USB_CLK_SLEEP_DISABLE __HAL_RCC_USB_CLK_SLEEP_DISABLE +#define __USB_OTG_FS_CLK_DISABLE __HAL_RCC_USB_OTG_FS_CLK_DISABLE +#define __USB_OTG_FS_CLK_ENABLE __HAL_RCC_USB_OTG_FS_CLK_ENABLE +#define __USB_RELEASE_RESET __HAL_RCC_USB_RELEASE_RESET + +#if defined(STM32H7) +#define __HAL_RCC_WWDG_CLK_DISABLE __HAL_RCC_WWDG1_CLK_DISABLE +#define __HAL_RCC_WWDG_CLK_ENABLE __HAL_RCC_WWDG1_CLK_ENABLE +#define __HAL_RCC_WWDG_CLK_SLEEP_DISABLE __HAL_RCC_WWDG1_CLK_SLEEP_DISABLE +#define __HAL_RCC_WWDG_CLK_SLEEP_ENABLE __HAL_RCC_WWDG1_CLK_SLEEP_ENABLE + +#define __HAL_RCC_WWDG_FORCE_RESET ((void)0U) /* Not available on the STM32H7*/ +#define __HAL_RCC_WWDG_RELEASE_RESET ((void)0U) /* Not available on the STM32H7*/ + + +#define __HAL_RCC_WWDG_IS_CLK_ENABLED __HAL_RCC_WWDG1_IS_CLK_ENABLED +#define __HAL_RCC_WWDG_IS_CLK_DISABLED __HAL_RCC_WWDG1_IS_CLK_DISABLED +#define RCC_SPI4CLKSOURCE_D2PCLK1 RCC_SPI4CLKSOURCE_D2PCLK2 +#define RCC_SPI5CLKSOURCE_D2PCLK1 RCC_SPI5CLKSOURCE_D2PCLK2 +#define RCC_SPI45CLKSOURCE_D2PCLK1 RCC_SPI45CLKSOURCE_D2PCLK2 +#define RCC_SPI45CLKSOURCE_CDPCLK1 RCC_SPI45CLKSOURCE_CDPCLK2 +#define RCC_SPI45CLKSOURCE_PCLK1 RCC_SPI45CLKSOURCE_PCLK2 +#endif + +#define __WWDG_CLK_DISABLE 
__HAL_RCC_WWDG_CLK_DISABLE +#define __WWDG_CLK_ENABLE __HAL_RCC_WWDG_CLK_ENABLE +#define __WWDG_CLK_SLEEP_DISABLE __HAL_RCC_WWDG_CLK_SLEEP_DISABLE +#define __WWDG_CLK_SLEEP_ENABLE __HAL_RCC_WWDG_CLK_SLEEP_ENABLE +#define __WWDG_FORCE_RESET __HAL_RCC_WWDG_FORCE_RESET +#define __WWDG_RELEASE_RESET __HAL_RCC_WWDG_RELEASE_RESET + +#define __TIM21_CLK_ENABLE __HAL_RCC_TIM21_CLK_ENABLE +#define __TIM21_CLK_DISABLE __HAL_RCC_TIM21_CLK_DISABLE +#define __TIM21_FORCE_RESET __HAL_RCC_TIM21_FORCE_RESET +#define __TIM21_RELEASE_RESET __HAL_RCC_TIM21_RELEASE_RESET +#define __TIM21_CLK_SLEEP_ENABLE __HAL_RCC_TIM21_CLK_SLEEP_ENABLE +#define __TIM21_CLK_SLEEP_DISABLE __HAL_RCC_TIM21_CLK_SLEEP_DISABLE +#define __TIM22_CLK_ENABLE __HAL_RCC_TIM22_CLK_ENABLE +#define __TIM22_CLK_DISABLE __HAL_RCC_TIM22_CLK_DISABLE +#define __TIM22_FORCE_RESET __HAL_RCC_TIM22_FORCE_RESET +#define __TIM22_RELEASE_RESET __HAL_RCC_TIM22_RELEASE_RESET +#define __TIM22_CLK_SLEEP_ENABLE __HAL_RCC_TIM22_CLK_SLEEP_ENABLE +#define __TIM22_CLK_SLEEP_DISABLE __HAL_RCC_TIM22_CLK_SLEEP_DISABLE +#define __CRS_CLK_DISABLE __HAL_RCC_CRS_CLK_DISABLE +#define __CRS_CLK_ENABLE __HAL_RCC_CRS_CLK_ENABLE +#define __CRS_CLK_SLEEP_DISABLE __HAL_RCC_CRS_CLK_SLEEP_DISABLE +#define __CRS_CLK_SLEEP_ENABLE __HAL_RCC_CRS_CLK_SLEEP_ENABLE +#define __CRS_FORCE_RESET __HAL_RCC_CRS_FORCE_RESET +#define __CRS_RELEASE_RESET __HAL_RCC_CRS_RELEASE_RESET +#define __RCC_BACKUPRESET_FORCE __HAL_RCC_BACKUPRESET_FORCE +#define __RCC_BACKUPRESET_RELEASE __HAL_RCC_BACKUPRESET_RELEASE + +#define __USB_OTG_FS_FORCE_RESET __HAL_RCC_USB_OTG_FS_FORCE_RESET +#define __USB_OTG_FS_RELEASE_RESET __HAL_RCC_USB_OTG_FS_RELEASE_RESET +#define __USB_OTG_FS_CLK_SLEEP_ENABLE __HAL_RCC_USB_OTG_FS_CLK_SLEEP_ENABLE +#define __USB_OTG_FS_CLK_SLEEP_DISABLE __HAL_RCC_USB_OTG_FS_CLK_SLEEP_DISABLE +#define __USB_OTG_HS_CLK_DISABLE __HAL_RCC_USB_OTG_HS_CLK_DISABLE +#define __USB_OTG_HS_CLK_ENABLE __HAL_RCC_USB_OTG_HS_CLK_ENABLE +#define __USB_OTG_HS_ULPI_CLK_ENABLE __HAL_RCC_USB_OTG_HS_ULPI_CLK_ENABLE +#define __USB_OTG_HS_ULPI_CLK_DISABLE __HAL_RCC_USB_OTG_HS_ULPI_CLK_DISABLE +#define __TIM9_CLK_SLEEP_ENABLE __HAL_RCC_TIM9_CLK_SLEEP_ENABLE +#define __TIM9_CLK_SLEEP_DISABLE __HAL_RCC_TIM9_CLK_SLEEP_DISABLE +#define __TIM10_CLK_SLEEP_ENABLE __HAL_RCC_TIM10_CLK_SLEEP_ENABLE +#define __TIM10_CLK_SLEEP_DISABLE __HAL_RCC_TIM10_CLK_SLEEP_DISABLE +#define __TIM11_CLK_SLEEP_ENABLE __HAL_RCC_TIM11_CLK_SLEEP_ENABLE +#define __TIM11_CLK_SLEEP_DISABLE __HAL_RCC_TIM11_CLK_SLEEP_DISABLE +#define __ETHMACPTP_CLK_SLEEP_ENABLE __HAL_RCC_ETHMACPTP_CLK_SLEEP_ENABLE +#define __ETHMACPTP_CLK_SLEEP_DISABLE __HAL_RCC_ETHMACPTP_CLK_SLEEP_DISABLE +#define __ETHMACPTP_CLK_ENABLE __HAL_RCC_ETHMACPTP_CLK_ENABLE +#define __ETHMACPTP_CLK_DISABLE __HAL_RCC_ETHMACPTP_CLK_DISABLE +#define __HASH_CLK_ENABLE __HAL_RCC_HASH_CLK_ENABLE +#define __HASH_FORCE_RESET __HAL_RCC_HASH_FORCE_RESET +#define __HASH_RELEASE_RESET __HAL_RCC_HASH_RELEASE_RESET +#define __HASH_CLK_SLEEP_ENABLE __HAL_RCC_HASH_CLK_SLEEP_ENABLE +#define __HASH_CLK_SLEEP_DISABLE __HAL_RCC_HASH_CLK_SLEEP_DISABLE +#define __HASH_CLK_DISABLE __HAL_RCC_HASH_CLK_DISABLE +#define __SPI5_CLK_ENABLE __HAL_RCC_SPI5_CLK_ENABLE +#define __SPI5_CLK_DISABLE __HAL_RCC_SPI5_CLK_DISABLE +#define __SPI5_FORCE_RESET __HAL_RCC_SPI5_FORCE_RESET +#define __SPI5_RELEASE_RESET __HAL_RCC_SPI5_RELEASE_RESET +#define __SPI5_CLK_SLEEP_ENABLE __HAL_RCC_SPI5_CLK_SLEEP_ENABLE +#define __SPI5_CLK_SLEEP_DISABLE __HAL_RCC_SPI5_CLK_SLEEP_DISABLE +#define __SPI6_CLK_ENABLE __HAL_RCC_SPI6_CLK_ENABLE 
+#define __SPI6_CLK_DISABLE __HAL_RCC_SPI6_CLK_DISABLE +#define __SPI6_FORCE_RESET __HAL_RCC_SPI6_FORCE_RESET +#define __SPI6_RELEASE_RESET __HAL_RCC_SPI6_RELEASE_RESET +#define __SPI6_CLK_SLEEP_ENABLE __HAL_RCC_SPI6_CLK_SLEEP_ENABLE +#define __SPI6_CLK_SLEEP_DISABLE __HAL_RCC_SPI6_CLK_SLEEP_DISABLE +#define __LTDC_CLK_ENABLE __HAL_RCC_LTDC_CLK_ENABLE +#define __LTDC_CLK_DISABLE __HAL_RCC_LTDC_CLK_DISABLE +#define __LTDC_FORCE_RESET __HAL_RCC_LTDC_FORCE_RESET +#define __LTDC_RELEASE_RESET __HAL_RCC_LTDC_RELEASE_RESET +#define __LTDC_CLK_SLEEP_ENABLE __HAL_RCC_LTDC_CLK_SLEEP_ENABLE +#define __ETHMAC_CLK_SLEEP_ENABLE __HAL_RCC_ETHMAC_CLK_SLEEP_ENABLE +#define __ETHMAC_CLK_SLEEP_DISABLE __HAL_RCC_ETHMAC_CLK_SLEEP_DISABLE +#define __ETHMACTX_CLK_SLEEP_ENABLE __HAL_RCC_ETHMACTX_CLK_SLEEP_ENABLE +#define __ETHMACTX_CLK_SLEEP_DISABLE __HAL_RCC_ETHMACTX_CLK_SLEEP_DISABLE +#define __ETHMACRX_CLK_SLEEP_ENABLE __HAL_RCC_ETHMACRX_CLK_SLEEP_ENABLE +#define __ETHMACRX_CLK_SLEEP_DISABLE __HAL_RCC_ETHMACRX_CLK_SLEEP_DISABLE +#define __TIM12_CLK_SLEEP_ENABLE __HAL_RCC_TIM12_CLK_SLEEP_ENABLE +#define __TIM12_CLK_SLEEP_DISABLE __HAL_RCC_TIM12_CLK_SLEEP_DISABLE +#define __TIM13_CLK_SLEEP_ENABLE __HAL_RCC_TIM13_CLK_SLEEP_ENABLE +#define __TIM13_CLK_SLEEP_DISABLE __HAL_RCC_TIM13_CLK_SLEEP_DISABLE +#define __TIM14_CLK_SLEEP_ENABLE __HAL_RCC_TIM14_CLK_SLEEP_ENABLE +#define __TIM14_CLK_SLEEP_DISABLE __HAL_RCC_TIM14_CLK_SLEEP_DISABLE +#define __BKPSRAM_CLK_ENABLE __HAL_RCC_BKPSRAM_CLK_ENABLE +#define __BKPSRAM_CLK_DISABLE __HAL_RCC_BKPSRAM_CLK_DISABLE +#define __BKPSRAM_CLK_SLEEP_ENABLE __HAL_RCC_BKPSRAM_CLK_SLEEP_ENABLE +#define __BKPSRAM_CLK_SLEEP_DISABLE __HAL_RCC_BKPSRAM_CLK_SLEEP_DISABLE +#define __CCMDATARAMEN_CLK_ENABLE __HAL_RCC_CCMDATARAMEN_CLK_ENABLE +#define __CCMDATARAMEN_CLK_DISABLE __HAL_RCC_CCMDATARAMEN_CLK_DISABLE +#define __USART6_CLK_ENABLE __HAL_RCC_USART6_CLK_ENABLE +#define __USART6_CLK_DISABLE __HAL_RCC_USART6_CLK_DISABLE +#define __USART6_FORCE_RESET __HAL_RCC_USART6_FORCE_RESET +#define __USART6_RELEASE_RESET __HAL_RCC_USART6_RELEASE_RESET +#define __USART6_CLK_SLEEP_ENABLE __HAL_RCC_USART6_CLK_SLEEP_ENABLE +#define __USART6_CLK_SLEEP_DISABLE __HAL_RCC_USART6_CLK_SLEEP_DISABLE +#define __SPI4_CLK_ENABLE __HAL_RCC_SPI4_CLK_ENABLE +#define __SPI4_CLK_DISABLE __HAL_RCC_SPI4_CLK_DISABLE +#define __SPI4_FORCE_RESET __HAL_RCC_SPI4_FORCE_RESET +#define __SPI4_RELEASE_RESET __HAL_RCC_SPI4_RELEASE_RESET +#define __SPI4_CLK_SLEEP_ENABLE __HAL_RCC_SPI4_CLK_SLEEP_ENABLE +#define __SPI4_CLK_SLEEP_DISABLE __HAL_RCC_SPI4_CLK_SLEEP_DISABLE +#define __GPIOI_CLK_ENABLE __HAL_RCC_GPIOI_CLK_ENABLE +#define __GPIOI_CLK_DISABLE __HAL_RCC_GPIOI_CLK_DISABLE +#define __GPIOI_FORCE_RESET __HAL_RCC_GPIOI_FORCE_RESET +#define __GPIOI_RELEASE_RESET __HAL_RCC_GPIOI_RELEASE_RESET +#define __GPIOI_CLK_SLEEP_ENABLE __HAL_RCC_GPIOI_CLK_SLEEP_ENABLE +#define __GPIOI_CLK_SLEEP_DISABLE __HAL_RCC_GPIOI_CLK_SLEEP_DISABLE +#define __GPIOJ_CLK_ENABLE __HAL_RCC_GPIOJ_CLK_ENABLE +#define __GPIOJ_CLK_DISABLE __HAL_RCC_GPIOJ_CLK_DISABLE +#define __GPIOJ_FORCE_RESET __HAL_RCC_GPIOJ_FORCE_RESET +#define __GPIOJ_RELEASE_RESET __HAL_RCC_GPIOJ_RELEASE_RESET +#define __GPIOJ_CLK_SLEEP_ENABLE __HAL_RCC_GPIOJ_CLK_SLEEP_ENABLE +#define __GPIOJ_CLK_SLEEP_DISABLE __HAL_RCC_GPIOJ_CLK_SLEEP_DISABLE +#define __GPIOK_CLK_ENABLE __HAL_RCC_GPIOK_CLK_ENABLE +#define __GPIOK_CLK_DISABLE __HAL_RCC_GPIOK_CLK_DISABLE +#define __GPIOK_RELEASE_RESET __HAL_RCC_GPIOK_RELEASE_RESET +#define __GPIOK_CLK_SLEEP_ENABLE __HAL_RCC_GPIOK_CLK_SLEEP_ENABLE +#define 
__GPIOK_CLK_SLEEP_DISABLE __HAL_RCC_GPIOK_CLK_SLEEP_DISABLE +#define __ETH_CLK_ENABLE __HAL_RCC_ETH_CLK_ENABLE +#define __ETH_CLK_DISABLE __HAL_RCC_ETH_CLK_DISABLE +#define __DCMI_CLK_ENABLE __HAL_RCC_DCMI_CLK_ENABLE +#define __DCMI_CLK_DISABLE __HAL_RCC_DCMI_CLK_DISABLE +#define __DCMI_FORCE_RESET __HAL_RCC_DCMI_FORCE_RESET +#define __DCMI_RELEASE_RESET __HAL_RCC_DCMI_RELEASE_RESET +#define __DCMI_CLK_SLEEP_ENABLE __HAL_RCC_DCMI_CLK_SLEEP_ENABLE +#define __DCMI_CLK_SLEEP_DISABLE __HAL_RCC_DCMI_CLK_SLEEP_DISABLE +#define __UART7_CLK_ENABLE __HAL_RCC_UART7_CLK_ENABLE +#define __UART7_CLK_DISABLE __HAL_RCC_UART7_CLK_DISABLE +#define __UART7_RELEASE_RESET __HAL_RCC_UART7_RELEASE_RESET +#define __UART7_FORCE_RESET __HAL_RCC_UART7_FORCE_RESET +#define __UART7_CLK_SLEEP_ENABLE __HAL_RCC_UART7_CLK_SLEEP_ENABLE +#define __UART7_CLK_SLEEP_DISABLE __HAL_RCC_UART7_CLK_SLEEP_DISABLE +#define __UART8_CLK_ENABLE __HAL_RCC_UART8_CLK_ENABLE +#define __UART8_CLK_DISABLE __HAL_RCC_UART8_CLK_DISABLE +#define __UART8_FORCE_RESET __HAL_RCC_UART8_FORCE_RESET +#define __UART8_RELEASE_RESET __HAL_RCC_UART8_RELEASE_RESET +#define __UART8_CLK_SLEEP_ENABLE __HAL_RCC_UART8_CLK_SLEEP_ENABLE +#define __UART8_CLK_SLEEP_DISABLE __HAL_RCC_UART8_CLK_SLEEP_DISABLE +#define __OTGHS_CLK_SLEEP_ENABLE __HAL_RCC_USB_OTG_HS_CLK_SLEEP_ENABLE +#define __OTGHS_CLK_SLEEP_DISABLE __HAL_RCC_USB_OTG_HS_CLK_SLEEP_DISABLE +#define __OTGHS_FORCE_RESET __HAL_RCC_USB_OTG_HS_FORCE_RESET +#define __OTGHS_RELEASE_RESET __HAL_RCC_USB_OTG_HS_RELEASE_RESET +#define __OTGHSULPI_CLK_SLEEP_ENABLE __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_ENABLE +#define __OTGHSULPI_CLK_SLEEP_DISABLE __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_DISABLE +#define __HAL_RCC_OTGHS_CLK_SLEEP_ENABLE __HAL_RCC_USB_OTG_HS_CLK_SLEEP_ENABLE +#define __HAL_RCC_OTGHS_CLK_SLEEP_DISABLE __HAL_RCC_USB_OTG_HS_CLK_SLEEP_DISABLE +#define __HAL_RCC_OTGHS_IS_CLK_SLEEP_ENABLED __HAL_RCC_USB_OTG_HS_IS_CLK_SLEEP_ENABLED +#define __HAL_RCC_OTGHS_IS_CLK_SLEEP_DISABLED __HAL_RCC_USB_OTG_HS_IS_CLK_SLEEP_DISABLED +#define __HAL_RCC_OTGHS_FORCE_RESET __HAL_RCC_USB_OTG_HS_FORCE_RESET +#define __HAL_RCC_OTGHS_RELEASE_RESET __HAL_RCC_USB_OTG_HS_RELEASE_RESET +#define __HAL_RCC_OTGHSULPI_CLK_SLEEP_ENABLE __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_ENABLE +#define __HAL_RCC_OTGHSULPI_CLK_SLEEP_DISABLE __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_DISABLE +#define __HAL_RCC_OTGHSULPI_IS_CLK_SLEEP_ENABLED __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_SLEEP_ENABLED +#define __HAL_RCC_OTGHSULPI_IS_CLK_SLEEP_DISABLED __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_SLEEP_DISABLED +#define __SRAM3_CLK_SLEEP_ENABLE __HAL_RCC_SRAM3_CLK_SLEEP_ENABLE +#define __CAN2_CLK_SLEEP_ENABLE __HAL_RCC_CAN2_CLK_SLEEP_ENABLE +#define __CAN2_CLK_SLEEP_DISABLE __HAL_RCC_CAN2_CLK_SLEEP_DISABLE +#define __DAC_CLK_SLEEP_ENABLE __HAL_RCC_DAC_CLK_SLEEP_ENABLE +#define __DAC_CLK_SLEEP_DISABLE __HAL_RCC_DAC_CLK_SLEEP_DISABLE +#define __ADC2_CLK_SLEEP_ENABLE __HAL_RCC_ADC2_CLK_SLEEP_ENABLE +#define __ADC2_CLK_SLEEP_DISABLE __HAL_RCC_ADC2_CLK_SLEEP_DISABLE +#define __ADC3_CLK_SLEEP_ENABLE __HAL_RCC_ADC3_CLK_SLEEP_ENABLE +#define __ADC3_CLK_SLEEP_DISABLE __HAL_RCC_ADC3_CLK_SLEEP_DISABLE +#define __FSMC_FORCE_RESET __HAL_RCC_FSMC_FORCE_RESET +#define __FSMC_RELEASE_RESET __HAL_RCC_FSMC_RELEASE_RESET +#define __FSMC_CLK_SLEEP_ENABLE __HAL_RCC_FSMC_CLK_SLEEP_ENABLE +#define __FSMC_CLK_SLEEP_DISABLE __HAL_RCC_FSMC_CLK_SLEEP_DISABLE +#define __SDIO_FORCE_RESET __HAL_RCC_SDIO_FORCE_RESET +#define __SDIO_RELEASE_RESET __HAL_RCC_SDIO_RELEASE_RESET +#define __SDIO_CLK_SLEEP_DISABLE 
__HAL_RCC_SDIO_CLK_SLEEP_DISABLE +#define __SDIO_CLK_SLEEP_ENABLE __HAL_RCC_SDIO_CLK_SLEEP_ENABLE +#define __DMA2D_CLK_ENABLE __HAL_RCC_DMA2D_CLK_ENABLE +#define __DMA2D_CLK_DISABLE __HAL_RCC_DMA2D_CLK_DISABLE +#define __DMA2D_FORCE_RESET __HAL_RCC_DMA2D_FORCE_RESET +#define __DMA2D_RELEASE_RESET __HAL_RCC_DMA2D_RELEASE_RESET +#define __DMA2D_CLK_SLEEP_ENABLE __HAL_RCC_DMA2D_CLK_SLEEP_ENABLE +#define __DMA2D_CLK_SLEEP_DISABLE __HAL_RCC_DMA2D_CLK_SLEEP_DISABLE + +/* alias define maintained for legacy */ +#define __HAL_RCC_OTGFS_FORCE_RESET __HAL_RCC_USB_OTG_FS_FORCE_RESET +#define __HAL_RCC_OTGFS_RELEASE_RESET __HAL_RCC_USB_OTG_FS_RELEASE_RESET + +#define __ADC12_CLK_ENABLE __HAL_RCC_ADC12_CLK_ENABLE +#define __ADC12_CLK_DISABLE __HAL_RCC_ADC12_CLK_DISABLE +#define __ADC34_CLK_ENABLE __HAL_RCC_ADC34_CLK_ENABLE +#define __ADC34_CLK_DISABLE __HAL_RCC_ADC34_CLK_DISABLE +#define __DAC2_CLK_ENABLE __HAL_RCC_DAC2_CLK_ENABLE +#define __DAC2_CLK_DISABLE __HAL_RCC_DAC2_CLK_DISABLE +#define __TIM18_CLK_ENABLE __HAL_RCC_TIM18_CLK_ENABLE +#define __TIM18_CLK_DISABLE __HAL_RCC_TIM18_CLK_DISABLE +#define __TIM19_CLK_ENABLE __HAL_RCC_TIM19_CLK_ENABLE +#define __TIM19_CLK_DISABLE __HAL_RCC_TIM19_CLK_DISABLE +#define __TIM20_CLK_ENABLE __HAL_RCC_TIM20_CLK_ENABLE +#define __TIM20_CLK_DISABLE __HAL_RCC_TIM20_CLK_DISABLE +#define __HRTIM1_CLK_ENABLE __HAL_RCC_HRTIM1_CLK_ENABLE +#define __HRTIM1_CLK_DISABLE __HAL_RCC_HRTIM1_CLK_DISABLE +#define __SDADC1_CLK_ENABLE __HAL_RCC_SDADC1_CLK_ENABLE +#define __SDADC2_CLK_ENABLE __HAL_RCC_SDADC2_CLK_ENABLE +#define __SDADC3_CLK_ENABLE __HAL_RCC_SDADC3_CLK_ENABLE +#define __SDADC1_CLK_DISABLE __HAL_RCC_SDADC1_CLK_DISABLE +#define __SDADC2_CLK_DISABLE __HAL_RCC_SDADC2_CLK_DISABLE +#define __SDADC3_CLK_DISABLE __HAL_RCC_SDADC3_CLK_DISABLE + +#define __ADC12_FORCE_RESET __HAL_RCC_ADC12_FORCE_RESET +#define __ADC12_RELEASE_RESET __HAL_RCC_ADC12_RELEASE_RESET +#define __ADC34_FORCE_RESET __HAL_RCC_ADC34_FORCE_RESET +#define __ADC34_RELEASE_RESET __HAL_RCC_ADC34_RELEASE_RESET +#define __DAC2_FORCE_RESET __HAL_RCC_DAC2_FORCE_RESET +#define __DAC2_RELEASE_RESET __HAL_RCC_DAC2_RELEASE_RESET +#define __TIM18_FORCE_RESET __HAL_RCC_TIM18_FORCE_RESET +#define __TIM18_RELEASE_RESET __HAL_RCC_TIM18_RELEASE_RESET +#define __TIM19_FORCE_RESET __HAL_RCC_TIM19_FORCE_RESET +#define __TIM19_RELEASE_RESET __HAL_RCC_TIM19_RELEASE_RESET +#define __TIM20_FORCE_RESET __HAL_RCC_TIM20_FORCE_RESET +#define __TIM20_RELEASE_RESET __HAL_RCC_TIM20_RELEASE_RESET +#define __HRTIM1_FORCE_RESET __HAL_RCC_HRTIM1_FORCE_RESET +#define __HRTIM1_RELEASE_RESET __HAL_RCC_HRTIM1_RELEASE_RESET +#define __SDADC1_FORCE_RESET __HAL_RCC_SDADC1_FORCE_RESET +#define __SDADC2_FORCE_RESET __HAL_RCC_SDADC2_FORCE_RESET +#define __SDADC3_FORCE_RESET __HAL_RCC_SDADC3_FORCE_RESET +#define __SDADC1_RELEASE_RESET __HAL_RCC_SDADC1_RELEASE_RESET +#define __SDADC2_RELEASE_RESET __HAL_RCC_SDADC2_RELEASE_RESET +#define __SDADC3_RELEASE_RESET __HAL_RCC_SDADC3_RELEASE_RESET + +#define __ADC1_IS_CLK_ENABLED __HAL_RCC_ADC1_IS_CLK_ENABLED +#define __ADC1_IS_CLK_DISABLED __HAL_RCC_ADC1_IS_CLK_DISABLED +#define __ADC12_IS_CLK_ENABLED __HAL_RCC_ADC12_IS_CLK_ENABLED +#define __ADC12_IS_CLK_DISABLED __HAL_RCC_ADC12_IS_CLK_DISABLED +#define __ADC34_IS_CLK_ENABLED __HAL_RCC_ADC34_IS_CLK_ENABLED +#define __ADC34_IS_CLK_DISABLED __HAL_RCC_ADC34_IS_CLK_DISABLED +#define __CEC_IS_CLK_ENABLED __HAL_RCC_CEC_IS_CLK_ENABLED +#define __CEC_IS_CLK_DISABLED __HAL_RCC_CEC_IS_CLK_DISABLED +#define __CRC_IS_CLK_ENABLED __HAL_RCC_CRC_IS_CLK_ENABLED +#define 
__CRC_IS_CLK_DISABLED __HAL_RCC_CRC_IS_CLK_DISABLED +#define __DAC1_IS_CLK_ENABLED __HAL_RCC_DAC1_IS_CLK_ENABLED +#define __DAC1_IS_CLK_DISABLED __HAL_RCC_DAC1_IS_CLK_DISABLED +#define __DAC2_IS_CLK_ENABLED __HAL_RCC_DAC2_IS_CLK_ENABLED +#define __DAC2_IS_CLK_DISABLED __HAL_RCC_DAC2_IS_CLK_DISABLED +#define __DMA1_IS_CLK_ENABLED __HAL_RCC_DMA1_IS_CLK_ENABLED +#define __DMA1_IS_CLK_DISABLED __HAL_RCC_DMA1_IS_CLK_DISABLED +#define __DMA2_IS_CLK_ENABLED __HAL_RCC_DMA2_IS_CLK_ENABLED +#define __DMA2_IS_CLK_DISABLED __HAL_RCC_DMA2_IS_CLK_DISABLED +#define __FLITF_IS_CLK_ENABLED __HAL_RCC_FLITF_IS_CLK_ENABLED +#define __FLITF_IS_CLK_DISABLED __HAL_RCC_FLITF_IS_CLK_DISABLED +#define __FMC_IS_CLK_ENABLED __HAL_RCC_FMC_IS_CLK_ENABLED +#define __FMC_IS_CLK_DISABLED __HAL_RCC_FMC_IS_CLK_DISABLED +#define __GPIOA_IS_CLK_ENABLED __HAL_RCC_GPIOA_IS_CLK_ENABLED +#define __GPIOA_IS_CLK_DISABLED __HAL_RCC_GPIOA_IS_CLK_DISABLED +#define __GPIOB_IS_CLK_ENABLED __HAL_RCC_GPIOB_IS_CLK_ENABLED +#define __GPIOB_IS_CLK_DISABLED __HAL_RCC_GPIOB_IS_CLK_DISABLED +#define __GPIOC_IS_CLK_ENABLED __HAL_RCC_GPIOC_IS_CLK_ENABLED +#define __GPIOC_IS_CLK_DISABLED __HAL_RCC_GPIOC_IS_CLK_DISABLED +#define __GPIOD_IS_CLK_ENABLED __HAL_RCC_GPIOD_IS_CLK_ENABLED +#define __GPIOD_IS_CLK_DISABLED __HAL_RCC_GPIOD_IS_CLK_DISABLED +#define __GPIOE_IS_CLK_ENABLED __HAL_RCC_GPIOE_IS_CLK_ENABLED +#define __GPIOE_IS_CLK_DISABLED __HAL_RCC_GPIOE_IS_CLK_DISABLED +#define __GPIOF_IS_CLK_ENABLED __HAL_RCC_GPIOF_IS_CLK_ENABLED +#define __GPIOF_IS_CLK_DISABLED __HAL_RCC_GPIOF_IS_CLK_DISABLED +#define __GPIOG_IS_CLK_ENABLED __HAL_RCC_GPIOG_IS_CLK_ENABLED +#define __GPIOG_IS_CLK_DISABLED __HAL_RCC_GPIOG_IS_CLK_DISABLED +#define __GPIOH_IS_CLK_ENABLED __HAL_RCC_GPIOH_IS_CLK_ENABLED +#define __GPIOH_IS_CLK_DISABLED __HAL_RCC_GPIOH_IS_CLK_DISABLED +#define __HRTIM1_IS_CLK_ENABLED __HAL_RCC_HRTIM1_IS_CLK_ENABLED +#define __HRTIM1_IS_CLK_DISABLED __HAL_RCC_HRTIM1_IS_CLK_DISABLED +#define __I2C1_IS_CLK_ENABLED __HAL_RCC_I2C1_IS_CLK_ENABLED +#define __I2C1_IS_CLK_DISABLED __HAL_RCC_I2C1_IS_CLK_DISABLED +#define __I2C2_IS_CLK_ENABLED __HAL_RCC_I2C2_IS_CLK_ENABLED +#define __I2C2_IS_CLK_DISABLED __HAL_RCC_I2C2_IS_CLK_DISABLED +#define __I2C3_IS_CLK_ENABLED __HAL_RCC_I2C3_IS_CLK_ENABLED +#define __I2C3_IS_CLK_DISABLED __HAL_RCC_I2C3_IS_CLK_DISABLED +#define __PWR_IS_CLK_ENABLED __HAL_RCC_PWR_IS_CLK_ENABLED +#define __PWR_IS_CLK_DISABLED __HAL_RCC_PWR_IS_CLK_DISABLED +#define __SYSCFG_IS_CLK_ENABLED __HAL_RCC_SYSCFG_IS_CLK_ENABLED +#define __SYSCFG_IS_CLK_DISABLED __HAL_RCC_SYSCFG_IS_CLK_DISABLED +#define __SPI1_IS_CLK_ENABLED __HAL_RCC_SPI1_IS_CLK_ENABLED +#define __SPI1_IS_CLK_DISABLED __HAL_RCC_SPI1_IS_CLK_DISABLED +#define __SPI2_IS_CLK_ENABLED __HAL_RCC_SPI2_IS_CLK_ENABLED +#define __SPI2_IS_CLK_DISABLED __HAL_RCC_SPI2_IS_CLK_DISABLED +#define __SPI3_IS_CLK_ENABLED __HAL_RCC_SPI3_IS_CLK_ENABLED +#define __SPI3_IS_CLK_DISABLED __HAL_RCC_SPI3_IS_CLK_DISABLED +#define __SPI4_IS_CLK_ENABLED __HAL_RCC_SPI4_IS_CLK_ENABLED +#define __SPI4_IS_CLK_DISABLED __HAL_RCC_SPI4_IS_CLK_DISABLED +#define __SDADC1_IS_CLK_ENABLED __HAL_RCC_SDADC1_IS_CLK_ENABLED +#define __SDADC1_IS_CLK_DISABLED __HAL_RCC_SDADC1_IS_CLK_DISABLED +#define __SDADC2_IS_CLK_ENABLED __HAL_RCC_SDADC2_IS_CLK_ENABLED +#define __SDADC2_IS_CLK_DISABLED __HAL_RCC_SDADC2_IS_CLK_DISABLED +#define __SDADC3_IS_CLK_ENABLED __HAL_RCC_SDADC3_IS_CLK_ENABLED +#define __SDADC3_IS_CLK_DISABLED __HAL_RCC_SDADC3_IS_CLK_DISABLED +#define __SRAM_IS_CLK_ENABLED __HAL_RCC_SRAM_IS_CLK_ENABLED +#define 
__SRAM_IS_CLK_DISABLED __HAL_RCC_SRAM_IS_CLK_DISABLED +#define __TIM1_IS_CLK_ENABLED __HAL_RCC_TIM1_IS_CLK_ENABLED +#define __TIM1_IS_CLK_DISABLED __HAL_RCC_TIM1_IS_CLK_DISABLED +#define __TIM2_IS_CLK_ENABLED __HAL_RCC_TIM2_IS_CLK_ENABLED +#define __TIM2_IS_CLK_DISABLED __HAL_RCC_TIM2_IS_CLK_DISABLED +#define __TIM3_IS_CLK_ENABLED __HAL_RCC_TIM3_IS_CLK_ENABLED +#define __TIM3_IS_CLK_DISABLED __HAL_RCC_TIM3_IS_CLK_DISABLED +#define __TIM4_IS_CLK_ENABLED __HAL_RCC_TIM4_IS_CLK_ENABLED +#define __TIM4_IS_CLK_DISABLED __HAL_RCC_TIM4_IS_CLK_DISABLED +#define __TIM5_IS_CLK_ENABLED __HAL_RCC_TIM5_IS_CLK_ENABLED +#define __TIM5_IS_CLK_DISABLED __HAL_RCC_TIM5_IS_CLK_DISABLED +#define __TIM6_IS_CLK_ENABLED __HAL_RCC_TIM6_IS_CLK_ENABLED +#define __TIM6_IS_CLK_DISABLED __HAL_RCC_TIM6_IS_CLK_DISABLED +#define __TIM7_IS_CLK_ENABLED __HAL_RCC_TIM7_IS_CLK_ENABLED +#define __TIM7_IS_CLK_DISABLED __HAL_RCC_TIM7_IS_CLK_DISABLED +#define __TIM8_IS_CLK_ENABLED __HAL_RCC_TIM8_IS_CLK_ENABLED +#define __TIM8_IS_CLK_DISABLED __HAL_RCC_TIM8_IS_CLK_DISABLED +#define __TIM12_IS_CLK_ENABLED __HAL_RCC_TIM12_IS_CLK_ENABLED +#define __TIM12_IS_CLK_DISABLED __HAL_RCC_TIM12_IS_CLK_DISABLED +#define __TIM13_IS_CLK_ENABLED __HAL_RCC_TIM13_IS_CLK_ENABLED +#define __TIM13_IS_CLK_DISABLED __HAL_RCC_TIM13_IS_CLK_DISABLED +#define __TIM14_IS_CLK_ENABLED __HAL_RCC_TIM14_IS_CLK_ENABLED +#define __TIM14_IS_CLK_DISABLED __HAL_RCC_TIM14_IS_CLK_DISABLED +#define __TIM15_IS_CLK_ENABLED __HAL_RCC_TIM15_IS_CLK_ENABLED +#define __TIM15_IS_CLK_DISABLED __HAL_RCC_TIM15_IS_CLK_DISABLED +#define __TIM16_IS_CLK_ENABLED __HAL_RCC_TIM16_IS_CLK_ENABLED +#define __TIM16_IS_CLK_DISABLED __HAL_RCC_TIM16_IS_CLK_DISABLED +#define __TIM17_IS_CLK_ENABLED __HAL_RCC_TIM17_IS_CLK_ENABLED +#define __TIM17_IS_CLK_DISABLED __HAL_RCC_TIM17_IS_CLK_DISABLED +#define __TIM18_IS_CLK_ENABLED __HAL_RCC_TIM18_IS_CLK_ENABLED +#define __TIM18_IS_CLK_DISABLED __HAL_RCC_TIM18_IS_CLK_DISABLED +#define __TIM19_IS_CLK_ENABLED __HAL_RCC_TIM19_IS_CLK_ENABLED +#define __TIM19_IS_CLK_DISABLED __HAL_RCC_TIM19_IS_CLK_DISABLED +#define __TIM20_IS_CLK_ENABLED __HAL_RCC_TIM20_IS_CLK_ENABLED +#define __TIM20_IS_CLK_DISABLED __HAL_RCC_TIM20_IS_CLK_DISABLED +#define __TSC_IS_CLK_ENABLED __HAL_RCC_TSC_IS_CLK_ENABLED +#define __TSC_IS_CLK_DISABLED __HAL_RCC_TSC_IS_CLK_DISABLED +#define __UART4_IS_CLK_ENABLED __HAL_RCC_UART4_IS_CLK_ENABLED +#define __UART4_IS_CLK_DISABLED __HAL_RCC_UART4_IS_CLK_DISABLED +#define __UART5_IS_CLK_ENABLED __HAL_RCC_UART5_IS_CLK_ENABLED +#define __UART5_IS_CLK_DISABLED __HAL_RCC_UART5_IS_CLK_DISABLED +#define __USART1_IS_CLK_ENABLED __HAL_RCC_USART1_IS_CLK_ENABLED +#define __USART1_IS_CLK_DISABLED __HAL_RCC_USART1_IS_CLK_DISABLED +#define __USART2_IS_CLK_ENABLED __HAL_RCC_USART2_IS_CLK_ENABLED +#define __USART2_IS_CLK_DISABLED __HAL_RCC_USART2_IS_CLK_DISABLED +#define __USART3_IS_CLK_ENABLED __HAL_RCC_USART3_IS_CLK_ENABLED +#define __USART3_IS_CLK_DISABLED __HAL_RCC_USART3_IS_CLK_DISABLED +#define __USB_IS_CLK_ENABLED __HAL_RCC_USB_IS_CLK_ENABLED +#define __USB_IS_CLK_DISABLED __HAL_RCC_USB_IS_CLK_DISABLED +#define __WWDG_IS_CLK_ENABLED __HAL_RCC_WWDG_IS_CLK_ENABLED +#define __WWDG_IS_CLK_DISABLED __HAL_RCC_WWDG_IS_CLK_DISABLED + +#if defined(STM32L1) +#define __HAL_RCC_CRYP_CLK_DISABLE __HAL_RCC_AES_CLK_DISABLE +#define __HAL_RCC_CRYP_CLK_ENABLE __HAL_RCC_AES_CLK_ENABLE +#define __HAL_RCC_CRYP_CLK_SLEEP_DISABLE __HAL_RCC_AES_CLK_SLEEP_DISABLE +#define __HAL_RCC_CRYP_CLK_SLEEP_ENABLE __HAL_RCC_AES_CLK_SLEEP_ENABLE +#define __HAL_RCC_CRYP_FORCE_RESET 
__HAL_RCC_AES_FORCE_RESET +#define __HAL_RCC_CRYP_RELEASE_RESET __HAL_RCC_AES_RELEASE_RESET +#endif /* STM32L1 */ + +#if defined(STM32F4) +#define __HAL_RCC_SDMMC1_FORCE_RESET __HAL_RCC_SDIO_FORCE_RESET +#define __HAL_RCC_SDMMC1_RELEASE_RESET __HAL_RCC_SDIO_RELEASE_RESET +#define __HAL_RCC_SDMMC1_CLK_SLEEP_ENABLE __HAL_RCC_SDIO_CLK_SLEEP_ENABLE +#define __HAL_RCC_SDMMC1_CLK_SLEEP_DISABLE __HAL_RCC_SDIO_CLK_SLEEP_DISABLE +#define __HAL_RCC_SDMMC1_CLK_ENABLE __HAL_RCC_SDIO_CLK_ENABLE +#define __HAL_RCC_SDMMC1_CLK_DISABLE __HAL_RCC_SDIO_CLK_DISABLE +#define __HAL_RCC_SDMMC1_IS_CLK_ENABLED __HAL_RCC_SDIO_IS_CLK_ENABLED +#define __HAL_RCC_SDMMC1_IS_CLK_DISABLED __HAL_RCC_SDIO_IS_CLK_DISABLED +#define Sdmmc1ClockSelection SdioClockSelection +#define RCC_PERIPHCLK_SDMMC1 RCC_PERIPHCLK_SDIO +#define RCC_SDMMC1CLKSOURCE_CLK48 RCC_SDIOCLKSOURCE_CK48 +#define RCC_SDMMC1CLKSOURCE_SYSCLK RCC_SDIOCLKSOURCE_SYSCLK +#define __HAL_RCC_SDMMC1_CONFIG __HAL_RCC_SDIO_CONFIG +#define __HAL_RCC_GET_SDMMC1_SOURCE __HAL_RCC_GET_SDIO_SOURCE +#endif + +#if defined(STM32F7) || defined(STM32L4) +#define __HAL_RCC_SDIO_FORCE_RESET __HAL_RCC_SDMMC1_FORCE_RESET +#define __HAL_RCC_SDIO_RELEASE_RESET __HAL_RCC_SDMMC1_RELEASE_RESET +#define __HAL_RCC_SDIO_CLK_SLEEP_ENABLE __HAL_RCC_SDMMC1_CLK_SLEEP_ENABLE +#define __HAL_RCC_SDIO_CLK_SLEEP_DISABLE __HAL_RCC_SDMMC1_CLK_SLEEP_DISABLE +#define __HAL_RCC_SDIO_CLK_ENABLE __HAL_RCC_SDMMC1_CLK_ENABLE +#define __HAL_RCC_SDIO_CLK_DISABLE __HAL_RCC_SDMMC1_CLK_DISABLE +#define __HAL_RCC_SDIO_IS_CLK_ENABLED __HAL_RCC_SDMMC1_IS_CLK_ENABLED +#define __HAL_RCC_SDIO_IS_CLK_DISABLED __HAL_RCC_SDMMC1_IS_CLK_DISABLED +#define SdioClockSelection Sdmmc1ClockSelection +#define RCC_PERIPHCLK_SDIO RCC_PERIPHCLK_SDMMC1 +#define __HAL_RCC_SDIO_CONFIG __HAL_RCC_SDMMC1_CONFIG +#define __HAL_RCC_GET_SDIO_SOURCE __HAL_RCC_GET_SDMMC1_SOURCE +#endif + +#if defined(STM32F7) +#define RCC_SDIOCLKSOURCE_CLK48 RCC_SDMMC1CLKSOURCE_CLK48 +#define RCC_SDIOCLKSOURCE_SYSCLK RCC_SDMMC1CLKSOURCE_SYSCLK +#endif + +#if defined(STM32H7) +#define __HAL_RCC_USB_OTG_HS_CLK_ENABLE() __HAL_RCC_USB1_OTG_HS_CLK_ENABLE() +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_ENABLE() __HAL_RCC_USB1_OTG_HS_ULPI_CLK_ENABLE() +#define __HAL_RCC_USB_OTG_HS_CLK_DISABLE() __HAL_RCC_USB1_OTG_HS_CLK_DISABLE() +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_DISABLE() __HAL_RCC_USB1_OTG_HS_ULPI_CLK_DISABLE() +#define __HAL_RCC_USB_OTG_HS_FORCE_RESET() __HAL_RCC_USB1_OTG_HS_FORCE_RESET() +#define __HAL_RCC_USB_OTG_HS_RELEASE_RESET() __HAL_RCC_USB1_OTG_HS_RELEASE_RESET() +#define __HAL_RCC_USB_OTG_HS_CLK_SLEEP_ENABLE() __HAL_RCC_USB1_OTG_HS_CLK_SLEEP_ENABLE() +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_ENABLE() __HAL_RCC_USB1_OTG_HS_ULPI_CLK_SLEEP_ENABLE() +#define __HAL_RCC_USB_OTG_HS_CLK_SLEEP_DISABLE() __HAL_RCC_USB1_OTG_HS_CLK_SLEEP_DISABLE() +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_DISABLE() __HAL_RCC_USB1_OTG_HS_ULPI_CLK_SLEEP_DISABLE() + +#define __HAL_RCC_USB_OTG_FS_CLK_ENABLE() __HAL_RCC_USB2_OTG_FS_CLK_ENABLE() +#define __HAL_RCC_USB_OTG_FS_ULPI_CLK_ENABLE() __HAL_RCC_USB2_OTG_FS_ULPI_CLK_ENABLE() +#define __HAL_RCC_USB_OTG_FS_CLK_DISABLE() __HAL_RCC_USB2_OTG_FS_CLK_DISABLE() +#define __HAL_RCC_USB_OTG_FS_ULPI_CLK_DISABLE() __HAL_RCC_USB2_OTG_FS_ULPI_CLK_DISABLE() +#define __HAL_RCC_USB_OTG_FS_FORCE_RESET() __HAL_RCC_USB2_OTG_FS_FORCE_RESET() +#define __HAL_RCC_USB_OTG_FS_RELEASE_RESET() __HAL_RCC_USB2_OTG_FS_RELEASE_RESET() +#define __HAL_RCC_USB_OTG_FS_CLK_SLEEP_ENABLE() __HAL_RCC_USB2_OTG_FS_CLK_SLEEP_ENABLE() +#define 
__HAL_RCC_USB_OTG_FS_ULPI_CLK_SLEEP_ENABLE() __HAL_RCC_USB2_OTG_FS_ULPI_CLK_SLEEP_ENABLE() +#define __HAL_RCC_USB_OTG_FS_CLK_SLEEP_DISABLE() __HAL_RCC_USB2_OTG_FS_CLK_SLEEP_DISABLE() +#define __HAL_RCC_USB_OTG_FS_ULPI_CLK_SLEEP_DISABLE() __HAL_RCC_USB2_OTG_FS_ULPI_CLK_SLEEP_DISABLE() +#endif + +#define __HAL_RCC_I2SCLK __HAL_RCC_I2S_CONFIG +#define __HAL_RCC_I2SCLK_CONFIG __HAL_RCC_I2S_CONFIG + +#define __RCC_PLLSRC RCC_GET_PLL_OSCSOURCE + +#define IS_RCC_MSIRANGE IS_RCC_MSI_CLOCK_RANGE +#define IS_RCC_RTCCLK_SOURCE IS_RCC_RTCCLKSOURCE +#define IS_RCC_SYSCLK_DIV IS_RCC_HCLK +#define IS_RCC_HCLK_DIV IS_RCC_PCLK +#define IS_RCC_PERIPHCLK IS_RCC_PERIPHCLOCK + +#define RCC_IT_HSI14 RCC_IT_HSI14RDY + +#define RCC_IT_CSSLSE RCC_IT_LSECSS +#define RCC_IT_CSSHSE RCC_IT_CSS + +#define RCC_PLLMUL_3 RCC_PLL_MUL3 +#define RCC_PLLMUL_4 RCC_PLL_MUL4 +#define RCC_PLLMUL_6 RCC_PLL_MUL6 +#define RCC_PLLMUL_8 RCC_PLL_MUL8 +#define RCC_PLLMUL_12 RCC_PLL_MUL12 +#define RCC_PLLMUL_16 RCC_PLL_MUL16 +#define RCC_PLLMUL_24 RCC_PLL_MUL24 +#define RCC_PLLMUL_32 RCC_PLL_MUL32 +#define RCC_PLLMUL_48 RCC_PLL_MUL48 + +#define RCC_PLLDIV_2 RCC_PLL_DIV2 +#define RCC_PLLDIV_3 RCC_PLL_DIV3 +#define RCC_PLLDIV_4 RCC_PLL_DIV4 + +#define IS_RCC_MCOSOURCE IS_RCC_MCO1SOURCE +#define __HAL_RCC_MCO_CONFIG __HAL_RCC_MCO1_CONFIG +#define RCC_MCO_NODIV RCC_MCODIV_1 +#define RCC_MCO_DIV1 RCC_MCODIV_1 +#define RCC_MCO_DIV2 RCC_MCODIV_2 +#define RCC_MCO_DIV4 RCC_MCODIV_4 +#define RCC_MCO_DIV8 RCC_MCODIV_8 +#define RCC_MCO_DIV16 RCC_MCODIV_16 +#define RCC_MCO_DIV32 RCC_MCODIV_32 +#define RCC_MCO_DIV64 RCC_MCODIV_64 +#define RCC_MCO_DIV128 RCC_MCODIV_128 +#define RCC_MCOSOURCE_NONE RCC_MCO1SOURCE_NOCLOCK +#define RCC_MCOSOURCE_LSI RCC_MCO1SOURCE_LSI +#define RCC_MCOSOURCE_LSE RCC_MCO1SOURCE_LSE +#define RCC_MCOSOURCE_SYSCLK RCC_MCO1SOURCE_SYSCLK +#define RCC_MCOSOURCE_HSI RCC_MCO1SOURCE_HSI +#define RCC_MCOSOURCE_HSI14 RCC_MCO1SOURCE_HSI14 +#define RCC_MCOSOURCE_HSI48 RCC_MCO1SOURCE_HSI48 +#define RCC_MCOSOURCE_HSE RCC_MCO1SOURCE_HSE +#define RCC_MCOSOURCE_PLLCLK_DIV1 RCC_MCO1SOURCE_PLLCLK +#define RCC_MCOSOURCE_PLLCLK_NODIV RCC_MCO1SOURCE_PLLCLK +#define RCC_MCOSOURCE_PLLCLK_DIV2 RCC_MCO1SOURCE_PLLCLK_DIV2 + +#if defined(STM32U0) +#define RCC_SYSCLKSOURCE_STATUS_PLLR RCC_SYSCLKSOURCE_STATUS_PLLCLK +#endif + +#if defined(STM32L4) || defined(STM32WB) || defined(STM32G0) || defined(STM32G4) || defined(STM32L5) || \ + defined(STM32WL) || defined(STM32C0) || defined(STM32H7RS) || defined(STM32U0) +#define RCC_RTCCLKSOURCE_NO_CLK RCC_RTCCLKSOURCE_NONE +#else +#define RCC_RTCCLKSOURCE_NONE RCC_RTCCLKSOURCE_NO_CLK +#endif + +#define RCC_USBCLK_PLLSAI1 RCC_USBCLKSOURCE_PLLSAI1 +#define RCC_USBCLK_PLL RCC_USBCLKSOURCE_PLL +#define RCC_USBCLK_MSI RCC_USBCLKSOURCE_MSI +#define RCC_USBCLKSOURCE_PLLCLK RCC_USBCLKSOURCE_PLL +#define RCC_USBPLLCLK_DIV1 RCC_USBCLKSOURCE_PLL +#define RCC_USBPLLCLK_DIV1_5 RCC_USBCLKSOURCE_PLL_DIV1_5 +#define RCC_USBPLLCLK_DIV2 RCC_USBCLKSOURCE_PLL_DIV2 +#define RCC_USBPLLCLK_DIV3 RCC_USBCLKSOURCE_PLL_DIV3 + +#define HSION_BitNumber RCC_HSION_BIT_NUMBER +#define HSION_BITNUMBER RCC_HSION_BIT_NUMBER +#define HSEON_BitNumber RCC_HSEON_BIT_NUMBER +#define HSEON_BITNUMBER RCC_HSEON_BIT_NUMBER +#define MSION_BITNUMBER RCC_MSION_BIT_NUMBER +#define CSSON_BitNumber RCC_CSSON_BIT_NUMBER +#define CSSON_BITNUMBER RCC_CSSON_BIT_NUMBER +#define PLLON_BitNumber RCC_PLLON_BIT_NUMBER +#define PLLON_BITNUMBER RCC_PLLON_BIT_NUMBER +#define PLLI2SON_BitNumber RCC_PLLI2SON_BIT_NUMBER +#define I2SSRC_BitNumber RCC_I2SSRC_BIT_NUMBER +#define 
RTCEN_BitNumber RCC_RTCEN_BIT_NUMBER +#define RTCEN_BITNUMBER RCC_RTCEN_BIT_NUMBER +#define BDRST_BitNumber RCC_BDRST_BIT_NUMBER +#define BDRST_BITNUMBER RCC_BDRST_BIT_NUMBER +#define RTCRST_BITNUMBER RCC_RTCRST_BIT_NUMBER +#define LSION_BitNumber RCC_LSION_BIT_NUMBER +#define LSION_BITNUMBER RCC_LSION_BIT_NUMBER +#define LSEON_BitNumber RCC_LSEON_BIT_NUMBER +#define LSEON_BITNUMBER RCC_LSEON_BIT_NUMBER +#define LSEBYP_BITNUMBER RCC_LSEBYP_BIT_NUMBER +#define PLLSAION_BitNumber RCC_PLLSAION_BIT_NUMBER +#define TIMPRE_BitNumber RCC_TIMPRE_BIT_NUMBER +#define RMVF_BitNumber RCC_RMVF_BIT_NUMBER +#define RMVF_BITNUMBER RCC_RMVF_BIT_NUMBER +#define RCC_CR2_HSI14TRIM_BitNumber RCC_HSI14TRIM_BIT_NUMBER +#define CR_BYTE2_ADDRESS RCC_CR_BYTE2_ADDRESS +#define CIR_BYTE1_ADDRESS RCC_CIR_BYTE1_ADDRESS +#define CIR_BYTE2_ADDRESS RCC_CIR_BYTE2_ADDRESS +#define BDCR_BYTE0_ADDRESS RCC_BDCR_BYTE0_ADDRESS +#define DBP_TIMEOUT_VALUE RCC_DBP_TIMEOUT_VALUE +#define LSE_TIMEOUT_VALUE RCC_LSE_TIMEOUT_VALUE + +#define CR_HSION_BB RCC_CR_HSION_BB +#define CR_CSSON_BB RCC_CR_CSSON_BB +#define CR_PLLON_BB RCC_CR_PLLON_BB +#define CR_PLLI2SON_BB RCC_CR_PLLI2SON_BB +#define CR_MSION_BB RCC_CR_MSION_BB +#define CSR_LSION_BB RCC_CSR_LSION_BB +#define CSR_LSEON_BB RCC_CSR_LSEON_BB +#define CSR_LSEBYP_BB RCC_CSR_LSEBYP_BB +#define CSR_RTCEN_BB RCC_CSR_RTCEN_BB +#define CSR_RTCRST_BB RCC_CSR_RTCRST_BB +#define CFGR_I2SSRC_BB RCC_CFGR_I2SSRC_BB +#define BDCR_RTCEN_BB RCC_BDCR_RTCEN_BB +#define BDCR_BDRST_BB RCC_BDCR_BDRST_BB +#define CR_HSEON_BB RCC_CR_HSEON_BB +#define CSR_RMVF_BB RCC_CSR_RMVF_BB +#define CR_PLLSAION_BB RCC_CR_PLLSAION_BB +#define DCKCFGR_TIMPRE_BB RCC_DCKCFGR_TIMPRE_BB + +#define __HAL_RCC_CRS_ENABLE_FREQ_ERROR_COUNTER __HAL_RCC_CRS_FREQ_ERROR_COUNTER_ENABLE +#define __HAL_RCC_CRS_DISABLE_FREQ_ERROR_COUNTER __HAL_RCC_CRS_FREQ_ERROR_COUNTER_DISABLE +#define __HAL_RCC_CRS_ENABLE_AUTOMATIC_CALIB __HAL_RCC_CRS_AUTOMATIC_CALIB_ENABLE +#define __HAL_RCC_CRS_DISABLE_AUTOMATIC_CALIB __HAL_RCC_CRS_AUTOMATIC_CALIB_DISABLE +#define __HAL_RCC_CRS_CALCULATE_RELOADVALUE __HAL_RCC_CRS_RELOADVALUE_CALCULATE + +#define __HAL_RCC_GET_IT_SOURCE __HAL_RCC_GET_IT + +#define RCC_CRS_SYNCWARM RCC_CRS_SYNCWARN +#define RCC_CRS_TRIMOV RCC_CRS_TRIMOVF + +#define RCC_PERIPHCLK_CK48 RCC_PERIPHCLK_CLK48 +#define RCC_CK48CLKSOURCE_PLLQ RCC_CLK48CLKSOURCE_PLLQ +#define RCC_CK48CLKSOURCE_PLLSAIP RCC_CLK48CLKSOURCE_PLLSAIP +#define RCC_CK48CLKSOURCE_PLLI2SQ RCC_CLK48CLKSOURCE_PLLI2SQ +#define IS_RCC_CK48CLKSOURCE IS_RCC_CLK48CLKSOURCE +#define RCC_SDIOCLKSOURCE_CK48 RCC_SDIOCLKSOURCE_CLK48 + +#define __HAL_RCC_DFSDM_CLK_ENABLE __HAL_RCC_DFSDM1_CLK_ENABLE +#define __HAL_RCC_DFSDM_CLK_DISABLE __HAL_RCC_DFSDM1_CLK_DISABLE +#define __HAL_RCC_DFSDM_IS_CLK_ENABLED __HAL_RCC_DFSDM1_IS_CLK_ENABLED +#define __HAL_RCC_DFSDM_IS_CLK_DISABLED __HAL_RCC_DFSDM1_IS_CLK_DISABLED +#define __HAL_RCC_DFSDM_FORCE_RESET __HAL_RCC_DFSDM1_FORCE_RESET +#define __HAL_RCC_DFSDM_RELEASE_RESET __HAL_RCC_DFSDM1_RELEASE_RESET +#define __HAL_RCC_DFSDM_CLK_SLEEP_ENABLE __HAL_RCC_DFSDM1_CLK_SLEEP_ENABLE +#define __HAL_RCC_DFSDM_CLK_SLEEP_DISABLE __HAL_RCC_DFSDM1_CLK_SLEEP_DISABLE +#define __HAL_RCC_DFSDM_IS_CLK_SLEEP_ENABLED __HAL_RCC_DFSDM1_IS_CLK_SLEEP_ENABLED +#define __HAL_RCC_DFSDM_IS_CLK_SLEEP_DISABLED __HAL_RCC_DFSDM1_IS_CLK_SLEEP_DISABLED +#define DfsdmClockSelection Dfsdm1ClockSelection +#define RCC_PERIPHCLK_DFSDM RCC_PERIPHCLK_DFSDM1 +#define RCC_DFSDMCLKSOURCE_PCLK RCC_DFSDM1CLKSOURCE_PCLK2 +#define RCC_DFSDMCLKSOURCE_SYSCLK RCC_DFSDM1CLKSOURCE_SYSCLK 
+#define __HAL_RCC_DFSDM_CONFIG __HAL_RCC_DFSDM1_CONFIG +#define __HAL_RCC_GET_DFSDM_SOURCE __HAL_RCC_GET_DFSDM1_SOURCE +#define RCC_DFSDM1CLKSOURCE_PCLK RCC_DFSDM1CLKSOURCE_PCLK2 +#define RCC_SWPMI1CLKSOURCE_PCLK RCC_SWPMI1CLKSOURCE_PCLK1 +#if !defined(STM32U0) +#define RCC_LPTIM1CLKSOURCE_PCLK RCC_LPTIM1CLKSOURCE_PCLK1 +#define RCC_LPTIM2CLKSOURCE_PCLK RCC_LPTIM2CLKSOURCE_PCLK1 +#endif + +#define RCC_DFSDM1AUDIOCLKSOURCE_I2SAPB1 RCC_DFSDM1AUDIOCLKSOURCE_I2S1 +#define RCC_DFSDM1AUDIOCLKSOURCE_I2SAPB2 RCC_DFSDM1AUDIOCLKSOURCE_I2S2 +#define RCC_DFSDM2AUDIOCLKSOURCE_I2SAPB1 RCC_DFSDM2AUDIOCLKSOURCE_I2S1 +#define RCC_DFSDM2AUDIOCLKSOURCE_I2SAPB2 RCC_DFSDM2AUDIOCLKSOURCE_I2S2 +#define RCC_DFSDM1CLKSOURCE_APB2 RCC_DFSDM1CLKSOURCE_PCLK2 +#define RCC_DFSDM2CLKSOURCE_APB2 RCC_DFSDM2CLKSOURCE_PCLK2 +#define RCC_FMPI2C1CLKSOURCE_APB RCC_FMPI2C1CLKSOURCE_PCLK1 +#if defined(STM32U5) +#define MSIKPLLModeSEL RCC_MSIKPLL_MODE_SEL +#define MSISPLLModeSEL RCC_MSISPLL_MODE_SEL +#define __HAL_RCC_AHB21_CLK_DISABLE __HAL_RCC_AHB2_1_CLK_DISABLE +#define __HAL_RCC_AHB22_CLK_DISABLE __HAL_RCC_AHB2_2_CLK_DISABLE +#define __HAL_RCC_AHB1_CLK_Disable_Clear __HAL_RCC_AHB1_CLK_ENABLE +#define __HAL_RCC_AHB21_CLK_Disable_Clear __HAL_RCC_AHB2_1_CLK_ENABLE +#define __HAL_RCC_AHB22_CLK_Disable_Clear __HAL_RCC_AHB2_2_CLK_ENABLE +#define __HAL_RCC_AHB3_CLK_Disable_Clear __HAL_RCC_AHB3_CLK_ENABLE +#define __HAL_RCC_APB1_CLK_Disable_Clear __HAL_RCC_APB1_CLK_ENABLE +#define __HAL_RCC_APB2_CLK_Disable_Clear __HAL_RCC_APB2_CLK_ENABLE +#define __HAL_RCC_APB3_CLK_Disable_Clear __HAL_RCC_APB3_CLK_ENABLE +#define IS_RCC_MSIPLLModeSelection IS_RCC_MSIPLLMODE_SELECT +#define RCC_PERIPHCLK_CLK48 RCC_PERIPHCLK_ICLK +#define RCC_CLK48CLKSOURCE_HSI48 RCC_ICLK_CLKSOURCE_HSI48 +#define RCC_CLK48CLKSOURCE_PLL2 RCC_ICLK_CLKSOURCE_PLL2 +#define RCC_CLK48CLKSOURCE_PLL1 RCC_ICLK_CLKSOURCE_PLL1 +#define RCC_CLK48CLKSOURCE_MSIK RCC_ICLK_CLKSOURCE_MSIK +#define __HAL_RCC_ADC1_CLK_ENABLE __HAL_RCC_ADC12_CLK_ENABLE +#define __HAL_RCC_ADC1_CLK_DISABLE __HAL_RCC_ADC12_CLK_DISABLE +#define __HAL_RCC_ADC1_IS_CLK_ENABLED __HAL_RCC_ADC12_IS_CLK_ENABLED +#define __HAL_RCC_ADC1_IS_CLK_DISABLED __HAL_RCC_ADC12_IS_CLK_DISABLED +#define __HAL_RCC_ADC1_FORCE_RESET __HAL_RCC_ADC12_FORCE_RESET +#define __HAL_RCC_ADC1_RELEASE_RESET __HAL_RCC_ADC12_RELEASE_RESET +#define __HAL_RCC_ADC1_CLK_SLEEP_ENABLE __HAL_RCC_ADC12_CLK_SLEEP_ENABLE +#define __HAL_RCC_ADC1_CLK_SLEEP_DISABLE __HAL_RCC_ADC12_CLK_SLEEP_DISABLE +#define __HAL_RCC_GET_CLK48_SOURCE __HAL_RCC_GET_ICLK_SOURCE +#define __HAL_RCC_PLLFRACN_ENABLE __HAL_RCC_PLL_FRACN_ENABLE +#define __HAL_RCC_PLLFRACN_DISABLE __HAL_RCC_PLL_FRACN_DISABLE +#define __HAL_RCC_PLLFRACN_CONFIG __HAL_RCC_PLL_FRACN_CONFIG +#define IS_RCC_PLLFRACN_VALUE IS_RCC_PLL_FRACN_VALUE +#endif /* STM32U5 */ + +#if defined(STM32H5) +#define __HAL_RCC_PLLFRACN_ENABLE __HAL_RCC_PLL_FRACN_ENABLE +#define __HAL_RCC_PLLFRACN_DISABLE __HAL_RCC_PLL_FRACN_DISABLE +#define __HAL_RCC_PLLFRACN_CONFIG __HAL_RCC_PLL_FRACN_CONFIG +#define IS_RCC_PLLFRACN_VALUE IS_RCC_PLL_FRACN_VALUE + +#define RCC_PLLSOURCE_NONE RCC_PLL1_SOURCE_NONE +#define RCC_PLLSOURCE_HSI RCC_PLL1_SOURCE_HSI +#define RCC_PLLSOURCE_CSI RCC_PLL1_SOURCE_CSI +#define RCC_PLLSOURCE_HSE RCC_PLL1_SOURCE_HSE +#define RCC_PLLVCIRANGE_0 RCC_PLL1_VCIRANGE_0 +#define RCC_PLLVCIRANGE_1 RCC_PLL1_VCIRANGE_1 +#define RCC_PLLVCIRANGE_2 RCC_PLL1_VCIRANGE_2 +#define RCC_PLLVCIRANGE_3 RCC_PLL1_VCIRANGE_3 +#define RCC_PLL1VCOWIDE RCC_PLL1_VCORANGE_WIDE +#define RCC_PLL1VCOMEDIUM RCC_PLL1_VCORANGE_MEDIUM + 
+#define IS_RCC_PLLSOURCE IS_RCC_PLL1_SOURCE +#define IS_RCC_PLLRGE_VALUE IS_RCC_PLL1_VCIRGE_VALUE +#define IS_RCC_PLLVCORGE_VALUE IS_RCC_PLL1_VCORGE_VALUE +#define IS_RCC_PLLCLOCKOUT_VALUE IS_RCC_PLL1_CLOCKOUT_VALUE +#define IS_RCC_PLL_FRACN_VALUE IS_RCC_PLL1_FRACN_VALUE +#define IS_RCC_PLLM_VALUE IS_RCC_PLL1_DIVM_VALUE +#define IS_RCC_PLLN_VALUE IS_RCC_PLL1_MULN_VALUE +#define IS_RCC_PLLP_VALUE IS_RCC_PLL1_DIVP_VALUE +#define IS_RCC_PLLQ_VALUE IS_RCC_PLL1_DIVQ_VALUE +#define IS_RCC_PLLR_VALUE IS_RCC_PLL1_DIVR_VALUE + +#define __HAL_RCC_PLL_ENABLE __HAL_RCC_PLL1_ENABLE +#define __HAL_RCC_PLL_DISABLE __HAL_RCC_PLL1_DISABLE +#define __HAL_RCC_PLL_FRACN_ENABLE __HAL_RCC_PLL1_FRACN_ENABLE +#define __HAL_RCC_PLL_FRACN_DISABLE __HAL_RCC_PLL1_FRACN_DISABLE +#define __HAL_RCC_PLL_CONFIG __HAL_RCC_PLL1_CONFIG +#define __HAL_RCC_PLL_PLLSOURCE_CONFIG __HAL_RCC_PLL1_PLLSOURCE_CONFIG +#define __HAL_RCC_PLL_DIVM_CONFIG __HAL_RCC_PLL1_DIVM_CONFIG +#define __HAL_RCC_PLL_FRACN_CONFIG __HAL_RCC_PLL1_FRACN_CONFIG +#define __HAL_RCC_PLL_VCIRANGE __HAL_RCC_PLL1_VCIRANGE +#define __HAL_RCC_PLL_VCORANGE __HAL_RCC_PLL1_VCORANGE +#define __HAL_RCC_GET_PLL_OSCSOURCE __HAL_RCC_GET_PLL1_OSCSOURCE +#define __HAL_RCC_PLLCLKOUT_ENABLE __HAL_RCC_PLL1_CLKOUT_ENABLE +#define __HAL_RCC_PLLCLKOUT_DISABLE __HAL_RCC_PLL1_CLKOUT_DISABLE +#define __HAL_RCC_GET_PLLCLKOUT_CONFIG __HAL_RCC_GET_PLL1_CLKOUT_CONFIG + +#define __HAL_RCC_PLL2FRACN_ENABLE __HAL_RCC_PLL2_FRACN_ENABLE +#define __HAL_RCC_PLL2FRACN_DISABLE __HAL_RCC_PLL2_FRACN_DISABLE +#define __HAL_RCC_PLL2CLKOUT_ENABLE __HAL_RCC_PLL2_CLKOUT_ENABLE +#define __HAL_RCC_PLL2CLKOUT_DISABLE __HAL_RCC_PLL2_CLKOUT_DISABLE +#define __HAL_RCC_PLL2FRACN_CONFIG __HAL_RCC_PLL2_FRACN_CONFIG +#define __HAL_RCC_GET_PLL2CLKOUT_CONFIG __HAL_RCC_GET_PLL2_CLKOUT_CONFIG + +#define __HAL_RCC_PLL3FRACN_ENABLE __HAL_RCC_PLL3_FRACN_ENABLE +#define __HAL_RCC_PLL3FRACN_DISABLE __HAL_RCC_PLL3_FRACN_DISABLE +#define __HAL_RCC_PLL3CLKOUT_ENABLE __HAL_RCC_PLL3_CLKOUT_ENABLE +#define __HAL_RCC_PLL3CLKOUT_DISABLE __HAL_RCC_PLL3_CLKOUT_DISABLE +#define __HAL_RCC_PLL3FRACN_CONFIG __HAL_RCC_PLL3_FRACN_CONFIG +#define __HAL_RCC_GET_PLL3CLKOUT_CONFIG __HAL_RCC_GET_PLL3_CLKOUT_CONFIG + +#define RCC_PLL2VCIRANGE_0 RCC_PLL2_VCIRANGE_0 +#define RCC_PLL2VCIRANGE_1 RCC_PLL2_VCIRANGE_1 +#define RCC_PLL2VCIRANGE_2 RCC_PLL2_VCIRANGE_2 +#define RCC_PLL2VCIRANGE_3 RCC_PLL2_VCIRANGE_3 + +#define RCC_PLL2VCOWIDE RCC_PLL2_VCORANGE_WIDE +#define RCC_PLL2VCOMEDIUM RCC_PLL2_VCORANGE_MEDIUM + +#define RCC_PLL2SOURCE_NONE RCC_PLL2_SOURCE_NONE +#define RCC_PLL2SOURCE_HSI RCC_PLL2_SOURCE_HSI +#define RCC_PLL2SOURCE_CSI RCC_PLL2_SOURCE_CSI +#define RCC_PLL2SOURCE_HSE RCC_PLL2_SOURCE_HSE + +#define RCC_PLL3VCIRANGE_0 RCC_PLL3_VCIRANGE_0 +#define RCC_PLL3VCIRANGE_1 RCC_PLL3_VCIRANGE_1 +#define RCC_PLL3VCIRANGE_2 RCC_PLL3_VCIRANGE_2 +#define RCC_PLL3VCIRANGE_3 RCC_PLL3_VCIRANGE_3 + +#define RCC_PLL3VCOWIDE RCC_PLL3_VCORANGE_WIDE +#define RCC_PLL3VCOMEDIUM RCC_PLL3_VCORANGE_MEDIUM + +#define RCC_PLL3SOURCE_NONE RCC_PLL3_SOURCE_NONE +#define RCC_PLL3SOURCE_HSI RCC_PLL3_SOURCE_HSI +#define RCC_PLL3SOURCE_CSI RCC_PLL3_SOURCE_CSI +#define RCC_PLL3SOURCE_HSE RCC_PLL3_SOURCE_HSE + + +#endif /* STM32H5 */ + +/** + * @} + */ + +/** @defgroup HAL_RNG_Aliased_Macros HAL RNG Aliased Macros maintained for legacy purpose + * @{ + */ +#define HAL_RNG_ReadyCallback(__HANDLE__) HAL_RNG_ReadyDataCallback((__HANDLE__), uint32_t random32bit) + +/** + * @} + */ + +/** @defgroup HAL_RTC_Aliased_Macros HAL RTC Aliased Macros maintained for legacy purpose + 
* @{ + */ +#if defined (STM32G0) || defined (STM32L5) || defined (STM32L412xx) || defined (STM32L422xx) || \ + defined (STM32L4P5xx)|| defined (STM32L4Q5xx) || defined (STM32G4) || defined (STM32WL) || defined (STM32U5) || \ + defined (STM32WBA) || defined (STM32H5) || defined (STM32C0) || defined (STM32H7RS) || defined (STM32U0) +#else +#define __HAL_RTC_CLEAR_FLAG __HAL_RTC_EXTI_CLEAR_FLAG +#endif +#define __HAL_RTC_DISABLE_IT __HAL_RTC_EXTI_DISABLE_IT +#define __HAL_RTC_ENABLE_IT __HAL_RTC_EXTI_ENABLE_IT + +#if defined (STM32F1) +#define __HAL_RTC_EXTI_CLEAR_FLAG(RTC_EXTI_LINE_ALARM_EVENT) __HAL_RTC_ALARM_EXTI_CLEAR_FLAG() + +#define __HAL_RTC_EXTI_ENABLE_IT(RTC_EXTI_LINE_ALARM_EVENT) __HAL_RTC_ALARM_EXTI_ENABLE_IT() + +#define __HAL_RTC_EXTI_DISABLE_IT(RTC_EXTI_LINE_ALARM_EVENT) __HAL_RTC_ALARM_EXTI_DISABLE_IT() + +#define __HAL_RTC_EXTI_GET_FLAG(RTC_EXTI_LINE_ALARM_EVENT) __HAL_RTC_ALARM_EXTI_GET_FLAG() + +#define __HAL_RTC_EXTI_GENERATE_SWIT(RTC_EXTI_LINE_ALARM_EVENT) __HAL_RTC_ALARM_EXTI_GENERATE_SWIT() +#else +#define __HAL_RTC_EXTI_CLEAR_FLAG(__EXTI_LINE__) (((__EXTI_LINE__) == RTC_EXTI_LINE_ALARM_EVENT) ? __HAL_RTC_ALARM_EXTI_CLEAR_FLAG() : \ + (((__EXTI_LINE__) == RTC_EXTI_LINE_WAKEUPTIMER_EVENT) ? __HAL_RTC_WAKEUPTIMER_EXTI_CLEAR_FLAG() : \ + __HAL_RTC_TAMPER_TIMESTAMP_EXTI_CLEAR_FLAG())) +#define __HAL_RTC_EXTI_ENABLE_IT(__EXTI_LINE__) (((__EXTI_LINE__) == RTC_EXTI_LINE_ALARM_EVENT) ? __HAL_RTC_ALARM_EXTI_ENABLE_IT() : \ + (((__EXTI_LINE__) == RTC_EXTI_LINE_WAKEUPTIMER_EVENT) ? __HAL_RTC_WAKEUPTIMER_EXTI_ENABLE_IT() : \ + __HAL_RTC_TAMPER_TIMESTAMP_EXTI_ENABLE_IT())) +#define __HAL_RTC_EXTI_DISABLE_IT(__EXTI_LINE__) (((__EXTI_LINE__) == RTC_EXTI_LINE_ALARM_EVENT) ? __HAL_RTC_ALARM_EXTI_DISABLE_IT() : \ + (((__EXTI_LINE__) == RTC_EXTI_LINE_WAKEUPTIMER_EVENT) ? __HAL_RTC_WAKEUPTIMER_EXTI_DISABLE_IT() : \ + __HAL_RTC_TAMPER_TIMESTAMP_EXTI_DISABLE_IT())) +#define __HAL_RTC_EXTI_GET_FLAG(__EXTI_LINE__) (((__EXTI_LINE__) == RTC_EXTI_LINE_ALARM_EVENT) ? __HAL_RTC_ALARM_EXTI_GET_FLAG() : \ + (((__EXTI_LINE__) == RTC_EXTI_LINE_WAKEUPTIMER_EVENT) ? __HAL_RTC_WAKEUPTIMER_EXTI_GET_FLAG() : \ + __HAL_RTC_TAMPER_TIMESTAMP_EXTI_GET_FLAG())) +#define __HAL_RTC_EXTI_GENERATE_SWIT(__EXTI_LINE__) (((__EXTI_LINE__) == RTC_EXTI_LINE_ALARM_EVENT) ? __HAL_RTC_ALARM_EXTI_GENERATE_SWIT() : \ + (((__EXTI_LINE__) == RTC_EXTI_LINE_WAKEUPTIMER_EVENT) ? 
__HAL_RTC_WAKEUPTIMER_EXTI_GENERATE_SWIT() : \ + __HAL_RTC_TAMPER_TIMESTAMP_EXTI_GENERATE_SWIT())) +#endif /* STM32F1 */ + +#if defined (STM32F0) || defined (STM32F2) || defined (STM32F3) || defined (STM32F4) || defined (STM32F7) || \ + defined (STM32H7) || \ + defined (STM32L0) || defined (STM32L1) || \ + defined (STM32WB) +#define __HAL_RTC_TAMPER_GET_IT __HAL_RTC_TAMPER_GET_FLAG +#endif + +#define IS_ALARM IS_RTC_ALARM +#define IS_ALARM_MASK IS_RTC_ALARM_MASK +#define IS_TAMPER IS_RTC_TAMPER +#define IS_TAMPER_ERASE_MODE IS_RTC_TAMPER_ERASE_MODE +#define IS_TAMPER_FILTER IS_RTC_TAMPER_FILTER +#define IS_TAMPER_INTERRUPT IS_RTC_TAMPER_INTERRUPT +#define IS_TAMPER_MASKFLAG_STATE IS_RTC_TAMPER_MASKFLAG_STATE +#define IS_TAMPER_PRECHARGE_DURATION IS_RTC_TAMPER_PRECHARGE_DURATION +#define IS_TAMPER_PULLUP_STATE IS_RTC_TAMPER_PULLUP_STATE +#define IS_TAMPER_SAMPLING_FREQ IS_RTC_TAMPER_SAMPLING_FREQ +#define IS_TAMPER_TIMESTAMPONTAMPER_DETECTION IS_RTC_TAMPER_TIMESTAMPONTAMPER_DETECTION +#define IS_TAMPER_TRIGGER IS_RTC_TAMPER_TRIGGER +#define IS_WAKEUP_CLOCK IS_RTC_WAKEUP_CLOCK +#define IS_WAKEUP_COUNTER IS_RTC_WAKEUP_COUNTER + +#define __RTC_WRITEPROTECTION_ENABLE __HAL_RTC_WRITEPROTECTION_ENABLE +#define __RTC_WRITEPROTECTION_DISABLE __HAL_RTC_WRITEPROTECTION_DISABLE + +#if defined (STM32H5) +#define __HAL_RCC_RTCAPB_CLK_ENABLE __HAL_RCC_RTC_CLK_ENABLE +#define __HAL_RCC_RTCAPB_CLK_DISABLE __HAL_RCC_RTC_CLK_DISABLE +#endif /* STM32H5 */ + +/** + * @} + */ + +/** @defgroup HAL_SD_Aliased_Macros HAL SD/MMC Aliased Macros maintained for legacy purpose + * @{ + */ + +#define SD_OCR_CID_CSD_OVERWRIETE SD_OCR_CID_CSD_OVERWRITE +#define SD_CMD_SD_APP_STAUS SD_CMD_SD_APP_STATUS + +#if !defined(STM32F1) && !defined(STM32F2) && !defined(STM32F4) && !defined(STM32L1) +#define eMMC_HIGH_VOLTAGE_RANGE EMMC_HIGH_VOLTAGE_RANGE +#define eMMC_DUAL_VOLTAGE_RANGE EMMC_DUAL_VOLTAGE_RANGE +#define eMMC_LOW_VOLTAGE_RANGE EMMC_LOW_VOLTAGE_RANGE + +#define SDMMC_NSpeed_CLK_DIV SDMMC_NSPEED_CLK_DIV +#define SDMMC_HSpeed_CLK_DIV SDMMC_HSPEED_CLK_DIV +#endif + +#if defined(STM32F4) || defined(STM32F2) +#define SD_SDMMC_DISABLED SD_SDIO_DISABLED +#define SD_SDMMC_FUNCTION_BUSY SD_SDIO_FUNCTION_BUSY +#define SD_SDMMC_FUNCTION_FAILED SD_SDIO_FUNCTION_FAILED +#define SD_SDMMC_UNKNOWN_FUNCTION SD_SDIO_UNKNOWN_FUNCTION +#define SD_CMD_SDMMC_SEN_OP_COND SD_CMD_SDIO_SEN_OP_COND +#define SD_CMD_SDMMC_RW_DIRECT SD_CMD_SDIO_RW_DIRECT +#define SD_CMD_SDMMC_RW_EXTENDED SD_CMD_SDIO_RW_EXTENDED +#define __HAL_SD_SDMMC_ENABLE __HAL_SD_SDIO_ENABLE +#define __HAL_SD_SDMMC_DISABLE __HAL_SD_SDIO_DISABLE +#define __HAL_SD_SDMMC_DMA_ENABLE __HAL_SD_SDIO_DMA_ENABLE +#define __HAL_SD_SDMMC_DMA_DISABLE __HAL_SD_SDIO_DMA_DISABL +#define __HAL_SD_SDMMC_ENABLE_IT __HAL_SD_SDIO_ENABLE_IT +#define __HAL_SD_SDMMC_DISABLE_IT __HAL_SD_SDIO_DISABLE_IT +#define __HAL_SD_SDMMC_GET_FLAG __HAL_SD_SDIO_GET_FLAG +#define __HAL_SD_SDMMC_CLEAR_FLAG __HAL_SD_SDIO_CLEAR_FLAG +#define __HAL_SD_SDMMC_GET_IT __HAL_SD_SDIO_GET_IT +#define __HAL_SD_SDMMC_CLEAR_IT __HAL_SD_SDIO_CLEAR_IT +#define SDMMC_STATIC_FLAGS SDIO_STATIC_FLAGS +#define SDMMC_CMD0TIMEOUT SDIO_CMD0TIMEOUT +#define SD_SDMMC_SEND_IF_COND SD_SDIO_SEND_IF_COND +/* alias CMSIS */ +#define SDMMC1_IRQn SDIO_IRQn +#define SDMMC1_IRQHandler SDIO_IRQHandler +#endif + +#if defined(STM32F7) || defined(STM32L4) +#define SD_SDIO_DISABLED SD_SDMMC_DISABLED +#define SD_SDIO_FUNCTION_BUSY SD_SDMMC_FUNCTION_BUSY +#define SD_SDIO_FUNCTION_FAILED SD_SDMMC_FUNCTION_FAILED +#define SD_SDIO_UNKNOWN_FUNCTION 
SD_SDMMC_UNKNOWN_FUNCTION +#define SD_CMD_SDIO_SEN_OP_COND SD_CMD_SDMMC_SEN_OP_COND +#define SD_CMD_SDIO_RW_DIRECT SD_CMD_SDMMC_RW_DIRECT +#define SD_CMD_SDIO_RW_EXTENDED SD_CMD_SDMMC_RW_EXTENDED +#define __HAL_SD_SDIO_ENABLE __HAL_SD_SDMMC_ENABLE +#define __HAL_SD_SDIO_DISABLE __HAL_SD_SDMMC_DISABLE +#define __HAL_SD_SDIO_DMA_ENABLE __HAL_SD_SDMMC_DMA_ENABLE +#define __HAL_SD_SDIO_DMA_DISABL __HAL_SD_SDMMC_DMA_DISABLE +#define __HAL_SD_SDIO_ENABLE_IT __HAL_SD_SDMMC_ENABLE_IT +#define __HAL_SD_SDIO_DISABLE_IT __HAL_SD_SDMMC_DISABLE_IT +#define __HAL_SD_SDIO_GET_FLAG __HAL_SD_SDMMC_GET_FLAG +#define __HAL_SD_SDIO_CLEAR_FLAG __HAL_SD_SDMMC_CLEAR_FLAG +#define __HAL_SD_SDIO_GET_IT __HAL_SD_SDMMC_GET_IT +#define __HAL_SD_SDIO_CLEAR_IT __HAL_SD_SDMMC_CLEAR_IT +#define SDIO_STATIC_FLAGS SDMMC_STATIC_FLAGS +#define SDIO_CMD0TIMEOUT SDMMC_CMD0TIMEOUT +#define SD_SDIO_SEND_IF_COND SD_SDMMC_SEND_IF_COND +/* alias CMSIS for compatibilities */ +#define SDIO_IRQn SDMMC1_IRQn +#define SDIO_IRQHandler SDMMC1_IRQHandler +#endif + +#if defined(STM32F7) || defined(STM32F4) || defined(STM32F2) || defined(STM32L4) || defined(STM32H7) +#define HAL_SD_CardCIDTypedef HAL_SD_CardCIDTypeDef +#define HAL_SD_CardCSDTypedef HAL_SD_CardCSDTypeDef +#define HAL_SD_CardStatusTypedef HAL_SD_CardStatusTypeDef +#define HAL_SD_CardStateTypedef HAL_SD_CardStateTypeDef +#endif + +#if defined(STM32H7) || defined(STM32L5) +#define HAL_MMCEx_Read_DMADoubleBuffer0CpltCallback HAL_MMCEx_Read_DMADoubleBuf0CpltCallback +#define HAL_MMCEx_Read_DMADoubleBuffer1CpltCallback HAL_MMCEx_Read_DMADoubleBuf1CpltCallback +#define HAL_MMCEx_Write_DMADoubleBuffer0CpltCallback HAL_MMCEx_Write_DMADoubleBuf0CpltCallback +#define HAL_MMCEx_Write_DMADoubleBuffer1CpltCallback HAL_MMCEx_Write_DMADoubleBuf1CpltCallback +#define HAL_SDEx_Read_DMADoubleBuffer0CpltCallback HAL_SDEx_Read_DMADoubleBuf0CpltCallback +#define HAL_SDEx_Read_DMADoubleBuffer1CpltCallback HAL_SDEx_Read_DMADoubleBuf1CpltCallback +#define HAL_SDEx_Write_DMADoubleBuffer0CpltCallback HAL_SDEx_Write_DMADoubleBuf0CpltCallback +#define HAL_SDEx_Write_DMADoubleBuffer1CpltCallback HAL_SDEx_Write_DMADoubleBuf1CpltCallback +#define HAL_SD_DriveTransciver_1_8V_Callback HAL_SD_DriveTransceiver_1_8V_Callback +#endif +/** + * @} + */ + +/** @defgroup HAL_SMARTCARD_Aliased_Macros HAL SMARTCARD Aliased Macros maintained for legacy purpose + * @{ + */ + +#define __SMARTCARD_ENABLE_IT __HAL_SMARTCARD_ENABLE_IT +#define __SMARTCARD_DISABLE_IT __HAL_SMARTCARD_DISABLE_IT +#define __SMARTCARD_ENABLE __HAL_SMARTCARD_ENABLE +#define __SMARTCARD_DISABLE __HAL_SMARTCARD_DISABLE +#define __SMARTCARD_DMA_REQUEST_ENABLE __HAL_SMARTCARD_DMA_REQUEST_ENABLE +#define __SMARTCARD_DMA_REQUEST_DISABLE __HAL_SMARTCARD_DMA_REQUEST_DISABLE + +#define __HAL_SMARTCARD_GETCLOCKSOURCE SMARTCARD_GETCLOCKSOURCE +#define __SMARTCARD_GETCLOCKSOURCE SMARTCARD_GETCLOCKSOURCE + +#define IS_SMARTCARD_ONEBIT_SAMPLING IS_SMARTCARD_ONE_BIT_SAMPLE + +/** + * @} + */ + +/** @defgroup HAL_SMBUS_Aliased_Macros HAL SMBUS Aliased Macros maintained for legacy purpose + * @{ + */ +#define __HAL_SMBUS_RESET_CR1 SMBUS_RESET_CR1 +#define __HAL_SMBUS_RESET_CR2 SMBUS_RESET_CR2 +#define __HAL_SMBUS_GENERATE_START SMBUS_GENERATE_START +#define __HAL_SMBUS_GET_ADDR_MATCH SMBUS_GET_ADDR_MATCH +#define __HAL_SMBUS_GET_DIR SMBUS_GET_DIR +#define __HAL_SMBUS_GET_STOP_MODE SMBUS_GET_STOP_MODE +#define __HAL_SMBUS_GET_PEC_MODE SMBUS_GET_PEC_MODE +#define __HAL_SMBUS_GET_ALERT_ENABLED SMBUS_GET_ALERT_ENABLED +/** + * @} + */ + +/** @defgroup 
HAL_SPI_Aliased_Macros HAL SPI Aliased Macros maintained for legacy purpose + * @{ + */ + +#define __HAL_SPI_1LINE_TX SPI_1LINE_TX +#define __HAL_SPI_1LINE_RX SPI_1LINE_RX +#define __HAL_SPI_RESET_CRC SPI_RESET_CRC + +/** + * @} + */ + +/** @defgroup HAL_UART_Aliased_Macros HAL UART Aliased Macros maintained for legacy purpose + * @{ + */ + +#define __HAL_UART_GETCLOCKSOURCE UART_GETCLOCKSOURCE +#define __HAL_UART_MASK_COMPUTATION UART_MASK_COMPUTATION +#define __UART_GETCLOCKSOURCE UART_GETCLOCKSOURCE +#define __UART_MASK_COMPUTATION UART_MASK_COMPUTATION + +#define IS_UART_WAKEUPMETHODE IS_UART_WAKEUPMETHOD + +#define IS_UART_ONEBIT_SAMPLE IS_UART_ONE_BIT_SAMPLE +#define IS_UART_ONEBIT_SAMPLING IS_UART_ONE_BIT_SAMPLE + +/** + * @} + */ + + +/** @defgroup HAL_USART_Aliased_Macros HAL USART Aliased Macros maintained for legacy purpose + * @{ + */ + +#define __USART_ENABLE_IT __HAL_USART_ENABLE_IT +#define __USART_DISABLE_IT __HAL_USART_DISABLE_IT +#define __USART_ENABLE __HAL_USART_ENABLE +#define __USART_DISABLE __HAL_USART_DISABLE + +#define __HAL_USART_GETCLOCKSOURCE USART_GETCLOCKSOURCE +#define __USART_GETCLOCKSOURCE USART_GETCLOCKSOURCE + +#if defined(STM32F0) || defined(STM32F3) || defined(STM32F7) +#define USART_OVERSAMPLING_16 0x00000000U +#define USART_OVERSAMPLING_8 USART_CR1_OVER8 + +#define IS_USART_OVERSAMPLING(__SAMPLING__) (((__SAMPLING__) == USART_OVERSAMPLING_16) || \ + ((__SAMPLING__) == USART_OVERSAMPLING_8)) +#endif /* STM32F0 || STM32F3 || STM32F7 */ +/** + * @} + */ + +/** @defgroup HAL_USB_Aliased_Macros HAL USB Aliased Macros maintained for legacy purpose + * @{ + */ +#define USB_EXTI_LINE_WAKEUP USB_WAKEUP_EXTI_LINE + +#define USB_FS_EXTI_TRIGGER_RISING_EDGE USB_OTG_FS_WAKEUP_EXTI_RISING_EDGE +#define USB_FS_EXTI_TRIGGER_FALLING_EDGE USB_OTG_FS_WAKEUP_EXTI_FALLING_EDGE +#define USB_FS_EXTI_TRIGGER_BOTH_EDGE USB_OTG_FS_WAKEUP_EXTI_RISING_FALLING_EDGE +#define USB_FS_EXTI_LINE_WAKEUP USB_OTG_FS_WAKEUP_EXTI_LINE + +#define USB_HS_EXTI_TRIGGER_RISING_EDGE USB_OTG_HS_WAKEUP_EXTI_RISING_EDGE +#define USB_HS_EXTI_TRIGGER_FALLING_EDGE USB_OTG_HS_WAKEUP_EXTI_FALLING_EDGE +#define USB_HS_EXTI_TRIGGER_BOTH_EDGE USB_OTG_HS_WAKEUP_EXTI_RISING_FALLING_EDGE +#define USB_HS_EXTI_LINE_WAKEUP USB_OTG_HS_WAKEUP_EXTI_LINE + +#define __HAL_USB_EXTI_ENABLE_IT __HAL_USB_WAKEUP_EXTI_ENABLE_IT +#define __HAL_USB_EXTI_DISABLE_IT __HAL_USB_WAKEUP_EXTI_DISABLE_IT +#define __HAL_USB_EXTI_GET_FLAG __HAL_USB_WAKEUP_EXTI_GET_FLAG +#define __HAL_USB_EXTI_CLEAR_FLAG __HAL_USB_WAKEUP_EXTI_CLEAR_FLAG +#define __HAL_USB_EXTI_SET_RISING_EDGE_TRIGGER __HAL_USB_WAKEUP_EXTI_ENABLE_RISING_EDGE +#define __HAL_USB_EXTI_SET_FALLING_EDGE_TRIGGER __HAL_USB_WAKEUP_EXTI_ENABLE_FALLING_EDGE +#define __HAL_USB_EXTI_SET_FALLINGRISING_TRIGGER __HAL_USB_WAKEUP_EXTI_ENABLE_RISING_FALLING_EDGE + +#define __HAL_USB_FS_EXTI_ENABLE_IT __HAL_USB_OTG_FS_WAKEUP_EXTI_ENABLE_IT +#define __HAL_USB_FS_EXTI_DISABLE_IT __HAL_USB_OTG_FS_WAKEUP_EXTI_DISABLE_IT +#define __HAL_USB_FS_EXTI_GET_FLAG __HAL_USB_OTG_FS_WAKEUP_EXTI_GET_FLAG +#define __HAL_USB_FS_EXTI_CLEAR_FLAG __HAL_USB_OTG_FS_WAKEUP_EXTI_CLEAR_FLAG +#define __HAL_USB_FS_EXTI_SET_RISING_EGDE_TRIGGER __HAL_USB_OTG_FS_WAKEUP_EXTI_ENABLE_RISING_EDGE +#define __HAL_USB_FS_EXTI_SET_FALLING_EGDE_TRIGGER __HAL_USB_OTG_FS_WAKEUP_EXTI_ENABLE_FALLING_EDGE +#define __HAL_USB_FS_EXTI_SET_FALLINGRISING_TRIGGER __HAL_USB_OTG_FS_WAKEUP_EXTI_ENABLE_RISING_FALLING_EDGE +#define __HAL_USB_FS_EXTI_GENERATE_SWIT __HAL_USB_OTG_FS_WAKEUP_EXTI_GENERATE_SWIT + +#define __HAL_USB_HS_EXTI_ENABLE_IT 
__HAL_USB_OTG_HS_WAKEUP_EXTI_ENABLE_IT +#define __HAL_USB_HS_EXTI_DISABLE_IT __HAL_USB_OTG_HS_WAKEUP_EXTI_DISABLE_IT +#define __HAL_USB_HS_EXTI_GET_FLAG __HAL_USB_OTG_HS_WAKEUP_EXTI_GET_FLAG +#define __HAL_USB_HS_EXTI_CLEAR_FLAG __HAL_USB_OTG_HS_WAKEUP_EXTI_CLEAR_FLAG +#define __HAL_USB_HS_EXTI_SET_RISING_EGDE_TRIGGER __HAL_USB_OTG_HS_WAKEUP_EXTI_ENABLE_RISING_EDGE +#define __HAL_USB_HS_EXTI_SET_FALLING_EGDE_TRIGGER __HAL_USB_OTG_HS_WAKEUP_EXTI_ENABLE_FALLING_EDGE +#define __HAL_USB_HS_EXTI_SET_FALLINGRISING_TRIGGER __HAL_USB_OTG_HS_WAKEUP_EXTI_ENABLE_RISING_FALLING_EDGE +#define __HAL_USB_HS_EXTI_GENERATE_SWIT __HAL_USB_OTG_HS_WAKEUP_EXTI_GENERATE_SWIT + +#define HAL_PCD_ActiveRemoteWakeup HAL_PCD_ActivateRemoteWakeup +#define HAL_PCD_DeActiveRemoteWakeup HAL_PCD_DeActivateRemoteWakeup + +#define HAL_PCD_SetTxFiFo HAL_PCDEx_SetTxFiFo +#define HAL_PCD_SetRxFiFo HAL_PCDEx_SetRxFiFo +/** + * @} + */ + +/** @defgroup HAL_TIM_Aliased_Macros HAL TIM Aliased Macros maintained for legacy purpose + * @{ + */ +#define __HAL_TIM_SetICPrescalerValue TIM_SET_ICPRESCALERVALUE +#define __HAL_TIM_ResetICPrescalerValue TIM_RESET_ICPRESCALERVALUE + +#define TIM_GET_ITSTATUS __HAL_TIM_GET_IT_SOURCE +#define TIM_GET_CLEAR_IT __HAL_TIM_CLEAR_IT + +#define __HAL_TIM_GET_ITSTATUS __HAL_TIM_GET_IT_SOURCE + +#define __HAL_TIM_DIRECTION_STATUS __HAL_TIM_IS_TIM_COUNTING_DOWN +#define __HAL_TIM_PRESCALER __HAL_TIM_SET_PRESCALER +#define __HAL_TIM_SetCounter __HAL_TIM_SET_COUNTER +#define __HAL_TIM_GetCounter __HAL_TIM_GET_COUNTER +#define __HAL_TIM_SetAutoreload __HAL_TIM_SET_AUTORELOAD +#define __HAL_TIM_GetAutoreload __HAL_TIM_GET_AUTORELOAD +#define __HAL_TIM_SetClockDivision __HAL_TIM_SET_CLOCKDIVISION +#define __HAL_TIM_GetClockDivision __HAL_TIM_GET_CLOCKDIVISION +#define __HAL_TIM_SetICPrescaler __HAL_TIM_SET_ICPRESCALER +#define __HAL_TIM_GetICPrescaler __HAL_TIM_GET_ICPRESCALER +#define __HAL_TIM_SetCompare __HAL_TIM_SET_COMPARE +#define __HAL_TIM_GetCompare __HAL_TIM_GET_COMPARE + +#define TIM_BREAKINPUTSOURCE_DFSDM TIM_BREAKINPUTSOURCE_DFSDM1 + +#define TIM_OCMODE_ASSYMETRIC_PWM1 TIM_OCMODE_ASYMMETRIC_PWM1 +#define TIM_OCMODE_ASSYMETRIC_PWM2 TIM_OCMODE_ASYMMETRIC_PWM2 +/** + * @} + */ + +/** @defgroup HAL_ETH_Aliased_Macros HAL ETH Aliased Macros maintained for legacy purpose + * @{ + */ + +#define __HAL_ETH_EXTI_ENABLE_IT __HAL_ETH_WAKEUP_EXTI_ENABLE_IT +#define __HAL_ETH_EXTI_DISABLE_IT __HAL_ETH_WAKEUP_EXTI_DISABLE_IT +#define __HAL_ETH_EXTI_GET_FLAG __HAL_ETH_WAKEUP_EXTI_GET_FLAG +#define __HAL_ETH_EXTI_CLEAR_FLAG __HAL_ETH_WAKEUP_EXTI_CLEAR_FLAG +#define __HAL_ETH_EXTI_SET_RISING_EGDE_TRIGGER __HAL_ETH_WAKEUP_EXTI_ENABLE_RISING_EDGE_TRIGGER +#define __HAL_ETH_EXTI_SET_FALLING_EGDE_TRIGGER __HAL_ETH_WAKEUP_EXTI_ENABLE_FALLING_EDGE_TRIGGER +#define __HAL_ETH_EXTI_SET_FALLINGRISING_TRIGGER __HAL_ETH_WAKEUP_EXTI_ENABLE_FALLINGRISING_TRIGGER + +#define ETH_PROMISCIOUSMODE_ENABLE ETH_PROMISCUOUS_MODE_ENABLE +#define ETH_PROMISCIOUSMODE_DISABLE ETH_PROMISCUOUS_MODE_DISABLE +#define IS_ETH_PROMISCIOUS_MODE IS_ETH_PROMISCUOUS_MODE +/** + * @} + */ + +/** @defgroup HAL_LTDC_Aliased_Macros HAL LTDC Aliased Macros maintained for legacy purpose + * @{ + */ +#define __HAL_LTDC_LAYER LTDC_LAYER +#define __HAL_LTDC_RELOAD_CONFIG __HAL_LTDC_RELOAD_IMMEDIATE_CONFIG +/** + * @} + */ + +/** @defgroup HAL_SAI_Aliased_Macros HAL SAI Aliased Macros maintained for legacy purpose + * @{ + */ +#define SAI_OUTPUTDRIVE_DISABLED SAI_OUTPUTDRIVE_DISABLE +#define SAI_OUTPUTDRIVE_ENABLED SAI_OUTPUTDRIVE_ENABLE +#define 
SAI_MASTERDIVIDER_ENABLED SAI_MASTERDIVIDER_ENABLE +#define SAI_MASTERDIVIDER_DISABLED SAI_MASTERDIVIDER_DISABLE +#define SAI_STREOMODE SAI_STEREOMODE +#define SAI_FIFOStatus_Empty SAI_FIFOSTATUS_EMPTY +#define SAI_FIFOStatus_Less1QuarterFull SAI_FIFOSTATUS_LESS1QUARTERFULL +#define SAI_FIFOStatus_1QuarterFull SAI_FIFOSTATUS_1QUARTERFULL +#define SAI_FIFOStatus_HalfFull SAI_FIFOSTATUS_HALFFULL +#define SAI_FIFOStatus_3QuartersFull SAI_FIFOSTATUS_3QUARTERFULL +#define SAI_FIFOStatus_Full SAI_FIFOSTATUS_FULL +#define IS_SAI_BLOCK_MONO_STREO_MODE IS_SAI_BLOCK_MONO_STEREO_MODE +#define SAI_SYNCHRONOUS_EXT SAI_SYNCHRONOUS_EXT_SAI1 +#define SAI_SYNCEXT_IN_ENABLE SAI_SYNCEXT_OUTBLOCKA_ENABLE +/** + * @} + */ + +/** @defgroup HAL_SPDIFRX_Aliased_Macros HAL SPDIFRX Aliased Macros maintained for legacy purpose + * @{ + */ +#if defined(STM32H7) +#define HAL_SPDIFRX_ReceiveControlFlow HAL_SPDIFRX_ReceiveCtrlFlow +#define HAL_SPDIFRX_ReceiveControlFlow_IT HAL_SPDIFRX_ReceiveCtrlFlow_IT +#define HAL_SPDIFRX_ReceiveControlFlow_DMA HAL_SPDIFRX_ReceiveCtrlFlow_DMA +#endif +/** + * @} + */ + +/** @defgroup HAL_HRTIM_Aliased_Functions HAL HRTIM Aliased Functions maintained for legacy purpose + * @{ + */ +#if defined (STM32H7) || defined (STM32G4) || defined (STM32F3) +#define HAL_HRTIM_WaveformCounterStart_IT HAL_HRTIM_WaveformCountStart_IT +#define HAL_HRTIM_WaveformCounterStart_DMA HAL_HRTIM_WaveformCountStart_DMA +#define HAL_HRTIM_WaveformCounterStart HAL_HRTIM_WaveformCountStart +#define HAL_HRTIM_WaveformCounterStop_IT HAL_HRTIM_WaveformCountStop_IT +#define HAL_HRTIM_WaveformCounterStop_DMA HAL_HRTIM_WaveformCountStop_DMA +#define HAL_HRTIM_WaveformCounterStop HAL_HRTIM_WaveformCountStop +#endif +/** + * @} + */ + +/** @defgroup HAL_QSPI_Aliased_Macros HAL QSPI Aliased Macros maintained for legacy purpose + * @{ + */ +#if defined (STM32L4) || defined (STM32F4) || defined (STM32F7) || defined(STM32H7) +#define HAL_QPSI_TIMEOUT_DEFAULT_VALUE HAL_QSPI_TIMEOUT_DEFAULT_VALUE +#endif /* STM32L4 || STM32F4 || STM32F7 */ +/** + * @} + */ + +/** @defgroup HAL_Generic_Aliased_Macros HAL Generic Aliased Macros maintained for legacy purpose + * @{ + */ +#if defined (STM32F7) +#define ART_ACCLERATOR_ENABLE ART_ACCELERATOR_ENABLE +#endif /* STM32F7 */ +/** + * @} + */ + +/** @defgroup HAL_PPP_Aliased_Macros HAL PPP Aliased Macros maintained for legacy purpose + * @{ + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32_HAL_LEGACY */ + + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal.h new file mode 100644 index 0000000..f7eb847 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal.h @@ -0,0 +1,297 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal.h + * @author MCD Application Team + * @brief This file contains all the functions prototypes for the HAL + * module driver. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_HAL_H +#define __STM32F4xx_HAL_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal_conf.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @addtogroup HAL + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup HAL_Exported_Constants HAL Exported Constants + * @{ + */ + +/** @defgroup HAL_TICK_FREQ Tick Frequency + * @{ + */ +typedef enum +{ + HAL_TICK_FREQ_10HZ = 100U, + HAL_TICK_FREQ_100HZ = 10U, + HAL_TICK_FREQ_1KHZ = 1U, + HAL_TICK_FREQ_DEFAULT = HAL_TICK_FREQ_1KHZ +} HAL_TickFreqTypeDef; +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup HAL_Exported_Macros HAL Exported Macros + * @{ + */ + +/** @brief Freeze/Unfreeze Peripherals in Debug mode + */ +#define __HAL_DBGMCU_FREEZE_TIM2() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM2_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM3() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM3_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM4() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM4_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM5() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM5_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM6() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM6_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM7() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM7_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM12() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM12_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM13() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM13_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM14() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM14_STOP)) +#define __HAL_DBGMCU_FREEZE_RTC() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_RTC_STOP)) +#define __HAL_DBGMCU_FREEZE_WWDG() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_WWDG_STOP)) +#define __HAL_DBGMCU_FREEZE_IWDG() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_IWDG_STOP)) +#define __HAL_DBGMCU_FREEZE_I2C1_TIMEOUT() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_I2C1_SMBUS_TIMEOUT)) +#define __HAL_DBGMCU_FREEZE_I2C2_TIMEOUT() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_I2C2_SMBUS_TIMEOUT)) +#define __HAL_DBGMCU_FREEZE_I2C3_TIMEOUT() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_I2C3_SMBUS_TIMEOUT)) +#define __HAL_DBGMCU_FREEZE_CAN1() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_CAN1_STOP)) +#define __HAL_DBGMCU_FREEZE_CAN2() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_CAN2_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM1() (DBGMCU->APB2FZ |= (DBGMCU_APB2_FZ_DBG_TIM1_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM8() (DBGMCU->APB2FZ |= (DBGMCU_APB2_FZ_DBG_TIM8_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM9() (DBGMCU->APB2FZ |= (DBGMCU_APB2_FZ_DBG_TIM9_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM10() (DBGMCU->APB2FZ |= (DBGMCU_APB2_FZ_DBG_TIM10_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM11() (DBGMCU->APB2FZ |= (DBGMCU_APB2_FZ_DBG_TIM11_STOP)) + +#define __HAL_DBGMCU_UNFREEZE_TIM2() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM2_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM3() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM3_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM4() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM4_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM5() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM5_STOP)) +#define 
__HAL_DBGMCU_UNFREEZE_TIM6() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM6_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM7() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM7_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM12() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM12_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM13() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM13_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM14() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM14_STOP)) +#define __HAL_DBGMCU_UNFREEZE_RTC() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_RTC_STOP)) +#define __HAL_DBGMCU_UNFREEZE_WWDG() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_WWDG_STOP)) +#define __HAL_DBGMCU_UNFREEZE_IWDG() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_IWDG_STOP)) +#define __HAL_DBGMCU_UNFREEZE_I2C1_TIMEOUT() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_I2C1_SMBUS_TIMEOUT)) +#define __HAL_DBGMCU_UNFREEZE_I2C2_TIMEOUT() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_I2C2_SMBUS_TIMEOUT)) +#define __HAL_DBGMCU_UNFREEZE_I2C3_TIMEOUT() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_I2C3_SMBUS_TIMEOUT)) +#define __HAL_DBGMCU_UNFREEZE_CAN1() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_CAN1_STOP)) +#define __HAL_DBGMCU_UNFREEZE_CAN2() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_CAN2_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM1() (DBGMCU->APB2FZ &= ~(DBGMCU_APB2_FZ_DBG_TIM1_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM8() (DBGMCU->APB2FZ &= ~(DBGMCU_APB2_FZ_DBG_TIM8_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM9() (DBGMCU->APB2FZ &= ~(DBGMCU_APB2_FZ_DBG_TIM9_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM10() (DBGMCU->APB2FZ &= ~(DBGMCU_APB2_FZ_DBG_TIM10_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM11() (DBGMCU->APB2FZ &= ~(DBGMCU_APB2_FZ_DBG_TIM11_STOP)) + +/** @brief Main Flash memory mapped at 0x00000000 + */ +#define __HAL_SYSCFG_REMAPMEMORY_FLASH() (SYSCFG->MEMRMP &= ~(SYSCFG_MEMRMP_MEM_MODE)) + +/** @brief System Flash memory mapped at 0x00000000 + */ +#define __HAL_SYSCFG_REMAPMEMORY_SYSTEMFLASH() do {SYSCFG->MEMRMP &= ~(SYSCFG_MEMRMP_MEM_MODE);\ + SYSCFG->MEMRMP |= SYSCFG_MEMRMP_MEM_MODE_0;\ + }while(0); + +/** @brief Embedded SRAM mapped at 0x00000000 + */ +#define __HAL_SYSCFG_REMAPMEMORY_SRAM() do {SYSCFG->MEMRMP &= ~(SYSCFG_MEMRMP_MEM_MODE);\ + SYSCFG->MEMRMP |= (SYSCFG_MEMRMP_MEM_MODE_0 | SYSCFG_MEMRMP_MEM_MODE_1);\ + }while(0); + +#if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx)|| defined(STM32F417xx) +/** @brief FSMC Bank1 (NOR/PSRAM 1 and 2) mapped at 0x00000000 + */ +#define __HAL_SYSCFG_REMAPMEMORY_FSMC() do {SYSCFG->MEMRMP &= ~(SYSCFG_MEMRMP_MEM_MODE);\ + SYSCFG->MEMRMP |= (SYSCFG_MEMRMP_MEM_MODE_1);\ + }while(0); +#endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx */ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx)|| defined(STM32F439xx) ||\ + defined(STM32F469xx) || defined(STM32F479xx) +/** @brief FMC Bank1 (NOR/PSRAM 1 and 2) mapped at 0x00000000 + */ +#define __HAL_SYSCFG_REMAPMEMORY_FMC() do {SYSCFG->MEMRMP &= ~(SYSCFG_MEMRMP_MEM_MODE);\ + SYSCFG->MEMRMP |= (SYSCFG_MEMRMP_MEM_MODE_1);\ + }while(0); + +/** @brief FMC/SDRAM Bank 1 and 2 mapped at 0x00000000 + */ +#define __HAL_SYSCFG_REMAPMEMORY_FMC_SDRAM() do {SYSCFG->MEMRMP &= ~(SYSCFG_MEMRMP_MEM_MODE);\ + SYSCFG->MEMRMP |= (SYSCFG_MEMRMP_MEM_MODE_2);\ + }while(0); +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ + +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) || defined(STM32F413xx) || defined(STM32F423xx) +/** @defgroup Cortex_Lockup_Enable Cortex Lockup Enable + * @{ + */ +/** 
@brief SYSCFG Break Lockup lock + * Enables and locks the connection of Cortex-M4 LOCKUP (Hardfault) output to TIM1/8 input + * @note The selected configuration is locked and can be unlocked by system reset + */ +#define __HAL_SYSCFG_BREAK_PVD_LOCK() do {SYSCFG->CFGR2 &= ~(SYSCFG_CFGR2_PVD_LOCK); \ + SYSCFG->CFGR2 |= SYSCFG_CFGR2_PVD_LOCK; \ + }while(0) +/** + * @} + */ + +/** @defgroup PVD_Lock_Enable PVD Lock + * @{ + */ +/** @brief SYSCFG Break PVD lock + * Enables and locks the PVD connection with Timer1/8 Break Input, , as well as the PVDE and PLS[2:0] in the PWR_CR register + * @note The selected configuration is locked and can be unlocked by system reset + */ +#define __HAL_SYSCFG_BREAK_LOCKUP_LOCK() do {SYSCFG->CFGR2 &= ~(SYSCFG_CFGR2_LOCKUP_LOCK); \ + SYSCFG->CFGR2 |= SYSCFG_CFGR2_LOCKUP_LOCK; \ + }while(0) +/** + * @} + */ +#endif /* STM32F410Tx || STM32F410Cx || STM32F410Rx || STM32F413xx || STM32F423xx */ +/** + * @} + */ + +/** @defgroup HAL_Private_Macros HAL Private Macros + * @{ + */ +#define IS_TICKFREQ(FREQ) (((FREQ) == HAL_TICK_FREQ_10HZ) || \ + ((FREQ) == HAL_TICK_FREQ_100HZ) || \ + ((FREQ) == HAL_TICK_FREQ_1KHZ)) +/** + * @} + */ + +/* Exported variables --------------------------------------------------------*/ + +/** @addtogroup HAL_Exported_Variables + * @{ + */ +extern __IO uint32_t uwTick; +extern uint32_t uwTickPrio; +extern HAL_TickFreqTypeDef uwTickFreq; +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup HAL_Exported_Functions + * @{ + */ +/** @addtogroup HAL_Exported_Functions_Group1 + * @{ + */ +/* Initialization and Configuration functions ******************************/ +HAL_StatusTypeDef HAL_Init(void); +HAL_StatusTypeDef HAL_DeInit(void); +void HAL_MspInit(void); +void HAL_MspDeInit(void); +HAL_StatusTypeDef HAL_InitTick (uint32_t TickPriority); +/** + * @} + */ + +/** @addtogroup HAL_Exported_Functions_Group2 + * @{ + */ +/* Peripheral Control functions ************************************************/ +void HAL_IncTick(void); +void HAL_Delay(uint32_t Delay); +uint32_t HAL_GetTick(void); +uint32_t HAL_GetTickPrio(void); +HAL_StatusTypeDef HAL_SetTickFreq(HAL_TickFreqTypeDef Freq); +HAL_TickFreqTypeDef HAL_GetTickFreq(void); +void HAL_SuspendTick(void); +void HAL_ResumeTick(void); +uint32_t HAL_GetHalVersion(void); +uint32_t HAL_GetREVID(void); +uint32_t HAL_GetDEVID(void); +void HAL_DBGMCU_EnableDBGSleepMode(void); +void HAL_DBGMCU_DisableDBGSleepMode(void); +void HAL_DBGMCU_EnableDBGStopMode(void); +void HAL_DBGMCU_DisableDBGStopMode(void); +void HAL_DBGMCU_EnableDBGStandbyMode(void); +void HAL_DBGMCU_DisableDBGStandbyMode(void); +void HAL_EnableCompensationCell(void); +void HAL_DisableCompensationCell(void); +uint32_t HAL_GetUIDw0(void); +uint32_t HAL_GetUIDw1(void); +uint32_t HAL_GetUIDw2(void); +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx)|| defined(STM32F439xx) ||\ + defined(STM32F469xx) || defined(STM32F479xx) +void HAL_EnableMemorySwappingBank(void); +void HAL_DisableMemorySwappingBank(void); +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ +/** + * @} + */ + +/** + * @} + */ +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/** @defgroup HAL_Private_Variables HAL Private Variables + * @{ + */ +/** + * @} + */ +/* Private constants ---------------------------------------------------------*/ 
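/* Editor's note: the block below is an illustrative usage sketch, not part of the ST header.
 * It only combines services already declared above (HAL_Init, HAL_SetTickFreq, HAL_GetTick,
 * HAL_Delay and the DBGMCU freeze macros); clock setup is assumed to happen elsewhere
 * (e.g. in a user-provided SystemClock_Config()). */
#if 0 /* example only, not compiled */
static void example_hal_timebase_usage(void)
{
  /* HAL_Init() configures the SysTick time base at HAL_TICK_FREQ_DEFAULT (1 kHz). */
  (void)HAL_Init();

  /* Optionally slow the tick down to 100 Hz; the return status should be checked. */
  if (HAL_SetTickFreq(HAL_TICK_FREQ_100HZ) != HAL_OK)
  {
    /* keep the default 1 kHz tick if the request is rejected */
  }

  /* Stop TIM2 while the core is halted by the debugger (sets DBG_TIM2_STOP in APB1FZ). */
  __HAL_DBGMCU_FREEZE_TIM2();

  uint32_t start = HAL_GetTick();  /* current tick count (uwTick) */
  HAL_Delay(10U);                  /* blocking wait of at least 10 ticks */
  (void)(HAL_GetTick() - start);
}
#endif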
+/** @defgroup HAL_Private_Constants HAL Private Constants + * @{ + */ +/** + * @} + */ +/* Private macros ------------------------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_HAL_H */ + + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_cortex.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_cortex.h new file mode 100644 index 0000000..7690930 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_cortex.h @@ -0,0 +1,410 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_cortex.h + * @author MCD Application Team + * @brief Header file of CORTEX HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_HAL_CORTEX_H +#define __STM32F4xx_HAL_CORTEX_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal_def.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @addtogroup CORTEX + * @{ + */ +/* Exported types ------------------------------------------------------------*/ +/** @defgroup CORTEX_Exported_Types Cortex Exported Types + * @{ + */ + +#if (__MPU_PRESENT == 1U) +/** @defgroup CORTEX_MPU_Region_Initialization_Structure_definition MPU Region Initialization Structure Definition + * @brief MPU Region initialization structure + * @{ + */ +typedef struct +{ + uint8_t Enable; /*!< Specifies the status of the region. + This parameter can be a value of @ref CORTEX_MPU_Region_Enable */ + uint8_t Number; /*!< Specifies the number of the region to protect. + This parameter can be a value of @ref CORTEX_MPU_Region_Number */ + uint32_t BaseAddress; /*!< Specifies the base address of the region to protect. */ + uint8_t Size; /*!< Specifies the size of the region to protect. + This parameter can be a value of @ref CORTEX_MPU_Region_Size */ + uint8_t SubRegionDisable; /*!< Specifies the number of the subregion protection to disable. + This parameter must be a number between Min_Data = 0x00 and Max_Data = 0xFF */ + uint8_t TypeExtField; /*!< Specifies the TEX field level. + This parameter can be a value of @ref CORTEX_MPU_TEX_Levels */ + uint8_t AccessPermission; /*!< Specifies the region access permission type. + This parameter can be a value of @ref CORTEX_MPU_Region_Permission_Attributes */ + uint8_t DisableExec; /*!< Specifies the instruction access status. + This parameter can be a value of @ref CORTEX_MPU_Instruction_Access */ + uint8_t IsShareable; /*!< Specifies the shareability status of the protected region. + This parameter can be a value of @ref CORTEX_MPU_Access_Shareable */ + uint8_t IsCacheable; /*!< Specifies the cacheable status of the region protected. + This parameter can be a value of @ref CORTEX_MPU_Access_Cacheable */ + uint8_t IsBufferable; /*!< Specifies the bufferable status of the protected region. 
+ This parameter can be a value of @ref CORTEX_MPU_Access_Bufferable */ +}MPU_Region_InitTypeDef; +/** + * @} + */ +#endif /* __MPU_PRESENT */ + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup CORTEX_Exported_Constants CORTEX Exported Constants + * @{ + */ + +/** @defgroup CORTEX_Preemption_Priority_Group CORTEX Preemption Priority Group + * @{ + */ +#define NVIC_PRIORITYGROUP_0 0x00000007U /*!< 0 bits for pre-emption priority + 4 bits for subpriority */ +#define NVIC_PRIORITYGROUP_1 0x00000006U /*!< 1 bits for pre-emption priority + 3 bits for subpriority */ +#define NVIC_PRIORITYGROUP_2 0x00000005U /*!< 2 bits for pre-emption priority + 2 bits for subpriority */ +#define NVIC_PRIORITYGROUP_3 0x00000004U /*!< 3 bits for pre-emption priority + 1 bits for subpriority */ +#define NVIC_PRIORITYGROUP_4 0x00000003U /*!< 4 bits for pre-emption priority + 0 bits for subpriority */ +/** + * @} + */ + +/** @defgroup CORTEX_SysTick_clock_source CORTEX _SysTick clock source + * @{ + */ +#define SYSTICK_CLKSOURCE_HCLK_DIV8 0x00000000U +#define SYSTICK_CLKSOURCE_HCLK 0x00000004U + +/** + * @} + */ + +#if (__MPU_PRESENT == 1) +/** @defgroup CORTEX_MPU_HFNMI_PRIVDEF_Control MPU HFNMI and PRIVILEGED Access control + * @{ + */ +#define MPU_HFNMI_PRIVDEF_NONE 0x00000000U +#define MPU_HARDFAULT_NMI MPU_CTRL_HFNMIENA_Msk +#define MPU_PRIVILEGED_DEFAULT MPU_CTRL_PRIVDEFENA_Msk +#define MPU_HFNMI_PRIVDEF (MPU_CTRL_HFNMIENA_Msk | MPU_CTRL_PRIVDEFENA_Msk) + +/** + * @} + */ + +/** @defgroup CORTEX_MPU_Region_Enable CORTEX MPU Region Enable + * @{ + */ +#define MPU_REGION_ENABLE ((uint8_t)0x01) +#define MPU_REGION_DISABLE ((uint8_t)0x00) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_Instruction_Access CORTEX MPU Instruction Access + * @{ + */ +#define MPU_INSTRUCTION_ACCESS_ENABLE ((uint8_t)0x00) +#define MPU_INSTRUCTION_ACCESS_DISABLE ((uint8_t)0x01) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_Access_Shareable CORTEX MPU Instruction Access Shareable + * @{ + */ +#define MPU_ACCESS_SHAREABLE ((uint8_t)0x01) +#define MPU_ACCESS_NOT_SHAREABLE ((uint8_t)0x00) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_Access_Cacheable CORTEX MPU Instruction Access Cacheable + * @{ + */ +#define MPU_ACCESS_CACHEABLE ((uint8_t)0x01) +#define MPU_ACCESS_NOT_CACHEABLE ((uint8_t)0x00) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_Access_Bufferable CORTEX MPU Instruction Access Bufferable + * @{ + */ +#define MPU_ACCESS_BUFFERABLE ((uint8_t)0x01) +#define MPU_ACCESS_NOT_BUFFERABLE ((uint8_t)0x00) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_TEX_Levels MPU TEX Levels + * @{ + */ +#define MPU_TEX_LEVEL0 ((uint8_t)0x00) +#define MPU_TEX_LEVEL1 ((uint8_t)0x01) +#define MPU_TEX_LEVEL2 ((uint8_t)0x02) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_Region_Size CORTEX MPU Region Size + * @{ + */ +#define MPU_REGION_SIZE_32B ((uint8_t)0x04) +#define MPU_REGION_SIZE_64B ((uint8_t)0x05) +#define MPU_REGION_SIZE_128B ((uint8_t)0x06) +#define MPU_REGION_SIZE_256B ((uint8_t)0x07) +#define MPU_REGION_SIZE_512B ((uint8_t)0x08) +#define MPU_REGION_SIZE_1KB ((uint8_t)0x09) +#define MPU_REGION_SIZE_2KB ((uint8_t)0x0A) +#define MPU_REGION_SIZE_4KB ((uint8_t)0x0B) +#define MPU_REGION_SIZE_8KB ((uint8_t)0x0C) +#define MPU_REGION_SIZE_16KB ((uint8_t)0x0D) +#define MPU_REGION_SIZE_32KB ((uint8_t)0x0E) +#define MPU_REGION_SIZE_64KB ((uint8_t)0x0F) +#define MPU_REGION_SIZE_128KB ((uint8_t)0x10) +#define MPU_REGION_SIZE_256KB ((uint8_t)0x11) +#define MPU_REGION_SIZE_512KB ((uint8_t)0x12) +#define 
MPU_REGION_SIZE_1MB ((uint8_t)0x13) +#define MPU_REGION_SIZE_2MB ((uint8_t)0x14) +#define MPU_REGION_SIZE_4MB ((uint8_t)0x15) +#define MPU_REGION_SIZE_8MB ((uint8_t)0x16) +#define MPU_REGION_SIZE_16MB ((uint8_t)0x17) +#define MPU_REGION_SIZE_32MB ((uint8_t)0x18) +#define MPU_REGION_SIZE_64MB ((uint8_t)0x19) +#define MPU_REGION_SIZE_128MB ((uint8_t)0x1A) +#define MPU_REGION_SIZE_256MB ((uint8_t)0x1B) +#define MPU_REGION_SIZE_512MB ((uint8_t)0x1C) +#define MPU_REGION_SIZE_1GB ((uint8_t)0x1D) +#define MPU_REGION_SIZE_2GB ((uint8_t)0x1E) +#define MPU_REGION_SIZE_4GB ((uint8_t)0x1F) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_Region_Permission_Attributes CORTEX MPU Region Permission Attributes + * @{ + */ +#define MPU_REGION_NO_ACCESS ((uint8_t)0x00) +#define MPU_REGION_PRIV_RW ((uint8_t)0x01) +#define MPU_REGION_PRIV_RW_URO ((uint8_t)0x02) +#define MPU_REGION_FULL_ACCESS ((uint8_t)0x03) +#define MPU_REGION_PRIV_RO ((uint8_t)0x05) +#define MPU_REGION_PRIV_RO_URO ((uint8_t)0x06) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_Region_Number CORTEX MPU Region Number + * @{ + */ +#define MPU_REGION_NUMBER0 ((uint8_t)0x00) +#define MPU_REGION_NUMBER1 ((uint8_t)0x01) +#define MPU_REGION_NUMBER2 ((uint8_t)0x02) +#define MPU_REGION_NUMBER3 ((uint8_t)0x03) +#define MPU_REGION_NUMBER4 ((uint8_t)0x04) +#define MPU_REGION_NUMBER5 ((uint8_t)0x05) +#define MPU_REGION_NUMBER6 ((uint8_t)0x06) +#define MPU_REGION_NUMBER7 ((uint8_t)0x07) +/** + * @} + */ +#endif /* __MPU_PRESENT */ + +/** + * @} + */ + + +/* Exported Macros -----------------------------------------------------------*/ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup CORTEX_Exported_Functions + * @{ + */ + +/** @addtogroup CORTEX_Exported_Functions_Group1 + * @{ + */ +/* Initialization and de-initialization functions *****************************/ +void HAL_NVIC_SetPriorityGrouping(uint32_t PriorityGroup); +void HAL_NVIC_SetPriority(IRQn_Type IRQn, uint32_t PreemptPriority, uint32_t SubPriority); +void HAL_NVIC_EnableIRQ(IRQn_Type IRQn); +void HAL_NVIC_DisableIRQ(IRQn_Type IRQn); +void HAL_NVIC_SystemReset(void); +uint32_t HAL_SYSTICK_Config(uint32_t TicksNumb); +/** + * @} + */ + +/** @addtogroup CORTEX_Exported_Functions_Group2 + * @{ + */ +/* Peripheral Control functions ***********************************************/ +uint32_t HAL_NVIC_GetPriorityGrouping(void); +void HAL_NVIC_GetPriority(IRQn_Type IRQn, uint32_t PriorityGroup, uint32_t* pPreemptPriority, uint32_t* pSubPriority); +uint32_t HAL_NVIC_GetPendingIRQ(IRQn_Type IRQn); +void HAL_NVIC_SetPendingIRQ(IRQn_Type IRQn); +void HAL_NVIC_ClearPendingIRQ(IRQn_Type IRQn); +uint32_t HAL_NVIC_GetActive(IRQn_Type IRQn); +void HAL_SYSTICK_CLKSourceConfig(uint32_t CLKSource); +void HAL_SYSTICK_IRQHandler(void); +void HAL_SYSTICK_Callback(void); + +#if (__MPU_PRESENT == 1U) +void HAL_MPU_Enable(uint32_t MPU_Control); +void HAL_MPU_Disable(void); +void HAL_MPU_EnableRegion(uint32_t RegionNumber); +void HAL_MPU_DisableRegion(uint32_t RegionNumber); +void HAL_MPU_ConfigRegion(MPU_Region_InitTypeDef *MPU_Init); +#endif /* __MPU_PRESENT */ +void HAL_CORTEX_ClearEvent(void); +/** + * @} + */ + +/** + * @} + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/** @defgroup 
CORTEX_Private_Macros CORTEX Private Macros + * @{ + */ +#define IS_NVIC_PRIORITY_GROUP(GROUP) (((GROUP) == NVIC_PRIORITYGROUP_0) || \ + ((GROUP) == NVIC_PRIORITYGROUP_1) || \ + ((GROUP) == NVIC_PRIORITYGROUP_2) || \ + ((GROUP) == NVIC_PRIORITYGROUP_3) || \ + ((GROUP) == NVIC_PRIORITYGROUP_4)) + +#define IS_NVIC_PREEMPTION_PRIORITY(PRIORITY) ((PRIORITY) < 0x10U) + +#define IS_NVIC_SUB_PRIORITY(PRIORITY) ((PRIORITY) < 0x10U) + +#define IS_NVIC_DEVICE_IRQ(IRQ) ((IRQ) >= (IRQn_Type)0x00U) + +#define IS_SYSTICK_CLK_SOURCE(SOURCE) (((SOURCE) == SYSTICK_CLKSOURCE_HCLK) || \ + ((SOURCE) == SYSTICK_CLKSOURCE_HCLK_DIV8)) + +#if (__MPU_PRESENT == 1U) +#define IS_MPU_REGION_ENABLE(STATE) (((STATE) == MPU_REGION_ENABLE) || \ + ((STATE) == MPU_REGION_DISABLE)) + +#define IS_MPU_INSTRUCTION_ACCESS(STATE) (((STATE) == MPU_INSTRUCTION_ACCESS_ENABLE) || \ + ((STATE) == MPU_INSTRUCTION_ACCESS_DISABLE)) + +#define IS_MPU_ACCESS_SHAREABLE(STATE) (((STATE) == MPU_ACCESS_SHAREABLE) || \ + ((STATE) == MPU_ACCESS_NOT_SHAREABLE)) + +#define IS_MPU_ACCESS_CACHEABLE(STATE) (((STATE) == MPU_ACCESS_CACHEABLE) || \ + ((STATE) == MPU_ACCESS_NOT_CACHEABLE)) + +#define IS_MPU_ACCESS_BUFFERABLE(STATE) (((STATE) == MPU_ACCESS_BUFFERABLE) || \ + ((STATE) == MPU_ACCESS_NOT_BUFFERABLE)) + +#define IS_MPU_TEX_LEVEL(TYPE) (((TYPE) == MPU_TEX_LEVEL0) || \ + ((TYPE) == MPU_TEX_LEVEL1) || \ + ((TYPE) == MPU_TEX_LEVEL2)) + +#define IS_MPU_REGION_PERMISSION_ATTRIBUTE(TYPE) (((TYPE) == MPU_REGION_NO_ACCESS) || \ + ((TYPE) == MPU_REGION_PRIV_RW) || \ + ((TYPE) == MPU_REGION_PRIV_RW_URO) || \ + ((TYPE) == MPU_REGION_FULL_ACCESS) || \ + ((TYPE) == MPU_REGION_PRIV_RO) || \ + ((TYPE) == MPU_REGION_PRIV_RO_URO)) + +#define IS_MPU_REGION_NUMBER(NUMBER) (((NUMBER) == MPU_REGION_NUMBER0) || \ + ((NUMBER) == MPU_REGION_NUMBER1) || \ + ((NUMBER) == MPU_REGION_NUMBER2) || \ + ((NUMBER) == MPU_REGION_NUMBER3) || \ + ((NUMBER) == MPU_REGION_NUMBER4) || \ + ((NUMBER) == MPU_REGION_NUMBER5) || \ + ((NUMBER) == MPU_REGION_NUMBER6) || \ + ((NUMBER) == MPU_REGION_NUMBER7)) + +#define IS_MPU_REGION_SIZE(SIZE) (((SIZE) == MPU_REGION_SIZE_32B) || \ + ((SIZE) == MPU_REGION_SIZE_64B) || \ + ((SIZE) == MPU_REGION_SIZE_128B) || \ + ((SIZE) == MPU_REGION_SIZE_256B) || \ + ((SIZE) == MPU_REGION_SIZE_512B) || \ + ((SIZE) == MPU_REGION_SIZE_1KB) || \ + ((SIZE) == MPU_REGION_SIZE_2KB) || \ + ((SIZE) == MPU_REGION_SIZE_4KB) || \ + ((SIZE) == MPU_REGION_SIZE_8KB) || \ + ((SIZE) == MPU_REGION_SIZE_16KB) || \ + ((SIZE) == MPU_REGION_SIZE_32KB) || \ + ((SIZE) == MPU_REGION_SIZE_64KB) || \ + ((SIZE) == MPU_REGION_SIZE_128KB) || \ + ((SIZE) == MPU_REGION_SIZE_256KB) || \ + ((SIZE) == MPU_REGION_SIZE_512KB) || \ + ((SIZE) == MPU_REGION_SIZE_1MB) || \ + ((SIZE) == MPU_REGION_SIZE_2MB) || \ + ((SIZE) == MPU_REGION_SIZE_4MB) || \ + ((SIZE) == MPU_REGION_SIZE_8MB) || \ + ((SIZE) == MPU_REGION_SIZE_16MB) || \ + ((SIZE) == MPU_REGION_SIZE_32MB) || \ + ((SIZE) == MPU_REGION_SIZE_64MB) || \ + ((SIZE) == MPU_REGION_SIZE_128MB) || \ + ((SIZE) == MPU_REGION_SIZE_256MB) || \ + ((SIZE) == MPU_REGION_SIZE_512MB) || \ + ((SIZE) == MPU_REGION_SIZE_1GB) || \ + ((SIZE) == MPU_REGION_SIZE_2GB) || \ + ((SIZE) == MPU_REGION_SIZE_4GB)) + +#define IS_MPU_SUB_REGION_DISABLE(SUBREGION) ((SUBREGION) < (uint16_t)0x00FF) +#endif /* __MPU_PRESENT */ + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_HAL_CORTEX_H */ + + diff --git 
a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_def.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_def.h new file mode 100644 index 0000000..1df0d7d --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_def.h @@ -0,0 +1,212 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_def.h + * @author MCD Application Team + * @brief This file contains HAL common defines, enumeration, macros and + * structures definitions. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_HAL_DEF +#define __STM32F4xx_HAL_DEF + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" +#include "Legacy/stm32_hal_legacy.h" +#include + +/* Exported types ------------------------------------------------------------*/ + +/** + * @brief HAL Status structures definition + */ +typedef enum +{ + HAL_OK = 0x00U, + HAL_ERROR = 0x01U, + HAL_BUSY = 0x02U, + HAL_TIMEOUT = 0x03U +} HAL_StatusTypeDef; + +/** + * @brief HAL Lock structures definition + */ +typedef enum +{ + HAL_UNLOCKED = 0x00U, + HAL_LOCKED = 0x01U +} HAL_LockTypeDef; + +/* Exported macro ------------------------------------------------------------*/ + +#if !defined(UNUSED) +#define UNUSED(X) (void)X /* To avoid gcc/g++ warnings */ +#endif /* UNUSED */ + +#define HAL_MAX_DELAY 0xFFFFFFFFU + +#define HAL_IS_BIT_SET(REG, BIT) (((REG) & (BIT)) == (BIT)) +#define HAL_IS_BIT_CLR(REG, BIT) (((REG) & (BIT)) == 0U) + +#define __HAL_LINKDMA(__HANDLE__, __PPP_DMA_FIELD__, __DMA_HANDLE__) \ + do{ \ + (__HANDLE__)->__PPP_DMA_FIELD__ = &(__DMA_HANDLE__); \ + (__DMA_HANDLE__).Parent = (__HANDLE__); \ + } while(0U) + +/** @brief Reset the Handle's State field. + * @param __HANDLE__ specifies the Peripheral Handle. + * @note This macro can be used for the following purpose: + * - When the Handle is declared as local variable; before passing it as parameter + * to HAL_PPP_Init() for the first time, it is mandatory to use this macro + * to set to 0 the Handle's "State" field. + * Otherwise, "State" field may have any random value and the first time the function + * HAL_PPP_Init() is called, the low level hardware initialization will be missed + * (i.e. HAL_PPP_MspInit() will not be executed). + * - When there is a need to reconfigure the low level hardware: instead of calling + * HAL_PPP_DeInit() then HAL_PPP_Init(), user can make a call to this macro then HAL_PPP_Init(). + * In this later function, when the Handle's "State" field is set to 0, it will execute the function + * HAL_PPP_MspInit() which will reconfigure the low level hardware. 
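 *          Illustrative example (editor's sketch, assuming the SPI HAL module is enabled
 *          in stm32f4xx_hal_conf.h):
 *            SPI_HandleTypeDef hspi;            // local handle: "State" holds a random value
 *            __HAL_RESET_HANDLE_STATE(&hspi);   // force State to 0 (HAL_SPI_STATE_RESET)
 *            // ... fill hspi.Init, then HAL_SPI_Init(&hspi) will run HAL_SPI_MspInit()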
+ * @retval None + */ +#define __HAL_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = 0U) + +#if (USE_RTOS == 1U) + /* Reserved for future use */ + #error "USE_RTOS should be 0 in the current HAL release" +#else + #define __HAL_LOCK(__HANDLE__) \ + do{ \ + if((__HANDLE__)->Lock == HAL_LOCKED) \ + { \ + return HAL_BUSY; \ + } \ + else \ + { \ + (__HANDLE__)->Lock = HAL_LOCKED; \ + } \ + }while (0U) + + #define __HAL_UNLOCK(__HANDLE__) \ + do{ \ + (__HANDLE__)->Lock = HAL_UNLOCKED; \ + }while (0U) +#endif /* USE_RTOS */ + +#if defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) /* ARM Compiler V6 */ + #ifndef __weak + #define __weak __attribute__((weak)) + #endif + #ifndef __packed + #define __packed __attribute__((packed)) + #endif +#elif defined ( __GNUC__ ) && !defined (__CC_ARM) /* GNU Compiler */ + #ifndef __weak + #define __weak __attribute__((weak)) + #endif /* __weak */ + #ifndef __packed + #define __packed __attribute__((__packed__)) + #endif /* __packed */ +#endif /* __GNUC__ */ + + +/* Macro to get variable aligned on 4-bytes, for __ICCARM__ the directive "#pragma data_alignment=4" must be used instead */ +#if defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) /* ARM Compiler V6 */ + #ifndef __ALIGN_BEGIN + #define __ALIGN_BEGIN + #endif + #ifndef __ALIGN_END + #define __ALIGN_END __attribute__ ((aligned (4))) + #endif +#elif defined ( __GNUC__ ) && !defined (__CC_ARM) /* GNU Compiler */ + #ifndef __ALIGN_END +#define __ALIGN_END __attribute__ ((aligned (4))) + #endif /* __ALIGN_END */ + #ifndef __ALIGN_BEGIN + #define __ALIGN_BEGIN + #endif /* __ALIGN_BEGIN */ +#else + #ifndef __ALIGN_END + #define __ALIGN_END + #endif /* __ALIGN_END */ + #ifndef __ALIGN_BEGIN + #if defined (__CC_ARM) /* ARM Compiler V5*/ +#define __ALIGN_BEGIN __align(4) + #elif defined (__ICCARM__) /* IAR Compiler */ + #define __ALIGN_BEGIN + #endif /* __CC_ARM */ + #endif /* __ALIGN_BEGIN */ +#endif /* __GNUC__ */ + + +/** + * @brief __RAM_FUNC definition + */ +#if defined ( __CC_ARM ) || (defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)) +/* ARM Compiler V4/V5 and V6 + -------------------------- + RAM functions are defined using the toolchain options. + Functions that are executed in RAM should reside in a separate source module. + Using the 'Options for File' dialog you can simply change the 'Code / Const' + area of a module to a memory space in physical RAM. + Available memory areas are declared in the 'Target' tab of the 'Options for Target' + dialog. +*/ +#define __RAM_FUNC + +#elif defined ( __ICCARM__ ) +/* ICCARM Compiler + --------------- + RAM functions are defined using a specific toolchain keyword "__ramfunc". +*/ +#define __RAM_FUNC __ramfunc + +#elif defined ( __GNUC__ ) +/* GNU Compiler + ------------ + RAM functions are defined using a specific toolchain attribute + "__attribute__((section(".RamFunc")))". 
+*/ +#define __RAM_FUNC __attribute__((section(".RamFunc"))) + +#endif + +/** + * @brief __NOINLINE definition + */ +#if defined ( __CC_ARM ) || (defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)) || defined ( __GNUC__ ) +/* ARM V4/V5 and V6 & GNU Compiler + ------------------------------- +*/ +#define __NOINLINE __attribute__ ( (noinline) ) + +#elif defined ( __ICCARM__ ) +/* ICCARM Compiler + --------------- +*/ +#define __NOINLINE _Pragma("optimize = no_inline") + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* ___STM32F4xx_HAL_DEF */ + + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_dma.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_dma.h new file mode 100644 index 0000000..7ff3836 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_dma.h @@ -0,0 +1,802 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_dma.h + * @author MCD Application Team + * @brief Header file of DMA HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_HAL_DMA_H +#define __STM32F4xx_HAL_DMA_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal_def.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @addtogroup DMA + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ + +/** @defgroup DMA_Exported_Types DMA Exported Types + * @brief DMA Exported Types + * @{ + */ + +/** + * @brief DMA Configuration Structure definition + */ +typedef struct +{ + uint32_t Channel; /*!< Specifies the channel used for the specified stream. + This parameter can be a value of @ref DMA_Channel_selection */ + + uint32_t Direction; /*!< Specifies if the data will be transferred from memory to peripheral, + from memory to memory or from peripheral to memory. + This parameter can be a value of @ref DMA_Data_transfer_direction */ + + uint32_t PeriphInc; /*!< Specifies whether the Peripheral address register should be incremented or not. + This parameter can be a value of @ref DMA_Peripheral_incremented_mode */ + + uint32_t MemInc; /*!< Specifies whether the memory address register should be incremented or not. + This parameter can be a value of @ref DMA_Memory_incremented_mode */ + + uint32_t PeriphDataAlignment; /*!< Specifies the Peripheral data width. + This parameter can be a value of @ref DMA_Peripheral_data_size */ + + uint32_t MemDataAlignment; /*!< Specifies the Memory data width. + This parameter can be a value of @ref DMA_Memory_data_size */ + + uint32_t Mode; /*!< Specifies the operation mode of the DMAy Streamx. + This parameter can be a value of @ref DMA_mode + @note The circular buffer mode cannot be used if the memory-to-memory + data transfer is configured on the selected Stream */ + + uint32_t Priority; /*!< Specifies the software priority for the DMAy Streamx. 
+ This parameter can be a value of @ref DMA_Priority_level */ + + uint32_t FIFOMode; /*!< Specifies if the FIFO mode or Direct mode will be used for the specified stream. + This parameter can be a value of @ref DMA_FIFO_direct_mode + @note The Direct mode (FIFO mode disabled) cannot be used if the + memory-to-memory data transfer is configured on the selected stream */ + + uint32_t FIFOThreshold; /*!< Specifies the FIFO threshold level. + This parameter can be a value of @ref DMA_FIFO_threshold_level */ + + uint32_t MemBurst; /*!< Specifies the Burst transfer configuration for the memory transfers. + It specifies the amount of data to be transferred in a single non interruptible + transaction. + This parameter can be a value of @ref DMA_Memory_burst + @note The burst mode is possible only if the address Increment mode is enabled. */ + + uint32_t PeriphBurst; /*!< Specifies the Burst transfer configuration for the peripheral transfers. + It specifies the amount of data to be transferred in a single non interruptible + transaction. + This parameter can be a value of @ref DMA_Peripheral_burst + @note The burst mode is possible only if the address Increment mode is enabled. */ +}DMA_InitTypeDef; + + +/** + * @brief HAL DMA State structures definition + */ +typedef enum +{ + HAL_DMA_STATE_RESET = 0x00U, /*!< DMA not yet initialized or disabled */ + HAL_DMA_STATE_READY = 0x01U, /*!< DMA initialized and ready for use */ + HAL_DMA_STATE_BUSY = 0x02U, /*!< DMA process is ongoing */ + HAL_DMA_STATE_TIMEOUT = 0x03U, /*!< DMA timeout state */ + HAL_DMA_STATE_ERROR = 0x04U, /*!< DMA error state */ + HAL_DMA_STATE_ABORT = 0x05U, /*!< DMA Abort state */ +}HAL_DMA_StateTypeDef; + +/** + * @brief HAL DMA Error Code structure definition + */ +typedef enum +{ + HAL_DMA_FULL_TRANSFER = 0x00U, /*!< Full transfer */ + HAL_DMA_HALF_TRANSFER = 0x01U /*!< Half Transfer */ +}HAL_DMA_LevelCompleteTypeDef; + +/** + * @brief HAL DMA Error Code structure definition + */ +typedef enum +{ + HAL_DMA_XFER_CPLT_CB_ID = 0x00U, /*!< Full transfer */ + HAL_DMA_XFER_HALFCPLT_CB_ID = 0x01U, /*!< Half Transfer */ + HAL_DMA_XFER_M1CPLT_CB_ID = 0x02U, /*!< M1 Full Transfer */ + HAL_DMA_XFER_M1HALFCPLT_CB_ID = 0x03U, /*!< M1 Half Transfer */ + HAL_DMA_XFER_ERROR_CB_ID = 0x04U, /*!< Error */ + HAL_DMA_XFER_ABORT_CB_ID = 0x05U, /*!< Abort */ + HAL_DMA_XFER_ALL_CB_ID = 0x06U /*!< All */ +}HAL_DMA_CallbackIDTypeDef; + +/** + * @brief DMA handle Structure definition + */ +typedef struct __DMA_HandleTypeDef +{ + DMA_Stream_TypeDef *Instance; /*!< Register base address */ + + DMA_InitTypeDef Init; /*!< DMA communication parameters */ + + HAL_LockTypeDef Lock; /*!< DMA locking object */ + + __IO HAL_DMA_StateTypeDef State; /*!< DMA transfer state */ + + void *Parent; /*!< Parent object state */ + + void (* XferCpltCallback)( struct __DMA_HandleTypeDef * hdma); /*!< DMA transfer complete callback */ + + void (* XferHalfCpltCallback)( struct __DMA_HandleTypeDef * hdma); /*!< DMA Half transfer complete callback */ + + void (* XferM1CpltCallback)( struct __DMA_HandleTypeDef * hdma); /*!< DMA transfer complete Memory1 callback */ + + void (* XferM1HalfCpltCallback)( struct __DMA_HandleTypeDef * hdma); /*!< DMA transfer Half complete Memory1 callback */ + + void (* XferErrorCallback)( struct __DMA_HandleTypeDef * hdma); /*!< DMA transfer error callback */ + + void (* XferAbortCallback)( struct __DMA_HandleTypeDef * hdma); /*!< DMA transfer Abort callback */ + + __IO uint32_t ErrorCode; /*!< DMA Error code */ + + uint32_t StreamBaseAddress; /*!< 
DMA Stream Base Address */ + + uint32_t StreamIndex; /*!< DMA Stream Index */ + +}DMA_HandleTypeDef; + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup DMA_Exported_Constants DMA Exported Constants + * @brief DMA Exported constants + * @{ + */ + +/** @defgroup DMA_Error_Code DMA Error Code + * @brief DMA Error Code + * @{ + */ +#define HAL_DMA_ERROR_NONE 0x00000000U /*!< No error */ +#define HAL_DMA_ERROR_TE 0x00000001U /*!< Transfer error */ +#define HAL_DMA_ERROR_FE 0x00000002U /*!< FIFO error */ +#define HAL_DMA_ERROR_DME 0x00000004U /*!< Direct Mode error */ +#define HAL_DMA_ERROR_TIMEOUT 0x00000020U /*!< Timeout error */ +#define HAL_DMA_ERROR_PARAM 0x00000040U /*!< Parameter error */ +#define HAL_DMA_ERROR_NO_XFER 0x00000080U /*!< Abort requested with no Xfer ongoing */ +#define HAL_DMA_ERROR_NOT_SUPPORTED 0x00000100U /*!< Not supported mode */ +/** + * @} + */ + +/** @defgroup DMA_Channel_selection DMA Channel selection + * @brief DMA channel selection + * @{ + */ +#define DMA_CHANNEL_0 0x00000000U /*!< DMA Channel 0 */ +#define DMA_CHANNEL_1 0x02000000U /*!< DMA Channel 1 */ +#define DMA_CHANNEL_2 0x04000000U /*!< DMA Channel 2 */ +#define DMA_CHANNEL_3 0x06000000U /*!< DMA Channel 3 */ +#define DMA_CHANNEL_4 0x08000000U /*!< DMA Channel 4 */ +#define DMA_CHANNEL_5 0x0A000000U /*!< DMA Channel 5 */ +#define DMA_CHANNEL_6 0x0C000000U /*!< DMA Channel 6 */ +#define DMA_CHANNEL_7 0x0E000000U /*!< DMA Channel 7 */ +#if defined (DMA_SxCR_CHSEL_3) +#define DMA_CHANNEL_8 0x10000000U /*!< DMA Channel 8 */ +#define DMA_CHANNEL_9 0x12000000U /*!< DMA Channel 9 */ +#define DMA_CHANNEL_10 0x14000000U /*!< DMA Channel 10 */ +#define DMA_CHANNEL_11 0x16000000U /*!< DMA Channel 11 */ +#define DMA_CHANNEL_12 0x18000000U /*!< DMA Channel 12 */ +#define DMA_CHANNEL_13 0x1A000000U /*!< DMA Channel 13 */ +#define DMA_CHANNEL_14 0x1C000000U /*!< DMA Channel 14 */ +#define DMA_CHANNEL_15 0x1E000000U /*!< DMA Channel 15 */ +#endif /* DMA_SxCR_CHSEL_3 */ +/** + * @} + */ + +/** @defgroup DMA_Data_transfer_direction DMA Data transfer direction + * @brief DMA data transfer direction + * @{ + */ +#define DMA_PERIPH_TO_MEMORY 0x00000000U /*!< Peripheral to memory direction */ +#define DMA_MEMORY_TO_PERIPH ((uint32_t)DMA_SxCR_DIR_0) /*!< Memory to peripheral direction */ +#define DMA_MEMORY_TO_MEMORY ((uint32_t)DMA_SxCR_DIR_1) /*!< Memory to memory direction */ +/** + * @} + */ + +/** @defgroup DMA_Peripheral_incremented_mode DMA Peripheral incremented mode + * @brief DMA peripheral incremented mode + * @{ + */ +#define DMA_PINC_ENABLE ((uint32_t)DMA_SxCR_PINC) /*!< Peripheral increment mode enable */ +#define DMA_PINC_DISABLE 0x00000000U /*!< Peripheral increment mode disable */ +/** + * @} + */ + +/** @defgroup DMA_Memory_incremented_mode DMA Memory incremented mode + * @brief DMA memory incremented mode + * @{ + */ +#define DMA_MINC_ENABLE ((uint32_t)DMA_SxCR_MINC) /*!< Memory increment mode enable */ +#define DMA_MINC_DISABLE 0x00000000U /*!< Memory increment mode disable */ +/** + * @} + */ + +/** @defgroup DMA_Peripheral_data_size DMA Peripheral data size + * @brief DMA peripheral data size + * @{ + */ +#define DMA_PDATAALIGN_BYTE 0x00000000U /*!< Peripheral data alignment: Byte */ +#define DMA_PDATAALIGN_HALFWORD ((uint32_t)DMA_SxCR_PSIZE_0) /*!< Peripheral data alignment: HalfWord */ +#define DMA_PDATAALIGN_WORD ((uint32_t)DMA_SxCR_PSIZE_1) /*!< Peripheral data alignment: Word */ +/** + * @} + */ + +/** @defgroup DMA_Memory_data_size 
DMA Memory data size + * @brief DMA memory data size + * @{ + */ +#define DMA_MDATAALIGN_BYTE 0x00000000U /*!< Memory data alignment: Byte */ +#define DMA_MDATAALIGN_HALFWORD ((uint32_t)DMA_SxCR_MSIZE_0) /*!< Memory data alignment: HalfWord */ +#define DMA_MDATAALIGN_WORD ((uint32_t)DMA_SxCR_MSIZE_1) /*!< Memory data alignment: Word */ +/** + * @} + */ + +/** @defgroup DMA_mode DMA mode + * @brief DMA mode + * @{ + */ +#define DMA_NORMAL 0x00000000U /*!< Normal mode */ +#define DMA_CIRCULAR ((uint32_t)DMA_SxCR_CIRC) /*!< Circular mode */ +#define DMA_PFCTRL ((uint32_t)DMA_SxCR_PFCTRL) /*!< Peripheral flow control mode */ +/** + * @} + */ + +/** @defgroup DMA_Priority_level DMA Priority level + * @brief DMA priority levels + * @{ + */ +#define DMA_PRIORITY_LOW 0x00000000U /*!< Priority level: Low */ +#define DMA_PRIORITY_MEDIUM ((uint32_t)DMA_SxCR_PL_0) /*!< Priority level: Medium */ +#define DMA_PRIORITY_HIGH ((uint32_t)DMA_SxCR_PL_1) /*!< Priority level: High */ +#define DMA_PRIORITY_VERY_HIGH ((uint32_t)DMA_SxCR_PL) /*!< Priority level: Very High */ +/** + * @} + */ + +/** @defgroup DMA_FIFO_direct_mode DMA FIFO direct mode + * @brief DMA FIFO direct mode + * @{ + */ +#define DMA_FIFOMODE_DISABLE 0x00000000U /*!< FIFO mode disable */ +#define DMA_FIFOMODE_ENABLE ((uint32_t)DMA_SxFCR_DMDIS) /*!< FIFO mode enable */ +/** + * @} + */ + +/** @defgroup DMA_FIFO_threshold_level DMA FIFO threshold level + * @brief DMA FIFO level + * @{ + */ +#define DMA_FIFO_THRESHOLD_1QUARTERFULL 0x00000000U /*!< FIFO threshold 1 quart full configuration */ +#define DMA_FIFO_THRESHOLD_HALFFULL ((uint32_t)DMA_SxFCR_FTH_0) /*!< FIFO threshold half full configuration */ +#define DMA_FIFO_THRESHOLD_3QUARTERSFULL ((uint32_t)DMA_SxFCR_FTH_1) /*!< FIFO threshold 3 quarts full configuration */ +#define DMA_FIFO_THRESHOLD_FULL ((uint32_t)DMA_SxFCR_FTH) /*!< FIFO threshold full configuration */ +/** + * @} + */ + +/** @defgroup DMA_Memory_burst DMA Memory burst + * @brief DMA memory burst + * @{ + */ +#define DMA_MBURST_SINGLE 0x00000000U +#define DMA_MBURST_INC4 ((uint32_t)DMA_SxCR_MBURST_0) +#define DMA_MBURST_INC8 ((uint32_t)DMA_SxCR_MBURST_1) +#define DMA_MBURST_INC16 ((uint32_t)DMA_SxCR_MBURST) +/** + * @} + */ + +/** @defgroup DMA_Peripheral_burst DMA Peripheral burst + * @brief DMA peripheral burst + * @{ + */ +#define DMA_PBURST_SINGLE 0x00000000U +#define DMA_PBURST_INC4 ((uint32_t)DMA_SxCR_PBURST_0) +#define DMA_PBURST_INC8 ((uint32_t)DMA_SxCR_PBURST_1) +#define DMA_PBURST_INC16 ((uint32_t)DMA_SxCR_PBURST) +/** + * @} + */ + +/** @defgroup DMA_interrupt_enable_definitions DMA interrupt enable definitions + * @brief DMA interrupts definition + * @{ + */ +#define DMA_IT_TC ((uint32_t)DMA_SxCR_TCIE) +#define DMA_IT_HT ((uint32_t)DMA_SxCR_HTIE) +#define DMA_IT_TE ((uint32_t)DMA_SxCR_TEIE) +#define DMA_IT_DME ((uint32_t)DMA_SxCR_DMEIE) +#define DMA_IT_FE 0x00000080U +/** + * @} + */ + +/** @defgroup DMA_flag_definitions DMA flag definitions + * @brief DMA flag definitions + * @{ + */ +#define DMA_FLAG_FEIF0_4 0x00000001U +#define DMA_FLAG_DMEIF0_4 0x00000004U +#define DMA_FLAG_TEIF0_4 0x00000008U +#define DMA_FLAG_HTIF0_4 0x00000010U +#define DMA_FLAG_TCIF0_4 0x00000020U +#define DMA_FLAG_FEIF1_5 0x00000040U +#define DMA_FLAG_DMEIF1_5 0x00000100U +#define DMA_FLAG_TEIF1_5 0x00000200U +#define DMA_FLAG_HTIF1_5 0x00000400U +#define DMA_FLAG_TCIF1_5 0x00000800U +#define DMA_FLAG_FEIF2_6 0x00010000U +#define DMA_FLAG_DMEIF2_6 0x00040000U +#define DMA_FLAG_TEIF2_6 0x00080000U +#define DMA_FLAG_HTIF2_6 0x00100000U 
+#define DMA_FLAG_TCIF2_6 0x00200000U +#define DMA_FLAG_FEIF3_7 0x00400000U +#define DMA_FLAG_DMEIF3_7 0x01000000U +#define DMA_FLAG_TEIF3_7 0x02000000U +#define DMA_FLAG_HTIF3_7 0x04000000U +#define DMA_FLAG_TCIF3_7 0x08000000U +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ + +/** @brief Reset DMA handle state + * @param __HANDLE__ specifies the DMA handle. + * @retval None + */ +#define __HAL_DMA_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = HAL_DMA_STATE_RESET) + +/** + * @brief Return the current DMA Stream FIFO filled level. + * @param __HANDLE__ DMA handle + * @retval The FIFO filling state. + * - DMA_FIFOStatus_Less1QuarterFull: when FIFO is less than 1 quarter-full + * and not empty. + * - DMA_FIFOStatus_1QuarterFull: if more than 1 quarter-full. + * - DMA_FIFOStatus_HalfFull: if more than 1 half-full. + * - DMA_FIFOStatus_3QuartersFull: if more than 3 quarters-full. + * - DMA_FIFOStatus_Empty: when FIFO is empty + * - DMA_FIFOStatus_Full: when FIFO is full + */ +#define __HAL_DMA_GET_FS(__HANDLE__) (((__HANDLE__)->Instance->FCR & (DMA_SxFCR_FS))) + +/** + * @brief Enable the specified DMA Stream. + * @param __HANDLE__ DMA handle + * @retval None + */ +#define __HAL_DMA_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR |= DMA_SxCR_EN) + +/** + * @brief Disable the specified DMA Stream. + * @param __HANDLE__ DMA handle + * @retval None + */ +#define __HAL_DMA_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR &= ~DMA_SxCR_EN) + +/* Interrupt & Flag management */ + +/** + * @brief Return the current DMA Stream transfer complete flag. + * @param __HANDLE__ DMA handle + * @retval The specified transfer complete flag index. + */ +#define __HAL_DMA_GET_TC_FLAG_INDEX(__HANDLE__) \ +(((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream0))? DMA_FLAG_TCIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream0))? DMA_FLAG_TCIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream4))? DMA_FLAG_TCIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream4))? DMA_FLAG_TCIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream1))? DMA_FLAG_TCIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream1))? DMA_FLAG_TCIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream5))? DMA_FLAG_TCIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream5))? DMA_FLAG_TCIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream2))? DMA_FLAG_TCIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream2))? DMA_FLAG_TCIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream6))? DMA_FLAG_TCIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream6))? DMA_FLAG_TCIF2_6 :\ + DMA_FLAG_TCIF3_7) + +/** + * @brief Return the current DMA Stream half transfer complete flag. + * @param __HANDLE__ DMA handle + * @retval The specified half transfer complete flag index. + */ +#define __HAL_DMA_GET_HT_FLAG_INDEX(__HANDLE__)\ +(((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream0))? DMA_FLAG_HTIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream0))? DMA_FLAG_HTIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream4))? DMA_FLAG_HTIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream4))? DMA_FLAG_HTIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream1))? 
DMA_FLAG_HTIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream1))? DMA_FLAG_HTIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream5))? DMA_FLAG_HTIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream5))? DMA_FLAG_HTIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream2))? DMA_FLAG_HTIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream2))? DMA_FLAG_HTIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream6))? DMA_FLAG_HTIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream6))? DMA_FLAG_HTIF2_6 :\ + DMA_FLAG_HTIF3_7) + +/** + * @brief Return the current DMA Stream transfer error flag. + * @param __HANDLE__ DMA handle + * @retval The specified transfer error flag index. + */ +#define __HAL_DMA_GET_TE_FLAG_INDEX(__HANDLE__)\ +(((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream0))? DMA_FLAG_TEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream0))? DMA_FLAG_TEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream4))? DMA_FLAG_TEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream4))? DMA_FLAG_TEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream1))? DMA_FLAG_TEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream1))? DMA_FLAG_TEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream5))? DMA_FLAG_TEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream5))? DMA_FLAG_TEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream2))? DMA_FLAG_TEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream2))? DMA_FLAG_TEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream6))? DMA_FLAG_TEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream6))? DMA_FLAG_TEIF2_6 :\ + DMA_FLAG_TEIF3_7) + +/** + * @brief Return the current DMA Stream FIFO error flag. + * @param __HANDLE__ DMA handle + * @retval The specified FIFO error flag index. + */ +#define __HAL_DMA_GET_FE_FLAG_INDEX(__HANDLE__)\ +(((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream0))? DMA_FLAG_FEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream0))? DMA_FLAG_FEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream4))? DMA_FLAG_FEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream4))? DMA_FLAG_FEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream1))? DMA_FLAG_FEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream1))? DMA_FLAG_FEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream5))? DMA_FLAG_FEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream5))? DMA_FLAG_FEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream2))? DMA_FLAG_FEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream2))? DMA_FLAG_FEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream6))? DMA_FLAG_FEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream6))? DMA_FLAG_FEIF2_6 :\ + DMA_FLAG_FEIF3_7) + +/** + * @brief Return the current DMA Stream direct mode error flag. + * @param __HANDLE__ DMA handle + * @retval The specified direct mode error flag index. + */ +#define __HAL_DMA_GET_DME_FLAG_INDEX(__HANDLE__)\ +(((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream0))? 
DMA_FLAG_DMEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream0))? DMA_FLAG_DMEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream4))? DMA_FLAG_DMEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream4))? DMA_FLAG_DMEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream1))? DMA_FLAG_DMEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream1))? DMA_FLAG_DMEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream5))? DMA_FLAG_DMEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream5))? DMA_FLAG_DMEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream2))? DMA_FLAG_DMEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream2))? DMA_FLAG_DMEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream6))? DMA_FLAG_DMEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream6))? DMA_FLAG_DMEIF2_6 :\ + DMA_FLAG_DMEIF3_7) + +/** + * @brief Get the DMA Stream pending flags. + * @param __HANDLE__ DMA handle + * @param __FLAG__ Get the specified flag. + * This parameter can be any combination of the following values: + * @arg DMA_FLAG_TCIFx: Transfer complete flag. + * @arg DMA_FLAG_HTIFx: Half transfer complete flag. + * @arg DMA_FLAG_TEIFx: Transfer error flag. + * @arg DMA_FLAG_DMEIFx: Direct mode error flag. + * @arg DMA_FLAG_FEIFx: FIFO error flag. + * Where x can be 0_4, 1_5, 2_6 or 3_7 to select the DMA Stream flag. + * @retval The state of FLAG (SET or RESET). + */ +#define __HAL_DMA_GET_FLAG(__HANDLE__, __FLAG__)\ +(((uint32_t)((__HANDLE__)->Instance) > (uint32_t)DMA2_Stream3)? (DMA2->HISR & (__FLAG__)) :\ + ((uint32_t)((__HANDLE__)->Instance) > (uint32_t)DMA1_Stream7)? (DMA2->LISR & (__FLAG__)) :\ + ((uint32_t)((__HANDLE__)->Instance) > (uint32_t)DMA1_Stream3)? (DMA1->HISR & (__FLAG__)) : (DMA1->LISR & (__FLAG__))) + +/** + * @brief Clear the DMA Stream pending flags. + * @param __HANDLE__ DMA handle + * @param __FLAG__ specifies the flag to clear. + * This parameter can be any combination of the following values: + * @arg DMA_FLAG_TCIFx: Transfer complete flag. + * @arg DMA_FLAG_HTIFx: Half transfer complete flag. + * @arg DMA_FLAG_TEIFx: Transfer error flag. + * @arg DMA_FLAG_DMEIFx: Direct mode error flag. + * @arg DMA_FLAG_FEIFx: FIFO error flag. + * Where x can be 0_4, 1_5, 2_6 or 3_7 to select the DMA Stream flag. + * @retval None + */ +#define __HAL_DMA_CLEAR_FLAG(__HANDLE__, __FLAG__) \ +(((uint32_t)((__HANDLE__)->Instance) > (uint32_t)DMA2_Stream3)? (DMA2->HIFCR = (__FLAG__)) :\ + ((uint32_t)((__HANDLE__)->Instance) > (uint32_t)DMA1_Stream7)? (DMA2->LIFCR = (__FLAG__)) :\ + ((uint32_t)((__HANDLE__)->Instance) > (uint32_t)DMA1_Stream3)? (DMA1->HIFCR = (__FLAG__)) : (DMA1->LIFCR = (__FLAG__))) + +/** + * @brief Enable the specified DMA Stream interrupts. + * @param __HANDLE__ DMA handle + * @param __INTERRUPT__ specifies the DMA interrupt sources to be enabled or disabled. + * This parameter can be any combination of the following values: + * @arg DMA_IT_TC: Transfer complete interrupt mask. + * @arg DMA_IT_HT: Half transfer complete interrupt mask. + * @arg DMA_IT_TE: Transfer error interrupt mask. + * @arg DMA_IT_FE: FIFO error interrupt mask. + * @arg DMA_IT_DME: Direct mode error interrupt. + * @retval None + */ +#define __HAL_DMA_ENABLE_IT(__HANDLE__, __INTERRUPT__) (((__INTERRUPT__) != DMA_IT_FE)? 
\ +((__HANDLE__)->Instance->CR |= (__INTERRUPT__)) : ((__HANDLE__)->Instance->FCR |= (__INTERRUPT__))) + +/** + * @brief Disable the specified DMA Stream interrupts. + * @param __HANDLE__ DMA handle + * @param __INTERRUPT__ specifies the DMA interrupt sources to be enabled or disabled. + * This parameter can be any combination of the following values: + * @arg DMA_IT_TC: Transfer complete interrupt mask. + * @arg DMA_IT_HT: Half transfer complete interrupt mask. + * @arg DMA_IT_TE: Transfer error interrupt mask. + * @arg DMA_IT_FE: FIFO error interrupt mask. + * @arg DMA_IT_DME: Direct mode error interrupt. + * @retval None + */ +#define __HAL_DMA_DISABLE_IT(__HANDLE__, __INTERRUPT__) (((__INTERRUPT__) != DMA_IT_FE)? \ +((__HANDLE__)->Instance->CR &= ~(__INTERRUPT__)) : ((__HANDLE__)->Instance->FCR &= ~(__INTERRUPT__))) + +/** + * @brief Check whether the specified DMA Stream interrupt is enabled or disabled. + * @param __HANDLE__ DMA handle + * @param __INTERRUPT__ specifies the DMA interrupt source to check. + * This parameter can be one of the following values: + * @arg DMA_IT_TC: Transfer complete interrupt mask. + * @arg DMA_IT_HT: Half transfer complete interrupt mask. + * @arg DMA_IT_TE: Transfer error interrupt mask. + * @arg DMA_IT_FE: FIFO error interrupt mask. + * @arg DMA_IT_DME: Direct mode error interrupt. + * @retval The state of DMA_IT. + */ +#define __HAL_DMA_GET_IT_SOURCE(__HANDLE__, __INTERRUPT__) (((__INTERRUPT__) != DMA_IT_FE)? \ + ((__HANDLE__)->Instance->CR & (__INTERRUPT__)) : \ + ((__HANDLE__)->Instance->FCR & (__INTERRUPT__))) + +/** + * @brief Writes the number of data units to be transferred on the DMA Stream. + * @param __HANDLE__ DMA handle + * @param __COUNTER__ Number of data units to be transferred (from 0 to 65535) + * Number of data items depends only on the Peripheral data format. + * + * @note If Peripheral data format is Bytes: number of data units is equal + * to total number of bytes to be transferred. + * + * @note If Peripheral data format is Half-Word: number of data units is + * equal to total number of bytes to be transferred / 2. + * + * @note If Peripheral data format is Word: number of data units is equal + * to total number of bytes to be transferred / 4. + * + * @retval The number of remaining data units in the current DMAy Streamx transfer. + */ +#define __HAL_DMA_SET_COUNTER(__HANDLE__, __COUNTER__) ((__HANDLE__)->Instance->NDTR = (uint16_t)(__COUNTER__)) + +/** + * @brief Returns the number of remaining data units in the current DMAy Streamx transfer. + * @param __HANDLE__ DMA handle + * + * @retval The number of remaining data units in the current DMA Stream transfer. 
+ */ +#define __HAL_DMA_GET_COUNTER(__HANDLE__) ((__HANDLE__)->Instance->NDTR) + + +/* Include DMA HAL Extension module */ +#include "stm32f4xx_hal_dma_ex.h" + +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup DMA_Exported_Functions DMA Exported Functions + * @brief DMA Exported functions + * @{ + */ + +/** @defgroup DMA_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and de-initialization functions + * @{ + */ +HAL_StatusTypeDef HAL_DMA_Init(DMA_HandleTypeDef *hdma); +HAL_StatusTypeDef HAL_DMA_DeInit(DMA_HandleTypeDef *hdma); +/** + * @} + */ + +/** @defgroup DMA_Exported_Functions_Group2 I/O operation functions + * @brief I/O operation functions + * @{ + */ +HAL_StatusTypeDef HAL_DMA_Start (DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength); +HAL_StatusTypeDef HAL_DMA_Start_IT(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength); +HAL_StatusTypeDef HAL_DMA_Abort(DMA_HandleTypeDef *hdma); +HAL_StatusTypeDef HAL_DMA_Abort_IT(DMA_HandleTypeDef *hdma); +HAL_StatusTypeDef HAL_DMA_PollForTransfer(DMA_HandleTypeDef *hdma, HAL_DMA_LevelCompleteTypeDef CompleteLevel, uint32_t Timeout); +void HAL_DMA_IRQHandler(DMA_HandleTypeDef *hdma); +HAL_StatusTypeDef HAL_DMA_CleanCallbacks(DMA_HandleTypeDef *hdma); +HAL_StatusTypeDef HAL_DMA_RegisterCallback(DMA_HandleTypeDef *hdma, HAL_DMA_CallbackIDTypeDef CallbackID, void (* pCallback)(DMA_HandleTypeDef *_hdma)); +HAL_StatusTypeDef HAL_DMA_UnRegisterCallback(DMA_HandleTypeDef *hdma, HAL_DMA_CallbackIDTypeDef CallbackID); + +/** + * @} + */ + +/** @defgroup DMA_Exported_Functions_Group3 Peripheral State functions + * @brief Peripheral State functions + * @{ + */ +HAL_DMA_StateTypeDef HAL_DMA_GetState(DMA_HandleTypeDef *hdma); +uint32_t HAL_DMA_GetError(DMA_HandleTypeDef *hdma); +/** + * @} + */ +/** + * @} + */ +/* Private Constants -------------------------------------------------------------*/ +/** @defgroup DMA_Private_Constants DMA Private Constants + * @brief DMA private defines and constants + * @{ + */ +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup DMA_Private_Macros DMA Private Macros + * @brief DMA private macros + * @{ + */ +#if defined (DMA_SxCR_CHSEL_3) +#define IS_DMA_CHANNEL(CHANNEL) (((CHANNEL) == DMA_CHANNEL_0) || \ + ((CHANNEL) == DMA_CHANNEL_1) || \ + ((CHANNEL) == DMA_CHANNEL_2) || \ + ((CHANNEL) == DMA_CHANNEL_3) || \ + ((CHANNEL) == DMA_CHANNEL_4) || \ + ((CHANNEL) == DMA_CHANNEL_5) || \ + ((CHANNEL) == DMA_CHANNEL_6) || \ + ((CHANNEL) == DMA_CHANNEL_7) || \ + ((CHANNEL) == DMA_CHANNEL_8) || \ + ((CHANNEL) == DMA_CHANNEL_9) || \ + ((CHANNEL) == DMA_CHANNEL_10)|| \ + ((CHANNEL) == DMA_CHANNEL_11)|| \ + ((CHANNEL) == DMA_CHANNEL_12)|| \ + ((CHANNEL) == DMA_CHANNEL_13)|| \ + ((CHANNEL) == DMA_CHANNEL_14)|| \ + ((CHANNEL) == DMA_CHANNEL_15)) +#else +#define IS_DMA_CHANNEL(CHANNEL) (((CHANNEL) == DMA_CHANNEL_0) || \ + ((CHANNEL) == DMA_CHANNEL_1) || \ + ((CHANNEL) == DMA_CHANNEL_2) || \ + ((CHANNEL) == DMA_CHANNEL_3) || \ + ((CHANNEL) == DMA_CHANNEL_4) || \ + ((CHANNEL) == DMA_CHANNEL_5) || \ + ((CHANNEL) == DMA_CHANNEL_6) || \ + ((CHANNEL) == DMA_CHANNEL_7)) +#endif /* DMA_SxCR_CHSEL_3 */ + +#define IS_DMA_DIRECTION(DIRECTION) (((DIRECTION) == DMA_PERIPH_TO_MEMORY ) || \ + ((DIRECTION) == DMA_MEMORY_TO_PERIPH) || \ + ((DIRECTION) == DMA_MEMORY_TO_MEMORY)) + +#define IS_DMA_BUFFER_SIZE(SIZE) (((SIZE) >= 
0x01U) && ((SIZE) < 0x10000U)) + +#define IS_DMA_PERIPHERAL_INC_STATE(STATE) (((STATE) == DMA_PINC_ENABLE) || \ + ((STATE) == DMA_PINC_DISABLE)) + +#define IS_DMA_MEMORY_INC_STATE(STATE) (((STATE) == DMA_MINC_ENABLE) || \ + ((STATE) == DMA_MINC_DISABLE)) + +#define IS_DMA_PERIPHERAL_DATA_SIZE(SIZE) (((SIZE) == DMA_PDATAALIGN_BYTE) || \ + ((SIZE) == DMA_PDATAALIGN_HALFWORD) || \ + ((SIZE) == DMA_PDATAALIGN_WORD)) + +#define IS_DMA_MEMORY_DATA_SIZE(SIZE) (((SIZE) == DMA_MDATAALIGN_BYTE) || \ + ((SIZE) == DMA_MDATAALIGN_HALFWORD) || \ + ((SIZE) == DMA_MDATAALIGN_WORD )) + +#define IS_DMA_MODE(MODE) (((MODE) == DMA_NORMAL ) || \ + ((MODE) == DMA_CIRCULAR) || \ + ((MODE) == DMA_PFCTRL)) + +#define IS_DMA_PRIORITY(PRIORITY) (((PRIORITY) == DMA_PRIORITY_LOW ) || \ + ((PRIORITY) == DMA_PRIORITY_MEDIUM) || \ + ((PRIORITY) == DMA_PRIORITY_HIGH) || \ + ((PRIORITY) == DMA_PRIORITY_VERY_HIGH)) + +#define IS_DMA_FIFO_MODE_STATE(STATE) (((STATE) == DMA_FIFOMODE_DISABLE ) || \ + ((STATE) == DMA_FIFOMODE_ENABLE)) + +#define IS_DMA_FIFO_THRESHOLD(THRESHOLD) (((THRESHOLD) == DMA_FIFO_THRESHOLD_1QUARTERFULL ) || \ + ((THRESHOLD) == DMA_FIFO_THRESHOLD_HALFFULL) || \ + ((THRESHOLD) == DMA_FIFO_THRESHOLD_3QUARTERSFULL) || \ + ((THRESHOLD) == DMA_FIFO_THRESHOLD_FULL)) + +#define IS_DMA_MEMORY_BURST(BURST) (((BURST) == DMA_MBURST_SINGLE) || \ + ((BURST) == DMA_MBURST_INC4) || \ + ((BURST) == DMA_MBURST_INC8) || \ + ((BURST) == DMA_MBURST_INC16)) + +#define IS_DMA_PERIPHERAL_BURST(BURST) (((BURST) == DMA_PBURST_SINGLE) || \ + ((BURST) == DMA_PBURST_INC4) || \ + ((BURST) == DMA_PBURST_INC8) || \ + ((BURST) == DMA_PBURST_INC16)) +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup DMA_Private_Functions DMA Private Functions + * @brief DMA private functions + * @{ + */ +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_HAL_DMA_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_dma_ex.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_dma_ex.h new file mode 100644 index 0000000..9858c74 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_dma_ex.h @@ -0,0 +1,102 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_dma_ex.h + * @author MCD Application Team + * @brief Header file of DMA HAL extension module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_HAL_DMA_EX_H +#define __STM32F4xx_HAL_DMA_EX_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal_def.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @addtogroup DMAEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup DMAEx_Exported_Types DMAEx Exported Types + * @brief DMAEx Exported types + * @{ + */ + +/** + * @brief HAL DMA Memory definition + */ +typedef enum +{ + MEMORY0 = 0x00U, /*!< Memory 0 */ + MEMORY1 = 0x01U /*!< Memory 1 */ +}HAL_DMA_MemoryTypeDef; + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup DMAEx_Exported_Functions DMAEx Exported Functions + * @brief DMAEx Exported functions + * @{ + */ + +/** @defgroup DMAEx_Exported_Functions_Group1 Extended features functions + * @brief Extended features functions + * @{ + */ + +/* IO operation functions *******************************************************/ +HAL_StatusTypeDef HAL_DMAEx_MultiBufferStart(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t SecondMemAddress, uint32_t DataLength); +HAL_StatusTypeDef HAL_DMAEx_MultiBufferStart_IT(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t SecondMemAddress, uint32_t DataLength); +HAL_StatusTypeDef HAL_DMAEx_ChangeMemory(DMA_HandleTypeDef *hdma, uint32_t Address, HAL_DMA_MemoryTypeDef memory); + +/** + * @} + */ +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup DMAEx_Private_Functions DMAEx Private Functions + * @brief DMAEx Private functions + * @{ + */ +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /*__STM32F4xx_HAL_DMA_EX_H*/ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_eth.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_eth.h new file mode 100644 index 0000000..9024fff --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_eth.h @@ -0,0 +1,2017 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_eth.h + * @author MCD Application Team + * @brief Header file of ETH HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F4xx_HAL_ETH_H +#define STM32F4xx_HAL_ETH_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal_def.h" + +#if defined(ETH) + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @addtogroup ETH + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +#ifndef ETH_TX_DESC_CNT +#define ETH_TX_DESC_CNT 4U +#endif /* ETH_TX_DESC_CNT */ + +#ifndef ETH_RX_DESC_CNT +#define ETH_RX_DESC_CNT 4U +#endif /* ETH_RX_DESC_CNT */ + + +/*********************** Descriptors struct def section ************************/ +/** @defgroup ETH_Exported_Types ETH Exported Types + * @{ + */ + +/** + * @brief ETH DMA Descriptor structure definition + */ +typedef struct +{ + __IO uint32_t DESC0; + __IO uint32_t DESC1; + __IO uint32_t DESC2; + __IO uint32_t DESC3; + __IO uint32_t DESC4; + __IO uint32_t DESC5; + __IO uint32_t DESC6; + __IO uint32_t DESC7; + uint32_t BackupAddr0; /* used to store rx buffer 1 address */ + uint32_t BackupAddr1; /* used to store rx buffer 2 address */ +} ETH_DMADescTypeDef; +/** + * + */ + +/** + * @brief ETH Buffers List structure definition + */ +typedef struct __ETH_BufferTypeDef +{ + uint8_t *buffer; /*gState = HAL_ETH_STATE_RESET; \ + (__HANDLE__)->MspInitCallback = NULL; \ + (__HANDLE__)->MspDeInitCallback = NULL; \ + } while(0) +#else +#define __HAL_ETH_RESET_HANDLE_STATE(__HANDLE__) do{ \ + (__HANDLE__)->gState = HAL_ETH_STATE_RESET; \ + } while(0) +#endif /*USE_HAL_ETH_REGISTER_CALLBACKS */ + +/** + * @brief Enables the specified ETHERNET DMA interrupts. + * @param __HANDLE__ : ETH Handle + * @param __INTERRUPT__: specifies the ETHERNET DMA interrupt sources to be + * enabled @ref ETH_DMA_Interrupts + * @retval None + */ +#define __HAL_ETH_DMA_ENABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->DMAIER \ + |= (__INTERRUPT__)) + +/** + * @brief Disables the specified ETHERNET DMA interrupts. + * @param __HANDLE__ : ETH Handle + * @param __INTERRUPT__: specifies the ETHERNET DMA interrupt sources to be + * disabled. @ref ETH_DMA_Interrupts + * @retval None + */ +#define __HAL_ETH_DMA_DISABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->DMAIER \ + &= ~(__INTERRUPT__)) + +/** + * @brief Gets the ETHERNET DMA IT source enabled or disabled. + * @param __HANDLE__ : ETH Handle + * @param __INTERRUPT__: specifies the interrupt source to get . @ref ETH_DMA_Interrupts + * @retval The ETH DMA IT Source enabled or disabled + */ +#define __HAL_ETH_DMA_GET_IT_SOURCE(__HANDLE__, __INTERRUPT__) (((__HANDLE__)->Instance->DMAIER &\ + (__INTERRUPT__)) == (__INTERRUPT__)) + +/** + * @brief Gets the ETHERNET DMA IT pending bit. + * @param __HANDLE__ : ETH Handle + * @param __INTERRUPT__: specifies the interrupt source to get . @ref ETH_DMA_Interrupts + * @retval The state of ETH DMA IT (SET or RESET) + */ +#define __HAL_ETH_DMA_GET_IT(__HANDLE__, __INTERRUPT__) (((__HANDLE__)->Instance->DMASR &\ + (__INTERRUPT__)) == (__INTERRUPT__)) + +/** + * @brief Clears the ETHERNET DMA IT pending bit. + * @param __HANDLE__ : ETH Handle + * @param __INTERRUPT__: specifies the interrupt pending bit to clear. 
@ref ETH_DMA_Interrupts + * @retval None + */ +#define __HAL_ETH_DMA_CLEAR_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->DMASR = (__INTERRUPT__)) + +/** + * @brief Checks whether the specified ETHERNET DMA flag is set or not. + * @param __HANDLE__: ETH Handle + * @param __FLAG__: specifies the flag to check. @ref ETH_DMA_Status_Flags + * @retval The state of ETH DMA FLAG (SET or RESET). + */ +#define __HAL_ETH_DMA_GET_FLAG(__HANDLE__, __FLAG__) (((__HANDLE__)->Instance->DMASR &\ + ( __FLAG__)) == ( __FLAG__)) + +/** + * @brief Clears the specified ETHERNET DMA flag. + * @param __HANDLE__: ETH Handle + * @param __FLAG__: specifies the flag to check. @ref ETH_DMA_Status_Flags + * @retval The state of ETH DMA FLAG (SET or RESET). + */ +#define __HAL_ETH_DMA_CLEAR_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->Instance->DMASR = ( __FLAG__)) + + + +/** + * @brief Checks whether the specified ETHERNET MAC flag is set or not. + * @param __HANDLE__: ETH Handle + * @param __INTERRUPT__: specifies the flag to check. @ref ETH_MAC_Interrupts + * @retval The state of ETH MAC IT (SET or RESET). + */ +#define __HAL_ETH_MAC_GET_IT(__HANDLE__, __INTERRUPT__) (((__HANDLE__)->Instance->MACSR &\ + ( __INTERRUPT__)) == ( __INTERRUPT__)) + +/*!< External interrupt line 19 Connected to the ETH wakeup EXTI Line */ +#define ETH_WAKEUP_EXTI_LINE 0x00080000U + +/** + * @brief Enable the ETH WAKEUP Exti Line. + * @param __EXTI_LINE__: specifies the ETH WAKEUP Exti sources to be enabled. + * @arg ETH_WAKEUP_EXTI_LINE + * @retval None. + */ +#define __HAL_ETH_WAKEUP_EXTI_ENABLE_IT(__EXTI_LINE__) (EXTI->IMR |= (__EXTI_LINE__)) + +/** + * @brief checks whether the specified ETH WAKEUP Exti interrupt flag is set or not. + * @param __EXTI_LINE__: specifies the ETH WAKEUP Exti sources to be cleared. + * @arg ETH_WAKEUP_EXTI_LINE + * @retval EXTI ETH WAKEUP Line Status. + */ +#define __HAL_ETH_WAKEUP_EXTI_GET_FLAG(__EXTI_LINE__) (EXTI->PR & (__EXTI_LINE__)) + +/** + * @brief Clear the ETH WAKEUP Exti flag. + * @param __EXTI_LINE__: specifies the ETH WAKEUP Exti sources to be cleared. + * @arg ETH_WAKEUP_EXTI_LINE + * @retval None. + */ +#define __HAL_ETH_WAKEUP_EXTI_CLEAR_FLAG(__EXTI_LINE__) (EXTI->PR = (__EXTI_LINE__)) + +/** + * @brief enable rising edge interrupt on selected EXTI line. + * @param __EXTI_LINE__: specifies the ETH WAKEUP EXTI sources to be disabled. + * @arg ETH_WAKEUP_EXTI_LINE + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_ENABLE_RISING_EDGE(__EXTI_LINE__) (EXTI->FTSR &= ~(__EXTI_LINE__)); \ + (EXTI->RTSR |= (__EXTI_LINE__)) + +/** + * @brief enable falling edge interrupt on selected EXTI line. + * @param __EXTI_LINE__: specifies the ETH WAKEUP EXTI sources to be disabled. + * @arg ETH_WAKEUP_EXTI_LINE + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_ENABLE_FALLING_EDGE(__EXTI_LINE__) (EXTI->RTSR &= ~(__EXTI_LINE__));\ + (EXTI->FTSR |= (__EXTI_LINE__)) + +/** + * @brief enable falling edge interrupt on selected EXTI line. + * @param __EXTI_LINE__: specifies the ETH WAKEUP EXTI sources to be disabled. + * @arg ETH_WAKEUP_EXTI_LINE + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_ENABLE_RISING_FALLING_EDGE(__EXTI_LINE__) (EXTI->RTSR |= (__EXTI_LINE__));\ + (EXTI->FTSR |= (__EXTI_LINE__)) + +/** + * @brief Generates a Software interrupt on selected EXTI line. + * @param __EXTI_LINE__: specifies the ETH WAKEUP EXTI sources to be disabled. 
+ * @arg ETH_WAKEUP_EXTI_LINE + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_GENERATE_SWIT(__EXTI_LINE__) (EXTI->SWIER |= (__EXTI_LINE__)) + +#define __HAL_ETH_GET_PTP_CONTROL(__HANDLE__, __FLAG__) (((((__HANDLE__)->Instance->PTPTSCR) & \ + (__FLAG__)) == (__FLAG__)) ? SET : RESET) + +#define __HAL_ETH_SET_PTP_CONTROL(__HANDLE__, __FLAG__) ((__HANDLE__)->Instance->PTPTSCR |= (__FLAG__)) + +/** + * @} + */ + + +/* Exported functions --------------------------------------------------------*/ + +/** @addtogroup ETH_Exported_Functions + * @{ + */ + +/** @addtogroup ETH_Exported_Functions_Group1 + * @{ + */ +/* Initialization and de initialization functions **********************************/ +HAL_StatusTypeDef HAL_ETH_Init(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_DeInit(ETH_HandleTypeDef *heth); +void HAL_ETH_MspInit(ETH_HandleTypeDef *heth); +void HAL_ETH_MspDeInit(ETH_HandleTypeDef *heth); + +/* Callbacks Register/UnRegister functions ***********************************/ +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) +HAL_StatusTypeDef HAL_ETH_RegisterCallback(ETH_HandleTypeDef *heth, HAL_ETH_CallbackIDTypeDef CallbackID, + pETH_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_ETH_UnRegisterCallback(ETH_HandleTypeDef *heth, HAL_ETH_CallbackIDTypeDef CallbackID); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @addtogroup ETH_Exported_Functions_Group2 + * @{ + */ +/* IO operation functions *******************************************************/ +HAL_StatusTypeDef HAL_ETH_Start(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_Start_IT(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_Stop(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_Stop_IT(ETH_HandleTypeDef *heth); + +HAL_StatusTypeDef HAL_ETH_ReadData(ETH_HandleTypeDef *heth, void **pAppBuff); +HAL_StatusTypeDef HAL_ETH_RegisterRxAllocateCallback(ETH_HandleTypeDef *heth, + pETH_rxAllocateCallbackTypeDef rxAllocateCallback); +HAL_StatusTypeDef HAL_ETH_UnRegisterRxAllocateCallback(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_RegisterRxLinkCallback(ETH_HandleTypeDef *heth, pETH_rxLinkCallbackTypeDef rxLinkCallback); +HAL_StatusTypeDef HAL_ETH_UnRegisterRxLinkCallback(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_GetRxDataErrorCode(const ETH_HandleTypeDef *heth, uint32_t *pErrorCode); +HAL_StatusTypeDef HAL_ETH_RegisterTxFreeCallback(ETH_HandleTypeDef *heth, pETH_txFreeCallbackTypeDef txFreeCallback); +HAL_StatusTypeDef HAL_ETH_UnRegisterTxFreeCallback(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_ReleaseTxPacket(ETH_HandleTypeDef *heth); + +#ifdef HAL_ETH_USE_PTP +HAL_StatusTypeDef HAL_ETH_PTP_SetConfig(ETH_HandleTypeDef *heth, ETH_PTP_ConfigTypeDef *ptpconfig); +HAL_StatusTypeDef HAL_ETH_PTP_GetConfig(ETH_HandleTypeDef *heth, ETH_PTP_ConfigTypeDef *ptpconfig); +HAL_StatusTypeDef HAL_ETH_PTP_SetTime(ETH_HandleTypeDef *heth, ETH_TimeTypeDef *time); +HAL_StatusTypeDef HAL_ETH_PTP_GetTime(ETH_HandleTypeDef *heth, ETH_TimeTypeDef *time); +HAL_StatusTypeDef HAL_ETH_PTP_AddTimeOffset(ETH_HandleTypeDef *heth, ETH_PtpUpdateTypeDef ptpoffsettype, + ETH_TimeTypeDef *timeoffset); +HAL_StatusTypeDef HAL_ETH_PTP_InsertTxTimestamp(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_PTP_GetTxTimestamp(ETH_HandleTypeDef *heth, ETH_TimeStampTypeDef *timestamp); +HAL_StatusTypeDef HAL_ETH_PTP_GetRxTimestamp(ETH_HandleTypeDef *heth, ETH_TimeStampTypeDef *timestamp); +HAL_StatusTypeDef HAL_ETH_RegisterTxPtpCallback(ETH_HandleTypeDef *heth, pETH_txPtpCallbackTypeDef txPtpCallback); 
+HAL_StatusTypeDef HAL_ETH_UnRegisterTxPtpCallback(ETH_HandleTypeDef *heth); +#endif /* HAL_ETH_USE_PTP */ + +HAL_StatusTypeDef HAL_ETH_Transmit(ETH_HandleTypeDef *heth, ETH_TxPacketConfigTypeDef *pTxConfig, uint32_t Timeout); +HAL_StatusTypeDef HAL_ETH_Transmit_IT(ETH_HandleTypeDef *heth, ETH_TxPacketConfigTypeDef *pTxConfig); + +HAL_StatusTypeDef HAL_ETH_WritePHYRegister(const ETH_HandleTypeDef *heth, uint32_t PHYAddr, uint32_t PHYReg, + uint32_t RegValue); +HAL_StatusTypeDef HAL_ETH_ReadPHYRegister(ETH_HandleTypeDef *heth, uint32_t PHYAddr, uint32_t PHYReg, + uint32_t *pRegValue); + +void HAL_ETH_IRQHandler(ETH_HandleTypeDef *heth); +void HAL_ETH_TxCpltCallback(ETH_HandleTypeDef *heth); +void HAL_ETH_RxCpltCallback(ETH_HandleTypeDef *heth); +void HAL_ETH_ErrorCallback(ETH_HandleTypeDef *heth); +void HAL_ETH_PMTCallback(ETH_HandleTypeDef *heth); +void HAL_ETH_WakeUpCallback(ETH_HandleTypeDef *heth); +void HAL_ETH_RxAllocateCallback(uint8_t **buff); +void HAL_ETH_RxLinkCallback(void **pStart, void **pEnd, uint8_t *buff, uint16_t Length); +void HAL_ETH_TxFreeCallback(uint32_t *buff); +void HAL_ETH_TxPtpCallback(uint32_t *buff, ETH_TimeStampTypeDef *timestamp); +/** + * @} + */ + +/** @addtogroup ETH_Exported_Functions_Group3 + * @{ + */ +/* Peripheral Control functions **********************************************/ +/* MAC & DMA Configuration APIs **********************************************/ +HAL_StatusTypeDef HAL_ETH_GetMACConfig(const ETH_HandleTypeDef *heth, ETH_MACConfigTypeDef *macconf); +HAL_StatusTypeDef HAL_ETH_GetDMAConfig(const ETH_HandleTypeDef *heth, ETH_DMAConfigTypeDef *dmaconf); +HAL_StatusTypeDef HAL_ETH_SetMACConfig(ETH_HandleTypeDef *heth, ETH_MACConfigTypeDef *macconf); +HAL_StatusTypeDef HAL_ETH_SetDMAConfig(ETH_HandleTypeDef *heth, ETH_DMAConfigTypeDef *dmaconf); +void HAL_ETH_SetMDIOClockRange(ETH_HandleTypeDef *heth); + +/* MAC VLAN Processing APIs ************************************************/ +void HAL_ETH_SetRxVLANIdentifier(ETH_HandleTypeDef *heth, uint32_t ComparisonBits, + uint32_t VLANIdentifier); + +/* MAC L2 Packet Filtering APIs **********************************************/ +HAL_StatusTypeDef HAL_ETH_GetMACFilterConfig(const ETH_HandleTypeDef *heth, ETH_MACFilterConfigTypeDef *pFilterConfig); +HAL_StatusTypeDef HAL_ETH_SetMACFilterConfig(ETH_HandleTypeDef *heth, const ETH_MACFilterConfigTypeDef *pFilterConfig); +HAL_StatusTypeDef HAL_ETH_SetHashTable(ETH_HandleTypeDef *heth, uint32_t *pHashTable); +HAL_StatusTypeDef HAL_ETH_SetSourceMACAddrMatch(const ETH_HandleTypeDef *heth, uint32_t AddrNbr, + const uint8_t *pMACAddr); + +/* MAC Power Down APIs *****************************************************/ +void HAL_ETH_EnterPowerDownMode(ETH_HandleTypeDef *heth, + const ETH_PowerDownConfigTypeDef *pPowerDownConfig); +void HAL_ETH_ExitPowerDownMode(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_SetWakeUpFilter(ETH_HandleTypeDef *heth, uint32_t *pFilter, uint32_t Count); + +/** + * @} + */ + +/** @addtogroup ETH_Exported_Functions_Group4 + * @{ + */ +/* Peripheral State functions **************************************************/ +HAL_ETH_StateTypeDef HAL_ETH_GetState(const ETH_HandleTypeDef *heth); +uint32_t HAL_ETH_GetError(const ETH_HandleTypeDef *heth); +uint32_t HAL_ETH_GetDMAError(const ETH_HandleTypeDef *heth); +uint32_t HAL_ETH_GetMACError(const ETH_HandleTypeDef *heth); +uint32_t HAL_ETH_GetMACWakeUpSource(const ETH_HandleTypeDef *heth); +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* ETH */ + +#ifdef 
__cplusplus +} +#endif + +#endif /* STM32F4xx_HAL_ETH_H */ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_exti.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_exti.h new file mode 100644 index 0000000..b18a228 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_exti.h @@ -0,0 +1,366 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_exti.h + * @author MCD Application Team + * @brief Header file of EXTI HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2018 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32f4xx_HAL_EXTI_H +#define STM32f4xx_HAL_EXTI_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal_def.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @defgroup EXTI EXTI + * @brief EXTI HAL module driver + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ + +/** @defgroup EXTI_Exported_Types EXTI Exported Types + * @{ + */ +typedef enum +{ + HAL_EXTI_COMMON_CB_ID = 0x00U +} EXTI_CallbackIDTypeDef; + +/** + * @brief EXTI Handle structure definition + */ +typedef struct +{ + uint32_t Line; /*!< Exti line number */ + void (* PendingCallback)(void); /*!< Exti pending callback */ +} EXTI_HandleTypeDef; + +/** + * @brief EXTI Configuration structure definition + */ +typedef struct +{ + uint32_t Line; /*!< The Exti line to be configured. This parameter + can be a value of @ref EXTI_Line */ + uint32_t Mode; /*!< The Exti Mode to be configured for a core. + This parameter can be a combination of @ref EXTI_Mode */ + uint32_t Trigger; /*!< The Exti Trigger to be configured. This parameter + can be a value of @ref EXTI_Trigger */ + uint32_t GPIOSel; /*!< The Exti GPIO multiplexer selection to be configured. + This parameter is only possible for line 0 to 15. 
It + can be a value of @ref EXTI_GPIOSel */ +} EXTI_ConfigTypeDef; + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup EXTI_Exported_Constants EXTI Exported Constants + * @{ + */ + +/** @defgroup EXTI_Line EXTI Line + * @{ + */ +#define EXTI_LINE_0 (EXTI_GPIO | 0x00u) /*!< External interrupt line 0 */ +#define EXTI_LINE_1 (EXTI_GPIO | 0x01u) /*!< External interrupt line 1 */ +#define EXTI_LINE_2 (EXTI_GPIO | 0x02u) /*!< External interrupt line 2 */ +#define EXTI_LINE_3 (EXTI_GPIO | 0x03u) /*!< External interrupt line 3 */ +#define EXTI_LINE_4 (EXTI_GPIO | 0x04u) /*!< External interrupt line 4 */ +#define EXTI_LINE_5 (EXTI_GPIO | 0x05u) /*!< External interrupt line 5 */ +#define EXTI_LINE_6 (EXTI_GPIO | 0x06u) /*!< External interrupt line 6 */ +#define EXTI_LINE_7 (EXTI_GPIO | 0x07u) /*!< External interrupt line 7 */ +#define EXTI_LINE_8 (EXTI_GPIO | 0x08u) /*!< External interrupt line 8 */ +#define EXTI_LINE_9 (EXTI_GPIO | 0x09u) /*!< External interrupt line 9 */ +#define EXTI_LINE_10 (EXTI_GPIO | 0x0Au) /*!< External interrupt line 10 */ +#define EXTI_LINE_11 (EXTI_GPIO | 0x0Bu) /*!< External interrupt line 11 */ +#define EXTI_LINE_12 (EXTI_GPIO | 0x0Cu) /*!< External interrupt line 12 */ +#define EXTI_LINE_13 (EXTI_GPIO | 0x0Du) /*!< External interrupt line 13 */ +#define EXTI_LINE_14 (EXTI_GPIO | 0x0Eu) /*!< External interrupt line 14 */ +#define EXTI_LINE_15 (EXTI_GPIO | 0x0Fu) /*!< External interrupt line 15 */ +#define EXTI_LINE_16 (EXTI_CONFIG | 0x10u) /*!< External interrupt line 16 Connected to the PVD Output */ +#define EXTI_LINE_17 (EXTI_CONFIG | 0x11u) /*!< External interrupt line 17 Connected to the RTC Alarm event */ +#if defined(EXTI_IMR_IM18) +#define EXTI_LINE_18 (EXTI_CONFIG | 0x12u) /*!< External interrupt line 18 Connected to the USB OTG FS Wakeup from suspend event */ +#else +#define EXTI_LINE_18 (EXTI_RESERVED | 0x12u) /*!< No interrupt supported in this line */ +#endif /* EXTI_IMR_IM18 */ +#if defined(EXTI_IMR_IM19) +#define EXTI_LINE_19 (EXTI_CONFIG | 0x13u) /*!< External interrupt line 19 Connected to the Ethernet Wakeup event */ +#else +#define EXTI_LINE_19 (EXTI_RESERVED | 0x13u) /*!< No interrupt supported in this line */ +#endif /* EXTI_IMR_IM19 */ +#if defined(EXTI_IMR_IM20) +#define EXTI_LINE_20 (EXTI_CONFIG | 0x14u) /*!< External interrupt line 20 Connected to the USB OTG HS (configured in FS) Wakeup event */ +#else +#define EXTI_LINE_20 (EXTI_RESERVED | 0x14u) /*!< No interrupt supported in this line */ +#endif /* EXTI_IMR_IM20 */ +#define EXTI_LINE_21 (EXTI_CONFIG | 0x15u) /*!< External interrupt line 21 Connected to the RTC Tamper and Time Stamp events */ +#define EXTI_LINE_22 (EXTI_CONFIG | 0x16u) /*!< External interrupt line 22 Connected to the RTC Wakeup event */ +#if defined(EXTI_IMR_IM23) +#define EXTI_LINE_23 (EXTI_CONFIG | 0x17u) /*!< External interrupt line 23 Connected to the LPTIM1 asynchronous event */ +#endif /* EXTI_IMR_IM23 */ + +/** + * @} + */ + +/** @defgroup EXTI_Mode EXTI Mode + * @{ + */ +#define EXTI_MODE_NONE 0x00000000u +#define EXTI_MODE_INTERRUPT 0x00000001u +#define EXTI_MODE_EVENT 0x00000002u +/** + * @} + */ + +/** @defgroup EXTI_Trigger EXTI Trigger + * @{ + */ + +#define EXTI_TRIGGER_NONE 0x00000000u +#define EXTI_TRIGGER_RISING 0x00000001u +#define EXTI_TRIGGER_FALLING 0x00000002u +#define EXTI_TRIGGER_RISING_FALLING (EXTI_TRIGGER_RISING | EXTI_TRIGGER_FALLING) +/** + * @} + */ + +/** @defgroup EXTI_GPIOSel EXTI GPIOSel + * @brief + * @{ + */ +#define 
EXTI_GPIOA 0x00000000u +#define EXTI_GPIOB 0x00000001u +#define EXTI_GPIOC 0x00000002u +#if defined (GPIOD) +#define EXTI_GPIOD 0x00000003u +#endif /* GPIOD */ +#if defined (GPIOE) +#define EXTI_GPIOE 0x00000004u +#endif /* GPIOE */ +#if defined (GPIOF) +#define EXTI_GPIOF 0x00000005u +#endif /* GPIOF */ +#if defined (GPIOG) +#define EXTI_GPIOG 0x00000006u +#endif /* GPIOG */ +#if defined (GPIOH) +#define EXTI_GPIOH 0x00000007u +#endif /* GPIOH */ +#if defined (GPIOI) +#define EXTI_GPIOI 0x00000008u +#endif /* GPIOI */ +#if defined (GPIOJ) +#define EXTI_GPIOJ 0x00000009u +#endif /* GPIOJ */ +#if defined (GPIOK) +#define EXTI_GPIOK 0x0000000Au +#endif /* GPIOK */ + +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup EXTI_Exported_Macros EXTI Exported Macros + * @{ + */ + +/** + * @} + */ + +/* Private constants --------------------------------------------------------*/ +/** @defgroup EXTI_Private_Constants EXTI Private Constants + * @{ + */ +/** + * @brief EXTI Line property definition + */ +#define EXTI_PROPERTY_SHIFT 24u +#define EXTI_CONFIG (0x02uL << EXTI_PROPERTY_SHIFT) +#define EXTI_GPIO ((0x04uL << EXTI_PROPERTY_SHIFT) | EXTI_CONFIG) +#define EXTI_RESERVED (0x08uL << EXTI_PROPERTY_SHIFT) +#define EXTI_PROPERTY_MASK (EXTI_CONFIG | EXTI_GPIO) + +/** + * @brief EXTI bit usage + */ +#define EXTI_PIN_MASK 0x0000001Fu + +/** + * @brief EXTI Mask for interrupt & event mode + */ +#define EXTI_MODE_MASK (EXTI_MODE_EVENT | EXTI_MODE_INTERRUPT) + +/** + * @brief EXTI Mask for trigger possibilities + */ +#define EXTI_TRIGGER_MASK (EXTI_TRIGGER_RISING | EXTI_TRIGGER_FALLING) + +/** + * @brief EXTI Line number + */ +#if defined(EXTI_IMR_IM23) +#define EXTI_LINE_NB 24UL +#else +#define EXTI_LINE_NB 23UL +#endif /* EXTI_IMR_IM23 */ + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup EXTI_Private_Macros EXTI Private Macros + * @{ + */ +#define IS_EXTI_LINE(__EXTI_LINE__) ((((__EXTI_LINE__) & ~(EXTI_PROPERTY_MASK | EXTI_PIN_MASK)) == 0x00u) && \ + ((((__EXTI_LINE__) & EXTI_PROPERTY_MASK) == EXTI_CONFIG) || \ + (((__EXTI_LINE__) & EXTI_PROPERTY_MASK) == EXTI_GPIO)) && \ + (((__EXTI_LINE__) & EXTI_PIN_MASK) < EXTI_LINE_NB)) + +#define IS_EXTI_MODE(__EXTI_LINE__) ((((__EXTI_LINE__) & EXTI_MODE_MASK) != 0x00u) && \ + (((__EXTI_LINE__) & ~EXTI_MODE_MASK) == 0x00u)) + +#define IS_EXTI_TRIGGER(__EXTI_LINE__) (((__EXTI_LINE__) & ~EXTI_TRIGGER_MASK) == 0x00u) + +#define IS_EXTI_PENDING_EDGE(__EXTI_LINE__) ((__EXTI_LINE__) == EXTI_TRIGGER_RISING_FALLING) + +#define IS_EXTI_CONFIG_LINE(__EXTI_LINE__) (((__EXTI_LINE__) & EXTI_CONFIG) != 0x00u) + +#if !defined (GPIOD) +#define IS_EXTI_GPIO_PORT(__PORT__) (((__PORT__) == EXTI_GPIOA) || \ + ((__PORT__) == EXTI_GPIOB) || \ + ((__PORT__) == EXTI_GPIOC) || \ + ((__PORT__) == EXTI_GPIOH)) +#elif !defined (GPIOE) +#define IS_EXTI_GPIO_PORT(__PORT__) (((__PORT__) == EXTI_GPIOA) || \ + ((__PORT__) == EXTI_GPIOB) || \ + ((__PORT__) == EXTI_GPIOC) || \ + ((__PORT__) == EXTI_GPIOD) || \ + ((__PORT__) == EXTI_GPIOH)) +#elif !defined (GPIOF) +#define IS_EXTI_GPIO_PORT(__PORT__) (((__PORT__) == EXTI_GPIOA) || \ + ((__PORT__) == EXTI_GPIOB) || \ + ((__PORT__) == EXTI_GPIOC) || \ + ((__PORT__) == EXTI_GPIOD) || \ + ((__PORT__) == EXTI_GPIOE) || \ + ((__PORT__) == EXTI_GPIOH)) +#elif !defined (GPIOI) +#define IS_EXTI_GPIO_PORT(__PORT__) (((__PORT__) == EXTI_GPIOA) || \ + ((__PORT__) == EXTI_GPIOB) || \ + ((__PORT__) == EXTI_GPIOC) || \ 
+ ((__PORT__) == EXTI_GPIOD) || \ + ((__PORT__) == EXTI_GPIOE) || \ + ((__PORT__) == EXTI_GPIOF) || \ + ((__PORT__) == EXTI_GPIOG) || \ + ((__PORT__) == EXTI_GPIOH)) +#elif !defined (GPIOJ) +#define IS_EXTI_GPIO_PORT(__PORT__) (((__PORT__) == EXTI_GPIOA) || \ + ((__PORT__) == EXTI_GPIOB) || \ + ((__PORT__) == EXTI_GPIOC) || \ + ((__PORT__) == EXTI_GPIOD) || \ + ((__PORT__) == EXTI_GPIOE) || \ + ((__PORT__) == EXTI_GPIOF) || \ + ((__PORT__) == EXTI_GPIOG) || \ + ((__PORT__) == EXTI_GPIOH) || \ + ((__PORT__) == EXTI_GPIOI)) +#else +#define IS_EXTI_GPIO_PORT(__PORT__) (((__PORT__) == EXTI_GPIOA) || \ + ((__PORT__) == EXTI_GPIOB) || \ + ((__PORT__) == EXTI_GPIOC) || \ + ((__PORT__) == EXTI_GPIOD) || \ + ((__PORT__) == EXTI_GPIOE) || \ + ((__PORT__) == EXTI_GPIOF) || \ + ((__PORT__) == EXTI_GPIOG) || \ + ((__PORT__) == EXTI_GPIOH) || \ + ((__PORT__) == EXTI_GPIOI) || \ + ((__PORT__) == EXTI_GPIOJ) || \ + ((__PORT__) == EXTI_GPIOK)) +#endif /* GPIOD */ + +#define IS_EXTI_GPIO_PIN(__PIN__) ((__PIN__) < 16U) +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup EXTI_Exported_Functions EXTI Exported Functions + * @brief EXTI Exported Functions + * @{ + */ + +/** @defgroup EXTI_Exported_Functions_Group1 Configuration functions + * @brief Configuration functions + * @{ + */ +/* Configuration functions ****************************************************/ +HAL_StatusTypeDef HAL_EXTI_SetConfigLine(EXTI_HandleTypeDef *hexti, EXTI_ConfigTypeDef *pExtiConfig); +HAL_StatusTypeDef HAL_EXTI_GetConfigLine(EXTI_HandleTypeDef *hexti, EXTI_ConfigTypeDef *pExtiConfig); +HAL_StatusTypeDef HAL_EXTI_ClearConfigLine(EXTI_HandleTypeDef *hexti); +HAL_StatusTypeDef HAL_EXTI_RegisterCallback(EXTI_HandleTypeDef *hexti, EXTI_CallbackIDTypeDef CallbackID, void (*pPendingCbfn)(void)); +HAL_StatusTypeDef HAL_EXTI_GetHandle(EXTI_HandleTypeDef *hexti, uint32_t ExtiLine); +/** + * @} + */ + +/** @defgroup EXTI_Exported_Functions_Group2 IO operation functions + * @brief IO operation functions + * @{ + */ +/* IO operation functions *****************************************************/ +void HAL_EXTI_IRQHandler(EXTI_HandleTypeDef *hexti); +uint32_t HAL_EXTI_GetPending(EXTI_HandleTypeDef *hexti, uint32_t Edge); +void HAL_EXTI_ClearPending(EXTI_HandleTypeDef *hexti, uint32_t Edge); +void HAL_EXTI_GenerateSWI(EXTI_HandleTypeDef *hexti); + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32f4xx_HAL_EXTI_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_flash.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_flash.h new file mode 100644 index 0000000..41f77d2 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_flash.h @@ -0,0 +1,425 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_flash.h + * @author MCD Application Team + * @brief Header file of FLASH HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_HAL_FLASH_H +#define __STM32F4xx_HAL_FLASH_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal_def.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @addtogroup FLASH + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup FLASH_Exported_Types FLASH Exported Types + * @{ + */ + +/** + * @brief FLASH Procedure structure definition + */ +typedef enum +{ + FLASH_PROC_NONE = 0U, + FLASH_PROC_SECTERASE, + FLASH_PROC_MASSERASE, + FLASH_PROC_PROGRAM +} FLASH_ProcedureTypeDef; + +/** + * @brief FLASH handle Structure definition + */ +typedef struct +{ + __IO FLASH_ProcedureTypeDef ProcedureOnGoing; /*Internal variable to indicate which procedure is ongoing or not in IT context*/ + + __IO uint32_t NbSectorsToErase; /*Internal variable to save the remaining sectors to erase in IT context*/ + + __IO uint8_t VoltageForErase; /*Internal variable to provide voltage range selected by user in IT context*/ + + __IO uint32_t Sector; /*Internal variable to define the current sector which is erasing*/ + + __IO uint32_t Bank; /*Internal variable to save current bank selected during mass erase*/ + + __IO uint32_t Address; /*Internal variable to save address selected for program*/ + + HAL_LockTypeDef Lock; /* FLASH locking object */ + + __IO uint32_t ErrorCode; /* FLASH error code */ + +} FLASH_ProcessTypeDef; + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup FLASH_Exported_Constants FLASH Exported Constants + * @{ + */ +/** @defgroup FLASH_Error_Code FLASH Error Code + * @brief FLASH Error Code + * @{ + */ +#define HAL_FLASH_ERROR_NONE 0x00000000U /*!< No error */ +#define HAL_FLASH_ERROR_RD 0x00000001U /*!< Read Protection error */ +#define HAL_FLASH_ERROR_PGS 0x00000002U /*!< Programming Sequence error */ +#define HAL_FLASH_ERROR_PGP 0x00000004U /*!< Programming Parallelism error */ +#define HAL_FLASH_ERROR_PGA 0x00000008U /*!< Programming Alignment error */ +#define HAL_FLASH_ERROR_WRP 0x00000010U /*!< Write protection error */ +#define HAL_FLASH_ERROR_OPERATION 0x00000020U /*!< Operation Error */ +/** + * @} + */ + +/** @defgroup FLASH_Type_Program FLASH Type Program + * @{ + */ +#define FLASH_TYPEPROGRAM_BYTE 0x00000000U /*!< Program byte (8-bit) at a specified address */ +#define FLASH_TYPEPROGRAM_HALFWORD 0x00000001U /*!< Program a half-word (16-bit) at a specified address */ +#define FLASH_TYPEPROGRAM_WORD 0x00000002U /*!< Program a word (32-bit) at a specified address */ +#define FLASH_TYPEPROGRAM_DOUBLEWORD 0x00000003U /*!< Program a double word (64-bit) at a specified address */ +/** + * @} + */ + +/** @defgroup FLASH_Flag_definition FLASH Flag definition + * @brief Flag definition + * @{ + */ +#define FLASH_FLAG_EOP FLASH_SR_EOP /*!< FLASH End of Operation flag */ +#define FLASH_FLAG_OPERR FLASH_SR_SOP /*!< FLASH operation Error flag */ +#define FLASH_FLAG_WRPERR FLASH_SR_WRPERR /*!< FLASH Write protected error flag */ +#define FLASH_FLAG_PGAERR FLASH_SR_PGAERR /*!< FLASH Programming Alignment error flag */ +#define FLASH_FLAG_PGPERR FLASH_SR_PGPERR /*!< FLASH Programming Parallelism error flag */ +#define FLASH_FLAG_PGSERR FLASH_SR_PGSERR /*!< FLASH Programming Sequence error 
flag */ +#if defined(FLASH_SR_RDERR) +#define FLASH_FLAG_RDERR FLASH_SR_RDERR /*!< Read Protection error flag (PCROP) */ +#endif /* FLASH_SR_RDERR */ +#define FLASH_FLAG_BSY FLASH_SR_BSY /*!< FLASH Busy flag */ +/** + * @} + */ + +/** @defgroup FLASH_Interrupt_definition FLASH Interrupt definition + * @brief FLASH Interrupt definition + * @{ + */ +#define FLASH_IT_EOP FLASH_CR_EOPIE /*!< End of FLASH Operation Interrupt source */ +#define FLASH_IT_ERR 0x02000000U /*!< Error Interrupt source */ +/** + * @} + */ + +/** @defgroup FLASH_Program_Parallelism FLASH Program Parallelism + * @{ + */ +#define FLASH_PSIZE_BYTE 0x00000000U +#define FLASH_PSIZE_HALF_WORD 0x00000100U +#define FLASH_PSIZE_WORD 0x00000200U +#define FLASH_PSIZE_DOUBLE_WORD 0x00000300U +#define CR_PSIZE_MASK 0xFFFFFCFFU +/** + * @} + */ + +/** @defgroup FLASH_Keys FLASH Keys + * @{ + */ +#define RDP_KEY ((uint16_t)0x00A5) +#define FLASH_KEY1 0x45670123U +#define FLASH_KEY2 0xCDEF89ABU +#define FLASH_OPT_KEY1 0x08192A3BU +#define FLASH_OPT_KEY2 0x4C5D6E7FU +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup FLASH_Exported_Macros FLASH Exported Macros + * @{ + */ +/** + * @brief Set the FLASH Latency. + * @param __LATENCY__ FLASH Latency + * The value of this parameter depend on device used within the same series + * @retval none + */ +#define __HAL_FLASH_SET_LATENCY(__LATENCY__) (*(__IO uint8_t *)ACR_BYTE0_ADDRESS = (uint8_t)(__LATENCY__)) + +/** + * @brief Get the FLASH Latency. + * @retval FLASH Latency + * The value of this parameter depend on device used within the same series + */ +#define __HAL_FLASH_GET_LATENCY() (READ_BIT((FLASH->ACR), FLASH_ACR_LATENCY)) + +/** + * @brief Enable the FLASH prefetch buffer. + * @retval none + */ +#define __HAL_FLASH_PREFETCH_BUFFER_ENABLE() (FLASH->ACR |= FLASH_ACR_PRFTEN) + +/** + * @brief Disable the FLASH prefetch buffer. + * @retval none + */ +#define __HAL_FLASH_PREFETCH_BUFFER_DISABLE() (FLASH->ACR &= (~FLASH_ACR_PRFTEN)) + +/** + * @brief Enable the FLASH instruction cache. + * @retval none + */ +#define __HAL_FLASH_INSTRUCTION_CACHE_ENABLE() (FLASH->ACR |= FLASH_ACR_ICEN) + +/** + * @brief Disable the FLASH instruction cache. + * @retval none + */ +#define __HAL_FLASH_INSTRUCTION_CACHE_DISABLE() (FLASH->ACR &= (~FLASH_ACR_ICEN)) + +/** + * @brief Enable the FLASH data cache. + * @retval none + */ +#define __HAL_FLASH_DATA_CACHE_ENABLE() (FLASH->ACR |= FLASH_ACR_DCEN) + +/** + * @brief Disable the FLASH data cache. + * @retval none + */ +#define __HAL_FLASH_DATA_CACHE_DISABLE() (FLASH->ACR &= (~FLASH_ACR_DCEN)) + +/** + * @brief Resets the FLASH instruction Cache. + * @note This function must be used only when the Instruction Cache is disabled. + * @retval None + */ +#define __HAL_FLASH_INSTRUCTION_CACHE_RESET() do {FLASH->ACR |= FLASH_ACR_ICRST; \ + FLASH->ACR &= ~FLASH_ACR_ICRST; \ + }while(0U) + +/** + * @brief Resets the FLASH data Cache. + * @note This function must be used only when the data Cache is disabled. + * @retval None + */ +#define __HAL_FLASH_DATA_CACHE_RESET() do {FLASH->ACR |= FLASH_ACR_DCRST; \ + FLASH->ACR &= ~FLASH_ACR_DCRST; \ + }while(0U) +/** + * @brief Enable the specified FLASH interrupt. 
+ * @param __INTERRUPT__ FLASH interrupt + * This parameter can be any combination of the following values: + * @arg FLASH_IT_EOP: End of FLASH Operation Interrupt + * @arg FLASH_IT_ERR: Error Interrupt + * @retval none + */ +#define __HAL_FLASH_ENABLE_IT(__INTERRUPT__) (FLASH->CR |= (__INTERRUPT__)) + +/** + * @brief Disable the specified FLASH interrupt. + * @param __INTERRUPT__ FLASH interrupt + * This parameter can be any combination of the following values: + * @arg FLASH_IT_EOP: End of FLASH Operation Interrupt + * @arg FLASH_IT_ERR: Error Interrupt + * @retval none + */ +#define __HAL_FLASH_DISABLE_IT(__INTERRUPT__) (FLASH->CR &= ~(uint32_t)(__INTERRUPT__)) + +/** + * @brief Get the specified FLASH flag status. + * @param __FLAG__ specifies the FLASH flags to check. + * This parameter can be any combination of the following values: + * @arg FLASH_FLAG_EOP : FLASH End of Operation flag + * @arg FLASH_FLAG_OPERR : FLASH operation Error flag + * @arg FLASH_FLAG_WRPERR: FLASH Write protected error flag + * @arg FLASH_FLAG_PGAERR: FLASH Programming Alignment error flag + * @arg FLASH_FLAG_PGPERR: FLASH Programming Parallelism error flag + * @arg FLASH_FLAG_PGSERR: FLASH Programming Sequence error flag + * @arg FLASH_FLAG_RDERR : FLASH Read Protection error flag (PCROP) (*) + * @arg FLASH_FLAG_BSY : FLASH Busy flag + * (*) FLASH_FLAG_RDERR is not available for STM32F405xx/407xx/415xx/417xx devices + * @retval The new state of __FLAG__ (SET or RESET). + */ +#define __HAL_FLASH_GET_FLAG(__FLAG__) ((FLASH->SR & (__FLAG__))) + +/** + * @brief Clear the specified FLASH flags. + * @param __FLAG__ specifies the FLASH flags to clear. + * This parameter can be any combination of the following values: + * @arg FLASH_FLAG_EOP : FLASH End of Operation flag + * @arg FLASH_FLAG_OPERR : FLASH operation Error flag + * @arg FLASH_FLAG_WRPERR: FLASH Write protected error flag + * @arg FLASH_FLAG_PGAERR: FLASH Programming Alignment error flag + * @arg FLASH_FLAG_PGPERR: FLASH Programming Parallelism error flag + * @arg FLASH_FLAG_PGSERR: FLASH Programming Sequence error flag + * @arg FLASH_FLAG_RDERR : FLASH Read Protection error flag (PCROP) (*) + * (*) FLASH_FLAG_RDERR is not available for STM32F405xx/407xx/415xx/417xx devices + * @retval none + */ +#define __HAL_FLASH_CLEAR_FLAG(__FLAG__) (FLASH->SR = (__FLAG__)) +/** + * @} + */ + +/* Include FLASH HAL Extension module */ +#include "stm32f4xx_hal_flash_ex.h" +#include "stm32f4xx_hal_flash_ramfunc.h" + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup FLASH_Exported_Functions + * @{ + */ +/** @addtogroup FLASH_Exported_Functions_Group1 + * @{ + */ +/* Program operation functions ***********************************************/ +HAL_StatusTypeDef HAL_FLASH_Program(uint32_t TypeProgram, uint32_t Address, uint64_t Data); +HAL_StatusTypeDef HAL_FLASH_Program_IT(uint32_t TypeProgram, uint32_t Address, uint64_t Data); +/* FLASH IRQ handler method */ +void HAL_FLASH_IRQHandler(void); +/* Callbacks in non blocking modes */ +void HAL_FLASH_EndOfOperationCallback(uint32_t ReturnValue); +void HAL_FLASH_OperationErrorCallback(uint32_t ReturnValue); +/** + * @} + */ + +/** @addtogroup FLASH_Exported_Functions_Group2 + * @{ + */ +/* Peripheral Control functions **********************************************/ +HAL_StatusTypeDef HAL_FLASH_Unlock(void); +HAL_StatusTypeDef HAL_FLASH_Lock(void); +HAL_StatusTypeDef HAL_FLASH_OB_Unlock(void); +HAL_StatusTypeDef HAL_FLASH_OB_Lock(void); +/* Option bytes control */ 
+HAL_StatusTypeDef HAL_FLASH_OB_Launch(void); +/** + * @} + */ + +/** @addtogroup FLASH_Exported_Functions_Group3 + * @{ + */ +/* Peripheral State functions ************************************************/ +uint32_t HAL_FLASH_GetError(void); +HAL_StatusTypeDef FLASH_WaitForLastOperation(uint32_t Timeout); +/** + * @} + */ + +/** + * @} + */ +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/** @defgroup FLASH_Private_Variables FLASH Private Variables + * @{ + */ + +/** + * @} + */ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup FLASH_Private_Constants FLASH Private Constants + * @{ + */ + +/** + * @brief ACR register byte 0 (Bits[7:0]) base address + */ +#define ACR_BYTE0_ADDRESS 0x40023C00U +/** + * @brief OPTCR register byte 0 (Bits[7:0]) base address + */ +#define OPTCR_BYTE0_ADDRESS 0x40023C14U +/** + * @brief OPTCR register byte 1 (Bits[15:8]) base address + */ +#define OPTCR_BYTE1_ADDRESS 0x40023C15U +/** + * @brief OPTCR register byte 2 (Bits[23:16]) base address + */ +#define OPTCR_BYTE2_ADDRESS 0x40023C16U +/** + * @brief OPTCR register byte 3 (Bits[31:24]) base address + */ +#define OPTCR_BYTE3_ADDRESS 0x40023C17U + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup FLASH_Private_Macros FLASH Private Macros + * @{ + */ + +/** @defgroup FLASH_IS_FLASH_Definitions FLASH Private macros to check input parameters + * @{ + */ +#define IS_FLASH_TYPEPROGRAM(VALUE)(((VALUE) == FLASH_TYPEPROGRAM_BYTE) || \ + ((VALUE) == FLASH_TYPEPROGRAM_HALFWORD) || \ + ((VALUE) == FLASH_TYPEPROGRAM_WORD) || \ + ((VALUE) == FLASH_TYPEPROGRAM_DOUBLEWORD)) +/** + * @} + */ + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup FLASH_Private_Functions FLASH Private Functions + * @{ + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_HAL_FLASH_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_flash_ex.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_flash_ex.h new file mode 100644 index 0000000..5fa89db --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_flash_ex.h @@ -0,0 +1,1063 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_flash_ex.h + * @author MCD Application Team + * @brief Header file of FLASH HAL Extension module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_HAL_FLASH_EX_H +#define __STM32F4xx_HAL_FLASH_EX_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal_def.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @addtogroup FLASHEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup FLASHEx_Exported_Types FLASH Exported Types + * @{ + */ + +/** + * @brief FLASH Erase structure definition + */ +typedef struct +{ + uint32_t TypeErase; /*!< Mass erase or sector Erase. + This parameter can be a value of @ref FLASHEx_Type_Erase */ + + uint32_t Banks; /*!< Select banks to erase when Mass erase is enabled. + This parameter must be a value of @ref FLASHEx_Banks */ + + uint32_t Sector; /*!< Initial FLASH sector to erase when Mass erase is disabled + This parameter must be a value of @ref FLASHEx_Sectors */ + + uint32_t NbSectors; /*!< Number of sectors to be erased. + This parameter must be a value between 1 and (max number of sectors - value of Initial sector)*/ + + uint32_t VoltageRange;/*!< The device voltage range which defines the erase parallelism + This parameter must be a value of @ref FLASHEx_Voltage_Range */ + +} FLASH_EraseInitTypeDef; + +/** + * @brief FLASH Option Bytes Program structure definition + */ +typedef struct +{ + uint32_t OptionType; /*!< Option byte to be configured. + This parameter can be a value of @ref FLASHEx_Option_Type */ + + uint32_t WRPState; /*!< Write protection activation or deactivation. + This parameter can be a value of @ref FLASHEx_WRP_State */ + + uint32_t WRPSector; /*!< Specifies the sector(s) to be write protected. + The value of this parameter depend on device used within the same series */ + + uint32_t Banks; /*!< Select banks for WRP activation/deactivation of all sectors. + This parameter must be a value of @ref FLASHEx_Banks */ + + uint32_t RDPLevel; /*!< Set the read protection level. + This parameter can be a value of @ref FLASHEx_Option_Bytes_Read_Protection */ + + uint32_t BORLevel; /*!< Set the BOR Level. + This parameter can be a value of @ref FLASHEx_BOR_Reset_Level */ + + uint8_t USERConfig; /*!< Program the FLASH User Option Byte: IWDG_SW / RST_STOP / RST_STDBY. */ + +} FLASH_OBProgramInitTypeDef; + +/** + * @brief FLASH Advanced Option Bytes Program structure definition + */ +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) ||\ + defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F410Tx) || defined(STM32F410Cx) ||\ + defined(STM32F410Rx) || defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F469xx) ||\ + defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) ||\ + defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +typedef struct +{ + uint32_t OptionType; /*!< Option byte to be configured for extension. + This parameter can be a value of @ref FLASHEx_Advanced_Option_Type */ + + uint32_t PCROPState; /*!< PCROP activation or deactivation. 
+ This parameter can be a value of @ref FLASHEx_PCROP_State */ + +#if defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) || defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F412Zx) ||\ + defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) + uint16_t Sectors; /*!< specifies the sector(s) set for PCROP. + This parameter can be a value of @ref FLASHEx_Option_Bytes_PC_ReadWrite_Protection */ +#endif /* STM32F401xC || STM32F401xE || STM32F410xx || STM32F411xE || STM32F446xx || STM32F412Zx || STM32F412Vx || STM32F412Rx ||\ + STM32F412Cx || STM32F413xx || STM32F423xx */ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) + uint32_t Banks; /*!< Select banks for PCROP activation/deactivation of all sectors. + This parameter must be a value of @ref FLASHEx_Banks */ + + uint16_t SectorsBank1; /*!< Specifies the sector(s) set for PCROP for Bank1. + This parameter can be a value of @ref FLASHEx_Option_Bytes_PC_ReadWrite_Protection */ + + uint16_t SectorsBank2; /*!< Specifies the sector(s) set for PCROP for Bank2. + This parameter can be a value of @ref FLASHEx_Option_Bytes_PC_ReadWrite_Protection */ + + uint8_t BootConfig; /*!< Specifies Option bytes for boot config. + This parameter can be a value of @ref FLASHEx_Dual_Boot */ + +#endif /*STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ +} FLASH_AdvOBProgramInitTypeDef; +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F401xC || STM32F401xE || STM32F410xx || STM32F411xE || STM32F446xx || + STM32F469xx || STM32F479xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup FLASHEx_Exported_Constants FLASH Exported Constants + * @{ + */ + +/** @defgroup FLASHEx_Type_Erase FLASH Type Erase + * @{ + */ +#define FLASH_TYPEERASE_SECTORS 0x00000000U /*!< Sectors erase only */ +#define FLASH_TYPEERASE_MASSERASE 0x00000001U /*!< Flash Mass erase activation */ +/** + * @} + */ + +/** @defgroup FLASHEx_Voltage_Range FLASH Voltage Range + * @{ + */ +#define FLASH_VOLTAGE_RANGE_1 0x00000000U /*!< Device operating range: 1.8V to 2.1V */ +#define FLASH_VOLTAGE_RANGE_2 0x00000001U /*!< Device operating range: 2.1V to 2.7V */ +#define FLASH_VOLTAGE_RANGE_3 0x00000002U /*!< Device operating range: 2.7V to 3.6V */ +#define FLASH_VOLTAGE_RANGE_4 0x00000003U /*!< Device operating range: 2.7V to 3.6V + External Vpp */ +/** + * @} + */ + +/** @defgroup FLASHEx_WRP_State FLASH WRP State + * @{ + */ +#define OB_WRPSTATE_DISABLE 0x00000000U /*!< Disable the write protection of the desired bank 1 sectors */ +#define OB_WRPSTATE_ENABLE 0x00000001U /*!< Enable the write protection of the desired bank 1 sectors */ +/** + * @} + */ + +/** @defgroup FLASHEx_Option_Type FLASH Option Type + * @{ + */ +#define OPTIONBYTE_WRP 0x00000001U /*!< WRP option byte configuration */ +#define OPTIONBYTE_RDP 0x00000002U /*!< RDP option byte configuration */ +#define OPTIONBYTE_USER 0x00000004U /*!< USER option byte configuration */ +#define OPTIONBYTE_BOR 0x00000008U /*!< BOR option byte configuration */ +/** + * @} + */ + +/** @defgroup FLASHEx_Option_Bytes_Read_Protection FLASH Option Bytes Read Protection + * @{ + */ +#define OB_RDP_LEVEL_0 
((uint8_t)0xAA) +#define OB_RDP_LEVEL_1 ((uint8_t)0x55) +#define OB_RDP_LEVEL_2 ((uint8_t)0xCC) /*!< Warning: When enabling read protection level 2 + it s no more possible to go back to level 1 or 0 */ +/** + * @} + */ + +/** @defgroup FLASHEx_Option_Bytes_IWatchdog FLASH Option Bytes IWatchdog + * @{ + */ +#define OB_IWDG_SW ((uint8_t)0x20) /*!< Software IWDG selected */ +#define OB_IWDG_HW ((uint8_t)0x00) /*!< Hardware IWDG selected */ +/** + * @} + */ + +/** @defgroup FLASHEx_Option_Bytes_nRST_STOP FLASH Option Bytes nRST_STOP + * @{ + */ +#define OB_STOP_NO_RST ((uint8_t)0x40) /*!< No reset generated when entering in STOP */ +#define OB_STOP_RST ((uint8_t)0x00) /*!< Reset generated when entering in STOP */ +/** + * @} + */ + + +/** @defgroup FLASHEx_Option_Bytes_nRST_STDBY FLASH Option Bytes nRST_STDBY + * @{ + */ +#define OB_STDBY_NO_RST ((uint8_t)0x80) /*!< No reset generated when entering in STANDBY */ +#define OB_STDBY_RST ((uint8_t)0x00) /*!< Reset generated when entering in STANDBY */ +/** + * @} + */ + +/** @defgroup FLASHEx_BOR_Reset_Level FLASH BOR Reset Level + * @{ + */ +#define OB_BOR_LEVEL3 ((uint8_t)0x00) /*!< Supply voltage ranges from 2.70 to 3.60 V */ +#define OB_BOR_LEVEL2 ((uint8_t)0x04) /*!< Supply voltage ranges from 2.40 to 2.70 V */ +#define OB_BOR_LEVEL1 ((uint8_t)0x08) /*!< Supply voltage ranges from 2.10 to 2.40 V */ +#define OB_BOR_OFF ((uint8_t)0x0C) /*!< Supply voltage ranges from 1.62 to 2.10 V */ +/** + * @} + */ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) ||\ + defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F410Tx) || defined(STM32F410Cx) ||\ + defined(STM32F410Rx) || defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F469xx) ||\ + defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) ||\ + defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +/** @defgroup FLASHEx_PCROP_State FLASH PCROP State + * @{ + */ +#define OB_PCROP_STATE_DISABLE 0x00000000U /*!< Disable PCROP */ +#define OB_PCROP_STATE_ENABLE 0x00000001U /*!< Enable PCROP */ +/** + * @} + */ +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F401xC || STM32F401xE ||\ + STM32F410xx || STM32F411xE || STM32F446xx || STM32F469xx || STM32F479xx || STM32F412Zx ||\ + STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ + +/** @defgroup FLASHEx_Advanced_Option_Type FLASH Advanced Option Type + * @{ + */ +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) ||\ + defined(STM32F469xx) || defined(STM32F479xx) +#define OPTIONBYTE_PCROP 0x00000001U /*!< PCROP option byte configuration */ +#define OPTIONBYTE_BOOTCONFIG 0x00000002U /*!< BOOTConfig option byte configuration */ +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ + +#if defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F410Tx) || defined(STM32F410Cx) ||\ + defined(STM32F410Rx) || defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F412Zx) ||\ + defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) ||\ + defined(STM32F423xx) +#define OPTIONBYTE_PCROP 0x00000001U /*!= FLASH_BASE) && ((ADDRESS) <= FLASH_END)) || \ + (((ADDRESS) >= FLASH_OTP_BASE) && ((ADDRESS) <= FLASH_OTP_END))) + +#define IS_FLASH_NBSECTORS(NBSECTORS) (((NBSECTORS) != 0) && ((NBSECTORS) <= FLASH_SECTOR_TOTAL)) + +#if defined(STM32F427xx) || 
defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) +#define IS_OB_WRP_SECTOR(SECTOR)((((SECTOR) & 0xFF000000U) == 0x00000000U) && ((SECTOR) != 0x00000000U)) +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ + +#if defined(STM32F413xx) || defined(STM32F423xx) +#define IS_OB_WRP_SECTOR(SECTOR)((((SECTOR) & 0xFFFF8000U) == 0x00000000U) && ((SECTOR) != 0x00000000U)) +#endif /* STM32F413xx || STM32F423xx */ + +#if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx) || defined(STM32F417xx) +#define IS_OB_WRP_SECTOR(SECTOR)((((SECTOR) & 0xFFFFF000U) == 0x00000000U) && ((SECTOR) != 0x00000000U)) +#endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx */ + +#if defined(STM32F401xC) +#define IS_OB_WRP_SECTOR(SECTOR)((((SECTOR) & 0xFFFFF000U) == 0x00000000U) && ((SECTOR) != 0x00000000U)) +#endif /* STM32F401xC */ + +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) +#define IS_OB_WRP_SECTOR(SECTOR)((((SECTOR) & 0xFFFFF000U) == 0x00000000U) && ((SECTOR) != 0x00000000U)) +#endif /* STM32F410Tx || STM32F410Cx || STM32F410Rx */ + +#if defined(STM32F401xE) || defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F412Zx) || defined(STM32F412Vx) ||\ + defined(STM32F412Rx) || defined(STM32F412Cx) +#define IS_OB_WRP_SECTOR(SECTOR)((((SECTOR) & 0xFFFFF000U) == 0x00000000U) && ((SECTOR) != 0x00000000U)) +#endif /* STM32F401xE || STM32F411xE || STM32F446xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) +#define IS_OB_PCROP(SECTOR)((((SECTOR) & 0xFFFFF000U) == 0x00000000U) && ((SECTOR) != 0x00000000U)) +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ + +#if defined(STM32F413xx) || defined(STM32F423xx) +#define IS_OB_PCROP(SECTOR)((((SECTOR) & 0xFFFF8000U) == 0x00000000U) && ((SECTOR) != 0x00000000U)) +#endif /* STM32F413xx || STM32F423xx */ + +#if defined(STM32F401xC) +#define IS_OB_PCROP(SECTOR)((((SECTOR) & 0xFFFFF000U) == 0x00000000U) && ((SECTOR) != 0x00000000U)) +#endif /* STM32F401xC */ + +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) +#define IS_OB_PCROP(SECTOR)((((SECTOR) & 0xFFFFF000U) == 0x00000000U) && ((SECTOR) != 0x00000000U)) +#endif /* STM32F410Tx || STM32F410Cx || STM32F410Rx */ + +#if defined(STM32F401xE) || defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F412Zx) || defined(STM32F412Vx) ||\ + defined(STM32F412Rx) || defined(STM32F412Cx) +#define IS_OB_PCROP(SECTOR)((((SECTOR) & 0xFFFFF000U) == 0x00000000U) && ((SECTOR) != 0x00000000U)) +#endif /* STM32F401xE || STM32F411xE || STM32F446xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) ||\ + defined(STM32F469xx) || defined(STM32F479xx) +#define IS_OB_BOOT(BOOT) (((BOOT) == OB_DUAL_BOOT_ENABLE) || ((BOOT) == OB_DUAL_BOOT_DISABLE)) +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) ||\ + defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F410Tx) || defined(STM32F410Cx) ||\ + defined(STM32F410Rx) || defined(STM32F411xE) || defined(STM32F446xx) || 
defined(STM32F469xx) ||\ + defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) ||\ + defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +#define IS_OB_PCROP_SELECT(PCROP) (((PCROP) == OB_PCROP_SELECTED) || ((PCROP) == OB_PCROP_DESELECTED)) +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F401xC || STM32F401xE ||\ + STM32F410xx || STM32F411xE || STM32F446xx || STM32F469xx || STM32F479xx || STM32F412Zx ||\ + STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ +/** + * @} + */ + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup FLASHEx_Private_Functions FLASH Private Functions + * @{ + */ +void FLASH_Erase_Sector(uint32_t Sector, uint8_t VoltageRange); +void FLASH_FlushCaches(void); +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_HAL_FLASH_EX_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_flash_ramfunc.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_flash_ramfunc.h new file mode 100644 index 0000000..2112e74 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_flash_ramfunc.h @@ -0,0 +1,76 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_flash_ramfunc.h + * @author MCD Application Team + * @brief Header file of FLASH RAMFUNC driver. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_FLASH_RAMFUNC_H +#define __STM32F4xx_FLASH_RAMFUNC_H + +#ifdef __cplusplus +extern "C" { +#endif +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) || defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F412Zx) ||\ + defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal_def.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @addtogroup FLASH_RAMFUNC + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/* Exported macro ------------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup FLASH_RAMFUNC_Exported_Functions + * @{ + */ + +/** @addtogroup FLASH_RAMFUNC_Exported_Functions_Group1 + * @{ + */ +__RAM_FUNC HAL_StatusTypeDef HAL_FLASHEx_StopFlashInterfaceClk(void); +__RAM_FUNC HAL_StatusTypeDef HAL_FLASHEx_StartFlashInterfaceClk(void); +__RAM_FUNC HAL_StatusTypeDef HAL_FLASHEx_EnableFlashSleepMode(void); +__RAM_FUNC HAL_StatusTypeDef HAL_FLASHEx_DisableFlashSleepMode(void); +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* STM32F410xx || STM32F411xE || STM32F446xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ +#ifdef __cplusplus +} +#endif + + +#endif /* __STM32F4xx_FLASH_RAMFUNC_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_gpio.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_gpio.h new file mode 100644 index 0000000..5f3d749 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_gpio.h @@ -0,0 +1,325 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_gpio.h + * @author MCD Application Team + * @brief Header file of GPIO HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_HAL_GPIO_H +#define __STM32F4xx_HAL_GPIO_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal_def.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @addtogroup GPIO + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup GPIO_Exported_Types GPIO Exported Types + * @{ + */ + +/** + * @brief GPIO Init structure definition + */ +typedef struct +{ + uint32_t Pin; /*!< Specifies the GPIO pins to be configured. + This parameter can be any value of @ref GPIO_pins_define */ + + uint32_t Mode; /*!< Specifies the operating mode for the selected pins. + This parameter can be a value of @ref GPIO_mode_define */ + + uint32_t Pull; /*!< Specifies the Pull-up or Pull-Down activation for the selected pins. 
+ This parameter can be a value of @ref GPIO_pull_define */ + + uint32_t Speed; /*!< Specifies the speed for the selected pins. + This parameter can be a value of @ref GPIO_speed_define */ + + uint32_t Alternate; /*!< Peripheral to be connected to the selected pins. + This parameter can be a value of @ref GPIO_Alternate_function_selection */ +}GPIO_InitTypeDef; + +/** + * @brief GPIO Bit SET and Bit RESET enumeration + */ +typedef enum +{ + GPIO_PIN_RESET = 0, + GPIO_PIN_SET +}GPIO_PinState; +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup GPIO_Exported_Constants GPIO Exported Constants + * @{ + */ + +/** @defgroup GPIO_pins_define GPIO pins define + * @{ + */ +#define GPIO_PIN_0 ((uint16_t)0x0001) /* Pin 0 selected */ +#define GPIO_PIN_1 ((uint16_t)0x0002) /* Pin 1 selected */ +#define GPIO_PIN_2 ((uint16_t)0x0004) /* Pin 2 selected */ +#define GPIO_PIN_3 ((uint16_t)0x0008) /* Pin 3 selected */ +#define GPIO_PIN_4 ((uint16_t)0x0010) /* Pin 4 selected */ +#define GPIO_PIN_5 ((uint16_t)0x0020) /* Pin 5 selected */ +#define GPIO_PIN_6 ((uint16_t)0x0040) /* Pin 6 selected */ +#define GPIO_PIN_7 ((uint16_t)0x0080) /* Pin 7 selected */ +#define GPIO_PIN_8 ((uint16_t)0x0100) /* Pin 8 selected */ +#define GPIO_PIN_9 ((uint16_t)0x0200) /* Pin 9 selected */ +#define GPIO_PIN_10 ((uint16_t)0x0400) /* Pin 10 selected */ +#define GPIO_PIN_11 ((uint16_t)0x0800) /* Pin 11 selected */ +#define GPIO_PIN_12 ((uint16_t)0x1000) /* Pin 12 selected */ +#define GPIO_PIN_13 ((uint16_t)0x2000) /* Pin 13 selected */ +#define GPIO_PIN_14 ((uint16_t)0x4000) /* Pin 14 selected */ +#define GPIO_PIN_15 ((uint16_t)0x8000) /* Pin 15 selected */ +#define GPIO_PIN_All ((uint16_t)0xFFFF) /* All pins selected */ + +#define GPIO_PIN_MASK 0x0000FFFFU /* PIN mask for assert test */ +/** + * @} + */ + +/** @defgroup GPIO_mode_define GPIO mode define + * @brief GPIO Configuration Mode + * Elements values convention: 0x00WX00YZ + * - W : EXTI trigger detection on 3 bits + * - X : EXTI mode (IT or Event) on 2 bits + * - Y : Output type (Push Pull or Open Drain) on 1 bit + * - Z : GPIO mode (Input, Output, Alternate or Analog) on 2 bits + * @{ + */ +#define GPIO_MODE_INPUT MODE_INPUT /*!< Input Floating Mode */ +#define GPIO_MODE_OUTPUT_PP (MODE_OUTPUT | OUTPUT_PP) /*!< Output Push Pull Mode */ +#define GPIO_MODE_OUTPUT_OD (MODE_OUTPUT | OUTPUT_OD) /*!< Output Open Drain Mode */ +#define GPIO_MODE_AF_PP (MODE_AF | OUTPUT_PP) /*!< Alternate Function Push Pull Mode */ +#define GPIO_MODE_AF_OD (MODE_AF | OUTPUT_OD) /*!< Alternate Function Open Drain Mode */ + +#define GPIO_MODE_ANALOG MODE_ANALOG /*!< Analog Mode */ + +#define GPIO_MODE_IT_RISING (MODE_INPUT | EXTI_IT | TRIGGER_RISING) /*!< External Interrupt Mode with Rising edge trigger detection */ +#define GPIO_MODE_IT_FALLING (MODE_INPUT | EXTI_IT | TRIGGER_FALLING) /*!< External Interrupt Mode with Falling edge trigger detection */ +#define GPIO_MODE_IT_RISING_FALLING (MODE_INPUT | EXTI_IT | TRIGGER_RISING | TRIGGER_FALLING) /*!< External Interrupt Mode with Rising/Falling edge trigger detection */ + +#define GPIO_MODE_EVT_RISING (MODE_INPUT | EXTI_EVT | TRIGGER_RISING) /*!< External Event Mode with Rising edge trigger detection */ +#define GPIO_MODE_EVT_FALLING (MODE_INPUT | EXTI_EVT | TRIGGER_FALLING) /*!< External Event Mode with Falling edge trigger detection */ +#define GPIO_MODE_EVT_RISING_FALLING (MODE_INPUT | EXTI_EVT | TRIGGER_RISING | TRIGGER_FALLING) /*!< External Event Mode with Rising/Falling edge 
trigger detection */ + +/** + * @} + */ + +/** @defgroup GPIO_speed_define GPIO speed define + * @brief GPIO Output Maximum frequency + * @{ + */ +#define GPIO_SPEED_FREQ_LOW 0x00000000U /*!< IO works at 2 MHz, please refer to the product datasheet */ +#define GPIO_SPEED_FREQ_MEDIUM 0x00000001U /*!< range 12,5 MHz to 50 MHz, please refer to the product datasheet */ +#define GPIO_SPEED_FREQ_HIGH 0x00000002U /*!< range 25 MHz to 100 MHz, please refer to the product datasheet */ +#define GPIO_SPEED_FREQ_VERY_HIGH 0x00000003U /*!< range 50 MHz to 200 MHz, please refer to the product datasheet */ +/** + * @} + */ + + /** @defgroup GPIO_pull_define GPIO pull define + * @brief GPIO Pull-Up or Pull-Down Activation + * @{ + */ +#define GPIO_NOPULL 0x00000000U /*!< No Pull-up or Pull-down activation */ +#define GPIO_PULLUP 0x00000001U /*!< Pull-up activation */ +#define GPIO_PULLDOWN 0x00000002U /*!< Pull-down activation */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup GPIO_Exported_Macros GPIO Exported Macros + * @{ + */ + +/** + * @brief Checks whether the specified EXTI line flag is set or not. + * @param __EXTI_LINE__ specifies the EXTI line flag to check. + * This parameter can be GPIO_PIN_x where x can be(0..15) + * @retval The new state of __EXTI_LINE__ (SET or RESET). + */ +#define __HAL_GPIO_EXTI_GET_FLAG(__EXTI_LINE__) (EXTI->PR & (__EXTI_LINE__)) + +/** + * @brief Clears the EXTI's line pending flags. + * @param __EXTI_LINE__ specifies the EXTI lines flags to clear. + * This parameter can be any combination of GPIO_PIN_x where x can be (0..15) + * @retval None + */ +#define __HAL_GPIO_EXTI_CLEAR_FLAG(__EXTI_LINE__) (EXTI->PR = (__EXTI_LINE__)) + +/** + * @brief Checks whether the specified EXTI line is asserted or not. + * @param __EXTI_LINE__ specifies the EXTI line to check. + * This parameter can be GPIO_PIN_x where x can be(0..15) + * @retval The new state of __EXTI_LINE__ (SET or RESET). + */ +#define __HAL_GPIO_EXTI_GET_IT(__EXTI_LINE__) (EXTI->PR & (__EXTI_LINE__)) + +/** + * @brief Clears the EXTI's line pending bits. + * @param __EXTI_LINE__ specifies the EXTI lines to clear. + * This parameter can be any combination of GPIO_PIN_x where x can be (0..15) + * @retval None + */ +#define __HAL_GPIO_EXTI_CLEAR_IT(__EXTI_LINE__) (EXTI->PR = (__EXTI_LINE__)) + +/** + * @brief Generates a Software interrupt on selected EXTI line. + * @param __EXTI_LINE__ specifies the EXTI line to check. 
+ * This parameter can be GPIO_PIN_x where x can be(0..15) + * @retval None + */ +#define __HAL_GPIO_EXTI_GENERATE_SWIT(__EXTI_LINE__) (EXTI->SWIER |= (__EXTI_LINE__)) +/** + * @} + */ + +/* Include GPIO HAL Extension module */ +#include "stm32f4xx_hal_gpio_ex.h" + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup GPIO_Exported_Functions + * @{ + */ + +/** @addtogroup GPIO_Exported_Functions_Group1 + * @{ + */ +/* Initialization and de-initialization functions *****************************/ +void HAL_GPIO_Init(GPIO_TypeDef *GPIOx, GPIO_InitTypeDef *GPIO_Init); +void HAL_GPIO_DeInit(GPIO_TypeDef *GPIOx, uint32_t GPIO_Pin); +/** + * @} + */ + +/** @addtogroup GPIO_Exported_Functions_Group2 + * @{ + */ +/* IO operation functions *****************************************************/ +GPIO_PinState HAL_GPIO_ReadPin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin); +void HAL_GPIO_WritePin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin, GPIO_PinState PinState); +void HAL_GPIO_TogglePin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin); +HAL_StatusTypeDef HAL_GPIO_LockPin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin); +void HAL_GPIO_EXTI_IRQHandler(uint16_t GPIO_Pin); +void HAL_GPIO_EXTI_Callback(uint16_t GPIO_Pin); + +/** + * @} + */ + +/** + * @} + */ +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup GPIO_Private_Constants GPIO Private Constants + * @{ + */ +#define GPIO_MODE_Pos 0U +#define GPIO_MODE (0x3UL << GPIO_MODE_Pos) +#define MODE_INPUT (0x0UL << GPIO_MODE_Pos) +#define MODE_OUTPUT (0x1UL << GPIO_MODE_Pos) +#define MODE_AF (0x2UL << GPIO_MODE_Pos) +#define MODE_ANALOG (0x3UL << GPIO_MODE_Pos) +#define OUTPUT_TYPE_Pos 4U +#define OUTPUT_TYPE (0x1UL << OUTPUT_TYPE_Pos) +#define OUTPUT_PP (0x0UL << OUTPUT_TYPE_Pos) +#define OUTPUT_OD (0x1UL << OUTPUT_TYPE_Pos) +#define EXTI_MODE_Pos 16U +#define EXTI_MODE (0x3UL << EXTI_MODE_Pos) +#define EXTI_IT (0x1UL << EXTI_MODE_Pos) +#define EXTI_EVT (0x2UL << EXTI_MODE_Pos) +#define TRIGGER_MODE_Pos 20U +#define TRIGGER_MODE (0x7UL << TRIGGER_MODE_Pos) +#define TRIGGER_RISING (0x1UL << TRIGGER_MODE_Pos) +#define TRIGGER_FALLING (0x2UL << TRIGGER_MODE_Pos) + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup GPIO_Private_Macros GPIO Private Macros + * @{ + */ +#define IS_GPIO_PIN_ACTION(ACTION) (((ACTION) == GPIO_PIN_RESET) || ((ACTION) == GPIO_PIN_SET)) +#define IS_GPIO_PIN(PIN) (((((uint32_t)PIN) & GPIO_PIN_MASK ) != 0x00U) && ((((uint32_t)PIN) & ~GPIO_PIN_MASK) == 0x00U)) +#define IS_GPIO_MODE(MODE) (((MODE) == GPIO_MODE_INPUT) ||\ + ((MODE) == GPIO_MODE_OUTPUT_PP) ||\ + ((MODE) == GPIO_MODE_OUTPUT_OD) ||\ + ((MODE) == GPIO_MODE_AF_PP) ||\ + ((MODE) == GPIO_MODE_AF_OD) ||\ + ((MODE) == GPIO_MODE_IT_RISING) ||\ + ((MODE) == GPIO_MODE_IT_FALLING) ||\ + ((MODE) == GPIO_MODE_IT_RISING_FALLING) ||\ + ((MODE) == GPIO_MODE_EVT_RISING) ||\ + ((MODE) == GPIO_MODE_EVT_FALLING) ||\ + ((MODE) == GPIO_MODE_EVT_RISING_FALLING) ||\ + ((MODE) == GPIO_MODE_ANALOG)) +#define IS_GPIO_SPEED(SPEED) (((SPEED) == GPIO_SPEED_FREQ_LOW) || ((SPEED) == GPIO_SPEED_FREQ_MEDIUM) || \ + ((SPEED) == GPIO_SPEED_FREQ_HIGH) || ((SPEED) == GPIO_SPEED_FREQ_VERY_HIGH)) +#define IS_GPIO_PULL(PULL) (((PULL) == GPIO_NOPULL) || ((PULL) == GPIO_PULLUP) || \ + ((PULL) == GPIO_PULLDOWN)) +/** + * @} + */ 
+ +/* Private functions ---------------------------------------------------------*/ +/** @defgroup GPIO_Private_Functions GPIO Private Functions + * @{ + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_HAL_GPIO_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_gpio_ex.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_gpio_ex.h new file mode 100644 index 0000000..5e0b7cc --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_gpio_ex.h @@ -0,0 +1,1590 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_gpio_ex.h + * @author MCD Application Team + * @brief Header file of GPIO HAL Extension module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_HAL_GPIO_EX_H +#define __STM32F4xx_HAL_GPIO_EX_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal_def.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @defgroup GPIOEx GPIOEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ +/** @defgroup GPIOEx_Exported_Constants GPIO Exported Constants + * @{ + */ + +/** @defgroup GPIO_Alternate_function_selection GPIO Alternate Function Selection + * @{ + */ + +/*------------------------------------------ STM32F429xx/STM32F439xx ---------*/ +#if defined(STM32F429xx) || defined(STM32F439xx) +/** + * @brief AF 0 selection + */ +#define GPIO_AF0_RTC_50Hz ((uint8_t)0x00) /* RTC_50Hz Alternate Function mapping */ +#define GPIO_AF0_MCO ((uint8_t)0x00) /* MCO (MCO1 and MCO2) Alternate Function mapping */ +#define GPIO_AF0_TAMPER ((uint8_t)0x00) /* TAMPER (TAMPER_1 and TAMPER_2) Alternate Function mapping */ +#define GPIO_AF0_SWJ ((uint8_t)0x00) /* SWJ (SWD and JTAG) Alternate Function mapping */ +#define GPIO_AF0_TRACE ((uint8_t)0x00) /* TRACE Alternate Function mapping */ + +/** + * @brief AF 1 selection + */ +#define GPIO_AF1_TIM1 ((uint8_t)0x01) /* TIM1 Alternate Function mapping */ +#define GPIO_AF1_TIM2 ((uint8_t)0x01) /* TIM2 Alternate Function mapping */ + +/** + * @brief AF 2 selection + */ +#define GPIO_AF2_TIM3 ((uint8_t)0x02) /* TIM3 Alternate Function mapping */ +#define GPIO_AF2_TIM4 ((uint8_t)0x02) /* TIM4 Alternate Function mapping */ +#define GPIO_AF2_TIM5 ((uint8_t)0x02) /* TIM5 Alternate Function mapping */ + +/** + * @brief AF 3 selection + */ +#define GPIO_AF3_TIM8 ((uint8_t)0x03) /* TIM8 Alternate Function mapping */ +#define GPIO_AF3_TIM9 ((uint8_t)0x03) /* TIM9 Alternate Function mapping */ +#define GPIO_AF3_TIM10 ((uint8_t)0x03) /* TIM10 Alternate Function mapping */ +#define GPIO_AF3_TIM11 ((uint8_t)0x03) /* TIM11 Alternate Function mapping */ + +/** + * @brief AF 4 selection + */ +#define GPIO_AF4_I2C1 ((uint8_t)0x04) /* I2C1 Alternate Function mapping */ +#define GPIO_AF4_I2C2 
((uint8_t)0x04) /* I2C2 Alternate Function mapping */ +#define GPIO_AF4_I2C3 ((uint8_t)0x04) /* I2C3 Alternate Function mapping */ + +/** + * @brief AF 5 selection + */ +#define GPIO_AF5_SPI1 ((uint8_t)0x05) /* SPI1 Alternate Function mapping */ +#define GPIO_AF5_SPI2 ((uint8_t)0x05) /* SPI2/I2S2 Alternate Function mapping */ +#define GPIO_AF5_SPI3 ((uint8_t)0x05) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF5_SPI4 ((uint8_t)0x05) /* SPI4 Alternate Function mapping */ +#define GPIO_AF5_SPI5 ((uint8_t)0x05) /* SPI5 Alternate Function mapping */ +#define GPIO_AF5_SPI6 ((uint8_t)0x05) /* SPI6 Alternate Function mapping */ +#define GPIO_AF5_I2S3ext ((uint8_t)0x05) /* I2S3ext_SD Alternate Function mapping */ + +/** + * @brief AF 6 selection + */ +#define GPIO_AF6_SPI3 ((uint8_t)0x06) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF6_I2S2ext ((uint8_t)0x06) /* I2S2ext_SD Alternate Function mapping */ +#define GPIO_AF6_SAI1 ((uint8_t)0x06) /* SAI1 Alternate Function mapping */ + +/** + * @brief AF 7 selection + */ +#define GPIO_AF7_USART1 ((uint8_t)0x07) /* USART1 Alternate Function mapping */ +#define GPIO_AF7_USART2 ((uint8_t)0x07) /* USART2 Alternate Function mapping */ +#define GPIO_AF7_USART3 ((uint8_t)0x07) /* USART3 Alternate Function mapping */ +#define GPIO_AF7_I2S3ext ((uint8_t)0x07) /* I2S3ext_SD Alternate Function mapping */ + +/** + * @brief AF 8 selection + */ +#define GPIO_AF8_UART4 ((uint8_t)0x08) /* UART4 Alternate Function mapping */ +#define GPIO_AF8_UART5 ((uint8_t)0x08) /* UART5 Alternate Function mapping */ +#define GPIO_AF8_USART6 ((uint8_t)0x08) /* USART6 Alternate Function mapping */ +#define GPIO_AF8_UART7 ((uint8_t)0x08) /* UART7 Alternate Function mapping */ +#define GPIO_AF8_UART8 ((uint8_t)0x08) /* UART8 Alternate Function mapping */ + +/** + * @brief AF 9 selection + */ +#define GPIO_AF9_CAN1 ((uint8_t)0x09) /* CAN1 Alternate Function mapping */ +#define GPIO_AF9_CAN2 ((uint8_t)0x09) /* CAN2 Alternate Function mapping */ +#define GPIO_AF9_TIM12 ((uint8_t)0x09) /* TIM12 Alternate Function mapping */ +#define GPIO_AF9_TIM13 ((uint8_t)0x09) /* TIM13 Alternate Function mapping */ +#define GPIO_AF9_TIM14 ((uint8_t)0x09) /* TIM14 Alternate Function mapping */ +#define GPIO_AF9_LTDC ((uint8_t)0x09) /* LCD-TFT Alternate Function mapping */ + +/** + * @brief AF 10 selection + */ +#define GPIO_AF10_OTG_FS ((uint8_t)0x0A) /* OTG_FS Alternate Function mapping */ +#define GPIO_AF10_OTG_HS ((uint8_t)0x0A) /* OTG_HS Alternate Function mapping */ + +/** + * @brief AF 11 selection + */ +#define GPIO_AF11_ETH ((uint8_t)0x0B) /* ETHERNET Alternate Function mapping */ + +/** + * @brief AF 12 selection + */ +#define GPIO_AF12_FMC ((uint8_t)0x0C) /* FMC Alternate Function mapping */ +#define GPIO_AF12_OTG_HS_FS ((uint8_t)0x0C) /* OTG HS configured in FS, Alternate Function mapping */ +#define GPIO_AF12_SDIO ((uint8_t)0x0C) /* SDIO Alternate Function mapping */ + +/** + * @brief AF 13 selection + */ +#define GPIO_AF13_DCMI ((uint8_t)0x0D) /* DCMI Alternate Function mapping */ + +/** + * @brief AF 14 selection + */ +#define GPIO_AF14_LTDC ((uint8_t)0x0E) /* LCD-TFT Alternate Function mapping */ + +/** + * @brief AF 15 selection + */ +#define GPIO_AF15_EVENTOUT ((uint8_t)0x0F) /* EVENTOUT Alternate Function mapping */ +#endif /* STM32F429xx || STM32F439xx */ +/*----------------------------------------------------------------------------*/ + +/*---------------------------------- STM32F427xx/STM32F437xx------------------*/ +#if defined(STM32F427xx) || 
defined(STM32F437xx) +/** + * @brief AF 0 selection + */ +#define GPIO_AF0_RTC_50Hz ((uint8_t)0x00) /* RTC_50Hz Alternate Function mapping */ +#define GPIO_AF0_MCO ((uint8_t)0x00) /* MCO (MCO1 and MCO2) Alternate Function mapping */ +#define GPIO_AF0_TAMPER ((uint8_t)0x00) /* TAMPER (TAMPER_1 and TAMPER_2) Alternate Function mapping */ +#define GPIO_AF0_SWJ ((uint8_t)0x00) /* SWJ (SWD and JTAG) Alternate Function mapping */ +#define GPIO_AF0_TRACE ((uint8_t)0x00) /* TRACE Alternate Function mapping */ + +/** + * @brief AF 1 selection + */ +#define GPIO_AF1_TIM1 ((uint8_t)0x01) /* TIM1 Alternate Function mapping */ +#define GPIO_AF1_TIM2 ((uint8_t)0x01) /* TIM2 Alternate Function mapping */ + +/** + * @brief AF 2 selection + */ +#define GPIO_AF2_TIM3 ((uint8_t)0x02) /* TIM3 Alternate Function mapping */ +#define GPIO_AF2_TIM4 ((uint8_t)0x02) /* TIM4 Alternate Function mapping */ +#define GPIO_AF2_TIM5 ((uint8_t)0x02) /* TIM5 Alternate Function mapping */ + +/** + * @brief AF 3 selection + */ +#define GPIO_AF3_TIM8 ((uint8_t)0x03) /* TIM8 Alternate Function mapping */ +#define GPIO_AF3_TIM9 ((uint8_t)0x03) /* TIM9 Alternate Function mapping */ +#define GPIO_AF3_TIM10 ((uint8_t)0x03) /* TIM10 Alternate Function mapping */ +#define GPIO_AF3_TIM11 ((uint8_t)0x03) /* TIM11 Alternate Function mapping */ + +/** + * @brief AF 4 selection + */ +#define GPIO_AF4_I2C1 ((uint8_t)0x04) /* I2C1 Alternate Function mapping */ +#define GPIO_AF4_I2C2 ((uint8_t)0x04) /* I2C2 Alternate Function mapping */ +#define GPIO_AF4_I2C3 ((uint8_t)0x04) /* I2C3 Alternate Function mapping */ + +/** + * @brief AF 5 selection + */ +#define GPIO_AF5_SPI1 ((uint8_t)0x05) /* SPI1 Alternate Function mapping */ +#define GPIO_AF5_SPI2 ((uint8_t)0x05) /* SPI2/I2S2 Alternate Function mapping */ +#define GPIO_AF5_SPI3 ((uint8_t)0x05) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF5_SPI4 ((uint8_t)0x05) /* SPI4 Alternate Function mapping */ +#define GPIO_AF5_SPI5 ((uint8_t)0x05) /* SPI5 Alternate Function mapping */ +#define GPIO_AF5_SPI6 ((uint8_t)0x05) /* SPI6 Alternate Function mapping */ +/** @brief GPIO_Legacy + */ +#define GPIO_AF5_I2S3ext GPIO_AF5_SPI3 /* I2S3ext_SD Alternate Function mapping */ + +/** + * @brief AF 6 selection + */ +#define GPIO_AF6_SPI3 ((uint8_t)0x06) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF6_I2S2ext ((uint8_t)0x06) /* I2S2ext_SD Alternate Function mapping */ +#define GPIO_AF6_SAI1 ((uint8_t)0x06) /* SAI1 Alternate Function mapping */ + +/** + * @brief AF 7 selection + */ +#define GPIO_AF7_USART1 ((uint8_t)0x07) /* USART1 Alternate Function mapping */ +#define GPIO_AF7_USART2 ((uint8_t)0x07) /* USART2 Alternate Function mapping */ +#define GPIO_AF7_USART3 ((uint8_t)0x07) /* USART3 Alternate Function mapping */ +#define GPIO_AF7_I2S3ext ((uint8_t)0x07) /* I2S3ext_SD Alternate Function mapping */ + +/** + * @brief AF 8 selection + */ +#define GPIO_AF8_UART4 ((uint8_t)0x08) /* UART4 Alternate Function mapping */ +#define GPIO_AF8_UART5 ((uint8_t)0x08) /* UART5 Alternate Function mapping */ +#define GPIO_AF8_USART6 ((uint8_t)0x08) /* USART6 Alternate Function mapping */ +#define GPIO_AF8_UART7 ((uint8_t)0x08) /* UART7 Alternate Function mapping */ +#define GPIO_AF8_UART8 ((uint8_t)0x08) /* UART8 Alternate Function mapping */ + +/** + * @brief AF 9 selection + */ +#define GPIO_AF9_CAN1 ((uint8_t)0x09) /* CAN1 Alternate Function mapping */ +#define GPIO_AF9_CAN2 ((uint8_t)0x09) /* CAN2 Alternate Function mapping */ +#define GPIO_AF9_TIM12 ((uint8_t)0x09) /* TIM12 Alternate Function 
mapping */ +#define GPIO_AF9_TIM13 ((uint8_t)0x09) /* TIM13 Alternate Function mapping */ +#define GPIO_AF9_TIM14 ((uint8_t)0x09) /* TIM14 Alternate Function mapping */ + +/** + * @brief AF 10 selection + */ +#define GPIO_AF10_OTG_FS ((uint8_t)0x0A) /* OTG_FS Alternate Function mapping */ +#define GPIO_AF10_OTG_HS ((uint8_t)0x0A) /* OTG_HS Alternate Function mapping */ + +/** + * @brief AF 11 selection + */ +#define GPIO_AF11_ETH ((uint8_t)0x0B) /* ETHERNET Alternate Function mapping */ + +/** + * @brief AF 12 selection + */ +#define GPIO_AF12_FMC ((uint8_t)0x0C) /* FMC Alternate Function mapping */ +#define GPIO_AF12_OTG_HS_FS ((uint8_t)0x0C) /* OTG HS configured in FS, Alternate Function mapping */ +#define GPIO_AF12_SDIO ((uint8_t)0x0C) /* SDIO Alternate Function mapping */ + +/** + * @brief AF 13 selection + */ +#define GPIO_AF13_DCMI ((uint8_t)0x0D) /* DCMI Alternate Function mapping */ + +/** + * @brief AF 15 selection + */ +#define GPIO_AF15_EVENTOUT ((uint8_t)0x0F) /* EVENTOUT Alternate Function mapping */ +#endif /* STM32F427xx || STM32F437xx */ +/*----------------------------------------------------------------------------*/ + +/*---------------------------------- STM32F407xx/STM32F417xx------------------*/ +#if defined(STM32F407xx) || defined(STM32F417xx) +/** + * @brief AF 0 selection + */ +#define GPIO_AF0_RTC_50Hz ((uint8_t)0x00) /* RTC_50Hz Alternate Function mapping */ +#define GPIO_AF0_MCO ((uint8_t)0x00) /* MCO (MCO1 and MCO2) Alternate Function mapping */ +#define GPIO_AF0_TAMPER ((uint8_t)0x00) /* TAMPER (TAMPER_1 and TAMPER_2) Alternate Function mapping */ +#define GPIO_AF0_SWJ ((uint8_t)0x00) /* SWJ (SWD and JTAG) Alternate Function mapping */ +#define GPIO_AF0_TRACE ((uint8_t)0x00) /* TRACE Alternate Function mapping */ + +/** + * @brief AF 1 selection + */ +#define GPIO_AF1_TIM1 ((uint8_t)0x01) /* TIM1 Alternate Function mapping */ +#define GPIO_AF1_TIM2 ((uint8_t)0x01) /* TIM2 Alternate Function mapping */ + +/** + * @brief AF 2 selection + */ +#define GPIO_AF2_TIM3 ((uint8_t)0x02) /* TIM3 Alternate Function mapping */ +#define GPIO_AF2_TIM4 ((uint8_t)0x02) /* TIM4 Alternate Function mapping */ +#define GPIO_AF2_TIM5 ((uint8_t)0x02) /* TIM5 Alternate Function mapping */ + +/** + * @brief AF 3 selection + */ +#define GPIO_AF3_TIM8 ((uint8_t)0x03) /* TIM8 Alternate Function mapping */ +#define GPIO_AF3_TIM9 ((uint8_t)0x03) /* TIM9 Alternate Function mapping */ +#define GPIO_AF3_TIM10 ((uint8_t)0x03) /* TIM10 Alternate Function mapping */ +#define GPIO_AF3_TIM11 ((uint8_t)0x03) /* TIM11 Alternate Function mapping */ + +/** + * @brief AF 4 selection + */ +#define GPIO_AF4_I2C1 ((uint8_t)0x04) /* I2C1 Alternate Function mapping */ +#define GPIO_AF4_I2C2 ((uint8_t)0x04) /* I2C2 Alternate Function mapping */ +#define GPIO_AF4_I2C3 ((uint8_t)0x04) /* I2C3 Alternate Function mapping */ + +/** + * @brief AF 5 selection + */ +#define GPIO_AF5_SPI1 ((uint8_t)0x05) /* SPI1 Alternate Function mapping */ +#define GPIO_AF5_SPI2 ((uint8_t)0x05) /* SPI2/I2S2 Alternate Function mapping */ +#define GPIO_AF5_I2S3ext ((uint8_t)0x05) /* I2S3ext_SD Alternate Function mapping */ + +/** + * @brief AF 6 selection + */ +#define GPIO_AF6_SPI3 ((uint8_t)0x06) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF6_I2S2ext ((uint8_t)0x06) /* I2S2ext_SD Alternate Function mapping */ + +/** + * @brief AF 7 selection + */ +#define GPIO_AF7_USART1 ((uint8_t)0x07) /* USART1 Alternate Function mapping */ +#define GPIO_AF7_USART2 ((uint8_t)0x07) /* USART2 Alternate Function mapping */ +#define 
GPIO_AF7_USART3 ((uint8_t)0x07) /* USART3 Alternate Function mapping */ +#define GPIO_AF7_I2S3ext ((uint8_t)0x07) /* I2S3ext_SD Alternate Function mapping */ + +/** + * @brief AF 8 selection + */ +#define GPIO_AF8_UART4 ((uint8_t)0x08) /* UART4 Alternate Function mapping */ +#define GPIO_AF8_UART5 ((uint8_t)0x08) /* UART5 Alternate Function mapping */ +#define GPIO_AF8_USART6 ((uint8_t)0x08) /* USART6 Alternate Function mapping */ + +/** + * @brief AF 9 selection + */ +#define GPIO_AF9_CAN1 ((uint8_t)0x09) /* CAN1 Alternate Function mapping */ +#define GPIO_AF9_CAN2 ((uint8_t)0x09) /* CAN2 Alternate Function mapping */ +#define GPIO_AF9_TIM12 ((uint8_t)0x09) /* TIM12 Alternate Function mapping */ +#define GPIO_AF9_TIM13 ((uint8_t)0x09) /* TIM13 Alternate Function mapping */ +#define GPIO_AF9_TIM14 ((uint8_t)0x09) /* TIM14 Alternate Function mapping */ + +/** + * @brief AF 10 selection + */ +#define GPIO_AF10_OTG_FS ((uint8_t)0x0A) /* OTG_FS Alternate Function mapping */ +#define GPIO_AF10_OTG_HS ((uint8_t)0x0A) /* OTG_HS Alternate Function mapping */ + +/** + * @brief AF 11 selection + */ +#define GPIO_AF11_ETH ((uint8_t)0x0B) /* ETHERNET Alternate Function mapping */ + +/** + * @brief AF 12 selection + */ +#define GPIO_AF12_FSMC ((uint8_t)0x0C) /* FSMC Alternate Function mapping */ +#define GPIO_AF12_OTG_HS_FS ((uint8_t)0x0C) /* OTG HS configured in FS, Alternate Function mapping */ +#define GPIO_AF12_SDIO ((uint8_t)0x0C) /* SDIO Alternate Function mapping */ + +/** + * @brief AF 13 selection + */ +#define GPIO_AF13_DCMI ((uint8_t)0x0D) /* DCMI Alternate Function mapping */ + +/** + * @brief AF 15 selection + */ +#define GPIO_AF15_EVENTOUT ((uint8_t)0x0F) /* EVENTOUT Alternate Function mapping */ +#endif /* STM32F407xx || STM32F417xx */ +/*----------------------------------------------------------------------------*/ + +/*---------------------------------- STM32F405xx/STM32F415xx------------------*/ +#if defined(STM32F405xx) || defined(STM32F415xx) +/** + * @brief AF 0 selection + */ +#define GPIO_AF0_RTC_50Hz ((uint8_t)0x00) /* RTC_50Hz Alternate Function mapping */ +#define GPIO_AF0_MCO ((uint8_t)0x00) /* MCO (MCO1 and MCO2) Alternate Function mapping */ +#define GPIO_AF0_TAMPER ((uint8_t)0x00) /* TAMPER (TAMPER_1 and TAMPER_2) Alternate Function mapping */ +#define GPIO_AF0_SWJ ((uint8_t)0x00) /* SWJ (SWD and JTAG) Alternate Function mapping */ +#define GPIO_AF0_TRACE ((uint8_t)0x00) /* TRACE Alternate Function mapping */ + +/** + * @brief AF 1 selection + */ +#define GPIO_AF1_TIM1 ((uint8_t)0x01) /* TIM1 Alternate Function mapping */ +#define GPIO_AF1_TIM2 ((uint8_t)0x01) /* TIM2 Alternate Function mapping */ + +/** + * @brief AF 2 selection + */ +#define GPIO_AF2_TIM3 ((uint8_t)0x02) /* TIM3 Alternate Function mapping */ +#define GPIO_AF2_TIM4 ((uint8_t)0x02) /* TIM4 Alternate Function mapping */ +#define GPIO_AF2_TIM5 ((uint8_t)0x02) /* TIM5 Alternate Function mapping */ + +/** + * @brief AF 3 selection + */ +#define GPIO_AF3_TIM8 ((uint8_t)0x03) /* TIM8 Alternate Function mapping */ +#define GPIO_AF3_TIM9 ((uint8_t)0x03) /* TIM9 Alternate Function mapping */ +#define GPIO_AF3_TIM10 ((uint8_t)0x03) /* TIM10 Alternate Function mapping */ +#define GPIO_AF3_TIM11 ((uint8_t)0x03) /* TIM11 Alternate Function mapping */ + +/** + * @brief AF 4 selection + */ +#define GPIO_AF4_I2C1 ((uint8_t)0x04) /* I2C1 Alternate Function mapping */ +#define GPIO_AF4_I2C2 ((uint8_t)0x04) /* I2C2 Alternate Function mapping */ +#define GPIO_AF4_I2C3 ((uint8_t)0x04) /* I2C3 Alternate Function mapping */ + 
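/* Illustrative sketch (not part of the generated header): these AF constants are
   consumed through the Alternate field of GPIO_InitTypeDef from
   stm32f4xx_hal_gpio.h. As an assumed example, routing USART2 TX to PA2 on an
   STM32F405/F415 part would look roughly like:

     GPIO_InitTypeDef io = {0};
     io.Pin       = GPIO_PIN_2;
     io.Mode      = GPIO_MODE_AF_PP;          // alternate function, push-pull
     io.Pull      = GPIO_NOPULL;
     io.Speed     = GPIO_SPEED_FREQ_VERY_HIGH;
     io.Alternate = GPIO_AF7_USART2;          // AF7 selection defined further down
     HAL_GPIO_Init(GPIOA, &io);
*/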
+/** + * @brief AF 5 selection + */ +#define GPIO_AF5_SPI1 ((uint8_t)0x05) /* SPI1 Alternate Function mapping */ +#define GPIO_AF5_SPI2 ((uint8_t)0x05) /* SPI2/I2S2 Alternate Function mapping */ +#define GPIO_AF5_I2S3ext ((uint8_t)0x05) /* I2S3ext_SD Alternate Function mapping */ + +/** + * @brief AF 6 selection + */ +#define GPIO_AF6_SPI3 ((uint8_t)0x06) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF6_I2S2ext ((uint8_t)0x06) /* I2S2ext_SD Alternate Function mapping */ + +/** + * @brief AF 7 selection + */ +#define GPIO_AF7_USART1 ((uint8_t)0x07) /* USART1 Alternate Function mapping */ +#define GPIO_AF7_USART2 ((uint8_t)0x07) /* USART2 Alternate Function mapping */ +#define GPIO_AF7_USART3 ((uint8_t)0x07) /* USART3 Alternate Function mapping */ +#define GPIO_AF7_I2S3ext ((uint8_t)0x07) /* I2S3ext_SD Alternate Function mapping */ + +/** + * @brief AF 8 selection + */ +#define GPIO_AF8_UART4 ((uint8_t)0x08) /* UART4 Alternate Function mapping */ +#define GPIO_AF8_UART5 ((uint8_t)0x08) /* UART5 Alternate Function mapping */ +#define GPIO_AF8_USART6 ((uint8_t)0x08) /* USART6 Alternate Function mapping */ + +/** + * @brief AF 9 selection + */ +#define GPIO_AF9_CAN1 ((uint8_t)0x09) /* CAN1 Alternate Function mapping */ +#define GPIO_AF9_CAN2 ((uint8_t)0x09) /* CAN2 Alternate Function mapping */ +#define GPIO_AF9_TIM12 ((uint8_t)0x09) /* TIM12 Alternate Function mapping */ +#define GPIO_AF9_TIM13 ((uint8_t)0x09) /* TIM13 Alternate Function mapping */ +#define GPIO_AF9_TIM14 ((uint8_t)0x09) /* TIM14 Alternate Function mapping */ + +/** + * @brief AF 10 selection + */ +#define GPIO_AF10_OTG_FS ((uint8_t)0x0A) /* OTG_FS Alternate Function mapping */ +#define GPIO_AF10_OTG_HS ((uint8_t)0x0A) /* OTG_HS Alternate Function mapping */ + +/** + * @brief AF 12 selection + */ +#define GPIO_AF12_FSMC ((uint8_t)0x0C) /* FSMC Alternate Function mapping */ +#define GPIO_AF12_OTG_HS_FS ((uint8_t)0x0C) /* OTG HS configured in FS, Alternate Function mapping */ +#define GPIO_AF12_SDIO ((uint8_t)0x0C) /* SDIO Alternate Function mapping */ + +/** + * @brief AF 15 selection + */ +#define GPIO_AF15_EVENTOUT ((uint8_t)0x0F) /* EVENTOUT Alternate Function mapping */ +#endif /* STM32F405xx || STM32F415xx */ + +/*----------------------------------------------------------------------------*/ + +/*---------------------------------------- STM32F401xx------------------------*/ +#if defined(STM32F401xC) || defined(STM32F401xE) +/** + * @brief AF 0 selection + */ +#define GPIO_AF0_RTC_50Hz ((uint8_t)0x00) /* RTC_50Hz Alternate Function mapping */ +#define GPIO_AF0_MCO ((uint8_t)0x00) /* MCO (MCO1 and MCO2) Alternate Function mapping */ +#define GPIO_AF0_TAMPER ((uint8_t)0x00) /* TAMPER (TAMPER_1 and TAMPER_2) Alternate Function mapping */ +#define GPIO_AF0_SWJ ((uint8_t)0x00) /* SWJ (SWD and JTAG) Alternate Function mapping */ +#define GPIO_AF0_TRACE ((uint8_t)0x00) /* TRACE Alternate Function mapping */ + +/** + * @brief AF 1 selection + */ +#define GPIO_AF1_TIM1 ((uint8_t)0x01) /* TIM1 Alternate Function mapping */ +#define GPIO_AF1_TIM2 ((uint8_t)0x01) /* TIM2 Alternate Function mapping */ + +/** + * @brief AF 2 selection + */ +#define GPIO_AF2_TIM3 ((uint8_t)0x02) /* TIM3 Alternate Function mapping */ +#define GPIO_AF2_TIM4 ((uint8_t)0x02) /* TIM4 Alternate Function mapping */ +#define GPIO_AF2_TIM5 ((uint8_t)0x02) /* TIM5 Alternate Function mapping */ + +/** + * @brief AF 3 selection + */ +#define GPIO_AF3_TIM9 ((uint8_t)0x03) /* TIM9 Alternate Function mapping */ +#define GPIO_AF3_TIM10 ((uint8_t)0x03) /* 
TIM10 Alternate Function mapping */ +#define GPIO_AF3_TIM11 ((uint8_t)0x03) /* TIM11 Alternate Function mapping */ + +/** + * @brief AF 4 selection + */ +#define GPIO_AF4_I2C1 ((uint8_t)0x04) /* I2C1 Alternate Function mapping */ +#define GPIO_AF4_I2C2 ((uint8_t)0x04) /* I2C2 Alternate Function mapping */ +#define GPIO_AF4_I2C3 ((uint8_t)0x04) /* I2C3 Alternate Function mapping */ + +/** + * @brief AF 5 selection + */ +#define GPIO_AF5_SPI1 ((uint8_t)0x05) /* SPI1 Alternate Function mapping */ +#define GPIO_AF5_SPI2 ((uint8_t)0x05) /* SPI2/I2S2 Alternate Function mapping */ +#define GPIO_AF5_SPI3 ((uint8_t)0x05) /* SPI3 Alternate Function mapping */ +#define GPIO_AF5_SPI4 ((uint8_t)0x05) /* SPI4 Alternate Function mapping */ +#define GPIO_AF5_I2S3ext ((uint8_t)0x05) /* I2S3ext_SD Alternate Function mapping */ + +/** + * @brief AF 6 selection + */ +#define GPIO_AF6_SPI3 ((uint8_t)0x06) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF6_I2S2ext ((uint8_t)0x06) /* I2S2ext_SD Alternate Function mapping */ + +/** + * @brief AF 7 selection + */ +#define GPIO_AF7_USART1 ((uint8_t)0x07) /* USART1 Alternate Function mapping */ +#define GPIO_AF7_USART2 ((uint8_t)0x07) /* USART2 Alternate Function mapping */ +#define GPIO_AF7_I2S3ext ((uint8_t)0x07) /* I2S3ext_SD Alternate Function mapping */ + +/** + * @brief AF 8 selection + */ +#define GPIO_AF8_USART6 ((uint8_t)0x08) /* USART6 Alternate Function mapping */ + +/** + * @brief AF 9 selection + */ +#define GPIO_AF9_I2C2 ((uint8_t)0x09) /* I2C2 Alternate Function mapping */ +#define GPIO_AF9_I2C3 ((uint8_t)0x09) /* I2C3 Alternate Function mapping */ + + +/** + * @brief AF 10 selection + */ +#define GPIO_AF10_OTG_FS ((uint8_t)0x0A) /* OTG_FS Alternate Function mapping */ + +/** + * @brief AF 12 selection + */ +#define GPIO_AF12_SDIO ((uint8_t)0x0C) /* SDIO Alternate Function mapping */ + +/** + * @brief AF 15 selection + */ +#define GPIO_AF15_EVENTOUT ((uint8_t)0x0F) /* EVENTOUT Alternate Function mapping */ +#endif /* STM32F401xC || STM32F401xE */ +/*----------------------------------------------------------------------------*/ + +/*--------------- STM32F412Zx/STM32F412Vx/STM32F412Rx/STM32F412Cx-------------*/ +#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) +/** + * @brief AF 0 selection + */ +#define GPIO_AF0_RTC_50Hz ((uint8_t)0x00) /* RTC_50Hz Alternate Function mapping */ +#define GPIO_AF0_MCO ((uint8_t)0x00) /* MCO (MCO1 and MCO2) Alternate Function mapping */ +#define GPIO_AF0_TAMPER ((uint8_t)0x00) /* TAMPER (TAMPER_1 and TAMPER_2) Alternate Function mapping */ +#define GPIO_AF0_SWJ ((uint8_t)0x00) /* SWJ (SWD and JTAG) Alternate Function mapping */ +#define GPIO_AF0_TRACE ((uint8_t)0x00) /* TRACE Alternate Function mapping */ + +/** + * @brief AF 1 selection + */ +#define GPIO_AF1_TIM1 ((uint8_t)0x01) /* TIM1 Alternate Function mapping */ +#define GPIO_AF1_TIM2 ((uint8_t)0x01) /* TIM2 Alternate Function mapping */ + +/** + * @brief AF 2 selection + */ +#define GPIO_AF2_TIM3 ((uint8_t)0x02) /* TIM3 Alternate Function mapping */ +#define GPIO_AF2_TIM4 ((uint8_t)0x02) /* TIM4 Alternate Function mapping */ +#define GPIO_AF2_TIM5 ((uint8_t)0x02) /* TIM5 Alternate Function mapping */ + +/** + * @brief AF 3 selection + */ +#define GPIO_AF3_TIM8 ((uint8_t)0x03) /* TIM8 Alternate Function mapping */ +#define GPIO_AF3_TIM9 ((uint8_t)0x03) /* TIM9 Alternate Function mapping */ +#define GPIO_AF3_TIM10 ((uint8_t)0x03) /* TIM10 Alternate Function mapping */ +#define GPIO_AF3_TIM11 ((uint8_t)0x03) /* 
TIM11 Alternate Function mapping */ + +/** + * @brief AF 4 selection + */ +#define GPIO_AF4_I2C1 ((uint8_t)0x04) /* I2C1 Alternate Function mapping */ +#define GPIO_AF4_I2C2 ((uint8_t)0x04) /* I2C2 Alternate Function mapping */ +#define GPIO_AF4_I2C3 ((uint8_t)0x04) /* I2C3 Alternate Function mapping */ +#define GPIO_AF4_FMPI2C1 ((uint8_t)0x04) /* FMPI2C1 Alternate Function mapping */ + +/** + * @brief AF 5 selection + */ +#define GPIO_AF5_SPI1 ((uint8_t)0x05) /* SPI1/I2S1 Alternate Function mapping */ +#define GPIO_AF5_SPI2 ((uint8_t)0x05) /* SPI2/I2S2 Alternate Function mapping */ +#define GPIO_AF5_SPI3 ((uint8_t)0x05) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF5_SPI4 ((uint8_t)0x05) /* SPI4/I2S4 Alternate Function mapping */ +#define GPIO_AF5_I2S3ext ((uint8_t)0x05) /* I2S3ext_SD Alternate Function mapping */ + +/** + * @brief AF 6 selection + */ +#define GPIO_AF6_SPI2 ((uint8_t)0x06) /* I2S2 Alternate Function mapping */ +#define GPIO_AF6_SPI3 ((uint8_t)0x06) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF6_SPI4 ((uint8_t)0x06) /* SPI4/I2S4 Alternate Function mapping */ +#define GPIO_AF6_SPI5 ((uint8_t)0x06) /* SPI5/I2S5 Alternate Function mapping */ +#define GPIO_AF6_I2S2ext ((uint8_t)0x06) /* I2S2ext_SD Alternate Function mapping */ +#define GPIO_AF6_DFSDM1 ((uint8_t)0x06) /* DFSDM1 Alternate Function mapping */ +/** + * @brief AF 7 selection + */ +#define GPIO_AF7_SPI3 ((uint8_t)0x07) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF7_USART1 ((uint8_t)0x07) /* USART1 Alternate Function mapping */ +#define GPIO_AF7_USART2 ((uint8_t)0x07) /* USART2 Alternate Function mapping */ +#define GPIO_AF7_USART3 ((uint8_t)0x07) /* USART3 Alternate Function mapping */ +#define GPIO_AF7_I2S3ext ((uint8_t)0x07) /* I2S3ext_SD Alternate Function mapping */ + +/** + * @brief AF 8 selection + */ +#define GPIO_AF8_USART6 ((uint8_t)0x08) /* USART6 Alternate Function mapping */ +#define GPIO_AF8_USART3 ((uint8_t)0x08) /* USART3 Alternate Function mapping */ +#define GPIO_AF8_DFSDM1 ((uint8_t)0x08) /* DFSDM1 Alternate Function mapping */ +#define GPIO_AF8_CAN1 ((uint8_t)0x08) /* CAN1 Alternate Function mapping */ + +/** + * @brief AF 9 selection + */ +#define GPIO_AF9_TIM12 ((uint8_t)0x09) /* TIM12 Alternate Function mapping */ +#define GPIO_AF9_TIM13 ((uint8_t)0x09) /* TIM13 Alternate Function mapping */ +#define GPIO_AF9_TIM14 ((uint8_t)0x09) /* TIM14 Alternate Function mapping */ +#define GPIO_AF9_I2C2 ((uint8_t)0x09) /* I2C2 Alternate Function mapping */ +#define GPIO_AF9_I2C3 ((uint8_t)0x09) /* I2C3 Alternate Function mapping */ +#define GPIO_AF9_FMPI2C1 ((uint8_t)0x09) /* FMPI2C1 Alternate Function mapping */ +#define GPIO_AF9_CAN1 ((uint8_t)0x09) /* CAN1 Alternate Function mapping */ +#define GPIO_AF9_CAN2 ((uint8_t)0x09) /* CAN1 Alternate Function mapping */ +#define GPIO_AF9_QSPI ((uint8_t)0x09) /* QSPI Alternate Function mapping */ + +/** + * @brief AF 10 selection + */ +#define GPIO_AF10_OTG_FS ((uint8_t)0x0A) /* OTG_FS Alternate Function mapping */ +#define GPIO_AF10_DFSDM1 ((uint8_t)0x0A) /* DFSDM1 Alternate Function mapping */ +#define GPIO_AF10_QSPI ((uint8_t)0x0A) /* QSPI Alternate Function mapping */ +#define GPIO_AF10_FMC ((uint8_t)0x0A) /* FMC Alternate Function mapping */ + +/** + * @brief AF 12 selection + */ +#define GPIO_AF12_SDIO ((uint8_t)0x0C) /* SDIO Alternate Function mapping */ +#define GPIO_AF12_FSMC ((uint8_t)0x0C) /* FMC Alternate Function mapping */ + +/** + * @brief AF 15 selection + */ +#define GPIO_AF15_EVENTOUT ((uint8_t)0x0F) /* 
EVENTOUT Alternate Function mapping */ +#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ + +/*----------------------------------------------------------------------------*/ + +/*--------------- STM32F413xx/STM32F423xx-------------------------------------*/ +#if defined(STM32F413xx) || defined(STM32F423xx) +/** + * @brief AF 0 selection + */ +#define GPIO_AF0_RTC_50Hz ((uint8_t)0x00) /* RTC_50Hz Alternate Function mapping */ +#define GPIO_AF0_MCO ((uint8_t)0x00) /* MCO (MCO1 and MCO2) Alternate Function mapping */ +#define GPIO_AF0_SWJ ((uint8_t)0x00) /* SWJ (SWD and JTAG) Alternate Function mapping */ +#define GPIO_AF0_TRACE ((uint8_t)0x00) /* TRACE Alternate Function mapping */ + +/** + * @brief AF 1 selection + */ +#define GPIO_AF1_TIM1 ((uint8_t)0x01) /* TIM1 Alternate Function mapping */ +#define GPIO_AF1_TIM2 ((uint8_t)0x01) /* TIM2 Alternate Function mapping */ +#define GPIO_AF1_LPTIM1 ((uint8_t)0x01) /* LPTIM1 Alternate Function mapping */ + +/** + * @brief AF 2 selection + */ +#define GPIO_AF2_TIM3 ((uint8_t)0x02) /* TIM3 Alternate Function mapping */ +#define GPIO_AF2_TIM4 ((uint8_t)0x02) /* TIM4 Alternate Function mapping */ +#define GPIO_AF2_TIM5 ((uint8_t)0x02) /* TIM5 Alternate Function mapping */ + +/** + * @brief AF 3 selection + */ +#define GPIO_AF3_TIM8 ((uint8_t)0x03) /* TIM8 Alternate Function mapping */ +#define GPIO_AF3_TIM9 ((uint8_t)0x03) /* TIM9 Alternate Function mapping */ +#define GPIO_AF3_TIM10 ((uint8_t)0x03) /* TIM10 Alternate Function mapping */ +#define GPIO_AF3_TIM11 ((uint8_t)0x03) /* TIM11 Alternate Function mapping */ +#define GPIO_AF3_DFSDM2 ((uint8_t)0x03) /* DFSDM2 Alternate Function mapping */ + +/** + * @brief AF 4 selection + */ +#define GPIO_AF4_I2C1 ((uint8_t)0x04) /* I2C1 Alternate Function mapping */ +#define GPIO_AF4_I2C2 ((uint8_t)0x04) /* I2C2 Alternate Function mapping */ +#define GPIO_AF4_I2C3 ((uint8_t)0x04) /* I2C3 Alternate Function mapping */ +#define GPIO_AF4_FMPI2C1 ((uint8_t)0x04) /* FMPI2C1 Alternate Function mapping */ + +/** + * @brief AF 5 selection + */ +#define GPIO_AF5_SPI1 ((uint8_t)0x05) /* SPI1/I2S1 Alternate Function mapping */ +#define GPIO_AF5_SPI2 ((uint8_t)0x05) /* SPI2/I2S2 Alternate Function mapping */ +#define GPIO_AF5_SPI3 ((uint8_t)0x05) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF5_SPI4 ((uint8_t)0x05) /* SPI4/I2S4 Alternate Function mapping */ +#define GPIO_AF5_I2S3ext ((uint8_t)0x05) /* I2S3ext_SD Alternate Function mapping */ + +/** + * @brief AF 6 selection + */ +#define GPIO_AF6_SPI2 ((uint8_t)0x06) /* I2S2 Alternate Function mapping */ +#define GPIO_AF6_SPI3 ((uint8_t)0x06) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF6_SPI4 ((uint8_t)0x06) /* SPI4/I2S4 Alternate Function mapping */ +#define GPIO_AF6_SPI5 ((uint8_t)0x06) /* SPI5/I2S5 Alternate Function mapping */ +#define GPIO_AF6_I2S2ext ((uint8_t)0x06) /* I2S2ext_SD Alternate Function mapping */ +#define GPIO_AF6_DFSDM1 ((uint8_t)0x06) /* DFSDM1 Alternate Function mapping */ +#define GPIO_AF6_DFSDM2 ((uint8_t)0x06) /* DFSDM2 Alternate Function mapping */ +/** + * @brief AF 7 selection + */ +#define GPIO_AF7_SPI3 ((uint8_t)0x07) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF7_SAI1 ((uint8_t)0x07) /* SAI1 Alternate Function mapping */ +#define GPIO_AF7_USART1 ((uint8_t)0x07) /* USART1 Alternate Function mapping */ +#define GPIO_AF7_USART2 ((uint8_t)0x07) /* USART2 Alternate Function mapping */ +#define GPIO_AF7_USART3 ((uint8_t)0x07) /* USART3 Alternate Function mapping */ +#define 
GPIO_AF7_I2S3ext ((uint8_t)0x07) /* I2S3ext_SD Alternate Function mapping */ +#define GPIO_AF7_DFSDM2 ((uint8_t)0x07) /* DFSDM2 Alternate Function mapping */ + +/** + * @brief AF 8 selection + */ +#define GPIO_AF8_USART6 ((uint8_t)0x08) /* USART6 Alternate Function mapping */ +#define GPIO_AF8_USART3 ((uint8_t)0x08) /* USART3 Alternate Function mapping */ +#define GPIO_AF8_UART4 ((uint8_t)0x08) /* UART4 Alternate Function mapping */ +#define GPIO_AF8_UART5 ((uint8_t)0x08) /* UART5 Alternate Function mapping */ +#define GPIO_AF8_UART7 ((uint8_t)0x08) /* UART8 Alternate Function mapping */ +#define GPIO_AF8_UART8 ((uint8_t)0x08) /* UART8 Alternate Function mapping */ +#define GPIO_AF8_DFSDM1 ((uint8_t)0x08) /* DFSDM1 Alternate Function mapping */ +#define GPIO_AF8_CAN1 ((uint8_t)0x08) /* CAN1 Alternate Function mapping */ + +/** + * @brief AF 9 selection + */ +#define GPIO_AF9_TIM12 ((uint8_t)0x09) /* TIM12 Alternate Function mapping */ +#define GPIO_AF9_TIM13 ((uint8_t)0x09) /* TIM13 Alternate Function mapping */ +#define GPIO_AF9_TIM14 ((uint8_t)0x09) /* TIM14 Alternate Function mapping */ +#define GPIO_AF9_I2C2 ((uint8_t)0x09) /* I2C2 Alternate Function mapping */ +#define GPIO_AF9_I2C3 ((uint8_t)0x09) /* I2C3 Alternate Function mapping */ +#define GPIO_AF9_FMPI2C1 ((uint8_t)0x09) /* FMPI2C1 Alternate Function mapping */ +#define GPIO_AF9_CAN1 ((uint8_t)0x09) /* CAN1 Alternate Function mapping */ +#define GPIO_AF9_CAN2 ((uint8_t)0x09) /* CAN1 Alternate Function mapping */ +#define GPIO_AF9_QSPI ((uint8_t)0x09) /* QSPI Alternate Function mapping */ + +/** + * @brief AF 10 selection + */ +#define GPIO_AF10_SAI1 ((uint8_t)0x0A) /* SAI1 Alternate Function mapping */ +#define GPIO_AF10_OTG_FS ((uint8_t)0x0A) /* OTG_FS Alternate Function mapping */ +#define GPIO_AF10_DFSDM1 ((uint8_t)0x0A) /* DFSDM1 Alternate Function mapping */ +#define GPIO_AF10_DFSDM2 ((uint8_t)0x0A) /* DFSDM2 Alternate Function mapping */ +#define GPIO_AF10_QSPI ((uint8_t)0x0A) /* QSPI Alternate Function mapping */ +#define GPIO_AF10_FSMC ((uint8_t)0x0A) /* FSMC Alternate Function mapping */ + +/** + * @brief AF 11 selection + */ +#define GPIO_AF11_UART4 ((uint8_t)0x0B) /* UART4 Alternate Function mapping */ +#define GPIO_AF11_UART5 ((uint8_t)0x0B) /* UART5 Alternate Function mapping */ +#define GPIO_AF11_UART9 ((uint8_t)0x0B) /* UART9 Alternate Function mapping */ +#define GPIO_AF11_UART10 ((uint8_t)0x0B) /* UART10 Alternate Function mapping */ +#define GPIO_AF11_CAN3 ((uint8_t)0x0B) /* CAN3 Alternate Function mapping */ + +/** + * @brief AF 12 selection + */ +#define GPIO_AF12_SDIO ((uint8_t)0x0C) /* SDIO Alternate Function mapping */ +#define GPIO_AF12_FSMC ((uint8_t)0x0C) /* FMC Alternate Function mapping */ + +/** + * @brief AF 14 selection + */ +#define GPIO_AF14_RNG ((uint8_t)0x0E) /* RNG Alternate Function mapping */ + +/** + * @brief AF 15 selection + */ +#define GPIO_AF15_EVENTOUT ((uint8_t)0x0F) /* EVENTOUT Alternate Function mapping */ +#endif /* STM32F413xx || STM32F423xx */ + +/*---------------------------------------- STM32F411xx------------------------*/ +#if defined(STM32F411xE) +/** + * @brief AF 0 selection + */ +#define GPIO_AF0_RTC_50Hz ((uint8_t)0x00) /* RTC_50Hz Alternate Function mapping */ +#define GPIO_AF0_MCO ((uint8_t)0x00) /* MCO (MCO1 and MCO2) Alternate Function mapping */ +#define GPIO_AF0_TAMPER ((uint8_t)0x00) /* TAMPER (TAMPER_1 and TAMPER_2) Alternate Function mapping */ +#define GPIO_AF0_SWJ ((uint8_t)0x00) /* SWJ (SWD and JTAG) Alternate Function mapping */ +#define GPIO_AF0_TRACE 
((uint8_t)0x00) /* TRACE Alternate Function mapping */ + +/** + * @brief AF 1 selection + */ +#define GPIO_AF1_TIM1 ((uint8_t)0x01) /* TIM1 Alternate Function mapping */ +#define GPIO_AF1_TIM2 ((uint8_t)0x01) /* TIM2 Alternate Function mapping */ + +/** + * @brief AF 2 selection + */ +#define GPIO_AF2_TIM3 ((uint8_t)0x02) /* TIM3 Alternate Function mapping */ +#define GPIO_AF2_TIM4 ((uint8_t)0x02) /* TIM4 Alternate Function mapping */ +#define GPIO_AF2_TIM5 ((uint8_t)0x02) /* TIM5 Alternate Function mapping */ + +/** + * @brief AF 3 selection + */ +#define GPIO_AF3_TIM9 ((uint8_t)0x03) /* TIM9 Alternate Function mapping */ +#define GPIO_AF3_TIM10 ((uint8_t)0x03) /* TIM10 Alternate Function mapping */ +#define GPIO_AF3_TIM11 ((uint8_t)0x03) /* TIM11 Alternate Function mapping */ + +/** + * @brief AF 4 selection + */ +#define GPIO_AF4_I2C1 ((uint8_t)0x04) /* I2C1 Alternate Function mapping */ +#define GPIO_AF4_I2C2 ((uint8_t)0x04) /* I2C2 Alternate Function mapping */ +#define GPIO_AF4_I2C3 ((uint8_t)0x04) /* I2C3 Alternate Function mapping */ + +/** + * @brief AF 5 selection + */ +#define GPIO_AF5_SPI1 ((uint8_t)0x05) /* SPI1/I2S1 Alternate Function mapping */ +#define GPIO_AF5_SPI2 ((uint8_t)0x05) /* SPI2/I2S2 Alternate Function mapping */ +#define GPIO_AF5_SPI3 ((uint8_t)0x05) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF5_SPI4 ((uint8_t)0x05) /* SPI4 Alternate Function mapping */ +#define GPIO_AF5_I2S3ext ((uint8_t)0x05) /* I2S3ext_SD Alternate Function mapping */ + +/** + * @brief AF 6 selection + */ +#define GPIO_AF6_SPI2 ((uint8_t)0x06) /* I2S2 Alternate Function mapping */ +#define GPIO_AF6_SPI3 ((uint8_t)0x06) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF6_SPI4 ((uint8_t)0x06) /* SPI4/I2S4 Alternate Function mapping */ +#define GPIO_AF6_SPI5 ((uint8_t)0x06) /* SPI5/I2S5 Alternate Function mapping */ +#define GPIO_AF6_I2S2ext ((uint8_t)0x06) /* I2S2ext_SD Alternate Function mapping */ + +/** + * @brief AF 7 selection + */ +#define GPIO_AF7_SPI3 ((uint8_t)0x07) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF7_USART1 ((uint8_t)0x07) /* USART1 Alternate Function mapping */ +#define GPIO_AF7_USART2 ((uint8_t)0x07) /* USART2 Alternate Function mapping */ +#define GPIO_AF7_I2S3ext ((uint8_t)0x07) /* I2S3ext_SD Alternate Function mapping */ + +/** + * @brief AF 8 selection + */ +#define GPIO_AF8_USART6 ((uint8_t)0x08) /* USART6 Alternate Function mapping */ + +/** + * @brief AF 9 selection + */ +#define GPIO_AF9_TIM14 ((uint8_t)0x09) /* TIM14 Alternate Function mapping */ +#define GPIO_AF9_I2C2 ((uint8_t)0x09) /* I2C2 Alternate Function mapping */ +#define GPIO_AF9_I2C3 ((uint8_t)0x09) /* I2C3 Alternate Function mapping */ + +/** + * @brief AF 10 selection + */ +#define GPIO_AF10_OTG_FS ((uint8_t)0x0A) /* OTG_FS Alternate Function mapping */ + +/** + * @brief AF 12 selection + */ +#define GPIO_AF12_SDIO ((uint8_t)0x0C) /* SDIO Alternate Function mapping */ + +/** + * @brief AF 15 selection + */ +#define GPIO_AF15_EVENTOUT ((uint8_t)0x0F) /* EVENTOUT Alternate Function mapping */ +#endif /* STM32F411xE */ + +/*---------------------------------------- STM32F410xx------------------------*/ +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) +/** + * @brief AF 0 selection + */ +#define GPIO_AF0_RTC_50Hz ((uint8_t)0x00) /* RTC_50Hz Alternate Function mapping */ +#define GPIO_AF0_MCO ((uint8_t)0x00) /* MCO (MCO1 and MCO2) Alternate Function mapping */ +#define GPIO_AF0_TAMPER ((uint8_t)0x00) /* TAMPER (TAMPER_1 and TAMPER_2) Alternate 
Function mapping */ +#define GPIO_AF0_SWJ ((uint8_t)0x00) /* SWJ (SWD and JTAG) Alternate Function mapping */ +#define GPIO_AF0_TRACE ((uint8_t)0x00) /* TRACE Alternate Function mapping */ + +/** + * @brief AF 1 selection + */ +#define GPIO_AF1_TIM1 ((uint8_t)0x01) /* TIM1 Alternate Function mapping */ +#define GPIO_AF1_LPTIM1 ((uint8_t)0x01) /* LPTIM1 Alternate Function mapping */ + +/** + * @brief AF 2 selection + */ +#define GPIO_AF2_TIM5 ((uint8_t)0x02) /* TIM5 Alternate Function mapping */ + +/** + * @brief AF 3 selection + */ +#define GPIO_AF3_TIM9 ((uint8_t)0x03) /* TIM9 Alternate Function mapping */ +#define GPIO_AF3_TIM11 ((uint8_t)0x03) /* TIM11 Alternate Function mapping */ + +/** + * @brief AF 4 selection + */ +#define GPIO_AF4_I2C1 ((uint8_t)0x04) /* I2C1 Alternate Function mapping */ +#define GPIO_AF4_I2C2 ((uint8_t)0x04) /* I2C2 Alternate Function mapping */ +#define GPIO_AF4_FMPI2C1 ((uint8_t)0x04) /* FMPI2C1 Alternate Function mapping */ + +/** + * @brief AF 5 selection + */ +#define GPIO_AF5_SPI1 ((uint8_t)0x05) /* SPI1/I2S1 Alternate Function mapping */ +#if defined(STM32F410Cx) || defined(STM32F410Rx) +#define GPIO_AF5_SPI2 ((uint8_t)0x05) /* SPI2/I2S2 Alternate Function mapping */ +#endif /* STM32F410Cx || STM32F410Rx */ + +/** + * @brief AF 6 selection + */ +#define GPIO_AF6_SPI1 ((uint8_t)0x06) /* SPI1 Alternate Function mapping */ +#if defined(STM32F410Cx) || defined(STM32F410Rx) +#define GPIO_AF6_SPI2 ((uint8_t)0x06) /* I2S2 Alternate Function mapping */ +#endif /* STM32F410Cx || STM32F410Rx */ +#define GPIO_AF6_SPI5 ((uint8_t)0x06) /* SPI5/I2S5 Alternate Function mapping */ +/** + * @brief AF 7 selection + */ +#define GPIO_AF7_USART1 ((uint8_t)0x07) /* USART1 Alternate Function mapping */ +#define GPIO_AF7_USART2 ((uint8_t)0x07) /* USART2 Alternate Function mapping */ + +/** + * @brief AF 8 selection + */ +#define GPIO_AF8_USART6 ((uint8_t)0x08) /* USART6 Alternate Function mapping */ + +/** + * @brief AF 9 selection + */ +#define GPIO_AF9_I2C2 ((uint8_t)0x09) /* I2C2 Alternate Function mapping */ +#define GPIO_AF9_FMPI2C1 ((uint8_t)0x09) /* FMPI2C1 Alternate Function mapping */ + +/** + * @brief AF 15 selection + */ +#define GPIO_AF15_EVENTOUT ((uint8_t)0x0F) /* EVENTOUT Alternate Function mapping */ +#endif /* STM32F410Tx || STM32F410Cx || STM32F410Rx */ + +/*---------------------------------------- STM32F446xx -----------------------*/ +#if defined(STM32F446xx) +/** + * @brief AF 0 selection + */ +#define GPIO_AF0_RTC_50Hz ((uint8_t)0x00) /* RTC_50Hz Alternate Function mapping */ +#define GPIO_AF0_MCO ((uint8_t)0x00) /* MCO (MCO1 and MCO2) Alternate Function mapping */ +#define GPIO_AF0_TAMPER ((uint8_t)0x00) /* TAMPER (TAMPER_1 and TAMPER_2) Alternate Function mapping */ +#define GPIO_AF0_SWJ ((uint8_t)0x00) /* SWJ (SWD and JTAG) Alternate Function mapping */ +#define GPIO_AF0_TRACE ((uint8_t)0x00) /* TRACE Alternate Function mapping */ + +/** + * @brief AF 1 selection + */ +#define GPIO_AF1_TIM1 ((uint8_t)0x01) /* TIM1 Alternate Function mapping */ +#define GPIO_AF1_TIM2 ((uint8_t)0x01) /* TIM2 Alternate Function mapping */ + +/** + * @brief AF 2 selection + */ +#define GPIO_AF2_TIM3 ((uint8_t)0x02) /* TIM3 Alternate Function mapping */ +#define GPIO_AF2_TIM4 ((uint8_t)0x02) /* TIM4 Alternate Function mapping */ +#define GPIO_AF2_TIM5 ((uint8_t)0x02) /* TIM5 Alternate Function mapping */ + +/** + * @brief AF 3 selection + */ +#define GPIO_AF3_TIM8 ((uint8_t)0x03) /* TIM8 Alternate Function mapping */ +#define GPIO_AF3_TIM9 ((uint8_t)0x03) /* TIM9 Alternate 
Function mapping */ +#define GPIO_AF3_TIM10 ((uint8_t)0x03) /* TIM10 Alternate Function mapping */ +#define GPIO_AF3_TIM11 ((uint8_t)0x03) /* TIM11 Alternate Function mapping */ +#define GPIO_AF3_CEC ((uint8_t)0x03) /* CEC Alternate Function mapping */ + +/** + * @brief AF 4 selection + */ +#define GPIO_AF4_I2C1 ((uint8_t)0x04) /* I2C1 Alternate Function mapping */ +#define GPIO_AF4_I2C2 ((uint8_t)0x04) /* I2C2 Alternate Function mapping */ +#define GPIO_AF4_I2C3 ((uint8_t)0x04) /* I2C3 Alternate Function mapping */ +#define GPIO_AF4_FMPI2C1 ((uint8_t)0x04) /* FMPI2C1 Alternate Function mapping */ +#define GPIO_AF4_CEC ((uint8_t)0x04) /* CEC Alternate Function mapping */ + +/** + * @brief AF 5 selection + */ +#define GPIO_AF5_SPI1 ((uint8_t)0x05) /* SPI1/I2S1 Alternate Function mapping */ +#define GPIO_AF5_SPI2 ((uint8_t)0x05) /* SPI2/I2S2 Alternate Function mapping */ +#define GPIO_AF5_SPI3 ((uint8_t)0x05) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF5_SPI4 ((uint8_t)0x05) /* SPI4 Alternate Function mapping */ + +/** + * @brief AF 6 selection + */ +#define GPIO_AF6_SPI2 ((uint8_t)0x06) /* SPI2/I2S2 Alternate Function mapping */ +#define GPIO_AF6_SPI3 ((uint8_t)0x06) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF6_SPI4 ((uint8_t)0x06) /* SPI4 Alternate Function mapping */ +#define GPIO_AF6_SAI1 ((uint8_t)0x06) /* SAI1 Alternate Function mapping */ + +/** + * @brief AF 7 selection + */ +#define GPIO_AF7_USART1 ((uint8_t)0x07) /* USART1 Alternate Function mapping */ +#define GPIO_AF7_USART2 ((uint8_t)0x07) /* USART2 Alternate Function mapping */ +#define GPIO_AF7_USART3 ((uint8_t)0x07) /* USART3 Alternate Function mapping */ +#define GPIO_AF7_UART5 ((uint8_t)0x07) /* UART5 Alternate Function mapping */ +#define GPIO_AF7_SPI2 ((uint8_t)0x07) /* SPI2/I2S2 Alternate Function mapping */ +#define GPIO_AF7_SPI3 ((uint8_t)0x07) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF7_SPDIFRX ((uint8_t)0x07) /* SPDIFRX Alternate Function mapping */ + +/** + * @brief AF 8 selection + */ +#define GPIO_AF8_UART4 ((uint8_t)0x08) /* UART4 Alternate Function mapping */ +#define GPIO_AF8_UART5 ((uint8_t)0x08) /* UART5 Alternate Function mapping */ +#define GPIO_AF8_USART6 ((uint8_t)0x08) /* USART6 Alternate Function mapping */ +#define GPIO_AF8_SPDIFRX ((uint8_t)0x08) /* SPDIFRX Alternate Function mapping */ +#define GPIO_AF8_SAI2 ((uint8_t)0x08) /* SAI2 Alternate Function mapping */ + +/** + * @brief AF 9 selection + */ +#define GPIO_AF9_CAN1 ((uint8_t)0x09) /* CAN1 Alternate Function mapping */ +#define GPIO_AF9_CAN2 ((uint8_t)0x09) /* CAN2 Alternate Function mapping */ +#define GPIO_AF9_TIM12 ((uint8_t)0x09) /* TIM12 Alternate Function mapping */ +#define GPIO_AF9_TIM13 ((uint8_t)0x09) /* TIM13 Alternate Function mapping */ +#define GPIO_AF9_TIM14 ((uint8_t)0x09) /* TIM14 Alternate Function mapping */ +#define GPIO_AF9_QSPI ((uint8_t)0x09) /* QSPI Alternate Function mapping */ + +/** + * @brief AF 10 selection + */ +#define GPIO_AF10_OTG_FS ((uint8_t)0x0A) /* OTG_FS Alternate Function mapping */ +#define GPIO_AF10_OTG_HS ((uint8_t)0x0A) /* OTG_HS Alternate Function mapping */ +#define GPIO_AF10_SAI2 ((uint8_t)0x0A) /* SAI2 Alternate Function mapping */ +#define GPIO_AF10_QSPI ((uint8_t)0x0A) /* QSPI Alternate Function mapping */ + +/** + * @brief AF 11 selection + */ +#define GPIO_AF11_ETH ((uint8_t)0x0B) /* ETHERNET Alternate Function mapping */ + +/** + * @brief AF 12 selection + */ +#define GPIO_AF12_FMC ((uint8_t)0x0C) /* FMC Alternate Function mapping */ +#define 
GPIO_AF12_OTG_HS_FS ((uint8_t)0x0C) /* OTG HS configured in FS, Alternate Function mapping */ +#define GPIO_AF12_SDIO ((uint8_t)0x0C) /* SDIO Alternate Function mapping */ + +/** + * @brief AF 13 selection + */ +#define GPIO_AF13_DCMI ((uint8_t)0x0D) /* DCMI Alternate Function mapping */ + +/** + * @brief AF 15 selection + */ +#define GPIO_AF15_EVENTOUT ((uint8_t)0x0F) /* EVENTOUT Alternate Function mapping */ + +#endif /* STM32F446xx */ +/*----------------------------------------------------------------------------*/ + +/*-------------------------------- STM32F469xx/STM32F479xx--------------------*/ +#if defined(STM32F469xx) || defined(STM32F479xx) +/** + * @brief AF 0 selection + */ +#define GPIO_AF0_RTC_50Hz ((uint8_t)0x00) /* RTC_50Hz Alternate Function mapping */ +#define GPIO_AF0_MCO ((uint8_t)0x00) /* MCO (MCO1 and MCO2) Alternate Function mapping */ +#define GPIO_AF0_TAMPER ((uint8_t)0x00) /* TAMPER (TAMPER_1 and TAMPER_2) Alternate Function mapping */ +#define GPIO_AF0_SWJ ((uint8_t)0x00) /* SWJ (SWD and JTAG) Alternate Function mapping */ +#define GPIO_AF0_TRACE ((uint8_t)0x00) /* TRACE Alternate Function mapping */ + +/** + * @brief AF 1 selection + */ +#define GPIO_AF1_TIM1 ((uint8_t)0x01) /* TIM1 Alternate Function mapping */ +#define GPIO_AF1_TIM2 ((uint8_t)0x01) /* TIM2 Alternate Function mapping */ + +/** + * @brief AF 2 selection + */ +#define GPIO_AF2_TIM3 ((uint8_t)0x02) /* TIM3 Alternate Function mapping */ +#define GPIO_AF2_TIM4 ((uint8_t)0x02) /* TIM4 Alternate Function mapping */ +#define GPIO_AF2_TIM5 ((uint8_t)0x02) /* TIM5 Alternate Function mapping */ + +/** + * @brief AF 3 selection + */ +#define GPIO_AF3_TIM8 ((uint8_t)0x03) /* TIM8 Alternate Function mapping */ +#define GPIO_AF3_TIM9 ((uint8_t)0x03) /* TIM9 Alternate Function mapping */ +#define GPIO_AF3_TIM10 ((uint8_t)0x03) /* TIM10 Alternate Function mapping */ +#define GPIO_AF3_TIM11 ((uint8_t)0x03) /* TIM11 Alternate Function mapping */ + +/** + * @brief AF 4 selection + */ +#define GPIO_AF4_I2C1 ((uint8_t)0x04) /* I2C1 Alternate Function mapping */ +#define GPIO_AF4_I2C2 ((uint8_t)0x04) /* I2C2 Alternate Function mapping */ +#define GPIO_AF4_I2C3 ((uint8_t)0x04) /* I2C3 Alternate Function mapping */ + +/** + * @brief AF 5 selection + */ +#define GPIO_AF5_SPI1 ((uint8_t)0x05) /* SPI1 Alternate Function mapping */ +#define GPIO_AF5_SPI2 ((uint8_t)0x05) /* SPI2/I2S2 Alternate Function mapping */ +#define GPIO_AF5_SPI3 ((uint8_t)0x05) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF5_SPI4 ((uint8_t)0x05) /* SPI4 Alternate Function mapping */ +#define GPIO_AF5_SPI5 ((uint8_t)0x05) /* SPI5 Alternate Function mapping */ +#define GPIO_AF5_SPI6 ((uint8_t)0x05) /* SPI6 Alternate Function mapping */ +#define GPIO_AF5_I2S3ext ((uint8_t)0x05) /* I2S3ext_SD Alternate Function mapping */ + +/** + * @brief AF 6 selection + */ +#define GPIO_AF6_SPI3 ((uint8_t)0x06) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF6_I2S2ext ((uint8_t)0x06) /* I2S2ext_SD Alternate Function mapping */ +#define GPIO_AF6_SAI1 ((uint8_t)0x06) /* SAI1 Alternate Function mapping */ + +/** + * @brief AF 7 selection + */ +#define GPIO_AF7_USART1 ((uint8_t)0x07) /* USART1 Alternate Function mapping */ +#define GPIO_AF7_USART2 ((uint8_t)0x07) /* USART2 Alternate Function mapping */ +#define GPIO_AF7_USART3 ((uint8_t)0x07) /* USART3 Alternate Function mapping */ +#define GPIO_AF7_I2S3ext ((uint8_t)0x07) /* I2S3ext_SD Alternate Function mapping */ + +/** + * @brief AF 8 selection + */ +#define GPIO_AF8_UART4 ((uint8_t)0x08) /* 
UART4 Alternate Function mapping */ +#define GPIO_AF8_UART5 ((uint8_t)0x08) /* UART5 Alternate Function mapping */ +#define GPIO_AF8_USART6 ((uint8_t)0x08) /* USART6 Alternate Function mapping */ +#define GPIO_AF8_UART7 ((uint8_t)0x08) /* UART7 Alternate Function mapping */ +#define GPIO_AF8_UART8 ((uint8_t)0x08) /* UART8 Alternate Function mapping */ + +/** + * @brief AF 9 selection + */ +#define GPIO_AF9_CAN1 ((uint8_t)0x09) /* CAN1 Alternate Function mapping */ +#define GPIO_AF9_CAN2 ((uint8_t)0x09) /* CAN2 Alternate Function mapping */ +#define GPIO_AF9_TIM12 ((uint8_t)0x09) /* TIM12 Alternate Function mapping */ +#define GPIO_AF9_TIM13 ((uint8_t)0x09) /* TIM13 Alternate Function mapping */ +#define GPIO_AF9_TIM14 ((uint8_t)0x09) /* TIM14 Alternate Function mapping */ +#define GPIO_AF9_LTDC ((uint8_t)0x09) /* LCD-TFT Alternate Function mapping */ +#define GPIO_AF9_QSPI ((uint8_t)0x09) /* QSPI Alternate Function mapping */ + +/** + * @brief AF 10 selection + */ +#define GPIO_AF10_OTG_FS ((uint8_t)0x0A) /* OTG_FS Alternate Function mapping */ +#define GPIO_AF10_OTG_HS ((uint8_t)0x0A) /* OTG_HS Alternate Function mapping */ +#define GPIO_AF10_QSPI ((uint8_t)0x0A) /* QSPI Alternate Function mapping */ + +/** + * @brief AF 11 selection + */ +#define GPIO_AF11_ETH ((uint8_t)0x0B) /* ETHERNET Alternate Function mapping */ + +/** + * @brief AF 12 selection + */ +#define GPIO_AF12_FMC ((uint8_t)0x0C) /* FMC Alternate Function mapping */ +#define GPIO_AF12_OTG_HS_FS ((uint8_t)0x0C) /* OTG HS configured in FS, Alternate Function mapping */ +#define GPIO_AF12_SDIO ((uint8_t)0x0C) /* SDIO Alternate Function mapping */ + +/** + * @brief AF 13 selection + */ +#define GPIO_AF13_DCMI ((uint8_t)0x0D) /* DCMI Alternate Function mapping */ +#define GPIO_AF13_DSI ((uint8_t)0x0D) /* DSI Alternate Function mapping */ + +/** + * @brief AF 14 selection + */ +#define GPIO_AF14_LTDC ((uint8_t)0x0E) /* LCD-TFT Alternate Function mapping */ + +/** + * @brief AF 15 selection + */ +#define GPIO_AF15_EVENTOUT ((uint8_t)0x0F) /* EVENTOUT Alternate Function mapping */ + +#endif /* STM32F469xx || STM32F479xx */ +/*----------------------------------------------------------------------------*/ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup GPIOEx_Exported_Macros GPIO Exported Macros + * @{ + */ +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup GPIOEx_Exported_Functions GPIO Exported Functions + * @{ + */ +/** + * @} + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup GPIOEx_Private_Constants GPIO Private Constants + * @{ + */ +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup GPIOEx_Private_Macros GPIO Private Macros + * @{ + */ +/** @defgroup GPIOEx_Get_Port_Index GPIO Get Port Index + * @{ + */ +#if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx) || defined(STM32F417xx) +#define GPIO_GET_INDEX(__GPIOx__) (uint8_t)(((__GPIOx__) == (GPIOA))? 0U :\ + ((__GPIOx__) == (GPIOB))? 1U :\ + ((__GPIOx__) == (GPIOC))? 2U :\ + ((__GPIOx__) == (GPIOD))? 3U :\ + ((__GPIOx__) == (GPIOE))? 4U :\ + ((__GPIOx__) == (GPIOF))? 5U :\ + ((__GPIOx__) == (GPIOG))? 6U :\ + ((__GPIOx__) == (GPIOH))? 
7U : 8U) +#endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx */ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) ||\ + defined(STM32F469xx) || defined(STM32F479xx) +#define GPIO_GET_INDEX(__GPIOx__) (uint8_t)(((__GPIOx__) == (GPIOA))? 0U :\ + ((__GPIOx__) == (GPIOB))? 1U :\ + ((__GPIOx__) == (GPIOC))? 2U :\ + ((__GPIOx__) == (GPIOD))? 3U :\ + ((__GPIOx__) == (GPIOE))? 4U :\ + ((__GPIOx__) == (GPIOF))? 5U :\ + ((__GPIOx__) == (GPIOG))? 6U :\ + ((__GPIOx__) == (GPIOH))? 7U :\ + ((__GPIOx__) == (GPIOI))? 8U :\ + ((__GPIOx__) == (GPIOJ))? 9U : 10U) +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ + +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) +#define GPIO_GET_INDEX(__GPIOx__) (uint8_t)(((__GPIOx__) == (GPIOA))? 0U :\ + ((__GPIOx__) == (GPIOB))? 1U :\ + ((__GPIOx__) == (GPIOC))? 2U : 7U) +#endif /* STM32F410Tx || STM32F410Cx || STM32F410Rx */ + +#if defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) +#define GPIO_GET_INDEX(__GPIOx__) (uint8_t)(((__GPIOx__) == (GPIOA))? 0U :\ + ((__GPIOx__) == (GPIOB))? 1U :\ + ((__GPIOx__) == (GPIOC))? 2U :\ + ((__GPIOx__) == (GPIOD))? 3U :\ + ((__GPIOx__) == (GPIOE))? 4U : 7U) +#endif /* STM32F401xC || STM32F401xE || STM32F411xE */ + +#if defined(STM32F446xx) || defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) +#define GPIO_GET_INDEX(__GPIOx__) (uint8_t)(((__GPIOx__) == (GPIOA))? 0U :\ + ((__GPIOx__) == (GPIOB))? 1U :\ + ((__GPIOx__) == (GPIOC))? 2U :\ + ((__GPIOx__) == (GPIOD))? 3U :\ + ((__GPIOx__) == (GPIOE))? 4U :\ + ((__GPIOx__) == (GPIOF))? 5U :\ + ((__GPIOx__) == (GPIOG))? 6U : 7U) +#endif /* STM32F446xx || STM32F412Zx || STM32F413xx || STM32F423xx */ +#if defined(STM32F412Vx) +#define GPIO_GET_INDEX(__GPIOx__) (uint8_t)(((__GPIOx__) == (GPIOA))? 0U :\ + ((__GPIOx__) == (GPIOB))? 1U :\ + ((__GPIOx__) == (GPIOC))? 2U :\ + ((__GPIOx__) == (GPIOD))? 3U :\ + ((__GPIOx__) == (GPIOE))? 4U : 7U) +#endif /* STM32F412Vx */ +#if defined(STM32F412Rx) +#define GPIO_GET_INDEX(__GPIOx__) (uint8_t)(((__GPIOx__) == (GPIOA))? 0U :\ + ((__GPIOx__) == (GPIOB))? 1U :\ + ((__GPIOx__) == (GPIOC))? 2U :\ + ((__GPIOx__) == (GPIOD))? 3U : 7U) +#endif /* STM32F412Rx */ +#if defined(STM32F412Cx) +#define GPIO_GET_INDEX(__GPIOx__) (uint8_t)(((__GPIOx__) == (GPIOA))? 0U :\ + ((__GPIOx__) == (GPIOB))? 1U :\ + ((__GPIOx__) == (GPIOC))? 
2U : 7U) +#endif /* STM32F412Cx */ + +/** + * @} + */ + +/** @defgroup GPIOEx_IS_Alternat_function_selection GPIO Check Alternate Function + * @{ + */ +/*------------------------- STM32F429xx/STM32F439xx---------------------------*/ +#if defined(STM32F429xx) || defined(STM32F439xx) +#define IS_GPIO_AF(AF) (((AF) == GPIO_AF0_RTC_50Hz) || ((AF) == GPIO_AF9_TIM14) || \ + ((AF) == GPIO_AF0_MCO) || ((AF) == GPIO_AF0_TAMPER) || \ + ((AF) == GPIO_AF0_SWJ) || ((AF) == GPIO_AF0_TRACE) || \ + ((AF) == GPIO_AF1_TIM1) || ((AF) == GPIO_AF1_TIM2) || \ + ((AF) == GPIO_AF2_TIM3) || ((AF) == GPIO_AF2_TIM4) || \ + ((AF) == GPIO_AF2_TIM5) || ((AF) == GPIO_AF3_TIM8) || \ + ((AF) == GPIO_AF4_I2C1) || ((AF) == GPIO_AF4_I2C2) || \ + ((AF) == GPIO_AF4_I2C3) || ((AF) == GPIO_AF5_SPI1) || \ + ((AF) == GPIO_AF5_SPI2) || ((AF) == GPIO_AF9_TIM13) || \ + ((AF) == GPIO_AF6_SPI3) || ((AF) == GPIO_AF9_TIM12) || \ + ((AF) == GPIO_AF7_USART1) || ((AF) == GPIO_AF7_USART2) || \ + ((AF) == GPIO_AF7_USART3) || ((AF) == GPIO_AF8_UART4) || \ + ((AF) == GPIO_AF8_UART5) || ((AF) == GPIO_AF8_USART6) || \ + ((AF) == GPIO_AF9_CAN1) || ((AF) == GPIO_AF9_CAN2) || \ + ((AF) == GPIO_AF10_OTG_FS) || ((AF) == GPIO_AF10_OTG_HS) || \ + ((AF) == GPIO_AF11_ETH) || ((AF) == GPIO_AF12_OTG_HS_FS) || \ + ((AF) == GPIO_AF12_SDIO) || ((AF) == GPIO_AF13_DCMI) || \ + ((AF) == GPIO_AF15_EVENTOUT) || ((AF) == GPIO_AF5_SPI4) || \ + ((AF) == GPIO_AF5_SPI5) || ((AF) == GPIO_AF5_SPI6) || \ + ((AF) == GPIO_AF8_UART7) || ((AF) == GPIO_AF8_UART8) || \ + ((AF) == GPIO_AF12_FMC) || ((AF) == GPIO_AF6_SAI1) || \ + ((AF) == GPIO_AF14_LTDC)) + +#endif /* STM32F429xx || STM32F439xx */ +/*----------------------------------------------------------------------------*/ + +/*---------------------------------- STM32F427xx/STM32F437xx------------------*/ +#if defined(STM32F427xx) || defined(STM32F437xx) +#define IS_GPIO_AF(AF) (((AF) == GPIO_AF0_RTC_50Hz) || ((AF) == GPIO_AF9_TIM14) || \ + ((AF) == GPIO_AF0_MCO) || ((AF) == GPIO_AF0_TAMPER) || \ + ((AF) == GPIO_AF0_SWJ) || ((AF) == GPIO_AF0_TRACE) || \ + ((AF) == GPIO_AF1_TIM1) || ((AF) == GPIO_AF1_TIM2) || \ + ((AF) == GPIO_AF2_TIM3) || ((AF) == GPIO_AF2_TIM4) || \ + ((AF) == GPIO_AF2_TIM5) || ((AF) == GPIO_AF3_TIM8) || \ + ((AF) == GPIO_AF4_I2C1) || ((AF) == GPIO_AF4_I2C2) || \ + ((AF) == GPIO_AF4_I2C3) || ((AF) == GPIO_AF5_SPI1) || \ + ((AF) == GPIO_AF5_SPI2) || ((AF) == GPIO_AF9_TIM13) || \ + ((AF) == GPIO_AF6_SPI3) || ((AF) == GPIO_AF9_TIM12) || \ + ((AF) == GPIO_AF7_USART1) || ((AF) == GPIO_AF7_USART2) || \ + ((AF) == GPIO_AF7_USART3) || ((AF) == GPIO_AF8_UART4) || \ + ((AF) == GPIO_AF8_UART5) || ((AF) == GPIO_AF8_USART6) || \ + ((AF) == GPIO_AF9_CAN1) || ((AF) == GPIO_AF9_CAN2) || \ + ((AF) == GPIO_AF10_OTG_FS) || ((AF) == GPIO_AF10_OTG_HS) || \ + ((AF) == GPIO_AF11_ETH) || ((AF) == GPIO_AF12_OTG_HS_FS) || \ + ((AF) == GPIO_AF12_SDIO) || ((AF) == GPIO_AF13_DCMI) || \ + ((AF) == GPIO_AF15_EVENTOUT) || ((AF) == GPIO_AF5_SPI4) || \ + ((AF) == GPIO_AF5_SPI5) || ((AF) == GPIO_AF5_SPI6) || \ + ((AF) == GPIO_AF8_UART7) || ((AF) == GPIO_AF8_UART8) || \ + ((AF) == GPIO_AF12_FMC) || ((AF) == GPIO_AF6_SAI1)) + +#endif /* STM32F427xx || STM32F437xx */ +/*----------------------------------------------------------------------------*/ + +/*---------------------------------- STM32F407xx/STM32F417xx------------------*/ +#if defined(STM32F407xx) || defined(STM32F417xx) +#define IS_GPIO_AF(AF) (((AF) == GPIO_AF0_RTC_50Hz) || ((AF) == GPIO_AF9_TIM14) || \ + ((AF) == GPIO_AF0_MCO) || ((AF) == GPIO_AF0_TAMPER) || \ + ((AF) == GPIO_AF0_SWJ) 
|| ((AF) == GPIO_AF0_TRACE) || \ + ((AF) == GPIO_AF1_TIM1) || ((AF) == GPIO_AF1_TIM2) || \ + ((AF) == GPIO_AF2_TIM3) || ((AF) == GPIO_AF2_TIM4) || \ + ((AF) == GPIO_AF2_TIM5) || ((AF) == GPIO_AF3_TIM8) || \ + ((AF) == GPIO_AF4_I2C1) || ((AF) == GPIO_AF4_I2C2) || \ + ((AF) == GPIO_AF4_I2C3) || ((AF) == GPIO_AF5_SPI1) || \ + ((AF) == GPIO_AF5_SPI2) || ((AF) == GPIO_AF9_TIM13) || \ + ((AF) == GPIO_AF6_SPI3) || ((AF) == GPIO_AF9_TIM12) || \ + ((AF) == GPIO_AF7_USART1) || ((AF) == GPIO_AF7_USART2) || \ + ((AF) == GPIO_AF7_USART3) || ((AF) == GPIO_AF8_UART4) || \ + ((AF) == GPIO_AF8_UART5) || ((AF) == GPIO_AF8_USART6) || \ + ((AF) == GPIO_AF9_CAN1) || ((AF) == GPIO_AF9_CAN2) || \ + ((AF) == GPIO_AF10_OTG_FS) || ((AF) == GPIO_AF10_OTG_HS) || \ + ((AF) == GPIO_AF11_ETH) || ((AF) == GPIO_AF12_OTG_HS_FS) || \ + ((AF) == GPIO_AF12_SDIO) || ((AF) == GPIO_AF13_DCMI) || \ + ((AF) == GPIO_AF12_FSMC) || ((AF) == GPIO_AF15_EVENTOUT)) + +#endif /* STM32F407xx || STM32F417xx */ +/*----------------------------------------------------------------------------*/ + +/*---------------------------------- STM32F405xx/STM32F415xx------------------*/ +#if defined(STM32F405xx) || defined(STM32F415xx) +#define IS_GPIO_AF(AF) (((AF) == GPIO_AF0_RTC_50Hz) || ((AF) == GPIO_AF9_TIM14) || \ + ((AF) == GPIO_AF0_MCO) || ((AF) == GPIO_AF0_TAMPER) || \ + ((AF) == GPIO_AF0_SWJ) || ((AF) == GPIO_AF0_TRACE) || \ + ((AF) == GPIO_AF1_TIM1) || ((AF) == GPIO_AF1_TIM2) || \ + ((AF) == GPIO_AF2_TIM3) || ((AF) == GPIO_AF2_TIM4) || \ + ((AF) == GPIO_AF2_TIM5) || ((AF) == GPIO_AF3_TIM8) || \ + ((AF) == GPIO_AF4_I2C1) || ((AF) == GPIO_AF4_I2C2) || \ + ((AF) == GPIO_AF4_I2C3) || ((AF) == GPIO_AF5_SPI1) || \ + ((AF) == GPIO_AF5_SPI2) || ((AF) == GPIO_AF9_TIM13) || \ + ((AF) == GPIO_AF6_SPI3) || ((AF) == GPIO_AF9_TIM12) || \ + ((AF) == GPIO_AF7_USART1) || ((AF) == GPIO_AF7_USART2) || \ + ((AF) == GPIO_AF7_USART3) || ((AF) == GPIO_AF8_UART4) || \ + ((AF) == GPIO_AF8_UART5) || ((AF) == GPIO_AF8_USART6) || \ + ((AF) == GPIO_AF9_CAN1) || ((AF) == GPIO_AF9_CAN2) || \ + ((AF) == GPIO_AF10_OTG_FS) || ((AF) == GPIO_AF10_OTG_HS) || \ + ((AF) == GPIO_AF12_OTG_HS_FS) || ((AF) == GPIO_AF12_SDIO) || \ + ((AF) == GPIO_AF12_FSMC) || ((AF) == GPIO_AF15_EVENTOUT)) + +#endif /* STM32F405xx || STM32F415xx */ + +/*----------------------------------------------------------------------------*/ + +/*---------------------------------------- STM32F401xx------------------------*/ +#if defined(STM32F401xC) || defined(STM32F401xE) +#define IS_GPIO_AF(AF) (((AF) == GPIO_AF0_RTC_50Hz) || ((AF) == GPIO_AF12_SDIO) || \ + ((AF) == GPIO_AF0_MCO) || ((AF) == GPIO_AF0_TAMPER) || \ + ((AF) == GPIO_AF0_SWJ) || ((AF) == GPIO_AF0_TRACE) || \ + ((AF) == GPIO_AF1_TIM1) || ((AF) == GPIO_AF1_TIM2) || \ + ((AF) == GPIO_AF2_TIM3) || ((AF) == GPIO_AF2_TIM4) || \ + ((AF) == GPIO_AF2_TIM5) || ((AF) == GPIO_AF3_TIM9) || \ + ((AF) == GPIO_AF3_TIM10) || ((AF) == GPIO_AF3_TIM11) || \ + ((AF) == GPIO_AF4_I2C1) || ((AF) == GPIO_AF4_I2C2) || \ + ((AF) == GPIO_AF4_I2C3) || ((AF) == GPIO_AF5_SPI1) || \ + ((AF) == GPIO_AF5_SPI2) || ((AF) == GPIO_AF5_SPI4) || \ + ((AF) == GPIO_AF6_SPI3) || ((AF) == GPIO_AF7_USART1) || \ + ((AF) == GPIO_AF7_USART2) || ((AF) == GPIO_AF8_USART6) || \ + ((AF) == GPIO_AF9_I2C2) || ((AF) == GPIO_AF9_I2C3) || \ + ((AF) == GPIO_AF10_OTG_FS) || ((AF) == GPIO_AF15_EVENTOUT)) +#endif /* STM32F401xC || STM32F401xE */ +/*----------------------------------------------------------------------------*/ +/*---------------------------------------- STM32F410xx------------------------*/ 
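The GPIO_AFx_... constants above are the values an application places in the Alternate field of GPIO_InitTypeDef, and the per-device IS_GPIO_AF() macros (such as the STM32F401xx one above and the STM32F410xx one that follows) are the assert_param() checks HAL_GPIO_Init() applies to that field when USE_FULL_ASSERT is enabled. A minimal sketch, assuming a board where PA9 is wired as USART1_TX and using an arbitrary helper name, could select the alternate function like this:

#include "stm32f4xx_hal.h"

/* Illustrative sketch only: route PA9 to USART1_TX (AF7). USART1 clock/baud setup is out of scope here. */
static void usart1_tx_pin_config(void)
{
  GPIO_InitTypeDef gpio_init = {0};

  __HAL_RCC_GPIOA_CLK_ENABLE();                     /* GPIOA must be clocked before its registers are written */

  gpio_init.Pin       = GPIO_PIN_9;
  gpio_init.Mode      = GPIO_MODE_AF_PP;            /* alternate function, push-pull output driver */
  gpio_init.Pull      = GPIO_NOPULL;
  gpio_init.Speed     = GPIO_SPEED_FREQ_VERY_HIGH;
  gpio_init.Alternate = GPIO_AF7_USART1;            /* one of the AF constants defined in this header */
  HAL_GPIO_Init(GPIOA, &gpio_init);                 /* checks IS_GPIO_AF(Alternate) via assert_param() in AF mode */
}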
+#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) +#define IS_GPIO_AF(AF) (((AF) < 10U) || ((AF) == 15U)) +#endif /* STM32F410Tx || STM32F410Cx || STM32F410Rx */ + +/*---------------------------------------- STM32F411xx------------------------*/ +#if defined(STM32F411xE) +#define IS_GPIO_AF(AF) (((AF) == GPIO_AF0_RTC_50Hz) || ((AF) == GPIO_AF9_TIM14) || \ + ((AF) == GPIO_AF0_MCO) || ((AF) == GPIO_AF0_TAMPER) || \ + ((AF) == GPIO_AF0_SWJ) || ((AF) == GPIO_AF0_TRACE) || \ + ((AF) == GPIO_AF1_TIM1) || ((AF) == GPIO_AF1_TIM2) || \ + ((AF) == GPIO_AF2_TIM3) || ((AF) == GPIO_AF2_TIM4) || \ + ((AF) == GPIO_AF2_TIM5) || ((AF) == GPIO_AF4_I2C1) || \ + ((AF) == GPIO_AF4_I2C2) || ((AF) == GPIO_AF4_I2C3) || \ + ((AF) == GPIO_AF5_SPI1) || ((AF) == GPIO_AF5_SPI2) || \ + ((AF) == GPIO_AF5_SPI3) || ((AF) == GPIO_AF6_SPI4) || \ + ((AF) == GPIO_AF6_SPI3) || ((AF) == GPIO_AF5_SPI4) || \ + ((AF) == GPIO_AF6_SPI5) || ((AF) == GPIO_AF7_SPI3) || \ + ((AF) == GPIO_AF7_USART1) || ((AF) == GPIO_AF7_USART2) || \ + ((AF) == GPIO_AF8_USART6) || ((AF) == GPIO_AF10_OTG_FS) || \ + ((AF) == GPIO_AF9_I2C2) || ((AF) == GPIO_AF9_I2C3) || \ + ((AF) == GPIO_AF12_SDIO) || ((AF) == GPIO_AF15_EVENTOUT)) + +#endif /* STM32F411xE */ +/*----------------------------------------------------------------------------*/ + +/*----------------------------------------------- STM32F446xx ----------------*/ +#if defined(STM32F446xx) +#define IS_GPIO_AF(AF) (((AF) == GPIO_AF0_RTC_50Hz) || ((AF) == GPIO_AF9_TIM14) || \ + ((AF) == GPIO_AF0_MCO) || ((AF) == GPIO_AF0_TAMPER) || \ + ((AF) == GPIO_AF0_SWJ) || ((AF) == GPIO_AF0_TRACE) || \ + ((AF) == GPIO_AF1_TIM1) || ((AF) == GPIO_AF1_TIM2) || \ + ((AF) == GPIO_AF2_TIM3) || ((AF) == GPIO_AF2_TIM4) || \ + ((AF) == GPIO_AF2_TIM5) || ((AF) == GPIO_AF3_TIM8) || \ + ((AF) == GPIO_AF4_I2C1) || ((AF) == GPIO_AF4_I2C2) || \ + ((AF) == GPIO_AF4_I2C3) || ((AF) == GPIO_AF5_SPI1) || \ + ((AF) == GPIO_AF5_SPI2) || ((AF) == GPIO_AF9_TIM13) || \ + ((AF) == GPIO_AF6_SPI3) || ((AF) == GPIO_AF9_TIM12) || \ + ((AF) == GPIO_AF7_USART1) || ((AF) == GPIO_AF7_USART2) || \ + ((AF) == GPIO_AF7_USART3) || ((AF) == GPIO_AF8_UART4) || \ + ((AF) == GPIO_AF8_UART5) || ((AF) == GPIO_AF8_USART6) || \ + ((AF) == GPIO_AF9_CAN1) || ((AF) == GPIO_AF9_CAN2) || \ + ((AF) == GPIO_AF10_OTG_FS) || ((AF) == GPIO_AF10_OTG_HS) || \ + ((AF) == GPIO_AF11_ETH) || ((AF) == GPIO_AF12_OTG_HS_FS) || \ + ((AF) == GPIO_AF12_SDIO) || ((AF) == GPIO_AF13_DCMI) || \ + ((AF) == GPIO_AF15_EVENTOUT) || ((AF) == GPIO_AF5_SPI4) || \ + ((AF) == GPIO_AF12_FMC) || ((AF) == GPIO_AF6_SAI1) || \ + ((AF) == GPIO_AF3_CEC) || ((AF) == GPIO_AF4_CEC) || \ + ((AF) == GPIO_AF5_SPI3) || ((AF) == GPIO_AF6_SPI2) || \ + ((AF) == GPIO_AF6_SPI4) || ((AF) == GPIO_AF7_UART5) || \ + ((AF) == GPIO_AF7_SPI2) || ((AF) == GPIO_AF7_SPI3) || \ + ((AF) == GPIO_AF7_SPDIFRX) || ((AF) == GPIO_AF8_SPDIFRX) || \ + ((AF) == GPIO_AF8_SAI2) || ((AF) == GPIO_AF9_QSPI) || \ + ((AF) == GPIO_AF10_SAI2) || ((AF) == GPIO_AF10_QSPI)) + +#endif /* STM32F446xx */ +/*----------------------------------------------------------------------------*/ + +/*------------------------------------------- STM32F469xx/STM32F479xx --------*/ +#if defined(STM32F469xx) || defined(STM32F479xx) +#define IS_GPIO_AF(AF) (((AF) == GPIO_AF0_RTC_50Hz) || ((AF) == GPIO_AF9_TIM14) || \ + ((AF) == GPIO_AF0_MCO) || ((AF) == GPIO_AF0_TAMPER) || \ + ((AF) == GPIO_AF0_SWJ) || ((AF) == GPIO_AF0_TRACE) || \ + ((AF) == GPIO_AF1_TIM1) || ((AF) == GPIO_AF1_TIM2) || \ + ((AF) == GPIO_AF2_TIM3) || ((AF) == GPIO_AF2_TIM4) 
|| \ + ((AF) == GPIO_AF2_TIM5) || ((AF) == GPIO_AF3_TIM8) || \ + ((AF) == GPIO_AF4_I2C1) || ((AF) == GPIO_AF4_I2C2) || \ + ((AF) == GPIO_AF4_I2C3) || ((AF) == GPIO_AF5_SPI1) || \ + ((AF) == GPIO_AF5_SPI2) || ((AF) == GPIO_AF9_TIM13) || \ + ((AF) == GPIO_AF6_SPI3) || ((AF) == GPIO_AF9_TIM12) || \ + ((AF) == GPIO_AF7_USART1) || ((AF) == GPIO_AF7_USART2) || \ + ((AF) == GPIO_AF7_USART3) || ((AF) == GPIO_AF8_UART4) || \ + ((AF) == GPIO_AF8_UART5) || ((AF) == GPIO_AF8_USART6) || \ + ((AF) == GPIO_AF9_CAN1) || ((AF) == GPIO_AF9_CAN2) || \ + ((AF) == GPIO_AF10_OTG_FS) || ((AF) == GPIO_AF10_OTG_HS) || \ + ((AF) == GPIO_AF11_ETH) || ((AF) == GPIO_AF12_OTG_HS_FS) || \ + ((AF) == GPIO_AF12_SDIO) || ((AF) == GPIO_AF13_DCMI) || \ + ((AF) == GPIO_AF15_EVENTOUT) || ((AF) == GPIO_AF5_SPI4) || \ + ((AF) == GPIO_AF5_SPI5) || ((AF) == GPIO_AF5_SPI6) || \ + ((AF) == GPIO_AF8_UART7) || ((AF) == GPIO_AF8_UART8) || \ + ((AF) == GPIO_AF12_FMC) || ((AF) == GPIO_AF6_SAI1) || \ + ((AF) == GPIO_AF14_LTDC) || ((AF) == GPIO_AF13_DSI) || \ + ((AF) == GPIO_AF9_QSPI) || ((AF) == GPIO_AF10_QSPI)) + +#endif /* STM32F469xx || STM32F479xx */ +/*----------------------------------------------------------------------------*/ + +/*------------------STM32F412Zx/STM32F412Vx/STM32F412Rx/STM32F412Cx-----------*/ +#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) +#define IS_GPIO_AF(AF) (((AF) < 16U) && ((AF) != 11U) && ((AF) != 14U) && ((AF) != 13U)) +#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ +/*----------------------------------------------------------------------------*/ + +/*------------------STM32F413xx/STM32F423xx-----------------------------------*/ +#if defined(STM32F413xx) || defined(STM32F423xx) +#define IS_GPIO_AF(AF) (((AF) < 16U) && ((AF) != 13U)) +#endif /* STM32F413xx || STM32F423xx */ +/*----------------------------------------------------------------------------*/ + +/** + * @} + */ + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup GPIOEx_Private_Functions GPIO Private Functions + * @{ + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_HAL_GPIO_EX_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_pwr.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_pwr.h new file mode 100644 index 0000000..a7273d5 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_pwr.h @@ -0,0 +1,436 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_pwr.h + * @author MCD Application Team + * @brief Header file of PWR HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_HAL_PWR_H +#define __STM32F4xx_HAL_PWR_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal_def.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @addtogroup PWR + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ + +/** @defgroup PWR_Exported_Types PWR Exported Types + * @{ + */ + +/** + * @brief PWR PVD configuration structure definition + */ +typedef struct +{ + uint32_t PVDLevel; /*!< PVDLevel: Specifies the PVD detection level. + This parameter can be a value of @ref PWR_PVD_detection_level */ + + uint32_t Mode; /*!< Mode: Specifies the operating mode for the selected pins. + This parameter can be a value of @ref PWR_PVD_Mode */ +}PWR_PVDTypeDef; + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup PWR_Exported_Constants PWR Exported Constants + * @{ + */ + +/** @defgroup PWR_WakeUp_Pins PWR WakeUp Pins + * @{ + */ +#define PWR_WAKEUP_PIN1 0x00000100U +/** + * @} + */ + +/** @defgroup PWR_PVD_detection_level PWR PVD detection level + * @{ + */ +#define PWR_PVDLEVEL_0 PWR_CR_PLS_LEV0 +#define PWR_PVDLEVEL_1 PWR_CR_PLS_LEV1 +#define PWR_PVDLEVEL_2 PWR_CR_PLS_LEV2 +#define PWR_PVDLEVEL_3 PWR_CR_PLS_LEV3 +#define PWR_PVDLEVEL_4 PWR_CR_PLS_LEV4 +#define PWR_PVDLEVEL_5 PWR_CR_PLS_LEV5 +#define PWR_PVDLEVEL_6 PWR_CR_PLS_LEV6 +#define PWR_PVDLEVEL_7 PWR_CR_PLS_LEV7/* External input analog voltage + (Compare internally to VREFINT) */ +/** + * @} + */ + +/** @defgroup PWR_PVD_Mode PWR PVD Mode + * @{ + */ +#define PWR_PVD_MODE_NORMAL 0x00000000U /*!< basic mode is used */ +#define PWR_PVD_MODE_IT_RISING 0x00010001U /*!< External Interrupt Mode with Rising edge trigger detection */ +#define PWR_PVD_MODE_IT_FALLING 0x00010002U /*!< External Interrupt Mode with Falling edge trigger detection */ +#define PWR_PVD_MODE_IT_RISING_FALLING 0x00010003U /*!< External Interrupt Mode with Rising/Falling edge trigger detection */ +#define PWR_PVD_MODE_EVENT_RISING 0x00020001U /*!< Event Mode with Rising edge trigger detection */ +#define PWR_PVD_MODE_EVENT_FALLING 0x00020002U /*!< Event Mode with Falling edge trigger detection */ +#define PWR_PVD_MODE_EVENT_RISING_FALLING 0x00020003U /*!< Event Mode with Rising/Falling edge trigger detection */ +/** + * @} + */ + + +/** @defgroup PWR_Regulator_state_in_STOP_mode PWR Regulator state in SLEEP/STOP mode + * @{ + */ +#define PWR_MAINREGULATOR_ON 0x00000000U +#define PWR_LOWPOWERREGULATOR_ON PWR_CR_LPDS +/** + * @} + */ + +/** @defgroup PWR_SLEEP_mode_entry PWR SLEEP mode entry + * @{ + */ +#define PWR_SLEEPENTRY_WFI ((uint8_t)0x01) +#define PWR_SLEEPENTRY_WFE ((uint8_t)0x02) +#define PWR_SLEEPENTRY_WFE_NO_EVT_CLEAR ((uint8_t)0x03) + +/** + * @} + */ + +/** @defgroup PWR_STOP_mode_entry PWR STOP mode entry + * @{ + */ +#define PWR_STOPENTRY_WFI ((uint8_t)0x01) +#define PWR_STOPENTRY_WFE ((uint8_t)0x02) +#define PWR_STOPENTRY_WFE_NO_EVT_CLEAR ((uint8_t)0x03) +/** + * @} + */ + +/** @defgroup PWR_Flag PWR Flag + * @{ + */ +#define PWR_FLAG_WU PWR_CSR_WUF +#define PWR_FLAG_SB PWR_CSR_SBF +#define PWR_FLAG_PVDO PWR_CSR_PVDO +#define PWR_FLAG_BRR PWR_CSR_BRR +#define PWR_FLAG_VOSRDY PWR_CSR_VOSRDY +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro 
------------------------------------------------------------*/ +/** @defgroup PWR_Exported_Macro PWR Exported Macro + * @{ + */ + +/** @brief Check PWR flag is set or not. + * @param __FLAG__ specifies the flag to check. + * This parameter can be one of the following values: + * @arg PWR_FLAG_WU: Wake Up flag. This flag indicates that a wakeup event + * was received from the WKUP pin or from the RTC alarm (Alarm A + * or Alarm B), RTC Tamper event, RTC TimeStamp event or RTC Wakeup. + * An additional wakeup event is detected if the WKUP pin is enabled + * (by setting the EWUP bit) when the WKUP pin level is already high. + * @arg PWR_FLAG_SB: StandBy flag. This flag indicates that the system was + * resumed from StandBy mode. + * @arg PWR_FLAG_PVDO: PVD Output. This flag is valid only if PVD is enabled + * by the HAL_PWR_EnablePVD() function. The PVD is stopped by Standby mode + * For this reason, this bit is equal to 0 after Standby or reset + * until the PVDE bit is set. + * @arg PWR_FLAG_BRR: Backup regulator ready flag. This bit is not reset + * when the device wakes up from Standby mode or by a system reset + * or power reset. + * @arg PWR_FLAG_VOSRDY: This flag indicates that the Regulator voltage + * scaling output selection is ready. + * @retval The new state of __FLAG__ (TRUE or FALSE). + */ +#define __HAL_PWR_GET_FLAG(__FLAG__) ((PWR->CSR & (__FLAG__)) == (__FLAG__)) + +/** @brief Clear the PWR's pending flags. + * @param __FLAG__ specifies the flag to clear. + * This parameter can be one of the following values: + * @arg PWR_FLAG_WU: Wake Up flag + * @arg PWR_FLAG_SB: StandBy flag + */ +#define __HAL_PWR_CLEAR_FLAG(__FLAG__) (PWR->CR |= (__FLAG__) << 2U) + +/** + * @brief Enable the PVD Exti Line 16. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_ENABLE_IT() (EXTI->IMR |= (PWR_EXTI_LINE_PVD)) + +/** + * @brief Disable the PVD EXTI Line 16. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_DISABLE_IT() (EXTI->IMR &= ~(PWR_EXTI_LINE_PVD)) + +/** + * @brief Enable event on PVD Exti Line 16. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_ENABLE_EVENT() (EXTI->EMR |= (PWR_EXTI_LINE_PVD)) + +/** + * @brief Disable event on PVD Exti Line 16. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_DISABLE_EVENT() (EXTI->EMR &= ~(PWR_EXTI_LINE_PVD)) + +/** + * @brief Enable the PVD Extended Interrupt Rising Trigger. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_ENABLE_RISING_EDGE() SET_BIT(EXTI->RTSR, PWR_EXTI_LINE_PVD) + +/** + * @brief Disable the PVD Extended Interrupt Rising Trigger. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_DISABLE_RISING_EDGE() CLEAR_BIT(EXTI->RTSR, PWR_EXTI_LINE_PVD) + +/** + * @brief Enable the PVD Extended Interrupt Falling Trigger. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_ENABLE_FALLING_EDGE() SET_BIT(EXTI->FTSR, PWR_EXTI_LINE_PVD) + + +/** + * @brief Disable the PVD Extended Interrupt Falling Trigger. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_DISABLE_FALLING_EDGE() CLEAR_BIT(EXTI->FTSR, PWR_EXTI_LINE_PVD) + + +/** + * @brief PVD EXTI line configuration: set rising & falling edge trigger. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_ENABLE_RISING_FALLING_EDGE() do{__HAL_PWR_PVD_EXTI_ENABLE_RISING_EDGE();\ + __HAL_PWR_PVD_EXTI_ENABLE_FALLING_EDGE();\ + }while(0U) + +/** + * @brief Disable the PVD Extended Interrupt Rising & Falling Trigger. + * This parameter can be: + * @retval None. 
+ */ +#define __HAL_PWR_PVD_EXTI_DISABLE_RISING_FALLING_EDGE() do{__HAL_PWR_PVD_EXTI_DISABLE_RISING_EDGE();\ + __HAL_PWR_PVD_EXTI_DISABLE_FALLING_EDGE();\ + }while(0U) + +/** + * @brief checks whether the specified PVD Exti interrupt flag is set or not. + * @retval EXTI PVD Line Status. + */ +#define __HAL_PWR_PVD_EXTI_GET_FLAG() (EXTI->PR & (PWR_EXTI_LINE_PVD)) + +/** + * @brief Clear the PVD Exti flag. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_CLEAR_FLAG() (EXTI->PR = (PWR_EXTI_LINE_PVD)) + +/** + * @brief Generates a Software interrupt on PVD EXTI line. + * @retval None + */ +#define __HAL_PWR_PVD_EXTI_GENERATE_SWIT() (EXTI->SWIER |= (PWR_EXTI_LINE_PVD)) + +/** + * @} + */ + +/* Include PWR HAL Extension module */ +#include "stm32f4xx_hal_pwr_ex.h" + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup PWR_Exported_Functions PWR Exported Functions + * @{ + */ + +/** @addtogroup PWR_Exported_Functions_Group1 Initialization and de-initialization functions + * @{ + */ +/* Initialization and de-initialization functions *****************************/ +void HAL_PWR_DeInit(void); +void HAL_PWR_EnableBkUpAccess(void); +void HAL_PWR_DisableBkUpAccess(void); +/** + * @} + */ + +/** @addtogroup PWR_Exported_Functions_Group2 Peripheral Control functions + * @{ + */ +/* Peripheral Control functions **********************************************/ +/* PVD configuration */ +void HAL_PWR_ConfigPVD(PWR_PVDTypeDef *sConfigPVD); +void HAL_PWR_EnablePVD(void); +void HAL_PWR_DisablePVD(void); + +/* WakeUp pins configuration */ +void HAL_PWR_EnableWakeUpPin(uint32_t WakeUpPinx); +void HAL_PWR_DisableWakeUpPin(uint32_t WakeUpPinx); + +/* Low Power modes entry */ +void HAL_PWR_EnterSTOPMode(uint32_t Regulator, uint8_t STOPEntry); +void HAL_PWR_EnterSLEEPMode(uint32_t Regulator, uint8_t SLEEPEntry); +void HAL_PWR_EnterSTANDBYMode(void); + +/* Power PVD IRQ Handler */ +void HAL_PWR_PVD_IRQHandler(void); +void HAL_PWR_PVDCallback(void); + +/* Cortex System Control functions *******************************************/ +void HAL_PWR_EnableSleepOnExit(void); +void HAL_PWR_DisableSleepOnExit(void); +void HAL_PWR_EnableSEVOnPend(void); +void HAL_PWR_DisableSEVOnPend(void); +/** + * @} + */ + +/** + * @} + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup PWR_Private_Constants PWR Private Constants + * @{ + */ + +/** @defgroup PWR_PVD_EXTI_Line PWR PVD EXTI Line + * @{ + */ +#define PWR_EXTI_LINE_PVD ((uint32_t)EXTI_IMR_MR16) /*!< External interrupt line 16 Connected to the PVD EXTI Line */ +/** + * @} + */ + +/** @defgroup PWR_register_alias_address PWR Register alias address + * @{ + */ +/* ------------- PWR registers bit address in the alias region ---------------*/ +#define PWR_OFFSET (PWR_BASE - PERIPH_BASE) +#define PWR_CR_OFFSET 0x00U +#define PWR_CSR_OFFSET 0x04U +#define PWR_CR_OFFSET_BB (PWR_OFFSET + PWR_CR_OFFSET) +#define PWR_CSR_OFFSET_BB (PWR_OFFSET + PWR_CSR_OFFSET) +/** + * @} + */ + +/** @defgroup PWR_CR_register_alias PWR CR Register alias address + * @{ + */ +/* --- CR Register ---*/ +/* Alias word address of DBP bit */ +#define DBP_BIT_NUMBER PWR_CR_DBP_Pos +#define CR_DBP_BB (uint32_t)(PERIPH_BB_BASE + (PWR_CR_OFFSET_BB * 32U) + (DBP_BIT_NUMBER * 4U)) + +/* Alias word address of PVDE bit */ +#define PVDE_BIT_NUMBER PWR_CR_PVDE_Pos 
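/* Bit-band alias arithmetic (Cortex-M4 peripheral bit-banding): a bit at position "bit" of a
   register located at byte offset "ofs" inside the 0x40000000 peripheral region is aliased to the
   word at PERIPH_BB_BASE + (ofs * 32U) + (bit * 4U). Writing 0U or 1U to that alias word clears or
   sets just that single bit, with no read-modify-write; CR_PVDE_BB below is such an alias for the
   PVDE bit of PWR->CR and is the address HAL_PWR_EnablePVD()/HAL_PWR_DisablePVD() write to. */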
+#define CR_PVDE_BB (uint32_t)(PERIPH_BB_BASE + (PWR_CR_OFFSET_BB * 32U) + (PVDE_BIT_NUMBER * 4U)) + +/* Alias word address of VOS bit */ +#define VOS_BIT_NUMBER PWR_CR_VOS_Pos +#define CR_VOS_BB (uint32_t)(PERIPH_BB_BASE + (PWR_CR_OFFSET_BB * 32U) + (VOS_BIT_NUMBER * 4U)) +/** + * @} + */ + +/** @defgroup PWR_CSR_register_alias PWR CSR Register alias address + * @{ + */ +/* --- CSR Register ---*/ +/* Alias word address of EWUP bit */ +#define EWUP_BIT_NUMBER PWR_CSR_EWUP_Pos +#define CSR_EWUP_BB (PERIPH_BB_BASE + (PWR_CSR_OFFSET_BB * 32U) + (EWUP_BIT_NUMBER * 4U)) +/** + * @} + */ + +/** + * @} + */ +/* Private macros ------------------------------------------------------------*/ +/** @defgroup PWR_Private_Macros PWR Private Macros + * @{ + */ + +/** @defgroup PWR_IS_PWR_Definitions PWR Private macros to check input parameters + * @{ + */ +#define IS_PWR_PVD_LEVEL(LEVEL) (((LEVEL) == PWR_PVDLEVEL_0) || ((LEVEL) == PWR_PVDLEVEL_1)|| \ + ((LEVEL) == PWR_PVDLEVEL_2) || ((LEVEL) == PWR_PVDLEVEL_3)|| \ + ((LEVEL) == PWR_PVDLEVEL_4) || ((LEVEL) == PWR_PVDLEVEL_5)|| \ + ((LEVEL) == PWR_PVDLEVEL_6) || ((LEVEL) == PWR_PVDLEVEL_7)) +#define IS_PWR_PVD_MODE(MODE) (((MODE) == PWR_PVD_MODE_IT_RISING)|| ((MODE) == PWR_PVD_MODE_IT_FALLING) || \ + ((MODE) == PWR_PVD_MODE_IT_RISING_FALLING) || ((MODE) == PWR_PVD_MODE_EVENT_RISING) || \ + ((MODE) == PWR_PVD_MODE_EVENT_FALLING) || ((MODE) == PWR_PVD_MODE_EVENT_RISING_FALLING) || \ + ((MODE) == PWR_PVD_MODE_NORMAL)) +#define IS_PWR_REGULATOR(REGULATOR) (((REGULATOR) == PWR_MAINREGULATOR_ON) || \ + ((REGULATOR) == PWR_LOWPOWERREGULATOR_ON)) + +#define IS_PWR_SLEEP_ENTRY(ENTRY) (((ENTRY) == PWR_SLEEPENTRY_WFI) || \ + ((ENTRY) == PWR_SLEEPENTRY_WFE) || \ + ((ENTRY) == PWR_SLEEPENTRY_WFE_NO_EVT_CLEAR)) + +#define IS_PWR_STOP_ENTRY(ENTRY) (((ENTRY) == PWR_STOPENTRY_WFI) || \ + ((ENTRY) == PWR_STOPENTRY_WFE) || \ + ((ENTRY) == PWR_STOPENTRY_WFE_NO_EVT_CLEAR)) +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + + +#endif /* __STM32F4xx_HAL_PWR_H */ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_pwr_ex.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_pwr_ex.h new file mode 100644 index 0000000..57fd4d9 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_pwr_ex.h @@ -0,0 +1,340 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_pwr_ex.h + * @author MCD Application Team + * @brief Header file of PWR HAL Extension module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_HAL_PWR_EX_H +#define __STM32F4xx_HAL_PWR_EX_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal_def.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @addtogroup PWREx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ +/** @defgroup PWREx_Exported_Constants PWREx Exported Constants + * @{ + */ +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) ||\ + defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) + +/** @defgroup PWREx_Regulator_state_in_UnderDrive_mode PWREx Regulator state in UnderDrive mode + * @{ + */ +#define PWR_MAINREGULATOR_UNDERDRIVE_ON PWR_CR_MRUDS +#define PWR_LOWPOWERREGULATOR_UNDERDRIVE_ON ((uint32_t)(PWR_CR_LPDS | PWR_CR_LPUDS)) +/** + * @} + */ + +/** @defgroup PWREx_Over_Under_Drive_Flag PWREx Over Under Drive Flag + * @{ + */ +#define PWR_FLAG_ODRDY PWR_CSR_ODRDY +#define PWR_FLAG_ODSWRDY PWR_CSR_ODSWRDY +#define PWR_FLAG_UDRDY PWR_CSR_UDSWRDY +/** + * @} + */ +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F446xx || STM32F469xx || STM32F479xx */ + +/** @defgroup PWREx_Regulator_Voltage_Scale PWREx Regulator Voltage Scale + * @{ + */ +#if defined(STM32F405xx) || defined(STM32F407xx) || defined(STM32F415xx) || defined(STM32F417xx) +#define PWR_REGULATOR_VOLTAGE_SCALE1 PWR_CR_VOS /* Scale 1 mode(default value at reset): the maximum value of fHCLK = 168 MHz. */ +#define PWR_REGULATOR_VOLTAGE_SCALE2 0x00000000U /* Scale 2 mode: the maximum value of fHCLK = 144 MHz. */ +#else +#define PWR_REGULATOR_VOLTAGE_SCALE1 PWR_CR_VOS /* Scale 1 mode(default value at reset): the maximum value of fHCLK is 168 MHz. It can be extended to + 180 MHz by activating the over-drive mode. */ +#define PWR_REGULATOR_VOLTAGE_SCALE2 PWR_CR_VOS_1 /* Scale 2 mode: the maximum value of fHCLK is 144 MHz. It can be extended to + 168 MHz by activating the over-drive mode. */ +#define PWR_REGULATOR_VOLTAGE_SCALE3 PWR_CR_VOS_0 /* Scale 3 mode: the maximum value of fHCLK is 120 MHz. 
*/ +#endif /* STM32F405xx || STM32F407xx || STM32F415xx || STM32F417xx */ +/** + * @} + */ +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) || defined(STM32F446xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || \ + defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +/** @defgroup PWREx_WakeUp_Pins PWREx WakeUp Pins + * @{ + */ +#define PWR_WAKEUP_PIN2 0x00000080U +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) || defined(STM32F412Zx) || defined(STM32F412Vx) || \ + defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +#define PWR_WAKEUP_PIN3 0x00000040U +#endif /* STM32F410xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Zx || STM32F412Vx || \ + STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ +/** + * @} + */ +#endif /* STM32F410xx || STM32F446xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || + STM32F413xx || STM32F423xx */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup PWREx_Exported_Constants PWREx Exported Constants + * @{ + */ + +#if defined(STM32F405xx) || defined(STM32F407xx) || defined(STM32F415xx) || defined(STM32F417xx) +/** @brief macros configure the main internal regulator output voltage. + * @param __REGULATOR__ specifies the regulator output voltage to achieve + * a tradeoff between performance and power consumption when the device does + * not operate at the maximum frequency (refer to the datasheets for more details). + * This parameter can be one of the following values: + * @arg PWR_REGULATOR_VOLTAGE_SCALE1: Regulator voltage output Scale 1 mode + * @arg PWR_REGULATOR_VOLTAGE_SCALE2: Regulator voltage output Scale 2 mode + * @retval None + */ +#define __HAL_PWR_VOLTAGESCALING_CONFIG(__REGULATOR__) do { \ + __IO uint32_t tmpreg = 0x00U; \ + MODIFY_REG(PWR->CR, PWR_CR_VOS, (__REGULATOR__)); \ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(PWR->CR, PWR_CR_VOS); \ + UNUSED(tmpreg); \ + } while(0U) +#else +/** @brief macros configure the main internal regulator output voltage. + * @param __REGULATOR__ specifies the regulator output voltage to achieve + * a tradeoff between performance and power consumption when the device does + * not operate at the maximum frequency (refer to the datasheets for more details). + * This parameter can be one of the following values: + * @arg PWR_REGULATOR_VOLTAGE_SCALE1: Regulator voltage output Scale 1 mode + * @arg PWR_REGULATOR_VOLTAGE_SCALE2: Regulator voltage output Scale 2 mode + * @arg PWR_REGULATOR_VOLTAGE_SCALE3: Regulator voltage output Scale 3 mode + * @retval None + */ +#define __HAL_PWR_VOLTAGESCALING_CONFIG(__REGULATOR__) do { \ + __IO uint32_t tmpreg = 0x00U; \ + MODIFY_REG(PWR->CR, PWR_CR_VOS, (__REGULATOR__)); \ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(PWR->CR, PWR_CR_VOS); \ + UNUSED(tmpreg); \ + } while(0U) +#endif /* STM32F405xx || STM32F407xx || STM32F415xx || STM32F417xx */ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) ||\ + defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) +/** @brief Macros to enable or disable the Over drive mode. + * @note These macros can be used only for STM32F42xx/STM3243xx devices. 
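+ * @note  Illustrative enable sequence (a sketch, assuming the PWR interface clock
+ *        is already enabled; HAL_PWREx_EnableOverDrive() performs the equivalent
+ *        sequence with timeout handling):
+ * @code
+ *   __HAL_PWR_OVERDRIVE_ENABLE();
+ *   while (__HAL_PWR_GET_ODRUDR_FLAG(PWR_FLAG_ODRDY) == RESET) { }    // wait for over-drive ready
+ *   __HAL_PWR_OVERDRIVESWITCHING_ENABLE();
+ *   while (__HAL_PWR_GET_ODRUDR_FLAG(PWR_FLAG_ODSWRDY) == RESET) { }  // wait for switching ready
+ * @endcode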
+ */ +#define __HAL_PWR_OVERDRIVE_ENABLE() (*(__IO uint32_t *) CR_ODEN_BB = ENABLE) +#define __HAL_PWR_OVERDRIVE_DISABLE() (*(__IO uint32_t *) CR_ODEN_BB = DISABLE) + +/** @brief Macros to enable or disable the Over drive switching. + * @note These macros can be used only for STM32F42xx/STM3243xx devices. + */ +#define __HAL_PWR_OVERDRIVESWITCHING_ENABLE() (*(__IO uint32_t *) CR_ODSWEN_BB = ENABLE) +#define __HAL_PWR_OVERDRIVESWITCHING_DISABLE() (*(__IO uint32_t *) CR_ODSWEN_BB = DISABLE) + +/** @brief Macros to enable or disable the Under drive mode. + * @note This mode is enabled only with STOP low power mode. + * In this mode, the 1.2V domain is preserved in reduced leakage mode. This + * mode is only available when the main regulator or the low power regulator + * is in low voltage mode. + * @note If the Under-drive mode was enabled, it is automatically disabled after + * exiting Stop mode. + * When the voltage regulator operates in Under-drive mode, an additional + * startup delay is induced when waking up from Stop mode. + */ +#define __HAL_PWR_UNDERDRIVE_ENABLE() (PWR->CR |= (uint32_t)PWR_CR_UDEN) +#define __HAL_PWR_UNDERDRIVE_DISABLE() (PWR->CR &= (uint32_t)(~PWR_CR_UDEN)) + +/** @brief Check PWR flag is set or not. + * @note These macros can be used only for STM32F42xx/STM3243xx devices. + * @param __FLAG__ specifies the flag to check. + * This parameter can be one of the following values: + * @arg PWR_FLAG_ODRDY: This flag indicates that the Over-drive mode + * is ready + * @arg PWR_FLAG_ODSWRDY: This flag indicates that the Over-drive mode + * switching is ready + * @arg PWR_FLAG_UDRDY: This flag indicates that the Under-drive mode + * is enabled in Stop mode + * @retval The new state of __FLAG__ (TRUE or FALSE). + */ +#define __HAL_PWR_GET_ODRUDR_FLAG(__FLAG__) ((PWR->CSR & (__FLAG__)) == (__FLAG__)) + +/** @brief Clear the Under-Drive Ready flag. + * @note These macros can be used only for STM32F42xx/STM3243xx devices. 
+ */ +#define __HAL_PWR_CLEAR_ODRUDR_FLAG() (PWR->CSR |= PWR_FLAG_UDRDY) + +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F446xx || STM32F469xx || STM32F479xx */ +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup PWREx_Exported_Functions PWREx Exported Functions + * @{ + */ + +/** @addtogroup PWREx_Exported_Functions_Group1 + * @{ + */ +void HAL_PWREx_EnableFlashPowerDown(void); +void HAL_PWREx_DisableFlashPowerDown(void); +HAL_StatusTypeDef HAL_PWREx_EnableBkUpReg(void); +HAL_StatusTypeDef HAL_PWREx_DisableBkUpReg(void); +uint32_t HAL_PWREx_GetVoltageRange(void); +HAL_StatusTypeDef HAL_PWREx_ControlVoltageScaling(uint32_t VoltageScaling); + +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) || defined(STM32F401xC) ||\ + defined(STM32F401xE) || defined(STM32F411xE) || defined(STM32F412Zx) || defined(STM32F412Vx) ||\ + defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +void HAL_PWREx_EnableMainRegulatorLowVoltage(void); +void HAL_PWREx_DisableMainRegulatorLowVoltage(void); +void HAL_PWREx_EnableLowRegulatorLowVoltage(void); +void HAL_PWREx_DisableLowRegulatorLowVoltage(void); +#endif /* STM32F410xx || STM32F401xC || STM32F401xE || STM32F411xE || STM32F412Zx || STM32F412Vx ||\ + STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F446xx) ||\ + defined(STM32F469xx) || defined(STM32F479xx) +HAL_StatusTypeDef HAL_PWREx_EnableOverDrive(void); +HAL_StatusTypeDef HAL_PWREx_DisableOverDrive(void); +HAL_StatusTypeDef HAL_PWREx_EnterUnderDriveSTOPMode(uint32_t Regulator, uint8_t STOPEntry); +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F446xx || STM32F469xx || STM32F479xx */ + +/** + * @} + */ + +/** + * @} + */ +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup PWREx_Private_Constants PWREx Private Constants + * @{ + */ + +/** @defgroup PWREx_register_alias_address PWREx Register alias address + * @{ + */ +/* ------------- PWR registers bit address in the alias region ---------------*/ +/* --- CR Register ---*/ +/* Alias word address of FPDS bit */ +#define FPDS_BIT_NUMBER PWR_CR_FPDS_Pos +#define CR_FPDS_BB (uint32_t)(PERIPH_BB_BASE + (PWR_CR_OFFSET_BB * 32U) + (FPDS_BIT_NUMBER * 4U)) + +/* Alias word address of ODEN bit */ +#define ODEN_BIT_NUMBER PWR_CR_ODEN_Pos +#define CR_ODEN_BB (uint32_t)(PERIPH_BB_BASE + (PWR_CR_OFFSET_BB * 32U) + (ODEN_BIT_NUMBER * 4U)) + +/* Alias word address of ODSWEN bit */ +#define ODSWEN_BIT_NUMBER PWR_CR_ODSWEN_Pos +#define CR_ODSWEN_BB (uint32_t)(PERIPH_BB_BASE + (PWR_CR_OFFSET_BB * 32U) + (ODSWEN_BIT_NUMBER * 4U)) + +/* Alias word address of MRLVDS bit */ +#define MRLVDS_BIT_NUMBER PWR_CR_MRLVDS_Pos +#define CR_MRLVDS_BB (uint32_t)(PERIPH_BB_BASE + (PWR_CR_OFFSET_BB * 32U) + (MRLVDS_BIT_NUMBER * 4U)) + +/* Alias word address of LPLVDS bit */ +#define LPLVDS_BIT_NUMBER PWR_CR_LPLVDS_Pos +#define CR_LPLVDS_BB (uint32_t)(PERIPH_BB_BASE + (PWR_CR_OFFSET_BB * 32U) + (LPLVDS_BIT_NUMBER * 4U)) + + /** + * @} + */ + +/** @defgroup PWREx_CSR_register_alias PWRx CSR Register alias address + * @{ + */ +/* --- CSR Register ---*/ +/* Alias word address of BRE 
bit */ +#define BRE_BIT_NUMBER PWR_CSR_BRE_Pos +#define CSR_BRE_BB (uint32_t)(PERIPH_BB_BASE + (PWR_CSR_OFFSET_BB * 32U) + (BRE_BIT_NUMBER * 4U)) + +/** + * @} + */ + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup PWREx_Private_Macros PWREx Private Macros + * @{ + */ + +/** @defgroup PWREx_IS_PWR_Definitions PWREx Private macros to check input parameters + * @{ + */ +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) ||\ + defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) +#define IS_PWR_REGULATOR_UNDERDRIVE(REGULATOR) (((REGULATOR) == PWR_MAINREGULATOR_UNDERDRIVE_ON) || \ + ((REGULATOR) == PWR_LOWPOWERREGULATOR_UNDERDRIVE_ON)) +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F446xx || STM32F469xx || STM32F479xx */ + +#if defined(STM32F405xx) || defined(STM32F407xx) || defined(STM32F415xx) || defined(STM32F417xx) +#define IS_PWR_VOLTAGE_SCALING_RANGE(VOLTAGE) (((VOLTAGE) == PWR_REGULATOR_VOLTAGE_SCALE1) || \ + ((VOLTAGE) == PWR_REGULATOR_VOLTAGE_SCALE2)) +#else +#define IS_PWR_VOLTAGE_SCALING_RANGE(VOLTAGE) (((VOLTAGE) == PWR_REGULATOR_VOLTAGE_SCALE1) || \ + ((VOLTAGE) == PWR_REGULATOR_VOLTAGE_SCALE2) || \ + ((VOLTAGE) == PWR_REGULATOR_VOLTAGE_SCALE3)) +#endif /* STM32F405xx || STM32F407xx || STM32F415xx || STM32F417xx */ + +#if defined(STM32F446xx) +#define IS_PWR_WAKEUP_PIN(PIN) (((PIN) == PWR_WAKEUP_PIN1) || ((PIN) == PWR_WAKEUP_PIN2)) +#elif defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) || defined(STM32F412Zx) ||\ + defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) ||\ + defined(STM32F423xx) +#define IS_PWR_WAKEUP_PIN(PIN) (((PIN) == PWR_WAKEUP_PIN1) || ((PIN) == PWR_WAKEUP_PIN2) || \ + ((PIN) == PWR_WAKEUP_PIN3)) +#else +#define IS_PWR_WAKEUP_PIN(PIN) ((PIN) == PWR_WAKEUP_PIN1) +#endif /* STM32F446xx */ +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + + +#endif /* __STM32F4xx_HAL_PWR_EX_H */ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_rcc.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_rcc.h new file mode 100644 index 0000000..cf01e51 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_rcc.h @@ -0,0 +1,1458 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_rcc.h + * @author MCD Application Team + * @brief Header file of RCC HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_HAL_RCC_H +#define __STM32F4xx_HAL_RCC_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal_def.h" + +/* Include RCC HAL Extended module */ +/* (include on top of file since RCC structures are defined in extended file) */ +#include "stm32f4xx_hal_rcc_ex.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @addtogroup RCC + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup RCC_Exported_Types RCC Exported Types + * @{ + */ + +/** + * @brief RCC Internal/External Oscillator (HSE, HSI, LSE and LSI) configuration structure definition + */ +typedef struct +{ + uint32_t OscillatorType; /*!< The oscillators to be configured. + This parameter can be a value of @ref RCC_Oscillator_Type */ + + uint32_t HSEState; /*!< The new state of the HSE. + This parameter can be a value of @ref RCC_HSE_Config */ + + uint32_t LSEState; /*!< The new state of the LSE. + This parameter can be a value of @ref RCC_LSE_Config */ + + uint32_t HSIState; /*!< The new state of the HSI. + This parameter can be a value of @ref RCC_HSI_Config */ + + uint32_t HSICalibrationValue; /*!< The HSI calibration trimming value (default is RCC_HSICALIBRATION_DEFAULT). + This parameter must be a number between Min_Data = 0x00 and Max_Data = 0x1F */ + + uint32_t LSIState; /*!< The new state of the LSI. + This parameter can be a value of @ref RCC_LSI_Config */ + + RCC_PLLInitTypeDef PLL; /*!< PLL structure parameters */ +} RCC_OscInitTypeDef; + +/** + * @brief RCC System, AHB and APB busses clock configuration structure definition + */ +typedef struct +{ + uint32_t ClockType; /*!< The clock to be configured. + This parameter can be a value of @ref RCC_System_Clock_Type */ + + uint32_t SYSCLKSource; /*!< The clock source (SYSCLKS) used as system clock. + This parameter can be a value of @ref RCC_System_Clock_Source */ + + uint32_t AHBCLKDivider; /*!< The AHB clock (HCLK) divider. This clock is derived from the system clock (SYSCLK). + This parameter can be a value of @ref RCC_AHB_Clock_Source */ + + uint32_t APB1CLKDivider; /*!< The APB1 clock (PCLK1) divider. This clock is derived from the AHB clock (HCLK). + This parameter can be a value of @ref RCC_APB1_APB2_Clock_Source */ + + uint32_t APB2CLKDivider; /*!< The APB2 clock (PCLK2) divider. This clock is derived from the AHB clock (HCLK). 
+ This parameter can be a value of @ref RCC_APB1_APB2_Clock_Source */ + +} RCC_ClkInitTypeDef; + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup RCC_Exported_Constants RCC Exported Constants + * @{ + */ + +/** @defgroup RCC_Oscillator_Type Oscillator Type + * @{ + */ +#define RCC_OSCILLATORTYPE_NONE 0x00000000U +#define RCC_OSCILLATORTYPE_HSE 0x00000001U +#define RCC_OSCILLATORTYPE_HSI 0x00000002U +#define RCC_OSCILLATORTYPE_LSE 0x00000004U +#define RCC_OSCILLATORTYPE_LSI 0x00000008U +/** + * @} + */ + +/** @defgroup RCC_HSE_Config HSE Config + * @{ + */ +#define RCC_HSE_OFF 0x00000000U +#define RCC_HSE_ON RCC_CR_HSEON +#define RCC_HSE_BYPASS ((uint32_t)(RCC_CR_HSEBYP | RCC_CR_HSEON)) +/** + * @} + */ + +/** @defgroup RCC_LSE_Config LSE Config + * @{ + */ +#define RCC_LSE_OFF 0x00000000U +#define RCC_LSE_ON RCC_BDCR_LSEON +#define RCC_LSE_BYPASS ((uint32_t)(RCC_BDCR_LSEBYP | RCC_BDCR_LSEON)) +/** + * @} + */ + +/** @defgroup RCC_HSI_Config HSI Config + * @{ + */ +#define RCC_HSI_OFF ((uint8_t)0x00) +#define RCC_HSI_ON ((uint8_t)0x01) + +#define RCC_HSICALIBRATION_DEFAULT 0x10U /* Default HSI calibration trimming value */ +/** + * @} + */ + +/** @defgroup RCC_LSI_Config LSI Config + * @{ + */ +#define RCC_LSI_OFF ((uint8_t)0x00) +#define RCC_LSI_ON ((uint8_t)0x01) +/** + * @} + */ + +/** @defgroup RCC_PLL_Config PLL Config + * @{ + */ +#define RCC_PLL_NONE ((uint8_t)0x00) +#define RCC_PLL_OFF ((uint8_t)0x01) +#define RCC_PLL_ON ((uint8_t)0x02) +/** + * @} + */ + +/** @defgroup RCC_PLLP_Clock_Divider PLLP Clock Divider + * @{ + */ +#define RCC_PLLP_DIV2 0x00000002U +#define RCC_PLLP_DIV4 0x00000004U +#define RCC_PLLP_DIV6 0x00000006U +#define RCC_PLLP_DIV8 0x00000008U +/** + * @} + */ + +/** @defgroup RCC_PLL_Clock_Source PLL Clock Source + * @{ + */ +#define RCC_PLLSOURCE_HSI RCC_PLLCFGR_PLLSRC_HSI +#define RCC_PLLSOURCE_HSE RCC_PLLCFGR_PLLSRC_HSE +/** + * @} + */ + +/** @defgroup RCC_System_Clock_Type System Clock Type + * @{ + */ +#define RCC_CLOCKTYPE_SYSCLK 0x00000001U +#define RCC_CLOCKTYPE_HCLK 0x00000002U +#define RCC_CLOCKTYPE_PCLK1 0x00000004U +#define RCC_CLOCKTYPE_PCLK2 0x00000008U +/** + * @} + */ + +/** @defgroup RCC_System_Clock_Source System Clock Source + * @note The RCC_SYSCLKSOURCE_PLLRCLK parameter is available only for + * STM32F446xx devices. + * @{ + */ +#define RCC_SYSCLKSOURCE_HSI RCC_CFGR_SW_HSI +#define RCC_SYSCLKSOURCE_HSE RCC_CFGR_SW_HSE +#define RCC_SYSCLKSOURCE_PLLCLK RCC_CFGR_SW_PLL +#define RCC_SYSCLKSOURCE_PLLRCLK ((uint32_t)(RCC_CFGR_SW_0 | RCC_CFGR_SW_1)) +/** + * @} + */ + +/** @defgroup RCC_System_Clock_Source_Status System Clock Source Status + * @note The RCC_SYSCLKSOURCE_STATUS_PLLRCLK parameter is available only for + * STM32F446xx devices. 
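+ * @note The current source can be read back with __HAL_RCC_GET_SYSCLK_SOURCE()
+ *       (defined further down in this file). Illustrative check (a sketch):
+ * @code
+ *   if (__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_SYSCLKSOURCE_STATUS_PLLCLK)
+ *   {
+ *     // the main PLL is currently driving SYSCLK
+ *   }
+ * @endcode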
+ * @{ + */ +#define RCC_SYSCLKSOURCE_STATUS_HSI RCC_CFGR_SWS_HSI /*!< HSI used as system clock */ +#define RCC_SYSCLKSOURCE_STATUS_HSE RCC_CFGR_SWS_HSE /*!< HSE used as system clock */ +#define RCC_SYSCLKSOURCE_STATUS_PLLCLK RCC_CFGR_SWS_PLL /*!< PLL used as system clock */ +#define RCC_SYSCLKSOURCE_STATUS_PLLRCLK ((uint32_t)(RCC_CFGR_SWS_0 | RCC_CFGR_SWS_1)) /*!< PLLR used as system clock */ +/** + * @} + */ + +/** @defgroup RCC_AHB_Clock_Source AHB Clock Source + * @{ + */ +#define RCC_SYSCLK_DIV1 RCC_CFGR_HPRE_DIV1 +#define RCC_SYSCLK_DIV2 RCC_CFGR_HPRE_DIV2 +#define RCC_SYSCLK_DIV4 RCC_CFGR_HPRE_DIV4 +#define RCC_SYSCLK_DIV8 RCC_CFGR_HPRE_DIV8 +#define RCC_SYSCLK_DIV16 RCC_CFGR_HPRE_DIV16 +#define RCC_SYSCLK_DIV64 RCC_CFGR_HPRE_DIV64 +#define RCC_SYSCLK_DIV128 RCC_CFGR_HPRE_DIV128 +#define RCC_SYSCLK_DIV256 RCC_CFGR_HPRE_DIV256 +#define RCC_SYSCLK_DIV512 RCC_CFGR_HPRE_DIV512 +/** + * @} + */ + +/** @defgroup RCC_APB1_APB2_Clock_Source APB1/APB2 Clock Source + * @{ + */ +#define RCC_HCLK_DIV1 RCC_CFGR_PPRE1_DIV1 +#define RCC_HCLK_DIV2 RCC_CFGR_PPRE1_DIV2 +#define RCC_HCLK_DIV4 RCC_CFGR_PPRE1_DIV4 +#define RCC_HCLK_DIV8 RCC_CFGR_PPRE1_DIV8 +#define RCC_HCLK_DIV16 RCC_CFGR_PPRE1_DIV16 +/** + * @} + */ + +/** @defgroup RCC_RTC_Clock_Source RTC Clock Source + * @{ + */ +#define RCC_RTCCLKSOURCE_NO_CLK 0x00000000U +#define RCC_RTCCLKSOURCE_LSE 0x00000100U +#define RCC_RTCCLKSOURCE_LSI 0x00000200U +#define RCC_RTCCLKSOURCE_HSE_DIVX 0x00000300U +#define RCC_RTCCLKSOURCE_HSE_DIV2 0x00020300U +#define RCC_RTCCLKSOURCE_HSE_DIV3 0x00030300U +#define RCC_RTCCLKSOURCE_HSE_DIV4 0x00040300U +#define RCC_RTCCLKSOURCE_HSE_DIV5 0x00050300U +#define RCC_RTCCLKSOURCE_HSE_DIV6 0x00060300U +#define RCC_RTCCLKSOURCE_HSE_DIV7 0x00070300U +#define RCC_RTCCLKSOURCE_HSE_DIV8 0x00080300U +#define RCC_RTCCLKSOURCE_HSE_DIV9 0x00090300U +#define RCC_RTCCLKSOURCE_HSE_DIV10 0x000A0300U +#define RCC_RTCCLKSOURCE_HSE_DIV11 0x000B0300U +#define RCC_RTCCLKSOURCE_HSE_DIV12 0x000C0300U +#define RCC_RTCCLKSOURCE_HSE_DIV13 0x000D0300U +#define RCC_RTCCLKSOURCE_HSE_DIV14 0x000E0300U +#define RCC_RTCCLKSOURCE_HSE_DIV15 0x000F0300U +#define RCC_RTCCLKSOURCE_HSE_DIV16 0x00100300U +#define RCC_RTCCLKSOURCE_HSE_DIV17 0x00110300U +#define RCC_RTCCLKSOURCE_HSE_DIV18 0x00120300U +#define RCC_RTCCLKSOURCE_HSE_DIV19 0x00130300U +#define RCC_RTCCLKSOURCE_HSE_DIV20 0x00140300U +#define RCC_RTCCLKSOURCE_HSE_DIV21 0x00150300U +#define RCC_RTCCLKSOURCE_HSE_DIV22 0x00160300U +#define RCC_RTCCLKSOURCE_HSE_DIV23 0x00170300U +#define RCC_RTCCLKSOURCE_HSE_DIV24 0x00180300U +#define RCC_RTCCLKSOURCE_HSE_DIV25 0x00190300U +#define RCC_RTCCLKSOURCE_HSE_DIV26 0x001A0300U +#define RCC_RTCCLKSOURCE_HSE_DIV27 0x001B0300U +#define RCC_RTCCLKSOURCE_HSE_DIV28 0x001C0300U +#define RCC_RTCCLKSOURCE_HSE_DIV29 0x001D0300U +#define RCC_RTCCLKSOURCE_HSE_DIV30 0x001E0300U +#define RCC_RTCCLKSOURCE_HSE_DIV31 0x001F0300U +/** + * @} + */ + +/** @defgroup RCC_MCO_Index MCO Index + * @{ + */ +#define RCC_MCO1 0x00000000U +#define RCC_MCO2 0x00000001U +/** + * @} + */ + +/** @defgroup RCC_MCO1_Clock_Source MCO1 Clock Source + * @{ + */ +#define RCC_MCO1SOURCE_HSI 0x00000000U +#define RCC_MCO1SOURCE_LSE RCC_CFGR_MCO1_0 +#define RCC_MCO1SOURCE_HSE RCC_CFGR_MCO1_1 +#define RCC_MCO1SOURCE_PLLCLK RCC_CFGR_MCO1 +/** + * @} + */ + +/** @defgroup RCC_MCOx_Clock_Prescaler MCOx Clock Prescaler + * @{ + */ +#define RCC_MCODIV_1 0x00000000U +#define RCC_MCODIV_2 RCC_CFGR_MCO1PRE_2 +#define RCC_MCODIV_3 ((uint32_t)RCC_CFGR_MCO1PRE_0 | RCC_CFGR_MCO1PRE_2) +#define RCC_MCODIV_4 
((uint32_t)RCC_CFGR_MCO1PRE_1 | RCC_CFGR_MCO1PRE_2) +#define RCC_MCODIV_5 RCC_CFGR_MCO1PRE +/** + * @} + */ + +/** @defgroup RCC_Interrupt Interrupts + * @{ + */ +#define RCC_IT_LSIRDY ((uint8_t)0x01) +#define RCC_IT_LSERDY ((uint8_t)0x02) +#define RCC_IT_HSIRDY ((uint8_t)0x04) +#define RCC_IT_HSERDY ((uint8_t)0x08) +#define RCC_IT_PLLRDY ((uint8_t)0x10) +#define RCC_IT_PLLI2SRDY ((uint8_t)0x20) +#define RCC_IT_CSS ((uint8_t)0x80) +/** + * @} + */ + +/** @defgroup RCC_Flag Flags + * Elements values convention: 0XXYYYYYb + * - YYYYY : Flag position in the register + * - 0XX : Register index + * - 01: CR register + * - 10: BDCR register + * - 11: CSR register + * @{ + */ +/* Flags in the CR register */ +#define RCC_FLAG_HSIRDY ((uint8_t)0x21) +#define RCC_FLAG_HSERDY ((uint8_t)0x31) +#define RCC_FLAG_PLLRDY ((uint8_t)0x39) +#define RCC_FLAG_PLLI2SRDY ((uint8_t)0x3B) + +/* Flags in the BDCR register */ +#define RCC_FLAG_LSERDY ((uint8_t)0x41) + +/* Flags in the CSR register */ +#define RCC_FLAG_LSIRDY ((uint8_t)0x61) +#define RCC_FLAG_BORRST ((uint8_t)0x79) +#define RCC_FLAG_PINRST ((uint8_t)0x7A) +#define RCC_FLAG_PORRST ((uint8_t)0x7B) +#define RCC_FLAG_SFTRST ((uint8_t)0x7C) +#define RCC_FLAG_IWDGRST ((uint8_t)0x7D) +#define RCC_FLAG_WWDGRST ((uint8_t)0x7E) +#define RCC_FLAG_LPWRRST ((uint8_t)0x7F) +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup RCC_Exported_Macros RCC Exported Macros + * @{ + */ + +/** @defgroup RCC_AHB1_Clock_Enable_Disable AHB1 Peripheral Clock Enable Disable + * @brief Enable or disable the AHB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
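+ * @note   Illustrative usage (a sketch; the GPIO types and constants come from
+ *         stm32f4xx_hal_gpio.h):
+ * @code
+ *   GPIO_InitTypeDef gpio = {0};
+ *   __HAL_RCC_GPIOA_CLK_ENABLE();        // clock the port before accessing its registers
+ *   gpio.Pin   = GPIO_PIN_5;
+ *   gpio.Mode  = GPIO_MODE_OUTPUT_PP;
+ *   gpio.Pull  = GPIO_NOPULL;
+ *   gpio.Speed = GPIO_SPEED_FREQ_LOW;
+ *   HAL_GPIO_Init(GPIOA, &gpio);
+ * @endcode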
+ * @{ + */ +#define __HAL_RCC_GPIOA_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOAEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOAEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOB_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOBEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOBEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOCEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOH_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOHEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOHEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_DMA1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA1EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_DMA2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA2EN);\ + UNUSED(tmpreg); \ + } while(0U) + +#define __HAL_RCC_GPIOA_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOAEN)) +#define __HAL_RCC_GPIOB_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOBEN)) +#define __HAL_RCC_GPIOC_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOCEN)) +#define __HAL_RCC_GPIOH_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOHEN)) +#define __HAL_RCC_DMA1_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_DMA1EN)) +#define __HAL_RCC_DMA2_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_DMA2EN)) +/** + * @} + */ + +/** @defgroup RCC_AHB1_Peripheral_Clock_Enable_Disable_Status AHB1 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the AHB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ * @{ + */ +#define __HAL_RCC_GPIOA_IS_CLK_ENABLED() ((RCC->AHB1ENR &(RCC_AHB1ENR_GPIOAEN)) != RESET) +#define __HAL_RCC_GPIOB_IS_CLK_ENABLED() ((RCC->AHB1ENR &(RCC_AHB1ENR_GPIOBEN)) != RESET) +#define __HAL_RCC_GPIOC_IS_CLK_ENABLED() ((RCC->AHB1ENR &(RCC_AHB1ENR_GPIOCEN)) != RESET) +#define __HAL_RCC_GPIOH_IS_CLK_ENABLED() ((RCC->AHB1ENR &(RCC_AHB1ENR_GPIOHEN)) != RESET) +#define __HAL_RCC_DMA1_IS_CLK_ENABLED() ((RCC->AHB1ENR &(RCC_AHB1ENR_DMA1EN)) != RESET) +#define __HAL_RCC_DMA2_IS_CLK_ENABLED() ((RCC->AHB1ENR &(RCC_AHB1ENR_DMA2EN)) != RESET) + +#define __HAL_RCC_GPIOA_IS_CLK_DISABLED() ((RCC->AHB1ENR &(RCC_AHB1ENR_GPIOAEN)) == RESET) +#define __HAL_RCC_GPIOB_IS_CLK_DISABLED() ((RCC->AHB1ENR &(RCC_AHB1ENR_GPIOBEN)) == RESET) +#define __HAL_RCC_GPIOC_IS_CLK_DISABLED() ((RCC->AHB1ENR &(RCC_AHB1ENR_GPIOCEN)) == RESET) +#define __HAL_RCC_GPIOH_IS_CLK_DISABLED() ((RCC->AHB1ENR &(RCC_AHB1ENR_GPIOHEN)) == RESET) +#define __HAL_RCC_DMA1_IS_CLK_DISABLED() ((RCC->AHB1ENR &(RCC_AHB1ENR_DMA1EN)) == RESET) +#define __HAL_RCC_DMA2_IS_CLK_DISABLED() ((RCC->AHB1ENR &(RCC_AHB1ENR_DMA2EN)) == RESET) +/** + * @} + */ + +/** @defgroup RCC_APB1_Clock_Enable_Disable APB1 Peripheral Clock Enable Disable + * @brief Enable or disable the Low Speed APB (APB1) peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_TIM5_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM5EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM5EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_WWDG_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_WWDGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_WWDGEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SPI2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI2EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_USART2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_USART2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_USART2EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_I2C1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C1EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_I2C2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C2EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_PWR_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_PWREN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_PWREN);\ + UNUSED(tmpreg); \ + } while(0U) + +#define __HAL_RCC_TIM5_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM5EN)) +#define __HAL_RCC_WWDG_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_WWDGEN)) +#define __HAL_RCC_SPI2_CLK_DISABLE() 
(RCC->APB1ENR &= ~(RCC_APB1ENR_SPI2EN)) +#define __HAL_RCC_USART2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_USART2EN)) +#define __HAL_RCC_I2C1_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_I2C1EN)) +#define __HAL_RCC_I2C2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_I2C2EN)) +#define __HAL_RCC_PWR_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_PWREN)) +/** + * @} + */ + +/** @defgroup RCC_APB1_Peripheral_Clock_Enable_Disable_Status APB1 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the APB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_TIM5_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM5EN)) != RESET) +#define __HAL_RCC_WWDG_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_WWDGEN)) != RESET) +#define __HAL_RCC_SPI2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI2EN)) != RESET) +#define __HAL_RCC_USART2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART2EN)) != RESET) +#define __HAL_RCC_I2C1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C1EN)) != RESET) +#define __HAL_RCC_I2C2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C2EN)) != RESET) +#define __HAL_RCC_PWR_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_PWREN)) != RESET) + +#define __HAL_RCC_TIM5_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM5EN)) == RESET) +#define __HAL_RCC_WWDG_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_WWDGEN)) == RESET) +#define __HAL_RCC_SPI2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI2EN)) == RESET) +#define __HAL_RCC_USART2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART2EN)) == RESET) +#define __HAL_RCC_I2C1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C1EN)) == RESET) +#define __HAL_RCC_I2C2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C2EN)) == RESET) +#define __HAL_RCC_PWR_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_PWREN)) == RESET) +/** + * @} + */ + +/** @defgroup RCC_APB2_Clock_Enable_Disable APB2 Peripheral Clock Enable Disable + * @brief Enable or disable the High Speed APB (APB2) peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ * @{ + */ +#define __HAL_RCC_TIM1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM1EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_USART1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_USART1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_USART1EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_USART6_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_USART6EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_USART6EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_ADC1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC1EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SPI1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI1EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SYSCFG_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SYSCFGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SYSCFGEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM9_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM9EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM9EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM11_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM11EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM11EN);\ + UNUSED(tmpreg); \ + } while(0U) + +#define __HAL_RCC_TIM1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM1EN)) +#define __HAL_RCC_USART1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_USART1EN)) +#define __HAL_RCC_USART6_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_USART6EN)) +#define __HAL_RCC_ADC1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_ADC1EN)) +#define __HAL_RCC_SPI1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI1EN)) +#define __HAL_RCC_SYSCFG_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SYSCFGEN)) +#define __HAL_RCC_TIM9_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM9EN)) +#define __HAL_RCC_TIM11_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM11EN)) +/** + * @} + */ + +/** @defgroup RCC_APB2_Peripheral_Clock_Enable_Disable_Status APB2 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the APB2 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ * @{ + */ +#define __HAL_RCC_TIM1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM1EN)) != RESET) +#define __HAL_RCC_USART1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_USART1EN)) != RESET) +#define __HAL_RCC_USART6_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_USART6EN)) != RESET) +#define __HAL_RCC_ADC1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC1EN)) != RESET) +#define __HAL_RCC_SPI1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI1EN)) != RESET) +#define __HAL_RCC_SYSCFG_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SYSCFGEN)) != RESET) +#define __HAL_RCC_TIM9_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM9EN)) != RESET) +#define __HAL_RCC_TIM11_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM11EN)) != RESET) + +#define __HAL_RCC_TIM1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM1EN)) == RESET) +#define __HAL_RCC_USART1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_USART1EN)) == RESET) +#define __HAL_RCC_USART6_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_USART6EN)) == RESET) +#define __HAL_RCC_ADC1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC1EN)) == RESET) +#define __HAL_RCC_SPI1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI1EN)) == RESET) +#define __HAL_RCC_SYSCFG_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SYSCFGEN)) == RESET) +#define __HAL_RCC_TIM9_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM9EN)) == RESET) +#define __HAL_RCC_TIM11_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM11EN)) == RESET) +/** + * @} + */ + +/** @defgroup RCC_AHB1_Force_Release_Reset AHB1 Force Release Reset + * @brief Force or release AHB1 peripheral reset. + * @{ + */ +#define __HAL_RCC_GPIOA_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOARST)) +#define __HAL_RCC_GPIOB_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOBRST)) +#define __HAL_RCC_GPIOC_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOCRST)) +#define __HAL_RCC_GPIOH_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOHRST)) +#define __HAL_RCC_DMA1_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_DMA1RST)) +#define __HAL_RCC_DMA2_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_DMA2RST)) + +#define __HAL_RCC_AHB1_RELEASE_RESET() (RCC->AHB1RSTR = 0x00U) +#define __HAL_RCC_GPIOA_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOARST)) +#define __HAL_RCC_GPIOB_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOBRST)) +#define __HAL_RCC_GPIOC_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOCRST)) +#define __HAL_RCC_GPIOH_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOHRST)) +#define __HAL_RCC_DMA1_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_DMA1RST)) +#define __HAL_RCC_DMA2_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_DMA2RST)) +/** + * @} + */ + +/** @defgroup RCC_APB1_Force_Release_Reset APB1 Force Release Reset + * @brief Force or release APB1 peripheral reset. 
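+ * @note  Illustrative reset pulse (a sketch):
+ * @code
+ *   __HAL_RCC_USART2_FORCE_RESET();      // hold USART2 in reset, its registers return to reset values
+ *   __HAL_RCC_USART2_RELEASE_RESET();    // release the reset line again
+ * @endcode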
+ * @{ + */ +#define __HAL_RCC_TIM5_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM5RST)) +#define __HAL_RCC_WWDG_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_WWDGRST)) +#define __HAL_RCC_SPI2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_SPI2RST)) +#define __HAL_RCC_USART2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_USART2RST)) +#define __HAL_RCC_I2C1_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_I2C1RST)) +#define __HAL_RCC_I2C2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_I2C2RST)) +#define __HAL_RCC_PWR_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_PWRRST)) + +#define __HAL_RCC_APB1_RELEASE_RESET() (RCC->APB1RSTR = 0x00U) +#define __HAL_RCC_TIM5_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM5RST)) +#define __HAL_RCC_WWDG_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_WWDGRST)) +#define __HAL_RCC_SPI2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_SPI2RST)) +#define __HAL_RCC_USART2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_USART2RST)) +#define __HAL_RCC_I2C1_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_I2C1RST)) +#define __HAL_RCC_I2C2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_I2C2RST)) +#define __HAL_RCC_PWR_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_PWRRST)) +/** + * @} + */ + +/** @defgroup RCC_APB2_Force_Release_Reset APB2 Force Release Reset + * @brief Force or release APB2 peripheral reset. + * @{ + */ +#define __HAL_RCC_TIM1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM1RST)) +#define __HAL_RCC_USART1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_USART1RST)) +#define __HAL_RCC_USART6_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_USART6RST)) +#define __HAL_RCC_ADC_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_ADCRST)) +#define __HAL_RCC_SPI1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI1RST)) +#define __HAL_RCC_SYSCFG_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SYSCFGRST)) +#define __HAL_RCC_TIM9_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM9RST)) +#define __HAL_RCC_TIM11_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM11RST)) + +#define __HAL_RCC_APB2_RELEASE_RESET() (RCC->APB2RSTR = 0x00U) +#define __HAL_RCC_TIM1_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM1RST)) +#define __HAL_RCC_USART1_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_USART1RST)) +#define __HAL_RCC_USART6_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_USART6RST)) +#define __HAL_RCC_ADC_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_ADCRST)) +#define __HAL_RCC_SPI1_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI1RST)) +#define __HAL_RCC_SYSCFG_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SYSCFGRST)) +#define __HAL_RCC_TIM9_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM9RST)) +#define __HAL_RCC_TIM11_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM11RST)) +/** + * @} + */ + +/** @defgroup RCC_AHB1_LowPower_Enable_Disable AHB1 Peripheral Low Power Enable Disable + * @brief Enable or disable the AHB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wake-up from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. 
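+ * @note  Illustrative usage (a sketch):
+ * @code
+ *   __HAL_RCC_DMA1_CLK_SLEEP_DISABLE();  // gate the DMA1 clock while the CPU is in Sleep mode
+ * @endcode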
+ * @{ + */ +#define __HAL_RCC_GPIOA_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOALPEN)) +#define __HAL_RCC_GPIOB_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOBLPEN)) +#define __HAL_RCC_GPIOC_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOCLPEN)) +#define __HAL_RCC_GPIOH_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOHLPEN)) +#define __HAL_RCC_DMA1_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_DMA1LPEN)) +#define __HAL_RCC_DMA2_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_DMA2LPEN)) + +#define __HAL_RCC_GPIOA_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOALPEN)) +#define __HAL_RCC_GPIOB_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOBLPEN)) +#define __HAL_RCC_GPIOC_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOCLPEN)) +#define __HAL_RCC_GPIOH_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOHLPEN)) +#define __HAL_RCC_DMA1_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_DMA1LPEN)) +#define __HAL_RCC_DMA2_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_DMA2LPEN)) +/** + * @} + */ + +/** @defgroup RCC_APB1_LowPower_Enable_Disable APB1 Peripheral Low Power Enable Disable + * @brief Enable or disable the APB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wake-up from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_TIM5_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM5LPEN)) +#define __HAL_RCC_WWDG_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_WWDGLPEN)) +#define __HAL_RCC_SPI2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_SPI2LPEN)) +#define __HAL_RCC_USART2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_USART2LPEN)) +#define __HAL_RCC_I2C1_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_I2C1LPEN)) +#define __HAL_RCC_I2C2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_I2C2LPEN)) +#define __HAL_RCC_PWR_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_PWRLPEN)) + +#define __HAL_RCC_TIM5_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM5LPEN)) +#define __HAL_RCC_WWDG_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_WWDGLPEN)) +#define __HAL_RCC_SPI2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_SPI2LPEN)) +#define __HAL_RCC_USART2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_USART2LPEN)) +#define __HAL_RCC_I2C1_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_I2C1LPEN)) +#define __HAL_RCC_I2C2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_I2C2LPEN)) +#define __HAL_RCC_PWR_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_PWRLPEN)) +/** + * @} + */ + +/** @defgroup RCC_APB2_LowPower_Enable_Disable APB2 Peripheral Low Power Enable Disable + * @brief Enable or disable the APB2 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wake-up from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. 
+ * @{ + */ +#define __HAL_RCC_TIM1_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_TIM1LPEN)) +#define __HAL_RCC_USART1_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_USART1LPEN)) +#define __HAL_RCC_USART6_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_USART6LPEN)) +#define __HAL_RCC_ADC1_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_ADC1LPEN)) +#define __HAL_RCC_SPI1_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SPI1LPEN)) +#define __HAL_RCC_SYSCFG_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SYSCFGLPEN)) +#define __HAL_RCC_TIM9_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_TIM9LPEN)) +#define __HAL_RCC_TIM11_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_TIM11LPEN)) + +#define __HAL_RCC_TIM1_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM1LPEN)) +#define __HAL_RCC_USART1_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_USART1LPEN)) +#define __HAL_RCC_USART6_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_USART6LPEN)) +#define __HAL_RCC_ADC1_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_ADC1LPEN)) +#define __HAL_RCC_SPI1_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SPI1LPEN)) +#define __HAL_RCC_SYSCFG_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SYSCFGLPEN)) +#define __HAL_RCC_TIM9_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM9LPEN)) +#define __HAL_RCC_TIM11_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM11LPEN)) +/** + * @} + */ + +/** @defgroup RCC_HSI_Configuration HSI Configuration + * @{ + */ + +/** @brief Macros to enable or disable the Internal High Speed oscillator (HSI). + * @note The HSI is stopped by hardware when entering STOP and STANDBY modes. + * It is used (enabled by hardware) as system clock source after startup + * from Reset, wake-up from STOP and STANDBY mode, or in case of failure + * of the HSE used directly or indirectly as system clock (if the Clock + * Security System CSS is enabled). + * @note HSI can not be stopped if it is used as system clock source. In this case, + * you have to select another source of the system clock then stop the HSI. + * @note After enabling the HSI, the application software should wait on HSIRDY + * flag to be set indicating that HSI clock is stable and can be used as + * system clock source. + * This parameter can be: ENABLE or DISABLE. + * @note When the HSI is stopped, HSIRDY flag goes low after 6 HSI oscillator + * clock cycles. + */ +#define __HAL_RCC_HSI_ENABLE() (*(__IO uint32_t *) RCC_CR_HSION_BB = ENABLE) +#define __HAL_RCC_HSI_DISABLE() (*(__IO uint32_t *) RCC_CR_HSION_BB = DISABLE) + +/** @brief Macro to adjust the Internal High Speed oscillator (HSI) calibration value. + * @note The calibration is used to compensate for the variations in voltage + * and temperature that influence the frequency of the internal HSI RC. + * @param __HSICalibrationValue__ specifies the calibration trimming value. + * (default is RCC_HSICALIBRATION_DEFAULT). + * This parameter must be a number between 0 and 0x1F. + */ +#define __HAL_RCC_HSI_CALIBRATIONVALUE_ADJUST(__HSICalibrationValue__) (MODIFY_REG(RCC->CR,\ + RCC_CR_HSITRIM, (uint32_t)(__HSICalibrationValue__) << RCC_CR_HSITRIM_Pos)) +/** + * @} + */ + +/** @defgroup RCC_LSI_Configuration LSI Configuration + * @{ + */ + +/** @brief Macros to enable or disable the Internal Low Speed oscillator (LSI). 
+ * @note After enabling the LSI, the application software should wait on + * LSIRDY flag to be set indicating that LSI clock is stable and can + * be used to clock the IWDG and/or the RTC. + * @note LSI can not be disabled if the IWDG is running. + * @note When the LSI is stopped, LSIRDY flag goes low after 6 LSI oscillator + * clock cycles. + */ +#define __HAL_RCC_LSI_ENABLE() (*(__IO uint32_t *) RCC_CSR_LSION_BB = ENABLE) +#define __HAL_RCC_LSI_DISABLE() (*(__IO uint32_t *) RCC_CSR_LSION_BB = DISABLE) +/** + * @} + */ + +/** @defgroup RCC_HSE_Configuration HSE Configuration + * @{ + */ + +/** + * @brief Macro to configure the External High Speed oscillator (HSE). + * @note Transition HSE Bypass to HSE On and HSE On to HSE Bypass are not supported by this macro. + * User should request a transition to HSE Off first and then HSE On or HSE Bypass. + * @note After enabling the HSE (RCC_HSE_ON or RCC_HSE_Bypass), the application + * software should wait on HSERDY flag to be set indicating that HSE clock + * is stable and can be used to clock the PLL and/or system clock. + * @note HSE state can not be changed if it is used directly or through the + * PLL as system clock. In this case, you have to select another source + * of the system clock then change the HSE state (ex. disable it). + * @note The HSE is stopped by hardware when entering STOP and STANDBY modes. + * @note This function reset the CSSON bit, so if the clock security system(CSS) + * was previously enabled you have to enable it again after calling this + * function. + * @param __STATE__ specifies the new state of the HSE. + * This parameter can be one of the following values: + * @arg RCC_HSE_OFF: turn OFF the HSE oscillator, HSERDY flag goes low after + * 6 HSE oscillator clock cycles. + * @arg RCC_HSE_ON: turn ON the HSE oscillator. + * @arg RCC_HSE_BYPASS: HSE oscillator bypassed with external clock. + */ +#define __HAL_RCC_HSE_CONFIG(__STATE__) \ + do { \ + if ((__STATE__) == RCC_HSE_ON) \ + { \ + SET_BIT(RCC->CR, RCC_CR_HSEON); \ + } \ + else if ((__STATE__) == RCC_HSE_BYPASS) \ + { \ + SET_BIT(RCC->CR, RCC_CR_HSEBYP); \ + SET_BIT(RCC->CR, RCC_CR_HSEON); \ + } \ + else \ + { \ + CLEAR_BIT(RCC->CR, RCC_CR_HSEON); \ + CLEAR_BIT(RCC->CR, RCC_CR_HSEBYP); \ + } \ + } while(0U) +/** + * @} + */ + +/** @defgroup RCC_LSE_Configuration LSE Configuration + * @{ + */ + +/** + * @brief Macro to configure the External Low Speed oscillator (LSE). + * @note Transition LSE Bypass to LSE On and LSE On to LSE Bypass are not supported by this macro. + * User should request a transition to LSE Off first and then LSE On or LSE Bypass. + * @note As the LSE is in the Backup domain and write access is denied to + * this domain after reset, you have to enable write access using + * HAL_PWR_EnableBkUpAccess() function before to configure the LSE + * (to be done once after reset). + * @note After enabling the LSE (RCC_LSE_ON or RCC_LSE_BYPASS), the application + * software should wait on LSERDY flag to be set indicating that LSE clock + * is stable and can be used to clock the RTC. + * @param __STATE__ specifies the new state of the LSE. + * This parameter can be one of the following values: + * @arg RCC_LSE_OFF: turn OFF the LSE oscillator, LSERDY flag goes low after + * 6 LSE oscillator clock cycles. + * @arg RCC_LSE_ON: turn ON the LSE oscillator. + * @arg RCC_LSE_BYPASS: LSE oscillator bypassed with external clock. 
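+ * @note  Illustrative usage (a sketch, assuming backup-domain write access is
+ *        granted first):
+ * @code
+ *   HAL_PWR_EnableBkUpAccess();                               // LSE lives in the backup domain
+ *   __HAL_RCC_LSE_CONFIG(RCC_LSE_ON);
+ *   while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) { }  // wait until the 32.768 kHz crystal is stable
+ * @endcode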
+ */ +#define __HAL_RCC_LSE_CONFIG(__STATE__) \ + do { \ + if((__STATE__) == RCC_LSE_ON) \ + { \ + SET_BIT(RCC->BDCR, RCC_BDCR_LSEON); \ + } \ + else if((__STATE__) == RCC_LSE_BYPASS) \ + { \ + SET_BIT(RCC->BDCR, RCC_BDCR_LSEBYP); \ + SET_BIT(RCC->BDCR, RCC_BDCR_LSEON); \ + } \ + else \ + { \ + CLEAR_BIT(RCC->BDCR, RCC_BDCR_LSEON); \ + CLEAR_BIT(RCC->BDCR, RCC_BDCR_LSEBYP); \ + } \ + } while(0U) +/** + * @} + */ + +/** @defgroup RCC_Internal_RTC_Clock_Configuration RTC Clock Configuration + * @{ + */ + +/** @brief Macros to enable or disable the RTC clock. + * @note These macros must be used only after the RTC clock source was selected. + */ +#define __HAL_RCC_RTC_ENABLE() (*(__IO uint32_t *) RCC_BDCR_RTCEN_BB = ENABLE) +#define __HAL_RCC_RTC_DISABLE() (*(__IO uint32_t *) RCC_BDCR_RTCEN_BB = DISABLE) + +/** @brief Macros to configure the RTC clock (RTCCLK). + * @note As the RTC clock configuration bits are in the Backup domain and write + * access is denied to this domain after reset, you have to enable write + * access using the Power Backup Access macro before to configure + * the RTC clock source (to be done once after reset). + * @note Once the RTC clock is configured it can't be changed unless the + * Backup domain is reset using __HAL_RCC_BackupReset_RELEASE() macro, or by + * a Power On Reset (POR). + * @param __RTCCLKSource__ specifies the RTC clock source. + * This parameter can be one of the following values: + * @arg @ref RCC_RTCCLKSOURCE_NO_CLK : No clock selected as RTC clock. + * @arg @ref RCC_RTCCLKSOURCE_LSE : LSE selected as RTC clock. + * @arg @ref RCC_RTCCLKSOURCE_LSI : LSI selected as RTC clock. + * @arg @ref RCC_RTCCLKSOURCE_HSE_DIVX HSE divided by X selected as RTC clock (X can be retrieved thanks to @ref __HAL_RCC_GET_RTC_HSE_PRESCALER() + * @note If the LSE or LSI is used as RTC clock source, the RTC continues to + * work in STOP and STANDBY modes, and can be used as wake-up source. + * However, when the HSE clock is used as RTC clock source, the RTC + * cannot be used in STOP and STANDBY modes. + * @note The maximum input clock frequency for RTC is 1MHz (when using HSE as + * RTC clock source). + */ +#define __HAL_RCC_RTC_CLKPRESCALER(__RTCCLKSource__) (((__RTCCLKSource__) & RCC_BDCR_RTCSEL) == RCC_BDCR_RTCSEL) ? \ + MODIFY_REG(RCC->CFGR, RCC_CFGR_RTCPRE, ((__RTCCLKSource__) & 0xFFFFCFFU)) : CLEAR_BIT(RCC->CFGR, RCC_CFGR_RTCPRE) + +#define __HAL_RCC_RTC_CONFIG(__RTCCLKSource__) do { __HAL_RCC_RTC_CLKPRESCALER(__RTCCLKSource__); \ + RCC->BDCR |= ((__RTCCLKSource__) & 0x00000FFFU); \ + } while(0U) + +/** @brief Macro to get the RTC clock source. + * @retval The clock source can be one of the following values: + * @arg @ref RCC_RTCCLKSOURCE_NO_CLK No clock selected as RTC clock + * @arg @ref RCC_RTCCLKSOURCE_LSE LSE selected as RTC clock + * @arg @ref RCC_RTCCLKSOURCE_LSI LSI selected as RTC clock + * @arg @ref RCC_RTCCLKSOURCE_HSE_DIVX HSE divided by X selected as RTC clock (X can be retrieved thanks to @ref __HAL_RCC_GET_RTC_HSE_PRESCALER() + */ +#define __HAL_RCC_GET_RTC_SOURCE() (READ_BIT(RCC->BDCR, RCC_BDCR_RTCSEL)) + +/** + * @brief Get the RTC and HSE clock divider (RTCPRE). + * @retval Returned value can be one of the following values: + * @arg @ref RCC_RTCCLKSOURCE_HSE_DIVX HSE divided by X selected as RTC clock (X can be retrieved thanks to @ref __HAL_RCC_GET_RTC_HSE_PRESCALER() + */ +#define __HAL_RCC_GET_RTC_HSE_PRESCALER() (READ_BIT(RCC->CFGR, RCC_CFGR_RTCPRE) | RCC_BDCR_RTCSEL) + +/** @brief Macros to force or release the Backup domain reset. 
+ * @note This function resets the RTC peripheral (including the backup registers) + * and the RTC clock source selection in RCC_CSR register. + * @note The BKPSRAM is not affected by this reset. + */ +#define __HAL_RCC_BACKUPRESET_FORCE() (*(__IO uint32_t *) RCC_BDCR_BDRST_BB = ENABLE) +#define __HAL_RCC_BACKUPRESET_RELEASE() (*(__IO uint32_t *) RCC_BDCR_BDRST_BB = DISABLE) +/** + * @} + */ + +/** @defgroup RCC_PLL_Configuration PLL Configuration + * @{ + */ + +/** @brief Macros to enable or disable the main PLL. + * @note After enabling the main PLL, the application software should wait on + * PLLRDY flag to be set indicating that PLL clock is stable and can + * be used as system clock source. + * @note The main PLL can not be disabled if it is used as system clock source + * @note The main PLL is disabled by hardware when entering STOP and STANDBY modes. + */ +#define __HAL_RCC_PLL_ENABLE() (*(__IO uint32_t *) RCC_CR_PLLON_BB = ENABLE) +#define __HAL_RCC_PLL_DISABLE() (*(__IO uint32_t *) RCC_CR_PLLON_BB = DISABLE) + +/** @brief Macro to configure the PLL clock source. + * @note This function must be used only when the main PLL is disabled. + * @param __PLLSOURCE__ specifies the PLL entry clock source. + * This parameter can be one of the following values: + * @arg RCC_PLLSOURCE_HSI: HSI oscillator clock selected as PLL clock entry + * @arg RCC_PLLSOURCE_HSE: HSE oscillator clock selected as PLL clock entry + * + */ +#define __HAL_RCC_PLL_PLLSOURCE_CONFIG(__PLLSOURCE__) MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC, (__PLLSOURCE__)) + +/** @brief Macro to configure the PLL multiplication factor. + * @note This function must be used only when the main PLL is disabled. + * @param __PLLM__ specifies the division factor for PLL VCO input clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 63. + * @note You have to set the PLLM parameter correctly to ensure that the VCO input + * frequency ranges from 1 to 2 MHz. It is recommended to select a frequency + * of 2 MHz to limit PLL jitter. + * + */ +#define __HAL_RCC_PLL_PLLM_CONFIG(__PLLM__) MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLM, (__PLLM__)) +/** + * @} + */ + +/** @defgroup RCC_Get_Clock_source Get Clock source + * @{ + */ +/** + * @brief Macro to configure the system clock source. + * @param __RCC_SYSCLKSOURCE__ specifies the system clock source. + * This parameter can be one of the following values: + * - RCC_SYSCLKSOURCE_HSI: HSI oscillator is used as system clock source. + * - RCC_SYSCLKSOURCE_HSE: HSE oscillator is used as system clock source. + * - RCC_SYSCLKSOURCE_PLLCLK: PLL output is used as system clock source. + * - RCC_SYSCLKSOURCE_PLLRCLK: PLLR output is used as system clock source. This + * parameter is available only for STM32F446xx devices. + */ +#define __HAL_RCC_SYSCLK_CONFIG(__RCC_SYSCLKSOURCE__) MODIFY_REG(RCC->CFGR, RCC_CFGR_SW, (__RCC_SYSCLKSOURCE__)) + +/** @brief Macro to get the clock source used as system clock. + * @retval The clock source used as system clock. The returned value can be one + * of the following: + * - RCC_SYSCLKSOURCE_STATUS_HSI: HSI used as system clock. + * - RCC_SYSCLKSOURCE_STATUS_HSE: HSE used as system clock. + * - RCC_SYSCLKSOURCE_STATUS_PLLCLK: PLL used as system clock. + * - RCC_SYSCLKSOURCE_STATUS_PLLRCLK: PLLR used as system clock. This parameter + * is available only for STM32F446xx devices. + */ +#define __HAL_RCC_GET_SYSCLK_SOURCE() (RCC->CFGR & RCC_CFGR_SWS) + +/** @brief Macro to get the oscillator used as PLL clock source. 
+ * @retval The oscillator used as PLL clock source. The returned value can be one + * of the following: + * - RCC_PLLSOURCE_HSI: HSI oscillator is used as PLL clock source. + * - RCC_PLLSOURCE_HSE: HSE oscillator is used as PLL clock source. + */ +#define __HAL_RCC_GET_PLL_OSCSOURCE() ((uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC)) +/** + * @} + */ + +/** @defgroup RCCEx_MCOx_Clock_Config RCC Extended MCOx Clock Config + * @{ + */ + +/** @brief Macro to configure the MCO1 clock. + * @param __MCOCLKSOURCE__ specifies the MCO clock source. + * This parameter can be one of the following values: + * @arg RCC_MCO1SOURCE_HSI: HSI clock selected as MCO1 source + * @arg RCC_MCO1SOURCE_LSE: LSE clock selected as MCO1 source + * @arg RCC_MCO1SOURCE_HSE: HSE clock selected as MCO1 source + * @arg RCC_MCO1SOURCE_PLLCLK: main PLL clock selected as MCO1 source + * @param __MCODIV__ specifies the MCO clock prescaler. + * This parameter can be one of the following values: + * @arg RCC_MCODIV_1: no division applied to MCOx clock + * @arg RCC_MCODIV_2: division by 2 applied to MCOx clock + * @arg RCC_MCODIV_3: division by 3 applied to MCOx clock + * @arg RCC_MCODIV_4: division by 4 applied to MCOx clock + * @arg RCC_MCODIV_5: division by 5 applied to MCOx clock + */ +#define __HAL_RCC_MCO1_CONFIG(__MCOCLKSOURCE__, __MCODIV__) \ + MODIFY_REG(RCC->CFGR, (RCC_CFGR_MCO1 | RCC_CFGR_MCO1PRE), ((__MCOCLKSOURCE__) | (__MCODIV__))) + +/** @brief Macro to configure the MCO2 clock. + * @param __MCOCLKSOURCE__ specifies the MCO clock source. + * This parameter can be one of the following values: + * @arg RCC_MCO2SOURCE_SYSCLK: System clock (SYSCLK) selected as MCO2 source + * @arg RCC_MCO2SOURCE_PLLI2SCLK: PLLI2S clock selected as MCO2 source, available for all STM32F4 devices except STM32F410xx + * @arg RCC_MCO2SOURCE_I2SCLK: I2SCLK clock selected as MCO2 source, available only for STM32F410Rx devices + * @arg RCC_MCO2SOURCE_HSE: HSE clock selected as MCO2 source + * @arg RCC_MCO2SOURCE_PLLCLK: main PLL clock selected as MCO2 source + * @param __MCODIV__ specifies the MCO clock prescaler. + * This parameter can be one of the following values: + * @arg RCC_MCODIV_1: no division applied to MCOx clock + * @arg RCC_MCODIV_2: division by 2 applied to MCOx clock + * @arg RCC_MCODIV_3: division by 3 applied to MCOx clock + * @arg RCC_MCODIV_4: division by 4 applied to MCOx clock + * @arg RCC_MCODIV_5: division by 5 applied to MCOx clock + * @note For STM32F410Rx devices, to output I2SCLK clock on MCO2, you should have + * at least one of the SPI clocks enabled (SPI1, SPI2 or SPI5). + */ +#define __HAL_RCC_MCO2_CONFIG(__MCOCLKSOURCE__, __MCODIV__) \ + MODIFY_REG(RCC->CFGR, (RCC_CFGR_MCO2 | RCC_CFGR_MCO2PRE), ((__MCOCLKSOURCE__) | ((__MCODIV__) << 3U))); +/** + * @} + */ + +/** @defgroup RCC_Flags_Interrupts_Management Flags Interrupts Management + * @brief macros to manage the specified RCC Flags and interrupts. + * @{ + */ + +/** @brief Enable RCC interrupt (Perform Byte access to RCC_CIR[14:8] bits to enable + * the selected interrupts). + * @param __INTERRUPT__ specifies the RCC interrupt sources to be enabled. + * This parameter can be any combination of the following values: + * @arg RCC_IT_LSIRDY: LSI ready interrupt. + * @arg RCC_IT_LSERDY: LSE ready interrupt. + * @arg RCC_IT_HSIRDY: HSI ready interrupt. + * @arg RCC_IT_HSERDY: HSE ready interrupt. + * @arg RCC_IT_PLLRDY: Main PLL ready interrupt. + * @arg RCC_IT_PLLI2SRDY: PLLI2S ready interrupt. 
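+ * @note Illustrative use, assuming the RCC global interrupt is enabled in the NVIC:
+ * @code
+ *       __HAL_RCC_ENABLE_IT(RCC_IT_LSERDY);
+ *       // ... later, typically from RCC_IRQHandler():
+ *       if (__HAL_RCC_GET_IT(RCC_IT_LSERDY))
+ *       {
+ *         __HAL_RCC_CLEAR_IT(RCC_IT_LSERDY);  // acknowledge the pending bit
+ *       }
+ * @endcode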
+ */ +#define __HAL_RCC_ENABLE_IT(__INTERRUPT__) (*(__IO uint8_t *) RCC_CIR_BYTE1_ADDRESS |= (__INTERRUPT__)) + +/** @brief Disable RCC interrupt (Perform Byte access to RCC_CIR[14:8] bits to disable + * the selected interrupts). + * @param __INTERRUPT__ specifies the RCC interrupt sources to be disabled. + * This parameter can be any combination of the following values: + * @arg RCC_IT_LSIRDY: LSI ready interrupt. + * @arg RCC_IT_LSERDY: LSE ready interrupt. + * @arg RCC_IT_HSIRDY: HSI ready interrupt. + * @arg RCC_IT_HSERDY: HSE ready interrupt. + * @arg RCC_IT_PLLRDY: Main PLL ready interrupt. + * @arg RCC_IT_PLLI2SRDY: PLLI2S ready interrupt. + */ +#define __HAL_RCC_DISABLE_IT(__INTERRUPT__) (*(__IO uint8_t *) RCC_CIR_BYTE1_ADDRESS &= (uint8_t)(~(__INTERRUPT__))) + +/** @brief Clear the RCC's interrupt pending bits (Perform Byte access to RCC_CIR[23:16] + * bits to clear the selected interrupt pending bits. + * @param __INTERRUPT__ specifies the interrupt pending bit to clear. + * This parameter can be any combination of the following values: + * @arg RCC_IT_LSIRDY: LSI ready interrupt. + * @arg RCC_IT_LSERDY: LSE ready interrupt. + * @arg RCC_IT_HSIRDY: HSI ready interrupt. + * @arg RCC_IT_HSERDY: HSE ready interrupt. + * @arg RCC_IT_PLLRDY: Main PLL ready interrupt. + * @arg RCC_IT_PLLI2SRDY: PLLI2S ready interrupt. + * @arg RCC_IT_CSS: Clock Security System interrupt + */ +#define __HAL_RCC_CLEAR_IT(__INTERRUPT__) (*(__IO uint8_t *) RCC_CIR_BYTE2_ADDRESS = (__INTERRUPT__)) + +/** @brief Check the RCC's interrupt has occurred or not. + * @param __INTERRUPT__ specifies the RCC interrupt source to check. + * This parameter can be one of the following values: + * @arg RCC_IT_LSIRDY: LSI ready interrupt. + * @arg RCC_IT_LSERDY: LSE ready interrupt. + * @arg RCC_IT_HSIRDY: HSI ready interrupt. + * @arg RCC_IT_HSERDY: HSE ready interrupt. + * @arg RCC_IT_PLLRDY: Main PLL ready interrupt. + * @arg RCC_IT_PLLI2SRDY: PLLI2S ready interrupt. + * @arg RCC_IT_CSS: Clock Security System interrupt + * @retval The new state of __INTERRUPT__ (TRUE or FALSE). + */ +#define __HAL_RCC_GET_IT(__INTERRUPT__) ((RCC->CIR & (__INTERRUPT__)) == (__INTERRUPT__)) + +/** @brief Set RMVF bit to clear the reset flags: RCC_FLAG_PINRST, RCC_FLAG_PORRST, + * RCC_FLAG_SFTRST, RCC_FLAG_IWDGRST, RCC_FLAG_WWDGRST and RCC_FLAG_LPWRRST. + */ +#define __HAL_RCC_CLEAR_RESET_FLAGS() (RCC->CSR |= RCC_CSR_RMVF) + +/** @brief Check RCC flag is set or not. + * @param __FLAG__ specifies the flag to check. + * This parameter can be one of the following values: + * @arg RCC_FLAG_HSIRDY: HSI oscillator clock ready. + * @arg RCC_FLAG_HSERDY: HSE oscillator clock ready. + * @arg RCC_FLAG_PLLRDY: Main PLL clock ready. + * @arg RCC_FLAG_PLLI2SRDY: PLLI2S clock ready. + * @arg RCC_FLAG_LSERDY: LSE oscillator clock ready. + * @arg RCC_FLAG_LSIRDY: LSI oscillator clock ready. + * @arg RCC_FLAG_BORRST: POR/PDR or BOR reset. + * @arg RCC_FLAG_PINRST: Pin reset. + * @arg RCC_FLAG_PORRST: POR/PDR reset. + * @arg RCC_FLAG_SFTRST: Software reset. + * @arg RCC_FLAG_IWDGRST: Independent Watchdog reset. + * @arg RCC_FLAG_WWDGRST: Window Watchdog reset. + * @arg RCC_FLAG_LPWRRST: Low Power reset. + * @retval The new state of __FLAG__ (TRUE or FALSE). + */ +#define RCC_FLAG_MASK ((uint8_t)0x1FU) +#define __HAL_RCC_GET_FLAG(__FLAG__) (((((((__FLAG__) >> 5U)\ + == 1U)? RCC->CR :((((__FLAG__) >> 5U) == 2U) ? RCC->BDCR :((((__FLAG__) >> 5U) == 3U)? RCC->CSR :RCC->CIR))) &\ + (1U << ((__FLAG__) & RCC_FLAG_MASK)))!= 0U)? 
1U : 0U) + +/** + * @} + */ + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup RCC_Exported_Functions + * @{ + */ + +/** @addtogroup RCC_Exported_Functions_Group1 + * @{ + */ +/* Initialization and de-initialization functions ******************************/ +HAL_StatusTypeDef HAL_RCC_DeInit(void); +HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct); +HAL_StatusTypeDef HAL_RCC_ClockConfig(RCC_ClkInitTypeDef *RCC_ClkInitStruct, uint32_t FLatency); +/** + * @} + */ + +/** @addtogroup RCC_Exported_Functions_Group2 + * @{ + */ +/* Peripheral Control functions ************************************************/ +void HAL_RCC_MCOConfig(uint32_t RCC_MCOx, uint32_t RCC_MCOSource, uint32_t RCC_MCODiv); +void HAL_RCC_EnableCSS(void); +void HAL_RCC_DisableCSS(void); +uint32_t HAL_RCC_GetSysClockFreq(void); +uint32_t HAL_RCC_GetHCLKFreq(void); +uint32_t HAL_RCC_GetPCLK1Freq(void); +uint32_t HAL_RCC_GetPCLK2Freq(void); +void HAL_RCC_GetOscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct); +void HAL_RCC_GetClockConfig(RCC_ClkInitTypeDef *RCC_ClkInitStruct, uint32_t *pFLatency); + +/* CSS NMI IRQ handler */ +void HAL_RCC_NMI_IRQHandler(void); + +/* User Callbacks in non blocking mode (IT mode) */ +void HAL_RCC_CSSCallback(void); + +/** + * @} + */ + +/** + * @} + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup RCC_Private_Constants RCC Private Constants + * @{ + */ + +/** @defgroup RCC_BitAddress_AliasRegion RCC BitAddress AliasRegion + * @brief RCC registers bit address in the alias region + * @{ + */ +#define RCC_OFFSET (RCC_BASE - PERIPH_BASE) +/* --- CR Register --- */ +/* Alias word address of HSION bit */ +#define RCC_CR_OFFSET (RCC_OFFSET + 0x00U) +#define RCC_HSION_BIT_NUMBER 0x00U +#define RCC_CR_HSION_BB (PERIPH_BB_BASE + (RCC_CR_OFFSET * 32U) + (RCC_HSION_BIT_NUMBER * 4U)) +/* Alias word address of CSSON bit */ +#define RCC_CSSON_BIT_NUMBER 0x13U +#define RCC_CR_CSSON_BB (PERIPH_BB_BASE + (RCC_CR_OFFSET * 32U) + (RCC_CSSON_BIT_NUMBER * 4U)) +/* Alias word address of PLLON bit */ +#define RCC_PLLON_BIT_NUMBER 0x18U +#define RCC_CR_PLLON_BB (PERIPH_BB_BASE + (RCC_CR_OFFSET * 32U) + (RCC_PLLON_BIT_NUMBER * 4U)) + +/* --- BDCR Register --- */ +/* Alias word address of RTCEN bit */ +#define RCC_BDCR_OFFSET (RCC_OFFSET + 0x70U) +#define RCC_RTCEN_BIT_NUMBER 0x0FU +#define RCC_BDCR_RTCEN_BB (PERIPH_BB_BASE + (RCC_BDCR_OFFSET * 32U) + (RCC_RTCEN_BIT_NUMBER * 4U)) +/* Alias word address of BDRST bit */ +#define RCC_BDRST_BIT_NUMBER 0x10U +#define RCC_BDCR_BDRST_BB (PERIPH_BB_BASE + (RCC_BDCR_OFFSET * 32U) + (RCC_BDRST_BIT_NUMBER * 4U)) + +/* --- CSR Register --- */ +/* Alias word address of LSION bit */ +#define RCC_CSR_OFFSET (RCC_OFFSET + 0x74U) +#define RCC_LSION_BIT_NUMBER 0x00U +#define RCC_CSR_LSION_BB (PERIPH_BB_BASE + (RCC_CSR_OFFSET * 32U) + (RCC_LSION_BIT_NUMBER * 4U)) + +/* CR register byte 3 (Bits[23:16]) base address */ +#define RCC_CR_BYTE2_ADDRESS 0x40023802U + +/* CIR register byte 2 (Bits[15:8]) base address */ +#define RCC_CIR_BYTE1_ADDRESS ((uint32_t)(RCC_BASE + 0x0CU + 0x01U)) + +/* CIR register byte 3 (Bits[23:16]) base address */ +#define RCC_CIR_BYTE2_ADDRESS ((uint32_t)(RCC_BASE + 0x0CU + 0x02U)) + +/* BDCR register base address */ +#define RCC_BDCR_BYTE0_ADDRESS (PERIPH_BASE + 
RCC_BDCR_OFFSET) + +#define RCC_DBP_TIMEOUT_VALUE 2U +#define RCC_LSE_TIMEOUT_VALUE LSE_STARTUP_TIMEOUT + +#define HSE_TIMEOUT_VALUE HSE_STARTUP_TIMEOUT +#define HSI_TIMEOUT_VALUE 2U /* 2 ms */ +#define LSI_TIMEOUT_VALUE 2U /* 2 ms */ +#define CLOCKSWITCH_TIMEOUT_VALUE 5000U /* 5 s */ + +/** + * @} + */ + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup RCC_Private_Macros RCC Private Macros + * @{ + */ + +/** @defgroup RCC_IS_RCC_Definitions RCC Private macros to check input parameters + * @{ + */ +#define IS_RCC_OSCILLATORTYPE(OSCILLATOR) ((OSCILLATOR) <= 15U) + +#define IS_RCC_HSE(HSE) (((HSE) == RCC_HSE_OFF) || ((HSE) == RCC_HSE_ON) || \ + ((HSE) == RCC_HSE_BYPASS)) + +#define IS_RCC_LSE(LSE) (((LSE) == RCC_LSE_OFF) || ((LSE) == RCC_LSE_ON) || \ + ((LSE) == RCC_LSE_BYPASS)) + +#define IS_RCC_HSI(HSI) (((HSI) == RCC_HSI_OFF) || ((HSI) == RCC_HSI_ON)) + +#define IS_RCC_LSI(LSI) (((LSI) == RCC_LSI_OFF) || ((LSI) == RCC_LSI_ON)) + +#define IS_RCC_PLL(PLL) (((PLL) == RCC_PLL_NONE) ||((PLL) == RCC_PLL_OFF) || ((PLL) == RCC_PLL_ON)) + +#define IS_RCC_PLLSOURCE(SOURCE) (((SOURCE) == RCC_PLLSOURCE_HSI) || \ + ((SOURCE) == RCC_PLLSOURCE_HSE)) + +#define IS_RCC_SYSCLKSOURCE(SOURCE) (((SOURCE) == RCC_SYSCLKSOURCE_HSI) || \ + ((SOURCE) == RCC_SYSCLKSOURCE_HSE) || \ + ((SOURCE) == RCC_SYSCLKSOURCE_PLLCLK) || \ + ((SOURCE) == RCC_SYSCLKSOURCE_PLLRCLK)) + +#define IS_RCC_RTCCLKSOURCE(__SOURCE__) (((__SOURCE__) == RCC_RTCCLKSOURCE_LSE) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_LSI) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV2) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV3) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV4) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV5) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV6) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV7) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV8) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV9) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV10) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV11) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV12) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV13) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV14) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV15) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV16) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV17) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV18) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV19) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV20) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV21) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV22) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV23) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV24) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV25) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV26) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV27) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV28) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV29) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV30) || \ + ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV31)) + +#define IS_RCC_PLLM_VALUE(VALUE) ((2U <= (VALUE)) && ((VALUE) <= 63U)) + +#define IS_RCC_PLLP_VALUE(VALUE) (((VALUE) == 2U) || ((VALUE) == 4U) || ((VALUE) == 6U) || ((VALUE) == 8U)) + +#define IS_RCC_PLLQ_VALUE(VALUE) ((2U <= (VALUE)) && ((VALUE) <= 15U)) + +#define IS_RCC_HCLK(HCLK) (((HCLK) == RCC_SYSCLK_DIV1) || ((HCLK) == RCC_SYSCLK_DIV2) || \ + ((HCLK) == RCC_SYSCLK_DIV4) || ((HCLK) == RCC_SYSCLK_DIV8) || \ + ((HCLK) == 
RCC_SYSCLK_DIV16) || ((HCLK) == RCC_SYSCLK_DIV64) || \ + ((HCLK) == RCC_SYSCLK_DIV128) || ((HCLK) == RCC_SYSCLK_DIV256) || \ + ((HCLK) == RCC_SYSCLK_DIV512)) + +#define IS_RCC_CLOCKTYPE(CLK) ((1U <= (CLK)) && ((CLK) <= 15U)) + +#define IS_RCC_PCLK(PCLK) (((PCLK) == RCC_HCLK_DIV1) || ((PCLK) == RCC_HCLK_DIV2) || \ + ((PCLK) == RCC_HCLK_DIV4) || ((PCLK) == RCC_HCLK_DIV8) || \ + ((PCLK) == RCC_HCLK_DIV16)) + +#define IS_RCC_MCO(MCOx) (((MCOx) == RCC_MCO1) || ((MCOx) == RCC_MCO2)) + +#define IS_RCC_MCO1SOURCE(SOURCE) (((SOURCE) == RCC_MCO1SOURCE_HSI) || ((SOURCE) == RCC_MCO1SOURCE_LSE) || \ + ((SOURCE) == RCC_MCO1SOURCE_HSE) || ((SOURCE) == RCC_MCO1SOURCE_PLLCLK)) + +#define IS_RCC_MCODIV(DIV) (((DIV) == RCC_MCODIV_1) || ((DIV) == RCC_MCODIV_2) || \ + ((DIV) == RCC_MCODIV_3) || ((DIV) == RCC_MCODIV_4) || \ + ((DIV) == RCC_MCODIV_5)) +#define IS_RCC_CALIBRATION_VALUE(VALUE) ((VALUE) <= 0x1FU) + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_HAL_RCC_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_rcc_ex.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_rcc_ex.h new file mode 100644 index 0000000..3b62134 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_rcc_ex.h @@ -0,0 +1,7190 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_rcc_ex.h + * @author MCD Application Team + * @brief Header file of RCC HAL Extension module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_HAL_RCC_EX_H +#define __STM32F4xx_HAL_RCC_EX_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal_def.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @addtogroup RCCEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup RCCEx_Exported_Types RCCEx Exported Types + * @{ + */ + +/** + * @brief RCC PLL configuration structure definition + */ +typedef struct +{ + uint32_t PLLState; /*!< The new state of the PLL. + This parameter can be a value of @ref RCC_PLL_Config */ + + uint32_t PLLSource; /*!< RCC_PLLSource: PLL entry clock source. + This parameter must be a value of @ref RCC_PLL_Clock_Source */ + + uint32_t PLLM; /*!< PLLM: Division factor for PLL VCO input clock. + This parameter must be a number between Min_Data = 0 and Max_Data = 63 */ + + uint32_t PLLN; /*!< PLLN: Multiplication factor for PLL VCO output clock. + This parameter must be a number between Min_Data = 50 and Max_Data = 432 + except for STM32F411xE devices where the Min_Data = 192 */ + + uint32_t PLLP; /*!< PLLP: Division factor for main system clock (SYSCLK). + This parameter must be a value of @ref RCC_PLLP_Clock_Divider */ + + uint32_t PLLQ; /*!< PLLQ: Division factor for OTG FS, SDIO and RNG clocks. 
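+                      (For USB OTG FS operation this clock must be exactly 48 MHz,
+                      i.e. ((PLL input frequency / PLLM) * PLLN) / PLLQ = 48 MHz;
+                      noted here for convenience, see the device reference manual.)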
+ This parameter must be a number between Min_Data = 2 and Max_Data = 15 */ +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) || defined(STM32F446xx) || defined(STM32F469xx) ||\ + defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) ||\ + defined(STM32F413xx) || defined(STM32F423xx) + uint32_t PLLR; /*!< PLLR: PLL division factor for I2S, SAI, SYSTEM, SPDIFRX clocks. + This parameter is only available in STM32F410xx/STM32F446xx/STM32F469xx/STM32F479xx + and STM32F412Zx/STM32F412Vx/STM32F412Rx/STM32F412Cx/STM32F413xx/STM32F423xx devices. + This parameter must be a number between Min_Data = 2 and Max_Data = 7 */ +#endif /* STM32F410xx || STM32F446xx || STM32F469xx || STM32F479xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ +} RCC_PLLInitTypeDef; + +#if defined(STM32F446xx) +/** + * @brief PLLI2S Clock structure definition + */ +typedef struct +{ + uint32_t PLLI2SM; /*!< Specifies division factor for PLL VCO input clock. + This parameter must be a number between Min_Data = 2 and Max_Data = 63 */ + + uint32_t PLLI2SN; /*!< Specifies the multiplication factor for PLLI2S VCO output clock. + This parameter must be a number between Min_Data = 50 and Max_Data = 432 */ + + uint32_t PLLI2SP; /*!< Specifies division factor for SPDIFRX Clock. + This parameter must be a value of @ref RCCEx_PLLI2SP_Clock_Divider */ + + uint32_t PLLI2SQ; /*!< Specifies the division factor for SAI clock. + This parameter must be a number between Min_Data = 2 and Max_Data = 15. + This parameter will be used only when PLLI2S is selected as Clock Source SAI */ + + uint32_t PLLI2SR; /*!< Specifies the division factor for I2S clock. + This parameter must be a number between Min_Data = 2 and Max_Data = 7. + This parameter will be used only when PLLI2S is selected as Clock Source I2S */ +} RCC_PLLI2SInitTypeDef; + +/** + * @brief PLLSAI Clock structure definition + */ +typedef struct +{ + uint32_t PLLSAIM; /*!< Specifies division factor for PLL VCO input clock. + This parameter must be a number between Min_Data = 2 and Max_Data = 63 */ + + uint32_t PLLSAIN; /*!< Specifies the multiplication factor for PLLI2S VCO output clock. + This parameter must be a number between Min_Data = 50 and Max_Data = 432 */ + + uint32_t PLLSAIP; /*!< Specifies division factor for OTG FS, SDIO and RNG clocks. + This parameter must be a value of @ref RCCEx_PLLSAIP_Clock_Divider */ + + uint32_t PLLSAIQ; /*!< Specifies the division factor for SAI clock. + This parameter must be a number between Min_Data = 2 and Max_Data = 15. + This parameter will be used only when PLLSAI is selected as Clock Source SAI */ +} RCC_PLLSAIInitTypeDef; + +/** + * @brief RCC extended clocks structure definition + */ +typedef struct +{ + uint32_t PeriphClockSelection; /*!< The Extended Clock to be configured. + This parameter can be a value of @ref RCCEx_Periph_Clock_Selection */ + + RCC_PLLI2SInitTypeDef PLLI2S; /*!< PLL I2S structure parameters. + This parameter will be used only when PLLI2S is selected as Clock Source I2S or SAI */ + + RCC_PLLSAIInitTypeDef PLLSAI; /*!< PLL SAI structure parameters. + This parameter will be used only when PLLI2S is selected as Clock Source SAI or LTDC */ + + uint32_t PLLI2SDivQ; /*!< Specifies the PLLI2S division factor for SAI1 clock. 
+ This parameter must be a number between Min_Data = 1 and Max_Data = 32 + This parameter will be used only when PLLI2S is selected as Clock Source SAI */ + + uint32_t PLLSAIDivQ; /*!< Specifies the PLLI2S division factor for SAI1 clock. + This parameter must be a number between Min_Data = 1 and Max_Data = 32 + This parameter will be used only when PLLSAI is selected as Clock Source SAI */ + + uint32_t Sai1ClockSelection; /*!< Specifies SAI1 Clock Source Selection. + This parameter can be a value of @ref RCCEx_SAI1_Clock_Source */ + + uint32_t Sai2ClockSelection; /*!< Specifies SAI2 Clock Source Selection. + This parameter can be a value of @ref RCCEx_SAI2_Clock_Source */ + + uint32_t I2sApb1ClockSelection; /*!< Specifies I2S APB1 Clock Source Selection. + This parameter can be a value of @ref RCCEx_I2SAPB1_Clock_Source */ + + uint32_t I2sApb2ClockSelection; /*!< Specifies I2S APB2 Clock Source Selection. + This parameter can be a value of @ref RCCEx_I2SAPB2_Clock_Source */ + + uint32_t RTCClockSelection; /*!< Specifies RTC Clock Source Selection. + This parameter can be a value of @ref RCC_RTC_Clock_Source */ + + uint32_t SdioClockSelection; /*!< Specifies SDIO Clock Source Selection. + This parameter can be a value of @ref RCCEx_SDIO_Clock_Source */ + + uint32_t CecClockSelection; /*!< Specifies CEC Clock Source Selection. + This parameter can be a value of @ref RCCEx_CEC_Clock_Source */ + + uint32_t Fmpi2c1ClockSelection; /*!< Specifies FMPI2C1 Clock Source Selection. + This parameter can be a value of @ref RCCEx_FMPI2C1_Clock_Source */ + + uint32_t SpdifClockSelection; /*!< Specifies SPDIFRX Clock Source Selection. + This parameter can be a value of @ref RCCEx_SPDIFRX_Clock_Source */ + + uint32_t Clk48ClockSelection; /*!< Specifies CLK48 Clock Selection this clock used OTG FS, SDIO and RNG clocks. + This parameter can be a value of @ref RCCEx_CLK48_Clock_Source */ + + uint8_t TIMPresSelection; /*!< Specifies TIM Clock Source Selection. + This parameter can be a value of @ref RCCEx_TIM_PRescaler_Selection */ +} RCC_PeriphCLKInitTypeDef; +#endif /* STM32F446xx */ + +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) +/** + * @brief RCC extended clocks structure definition + */ +typedef struct +{ + uint32_t PeriphClockSelection; /*!< The Extended Clock to be configured. + This parameter can be a value of @ref RCCEx_Periph_Clock_Selection */ + + uint32_t I2SClockSelection; /*!< Specifies RTC Clock Source Selection. + This parameter can be a value of @ref RCCEx_I2S_APB_Clock_Source */ + + uint32_t RTCClockSelection; /*!< Specifies RTC Clock Source Selection. + This parameter can be a value of @ref RCC_RTC_Clock_Source */ + + uint32_t Lptim1ClockSelection; /*!< Specifies LPTIM1 Clock Source Selection. + This parameter can be a value of @ref RCCEx_LPTIM1_Clock_Source */ + + uint32_t Fmpi2c1ClockSelection; /*!< Specifies FMPI2C1 Clock Source Selection. + This parameter can be a value of @ref RCCEx_FMPI2C1_Clock_Source */ + + uint8_t TIMPresSelection; /*!< Specifies TIM Clock Source Selection. + This parameter can be a value of @ref RCCEx_TIM_PRescaler_Selection */ +} RCC_PeriphCLKInitTypeDef; +#endif /* STM32F410Tx || STM32F410Cx || STM32F410Rx */ + +#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +/** + * @brief PLLI2S Clock structure definition + */ +typedef struct +{ + uint32_t PLLI2SM; /*!< Specifies division factor for PLL VCO input clock. 
+ This parameter must be a number between Min_Data = 2 and Max_Data = 63 */ + + uint32_t PLLI2SN; /*!< Specifies the multiplication factor for PLLI2S VCO output clock. + This parameter must be a number between Min_Data = 50 and Max_Data = 432 */ + + uint32_t PLLI2SQ; /*!< Specifies the division factor for SAI clock. + This parameter must be a number between Min_Data = 2 and Max_Data = 15. + This parameter will be used only when PLLI2S is selected as Clock Source SAI */ + + uint32_t PLLI2SR; /*!< Specifies the division factor for I2S clock. + This parameter must be a number between Min_Data = 2 and Max_Data = 7. + This parameter will be used only when PLLI2S is selected as Clock Source I2S */ +} RCC_PLLI2SInitTypeDef; + +/** + * @brief RCC extended clocks structure definition + */ +typedef struct +{ + uint32_t PeriphClockSelection; /*!< The Extended Clock to be configured. + This parameter can be a value of @ref RCCEx_Periph_Clock_Selection */ + + RCC_PLLI2SInitTypeDef PLLI2S; /*!< PLL I2S structure parameters. + This parameter will be used only when PLLI2S is selected as Clock Source I2S */ + +#if defined(STM32F413xx) || defined(STM32F423xx) + uint32_t PLLDivR; /*!< Specifies the PLL division factor for SAI1 clock. + This parameter must be a number between Min_Data = 1 and Max_Data = 32 + This parameter will be used only when PLL is selected as Clock Source SAI */ + + uint32_t PLLI2SDivR; /*!< Specifies the PLLI2S division factor for SAI1 clock. + This parameter must be a number between Min_Data = 1 and Max_Data = 32 + This parameter will be used only when PLLI2S is selected as Clock Source SAI */ +#endif /* STM32F413xx || STM32F423xx */ + + uint32_t I2sApb1ClockSelection; /*!< Specifies I2S APB1 Clock Source Selection. + This parameter can be a value of @ref RCCEx_I2SAPB1_Clock_Source */ + + uint32_t I2sApb2ClockSelection; /*!< Specifies I2S APB2 Clock Source Selection. + This parameter can be a value of @ref RCCEx_I2SAPB2_Clock_Source */ + + uint32_t RTCClockSelection; /*!< Specifies RTC Clock Source Selection. + This parameter can be a value of @ref RCC_RTC_Clock_Source */ + + uint32_t SdioClockSelection; /*!< Specifies SDIO Clock Source Selection. + This parameter can be a value of @ref RCCEx_SDIO_Clock_Source */ + + uint32_t Fmpi2c1ClockSelection; /*!< Specifies FMPI2C1 Clock Source Selection. + This parameter can be a value of @ref RCCEx_FMPI2C1_Clock_Source */ + + uint32_t Clk48ClockSelection; /*!< Specifies CLK48 Clock Selection this clock used OTG FS, SDIO and RNG clocks. + This parameter can be a value of @ref RCCEx_CLK48_Clock_Source */ + + uint32_t Dfsdm1ClockSelection; /*!< Specifies DFSDM1 Clock Selection. + This parameter can be a value of @ref RCCEx_DFSDM1_Kernel_Clock_Source */ + + uint32_t Dfsdm1AudioClockSelection;/*!< Specifies DFSDM1 Audio Clock Selection. + This parameter can be a value of @ref RCCEx_DFSDM1_Audio_Clock_Source */ + +#if defined(STM32F413xx) || defined(STM32F423xx) + uint32_t Dfsdm2ClockSelection; /*!< Specifies DFSDM2 Clock Selection. + This parameter can be a value of @ref RCCEx_DFSDM2_Kernel_Clock_Source */ + + uint32_t Dfsdm2AudioClockSelection;/*!< Specifies DFSDM2 Audio Clock Selection. + This parameter can be a value of @ref RCCEx_DFSDM2_Audio_Clock_Source */ + + uint32_t Lptim1ClockSelection; /*!< Specifies LPTIM1 Clock Source Selection. 
+ This parameter can be a value of @ref RCCEx_LPTIM1_Clock_Source */ + + uint32_t SaiAClockSelection; /*!< Specifies SAI1_A Clock Prescalers Selection + This parameter can be a value of @ref RCCEx_SAI1_BlockA_Clock_Source */ + + uint32_t SaiBClockSelection; /*!< Specifies SAI1_B Clock Prescalers Selection + This parameter can be a value of @ref RCCEx_SAI1_BlockB_Clock_Source */ +#endif /* STM32F413xx || STM32F423xx */ + + uint32_t PLLI2SSelection; /*!< Specifies PLL I2S Clock Source Selection. + This parameter can be a value of @ref RCCEx_PLL_I2S_Clock_Source */ + + uint8_t TIMPresSelection; /*!< Specifies TIM Clock Source Selection. + This parameter can be a value of @ref RCCEx_TIM_PRescaler_Selection */ +} RCC_PeriphCLKInitTypeDef; +#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) + +/** + * @brief PLLI2S Clock structure definition + */ +typedef struct +{ + uint32_t PLLI2SN; /*!< Specifies the multiplication factor for PLLI2S VCO output clock. + This parameter must be a number between Min_Data = 50 and Max_Data = 432. + This parameter will be used only when PLLI2S is selected as Clock Source I2S or SAI */ + + uint32_t PLLI2SR; /*!< Specifies the division factor for I2S clock. + This parameter must be a number between Min_Data = 2 and Max_Data = 7. + This parameter will be used only when PLLI2S is selected as Clock Source I2S or SAI */ + + uint32_t PLLI2SQ; /*!< Specifies the division factor for SAI1 clock. + This parameter must be a number between Min_Data = 2 and Max_Data = 15. + This parameter will be used only when PLLI2S is selected as Clock Source SAI */ +} RCC_PLLI2SInitTypeDef; + +/** + * @brief PLLSAI Clock structure definition + */ +typedef struct +{ + uint32_t PLLSAIN; /*!< Specifies the multiplication factor for PLLI2S VCO output clock. + This parameter must be a number between Min_Data = 50 and Max_Data = 432. + This parameter will be used only when PLLSAI is selected as Clock Source SAI or LTDC */ +#if defined(STM32F469xx) || defined(STM32F479xx) + uint32_t PLLSAIP; /*!< Specifies division factor for OTG FS and SDIO clocks. + This parameter is only available in STM32F469xx/STM32F479xx devices. + This parameter must be a value of @ref RCCEx_PLLSAIP_Clock_Divider */ +#endif /* STM32F469xx || STM32F479xx */ + + uint32_t PLLSAIQ; /*!< Specifies the division factor for SAI1 clock. + This parameter must be a number between Min_Data = 2 and Max_Data = 15. + This parameter will be used only when PLLSAI is selected as Clock Source SAI or LTDC */ + + uint32_t PLLSAIR; /*!< specifies the division factor for LTDC clock + This parameter must be a number between Min_Data = 2 and Max_Data = 7. + This parameter will be used only when PLLSAI is selected as Clock Source LTDC */ + +} RCC_PLLSAIInitTypeDef; + +/** + * @brief RCC extended clocks structure definition + */ +typedef struct +{ + uint32_t PeriphClockSelection; /*!< The Extended Clock to be configured. + This parameter can be a value of @ref RCCEx_Periph_Clock_Selection */ + + RCC_PLLI2SInitTypeDef PLLI2S; /*!< PLL I2S structure parameters. + This parameter will be used only when PLLI2S is selected as Clock Source I2S or SAI */ + + RCC_PLLSAIInitTypeDef PLLSAI; /*!< PLL SAI structure parameters. 
+ This parameter will be used only when PLLI2S is selected as Clock Source SAI or LTDC */ + + uint32_t PLLI2SDivQ; /*!< Specifies the PLLI2S division factor for SAI1 clock. + This parameter must be a number between Min_Data = 1 and Max_Data = 32 + This parameter will be used only when PLLI2S is selected as Clock Source SAI */ + + uint32_t PLLSAIDivQ; /*!< Specifies the PLLI2S division factor for SAI1 clock. + This parameter must be a number between Min_Data = 1 and Max_Data = 32 + This parameter will be used only when PLLSAI is selected as Clock Source SAI */ + + uint32_t PLLSAIDivR; /*!< Specifies the PLLSAI division factor for LTDC clock. + This parameter must be one value of @ref RCCEx_PLLSAI_DIVR */ + + uint32_t RTCClockSelection; /*!< Specifies RTC Clock Prescalers Selection. + This parameter can be a value of @ref RCC_RTC_Clock_Source */ + + uint8_t TIMPresSelection; /*!< Specifies TIM Clock Prescalers Selection. + This parameter can be a value of @ref RCCEx_TIM_PRescaler_Selection */ +#if defined(STM32F469xx) || defined(STM32F479xx) + uint32_t Clk48ClockSelection; /*!< Specifies CLK48 Clock Selection this clock used OTG FS, SDIO and RNG clocks. + This parameter can be a value of @ref RCCEx_CLK48_Clock_Source */ + + uint32_t SdioClockSelection; /*!< Specifies SDIO Clock Source Selection. + This parameter can be a value of @ref RCCEx_SDIO_Clock_Source */ +#endif /* STM32F469xx || STM32F479xx */ +} RCC_PeriphCLKInitTypeDef; + +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ + +#if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx) || defined(STM32F417xx) ||\ + defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) +/** + * @brief PLLI2S Clock structure definition + */ +typedef struct +{ +#if defined(STM32F411xE) + uint32_t PLLI2SM; /*!< PLLM: Division factor for PLLI2S VCO input clock. + This parameter must be a number between Min_Data = 2 and Max_Data = 62 */ +#endif /* STM32F411xE */ + + uint32_t PLLI2SN; /*!< Specifies the multiplication factor for PLLI2S VCO output clock. + This parameter must be a number between Min_Data = 50 and Max_Data = 432 + Except for STM32F411xE devices where the Min_Data = 192. + This parameter will be used only when PLLI2S is selected as Clock Source I2S or SAI */ + + uint32_t PLLI2SR; /*!< Specifies the division factor for I2S clock. + This parameter must be a number between Min_Data = 2 and Max_Data = 7. + This parameter will be used only when PLLI2S is selected as Clock Source I2S or SAI */ + +} RCC_PLLI2SInitTypeDef; + +/** + * @brief RCC extended clocks structure definition + */ +typedef struct +{ + uint32_t PeriphClockSelection; /*!< The Extended Clock to be configured. + This parameter can be a value of @ref RCCEx_Periph_Clock_Selection */ + + RCC_PLLI2SInitTypeDef PLLI2S; /*!< PLL I2S structure parameters. + This parameter will be used only when PLLI2S is selected as Clock Source I2S or SAI */ + + uint32_t RTCClockSelection; /*!< Specifies RTC Clock Prescalers Selection. + This parameter can be a value of @ref RCC_RTC_Clock_Source */ +#if defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) + uint8_t TIMPresSelection; /*!< Specifies TIM Clock Source Selection. 
+ This parameter can be a value of @ref RCCEx_TIM_PRescaler_Selection */ +#endif /* STM32F401xC || STM32F401xE || STM32F411xE */ +} RCC_PeriphCLKInitTypeDef; +#endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx || STM32F401xC || STM32F401xE || STM32F411xE */ +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup RCCEx_Exported_Constants RCCEx Exported Constants + * @{ + */ + +/** @defgroup RCCEx_Periph_Clock_Selection RCC Periph Clock Selection + * @{ + */ +/* Peripheral Clock source for STM32F412Zx/STM32F412Vx/STM32F412Rx/STM32F412Cx */ +#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) ||\ + defined(STM32F413xx) || defined(STM32F423xx) +#define RCC_PERIPHCLK_I2S_APB1 0x00000001U +#define RCC_PERIPHCLK_I2S_APB2 0x00000002U +#define RCC_PERIPHCLK_TIM 0x00000004U +#define RCC_PERIPHCLK_RTC 0x00000008U +#define RCC_PERIPHCLK_FMPI2C1 0x00000010U +#define RCC_PERIPHCLK_CLK48 0x00000020U +#define RCC_PERIPHCLK_SDIO 0x00000040U +#define RCC_PERIPHCLK_PLLI2S 0x00000080U +#define RCC_PERIPHCLK_DFSDM1 0x00000100U +#define RCC_PERIPHCLK_DFSDM1_AUDIO 0x00000200U +#endif /* STM32F412Zx || STM32F412Vx) || STM32F412Rx || STM32F412Cx */ +#if defined(STM32F413xx) || defined(STM32F423xx) +#define RCC_PERIPHCLK_DFSDM2 0x00000400U +#define RCC_PERIPHCLK_DFSDM2_AUDIO 0x00000800U +#define RCC_PERIPHCLK_LPTIM1 0x00001000U +#define RCC_PERIPHCLK_SAIA 0x00002000U +#define RCC_PERIPHCLK_SAIB 0x00004000U +#endif /* STM32F413xx || STM32F423xx */ +/*----------------------------------------------------------------------------*/ + +/*------------------- Peripheral Clock source for STM32F410xx ----------------*/ +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) +#define RCC_PERIPHCLK_I2S 0x00000001U +#define RCC_PERIPHCLK_TIM 0x00000002U +#define RCC_PERIPHCLK_RTC 0x00000004U +#define RCC_PERIPHCLK_FMPI2C1 0x00000008U +#define RCC_PERIPHCLK_LPTIM1 0x00000010U +#endif /* STM32F410Tx || STM32F410Cx || STM32F410Rx */ +/*----------------------------------------------------------------------------*/ + +/*------------------- Peripheral Clock source for STM32F446xx ----------------*/ +#if defined(STM32F446xx) +#define RCC_PERIPHCLK_I2S_APB1 0x00000001U +#define RCC_PERIPHCLK_I2S_APB2 0x00000002U +#define RCC_PERIPHCLK_SAI1 0x00000004U +#define RCC_PERIPHCLK_SAI2 0x00000008U +#define RCC_PERIPHCLK_TIM 0x00000010U +#define RCC_PERIPHCLK_RTC 0x00000020U +#define RCC_PERIPHCLK_CEC 0x00000040U +#define RCC_PERIPHCLK_FMPI2C1 0x00000080U +#define RCC_PERIPHCLK_CLK48 0x00000100U +#define RCC_PERIPHCLK_SDIO 0x00000200U +#define RCC_PERIPHCLK_SPDIFRX 0x00000400U +#define RCC_PERIPHCLK_PLLI2S 0x00000800U +#endif /* STM32F446xx */ +/*-----------------------------------------------------------------------------*/ + +/*----------- Peripheral Clock source for STM32F469xx/STM32F479xx -------------*/ +#if defined(STM32F469xx) || defined(STM32F479xx) +#define RCC_PERIPHCLK_I2S 0x00000001U +#define RCC_PERIPHCLK_SAI_PLLI2S 0x00000002U +#define RCC_PERIPHCLK_SAI_PLLSAI 0x00000004U +#define RCC_PERIPHCLK_LTDC 0x00000008U +#define RCC_PERIPHCLK_TIM 0x00000010U +#define RCC_PERIPHCLK_RTC 0x00000020U +#define RCC_PERIPHCLK_PLLI2S 0x00000040U +#define RCC_PERIPHCLK_CLK48 0x00000080U +#define RCC_PERIPHCLK_SDIO 0x00000100U +#endif /* STM32F469xx || STM32F479xx */ +/*----------------------------------------------------------------------------*/ + +/*-------- Peripheral Clock source for STM32F42xxx/STM32F43xxx 
---------------*/ +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) +#define RCC_PERIPHCLK_I2S 0x00000001U +#define RCC_PERIPHCLK_SAI_PLLI2S 0x00000002U +#define RCC_PERIPHCLK_SAI_PLLSAI 0x00000004U +#define RCC_PERIPHCLK_LTDC 0x00000008U +#define RCC_PERIPHCLK_TIM 0x00000010U +#define RCC_PERIPHCLK_RTC 0x00000020U +#define RCC_PERIPHCLK_PLLI2S 0x00000040U +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx */ +/*----------------------------------------------------------------------------*/ + +/*-------- Peripheral Clock source for STM32F40xxx/STM32F41xxx ---------------*/ +#if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx)|| defined(STM32F417xx) ||\ + defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) +#define RCC_PERIPHCLK_I2S 0x00000001U +#define RCC_PERIPHCLK_RTC 0x00000002U +#define RCC_PERIPHCLK_PLLI2S 0x00000004U +#endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx || STM32F401xC || STM32F401xE || STM32F411xE */ +#if defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) +#define RCC_PERIPHCLK_TIM 0x00000008U +#endif /* STM32F401xC || STM32F401xE || STM32F411xE */ +/*----------------------------------------------------------------------------*/ +/** + * @} + */ +#if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx) || defined(STM32F417xx) || \ + defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || \ + defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) || defined(STM32F469xx) || \ + defined(STM32F479xx) +/** @defgroup RCCEx_I2S_Clock_Source I2S Clock Source + * @{ + */ +#define RCC_I2SCLKSOURCE_PLLI2S 0x00000000U +#define RCC_I2SCLKSOURCE_EXT RCC_CFGR_I2SSRC +/** + * @} + */ +#endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx || STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || + STM32F401xC || STM32F401xE || STM32F411xE || STM32F469xx || STM32F479xx */ + +/** @defgroup RCCEx_PLLSAI_DIVR RCC PLLSAI DIVR + * @{ + */ +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F446xx) ||\ + defined(STM32F469xx) || defined(STM32F479xx) +#define RCC_PLLSAIDIVR_2 0x00000000U +#define RCC_PLLSAIDIVR_4 0x00010000U +#define RCC_PLLSAIDIVR_8 0x00020000U +#define RCC_PLLSAIDIVR_16 0x00030000U +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F446xx || STM32F469xx || STM32F479xx */ +/** + * @} + */ + +/** @defgroup RCCEx_PLLI2SP_Clock_Divider RCC PLLI2SP Clock Divider + * @{ + */ +#if defined(STM32F446xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || \ + defined(STM32F412Rx) || defined(STM32F412Cx) +#define RCC_PLLI2SP_DIV2 0x00000002U +#define RCC_PLLI2SP_DIV4 0x00000004U +#define RCC_PLLI2SP_DIV6 0x00000006U +#define RCC_PLLI2SP_DIV8 0x00000008U +#endif /* STM32F446xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ +/** + * @} + */ + +/** @defgroup RCCEx_PLLSAIP_Clock_Divider RCC PLLSAIP Clock Divider + * @{ + */ +#if defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) +#define RCC_PLLSAIP_DIV2 0x00000002U +#define RCC_PLLSAIP_DIV4 0x00000004U +#define RCC_PLLSAIP_DIV6 0x00000006U +#define RCC_PLLSAIP_DIV8 0x00000008U +#endif /* STM32F446xx || STM32F469xx || STM32F479xx */ +/** + * @} + */ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) +/** 
@defgroup RCCEx_SAI_BlockA_Clock_Source RCC SAI BlockA Clock Source + * @{ + */ +#define RCC_SAIACLKSOURCE_PLLSAI 0x00000000U +#define RCC_SAIACLKSOURCE_PLLI2S 0x00100000U +#define RCC_SAIACLKSOURCE_EXT 0x00200000U +/** + * @} + */ + +/** @defgroup RCCEx_SAI_BlockB_Clock_Source RCC SAI BlockB Clock Source + * @{ + */ +#define RCC_SAIBCLKSOURCE_PLLSAI 0x00000000U +#define RCC_SAIBCLKSOURCE_PLLI2S 0x00400000U +#define RCC_SAIBCLKSOURCE_EXT 0x00800000U +/** + * @} + */ +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ + +#if defined(STM32F469xx) || defined(STM32F479xx) +/** @defgroup RCCEx_CLK48_Clock_Source RCC CLK48 Clock Source + * @{ + */ +#define RCC_CLK48CLKSOURCE_PLLQ 0x00000000U +#define RCC_CLK48CLKSOURCE_PLLSAIP ((uint32_t)RCC_DCKCFGR_CK48MSEL) +/** + * @} + */ + +/** @defgroup RCCEx_SDIO_Clock_Source RCC SDIO Clock Source + * @{ + */ +#define RCC_SDIOCLKSOURCE_CLK48 0x00000000U +#define RCC_SDIOCLKSOURCE_SYSCLK ((uint32_t)RCC_DCKCFGR_SDIOSEL) +/** + * @} + */ + +/** @defgroup RCCEx_DSI_Clock_Source RCC DSI Clock Source + * @{ + */ +#define RCC_DSICLKSOURCE_DSIPHY 0x00000000U +#define RCC_DSICLKSOURCE_PLLR ((uint32_t)RCC_DCKCFGR_DSISEL) +/** + * @} + */ +#endif /* STM32F469xx || STM32F479xx */ + +#if defined(STM32F446xx) +/** @defgroup RCCEx_SAI1_Clock_Source RCC SAI1 Clock Source + * @{ + */ +#define RCC_SAI1CLKSOURCE_PLLSAI 0x00000000U +#define RCC_SAI1CLKSOURCE_PLLI2S ((uint32_t)RCC_DCKCFGR_SAI1SRC_0) +#define RCC_SAI1CLKSOURCE_PLLR ((uint32_t)RCC_DCKCFGR_SAI1SRC_1) +#define RCC_SAI1CLKSOURCE_EXT ((uint32_t)RCC_DCKCFGR_SAI1SRC) +/** + * @} + */ + +/** @defgroup RCCEx_SAI2_Clock_Source RCC SAI2 Clock Source + * @{ + */ +#define RCC_SAI2CLKSOURCE_PLLSAI 0x00000000U +#define RCC_SAI2CLKSOURCE_PLLI2S ((uint32_t)RCC_DCKCFGR_SAI2SRC_0) +#define RCC_SAI2CLKSOURCE_PLLR ((uint32_t)RCC_DCKCFGR_SAI2SRC_1) +#define RCC_SAI2CLKSOURCE_PLLSRC ((uint32_t)RCC_DCKCFGR_SAI2SRC) +/** + * @} + */ + +/** @defgroup RCCEx_I2SAPB1_Clock_Source RCC I2S APB1 Clock Source + * @{ + */ +#define RCC_I2SAPB1CLKSOURCE_PLLI2S 0x00000000U +#define RCC_I2SAPB1CLKSOURCE_EXT ((uint32_t)RCC_DCKCFGR_I2S1SRC_0) +#define RCC_I2SAPB1CLKSOURCE_PLLR ((uint32_t)RCC_DCKCFGR_I2S1SRC_1) +#define RCC_I2SAPB1CLKSOURCE_PLLSRC ((uint32_t)RCC_DCKCFGR_I2S1SRC) +/** + * @} + */ + +/** @defgroup RCCEx_I2SAPB2_Clock_Source RCC I2S APB2 Clock Source + * @{ + */ +#define RCC_I2SAPB2CLKSOURCE_PLLI2S 0x00000000U +#define RCC_I2SAPB2CLKSOURCE_EXT ((uint32_t)RCC_DCKCFGR_I2S2SRC_0) +#define RCC_I2SAPB2CLKSOURCE_PLLR ((uint32_t)RCC_DCKCFGR_I2S2SRC_1) +#define RCC_I2SAPB2CLKSOURCE_PLLSRC ((uint32_t)RCC_DCKCFGR_I2S2SRC) +/** + * @} + */ + +/** @defgroup RCCEx_FMPI2C1_Clock_Source RCC FMPI2C1 Clock Source + * @{ + */ +#define RCC_FMPI2C1CLKSOURCE_PCLK1 0x00000000U +#define RCC_FMPI2C1CLKSOURCE_SYSCLK ((uint32_t)RCC_DCKCFGR2_FMPI2C1SEL_0) +#define RCC_FMPI2C1CLKSOURCE_HSI ((uint32_t)RCC_DCKCFGR2_FMPI2C1SEL_1) +/** + * @} + */ + +/** @defgroup RCCEx_CEC_Clock_Source RCC CEC Clock Source + * @{ + */ +#define RCC_CECCLKSOURCE_HSI 0x00000000U +#define RCC_CECCLKSOURCE_LSE ((uint32_t)RCC_DCKCFGR2_CECSEL) +/** + * @} + */ + +/** @defgroup RCCEx_CLK48_Clock_Source RCC CLK48 Clock Source + * @{ + */ +#define RCC_CLK48CLKSOURCE_PLLQ 0x00000000U +#define RCC_CLK48CLKSOURCE_PLLSAIP ((uint32_t)RCC_DCKCFGR2_CK48MSEL) +/** + * @} + */ + +/** @defgroup RCCEx_SDIO_Clock_Source RCC SDIO Clock Source + * @{ + */ +#define RCC_SDIOCLKSOURCE_CLK48 0x00000000U +#define RCC_SDIOCLKSOURCE_SYSCLK 
((uint32_t)RCC_DCKCFGR2_SDIOSEL) +/** + * @} + */ + +/** @defgroup RCCEx_SPDIFRX_Clock_Source RCC SPDIFRX Clock Source + * @{ + */ +#define RCC_SPDIFRXCLKSOURCE_PLLR 0x00000000U +#define RCC_SPDIFRXCLKSOURCE_PLLI2SP ((uint32_t)RCC_DCKCFGR2_SPDIFRXSEL) +/** + * @} + */ + +#endif /* STM32F446xx */ + +#if defined(STM32F413xx) || defined(STM32F423xx) +/** @defgroup RCCEx_SAI1_BlockA_Clock_Source RCC SAI BlockA Clock Source + * @{ + */ +#define RCC_SAIACLKSOURCE_PLLI2SR 0x00000000U +#define RCC_SAIACLKSOURCE_EXT ((uint32_t)RCC_DCKCFGR_SAI1ASRC_0) +#define RCC_SAIACLKSOURCE_PLLR ((uint32_t)RCC_DCKCFGR_SAI1ASRC_1) +#define RCC_SAIACLKSOURCE_PLLSRC ((uint32_t)RCC_DCKCFGR_SAI1ASRC_0 | RCC_DCKCFGR_SAI1ASRC_1) +/** + * @} + */ + +/** @defgroup RCCEx_SAI1_BlockB_Clock_Source RCC SAI BlockB Clock Source + * @{ + */ +#define RCC_SAIBCLKSOURCE_PLLI2SR 0x00000000U +#define RCC_SAIBCLKSOURCE_EXT ((uint32_t)RCC_DCKCFGR_SAI1BSRC_0) +#define RCC_SAIBCLKSOURCE_PLLR ((uint32_t)RCC_DCKCFGR_SAI1BSRC_1) +#define RCC_SAIBCLKSOURCE_PLLSRC ((uint32_t)RCC_DCKCFGR_SAI1BSRC_0 | RCC_DCKCFGR_SAI1BSRC_1) +/** + * @} + */ + +/** @defgroup RCCEx_LPTIM1_Clock_Source RCC LPTIM1 Clock Source + * @{ + */ +#define RCC_LPTIM1CLKSOURCE_PCLK1 0x00000000U +#define RCC_LPTIM1CLKSOURCE_HSI ((uint32_t)RCC_DCKCFGR2_LPTIM1SEL_0) +#define RCC_LPTIM1CLKSOURCE_LSI ((uint32_t)RCC_DCKCFGR2_LPTIM1SEL_1) +#define RCC_LPTIM1CLKSOURCE_LSE ((uint32_t)RCC_DCKCFGR2_LPTIM1SEL_0 | RCC_DCKCFGR2_LPTIM1SEL_1) +/** + * @} + */ + + +/** @defgroup RCCEx_DFSDM2_Audio_Clock_Source RCC DFSDM2 Audio Clock Source + * @{ + */ +#define RCC_DFSDM2AUDIOCLKSOURCE_I2S1 0x00000000U +#define RCC_DFSDM2AUDIOCLKSOURCE_I2S2 ((uint32_t)RCC_DCKCFGR_CKDFSDM2ASEL) +/** + * @} + */ + +/** @defgroup RCCEx_DFSDM2_Kernel_Clock_Source RCC DFSDM2 Kernel Clock Source + * @{ + */ +#define RCC_DFSDM2CLKSOURCE_PCLK2 0x00000000U +#define RCC_DFSDM2CLKSOURCE_SYSCLK ((uint32_t)RCC_DCKCFGR_CKDFSDM1SEL) +/** + * @} + */ + +#endif /* STM32F413xx || STM32F423xx */ + +#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +/** @defgroup RCCEx_PLL_I2S_Clock_Source PLL I2S Clock Source + * @{ + */ +#define RCC_PLLI2SCLKSOURCE_PLLSRC 0x00000000U +#define RCC_PLLI2SCLKSOURCE_EXT ((uint32_t)RCC_PLLI2SCFGR_PLLI2SSRC) +/** + * @} + */ + +/** @defgroup RCCEx_DFSDM1_Audio_Clock_Source RCC DFSDM1 Audio Clock Source + * @{ + */ +#define RCC_DFSDM1AUDIOCLKSOURCE_I2S1 0x00000000U +#define RCC_DFSDM1AUDIOCLKSOURCE_I2S2 ((uint32_t)RCC_DCKCFGR_CKDFSDM1ASEL) +/** + * @} + */ + +/** @defgroup RCCEx_DFSDM1_Kernel_Clock_Source RCC DFSDM1 Kernel Clock Source + * @{ + */ +#define RCC_DFSDM1CLKSOURCE_PCLK2 0x00000000U +#define RCC_DFSDM1CLKSOURCE_SYSCLK ((uint32_t)RCC_DCKCFGR_CKDFSDM1SEL) +/** + * @} + */ + +/** @defgroup RCCEx_I2SAPB1_Clock_Source RCC I2S APB1 Clock Source + * @{ + */ +#define RCC_I2SAPB1CLKSOURCE_PLLI2S 0x00000000U +#define RCC_I2SAPB1CLKSOURCE_EXT ((uint32_t)RCC_DCKCFGR_I2S1SRC_0) +#define RCC_I2SAPB1CLKSOURCE_PLLR ((uint32_t)RCC_DCKCFGR_I2S1SRC_1) +#define RCC_I2SAPB1CLKSOURCE_PLLSRC ((uint32_t)RCC_DCKCFGR_I2S1SRC) +/** + * @} + */ + +/** @defgroup RCCEx_I2SAPB2_Clock_Source RCC I2S APB2 Clock Source + * @{ + */ +#define RCC_I2SAPB2CLKSOURCE_PLLI2S 0x00000000U +#define RCC_I2SAPB2CLKSOURCE_EXT ((uint32_t)RCC_DCKCFGR_I2S2SRC_0) +#define RCC_I2SAPB2CLKSOURCE_PLLR ((uint32_t)RCC_DCKCFGR_I2S2SRC_1) +#define RCC_I2SAPB2CLKSOURCE_PLLSRC ((uint32_t)RCC_DCKCFGR_I2S2SRC) +/** + * @} + */ + +/** @defgroup 
RCCEx_FMPI2C1_Clock_Source RCC FMPI2C1 Clock Source + * @{ + */ +#define RCC_FMPI2C1CLKSOURCE_PCLK1 0x00000000U +#define RCC_FMPI2C1CLKSOURCE_SYSCLK ((uint32_t)RCC_DCKCFGR2_FMPI2C1SEL_0) +#define RCC_FMPI2C1CLKSOURCE_HSI ((uint32_t)RCC_DCKCFGR2_FMPI2C1SEL_1) +/** + * @} + */ + +/** @defgroup RCCEx_CLK48_Clock_Source RCC CLK48 Clock Source + * @{ + */ +#define RCC_CLK48CLKSOURCE_PLLQ 0x00000000U +#define RCC_CLK48CLKSOURCE_PLLI2SQ ((uint32_t)RCC_DCKCFGR2_CK48MSEL) +/** + * @} + */ + +/** @defgroup RCCEx_SDIO_Clock_Source RCC SDIO Clock Source + * @{ + */ +#define RCC_SDIOCLKSOURCE_CLK48 0x00000000U +#define RCC_SDIOCLKSOURCE_SYSCLK ((uint32_t)RCC_DCKCFGR2_SDIOSEL) +/** + * @} + */ +#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ + +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) + +/** @defgroup RCCEx_I2S_APB_Clock_Source RCC I2S APB Clock Source + * @{ + */ +#define RCC_I2SAPBCLKSOURCE_PLLR 0x00000000U +#define RCC_I2SAPBCLKSOURCE_EXT ((uint32_t)RCC_DCKCFGR_I2SSRC_0) +#define RCC_I2SAPBCLKSOURCE_PLLSRC ((uint32_t)RCC_DCKCFGR_I2SSRC_1) +/** + * @} + */ + +/** @defgroup RCCEx_FMPI2C1_Clock_Source RCC FMPI2C1 Clock Source + * @{ + */ +#define RCC_FMPI2C1CLKSOURCE_PCLK1 0x00000000U +#define RCC_FMPI2C1CLKSOURCE_SYSCLK ((uint32_t)RCC_DCKCFGR2_FMPI2C1SEL_0) +#define RCC_FMPI2C1CLKSOURCE_HSI ((uint32_t)RCC_DCKCFGR2_FMPI2C1SEL_1) +/** + * @} + */ + +/** @defgroup RCCEx_LPTIM1_Clock_Source RCC LPTIM1 Clock Source + * @{ + */ +#define RCC_LPTIM1CLKSOURCE_PCLK1 0x00000000U +#define RCC_LPTIM1CLKSOURCE_HSI ((uint32_t)RCC_DCKCFGR2_LPTIM1SEL_0) +#define RCC_LPTIM1CLKSOURCE_LSI ((uint32_t)RCC_DCKCFGR2_LPTIM1SEL_1) +#define RCC_LPTIM1CLKSOURCE_LSE ((uint32_t)RCC_DCKCFGR2_LPTIM1SEL_0 | RCC_DCKCFGR2_LPTIM1SEL_1) +/** + * @} + */ +#endif /* STM32F410Tx || STM32F410Cx || STM32F410Rx */ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) ||\ + defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F410Tx) || defined(STM32F410Cx) ||\ + defined(STM32F410Rx) || defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F469xx) ||\ + defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) ||\ + defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +/** @defgroup RCCEx_TIM_PRescaler_Selection RCC TIM PRescaler Selection + * @{ + */ +#define RCC_TIMPRES_DESACTIVATED ((uint8_t)0x00) +#define RCC_TIMPRES_ACTIVATED ((uint8_t)0x01) +/** + * @} + */ +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F401xC || STM32F401xE ||\ + STM32F410xx || STM32F411xE || STM32F446xx || STM32F469xx || STM32F479xx || STM32F412Zx ||\ + STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ + +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) || defined(STM32F411xE) ||\ + defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) ||\ + defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) ||\ + defined(STM32F423xx) +/** @defgroup RCCEx_LSE_Dual_Mode_Selection RCC LSE Dual Mode Selection + * @{ + */ +#define RCC_LSE_LOWPOWER_MODE ((uint8_t)0x00) +#define RCC_LSE_HIGHDRIVE_MODE ((uint8_t)0x01) +/** + * @} + */ +#endif /* STM32F410xx || STM32F411xE || STM32F446xx || STM32F469xx || STM32F479xx || STM32F412Zx || STM32F412Vx ||\ + STM32F412Rx || STM32F412Cx */ + +#if defined(STM32F405xx) || defined(STM32F415xx) || 
defined(STM32F407xx) || defined(STM32F417xx) || \ + defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || \ + defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) || defined(STM32F446xx) || \ + defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || \ + defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +/** @defgroup RCC_MCO2_Clock_Source MCO2 Clock Source + * @{ + */ +#define RCC_MCO2SOURCE_SYSCLK 0x00000000U +#define RCC_MCO2SOURCE_PLLI2SCLK RCC_CFGR_MCO2_0 +#define RCC_MCO2SOURCE_HSE RCC_CFGR_MCO2_1 +#define RCC_MCO2SOURCE_PLLCLK RCC_CFGR_MCO2 +/** + * @} + */ +#endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx || STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || + STM32F401xC || STM32F401xE || STM32F411xE || STM32F446xx || STM32F469xx || STM32F479xx || STM32F412Zx || STM32F412Vx || + STM32F412Rx || STM32F413xx | STM32F423xx */ + +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) +/** @defgroup RCC_MCO2_Clock_Source MCO2 Clock Source + * @{ + */ +#define RCC_MCO2SOURCE_SYSCLK 0x00000000U +#define RCC_MCO2SOURCE_I2SCLK RCC_CFGR_MCO2_0 +#define RCC_MCO2SOURCE_HSE RCC_CFGR_MCO2_1 +#define RCC_MCO2SOURCE_PLLCLK RCC_CFGR_MCO2 +/** + * @} + */ +#endif /* STM32F410Tx || STM32F410Cx || STM32F410Rx */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup RCCEx_Exported_Macros RCCEx Exported Macros + * @{ + */ +/*------------------- STM32F42xxx/STM32F43xxx/STM32F469xx/STM32F479xx --------*/ +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx)|| defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) +/** @defgroup RCCEx_AHB1_Clock_Enable_Disable AHB1 Peripheral Clock Enable Disable + * @brief Enables or disables the AHB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
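+  *         A typical (illustrative) usage pattern, combining a macro from this group
+  *         with the status macros defined further below:
+  * @code
+  *         __HAL_RCC_GPIOD_CLK_ENABLE();        // enable the clock (the macro performs a dummy read as delay)
+  *         if (__HAL_RCC_GPIOD_IS_CLK_ENABLED())
+  *         {
+  *           // GPIOD registers can now be accessed
+  *         }
+  * @endcode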
+ * @{ + */ +#define __HAL_RCC_BKPSRAM_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_BKPSRAMEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_BKPSRAMEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_CCMDATARAMEN_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_CRC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOD_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOE_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOI_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOIEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOF_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOG_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOJ_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOJEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOJEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOK_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOKEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOKEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_DMA2D_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA2DEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA2DEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_ETHMAC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_ETHMACTX_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACTXEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACTXEN);\ + 
UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_ETHMACRX_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACRXEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACRXEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_ETHMACPTP_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACPTPEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACPTPEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_USB_OTG_HS_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSULPIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSULPIEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOD_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIODEN)) +#define __HAL_RCC_GPIOE_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOEEN)) +#define __HAL_RCC_GPIOF_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOFEN)) +#define __HAL_RCC_GPIOG_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOGEN)) +#define __HAL_RCC_GPIOI_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOIEN)) +#define __HAL_RCC_GPIOJ_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOJEN)) +#define __HAL_RCC_GPIOK_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOKEN)) +#define __HAL_RCC_DMA2D_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_DMA2DEN)) +#define __HAL_RCC_ETHMAC_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_ETHMACEN)) +#define __HAL_RCC_ETHMACTX_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_ETHMACTXEN)) +#define __HAL_RCC_ETHMACRX_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_ETHMACRXEN)) +#define __HAL_RCC_ETHMACPTP_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_ETHMACPTPEN)) +#define __HAL_RCC_USB_OTG_HS_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_OTGHSEN)) +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_OTGHSULPIEN)) +#define __HAL_RCC_BKPSRAM_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_BKPSRAMEN)) +#define __HAL_RCC_CCMDATARAMEN_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_CCMDATARAMEN)) +#define __HAL_RCC_CRC_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_CRCEN)) + +/** + * @brief Enable ETHERNET clock. + */ +#define __HAL_RCC_ETH_CLK_ENABLE() do { \ + __HAL_RCC_ETHMAC_CLK_ENABLE(); \ + __HAL_RCC_ETHMACTX_CLK_ENABLE(); \ + __HAL_RCC_ETHMACRX_CLK_ENABLE(); \ + } while(0U) +/** + * @brief Disable ETHERNET clock. + */ +#define __HAL_RCC_ETH_CLK_DISABLE() do { \ + __HAL_RCC_ETHMACTX_CLK_DISABLE(); \ + __HAL_RCC_ETHMACRX_CLK_DISABLE(); \ + __HAL_RCC_ETHMAC_CLK_DISABLE(); \ + } while(0U) +/** + * @} + */ + +/** @defgroup RCCEx_AHB1_Peripheral_Clock_Enable_Disable_Status AHB1 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the AHB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
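+ * @note Illustrative sketch (example only): the status macros can guard a
+ * one-time enable, e.g.
+ * @code
+ * if (__HAL_RCC_DMA2D_IS_CLK_DISABLED())
+ * {
+ *   __HAL_RCC_DMA2D_CLK_ENABLE();   // enable the DMA2D clock only if it is still gated
+ * }
+ * @endcode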
+ * @{ + */ +#define __HAL_RCC_GPIOD_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) != RESET) +#define __HAL_RCC_GPIOE_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) != RESET) +#define __HAL_RCC_GPIOF_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOFEN)) != RESET) +#define __HAL_RCC_GPIOG_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOGEN)) != RESET) +#define __HAL_RCC_GPIOI_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOIEN)) != RESET) +#define __HAL_RCC_GPIOJ_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOJEN)) != RESET) +#define __HAL_RCC_GPIOK_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOKEN)) != RESET) +#define __HAL_RCC_DMA2D_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_DMA2DEN)) != RESET) +#define __HAL_RCC_ETHMAC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACEN)) != RESET) +#define __HAL_RCC_ETHMACTX_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACTXEN)) != RESET) +#define __HAL_RCC_ETHMACRX_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACRXEN)) != RESET) +#define __HAL_RCC_ETHMACPTP_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACPTPEN)) != RESET) +#define __HAL_RCC_USB_OTG_HS_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSEN)) != RESET) +#define __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSULPIEN)) != RESET) +#define __HAL_RCC_BKPSRAM_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_BKPSRAMEN)) != RESET) +#define __HAL_RCC_CCMDATARAMEN_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN)) != RESET) +#define __HAL_RCC_CRC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) != RESET) +#define __HAL_RCC_ETH_IS_CLK_ENABLED() (__HAL_RCC_ETHMAC_IS_CLK_ENABLED() && \ + __HAL_RCC_ETHMACTX_IS_CLK_ENABLED() && \ + __HAL_RCC_ETHMACRX_IS_CLK_ENABLED()) + +#define __HAL_RCC_GPIOD_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) == RESET) +#define __HAL_RCC_GPIOE_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) == RESET) +#define __HAL_RCC_GPIOF_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOFEN)) == RESET) +#define __HAL_RCC_GPIOG_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOGEN)) == RESET) +#define __HAL_RCC_GPIOI_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOIEN)) == RESET) +#define __HAL_RCC_GPIOJ_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOJEN)) == RESET) +#define __HAL_RCC_GPIOK_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOKEN)) == RESET) +#define __HAL_RCC_DMA2D_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_DMA2DEN)) == RESET) +#define __HAL_RCC_ETHMAC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACEN)) == RESET) +#define __HAL_RCC_ETHMACTX_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACTXEN)) == RESET) +#define __HAL_RCC_ETHMACRX_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACRXEN)) == RESET) +#define __HAL_RCC_ETHMACPTP_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACPTPEN)) == RESET) +#define __HAL_RCC_USB_OTG_HS_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSEN)) == RESET) +#define __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSULPIEN)) == RESET) +#define __HAL_RCC_BKPSRAM_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_BKPSRAMEN)) == RESET) +#define __HAL_RCC_CCMDATARAMEN_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN)) == RESET) +#define __HAL_RCC_CRC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) == RESET) +#define __HAL_RCC_ETH_IS_CLK_DISABLED() (__HAL_RCC_ETHMAC_IS_CLK_DISABLED() && \ + __HAL_RCC_ETHMACTX_IS_CLK_DISABLED() && \ + 
__HAL_RCC_ETHMACRX_IS_CLK_DISABLED())
+/**
+ * @}
+ */
+
+/** @defgroup RCCEx_AHB2_Clock_Enable_Disable AHB2 Peripheral Clock Enable Disable
+ * @brief Enable or disable the AHB2 peripheral clock.
+ * @note After reset, the peripheral clock (used for registers read/write access)
+ * is disabled and the application software has to enable this clock before
+ * using it.
+ * @{
+ */
+#define __HAL_RCC_DCMI_CLK_ENABLE() do { \
+ __IO uint32_t tmpreg = 0x00U; \
+ SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_DCMIEN);\
+ /* Delay after an RCC peripheral clock enabling */ \
+ tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_DCMIEN);\
+ UNUSED(tmpreg); \
+ } while(0U)
+#define __HAL_RCC_DCMI_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_DCMIEN))
+
+#if defined(STM32F437xx)|| defined(STM32F439xx) || defined(STM32F479xx)
+#define __HAL_RCC_CRYP_CLK_ENABLE() do { \
+ __IO uint32_t tmpreg = 0x00U; \
+ SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_CRYPEN);\
+ /* Delay after an RCC peripheral clock enabling */ \
+ tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_CRYPEN);\
+ UNUSED(tmpreg); \
+ } while(0U)
+#define __HAL_RCC_HASH_CLK_ENABLE() do { \
+ __IO uint32_t tmpreg = 0x00U; \
+ SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_HASHEN);\
+ /* Delay after an RCC peripheral clock enabling */ \
+ tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_HASHEN);\
+ UNUSED(tmpreg); \
+ } while(0U)
+
+#define __HAL_RCC_CRYP_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_CRYPEN))
+#define __HAL_RCC_HASH_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_HASHEN))
+#endif /* STM32F437xx || STM32F439xx || STM32F479xx */
+
+#define __HAL_RCC_USB_OTG_FS_CLK_ENABLE() do {(RCC->AHB2ENR |= (RCC_AHB2ENR_OTGFSEN));\
+ __HAL_RCC_SYSCFG_CLK_ENABLE();\
+ }while(0U)
+
+#define __HAL_RCC_USB_OTG_FS_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_OTGFSEN))
+
+#define __HAL_RCC_RNG_CLK_ENABLE() do { \
+ __IO uint32_t tmpreg = 0x00U; \
+ SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\
+ /* Delay after an RCC peripheral clock enabling */ \
+ tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\
+ UNUSED(tmpreg); \
+ } while(0U)
+#define __HAL_RCC_RNG_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_RNGEN))
+/**
+ * @}
+ */
+
+/** @defgroup RCCEx_AHB2_Peripheral_Clock_Enable_Disable_Status AHB2 Peripheral Clock Enable Disable Status
+ * @brief Get the enable or disable status of the AHB2 peripheral clock.
+ * @note After reset, the peripheral clock (used for registers read/write access)
+ * is disabled and the application software has to enable this clock before
+ * using it.
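+ * @note Illustrative sketch (example only; assumes the RNG HAL module is part of
+ * the build and hrng is an application-defined handle):
+ * @code
+ * __HAL_RCC_RNG_CLK_ENABLE();       // AHB2 clock must be on before RNG registers are used
+ * if (__HAL_RCC_RNG_IS_CLK_ENABLED())
+ * {
+ *   hrng.Instance = RNG;
+ *   (void)HAL_RNG_Init(&hrng);
+ * }
+ * @endcode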
+ * @{ + */ +#define __HAL_RCC_DCMI_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_DCMIEN)) != RESET) +#define __HAL_RCC_DCMI_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_DCMIEN)) == RESET) + +#if defined(STM32F437xx)|| defined(STM32F439xx) || defined(STM32F479xx) +#define __HAL_RCC_CRYP_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_CRYPEN)) != RESET) +#define __HAL_RCC_CRYP_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_CRYPEN)) == RESET) + +#define __HAL_RCC_HASH_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_HASHEN)) != RESET) +#define __HAL_RCC_HASH_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_HASHEN)) == RESET) +#endif /* STM32F437xx || STM32F439xx || STM32F479xx */ + +#define __HAL_RCC_USB_OTG_FS_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) != RESET) +#define __HAL_RCC_USB_OTG_FS_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) == RESET) + +#define __HAL_RCC_RNG_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) != RESET) +#define __HAL_RCC_RNG_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) == RESET) +/** + * @} + */ + +/** @defgroup RCCEx_AHB3_Clock_Enable_Disable AHB3 Peripheral Clock Enable Disable + * @brief Enables or disables the AHB3 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_FMC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FMCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FMCEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_FMC_CLK_DISABLE() (RCC->AHB3ENR &= ~(RCC_AHB3ENR_FMCEN)) +#if defined(STM32F469xx) || defined(STM32F479xx) +#define __HAL_RCC_QSPI_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_QSPIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_QSPIEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_QSPI_CLK_DISABLE() (RCC->AHB3ENR &= ~(RCC_AHB3ENR_QSPIEN)) +#endif /* STM32F469xx || STM32F479xx */ +/** + * @} + */ + + +/** @defgroup RCCEx_AHB3_Peripheral_Clock_Enable_Disable_Status AHB3 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the AHB3 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_FMC_IS_CLK_ENABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_FMCEN)) != RESET) +#define __HAL_RCC_FMC_IS_CLK_DISABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_FMCEN)) == RESET) +#if defined(STM32F469xx) || defined(STM32F479xx) +#define __HAL_RCC_QSPI_IS_CLK_ENABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_QSPIEN)) != RESET) +#define __HAL_RCC_QSPI_IS_CLK_DISABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_QSPIEN)) == RESET) +#endif /* STM32F469xx || STM32F479xx */ +/** + * @} + */ + +/** @defgroup RCCEx_APB1_Clock_Enable_Disable APB1 Peripheral Clock Enable Disable + * @brief Enable or disable the Low Speed APB (APB1) peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
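+ * @note Illustrative sketch (example only; USART3 is an arbitrary APB1 peripheral
+ * and huart3 is an application-defined handle):
+ * @code
+ * UART_HandleTypeDef huart3 = {0};
+ * __HAL_RCC_USART3_CLK_ENABLE();    // APB1 clock must be on before USART3 registers are written
+ * huart3.Instance = USART3;
+ * huart3.Init.BaudRate = 115200U;
+ * huart3.Init.WordLength = UART_WORDLENGTH_8B;
+ * huart3.Init.StopBits = UART_STOPBITS_1;
+ * huart3.Init.Parity = UART_PARITY_NONE;
+ * huart3.Init.Mode = UART_MODE_TX_RX;
+ * huart3.Init.HwFlowCtl = UART_HWCONTROL_NONE;
+ * huart3.Init.OverSampling = UART_OVERSAMPLING_16;
+ * (void)HAL_UART_Init(&huart3);
+ * @endcode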
+ * @{
+ */
+#define __HAL_RCC_TIM6_CLK_ENABLE() do { \
+ __IO uint32_t tmpreg = 0x00U; \
+ SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\
+ /* Delay after an RCC peripheral clock enabling */ \
+ tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\
+ UNUSED(tmpreg); \
+ } while(0U)
+#define __HAL_RCC_TIM7_CLK_ENABLE() do { \
+ __IO uint32_t tmpreg = 0x00U; \
+ SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\
+ /* Delay after an RCC peripheral clock enabling */ \
+ tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\
+ UNUSED(tmpreg); \
+ } while(0U)
+#define __HAL_RCC_TIM12_CLK_ENABLE() do { \
+ __IO uint32_t tmpreg = 0x00U; \
+ SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\
+ /* Delay after an RCC peripheral clock enabling */ \
+ tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\
+ UNUSED(tmpreg); \
+ } while(0U)
+#define __HAL_RCC_TIM13_CLK_ENABLE() do { \
+ __IO uint32_t tmpreg = 0x00U; \
+ SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\
+ /* Delay after an RCC peripheral clock enabling */ \
+ tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\
+ UNUSED(tmpreg); \
+ } while(0U)
+#define __HAL_RCC_TIM14_CLK_ENABLE() do { \
+ __IO uint32_t tmpreg = 0x00U; \
+ SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\
+ /* Delay after an RCC peripheral clock enabling */ \
+ tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\
+ UNUSED(tmpreg); \
+ } while(0U)
+#define __HAL_RCC_USART3_CLK_ENABLE() do { \
+ __IO uint32_t tmpreg = 0x00U; \
+ SET_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\
+ /* Delay after an RCC peripheral clock enabling */ \
+ tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\
+ UNUSED(tmpreg); \
+ } while(0U)
+#define __HAL_RCC_UART4_CLK_ENABLE() do { \
+ __IO uint32_t tmpreg = 0x00U; \
+ SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\
+ /* Delay after an RCC peripheral clock enabling */ \
+ tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\
+ UNUSED(tmpreg); \
+ } while(0U)
+#define __HAL_RCC_UART5_CLK_ENABLE() do { \
+ __IO uint32_t tmpreg = 0x00U; \
+ SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\
+ /* Delay after an RCC peripheral clock enabling */ \
+ tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\
+ UNUSED(tmpreg); \
+ } while(0U)
+#define __HAL_RCC_CAN1_CLK_ENABLE() do { \
+ __IO uint32_t tmpreg = 0x00U; \
+ SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\
+ /* Delay after an RCC peripheral clock enabling */ \
+ tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\
+ UNUSED(tmpreg); \
+ } while(0U)
+#define __HAL_RCC_CAN2_CLK_ENABLE() do { \
+ __IO uint32_t tmpreg = 0x00U; \
+ SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\
+ /* Delay after an RCC peripheral clock enabling */ \
+ tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\
+ UNUSED(tmpreg); \
+ } while(0U)
+#define __HAL_RCC_DAC_CLK_ENABLE() do { \
+ __IO uint32_t tmpreg = 0x00U; \
+ SET_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\
+ /* Delay after an RCC peripheral clock enabling */ \
+ tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\
+ UNUSED(tmpreg); \
+ } while(0U)
+#define __HAL_RCC_UART7_CLK_ENABLE() do { \
+ __IO uint32_t tmpreg = 0x00U; \
+ SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART7EN);\
+ /* Delay after an RCC peripheral clock enabling */ \
+ tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART7EN);\
+ UNUSED(tmpreg); \
+ } while(0U)
+#define
__HAL_RCC_UART8_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART8EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART8EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM4_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SPI3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_I2C3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM2EN)) +#define __HAL_RCC_TIM3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM3EN)) +#define __HAL_RCC_TIM4_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM4EN)) +#define __HAL_RCC_SPI3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_SPI3EN)) +#define __HAL_RCC_I2C3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_I2C3EN)) +#define __HAL_RCC_TIM6_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM6EN)) +#define __HAL_RCC_TIM7_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM7EN)) +#define __HAL_RCC_TIM12_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM12EN)) +#define __HAL_RCC_TIM13_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM13EN)) +#define __HAL_RCC_TIM14_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM14EN)) +#define __HAL_RCC_USART3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_USART3EN)) +#define __HAL_RCC_UART4_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_UART4EN)) +#define __HAL_RCC_UART5_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_UART5EN)) +#define __HAL_RCC_CAN1_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_CAN1EN)) +#define __HAL_RCC_CAN2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_CAN2EN)) +#define __HAL_RCC_DAC_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_DACEN)) +#define __HAL_RCC_UART7_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_UART7EN)) +#define __HAL_RCC_UART8_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_UART8EN)) +/** + * @} + */ + +/** @defgroup RCCEx_APB1_Peripheral_Clock_Enable_Disable_Status APB1 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the APB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ * @{ + */ +#define __HAL_RCC_TIM2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) != RESET) +#define __HAL_RCC_TIM3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) != RESET) +#define __HAL_RCC_TIM4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) != RESET) +#define __HAL_RCC_SPI3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) != RESET) +#define __HAL_RCC_I2C3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) != RESET) +#define __HAL_RCC_TIM6_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) != RESET) +#define __HAL_RCC_TIM7_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM7EN)) != RESET) +#define __HAL_RCC_TIM12_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM12EN)) != RESET) +#define __HAL_RCC_TIM13_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM13EN)) != RESET) +#define __HAL_RCC_TIM14_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM14EN)) != RESET) +#define __HAL_RCC_USART3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART3EN)) != RESET) +#define __HAL_RCC_UART4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART4EN)) != RESET) +#define __HAL_RCC_UART5_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART5EN)) != RESET) +#define __HAL_RCC_CAN1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN1EN)) != RESET) +#define __HAL_RCC_CAN2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN2EN)) != RESET) +#define __HAL_RCC_DAC_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) != RESET) +#define __HAL_RCC_UART7_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART7EN)) != RESET) +#define __HAL_RCC_UART8_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART8EN)) != RESET) + +#define __HAL_RCC_TIM2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) == RESET) +#define __HAL_RCC_TIM3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) == RESET) +#define __HAL_RCC_TIM4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) == RESET) +#define __HAL_RCC_SPI3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) == RESET) +#define __HAL_RCC_I2C3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) == RESET) +#define __HAL_RCC_TIM6_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) == RESET) +#define __HAL_RCC_TIM7_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM7EN)) == RESET) +#define __HAL_RCC_TIM12_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM12EN)) == RESET) +#define __HAL_RCC_TIM13_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM13EN)) == RESET) +#define __HAL_RCC_TIM14_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM14EN)) == RESET) +#define __HAL_RCC_USART3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART3EN)) == RESET) +#define __HAL_RCC_UART4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART4EN)) == RESET) +#define __HAL_RCC_UART5_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART5EN)) == RESET) +#define __HAL_RCC_CAN1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN1EN)) == RESET) +#define __HAL_RCC_CAN2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN2EN)) == RESET) +#define __HAL_RCC_DAC_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) == RESET) +#define __HAL_RCC_UART7_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART7EN)) == RESET) +#define __HAL_RCC_UART8_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART8EN)) == RESET) +/** + * @} + */ + +/** @defgroup RCCEx_APB2_Clock_Enable_Disable APB2 Peripheral Clock Enable Disable + * @brief Enable or disable the High Speed APB (APB2) peripheral clock. 
+ * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_TIM8_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_ADC2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC2EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_ADC3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC3EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SPI5_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SPI6_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI6EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI6EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SAI1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI1EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SDIO_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SPI4_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM10_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SDIO_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SDIOEN)) +#define __HAL_RCC_SPI4_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI4EN)) +#define __HAL_RCC_TIM10_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM10EN)) +#define __HAL_RCC_TIM8_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM8EN)) +#define __HAL_RCC_ADC2_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_ADC2EN)) +#define __HAL_RCC_ADC3_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_ADC3EN)) +#define __HAL_RCC_SPI5_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI5EN)) +#define __HAL_RCC_SPI6_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI6EN)) +#define __HAL_RCC_SAI1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SAI1EN)) + +#if defined(STM32F429xx)|| defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) +#define __HAL_RCC_LTDC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_LTDCEN);\ + /* Delay after 
an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_LTDCEN);\ + UNUSED(tmpreg); \ + } while(0U) + +#define __HAL_RCC_LTDC_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_LTDCEN)) +#endif /* STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ + +#if defined(STM32F469xx) || defined(STM32F479xx) +#define __HAL_RCC_DSI_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_DSIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_DSIEN);\ + UNUSED(tmpreg); \ + } while(0U) + +#define __HAL_RCC_DSI_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_DSIEN)) +#endif /* STM32F469xx || STM32F479xx */ +/** + * @} + */ + +/** @defgroup RCCEx_APB2_Peripheral_Clock_Enable_Disable_Status APB2 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the APB2 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_TIM8_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM8EN)) != RESET) +#define __HAL_RCC_ADC2_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC2EN)) != RESET) +#define __HAL_RCC_ADC3_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC3EN)) != RESET) +#define __HAL_RCC_SPI5_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI5EN)) != RESET) +#define __HAL_RCC_SPI6_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI6EN)) != RESET) +#define __HAL_RCC_SAI1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI1EN)) != RESET) +#define __HAL_RCC_SDIO_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) != RESET) +#define __HAL_RCC_SPI4_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) != RESET) +#define __HAL_RCC_TIM10_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN))!= RESET) + +#define __HAL_RCC_SDIO_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) == RESET) +#define __HAL_RCC_SPI4_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) == RESET) +#define __HAL_RCC_TIM10_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN))== RESET) +#define __HAL_RCC_TIM8_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM8EN)) == RESET) +#define __HAL_RCC_ADC2_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC2EN)) == RESET) +#define __HAL_RCC_ADC3_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC3EN)) == RESET) +#define __HAL_RCC_SPI5_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI5EN)) == RESET) +#define __HAL_RCC_SPI6_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI6EN)) == RESET) +#define __HAL_RCC_SAI1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI1EN)) == RESET) + +#if defined(STM32F429xx)|| defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) +#define __HAL_RCC_LTDC_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_LTDCEN)) != RESET) +#define __HAL_RCC_LTDC_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_LTDCEN)) == RESET) +#endif /* STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ + +#if defined(STM32F469xx) || defined(STM32F479xx) +#define __HAL_RCC_DSI_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_DSIEN)) != RESET) +#define __HAL_RCC_DSI_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_DSIEN)) == RESET) +#endif /* STM32F469xx || STM32F479xx */ +/** + * @} + */ + +/** @defgroup RCCEx_AHB1_Force_Release_Reset AHB1 Force Release Reset + * @brief Force or release AHB1 peripheral reset. 
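+ * @note Illustrative sketch (example only): a peripheral is put back to its reset
+ * state by forcing and then releasing its reset line, e.g. for DMA2D:
+ * @code
+ * __HAL_RCC_DMA2D_FORCE_RESET();
+ * __HAL_RCC_DMA2D_RELEASE_RESET();  // DMA2D registers now hold their reset values
+ * @endcode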
+ * @{ + */ +#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0x22E017FFU) +#define __HAL_RCC_GPIOD_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIODRST)) +#define __HAL_RCC_GPIOE_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOERST)) +#define __HAL_RCC_GPIOF_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOFRST)) +#define __HAL_RCC_GPIOG_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOGRST)) +#define __HAL_RCC_GPIOI_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOIRST)) +#define __HAL_RCC_ETHMAC_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_ETHMACRST)) +#define __HAL_RCC_USB_OTG_HS_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_OTGHRST)) +#define __HAL_RCC_GPIOJ_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOJRST)) +#define __HAL_RCC_GPIOK_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOKRST)) +#define __HAL_RCC_DMA2D_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_DMA2DRST)) +#define __HAL_RCC_CRC_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_CRCRST)) + +#define __HAL_RCC_GPIOD_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIODRST)) +#define __HAL_RCC_GPIOE_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOERST)) +#define __HAL_RCC_GPIOF_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOFRST)) +#define __HAL_RCC_GPIOG_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOGRST)) +#define __HAL_RCC_GPIOI_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOIRST)) +#define __HAL_RCC_ETHMAC_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_ETHMACRST)) +#define __HAL_RCC_USB_OTG_HS_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_OTGHRST)) +#define __HAL_RCC_GPIOJ_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOJRST)) +#define __HAL_RCC_GPIOK_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOKRST)) +#define __HAL_RCC_DMA2D_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_DMA2DRST)) +#define __HAL_RCC_CRC_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_CRCRST)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB2_Force_Release_Reset AHB2 Force Release Reset + * @brief Force or release AHB2 peripheral reset. 
+ * @{ + */ +#if defined(STM32F427xx) || defined(STM32F429xx) || defined(STM32F469xx) +#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0x000000C1U) +#endif /* STM32F427xx || STM32F429xx || STM32F469xx */ +#if defined(STM32F437xx) || defined(STM32F439xx) || defined(STM32F479xx) +#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0x000000F1U) +#endif /* STM32F437xx || STM32F439xx || STM32F479xx */ +#define __HAL_RCC_USB_OTG_FS_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_OTGFSRST)) +#define __HAL_RCC_RNG_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_RNGRST)) +#define __HAL_RCC_DCMI_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_DCMIRST)) + +#define __HAL_RCC_AHB2_RELEASE_RESET() (RCC->AHB2RSTR = 0x00U) +#define __HAL_RCC_USB_OTG_FS_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_OTGFSRST)) +#define __HAL_RCC_RNG_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_RNGRST)) +#define __HAL_RCC_DCMI_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_DCMIRST)) + +#if defined(STM32F437xx)|| defined(STM32F439xx) || defined(STM32F479xx) +#define __HAL_RCC_CRYP_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_CRYPRST)) +#define __HAL_RCC_HASH_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_HASHRST)) + +#define __HAL_RCC_CRYP_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_CRYPRST)) +#define __HAL_RCC_HASH_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_HASHRST)) +#endif /* STM32F437xx || STM32F439xx || STM32F479xx */ +/** + * @} + */ + +/** @defgroup RCCEx_AHB3_Force_Release_Reset AHB3 Force Release Reset + * @brief Force or release AHB3 peripheral reset. + * @{ + */ +#if defined(STM32F427xx) || defined(STM32F429xx) || defined(STM32F437xx) || defined(STM32F439xx) +#define __HAL_RCC_AHB3_FORCE_RESET() (RCC->AHB3RSTR = 0x00000001U) +#endif /* STM32F427xx || STM32F429xx || STM32F437xx || STM32F439xx */ +#if defined(STM32F469xx) || defined(STM32F479xx) +#define __HAL_RCC_AHB3_FORCE_RESET() (RCC->AHB3RSTR = 0x00000003U) +#endif /* STM32F469xx || STM32F479xx */ +#define __HAL_RCC_AHB3_RELEASE_RESET() (RCC->AHB3RSTR = 0x00U) +#define __HAL_RCC_FMC_FORCE_RESET() (RCC->AHB3RSTR |= (RCC_AHB3RSTR_FMCRST)) +#define __HAL_RCC_FMC_RELEASE_RESET() (RCC->AHB3RSTR &= ~(RCC_AHB3RSTR_FMCRST)) + +#if defined(STM32F469xx) || defined(STM32F479xx) +#define __HAL_RCC_QSPI_FORCE_RESET() (RCC->AHB3RSTR |= (RCC_AHB3RSTR_QSPIRST)) +#define __HAL_RCC_QSPI_RELEASE_RESET() (RCC->AHB3RSTR &= ~(RCC_AHB3RSTR_QSPIRST)) +#endif /* STM32F469xx || STM32F479xx */ +/** + * @} + */ + +/** @defgroup RCCEx_APB1_Force_Release_Reset APB1 Force Release Reset + * @brief Force or release APB1 peripheral reset. 
+ * @{ + */ +#define __HAL_RCC_APB1_FORCE_RESET() (RCC->APB1RSTR = 0xF6FEC9FFU) +#define __HAL_RCC_TIM6_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM6RST)) +#define __HAL_RCC_TIM7_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM7RST)) +#define __HAL_RCC_TIM12_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM12RST)) +#define __HAL_RCC_TIM13_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM13RST)) +#define __HAL_RCC_TIM14_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM14RST)) +#define __HAL_RCC_USART3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_USART3RST)) +#define __HAL_RCC_UART4_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART4RST)) +#define __HAL_RCC_UART5_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART5RST)) +#define __HAL_RCC_CAN1_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_CAN1RST)) +#define __HAL_RCC_CAN2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_CAN2RST)) +#define __HAL_RCC_DAC_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_DACRST)) +#define __HAL_RCC_UART7_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART7RST)) +#define __HAL_RCC_UART8_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART8RST)) +#define __HAL_RCC_TIM2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM2RST)) +#define __HAL_RCC_TIM3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM3RST)) +#define __HAL_RCC_TIM4_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM4RST)) +#define __HAL_RCC_SPI3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_SPI3RST)) +#define __HAL_RCC_I2C3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_I2C3RST)) + +#define __HAL_RCC_TIM2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM2RST)) +#define __HAL_RCC_TIM3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM3RST)) +#define __HAL_RCC_TIM4_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM4RST)) +#define __HAL_RCC_SPI3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_SPI3RST)) +#define __HAL_RCC_I2C3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_I2C3RST)) +#define __HAL_RCC_TIM6_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM6RST)) +#define __HAL_RCC_TIM7_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM7RST)) +#define __HAL_RCC_TIM12_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM12RST)) +#define __HAL_RCC_TIM13_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM13RST)) +#define __HAL_RCC_TIM14_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM14RST)) +#define __HAL_RCC_USART3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_USART3RST)) +#define __HAL_RCC_UART4_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART4RST)) +#define __HAL_RCC_UART5_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART5RST)) +#define __HAL_RCC_CAN1_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_CAN1RST)) +#define __HAL_RCC_CAN2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_CAN2RST)) +#define __HAL_RCC_DAC_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_DACRST)) +#define __HAL_RCC_UART7_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART7RST)) +#define __HAL_RCC_UART8_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART8RST)) +/** + * @} + */ + +/** @defgroup RCCEx_APB2_Force_Release_Reset APB2 Force Release Reset + * @brief Force or release APB2 peripheral reset. 
+ * @{ + */ +#if defined(STM32F469xx) || defined(STM32F479xx) +#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0x0C777933U) +#endif /* STM32F469xx || STM32F479xx */ +#if defined(STM32F429xx) || defined(STM32F439xx) +#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0x04777933U) +#endif /* STM32F429xx || STM32F439xx */ +#if defined(STM32F427xx) || defined(STM32F437xx) +#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0x00777933U) +#endif /* STM32F427xx || STM32F437xx */ +#define __HAL_RCC_TIM8_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM8RST)) +#define __HAL_RCC_SPI5_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI5RST)) +#define __HAL_RCC_SPI6_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI6RST)) +#define __HAL_RCC_SAI1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SAI1RST)) +#define __HAL_RCC_SDIO_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SDIORST)) +#define __HAL_RCC_SPI4_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI4RST)) +#define __HAL_RCC_TIM10_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM10RST)) + +#define __HAL_RCC_SDIO_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SDIORST)) +#define __HAL_RCC_SPI4_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI4RST)) +#define __HAL_RCC_TIM10_RELEASE_RESET()(RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM10RST)) +#define __HAL_RCC_TIM8_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM8RST)) +#define __HAL_RCC_SPI5_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI5RST)) +#define __HAL_RCC_SPI6_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI6RST)) +#define __HAL_RCC_SAI1_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SAI1RST)) + +#if defined(STM32F429xx)|| defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) +#define __HAL_RCC_LTDC_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_LTDCRST)) +#define __HAL_RCC_LTDC_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_LTDCRST)) +#endif /* STM32F429xx|| STM32F439xx || STM32F469xx || STM32F479xx */ + +#if defined(STM32F469xx) || defined(STM32F479xx) +#define __HAL_RCC_DSI_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_DSIRST)) +#define __HAL_RCC_DSI_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_DSIRST)) +#endif /* STM32F469xx || STM32F479xx */ +/** + * @} + */ + +/** @defgroup RCCEx_AHB1_LowPower_Enable_Disable AHB1 Peripheral Low Power Enable Disable + * @brief Enable or disable the AHB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. 
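+ * @note Illustrative sketch (example only): clocks that are not needed while the
+ * core sleeps can be gated before entering SLEEP mode, e.g.
+ * @code
+ * __HAL_RCC_GPIOG_CLK_SLEEP_DISABLE();     // GPIOG not needed during SLEEP
+ * __HAL_RCC_ETHMAC_CLK_SLEEP_DISABLE();    // Ethernet MAC idle during SLEEP
+ * HAL_PWR_EnterSLEEPMode(PWR_MAINREGULATOR_ON, PWR_SLEEPENTRY_WFI);
+ * @endcode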
+ * @{ + */ +#define __HAL_RCC_GPIOD_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIODLPEN)) +#define __HAL_RCC_GPIOE_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOELPEN)) +#define __HAL_RCC_GPIOF_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOFLPEN)) +#define __HAL_RCC_GPIOG_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOGLPEN)) +#define __HAL_RCC_GPIOI_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOILPEN)) +#define __HAL_RCC_SRAM2_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_SRAM2LPEN)) +#define __HAL_RCC_ETHMAC_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_ETHMACLPEN)) +#define __HAL_RCC_ETHMACTX_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_ETHMACTXLPEN)) +#define __HAL_RCC_ETHMACRX_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_ETHMACRXLPEN)) +#define __HAL_RCC_ETHMACPTP_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_ETHMACPTPLPEN)) +#define __HAL_RCC_USB_OTG_HS_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_OTGHSLPEN)) +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_OTGHSULPILPEN)) +#define __HAL_RCC_GPIOJ_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOJLPEN)) +#define __HAL_RCC_GPIOK_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOKLPEN)) +#define __HAL_RCC_SRAM3_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_SRAM3LPEN)) +#define __HAL_RCC_DMA2D_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_DMA2DLPEN)) +#define __HAL_RCC_CRC_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_CRCLPEN)) +#define __HAL_RCC_FLITF_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_FLITFLPEN)) +#define __HAL_RCC_SRAM1_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_SRAM1LPEN)) +#define __HAL_RCC_BKPSRAM_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_BKPSRAMLPEN)) + +#define __HAL_RCC_GPIOD_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIODLPEN)) +#define __HAL_RCC_GPIOE_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOELPEN)) +#define __HAL_RCC_GPIOF_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOFLPEN)) +#define __HAL_RCC_GPIOG_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOGLPEN)) +#define __HAL_RCC_GPIOI_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOILPEN)) +#define __HAL_RCC_SRAM2_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_SRAM2LPEN)) +#define __HAL_RCC_ETHMAC_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_ETHMACLPEN)) +#define __HAL_RCC_ETHMACTX_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_ETHMACTXLPEN)) +#define __HAL_RCC_ETHMACRX_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_ETHMACRXLPEN)) +#define __HAL_RCC_ETHMACPTP_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_ETHMACPTPLPEN)) +#define __HAL_RCC_USB_OTG_HS_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_OTGHSLPEN)) +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_OTGHSULPILPEN)) +#define __HAL_RCC_GPIOJ_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOJLPEN)) +#define __HAL_RCC_GPIOK_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOKLPEN)) +#define __HAL_RCC_DMA2D_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_DMA2DLPEN)) +#define __HAL_RCC_CRC_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_CRCLPEN)) +#define __HAL_RCC_FLITF_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_FLITFLPEN)) +#define __HAL_RCC_SRAM1_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_SRAM1LPEN)) 
+#define __HAL_RCC_BKPSRAM_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_BKPSRAMLPEN)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB2_LowPower_Enable_Disable AHB2 Peripheral Low Power Enable Disable + * @brief Enable or disable the AHB2 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wake-up from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_USB_OTG_FS_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_OTGFSLPEN)) +#define __HAL_RCC_USB_OTG_FS_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_OTGFSLPEN)) + +#define __HAL_RCC_RNG_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_RNGLPEN)) +#define __HAL_RCC_RNG_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_RNGLPEN)) + +#define __HAL_RCC_DCMI_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_DCMILPEN)) +#define __HAL_RCC_DCMI_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_DCMILPEN)) + +#if defined(STM32F437xx)|| defined(STM32F439xx) || defined(STM32F479xx) +#define __HAL_RCC_CRYP_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_CRYPLPEN)) +#define __HAL_RCC_HASH_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_HASHLPEN)) + +#define __HAL_RCC_CRYP_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_CRYPLPEN)) +#define __HAL_RCC_HASH_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_HASHLPEN)) +#endif /* STM32F437xx || STM32F439xx || STM32F479xx */ +/** + * @} + */ + +/** @defgroup RCCEx_AHB3_LowPower_Enable_Disable AHB3 Peripheral Low Power Enable Disable + * @brief Enable or disable the AHB3 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_FMC_CLK_SLEEP_ENABLE() (RCC->AHB3LPENR |= (RCC_AHB3LPENR_FMCLPEN)) +#define __HAL_RCC_FMC_CLK_SLEEP_DISABLE() (RCC->AHB3LPENR &= ~(RCC_AHB3LPENR_FMCLPEN)) + +#if defined(STM32F469xx) || defined(STM32F479xx) +#define __HAL_RCC_QSPI_CLK_SLEEP_ENABLE() (RCC->AHB3LPENR |= (RCC_AHB3LPENR_QSPILPEN)) +#define __HAL_RCC_QSPI_CLK_SLEEP_DISABLE() (RCC->AHB3LPENR &= ~(RCC_AHB3LPENR_QSPILPEN)) +#endif /* STM32F469xx || STM32F479xx */ +/** + * @} + */ + +/** @defgroup RCCEx_APB1_LowPower_Enable_Disable APB1 Peripheral Low Power Enable Disable + * @brief Enable or disable the APB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. 
+ * @{ + */ +#define __HAL_RCC_TIM6_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM6LPEN)) +#define __HAL_RCC_TIM7_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM7LPEN)) +#define __HAL_RCC_TIM12_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM12LPEN)) +#define __HAL_RCC_TIM13_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM13LPEN)) +#define __HAL_RCC_TIM14_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM14LPEN)) +#define __HAL_RCC_USART3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_USART3LPEN)) +#define __HAL_RCC_UART4_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART4LPEN)) +#define __HAL_RCC_UART5_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART5LPEN)) +#define __HAL_RCC_CAN1_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_CAN1LPEN)) +#define __HAL_RCC_CAN2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_CAN2LPEN)) +#define __HAL_RCC_DAC_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_DACLPEN)) +#define __HAL_RCC_UART7_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART7LPEN)) +#define __HAL_RCC_UART8_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART8LPEN)) +#define __HAL_RCC_TIM2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM2LPEN)) +#define __HAL_RCC_TIM3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM3LPEN)) +#define __HAL_RCC_TIM4_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM4LPEN)) +#define __HAL_RCC_SPI3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_SPI3LPEN)) +#define __HAL_RCC_I2C3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_I2C3LPEN)) + +#define __HAL_RCC_TIM2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM2LPEN)) +#define __HAL_RCC_TIM3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM3LPEN)) +#define __HAL_RCC_TIM4_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM4LPEN)) +#define __HAL_RCC_SPI3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_SPI3LPEN)) +#define __HAL_RCC_I2C3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_I2C3LPEN)) +#define __HAL_RCC_TIM6_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM6LPEN)) +#define __HAL_RCC_TIM7_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM7LPEN)) +#define __HAL_RCC_TIM12_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM12LPEN)) +#define __HAL_RCC_TIM13_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM13LPEN)) +#define __HAL_RCC_TIM14_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM14LPEN)) +#define __HAL_RCC_USART3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_USART3LPEN)) +#define __HAL_RCC_UART4_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART4LPEN)) +#define __HAL_RCC_UART5_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART5LPEN)) +#define __HAL_RCC_CAN1_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_CAN1LPEN)) +#define __HAL_RCC_CAN2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_CAN2LPEN)) +#define __HAL_RCC_DAC_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_DACLPEN)) +#define __HAL_RCC_UART7_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART7LPEN)) +#define __HAL_RCC_UART8_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART8LPEN)) +/** + * @} + */ + +/** @defgroup RCCEx_APB2_LowPower_Enable_Disable APB2 Peripheral Low Power Enable Disable + * @brief Enable or disable the APB2 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. 
+ * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_TIM8_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_TIM8LPEN)) +#define __HAL_RCC_ADC2_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_ADC2LPEN)) +#define __HAL_RCC_ADC3_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_ADC3LPEN)) +#define __HAL_RCC_SPI5_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SPI5LPEN)) +#define __HAL_RCC_SPI6_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SPI6LPEN)) +#define __HAL_RCC_SAI1_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SAI1LPEN)) +#define __HAL_RCC_SDIO_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SDIOLPEN)) +#define __HAL_RCC_SPI4_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SPI4LPEN)) +#define __HAL_RCC_TIM10_CLK_SLEEP_ENABLE()(RCC->APB2LPENR |= (RCC_APB2LPENR_TIM10LPEN)) + +#define __HAL_RCC_SDIO_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SDIOLPEN)) +#define __HAL_RCC_SPI4_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SPI4LPEN)) +#define __HAL_RCC_TIM10_CLK_SLEEP_DISABLE()(RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM10LPEN)) +#define __HAL_RCC_TIM8_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM8LPEN)) +#define __HAL_RCC_ADC2_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_ADC2LPEN)) +#define __HAL_RCC_ADC3_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_ADC3LPEN)) +#define __HAL_RCC_SPI5_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SPI5LPEN)) +#define __HAL_RCC_SPI6_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SPI6LPEN)) +#define __HAL_RCC_SAI1_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SAI1LPEN)) + +#if defined(STM32F429xx)|| defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) +#define __HAL_RCC_LTDC_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_LTDCLPEN)) + +#define __HAL_RCC_LTDC_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_LTDCLPEN)) +#endif /* STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ + +#if defined(STM32F469xx) || defined(STM32F479xx) +#define __HAL_RCC_DSI_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_DSILPEN)) +#define __HAL_RCC_DSI_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_DSILPEN)) +#endif /* STM32F469xx || STM32F479xx */ +/** + * @} + */ +#endif /* STM32F427xx || STM32F437xx || STM32F429xx|| STM32F439xx || STM32F469xx || STM32F479xx */ +/*----------------------------------------------------------------------------*/ + +/*----------------------------------- STM32F40xxx/STM32F41xxx-----------------*/ +#if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx)|| defined(STM32F417xx) +/** @defgroup RCCEx_AHB1_Clock_Enable_Disable AHB1 Peripheral Clock Enable Disable + * @brief Enables or disables the AHB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ * @{ + */ +#define __HAL_RCC_BKPSRAM_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_BKPSRAMEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_BKPSRAMEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_CCMDATARAMEN_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_CRC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOD_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOE_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOI_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOIEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOF_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOG_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_USB_OTG_HS_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSULPIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSULPIEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOD_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIODEN)) +#define __HAL_RCC_GPIOE_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOEEN)) +#define __HAL_RCC_GPIOF_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOFEN)) +#define __HAL_RCC_GPIOG_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOGEN)) +#define __HAL_RCC_GPIOI_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOIEN)) +#define __HAL_RCC_USB_OTG_HS_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_OTGHSEN)) +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_OTGHSULPIEN)) +#define __HAL_RCC_BKPSRAM_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_BKPSRAMEN)) +#define __HAL_RCC_CCMDATARAMEN_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_CCMDATARAMEN)) +#define 
__HAL_RCC_CRC_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_CRCEN)) +#if defined(STM32F407xx)|| defined(STM32F417xx) +/** + * @brief Enable ETHERNET clock. + */ +#define __HAL_RCC_ETHMAC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_ETHMACTX_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACTXEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACTXEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_ETHMACRX_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACRXEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACRXEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_ETHMACPTP_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACPTPEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACPTPEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_ETH_CLK_ENABLE() do { \ + __HAL_RCC_ETHMAC_CLK_ENABLE(); \ + __HAL_RCC_ETHMACTX_CLK_ENABLE(); \ + __HAL_RCC_ETHMACRX_CLK_ENABLE(); \ + } while(0U) + +/** + * @brief Disable ETHERNET clock. + */ +#define __HAL_RCC_ETHMAC_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_ETHMACEN)) +#define __HAL_RCC_ETHMACTX_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_ETHMACTXEN)) +#define __HAL_RCC_ETHMACRX_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_ETHMACRXEN)) +#define __HAL_RCC_ETHMACPTP_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_ETHMACPTPEN)) +#define __HAL_RCC_ETH_CLK_DISABLE() do { \ + __HAL_RCC_ETHMACTX_CLK_DISABLE(); \ + __HAL_RCC_ETHMACRX_CLK_DISABLE(); \ + __HAL_RCC_ETHMAC_CLK_DISABLE(); \ + } while(0U) +#endif /* STM32F407xx || STM32F417xx */ +/** + * @} + */ + +/** @defgroup RCCEx_AHB1_Peripheral_Clock_Enable_Disable_Status AHB1 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the AHB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
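+ * @note Illustrative sketch (editorial addition): on STM32F407xx/STM32F417xx the
+ *       composite ETH status macros can be used to guard Ethernet initialisation, e.g.
+ * @code
+ *   if (__HAL_RCC_ETH_IS_CLK_DISABLED())  // true only if MAC, TX and RX clocks are all off
+ *   {
+ *     __HAL_RCC_ETH_CLK_ENABLE();         // enables ETHMAC, ETHMACTX and ETHMACRX together
+ *   }
+ * @endcode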
+ * @{
+ */
+#define __HAL_RCC_BKPSRAM_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_BKPSRAMEN)) != RESET)
+#define __HAL_RCC_CCMDATARAMEN_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN)) != RESET)
+#define __HAL_RCC_CRC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) != RESET)
+#define __HAL_RCC_GPIOD_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) != RESET)
+#define __HAL_RCC_GPIOE_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) != RESET)
+#define __HAL_RCC_GPIOI_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOIEN)) != RESET)
+#define __HAL_RCC_GPIOF_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOFEN)) != RESET)
+#define __HAL_RCC_GPIOG_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOGEN)) != RESET)
+#define __HAL_RCC_USB_OTG_HS_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSEN)) != RESET)
+#define __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSULPIEN)) != RESET)
+
+#define __HAL_RCC_GPIOD_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) == RESET)
+#define __HAL_RCC_GPIOE_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) == RESET)
+#define __HAL_RCC_GPIOF_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOFEN)) == RESET)
+#define __HAL_RCC_GPIOG_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOGEN)) == RESET)
+#define __HAL_RCC_GPIOI_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOIEN)) == RESET)
+#define __HAL_RCC_USB_OTG_HS_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSEN)) == RESET)
+#define __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSULPIEN)) == RESET)
+#define __HAL_RCC_BKPSRAM_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_BKPSRAMEN)) == RESET)
+#define __HAL_RCC_CCMDATARAMEN_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN)) == RESET)
+#define __HAL_RCC_CRC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) == RESET)
+#if defined(STM32F407xx)|| defined(STM32F417xx)
+/**
+ * @brief Get the ETHERNET clock enable status.
+ */
+#define __HAL_RCC_ETHMAC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACEN)) != RESET)
+#define __HAL_RCC_ETHMACTX_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACTXEN)) != RESET)
+#define __HAL_RCC_ETHMACRX_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACRXEN)) != RESET)
+#define __HAL_RCC_ETHMACPTP_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACPTPEN)) != RESET)
+#define __HAL_RCC_ETH_IS_CLK_ENABLED() (__HAL_RCC_ETHMAC_IS_CLK_ENABLED() && \
+ __HAL_RCC_ETHMACTX_IS_CLK_ENABLED() && \
+ __HAL_RCC_ETHMACRX_IS_CLK_ENABLED())
+/**
+ * @brief Get the ETHERNET clock disable status.
+ */
+#define __HAL_RCC_ETHMAC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACEN)) == RESET)
+#define __HAL_RCC_ETHMACTX_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACTXEN)) == RESET)
+#define __HAL_RCC_ETHMACRX_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACRXEN)) == RESET)
+#define __HAL_RCC_ETHMACPTP_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACPTPEN)) == RESET)
+#define __HAL_RCC_ETH_IS_CLK_DISABLED() (__HAL_RCC_ETHMAC_IS_CLK_DISABLED() && \
+ __HAL_RCC_ETHMACTX_IS_CLK_DISABLED() && \
+ __HAL_RCC_ETHMACRX_IS_CLK_DISABLED())
+#endif /* STM32F407xx || STM32F417xx */
+/**
+ * @}
+ */
+
+/** @defgroup RCCEx_AHB2_Clock_Enable_Disable AHB2 Peripheral Clock Enable Disable
+ * @brief Enable or disable the AHB2 peripheral clock.
+ * @note After reset, the peripheral clock (used for registers read/write access)
+ * is disabled and the application software has to enable this clock before
+ * using it.
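+ * @note Illustrative sketch (editorial addition): __HAL_RCC_USB_OTG_FS_CLK_ENABLE()
+ *       defined below also turns on the SYSCFG clock, so no separate
+ *       __HAL_RCC_SYSCFG_CLK_ENABLE() call is needed:
+ * @code
+ *   __HAL_RCC_USB_OTG_FS_CLK_ENABLE();  // sets OTGFSEN and enables SYSCFG as a side effect
+ *   // ... initialise the OTG FS peripheral ...
+ * @endcode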
+ * @{ + */ +#define __HAL_RCC_USB_OTG_FS_CLK_ENABLE() do {(RCC->AHB2ENR |= (RCC_AHB2ENR_OTGFSEN));\ + __HAL_RCC_SYSCFG_CLK_ENABLE();\ + }while(0U) + +#define __HAL_RCC_USB_OTG_FS_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_OTGFSEN)) + +#define __HAL_RCC_RNG_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_RNG_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_RNGEN)) + +#if defined(STM32F407xx)|| defined(STM32F417xx) +#define __HAL_RCC_DCMI_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_DCMIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_DCMIEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_DCMI_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_DCMIEN)) +#endif /* STM32F407xx || STM32F417xx */ + +#if defined(STM32F415xx) || defined(STM32F417xx) +#define __HAL_RCC_CRYP_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_CRYPEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_CRYPEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_HASH_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_HASHEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_HASHEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_CRYP_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_CRYPEN)) +#define __HAL_RCC_HASH_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_HASHEN)) +#endif /* STM32F415xx || STM32F417xx */ +/** + * @} + */ + + +/** @defgroup RCCEx_AHB2_Peripheral_Clock_Enable_Disable_Status AHB2 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the AHB2 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_USB_OTG_FS_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) != RESET) +#define __HAL_RCC_USB_OTG_FS_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) == RESET) + +#define __HAL_RCC_RNG_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) != RESET) +#define __HAL_RCC_RNG_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) == RESET) + +#if defined(STM32F407xx)|| defined(STM32F417xx) +#define __HAL_RCC_DCMI_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_DCMIEN)) != RESET) +#define __HAL_RCC_DCMI_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_DCMIEN)) == RESET) +#endif /* STM32F407xx || STM32F417xx */ + +#if defined(STM32F415xx) || defined(STM32F417xx) +#define __HAL_RCC_CRYP_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_CRYPEN)) != RESET) +#define __HAL_RCC_HASH_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_HASHEN)) != RESET) + +#define __HAL_RCC_CRYP_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_CRYPEN)) == RESET) +#define __HAL_RCC_HASH_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_HASHEN)) == RESET) +#endif /* STM32F415xx || STM32F417xx */ +/** + * @} + */ + +/** @defgroup RCCEx_AHB3_Clock_Enable_Disable AHB3 Peripheral Clock Enable Disable + * @brief Enables or disables the AHB3 peripheral clock. 
+ * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_FSMC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FSMCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FSMCEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_FSMC_CLK_DISABLE() (RCC->AHB3ENR &= ~(RCC_AHB3ENR_FSMCEN)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB3_Peripheral_Clock_Enable_Disable_Status AHB3 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the AHB3 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_FSMC_IS_CLK_ENABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_FSMCEN)) != RESET) +#define __HAL_RCC_FSMC_IS_CLK_DISABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_FSMCEN)) == RESET) +/** + * @} + */ + +/** @defgroup RCCEx_APB1_Clock_Enable_Disable APB1 Peripheral Clock Enable Disable + * @brief Enable or disable the Low Speed APB (APB1) peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_TIM6_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM7_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM12_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM13_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM14_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_USART3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_UART4_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_UART5_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg 
= READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_CAN1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_CAN2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_DAC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM4_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SPI3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_I2C3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM2EN)) +#define __HAL_RCC_TIM3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM3EN)) +#define __HAL_RCC_TIM4_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM4EN)) +#define __HAL_RCC_SPI3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_SPI3EN)) +#define __HAL_RCC_I2C3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_I2C3EN)) +#define __HAL_RCC_TIM6_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM6EN)) +#define __HAL_RCC_TIM7_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM7EN)) +#define __HAL_RCC_TIM12_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM12EN)) +#define __HAL_RCC_TIM13_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM13EN)) +#define __HAL_RCC_TIM14_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM14EN)) +#define __HAL_RCC_USART3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_USART3EN)) +#define __HAL_RCC_UART4_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_UART4EN)) +#define __HAL_RCC_UART5_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_UART5EN)) +#define __HAL_RCC_CAN1_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_CAN1EN)) +#define __HAL_RCC_CAN2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_CAN2EN)) +#define __HAL_RCC_DAC_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_DACEN)) +/** + * @} + */ + +/** @defgroup 
RCCEx_APB1_Peripheral_Clock_Enable_Disable_Status APB1 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the APB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_TIM2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) != RESET) +#define __HAL_RCC_TIM3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) != RESET) +#define __HAL_RCC_TIM4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) != RESET) +#define __HAL_RCC_SPI3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) != RESET) +#define __HAL_RCC_I2C3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) != RESET) +#define __HAL_RCC_TIM6_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) != RESET) +#define __HAL_RCC_TIM7_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM7EN)) != RESET) +#define __HAL_RCC_TIM12_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM12EN)) != RESET) +#define __HAL_RCC_TIM13_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM13EN)) != RESET) +#define __HAL_RCC_TIM14_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM14EN)) != RESET) +#define __HAL_RCC_USART3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART3EN)) != RESET) +#define __HAL_RCC_UART4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART4EN)) != RESET) +#define __HAL_RCC_UART5_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART5EN)) != RESET) +#define __HAL_RCC_CAN1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN1EN)) != RESET) +#define __HAL_RCC_CAN2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN2EN)) != RESET) +#define __HAL_RCC_DAC_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) != RESET) + +#define __HAL_RCC_TIM2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) == RESET) +#define __HAL_RCC_TIM3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) == RESET) +#define __HAL_RCC_TIM4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) == RESET) +#define __HAL_RCC_SPI3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) == RESET) +#define __HAL_RCC_I2C3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) == RESET) +#define __HAL_RCC_TIM6_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) == RESET) +#define __HAL_RCC_TIM7_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM7EN)) == RESET) +#define __HAL_RCC_TIM12_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM12EN)) == RESET) +#define __HAL_RCC_TIM13_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM13EN)) == RESET) +#define __HAL_RCC_TIM14_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM14EN)) == RESET) +#define __HAL_RCC_USART3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART3EN)) == RESET) +#define __HAL_RCC_UART4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART4EN)) == RESET) +#define __HAL_RCC_UART5_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART5EN)) == RESET) +#define __HAL_RCC_CAN1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN1EN)) == RESET) +#define __HAL_RCC_CAN2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN2EN)) == RESET) +#define __HAL_RCC_DAC_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) == RESET) +/** + * @} + */ + +/** @defgroup RCCEx_APB2_Clock_Enable_Disable APB2 Peripheral Clock Enable Disable + * @brief Enable or disable the High Speed APB (APB2) peripheral clock. 
+ * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_TIM8_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_ADC2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC2EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_ADC3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC3EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SDIO_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SPI4_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM10_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + UNUSED(tmpreg); \ + } while(0U) + +#define __HAL_RCC_SDIO_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SDIOEN)) +#define __HAL_RCC_SPI4_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI4EN)) +#define __HAL_RCC_TIM10_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM10EN)) +#define __HAL_RCC_TIM8_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM8EN)) +#define __HAL_RCC_ADC2_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_ADC2EN)) +#define __HAL_RCC_ADC3_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_ADC3EN)) +/** + * @} + */ + +/** @defgroup RCCEx_APB2_Peripheral_Clock_Enable_Disable_Status APB2 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the APB2 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ * @{ + */ +#define __HAL_RCC_SDIO_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) != RESET) +#define __HAL_RCC_SPI4_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) != RESET) +#define __HAL_RCC_TIM10_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) != RESET) +#define __HAL_RCC_TIM8_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM8EN)) != RESET) +#define __HAL_RCC_ADC2_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC2EN)) != RESET) +#define __HAL_RCC_ADC3_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC3EN)) != RESET) + +#define __HAL_RCC_SDIO_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) == RESET) +#define __HAL_RCC_SPI4_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) == RESET) +#define __HAL_RCC_TIM10_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) == RESET) +#define __HAL_RCC_TIM8_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM8EN)) == RESET) +#define __HAL_RCC_ADC2_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC2EN)) == RESET) +#define __HAL_RCC_ADC3_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC3EN)) == RESET) +/** + * @} + */ + +/** @defgroup RCCEx_AHB1_Force_Release_Reset AHB1 Force Release Reset + * @brief Force or release AHB1 peripheral reset. + * @{ + */ +#if defined (STM32F405xx) || defined (STM32F415xx) +#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0x206011FFU) +#endif /* STM32F405xx || STM32F415xx */ +#if defined (STM32F407xx) || defined (STM32F417xx) +#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0x226011FFU) +#endif /* STM32F407xx || STM32F417xx */ +#define __HAL_RCC_GPIOD_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIODRST)) +#define __HAL_RCC_GPIOE_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOERST)) +#define __HAL_RCC_GPIOF_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOFRST)) +#define __HAL_RCC_GPIOG_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOGRST)) +#define __HAL_RCC_GPIOI_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOIRST)) +#define __HAL_RCC_ETHMAC_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_ETHMACRST)) +#define __HAL_RCC_USB_OTG_HS_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_OTGHRST)) +#define __HAL_RCC_CRC_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_CRCRST)) + +#define __HAL_RCC_GPIOD_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIODRST)) +#define __HAL_RCC_GPIOE_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOERST)) +#define __HAL_RCC_GPIOF_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOFRST)) +#define __HAL_RCC_GPIOG_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOGRST)) +#define __HAL_RCC_GPIOI_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOIRST)) +#define __HAL_RCC_ETHMAC_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_ETHMACRST)) +#define __HAL_RCC_USB_OTG_HS_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_OTGHRST)) +#define __HAL_RCC_CRC_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_CRCRST)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB2_Force_Release_Reset AHB2 Force Release Reset + * @brief Force or release AHB2 peripheral reset. 
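+ * @note Illustrative sketch (editorial addition): the bus-level macros reset every
+ *       AHB2 peripheral at once (the force mask is device dependent, as the
+ *       conditional definitions below show):
+ * @code
+ *   __HAL_RCC_AHB2_FORCE_RESET();    // assert reset on all AHB2 peripherals
+ *   __HAL_RCC_AHB2_RELEASE_RESET();  // release the whole bus again
+ * @endcode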
+ * @{ + */ +#if defined (STM32F415xx) || defined (STM32F417xx) +#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0x000000F1U) +#endif /* STM32F415xx || STM32F417xx */ +#if defined (STM32F405xx) || defined (STM32F407xx) +#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0x000000C1U) +#endif /* STM32F405xx || STM32F407xx */ +#define __HAL_RCC_AHB2_RELEASE_RESET() (RCC->AHB2RSTR = 0x00U) + +#if defined(STM32F407xx)|| defined(STM32F417xx) +#define __HAL_RCC_DCMI_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_DCMIRST)) +#define __HAL_RCC_DCMI_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_DCMIRST)) +#endif /* STM32F407xx || STM32F417xx */ + +#if defined(STM32F415xx) || defined(STM32F417xx) +#define __HAL_RCC_CRYP_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_CRYPRST)) +#define __HAL_RCC_HASH_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_HASHRST)) + +#define __HAL_RCC_CRYP_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_CRYPRST)) +#define __HAL_RCC_HASH_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_HASHRST)) +#endif /* STM32F415xx || STM32F417xx */ + +#define __HAL_RCC_USB_OTG_FS_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_OTGFSRST)) +#define __HAL_RCC_USB_OTG_FS_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_OTGFSRST)) + +#define __HAL_RCC_RNG_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_RNGRST)) +#define __HAL_RCC_RNG_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_RNGRST)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB3_Force_Release_Reset AHB3 Force Release Reset + * @brief Force or release AHB3 peripheral reset. + * @{ + */ +#define __HAL_RCC_AHB3_FORCE_RESET() (RCC->AHB3RSTR = 0x00000001U) +#define __HAL_RCC_AHB3_RELEASE_RESET() (RCC->AHB3RSTR = 0x00U) + +#define __HAL_RCC_FSMC_FORCE_RESET() (RCC->AHB3RSTR |= (RCC_AHB3RSTR_FSMCRST)) +#define __HAL_RCC_FSMC_RELEASE_RESET() (RCC->AHB3RSTR &= ~(RCC_AHB3RSTR_FSMCRST)) +/** + * @} + */ + +/** @defgroup RCCEx_APB1_Force_Release_Reset APB1 Force Release Reset + * @brief Force or release APB1 peripheral reset. 
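+ * @note Illustrative sketch (editorial addition): a single peripheral is reset by
+ *       asserting and then releasing its reset bit:
+ * @code
+ *   __HAL_RCC_USART3_FORCE_RESET();    // hold USART3 in reset
+ *   __HAL_RCC_USART3_RELEASE_RESET();  // release it; USART3 registers are back at their reset values
+ * @endcode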
+ * @{ + */ +#define __HAL_RCC_APB1_FORCE_RESET() (RCC->APB1RSTR = 0xF6FEC9FFU) +#define __HAL_RCC_TIM6_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM6RST)) +#define __HAL_RCC_TIM7_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM7RST)) +#define __HAL_RCC_TIM12_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM12RST)) +#define __HAL_RCC_TIM13_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM13RST)) +#define __HAL_RCC_TIM14_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM14RST)) +#define __HAL_RCC_USART3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_USART3RST)) +#define __HAL_RCC_UART4_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART4RST)) +#define __HAL_RCC_UART5_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART5RST)) +#define __HAL_RCC_CAN1_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_CAN1RST)) +#define __HAL_RCC_CAN2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_CAN2RST)) +#define __HAL_RCC_DAC_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_DACRST)) +#define __HAL_RCC_TIM2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM2RST)) +#define __HAL_RCC_TIM3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM3RST)) +#define __HAL_RCC_TIM4_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM4RST)) +#define __HAL_RCC_SPI3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_SPI3RST)) +#define __HAL_RCC_I2C3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_I2C3RST)) + +#define __HAL_RCC_TIM2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM2RST)) +#define __HAL_RCC_TIM3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM3RST)) +#define __HAL_RCC_TIM4_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM4RST)) +#define __HAL_RCC_SPI3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_SPI3RST)) +#define __HAL_RCC_I2C3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_I2C3RST)) +#define __HAL_RCC_TIM6_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM6RST)) +#define __HAL_RCC_TIM7_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM7RST)) +#define __HAL_RCC_TIM12_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM12RST)) +#define __HAL_RCC_TIM13_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM13RST)) +#define __HAL_RCC_TIM14_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM14RST)) +#define __HAL_RCC_USART3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_USART3RST)) +#define __HAL_RCC_UART4_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART4RST)) +#define __HAL_RCC_UART5_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART5RST)) +#define __HAL_RCC_CAN1_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_CAN1RST)) +#define __HAL_RCC_CAN2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_CAN2RST)) +#define __HAL_RCC_DAC_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_DACRST)) +/** + * @} + */ + +/** @defgroup RCCEx_APB2_Force_Release_Reset APB2 Force Release Reset + * @brief Force or release APB2 peripheral reset. 
+ * @{ + */ +#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0x04777933U) +#define __HAL_RCC_TIM8_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM8RST)) +#define __HAL_RCC_SDIO_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SDIORST)) +#define __HAL_RCC_SPI4_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI4RST)) +#define __HAL_RCC_TIM10_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM10RST)) + +#define __HAL_RCC_SDIO_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SDIORST)) +#define __HAL_RCC_SPI4_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI4RST)) +#define __HAL_RCC_TIM10_RELEASE_RESET()(RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM10RST)) +#define __HAL_RCC_TIM8_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM8RST)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB1_LowPower_Enable_Disable AHB1 Peripheral Low Power Enable Disable + * @brief Enable or disable the AHB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_GPIOD_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIODLPEN)) +#define __HAL_RCC_GPIOE_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOELPEN)) +#define __HAL_RCC_GPIOF_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOFLPEN)) +#define __HAL_RCC_GPIOG_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOGLPEN)) +#define __HAL_RCC_GPIOI_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOILPEN)) +#define __HAL_RCC_SRAM2_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_SRAM2LPEN)) +#define __HAL_RCC_ETHMAC_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_ETHMACLPEN)) +#define __HAL_RCC_ETHMACTX_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_ETHMACTXLPEN)) +#define __HAL_RCC_ETHMACRX_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_ETHMACRXLPEN)) +#define __HAL_RCC_ETHMACPTP_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_ETHMACPTPLPEN)) +#define __HAL_RCC_USB_OTG_HS_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_OTGHSLPEN)) +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_OTGHSULPILPEN)) +#define __HAL_RCC_CRC_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_CRCLPEN)) +#define __HAL_RCC_FLITF_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_FLITFLPEN)) +#define __HAL_RCC_SRAM1_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_SRAM1LPEN)) +#define __HAL_RCC_BKPSRAM_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_BKPSRAMLPEN)) + +#define __HAL_RCC_GPIOD_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIODLPEN)) +#define __HAL_RCC_GPIOE_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOELPEN)) +#define __HAL_RCC_GPIOF_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOFLPEN)) +#define __HAL_RCC_GPIOG_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOGLPEN)) +#define __HAL_RCC_GPIOI_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOILPEN)) +#define __HAL_RCC_SRAM2_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_SRAM2LPEN)) +#define __HAL_RCC_ETHMAC_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_ETHMACLPEN)) +#define __HAL_RCC_ETHMACTX_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_ETHMACTXLPEN)) +#define __HAL_RCC_ETHMACRX_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_ETHMACRXLPEN)) +#define 
__HAL_RCC_ETHMACPTP_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_ETHMACPTPLPEN)) +#define __HAL_RCC_USB_OTG_HS_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_OTGHSLPEN)) +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_OTGHSULPILPEN)) +#define __HAL_RCC_CRC_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_CRCLPEN)) +#define __HAL_RCC_FLITF_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_FLITFLPEN)) +#define __HAL_RCC_SRAM1_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_SRAM1LPEN)) +#define __HAL_RCC_BKPSRAM_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_BKPSRAMLPEN)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB2_LowPower_Enable_Disable AHB2 Peripheral Low Power Enable Disable + * @brief Enable or disable the AHB2 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wake-up from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_USB_OTG_FS_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_OTGFSLPEN)) +#define __HAL_RCC_USB_OTG_FS_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_OTGFSLPEN)) + +#define __HAL_RCC_RNG_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_RNGLPEN)) +#define __HAL_RCC_RNG_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_RNGLPEN)) + +#if defined(STM32F407xx)|| defined(STM32F417xx) +#define __HAL_RCC_DCMI_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_DCMILPEN)) +#define __HAL_RCC_DCMI_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_DCMILPEN)) +#endif /* STM32F407xx || STM32F417xx */ + +#if defined(STM32F415xx) || defined(STM32F417xx) +#define __HAL_RCC_CRYP_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_CRYPLPEN)) +#define __HAL_RCC_HASH_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_HASHLPEN)) + +#define __HAL_RCC_CRYP_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_CRYPLPEN)) +#define __HAL_RCC_HASH_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_HASHLPEN)) +#endif /* STM32F415xx || STM32F417xx */ +/** + * @} + */ + +/** @defgroup RCCEx_AHB3_LowPower_Enable_Disable AHB3 Peripheral Low Power Enable Disable + * @brief Enable or disable the AHB3 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_FSMC_CLK_SLEEP_ENABLE() (RCC->AHB3LPENR |= (RCC_AHB3LPENR_FSMCLPEN)) +#define __HAL_RCC_FSMC_CLK_SLEEP_DISABLE() (RCC->AHB3LPENR &= ~(RCC_AHB3LPENR_FSMCLPEN)) +/** + * @} + */ + +/** @defgroup RCCEx_APB1_LowPower_Enable_Disable APB1 Peripheral Low Power Enable Disable + * @brief Enable or disable the APB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. 
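+ * @note Illustrative sketch (editorial addition, assumes the CMSIS __WFI()
+ *       intrinsic is available): clocks that are not gated here keep running
+ *       while the CPU is in Sleep mode.
+ * @code
+ *   __HAL_RCC_UART4_CLK_SLEEP_DISABLE();  // UART4 clock stops while the CPU sleeps
+ *   __WFI();                              // enter Sleep mode; the clock resumes after wake-up
+ * @endcode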
+ * @{ + */ +#define __HAL_RCC_TIM6_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM6LPEN)) +#define __HAL_RCC_TIM7_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM7LPEN)) +#define __HAL_RCC_TIM12_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM12LPEN)) +#define __HAL_RCC_TIM13_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM13LPEN)) +#define __HAL_RCC_TIM14_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM14LPEN)) +#define __HAL_RCC_USART3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_USART3LPEN)) +#define __HAL_RCC_UART4_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART4LPEN)) +#define __HAL_RCC_UART5_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART5LPEN)) +#define __HAL_RCC_CAN1_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_CAN1LPEN)) +#define __HAL_RCC_CAN2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_CAN2LPEN)) +#define __HAL_RCC_DAC_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_DACLPEN)) +#define __HAL_RCC_TIM2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM2LPEN)) +#define __HAL_RCC_TIM3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM3LPEN)) +#define __HAL_RCC_TIM4_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM4LPEN)) +#define __HAL_RCC_SPI3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_SPI3LPEN)) +#define __HAL_RCC_I2C3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_I2C3LPEN)) + +#define __HAL_RCC_TIM2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM2LPEN)) +#define __HAL_RCC_TIM3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM3LPEN)) +#define __HAL_RCC_TIM4_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM4LPEN)) +#define __HAL_RCC_SPI3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_SPI3LPEN)) +#define __HAL_RCC_I2C3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_I2C3LPEN)) +#define __HAL_RCC_TIM6_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM6LPEN)) +#define __HAL_RCC_TIM7_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM7LPEN)) +#define __HAL_RCC_TIM12_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM12LPEN)) +#define __HAL_RCC_TIM13_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM13LPEN)) +#define __HAL_RCC_TIM14_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM14LPEN)) +#define __HAL_RCC_USART3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_USART3LPEN)) +#define __HAL_RCC_UART4_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART4LPEN)) +#define __HAL_RCC_UART5_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART5LPEN)) +#define __HAL_RCC_CAN1_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_CAN1LPEN)) +#define __HAL_RCC_CAN2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_CAN2LPEN)) +#define __HAL_RCC_DAC_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_DACLPEN)) +/** + * @} + */ + +/** @defgroup RCCEx_APB2_LowPower_Enable_Disable APB2 Peripheral Low Power Enable Disable + * @brief Enable or disable the APB2 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. 
+ * @{ + */ +#define __HAL_RCC_TIM8_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_TIM8LPEN)) +#define __HAL_RCC_ADC2_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_ADC2LPEN)) +#define __HAL_RCC_ADC3_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_ADC3LPEN)) +#define __HAL_RCC_SDIO_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SDIOLPEN)) +#define __HAL_RCC_SPI4_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SPI4LPEN)) +#define __HAL_RCC_TIM10_CLK_SLEEP_ENABLE()(RCC->APB2LPENR |= (RCC_APB2LPENR_TIM10LPEN)) + +#define __HAL_RCC_SDIO_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SDIOLPEN)) +#define __HAL_RCC_SPI4_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SPI4LPEN)) +#define __HAL_RCC_TIM10_CLK_SLEEP_DISABLE()(RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM10LPEN)) +#define __HAL_RCC_TIM8_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM8LPEN)) +#define __HAL_RCC_ADC2_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_ADC2LPEN)) +#define __HAL_RCC_ADC3_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_ADC3LPEN)) +/** + * @} + */ +#endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx */ +/*----------------------------------------------------------------------------*/ + +/*------------------------- STM32F401xE/STM32F401xC --------------------------*/ +#if defined(STM32F401xC) || defined(STM32F401xE) +/** @defgroup RCCEx_AHB1_Clock_Enable_Disable AHB1 Peripheral Clock Enable Disable + * @brief Enable or disable the AHB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_GPIOD_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOE_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_CRC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_CCMDATARAMEN_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ + UNUSED(tmpreg); \ + } while(0U) + +#define __HAL_RCC_GPIOD_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIODEN)) +#define __HAL_RCC_GPIOE_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOEEN)) +#define __HAL_RCC_CRC_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_CRCEN)) +#define __HAL_RCC_CCMDATARAMEN_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_CCMDATARAMEN)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB1_Peripheral_Clock_Enable_Disable_Status AHB1 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the AHB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ * @{ + */ +#define __HAL_RCC_GPIOD_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) != RESET) +#define __HAL_RCC_GPIOE_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) != RESET) +#define __HAL_RCC_CRC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) != RESET) +#define __HAL_RCC_CCMDATARAMEN_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN)) != RESET) + +#define __HAL_RCC_GPIOD_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) == RESET) +#define __HAL_RCC_GPIOE_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) == RESET) +#define __HAL_RCC_CRC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) == RESET) +#define __HAL_RCC_CCMDATARAMEN_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN)) == RESET) +/** + * @} + */ + +/** @defgroup RCCEx_AHB2_Clock_Enable_Disable AHB2 Peripheral Clock Enable Disable + * @brief Enable or disable the AHB2 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_USB_OTG_FS_CLK_ENABLE() do {(RCC->AHB2ENR |= (RCC_AHB2ENR_OTGFSEN));\ + __HAL_RCC_SYSCFG_CLK_ENABLE();\ + }while(0U) + +#define __HAL_RCC_USB_OTG_FS_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_OTGFSEN)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB2_Peripheral_Clock_Enable_Disable_Status AHB2 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the AHB2 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_USB_OTG_FS_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) != RESET) +#define __HAL_RCC_USB_OTG_FS_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) == RESET) +/** + * @} + */ + +/** @defgroup RCC_APB1_Clock_Enable_Disable APB1 Peripheral Clock Enable Disable + * @brief Enable or disable the Low Speed APB (APB1) peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ * @{ + */ +#define __HAL_RCC_TIM2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM4_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SPI3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_I2C3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM2EN)) +#define __HAL_RCC_TIM3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM3EN)) +#define __HAL_RCC_TIM4_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM4EN)) +#define __HAL_RCC_SPI3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_SPI3EN)) +#define __HAL_RCC_I2C3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_I2C3EN)) +/** + * @} + */ + +/** @defgroup RCCEx_APB1_Peripheral_Clock_Enable_Disable_Status APB1 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the APB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_TIM2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) != RESET) +#define __HAL_RCC_TIM3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) != RESET) +#define __HAL_RCC_TIM4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) != RESET) +#define __HAL_RCC_SPI3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) != RESET) +#define __HAL_RCC_I2C3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) != RESET) + +#define __HAL_RCC_TIM2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) == RESET) +#define __HAL_RCC_TIM3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) == RESET) +#define __HAL_RCC_TIM4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) == RESET) +#define __HAL_RCC_SPI3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) == RESET) +#define __HAL_RCC_I2C3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) == RESET) +/** + * @} + */ + +/** @defgroup RCCEx_APB2_Clock_Enable_Disable APB2 Peripheral Clock Enable Disable + * @brief Enable or disable the High Speed APB (APB2) peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ * @{ + */ +#define __HAL_RCC_SDIO_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SPI4_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM10_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + UNUSED(tmpreg); \ + } while(0U) + +#define __HAL_RCC_SDIO_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SDIOEN)) +#define __HAL_RCC_SPI4_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI4EN)) +#define __HAL_RCC_TIM10_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM10EN)) +/** + * @} + */ + +/** @defgroup RCCEx_APB2_Peripheral_Clock_Enable_Disable_Status APB2 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the APB2 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_SDIO_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) != RESET) +#define __HAL_RCC_SPI4_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) != RESET) +#define __HAL_RCC_TIM10_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) != RESET) + +#define __HAL_RCC_SDIO_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) == RESET) +#define __HAL_RCC_SPI4_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) == RESET) +#define __HAL_RCC_TIM10_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) == RESET) +/** + * @} + */ +/** @defgroup RCCEx_AHB1_Force_Release_Reset AHB1 Force Release Reset + * @brief Force or release AHB1 peripheral reset. + * @{ + */ +#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0x0060109FU) +#define __HAL_RCC_GPIOD_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIODRST)) +#define __HAL_RCC_GPIOE_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOERST)) +#define __HAL_RCC_CRC_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_CRCRST)) + +#define __HAL_RCC_AHB1_RELEASE_RESET() (RCC->AHB1RSTR = 0x00U) +#define __HAL_RCC_GPIOD_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIODRST)) +#define __HAL_RCC_GPIOE_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOERST)) +#define __HAL_RCC_CRC_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_CRCRST)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB2_Force_Release_Reset AHB2 Force Release Reset + * @brief Force or release AHB2 peripheral reset. + * @{ + */ +#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0x00000080U) +#define __HAL_RCC_USB_OTG_FS_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_OTGFSRST)) + +#define __HAL_RCC_AHB2_RELEASE_RESET() (RCC->AHB2RSTR = 0x00U) +#define __HAL_RCC_USB_OTG_FS_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_OTGFSRST)) +/** + * @} + */ + +/** @defgroup RCCEx_APB1_Force_Release_Reset APB1 Force Release Reset + * @brief Force or release APB1 peripheral reset. 
+ * @{ + */ +#define __HAL_RCC_APB1_FORCE_RESET() (RCC->APB1RSTR = 0x10E2C80FU) +#define __HAL_RCC_TIM2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM2RST)) +#define __HAL_RCC_TIM3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM3RST)) +#define __HAL_RCC_TIM4_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM4RST)) +#define __HAL_RCC_SPI3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_SPI3RST)) +#define __HAL_RCC_I2C3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_I2C3RST)) + +#define __HAL_RCC_APB1_RELEASE_RESET() (RCC->APB1RSTR = 0x00U) +#define __HAL_RCC_TIM2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM2RST)) +#define __HAL_RCC_TIM3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM3RST)) +#define __HAL_RCC_TIM4_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM4RST)) +#define __HAL_RCC_SPI3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_SPI3RST)) +#define __HAL_RCC_I2C3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_I2C3RST)) +/** + * @} + */ + +/** @defgroup RCCEx_APB2_Force_Release_Reset APB2 Force Release Reset + * @brief Force or release APB2 peripheral reset. + * @{ + */ +#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0x00077931U) +#define __HAL_RCC_SDIO_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SDIORST)) +#define __HAL_RCC_SPI4_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI4RST)) +#define __HAL_RCC_TIM10_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM10RST)) + +#define __HAL_RCC_APB2_RELEASE_RESET() (RCC->APB2RSTR = 0x00U) +#define __HAL_RCC_SDIO_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SDIORST)) +#define __HAL_RCC_SPI4_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI4RST)) +#define __HAL_RCC_TIM10_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM10RST)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB3_Force_Release_Reset AHB3 Force Release Reset + * @brief Force or release AHB3 peripheral reset. + * @{ + */ +#define __HAL_RCC_AHB3_FORCE_RESET() (RCC->AHB3RSTR = 0xFFFFFFFFU) +#define __HAL_RCC_AHB3_RELEASE_RESET() (RCC->AHB3RSTR = 0x00U) +/** + * @} + */ + +/** @defgroup RCCEx_AHB1_LowPower_Enable_Disable AHB1 Peripheral Low Power Enable Disable + * @brief Enable or disable the AHB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wake-up from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. 
+ * @{ + */ +#define __HAL_RCC_GPIOD_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIODLPEN)) +#define __HAL_RCC_GPIOE_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOELPEN)) +#define __HAL_RCC_CRC_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_CRCLPEN)) +#define __HAL_RCC_FLITF_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_FLITFLPEN)) +#define __HAL_RCC_SRAM1_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_SRAM1LPEN)) + +#define __HAL_RCC_GPIOD_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIODLPEN)) +#define __HAL_RCC_GPIOE_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOELPEN)) +#define __HAL_RCC_CRC_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_CRCLPEN)) +#define __HAL_RCC_FLITF_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_FLITFLPEN)) +#define __HAL_RCC_SRAM1_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_SRAM1LPEN)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB2_LowPower_Enable_Disable AHB2 Peripheral Low Power Enable Disable + * @brief Enable or disable the AHB2 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wake-up from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_USB_OTG_FS_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_OTGFSLPEN)) + +#define __HAL_RCC_USB_OTG_FS_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_OTGFSLPEN)) +/** + * @} + */ + +/** @defgroup RCCEx_APB1_LowPower_Enable_Disable APB1 Peripheral Low Power Enable Disable + * @brief Enable or disable the APB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wake-up from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_TIM2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM2LPEN)) +#define __HAL_RCC_TIM3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM3LPEN)) +#define __HAL_RCC_TIM4_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM4LPEN)) +#define __HAL_RCC_SPI3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_SPI3LPEN)) +#define __HAL_RCC_I2C3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_I2C3LPEN)) + +#define __HAL_RCC_TIM2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM2LPEN)) +#define __HAL_RCC_TIM3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM3LPEN)) +#define __HAL_RCC_TIM4_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM4LPEN)) +#define __HAL_RCC_SPI3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_SPI3LPEN)) +#define __HAL_RCC_I2C3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_I2C3LPEN)) +/** + * @} + */ + +/** @defgroup RCCEx_APB2_LowPower_Enable_Disable APB2 Peripheral Low Power Enable Disable + * @brief Enable or disable the APB2 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wake-up from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. 
+ * @{ + */ +#define __HAL_RCC_SDIO_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SDIOLPEN)) +#define __HAL_RCC_SPI4_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SPI4LPEN)) +#define __HAL_RCC_TIM10_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_TIM10LPEN)) + +#define __HAL_RCC_SDIO_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SDIOLPEN)) +#define __HAL_RCC_SPI4_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SPI4LPEN)) +#define __HAL_RCC_TIM10_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM10LPEN)) +/** + * @} + */ +#endif /* STM32F401xC || STM32F401xE*/ +/*----------------------------------------------------------------------------*/ + +/*-------------------------------- STM32F410xx -------------------------------*/ +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) +/** @defgroup RCCEx_AHB1_Clock_Enable_Disable AHB1 Peripheral Clock Enable Disable + * @brief Enables or disables the AHB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_CRC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_RNG_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_RNGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_RNGEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_CRC_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_CRCEN)) +#define __HAL_RCC_RNG_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_RNGEN)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB1_Peripheral_Clock_Enable_Disable_Status AHB1 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the AHB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_CRC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) != RESET) +#define __HAL_RCC_RNG_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_RNGEN)) != RESET) + +#define __HAL_RCC_CRC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) == RESET) +#define __HAL_RCC_RNG_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_RNGEN)) == RESET) +/** + * @} + */ + +/** @defgroup RCCEx_APB1_Clock_Enable_Disable APB1 Peripheral Clock Enable Disable + * @brief Enable or disable the High Speed APB (APB1) peripheral clock. 
+ * @{ + */ +#define __HAL_RCC_TIM6_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_LPTIM1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_LPTIM1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_LPTIM1EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_RTCAPB_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_RTCAPBEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_RTCAPBEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_FMPI2C1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_FMPI2C1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_FMPI2C1EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_DAC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ + UNUSED(tmpreg); \ + } while(0U) + +#define __HAL_RCC_TIM6_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM6EN)) +#define __HAL_RCC_RTCAPB_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_RTCAPBEN)) +#define __HAL_RCC_LPTIM1_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_LPTIM1EN)) +#define __HAL_RCC_FMPI2C1_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_FMPI2C1EN)) +#define __HAL_RCC_DAC_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_DACEN)) +/** + * @} + */ + +/** @defgroup RCCEx_APB1_Peripheral_Clock_Enable_Disable_Status APB1 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the APB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_TIM6_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) != RESET) +#define __HAL_RCC_RTCAPB_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_RTCAPBEN)) != RESET) +#define __HAL_RCC_LPTIM1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_LPTIM1EN)) != RESET) +#define __HAL_RCC_FMPI2C1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_FMPI2C1EN)) != RESET) +#define __HAL_RCC_DAC_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) != RESET) + +#define __HAL_RCC_TIM6_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) == RESET) +#define __HAL_RCC_RTCAPB_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_RTCAPBEN)) == RESET) +#define __HAL_RCC_LPTIM1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_LPTIM1EN)) == RESET) +#define __HAL_RCC_FMPI2C1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_FMPI2C1EN)) == RESET) +#define __HAL_RCC_DAC_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) == RESET) +/** + * @} + */ + +/** @defgroup RCCEx_APB2_Clock_Enable_Disable APB2 Peripheral Clock Enable Disable + * @brief Enable or disable the High Speed APB (APB2) peripheral clock. 
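/* Illustrative sketch (editorial, not part of the committed header), assuming an
 * STM32F410 target with "stm32f4xx_hal.h" included: each __HAL_RCC_*_CLK_ENABLE()
 * macro above sets the enable bit and then reads it back into a dummy variable,
 * so the clock is already active before the first access to the peripheral's
 * registers. */
static void dac_clock_bringup(void)
{
  __HAL_RCC_DAC_CLK_ENABLE();           /* set DACEN, followed by the dummy read-back */

  if (__HAL_RCC_DAC_IS_CLK_ENABLED())   /* optional sanity check using the status macro */
  {
    /* DAC registers can now be accessed safely, e.g. through the HAL DAC driver. */
  }
}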
+ * @{ + */ +#define __HAL_RCC_SPI5_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_EXTIT_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_EXTITEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_EXTITEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SPI5_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI5EN)) +#define __HAL_RCC_EXTIT_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_EXTITEN)) +/** + * @} + */ + +/** @defgroup RCCEx_APB2_Peripheral_Clock_Enable_Disable_Status APB2 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the APB2 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_SPI5_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI5EN)) != RESET) +#define __HAL_RCC_EXTIT_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_EXTITEN)) != RESET) + +#define __HAL_RCC_SPI5_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI5EN)) == RESET) +#define __HAL_RCC_EXTIT_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_EXTITEN)) == RESET) +/** + * @} + */ + +/** @defgroup RCCEx_AHB1_Force_Release_Reset AHB1 Force Release Reset + * @brief Force or release AHB1 peripheral reset. + * @{ + */ +#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0x80601087U) +#define __HAL_RCC_CRC_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_CRCRST)) +#define __HAL_RCC_RNG_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_RNGRST)) +#define __HAL_RCC_CRC_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_CRCRST)) +#define __HAL_RCC_RNG_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_RNGRST)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB2_Force_Release_Reset AHB2 Force Release Reset + * @brief Force or release AHB2 peripheral reset. + * @{ + */ +#define __HAL_RCC_AHB2_FORCE_RESET() +#define __HAL_RCC_AHB2_RELEASE_RESET() +/** + * @} + */ + +/** @defgroup RCCEx_AHB3_Force_Release_Reset AHB3 Force Release Reset + * @brief Force or release AHB3 peripheral reset. + * @{ + */ +#define __HAL_RCC_AHB3_FORCE_RESET() +#define __HAL_RCC_AHB3_RELEASE_RESET() +/** + * @} + */ + +/** @defgroup RCCEx_APB1_Force_Release_Reset APB1 Force Release Reset + * @brief Force or release APB1 peripheral reset. 
+ * @{ + */ +#if defined (STM32F410Rx) || defined (STM32F410Cx) +#define __HAL_RCC_APB1_FORCE_RESET() (RCC->APB1RSTR = 0x31624A18U) +#endif /* STM32F410Rx || STM32F410Cx */ +#if defined (STM32F410Tx) +#define __HAL_RCC_APB1_FORCE_RESET() (RCC->APB1RSTR = 0x31620A18U) +#endif /* STM32F410Tx */ +#define __HAL_RCC_TIM6_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM6RST)) +#define __HAL_RCC_LPTIM1_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_LPTIM1RST)) +#define __HAL_RCC_FMPI2C1_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_FMPI2C1RST)) +#define __HAL_RCC_DAC_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_DACRST)) + +#define __HAL_RCC_TIM6_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM6RST)) +#define __HAL_RCC_LPTIM1_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_LPTIM1RST)) +#define __HAL_RCC_FMPI2C1_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_FMPI2C1RST)) +#define __HAL_RCC_DAC_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_DACRST)) +/** + * @} + */ + +/** @defgroup RCCEx_APB2_Force_Release_Reset APB2 Force Release Reset + * @brief Force or release APB2 peripheral reset. + * @{ + */ +#if defined (STM32F410Rx) || defined (STM32F410Cx) +#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0x00155131U) +#endif /* STM32F410Rx || STM32F410Cx */ +#if defined (STM32F410Tx) +#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0x00055111U) +#endif /* STM32F410Tx */ +#define __HAL_RCC_SPI5_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI5RST)) +#define __HAL_RCC_SPI5_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI5RST)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB1_LowPower_Enable_Disable AHB1 Peripheral Low Power Enable Disable + * @brief Enable or disable the AHB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_RNG_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_RNGLPEN)) +#define __HAL_RCC_CRC_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_CRCLPEN)) +#define __HAL_RCC_FLITF_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_FLITFLPEN)) +#define __HAL_RCC_SRAM1_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_SRAM1LPEN)) + +#define __HAL_RCC_RNG_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_RNGLPEN)) +#define __HAL_RCC_CRC_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_CRCLPEN)) +#define __HAL_RCC_FLITF_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_FLITFLPEN)) +#define __HAL_RCC_SRAM1_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_SRAM1LPEN)) +/** + * @} + */ + +/** @defgroup RCCEx_APB1_LowPower_Enable_Disable APB1 Peripheral Low Power Enable Disable + * @brief Enable or disable the APB1 peripheral clock during Low Power (Sleep) mode. 
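/* Illustrative sketch (editorial, not part of the committed header), assuming an
 * STM32F410 target: a peripheral can be returned to its reset state by pulsing its
 * reset bit with the force/release macros above before it is reconfigured. */
static void fmpi2c1_reset_to_defaults(void)
{
  __HAL_RCC_FMPI2C1_FORCE_RESET();    /* assert the FMPI2C1 reset line */
  __HAL_RCC_FMPI2C1_RELEASE_RESET();  /* release it; the peripheral registers are back at their reset values */
}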
+ * @{ + */ +#define __HAL_RCC_TIM6_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM6LPEN)) +#define __HAL_RCC_LPTIM1_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_LPTIM1LPEN)) +#define __HAL_RCC_RTCAPB_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_RTCAPBLPEN)) +#define __HAL_RCC_FMPI2C1_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_FMPI2C1LPEN)) +#define __HAL_RCC_DAC_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_DACLPEN)) + +#define __HAL_RCC_TIM6_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM6LPEN)) +#define __HAL_RCC_LPTIM1_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_LPTIM1LPEN)) +#define __HAL_RCC_RTCAPB_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_RTCAPBLPEN)) +#define __HAL_RCC_FMPI2C1_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_FMPI2C1LPEN)) +#define __HAL_RCC_DAC_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_DACLPEN)) +/** + * @} + */ + +/** @defgroup RCCEx_APB2_LowPower_Enable_Disable APB2 Peripheral Low Power Enable Disable + * @brief Enable or disable the APB2 peripheral clock during Low Power (Sleep) mode. + * @{ + */ +#define __HAL_RCC_SPI5_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SPI5LPEN)) +#define __HAL_RCC_EXTIT_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_EXTITLPEN)) +#define __HAL_RCC_SPI5_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SPI5LPEN)) +#define __HAL_RCC_EXTIT_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_EXTITLPEN)) +/** + * @} + */ + +#endif /* STM32F410Tx || STM32F410Cx || STM32F410Rx */ +/*----------------------------------------------------------------------------*/ + +/*-------------------------------- STM32F411xx -------------------------------*/ +#if defined(STM32F411xE) +/** @defgroup RCCEx_AHB1_Clock_Enable_Disable AHB1 Peripheral Clock Enable Disable + * @brief Enables or disables the AHB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ * @{ + */ +#define __HAL_RCC_CCMDATARAMEN_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOD_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOE_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_CRC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOD_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIODEN)) +#define __HAL_RCC_GPIOE_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOEEN)) +#define __HAL_RCC_CCMDATARAMEN_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_CCMDATARAMEN)) +#define __HAL_RCC_CRC_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_CRCEN)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB1_Peripheral_Clock_Enable_Disable_Status AHB1 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the AHB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_GPIOD_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) != RESET) +#define __HAL_RCC_GPIOE_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) != RESET) +#define __HAL_RCC_CCMDATARAMEN_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN)) != RESET) +#define __HAL_RCC_CRC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) != RESET) + +#define __HAL_RCC_GPIOD_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) == RESET) +#define __HAL_RCC_GPIOE_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) == RESET) +#define __HAL_RCC_CCMDATARAMEN_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN)) == RESET) +#define __HAL_RCC_CRC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) == RESET) +/** + * @} + */ + +/** @defgroup RCCEX_AHB2_Clock_Enable_Disable AHB2 Peripheral Clock Enable Disable + * @brief Enable or disable the AHB2 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_USB_OTG_FS_CLK_ENABLE() do {(RCC->AHB2ENR |= (RCC_AHB2ENR_OTGFSEN));\ + __HAL_RCC_SYSCFG_CLK_ENABLE();\ + }while(0U) + +#define __HAL_RCC_USB_OTG_FS_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_OTGFSEN)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB2_Peripheral_Clock_Enable_Disable_Status AHB2 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the AHB2 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ * @{ + */ +#define __HAL_RCC_USB_OTG_FS_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) != RESET) +#define __HAL_RCC_USB_OTG_FS_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) == RESET) +/** + * @} + */ + +/** @defgroup RCCEx_APB1_Clock_Enable_Disable APB1 Peripheral Clock Enable Disable + * @brief Enable or disable the Low Speed APB (APB1) peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_TIM2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM4_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SPI3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_I2C3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM2EN)) +#define __HAL_RCC_TIM3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM3EN)) +#define __HAL_RCC_TIM4_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM4EN)) +#define __HAL_RCC_SPI3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_SPI3EN)) +#define __HAL_RCC_I2C3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_I2C3EN)) +/** + * @} + */ + +/** @defgroup RCCEx_APB1_Peripheral_Clock_Enable_Disable_Status APB1 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the APB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ * @{ + */ +#define __HAL_RCC_TIM2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) != RESET) +#define __HAL_RCC_TIM3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) != RESET) +#define __HAL_RCC_TIM4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) != RESET) +#define __HAL_RCC_SPI3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) != RESET) +#define __HAL_RCC_I2C3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) != RESET) + +#define __HAL_RCC_TIM2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) == RESET) +#define __HAL_RCC_TIM3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) == RESET) +#define __HAL_RCC_TIM4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) == RESET) +#define __HAL_RCC_SPI3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) == RESET) +#define __HAL_RCC_I2C3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) == RESET) +/** + * @} + */ + +/** @defgroup RCCEx_APB2_Clock_Enable_Disable APB2 Peripheral Clock Enable Disable + * @brief Enable or disable the High Speed APB (APB2) peripheral clock. + * @{ + */ +#define __HAL_RCC_SPI5_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SDIO_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SPI4_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM10_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SDIO_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SDIOEN)) +#define __HAL_RCC_SPI4_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI4EN)) +#define __HAL_RCC_TIM10_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM10EN)) +#define __HAL_RCC_SPI5_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI5EN)) +/** + * @} + */ + +/** @defgroup RCCEx_APB2_Peripheral_Clock_Enable_Disable_Status APB2 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the APB2 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ * @{ + */ +#define __HAL_RCC_SDIO_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) != RESET) +#define __HAL_RCC_SPI4_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) != RESET) +#define __HAL_RCC_TIM10_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) != RESET) +#define __HAL_RCC_SPI5_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI5EN)) != RESET) + +#define __HAL_RCC_SDIO_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) == RESET) +#define __HAL_RCC_SPI4_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) == RESET) +#define __HAL_RCC_TIM10_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) == RESET) +#define __HAL_RCC_SPI5_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI5EN)) == RESET) +/** + * @} + */ + +/** @defgroup RCCEx_AHB1_Force_Release_Reset AHB1 Force Release Reset + * @brief Force or release AHB1 peripheral reset. + * @{ + */ +#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0x0060109FU) +#define __HAL_RCC_GPIOD_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIODRST)) +#define __HAL_RCC_GPIOE_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOERST)) +#define __HAL_RCC_CRC_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_CRCRST)) + +#define __HAL_RCC_GPIOD_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIODRST)) +#define __HAL_RCC_GPIOE_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOERST)) +#define __HAL_RCC_CRC_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_CRCRST)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB2_Force_Release_Reset AHB2 Force Release Reset + * @brief Force or release AHB2 peripheral reset. + * @{ + */ +#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0x00000080U) +#define __HAL_RCC_USB_OTG_FS_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_OTGFSRST)) + +#define __HAL_RCC_AHB2_RELEASE_RESET() (RCC->AHB2RSTR = 0x00U) +#define __HAL_RCC_USB_OTG_FS_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_OTGFSRST)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB3_Force_Release_Reset AHB3 Force Release Reset + * @brief Force or release AHB3 peripheral reset. + * @{ + */ +#define __HAL_RCC_AHB3_FORCE_RESET() (RCC->AHB3RSTR = 0xFFFFFFFFU) +#define __HAL_RCC_AHB3_RELEASE_RESET() (RCC->AHB3RSTR = 0x00U) +/** + * @} + */ + +/** @defgroup RCCEx_APB1_Force_Release_Reset APB1 Force Release Reset + * @brief Force or release APB1 peripheral reset. + * @{ + */ +#define __HAL_RCC_APB1_FORCE_RESET() (RCC->APB1RSTR = 0x10E2C80FU) +#define __HAL_RCC_TIM2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM2RST)) +#define __HAL_RCC_TIM3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM3RST)) +#define __HAL_RCC_TIM4_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM4RST)) +#define __HAL_RCC_SPI3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_SPI3RST)) +#define __HAL_RCC_I2C3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_I2C3RST)) + +#define __HAL_RCC_TIM2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM2RST)) +#define __HAL_RCC_TIM3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM3RST)) +#define __HAL_RCC_TIM4_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM4RST)) +#define __HAL_RCC_SPI3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_SPI3RST)) +#define __HAL_RCC_I2C3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_I2C3RST)) +/** + * @} + */ + +/** @defgroup RCCEx_APB2_Force_Release_Reset APB2 Force Release Reset + * @brief Force or release APB2 peripheral reset. 
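/* Illustrative sketch (editorial, not part of the committed header), assuming an
 * STM32F411 target: the *_IS_CLK_ENABLED()/*_IS_CLK_DISABLED() macros let the current
 * clock state be tested before it is changed, e.g. when several drivers share one
 * initialisation path. */
static void sdio_clock_enable_once(void)
{
  if (__HAL_RCC_SDIO_IS_CLK_DISABLED())
  {
    __HAL_RCC_SDIO_CLK_ENABLE();
  }
}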
+ * @{ + */ +#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0x00177931U) +#define __HAL_RCC_SPI5_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI5RST)) +#define __HAL_RCC_SDIO_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SDIORST)) +#define __HAL_RCC_SPI4_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI4RST)) +#define __HAL_RCC_TIM10_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM10RST)) + +#define __HAL_RCC_SDIO_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SDIORST)) +#define __HAL_RCC_SPI4_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI4RST)) +#define __HAL_RCC_TIM10_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM10RST)) +#define __HAL_RCC_SPI5_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI5RST)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB1_LowPower_Enable_Disable AHB1 Peripheral Low Power Enable Disable + * @brief Enable or disable the AHB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_GPIOD_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIODLPEN)) +#define __HAL_RCC_GPIOE_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOELPEN)) +#define __HAL_RCC_CRC_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_CRCLPEN)) +#define __HAL_RCC_FLITF_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_FLITFLPEN)) +#define __HAL_RCC_SRAM1_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_SRAM1LPEN)) + +#define __HAL_RCC_GPIOD_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIODLPEN)) +#define __HAL_RCC_GPIOE_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOELPEN)) +#define __HAL_RCC_CRC_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_CRCLPEN)) +#define __HAL_RCC_FLITF_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_FLITFLPEN)) +#define __HAL_RCC_SRAM1_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_SRAM1LPEN)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB2_LowPower_Enable_Disable AHB2 Peripheral Low Power Enable Disable + * @brief Enable or disable the AHB2 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wake-up from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_USB_OTG_FS_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_OTGFSLPEN)) +#define __HAL_RCC_USB_OTG_FS_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_OTGFSLPEN)) +/** + * @} + */ + +/** @defgroup RCCEx_APB1_LowPower_Enable_Disable APB1 Peripheral Low Power Enable Disable + * @brief Enable or disable the APB1 peripheral clock during Low Power (Sleep) mode. 
+ * @{ + */ +#define __HAL_RCC_TIM2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM2LPEN)) +#define __HAL_RCC_TIM3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM3LPEN)) +#define __HAL_RCC_TIM4_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM4LPEN)) +#define __HAL_RCC_SPI3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_SPI3LPEN)) +#define __HAL_RCC_I2C3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_I2C3LPEN)) + +#define __HAL_RCC_TIM2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM2LPEN)) +#define __HAL_RCC_TIM3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM3LPEN)) +#define __HAL_RCC_TIM4_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM4LPEN)) +#define __HAL_RCC_SPI3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_SPI3LPEN)) +#define __HAL_RCC_I2C3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_I2C3LPEN)) +/** + * @} + */ + +/** @defgroup RCCEx_APB2_LowPower_Enable_Disable APB2 Peripheral Low Power Enable Disable + * @brief Enable or disable the APB2 peripheral clock during Low Power (Sleep) mode. + * @{ + */ +#define __HAL_RCC_SPI5_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SPI5LPEN)) +#define __HAL_RCC_SDIO_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SDIOLPEN)) +#define __HAL_RCC_SPI4_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SPI4LPEN)) +#define __HAL_RCC_TIM10_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_TIM10LPEN)) + +#define __HAL_RCC_SDIO_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SDIOLPEN)) +#define __HAL_RCC_SPI4_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SPI4LPEN)) +#define __HAL_RCC_TIM10_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM10LPEN)) +#define __HAL_RCC_SPI5_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SPI5LPEN)) +/** + * @} + */ +#endif /* STM32F411xE */ +/*----------------------------------------------------------------------------*/ + +/*---------------------------------- STM32F446xx -----------------------------*/ +#if defined(STM32F446xx) +/** @defgroup RCCEx_AHB1_Clock_Enable_Disable AHB1 Peripheral Clock Enable Disable + * @brief Enables or disables the AHB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ * @{ + */ +#define __HAL_RCC_BKPSRAM_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_BKPSRAMEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_BKPSRAMEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_CCMDATARAMEN_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_CRC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOD_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOE_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOF_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOG_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_USB_OTG_HS_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSULPIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSULPIEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOD_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIODEN)) +#define __HAL_RCC_GPIOE_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOEEN)) +#define __HAL_RCC_GPIOF_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOFEN)) +#define __HAL_RCC_GPIOG_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOGEN)) +#define __HAL_RCC_USB_OTG_HS_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_OTGHSEN)) +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_OTGHSULPIEN)) +#define __HAL_RCC_BKPSRAM_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_BKPSRAMEN)) +#define __HAL_RCC_CCMDATARAMEN_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_CCMDATARAMEN)) +#define __HAL_RCC_CRC_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_CRCEN)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB1_Peripheral_Clock_Enable_Disable_Status AHB1 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the AHB1 peripheral clock. 
+ * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_GPIOD_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) != RESET) +#define __HAL_RCC_GPIOE_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) != RESET) +#define __HAL_RCC_GPIOF_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOFEN)) != RESET) +#define __HAL_RCC_GPIOG_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOGEN)) != RESET) +#define __HAL_RCC_USB_OTG_HS_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSEN)) != RESET) +#define __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSULPIEN)) != RESET) +#define __HAL_RCC_BKPSRAM_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_BKPSRAMEN)) != RESET) +#define __HAL_RCC_CCMDATARAMEN_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN))!= RESET) +#define __HAL_RCC_CRC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) != RESET) + +#define __HAL_RCC_GPIOD_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) == RESET) +#define __HAL_RCC_GPIOE_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) == RESET) +#define __HAL_RCC_GPIOF_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOFEN)) == RESET) +#define __HAL_RCC_GPIOG_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOGEN)) == RESET) +#define __HAL_RCC_USB_OTG_HS_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSEN)) == RESET) +#define __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSULPIEN)) == RESET) +#define __HAL_RCC_BKPSRAM_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_BKPSRAMEN)) == RESET) +#define __HAL_RCC_CCMDATARAMEN_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN)) == RESET) +#define __HAL_RCC_CRC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) == RESET) +/** + * @} + */ + +/** @defgroup RCCEx_AHB2_Clock_Enable_Disable AHB2 Peripheral Clock Enable Disable + * @brief Enable or disable the AHB2 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_DCMI_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_DCMIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_DCMIEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_DCMI_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_DCMIEN)) +#define __HAL_RCC_USB_OTG_FS_CLK_ENABLE() do {(RCC->AHB2ENR |= (RCC_AHB2ENR_OTGFSEN));\ + __HAL_RCC_SYSCFG_CLK_ENABLE();\ + }while(0U) + +#define __HAL_RCC_USB_OTG_FS_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_OTGFSEN)) + +#define __HAL_RCC_RNG_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_RNG_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_RNGEN)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB2_Peripheral_Clock_Enable_Disable_Status AHB2 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the AHB2 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
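/* Illustrative sketch (editorial, not part of the committed header), assuming an
 * STM32F446 target: as the macro body above shows, __HAL_RCC_USB_OTG_FS_CLK_ENABLE()
 * also switches on the SYSCFG clock, so no separate SYSCFG enable call is needed
 * before the OTG FS core is configured. */
static void usb_otg_fs_clock_bringup(void)
{
  __HAL_RCC_USB_OTG_FS_CLK_ENABLE();      /* enables OTGFSEN and, via the macro, the SYSCFG clock */

  if (__HAL_RCC_USB_OTG_FS_IS_CLK_ENABLED())
  {
    /* OTG FS registers are now clocked and can be initialised. */
  }
}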
+ * @{ + */ +#define __HAL_RCC_DCMI_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_DCMIEN)) != RESET) +#define __HAL_RCC_DCMI_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_DCMIEN)) == RESET) + +#define __HAL_RCC_USB_OTG_FS_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) != RESET) +#define __HAL_RCC_USB_OTG_FS_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) == RESET) + +#define __HAL_RCC_RNG_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) != RESET) +#define __HAL_RCC_RNG_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) == RESET) +/** + * @} + */ + +/** @defgroup RCCEx_AHB3_Clock_Enable_Disable AHB3 Peripheral Clock Enable Disable + * @brief Enables or disables the AHB3 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_FMC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FMCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FMCEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_QSPI_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_QSPIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_QSPIEN);\ + UNUSED(tmpreg); \ + } while(0U) + +#define __HAL_RCC_FMC_CLK_DISABLE() (RCC->AHB3ENR &= ~(RCC_AHB3ENR_FMCEN)) +#define __HAL_RCC_QSPI_CLK_DISABLE() (RCC->AHB3ENR &= ~(RCC_AHB3ENR_QSPIEN)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB3_Peripheral_Clock_Enable_Disable_Status AHB3 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the AHB3 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_FMC_IS_CLK_ENABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_FMCEN)) != RESET) +#define __HAL_RCC_QSPI_IS_CLK_ENABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_QSPIEN)) != RESET) + +#define __HAL_RCC_FMC_IS_CLK_DISABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_FMCEN)) == RESET) +#define __HAL_RCC_QSPI_IS_CLK_DISABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_QSPIEN)) == RESET) +/** + * @} + */ + +/** @defgroup RCCEx_APB1_Clock_Enable_Disable APB1 Peripheral Clock Enable Disable + * @brief Enable or disable the Low Speed APB (APB1) peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ * @{ + */ +#define __HAL_RCC_TIM6_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM7_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM12_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM13_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM14_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SPDIFRX_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPDIFRXEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPDIFRXEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_USART3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_UART4_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_UART5_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_FMPI2C1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_FMPI2C1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_FMPI2C1EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_CAN1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_CAN2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_CEC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CECEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CECEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define 
__HAL_RCC_DAC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM4_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SPI3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_I2C3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM2EN)) +#define __HAL_RCC_TIM3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM3EN)) +#define __HAL_RCC_TIM4_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM4EN)) +#define __HAL_RCC_SPI3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_SPI3EN)) +#define __HAL_RCC_I2C3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_I2C3EN)) +#define __HAL_RCC_TIM6_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM6EN)) +#define __HAL_RCC_TIM7_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM7EN)) +#define __HAL_RCC_TIM12_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM12EN)) +#define __HAL_RCC_TIM13_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM13EN)) +#define __HAL_RCC_TIM14_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM14EN)) +#define __HAL_RCC_SPDIFRX_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_SPDIFRXEN)) +#define __HAL_RCC_USART3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_USART3EN)) +#define __HAL_RCC_UART4_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_UART4EN)) +#define __HAL_RCC_UART5_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_UART5EN)) +#define __HAL_RCC_FMPI2C1_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_FMPI2C1EN)) +#define __HAL_RCC_CAN1_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_CAN1EN)) +#define __HAL_RCC_CAN2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_CAN2EN)) +#define __HAL_RCC_CEC_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_CECEN)) +#define __HAL_RCC_DAC_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_DACEN)) +/** + * @} + */ + +/** @defgroup RCCEx_APB1_Peripheral_Clock_Enable_Disable_Status APB1 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the APB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ * @{ + */ +#define __HAL_RCC_TIM2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) != RESET) +#define __HAL_RCC_TIM3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) != RESET) +#define __HAL_RCC_TIM4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) != RESET) +#define __HAL_RCC_SPI3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) != RESET) +#define __HAL_RCC_I2C3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) != RESET) +#define __HAL_RCC_TIM6_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) != RESET) +#define __HAL_RCC_TIM7_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM7EN)) != RESET) +#define __HAL_RCC_TIM12_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM12EN)) != RESET) +#define __HAL_RCC_TIM13_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM13EN)) != RESET) +#define __HAL_RCC_TIM14_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM14EN)) != RESET) +#define __HAL_RCC_SPDIFRX_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPDIFRXEN)) != RESET) +#define __HAL_RCC_USART3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART3EN)) != RESET) +#define __HAL_RCC_UART4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART4EN)) != RESET) +#define __HAL_RCC_UART5_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART5EN)) != RESET) +#define __HAL_RCC_FMPI2C1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_FMPI2C1EN)) != RESET) +#define __HAL_RCC_CAN1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN1EN)) != RESET) +#define __HAL_RCC_CAN2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN2EN)) != RESET) +#define __HAL_RCC_CEC_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CECEN)) != RESET) +#define __HAL_RCC_DAC_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) != RESET) + +#define __HAL_RCC_TIM2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) == RESET) +#define __HAL_RCC_TIM3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) == RESET) +#define __HAL_RCC_TIM4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) == RESET) +#define __HAL_RCC_SPI3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) == RESET) +#define __HAL_RCC_I2C3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) == RESET) +#define __HAL_RCC_TIM6_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) == RESET) +#define __HAL_RCC_TIM7_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM7EN)) == RESET) +#define __HAL_RCC_TIM12_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM12EN)) == RESET) +#define __HAL_RCC_TIM13_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM13EN)) == RESET) +#define __HAL_RCC_TIM14_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM14EN)) == RESET) +#define __HAL_RCC_SPDIFRX_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPDIFRXEN)) == RESET) +#define __HAL_RCC_USART3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART3EN)) == RESET) +#define __HAL_RCC_UART4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART4EN)) == RESET) +#define __HAL_RCC_UART5_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART5EN)) == RESET) +#define __HAL_RCC_FMPI2C1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_FMPI2C1EN)) == RESET) +#define __HAL_RCC_CAN1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN1EN)) == RESET) +#define __HAL_RCC_CAN2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN2EN)) == RESET) +#define __HAL_RCC_CEC_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CECEN)) == RESET) +#define __HAL_RCC_DAC_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) == RESET) +/** + * @} + */ + +/** @defgroup 
RCCEx_APB2_Clock_Enable_Disable APB2 Peripheral Clock Enable Disable + * @brief Enable or disable the High Speed APB (APB2) peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_TIM8_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_ADC2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC2EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_ADC3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC3EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SAI1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI1EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SAI2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI2EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SDIO_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SPI4_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM10_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SDIO_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SDIOEN)) +#define __HAL_RCC_SPI4_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI4EN)) +#define __HAL_RCC_TIM10_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM10EN)) +#define __HAL_RCC_TIM8_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM8EN)) +#define __HAL_RCC_ADC2_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_ADC2EN)) +#define __HAL_RCC_ADC3_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_ADC3EN)) +#define __HAL_RCC_SAI1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SAI1EN)) +#define __HAL_RCC_SAI2_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SAI2EN)) +/** + * @} + */ + +/** @defgroup RCCEx_APB2_Peripheral_Clock_Enable_Disable_Status APB2 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the APB2 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ * @{ + */ +#define __HAL_RCC_SDIO_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) != RESET) +#define __HAL_RCC_SPI4_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) != RESET) +#define __HAL_RCC_TIM10_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) != RESET) +#define __HAL_RCC_TIM8_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM8EN)) != RESET) +#define __HAL_RCC_ADC2_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC2EN)) != RESET) +#define __HAL_RCC_ADC3_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC3EN)) != RESET) +#define __HAL_RCC_SAI1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI1EN)) != RESET) +#define __HAL_RCC_SAI2_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI2EN)) != RESET) + +#define __HAL_RCC_SDIO_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) == RESET) +#define __HAL_RCC_SPI4_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) == RESET) +#define __HAL_RCC_TIM10_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) == RESET) +#define __HAL_RCC_TIM8_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM8EN)) == RESET) +#define __HAL_RCC_ADC2_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC2EN)) == RESET) +#define __HAL_RCC_ADC3_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC3EN)) == RESET) +#define __HAL_RCC_SAI1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI1EN)) == RESET) +#define __HAL_RCC_SAI2_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI2EN)) == RESET) +/** + * @} + */ + +/** @defgroup RCCEx_AHB1_Force_Release_Reset AHB1 Force Release Reset + * @brief Force or release AHB1 peripheral reset. + * @{ + */ +#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0x206010FFU) +#define __HAL_RCC_GPIOD_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIODRST)) +#define __HAL_RCC_GPIOE_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOERST)) +#define __HAL_RCC_GPIOF_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOFRST)) +#define __HAL_RCC_GPIOG_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOGRST)) +#define __HAL_RCC_USB_OTG_HS_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_OTGHRST)) +#define __HAL_RCC_CRC_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_CRCRST)) + +#define __HAL_RCC_GPIOD_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIODRST)) +#define __HAL_RCC_GPIOE_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOERST)) +#define __HAL_RCC_GPIOF_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOFRST)) +#define __HAL_RCC_GPIOG_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOGRST)) +#define __HAL_RCC_USB_OTG_HS_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_OTGHRST)) +#define __HAL_RCC_CRC_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_CRCRST)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB2_Force_Release_Reset AHB2 Force Release Reset + * @brief Force or release AHB2 peripheral reset. 
+ * @{ + */ +#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0x00000081U) +#define __HAL_RCC_USB_OTG_FS_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_OTGFSRST)) +#define __HAL_RCC_RNG_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_RNGRST)) +#define __HAL_RCC_DCMI_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_DCMIRST)) + +#define __HAL_RCC_AHB2_RELEASE_RESET() (RCC->AHB2RSTR = 0x00U) +#define __HAL_RCC_USB_OTG_FS_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_OTGFSRST)) +#define __HAL_RCC_RNG_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_RNGRST)) +#define __HAL_RCC_DCMI_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_DCMIRST)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB3_Force_Release_Reset AHB3 Force Release Reset + * @brief Force or release AHB3 peripheral reset. + * @{ + */ +#define __HAL_RCC_AHB3_FORCE_RESET() (RCC->AHB3RSTR = 0x00000003U) +#define __HAL_RCC_AHB3_RELEASE_RESET() (RCC->AHB3RSTR = 0x00U) + +#define __HAL_RCC_FMC_FORCE_RESET() (RCC->AHB3RSTR |= (RCC_AHB3RSTR_FMCRST)) +#define __HAL_RCC_QSPI_FORCE_RESET() (RCC->AHB3RSTR |= (RCC_AHB3RSTR_QSPIRST)) + +#define __HAL_RCC_FMC_RELEASE_RESET() (RCC->AHB3RSTR &= ~(RCC_AHB3RSTR_FMCRST)) +#define __HAL_RCC_QSPI_RELEASE_RESET() (RCC->AHB3RSTR &= ~(RCC_AHB3RSTR_QSPIRST)) +/** + * @} + */ + +/** @defgroup RCCEx_APB1_Force_Release_Reset APB1 Force Release Reset + * @brief Force or release APB1 peripheral reset. + * @{ + */ +#define __HAL_RCC_APB1_FORCE_RESET() (RCC->APB1RSTR = 0x3FFFC9FFU) +#define __HAL_RCC_TIM6_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM6RST)) +#define __HAL_RCC_TIM7_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM7RST)) +#define __HAL_RCC_TIM12_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM12RST)) +#define __HAL_RCC_TIM13_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM13RST)) +#define __HAL_RCC_TIM14_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM14RST)) +#define __HAL_RCC_SPDIFRX_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_SPDIFRXRST)) +#define __HAL_RCC_USART3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_USART3RST)) +#define __HAL_RCC_UART4_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART4RST)) +#define __HAL_RCC_UART5_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART5RST)) +#define __HAL_RCC_FMPI2C1_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_FMPI2C1RST)) +#define __HAL_RCC_CAN1_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_CAN1RST)) +#define __HAL_RCC_CAN2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_CAN2RST)) +#define __HAL_RCC_CEC_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_CECRST)) +#define __HAL_RCC_DAC_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_DACRST)) +#define __HAL_RCC_TIM2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM2RST)) +#define __HAL_RCC_TIM3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM3RST)) +#define __HAL_RCC_TIM4_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM4RST)) +#define __HAL_RCC_SPI3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_SPI3RST)) +#define __HAL_RCC_I2C3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_I2C3RST)) + +#define __HAL_RCC_TIM2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM2RST)) +#define __HAL_RCC_TIM3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM3RST)) +#define __HAL_RCC_TIM4_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM4RST)) +#define __HAL_RCC_SPI3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_SPI3RST)) +#define __HAL_RCC_I2C3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_I2C3RST)) +#define __HAL_RCC_TIM6_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM6RST)) +#define 
__HAL_RCC_TIM7_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM7RST)) +#define __HAL_RCC_TIM12_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM12RST)) +#define __HAL_RCC_TIM13_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM13RST)) +#define __HAL_RCC_TIM14_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM14RST)) +#define __HAL_RCC_SPDIFRX_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_SPDIFRXRST)) +#define __HAL_RCC_USART3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_USART3RST)) +#define __HAL_RCC_UART4_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART4RST)) +#define __HAL_RCC_UART5_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART5RST)) +#define __HAL_RCC_FMPI2C1_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_FMPI2C1RST)) +#define __HAL_RCC_CAN1_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_CAN1RST)) +#define __HAL_RCC_CAN2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_CAN2RST)) +#define __HAL_RCC_CEC_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_CECRST)) +#define __HAL_RCC_DAC_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_DACRST)) +/** + * @} + */ + +/** @defgroup RCCEx_APB2_Force_Release_Reset APB2 Force Release Reset + * @brief Force or release APB2 peripheral reset. + * @{ + */ +#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0x00C77933U) +#define __HAL_RCC_TIM8_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM8RST)) +#define __HAL_RCC_SAI1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SAI1RST)) +#define __HAL_RCC_SAI2_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SAI2RST)) +#define __HAL_RCC_SDIO_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SDIORST)) +#define __HAL_RCC_SPI4_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI4RST)) +#define __HAL_RCC_TIM10_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM10RST)) + +#define __HAL_RCC_SDIO_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SDIORST)) +#define __HAL_RCC_SPI4_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI4RST)) +#define __HAL_RCC_TIM10_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM10RST)) +#define __HAL_RCC_TIM8_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM8RST)) +#define __HAL_RCC_SAI1_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SAI1RST)) +#define __HAL_RCC_SAI2_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SAI2RST)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB1_LowPower_Enable_Disable AHB1 Peripheral Low Power Enable Disable + * @brief Enable or disable the AHB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. 
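+ * @note  Editorial usage sketch, not part of the original ST header, assuming the PWR HAL
+ *        driver is also part of the build: gating an unused peripheral clock before entering
+ *        Sleep mode reduces consumption, and the clock runs again automatically after wake-up.
+ * @code
+ *        __HAL_RCC_GPIOD_CLK_SLEEP_DISABLE();
+ *        HAL_PWR_EnterSLEEPMode(PWR_MAINREGULATOR_ON, PWR_SLEEPENTRY_WFI);
+ *        ... on wake-up the GPIOD clock is active again ...
+ * @endcode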
+ * @{ + */ +#define __HAL_RCC_GPIOD_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIODLPEN)) +#define __HAL_RCC_GPIOE_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOELPEN)) +#define __HAL_RCC_GPIOF_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOFLPEN)) +#define __HAL_RCC_GPIOG_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOGLPEN)) +#define __HAL_RCC_SRAM2_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_SRAM2LPEN)) +#define __HAL_RCC_USB_OTG_HS_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_OTGHSLPEN)) +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_OTGHSULPILPEN)) +#define __HAL_RCC_CRC_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_CRCLPEN)) +#define __HAL_RCC_FLITF_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_FLITFLPEN)) +#define __HAL_RCC_SRAM1_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_SRAM1LPEN)) +#define __HAL_RCC_BKPSRAM_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_BKPSRAMLPEN)) + +#define __HAL_RCC_GPIOD_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIODLPEN)) +#define __HAL_RCC_GPIOE_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOELPEN)) +#define __HAL_RCC_GPIOF_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOFLPEN)) +#define __HAL_RCC_GPIOG_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOGLPEN)) +#define __HAL_RCC_SRAM2_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_SRAM2LPEN)) +#define __HAL_RCC_USB_OTG_HS_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_OTGHSLPEN)) +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_OTGHSULPILPEN)) +#define __HAL_RCC_CRC_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_CRCLPEN)) +#define __HAL_RCC_FLITF_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_FLITFLPEN)) +#define __HAL_RCC_SRAM1_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_SRAM1LPEN)) +#define __HAL_RCC_BKPSRAM_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_BKPSRAMLPEN)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB2_LowPower_Enable_Disable AHB2 Peripheral Low Power Enable Disable + * @brief Enable or disable the AHB2 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wake-up from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_USB_OTG_FS_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_OTGFSLPEN)) +#define __HAL_RCC_USB_OTG_FS_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_OTGFSLPEN)) + +#define __HAL_RCC_RNG_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_RNGLPEN)) +#define __HAL_RCC_RNG_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_RNGLPEN)) + +#define __HAL_RCC_DCMI_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_DCMILPEN)) +#define __HAL_RCC_DCMI_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_DCMILPEN)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB3_LowPower_Enable_Disable AHB3 Peripheral Low Power Enable Disable + * @brief Enable or disable the AHB3 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. 
+ * @{ + */ +#define __HAL_RCC_FMC_CLK_SLEEP_ENABLE() (RCC->AHB3LPENR |= (RCC_AHB3LPENR_FMCLPEN)) +#define __HAL_RCC_QSPI_CLK_SLEEP_ENABLE() (RCC->AHB3LPENR |= (RCC_AHB3LPENR_QSPILPEN)) + +#define __HAL_RCC_FMC_CLK_SLEEP_DISABLE() (RCC->AHB3LPENR &= ~(RCC_AHB3LPENR_FMCLPEN)) +#define __HAL_RCC_QSPI_CLK_SLEEP_DISABLE() (RCC->AHB3LPENR &= ~(RCC_AHB3LPENR_QSPILPEN)) +/** + * @} + */ + +/** @defgroup RCCEx_APB1_LowPower_Enable_Disable APB1 Peripheral Low Power Enable Disable + * @brief Enable or disable the APB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_TIM6_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM6LPEN)) +#define __HAL_RCC_TIM7_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM7LPEN)) +#define __HAL_RCC_TIM12_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM12LPEN)) +#define __HAL_RCC_TIM13_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM13LPEN)) +#define __HAL_RCC_TIM14_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM14LPEN)) +#define __HAL_RCC_SPDIFRX_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_SPDIFRXLPEN)) +#define __HAL_RCC_USART3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_USART3LPEN)) +#define __HAL_RCC_UART4_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART4LPEN)) +#define __HAL_RCC_UART5_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART5LPEN)) +#define __HAL_RCC_FMPI2C1_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_FMPI2C1LPEN)) +#define __HAL_RCC_CAN1_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_CAN1LPEN)) +#define __HAL_RCC_CAN2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_CAN2LPEN)) +#define __HAL_RCC_CEC_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_CECLPEN)) +#define __HAL_RCC_DAC_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_DACLPEN)) +#define __HAL_RCC_TIM2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM2LPEN)) +#define __HAL_RCC_TIM3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM3LPEN)) +#define __HAL_RCC_TIM4_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM4LPEN)) +#define __HAL_RCC_SPI3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_SPI3LPEN)) +#define __HAL_RCC_I2C3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_I2C3LPEN)) + +#define __HAL_RCC_TIM2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM2LPEN)) +#define __HAL_RCC_TIM3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM3LPEN)) +#define __HAL_RCC_TIM4_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM4LPEN)) +#define __HAL_RCC_SPI3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_SPI3LPEN)) +#define __HAL_RCC_I2C3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_I2C3LPEN)) +#define __HAL_RCC_TIM6_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM6LPEN)) +#define __HAL_RCC_TIM7_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM7LPEN)) +#define __HAL_RCC_TIM12_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM12LPEN)) +#define __HAL_RCC_TIM13_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM13LPEN)) +#define __HAL_RCC_TIM14_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM14LPEN)) +#define __HAL_RCC_SPDIFRX_CLK_SLEEP_DISABLE()(RCC->APB1LPENR &= ~(RCC_APB1LPENR_SPDIFRXLPEN)) +#define 
__HAL_RCC_USART3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_USART3LPEN)) +#define __HAL_RCC_UART4_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART4LPEN)) +#define __HAL_RCC_UART5_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART5LPEN)) +#define __HAL_RCC_FMPI2C1_CLK_SLEEP_DISABLE()(RCC->APB1LPENR &= ~(RCC_APB1LPENR_FMPI2C1LPEN)) +#define __HAL_RCC_CAN1_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_CAN1LPEN)) +#define __HAL_RCC_CAN2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_CAN2LPEN)) +#define __HAL_RCC_CEC_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_CECLPEN)) +#define __HAL_RCC_DAC_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_DACLPEN)) +/** + * @} + */ + +/** @defgroup RCCEx_APB2_LowPower_Enable_Disable APB2 Peripheral Low Power Enable Disable + * @brief Enable or disable the APB2 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_TIM8_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_TIM8LPEN)) +#define __HAL_RCC_ADC2_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_ADC2LPEN)) +#define __HAL_RCC_ADC3_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_ADC3LPEN)) +#define __HAL_RCC_SAI1_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SAI1LPEN)) +#define __HAL_RCC_SAI2_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SAI2LPEN)) +#define __HAL_RCC_SDIO_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SDIOLPEN)) +#define __HAL_RCC_SPI4_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SPI4LPEN)) +#define __HAL_RCC_TIM10_CLK_SLEEP_ENABLE()(RCC->APB2LPENR |= (RCC_APB2LPENR_TIM10LPEN)) + +#define __HAL_RCC_SDIO_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SDIOLPEN)) +#define __HAL_RCC_SPI4_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SPI4LPEN)) +#define __HAL_RCC_TIM10_CLK_SLEEP_DISABLE()(RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM10LPEN)) +#define __HAL_RCC_TIM8_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM8LPEN)) +#define __HAL_RCC_ADC2_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_ADC2LPEN)) +#define __HAL_RCC_ADC3_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_ADC3LPEN)) +#define __HAL_RCC_SAI1_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SAI1LPEN)) +#define __HAL_RCC_SAI2_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SAI2LPEN)) +/** + * @} + */ + +#endif /* STM32F446xx */ +/*----------------------------------------------------------------------------*/ + +/*-------STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx-------*/ +#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +/** @defgroup RCCEx_AHB1_Clock_Enable_Disable AHB1 Peripheral Clock Enable Disable + * @brief Enables or disables the AHB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
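+ * @note  Editorial note, not part of the original ST header: each *_CLK_ENABLE() macro below
+ *        sets the enable bit and then reads it back into a throw-away variable. The dummy
+ *        read forces the bus write to complete, so the peripheral clock is effectively
+ *        running before the very next register access. Expanded, the pattern is:
+ * @code
+ *        __IO uint32_t tmpreg = 0x00U;
+ *        SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);
+ *        tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);
+ *        UNUSED(tmpreg);
+ * @endcode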
+ * @{ + */ +#if defined(STM32F412Rx) || defined(STM32F412Vx) || defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_GPIOD_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + UNUSED(tmpreg); \ + } while(0U) +#endif /* STM32F412Rx || STM32F412Vx || STM32F412Zx || STM32F413xx || STM32F423xx */ +#if defined(STM32F412Vx) || defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_GPIOE_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + UNUSED(tmpreg); \ + } while(0U) +#endif /* STM32F412Vx || STM32F412Zx || STM32F413xx || STM32F423xx */ +#if defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_GPIOF_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_GPIOG_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ + UNUSED(tmpreg); \ + } while(0U) +#endif /* STM32F412Zx || STM32F413xx || STM32F423xx */ +#define __HAL_RCC_CRC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + UNUSED(tmpreg); \ + } while(0U) +#if defined(STM32F412Rx) || defined(STM32F412Vx) || defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_GPIOD_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIODEN)) +#endif /* STM32F412Rx || STM32F412Vx || STM32F412Zx || STM32F413xx || STM32F423xx */ +#if defined(STM32F412Vx) || defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_GPIOE_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOEEN)) +#endif /* STM32F412Vx || STM32F412Zx || STM32F413xx || STM32F423xx */ +#if defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_GPIOF_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOFEN)) +#define __HAL_RCC_GPIOG_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOGEN)) +#endif /* STM32F412Zx || STM32F413xx || STM32F423xx */ +#define __HAL_RCC_CRC_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_CRCEN)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB1_Peripheral_Clock_Enable_Disable_Status AHB1 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the AHB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ * @{ + */ +#if defined(STM32F412Rx) || defined(STM32F412Vx) || defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_GPIOD_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) != RESET) +#endif /* STM32F412Rx || STM32F412Vx || STM32F412Zx || STM32F413xx || STM32F423xx */ +#if defined(STM32F412Vx) || defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_GPIOE_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) != RESET) +#endif /* STM32F412Vx || STM32F412Zx || STM32F413xx || STM32F423xx */ +#if defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_GPIOF_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOFEN)) != RESET) +#define __HAL_RCC_GPIOG_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOGEN)) != RESET) +#endif /* STM32F412Zx || STM32F413xx || STM32F423xx */ +#define __HAL_RCC_CRC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) != RESET) + +#if defined(STM32F412Rx) || defined(STM32F412Vx) || defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_GPIOD_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) == RESET) +#endif /* STM32F412Rx || STM32F412Vx || STM32F412Zx || STM32F413xx || STM32F423xx */ +#if defined(STM32F412Vx) || defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_GPIOE_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) == RESET) +#endif /* STM32F412Vx || STM32F412Zx || STM32F413xx || STM32F423xx */ +#if defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_GPIOF_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOFEN)) == RESET) +#define __HAL_RCC_GPIOG_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOGEN)) == RESET) +#endif /* STM32F412Zx || STM32F413xx || STM32F423xx */ +#define __HAL_RCC_CRC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) == RESET) +/** + * @} + */ + +/** @defgroup RCCEx_AHB2_Clock_Enable_Disable AHB2 Peripheral Clock Enable Disable + * @brief Enable or disable the AHB2 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#if defined(STM32F423xx) +#define __HAL_RCC_AES_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_AESEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_AESEN);\ + UNUSED(tmpreg); \ + } while(0U) + +#define __HAL_RCC_AES_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_AESEN)) +#endif /* STM32F423xx */ + +#define __HAL_RCC_RNG_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_RNG_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_RNGEN)) + +#define __HAL_RCC_USB_OTG_FS_CLK_ENABLE() do {(RCC->AHB2ENR |= (RCC_AHB2ENR_OTGFSEN));\ + __HAL_RCC_SYSCFG_CLK_ENABLE();\ + }while(0U) + +#define __HAL_RCC_USB_OTG_FS_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_OTGFSEN)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB2_Peripheral_Clock_Enable_Disable_Status AHB2 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the AHB2 peripheral clock. 
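+ * @note  Editorial note, not part of the original ST header: as defined above,
+ *        __HAL_RCC_USB_OTG_FS_CLK_ENABLE() also switches on the SYSCFG clock, so the
+ *        application does not need to enable SYSCFG separately before touching the
+ *        OTG FS registers. A minimal sketch:
+ * @code
+ *        __HAL_RCC_USB_OTG_FS_CLK_ENABLE();
+ *        if (__HAL_RCC_USB_OTG_FS_IS_CLK_ENABLED())
+ *        {
+ *          ... OTG FS initialization can proceed here ...
+ *        }
+ * @endcode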
+ * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#if defined(STM32F423xx) +#define __HAL_RCC_AES_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_AESEN)) != RESET) +#define __HAL_RCC_AES_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_AESEN)) == RESET) +#endif /* STM32F423xx */ + +#define __HAL_RCC_USB_OTG_FS_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) != RESET) +#define __HAL_RCC_USB_OTG_FS_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) == RESET) + +#define __HAL_RCC_RNG_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) != RESET) +#define __HAL_RCC_RNG_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) == RESET) +/** + * @} + */ + +/** @defgroup RCCEx_AHB3_Clock_Enable_Disable AHB3 Peripheral Clock Enable Disable + * @brief Enables or disables the AHB3 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_FSMC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FSMCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FSMCEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_QSPI_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_QSPIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_QSPIEN);\ + UNUSED(tmpreg); \ + } while(0U) + +#define __HAL_RCC_FSMC_CLK_DISABLE() (RCC->AHB3ENR &= ~(RCC_AHB3ENR_FSMCEN)) +#define __HAL_RCC_QSPI_CLK_DISABLE() (RCC->AHB3ENR &= ~(RCC_AHB3ENR_QSPIEN)) +#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F413xx || STM32F423xx */ +/** + * @} + */ + +/** @defgroup RCCEx_AHB3_Peripheral_Clock_Enable_Disable_Status AHB3 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the AHB3 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_FSMC_IS_CLK_ENABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_FSMCEN)) != RESET) +#define __HAL_RCC_QSPI_IS_CLK_ENABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_QSPIEN)) != RESET) + +#define __HAL_RCC_FSMC_IS_CLK_DISABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_FSMCEN)) == RESET) +#define __HAL_RCC_QSPI_IS_CLK_DISABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_QSPIEN)) == RESET) +#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F413xx || STM32F423xx */ + +/** + * @} + */ + +/** @defgroup RCCEx_APB1_Clock_Enable_Disable APB1 Peripheral Clock Enable Disable + * @brief Enable or disable the Low Speed APB (APB1) peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ * @{ + */ +#define __HAL_RCC_TIM6_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM7_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM12_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM13_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM14_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ + UNUSED(tmpreg); \ + } while(0U) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_LPTIM1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_LPTIM1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_LPTIM1EN);\ + UNUSED(tmpreg); \ + } while(0U) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_RTCAPB_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_RTCAPBEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_RTCAPBEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_USART3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ + UNUSED(tmpreg); \ + } while(0U) + +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_UART4_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_UART5_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ + UNUSED(tmpreg); \ + } while(0U) +#endif /* STM32F413xx || STM32F423xx */ + +#define __HAL_RCC_FMPI2C1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_FMPI2C1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_FMPI2C1EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_CAN1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_CAN2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + 
SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ + UNUSED(tmpreg); \ + } while(0U) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_CAN3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN3EN);\ + UNUSED(tmpreg); \ + } while(0U) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_TIM2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM4_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SPI3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_I2C3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + UNUSED(tmpreg); \ + } while(0U) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_DAC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_UART7_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART7EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART7EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_UART8_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART8EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART8EN);\ + UNUSED(tmpreg); \ + } while(0U) +#endif /* STM32F413xx || STM32F423xx */ + +#define __HAL_RCC_TIM2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM2EN)) +#define __HAL_RCC_TIM3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM3EN)) +#define __HAL_RCC_TIM4_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM4EN)) +#define __HAL_RCC_TIM6_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM6EN)) +#define __HAL_RCC_TIM7_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM7EN)) +#define __HAL_RCC_TIM12_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM12EN)) +#define __HAL_RCC_TIM13_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM13EN)) +#define __HAL_RCC_TIM14_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM14EN)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_LPTIM1_CLK_DISABLE() (RCC->APB1ENR &= 
~(RCC_APB1ENR_LPTIM1EN)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_RTCAPB_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_RTCAPBEN)) +#define __HAL_RCC_SPI3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_SPI3EN)) +#define __HAL_RCC_USART3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_USART3EN)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_UART4_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_UART4EN)) +#define __HAL_RCC_UART5_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_UART5EN)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_I2C3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_I2C3EN)) +#define __HAL_RCC_FMPI2C1_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_FMPI2C1EN)) +#define __HAL_RCC_CAN1_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_CAN1EN)) +#define __HAL_RCC_CAN2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_CAN2EN)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_CAN3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_CAN3EN)) +#define __HAL_RCC_DAC_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_DACEN)) +#define __HAL_RCC_UART7_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_UART7EN)) +#define __HAL_RCC_UART8_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_UART8EN)) +#endif /* STM32F413xx || STM32F423xx */ + +/** + * @} + */ + +/** @defgroup RCCEx_APB1_Peripheral_Clock_Enable_Disable_Status APB1 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the APB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_TIM2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) != RESET) +#define __HAL_RCC_TIM3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) != RESET) +#define __HAL_RCC_TIM4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) != RESET) +#define __HAL_RCC_TIM6_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) != RESET) +#define __HAL_RCC_TIM7_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM7EN)) != RESET) +#define __HAL_RCC_TIM12_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM12EN)) != RESET) +#define __HAL_RCC_TIM13_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM13EN)) != RESET) +#define __HAL_RCC_TIM14_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM14EN)) != RESET) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_LPTIM1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_LPTIM1EN)) != RESET) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_RTCAPB_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_RTCAPBEN)) != RESET) +#define __HAL_RCC_SPI3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) != RESET) +#define __HAL_RCC_USART3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART3EN)) != RESET) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_UART4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART4EN)) != RESET) +#define __HAL_RCC_UART5_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART5EN)) != RESET) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_I2C3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) != RESET) +#define __HAL_RCC_FMPI2C1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_FMPI2C1EN)) != RESET) +#define __HAL_RCC_CAN1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN1EN))!= RESET) +#define __HAL_RCC_CAN2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN2EN)) != RESET) +#if defined(STM32F413xx) || defined(STM32F423xx) 
+#define __HAL_RCC_CAN3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN3EN)) != RESET) +#define __HAL_RCC_DAC_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) != RESET) +#define __HAL_RCC_UART7_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART7EN)) != RESET) +#define __HAL_RCC_UART8_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART8EN)) != RESET) +#endif /* STM32F413xx || STM32F423xx */ + +#define __HAL_RCC_TIM2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) == RESET) +#define __HAL_RCC_TIM3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) == RESET) +#define __HAL_RCC_TIM4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) == RESET) +#define __HAL_RCC_TIM6_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) == RESET) +#define __HAL_RCC_TIM7_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM7EN)) == RESET) +#define __HAL_RCC_TIM12_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM12EN)) == RESET) +#define __HAL_RCC_TIM13_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM13EN)) == RESET) +#define __HAL_RCC_TIM14_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM14EN)) == RESET) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_LPTIM1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_LPTIM1EN)) == RESET) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_RTCAPB_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_RTCAPBEN)) == RESET) +#define __HAL_RCC_SPI3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) == RESET) +#define __HAL_RCC_USART3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART3EN)) == RESET) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_UART4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART4EN)) == RESET) +#define __HAL_RCC_UART5_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART5EN)) == RESET) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_I2C3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) == RESET) +#define __HAL_RCC_FMPI2C1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_FMPI2C1EN)) == RESET) +#define __HAL_RCC_CAN1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN1EN)) == RESET) +#define __HAL_RCC_CAN2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN2EN)) == RESET) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_CAN3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN3EN)) == RESET) +#define __HAL_RCC_DAC_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) == RESET) +#define __HAL_RCC_UART7_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART7EN)) == RESET) +#define __HAL_RCC_UART8_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART8EN)) == RESET) +#endif /* STM32F413xx || STM32F423xx */ +/** + * @} + */ +/** @defgroup RCCEx_APB2_Clock_Enable_Disable APB2 Peripheral Clock Enable Disable + * @brief Enable or disable the High Speed APB (APB2) peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
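+ * @note  Editorial usage sketch, not part of the original ST header: several of the macros
+ *        below are only defined on STM32F413xx/STM32F423xx parts, so application code that
+ *        targets multiple devices in this family should use the same device guards as this
+ *        header:
+ * @code
+ *        #if defined(STM32F413xx) || defined(STM32F423xx)
+ *          __HAL_RCC_UART9_CLK_ENABLE();
+ *        #endif
+ *        __HAL_RCC_SDIO_CLK_ENABLE();
+ * @endcode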
+ * @{ + */ +#define __HAL_RCC_TIM8_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ + UNUSED(tmpreg); \ + } while(0U) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_UART9_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_UART9EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_UART9EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_UART10_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_UART10EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_UART10EN);\ + UNUSED(tmpreg); \ + } while(0U) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_SDIO_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SPI4_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_EXTIT_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_EXTITEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_EXTITEN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_TIM10_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + UNUSED(tmpreg); \ + } while(0U) +#define __HAL_RCC_SPI5_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ + UNUSED(tmpreg); \ + } while(0U) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_SAI1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI1EN);\ + UNUSED(tmpreg); \ + } while(0U) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_DFSDM1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_DFSDM1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_DFSDM1EN);\ + UNUSED(tmpreg); \ + } while(0U) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_DFSDM2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_DFSDM2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_DFSDM2EN);\ + UNUSED(tmpreg); \ + } while(0U) +#endif /* STM32F413xx || STM32F423xx */ + +#define __HAL_RCC_TIM8_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM8EN)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_UART9_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_UART9EN)) +#define __HAL_RCC_UART10_CLK_DISABLE() (RCC->APB2ENR &= 
~(RCC_APB2ENR_UART10EN)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_SDIO_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SDIOEN)) +#define __HAL_RCC_SPI4_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI4EN)) +#define __HAL_RCC_EXTIT_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_EXTITEN)) +#define __HAL_RCC_TIM10_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM10EN)) +#define __HAL_RCC_SPI5_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI5EN)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_SAI1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SAI1EN)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_DFSDM1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_DFSDM1EN)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_DFSDM2_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_DFSDM2EN)) +#endif /* STM32F413xx || STM32F423xx */ +/** + * @} + */ + +/** @defgroup RCCEx_APB2_Peripheral_Clock_Enable_Disable_Status APB2 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the APB2 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_TIM8_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM8EN)) != RESET) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_UART9_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_UART9EN)) != RESET) +#define __HAL_RCC_UART10_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_UART10EN)) != RESET) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_SDIO_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) != RESET) +#define __HAL_RCC_SPI4_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) != RESET) +#define __HAL_RCC_EXTIT_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_EXTITEN)) != RESET) +#define __HAL_RCC_TIM10_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) != RESET) +#define __HAL_RCC_SPI5_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI5EN)) != RESET) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_SAI1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI1EN)) != RESET) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_DFSDM1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_DFSDM1EN)) != RESET) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_DFSDM2_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_DFSDM2EN)) != RESET) +#endif /* STM32F413xx || STM32F423xx */ + +#define __HAL_RCC_TIM8_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM8EN)) == RESET) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_UART9_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_UART9EN)) == RESET) +#define __HAL_RCC_UART10_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_UART10EN)) == RESET) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_SDIO_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) == RESET) +#define __HAL_RCC_SPI4_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) == RESET) +#define __HAL_RCC_EXTIT_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_EXTITEN)) == RESET) +#define __HAL_RCC_TIM10_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) == RESET) +#define __HAL_RCC_SPI5_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI5EN)) == RESET) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_SAI1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI1EN)) == RESET) +#endif /* STM32F413xx || 
STM32F423xx */ +#define __HAL_RCC_DFSDM1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_DFSDM1EN)) == RESET) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_DFSDM2_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_DFSDM2EN)) == RESET) +#endif /* STM32F413xx || STM32F423xx */ +/** + * @} + */ + +/** @defgroup RCCEx_AHB1_Force_Release_Reset AHB1 Force Release Reset + * @brief Force or release AHB1 peripheral reset. + * @{ + */ +#if defined (STM32F412Zx) || defined(STM32F413xx) || defined (STM32F423xx) +#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0x006010FFU) +#endif /* STM32F412Zx || STM32F413xx || STM32F423xx */ +#if defined (STM32F412Cx) +#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0x00601087U) +#endif /* STM32F412Cx */ +#if defined (STM32F412Vx) +#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0x0060109FU) +#endif /* STM32F412Vx */ +#if defined (STM32F412Rx) +#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0x0060108FU) +#endif /* STM32F412Rx */ +#if defined(STM32F412Rx) || defined(STM32F412Vx) || defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_GPIOD_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIODRST)) +#endif /* STM32F412Rx || STM32F412Vx || STM32F412Zx || STM32F413xx || STM32F423xx */ +#if defined(STM32F412Vx) || defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_GPIOE_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOERST)) +#endif /* STM32F412Vx || STM32F412Zx || STM32F413xx || STM32F423xx */ +#if defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_GPIOF_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOFRST)) +#define __HAL_RCC_GPIOG_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOGRST)) +#endif /* STM32F412Zx || STM32F413xx || STM32F423xx */ +#define __HAL_RCC_CRC_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_CRCRST)) + +#if defined(STM32F412Rx) || defined(STM32F412Vx) || defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_GPIOD_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIODRST)) +#endif /* STM32F412Rx || STM32F412Vx || STM32F412Zx || STM32F413xx || STM32F423xx */ +#if defined(STM32F412Vx) || defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_GPIOE_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOERST)) +#endif /* STM32F412Vx || STM32F412Zx || STM32F413xx || STM32F423xx */ +#if defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_GPIOF_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOFRST)) +#define __HAL_RCC_GPIOG_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOGRST)) +#endif /* STM32F412Zx || STM32F413xx || STM32F423xx */ +#define __HAL_RCC_CRC_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_CRCRST)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB2_Force_Release_Reset AHB2 Force Release Reset + * @brief Force or release AHB2 peripheral reset. 
+ * @{ + */ +#if defined(STM32F423xx) +#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0x000000D0U) +#define __HAL_RCC_AES_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_AESRST)) +#define __HAL_RCC_AES_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_AESRST)) +#else +#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0x000000C0U) +#endif /* STM32F423xx */ +#define __HAL_RCC_AHB2_RELEASE_RESET() (RCC->AHB2RSTR = 0x00U) + +#define __HAL_RCC_USB_OTG_FS_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_OTGFSRST)) +#define __HAL_RCC_USB_OTG_FS_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_OTGFSRST)) + +#define __HAL_RCC_RNG_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_RNGRST)) +#define __HAL_RCC_RNG_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_RNGRST)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB3_Force_Release_Reset AHB3 Force Release Reset + * @brief Force or release AHB3 peripheral reset. + * @{ + */ +#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_AHB3_FORCE_RESET() (RCC->AHB3RSTR = 0x00000003U) +#define __HAL_RCC_AHB3_RELEASE_RESET() (RCC->AHB3RSTR = 0x00U) + +#define __HAL_RCC_FSMC_FORCE_RESET() (RCC->AHB3RSTR |= (RCC_AHB3RSTR_FSMCRST)) +#define __HAL_RCC_QSPI_FORCE_RESET() (RCC->AHB3RSTR |= (RCC_AHB3RSTR_QSPIRST)) + +#define __HAL_RCC_FSMC_RELEASE_RESET() (RCC->AHB3RSTR &= ~(RCC_AHB3RSTR_FSMCRST)) +#define __HAL_RCC_QSPI_RELEASE_RESET() (RCC->AHB3RSTR &= ~(RCC_AHB3RSTR_QSPIRST)) +#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F413xx || STM32F423xx */ +#if defined(STM32F412Cx) +#define __HAL_RCC_AHB3_FORCE_RESET() +#define __HAL_RCC_AHB3_RELEASE_RESET() + +#define __HAL_RCC_FSMC_FORCE_RESET() +#define __HAL_RCC_QSPI_FORCE_RESET() + +#define __HAL_RCC_FSMC_RELEASE_RESET() +#define __HAL_RCC_QSPI_RELEASE_RESET() +#endif /* STM32F412Cx */ +/** + * @} + */ + +/** @defgroup RCCEx_APB1_Force_Release_Reset APB1 Force Release Reset + * @brief Force or release APB1 peripheral reset. 
+ * @{ + */ +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_APB1_FORCE_RESET() (RCC->APB1RSTR = 0xFFFECBFFU) +#endif /* STM32F413xx || STM32F423xx */ +#if defined (STM32F412Zx) || defined (STM32F412Vx) || defined (STM32F412Rx) || defined (STM32F412Cx) +#define __HAL_RCC_APB1_FORCE_RESET() (RCC->APB1RSTR = 0x17E6C9FFU) +#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ +#define __HAL_RCC_TIM2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM2RST)) +#define __HAL_RCC_TIM3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM3RST)) +#define __HAL_RCC_TIM4_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM4RST)) +#define __HAL_RCC_TIM6_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM6RST)) +#define __HAL_RCC_TIM7_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM7RST)) +#define __HAL_RCC_TIM12_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM12RST)) +#define __HAL_RCC_TIM13_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM13RST)) +#define __HAL_RCC_TIM14_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM14RST)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_LPTIM1_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_LPTIM1RST)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_SPI3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_SPI3RST)) +#define __HAL_RCC_USART3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_USART3RST)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_UART4_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART4RST)) +#define __HAL_RCC_UART5_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART5RST)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_I2C3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_I2C3RST)) +#define __HAL_RCC_FMPI2C1_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_FMPI2C1RST)) +#define __HAL_RCC_CAN1_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_CAN1RST)) +#define __HAL_RCC_CAN2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_CAN2RST)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_CAN3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_CAN3RST)) +#define __HAL_RCC_DAC_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_DACRST)) +#define __HAL_RCC_UART7_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART7RST)) +#define __HAL_RCC_UART8_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART8RST)) +#endif /* STM32F413xx || STM32F423xx */ + +#define __HAL_RCC_TIM2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM2RST)) +#define __HAL_RCC_TIM3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM3RST)) +#define __HAL_RCC_TIM4_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM4RST)) +#define __HAL_RCC_TIM6_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM6RST)) +#define __HAL_RCC_TIM7_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM7RST)) +#define __HAL_RCC_TIM12_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM12RST)) +#define __HAL_RCC_TIM13_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM13RST)) +#define __HAL_RCC_TIM14_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM14RST)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_LPTIM1_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_LPTIM1RST)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_SPI3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_SPI3RST)) +#define __HAL_RCC_USART3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_USART3RST)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_UART4_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART4RST)) +#define 
__HAL_RCC_UART5_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART5RST)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_I2C3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_I2C3RST)) +#define __HAL_RCC_FMPI2C1_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_FMPI2C1RST)) +#define __HAL_RCC_CAN1_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_CAN1RST)) +#define __HAL_RCC_CAN2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_CAN2RST)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_CAN3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_CAN3RST)) +#define __HAL_RCC_DAC_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_DACRST)) +#define __HAL_RCC_UART7_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART7RST)) +#define __HAL_RCC_UART8_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART8RST)) +#endif /* STM32F413xx || STM32F423xx */ +/** + * @} + */ + +/** @defgroup RCCEx_APB2_Force_Release_Reset APB2 Force Release Reset + * @brief Force or release APB2 peripheral reset. + * @{ + */ +#if defined(STM32F413xx)|| defined(STM32F423xx) +#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0x035779F3U) +#endif /* STM32F413xx || STM32F423xx */ +#if defined (STM32F412Zx) || defined (STM32F412Vx) || defined (STM32F412Rx) || defined (STM32F412Cx) +#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0x01177933U) +#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ +#define __HAL_RCC_TIM8_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM8RST)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_UART9_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_UART9RST)) +#define __HAL_RCC_UART10_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_UART10RST)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_SDIO_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SDIORST)) +#define __HAL_RCC_SPI4_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI4RST)) +#define __HAL_RCC_TIM10_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM10RST)) +#define __HAL_RCC_SPI5_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI5RST)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_SAI1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SAI1RST)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_DFSDM1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_DFSDM1RST)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_DFSDM2_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_DFSDM2RST)) +#endif /* STM32F413xx || STM32F423xx */ + +#define __HAL_RCC_TIM8_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM8RST)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_UART9_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_UART9RST)) +#define __HAL_RCC_UART10_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_UART10RST)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_SDIO_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SDIORST)) +#define __HAL_RCC_SPI4_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI4RST)) +#define __HAL_RCC_TIM10_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM10RST)) +#define __HAL_RCC_SPI5_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI5RST)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_SAI1_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SAI1RST)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_DFSDM1_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_DFSDM1RST)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define 
__HAL_RCC_DFSDM2_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_DFSDM2RST)) +#endif /* STM32F413xx || STM32F423xx */ +/** + * @} + */ + +/** @defgroup RCCEx_AHB1_LowPower_Enable_Disable AHB1 Peripheral Low Power Enable Disable + * @brief Enable or disable the AHB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_GPIOD_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIODLPEN)) +#define __HAL_RCC_GPIOE_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOELPEN)) +#define __HAL_RCC_GPIOF_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOFLPEN)) +#define __HAL_RCC_GPIOG_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOGLPEN)) +#define __HAL_RCC_CRC_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_CRCLPEN)) +#define __HAL_RCC_FLITF_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_FLITFLPEN)) +#define __HAL_RCC_SRAM1_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_SRAM1LPEN)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_SRAM2_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_SRAM2LPEN)) +#endif /* STM32F413xx || STM32F423xx */ + +#define __HAL_RCC_GPIOD_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIODLPEN)) +#define __HAL_RCC_GPIOE_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOELPEN)) +#define __HAL_RCC_GPIOF_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOFLPEN)) +#define __HAL_RCC_GPIOG_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOGLPEN)) +#define __HAL_RCC_CRC_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_CRCLPEN)) +#define __HAL_RCC_FLITF_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_FLITFLPEN)) +#define __HAL_RCC_SRAM1_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_SRAM1LPEN)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_SRAM2_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_SRAM2LPEN)) +#endif /* STM32F413xx || STM32F423xx */ +/** + * @} + */ + +/** @defgroup RCCEx_AHB2_LowPower_Enable_Disable AHB2 Peripheral Low Power Enable Disable + * @brief Enable or disable the AHB2 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wake-up from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#if defined(STM32F423xx) +#define __HAL_RCC_AES_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_AESLPEN)) +#define __HAL_RCC_AES_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_AESLPEN)) +#endif /* STM32F423xx */ + +#define __HAL_RCC_USB_OTG_FS_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_OTGFSLPEN)) +#define __HAL_RCC_USB_OTG_FS_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_OTGFSLPEN)) + +#define __HAL_RCC_RNG_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_RNGLPEN)) +#define __HAL_RCC_RNG_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_RNGLPEN)) +/** + * @} + */ + +/** @defgroup RCCEx_AHB3_LowPower_Enable_Disable AHB3 Peripheral Low Power Enable Disable + * @brief Enable or disable the AHB3 peripheral clock during Low Power (Sleep) mode. 
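/* Illustrative sketch, not part of the committed header: how the FORCE/RELEASE
   reset macros above are normally paired, assuming #include "stm32f4xx_hal.h".
   The peripheral (USART3 here, chosen arbitrarily) is held in reset between the
   two calls. */
static void rccex_example_reset_pulse(void)   /* hypothetical helper name */
{
  __HAL_RCC_USART3_FORCE_RESET();     /* assert the USART3 reset line                  */
  __HAL_RCC_USART3_RELEASE_RESET();   /* release it so the block can be re-initialised */
}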
+ * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_FSMC_CLK_SLEEP_ENABLE() (RCC->AHB3LPENR |= (RCC_AHB3LPENR_FSMCLPEN)) +#define __HAL_RCC_QSPI_CLK_SLEEP_ENABLE() (RCC->AHB3LPENR |= (RCC_AHB3LPENR_QSPILPEN)) + +#define __HAL_RCC_FSMC_CLK_SLEEP_DISABLE() (RCC->AHB3LPENR &= ~(RCC_AHB3LPENR_FSMCLPEN)) +#define __HAL_RCC_QSPI_CLK_SLEEP_DISABLE() (RCC->AHB3LPENR &= ~(RCC_AHB3LPENR_QSPILPEN)) +#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F413xx || STM32F423xx */ + +/** + * @} + */ + +/** @defgroup RCCEx_APB1_LowPower_Enable_Disable APB1 Peripheral Low Power Enable Disable + * @brief Enable or disable the APB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_TIM2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM2LPEN)) +#define __HAL_RCC_TIM3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM3LPEN)) +#define __HAL_RCC_TIM4_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM4LPEN)) +#define __HAL_RCC_TIM6_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM6LPEN)) +#define __HAL_RCC_TIM7_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM7LPEN)) +#define __HAL_RCC_TIM12_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM12LPEN)) +#define __HAL_RCC_TIM13_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM13LPEN)) +#define __HAL_RCC_TIM14_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM14LPEN)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_LPTIM1_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_LPTIM1LPEN)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_RTCAPB_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_RTCAPBLPEN)) +#define __HAL_RCC_SPI3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_SPI3LPEN)) +#define __HAL_RCC_USART3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_USART3LPEN)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_UART4_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART4LPEN)) +#define __HAL_RCC_UART5_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART5LPEN)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_I2C3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_I2C3LPEN)) +#define __HAL_RCC_FMPI2C1_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_FMPI2C1LPEN)) +#define __HAL_RCC_CAN1_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_CAN1LPEN)) +#define __HAL_RCC_CAN2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_CAN2LPEN)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_CAN3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_CAN3LPEN)) +#define __HAL_RCC_DAC_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_DACLPEN)) +#define __HAL_RCC_UART7_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART7LPEN)) +#define __HAL_RCC_UART8_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART8LPEN)) +#endif /* STM32F413xx || STM32F423xx */ + +#define 
__HAL_RCC_TIM2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM2LPEN)) +#define __HAL_RCC_TIM3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM3LPEN)) +#define __HAL_RCC_TIM4_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM4LPEN)) +#define __HAL_RCC_TIM6_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM6LPEN)) +#define __HAL_RCC_TIM7_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM7LPEN)) +#define __HAL_RCC_TIM12_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM12LPEN)) +#define __HAL_RCC_TIM13_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM13LPEN)) +#define __HAL_RCC_TIM14_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM14LPEN)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_LPTIM1_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_LPTIM1LPEN)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_RTCAPB_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_RTCAPBLPEN)) +#define __HAL_RCC_SPI3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_SPI3LPEN)) +#define __HAL_RCC_USART3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_USART3LPEN)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_UART4_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART4LPEN)) +#define __HAL_RCC_UART5_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART5LPEN)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_I2C3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_I2C3LPEN)) +#define __HAL_RCC_FMPI2C1_CLK_SLEEP_DISABLE()(RCC->APB1LPENR &= ~(RCC_APB1LPENR_FMPI2C1LPEN)) +#define __HAL_RCC_CAN1_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_CAN1LPEN)) +#define __HAL_RCC_CAN2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_CAN2LPEN)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_CAN3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_CAN3LPEN)) +#define __HAL_RCC_DAC_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_DACLPEN)) +#define __HAL_RCC_UART7_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART7LPEN)) +#define __HAL_RCC_UART8_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART8LPEN)) +#endif /* STM32F413xx || STM32F423xx */ +/** + * @} + */ + +/** @defgroup RCCEx_APB2_LowPower_Enable_Disable APB2 Peripheral Low Power Enable Disable + * @brief Enable or disable the APB2 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. 
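/* Illustrative sketch, not part of the committed header: SLEEP-mode clock gating
   with the _CLK_SLEEP_ macros above, assuming #include "stm32f4xx_hal.h" and that
   the HAL PWR driver is in the build. Gated clocks stop only while the core sleeps
   and are restored automatically on wake-up. */
static void rccex_example_sleep_gating(void)   /* hypothetical helper name */
{
  __HAL_RCC_TIM7_CLK_SLEEP_DISABLE();    /* stop the TIM7 clock during SLEEP        */
  __HAL_RCC_USART3_CLK_SLEEP_ENABLE();   /* keep USART3 clocked (the default state) */
  HAL_PWR_EnterSLEEPMode(PWR_MAINREGULATOR_ON, PWR_SLEEPENTRY_WFI);
}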
+ * @{ + */ +#define __HAL_RCC_TIM8_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_TIM8LPEN)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_UART9_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_UART9LPEN)) +#define __HAL_RCC_UART10_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_UART10LPEN)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_SDIO_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SDIOLPEN)) +#define __HAL_RCC_SPI4_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SPI4LPEN)) +#define __HAL_RCC_EXTIT_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_EXTITLPEN)) +#define __HAL_RCC_TIM10_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_TIM10LPEN)) +#define __HAL_RCC_SPI5_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SPI5LPEN)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_SAI1_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SAI1LPEN)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_DFSDM1_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_DFSDM1LPEN)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_DFSDM2_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_DFSDM2LPEN)) +#endif /* STM32F413xx || STM32F423xx */ + +#define __HAL_RCC_TIM8_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM8LPEN)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_UART9_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_UART9LPEN)) +#define __HAL_RCC_UART10_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_UART10LPEN)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_SDIO_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SDIOLPEN)) +#define __HAL_RCC_SPI4_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SPI4LPEN)) +#define __HAL_RCC_EXTIT_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_EXTITLPEN)) +#define __HAL_RCC_TIM10_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM10LPEN)) +#define __HAL_RCC_SPI5_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SPI5LPEN)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_SAI1_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SAI1LPEN)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_DFSDM1_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_DFSDM1LPEN)) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_DFSDM2_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_DFSDM2LPEN)) +#endif /* STM32F413xx || STM32F423xx */ +/** + * @} + */ +#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ +/*----------------------------------------------------------------------------*/ + +/*------------------------------- PLL Configuration --------------------------*/ +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) || defined(STM32F446xx) ||\ + defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || \ + defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +/** @brief Macro to configure the main PLL clock source, multiplication and division factors. + * @note This function must be used only when the main PLL is disabled. + * @param __RCC_PLLSource__ specifies the PLL entry clock source. 
+ * This parameter can be one of the following values: + * @arg RCC_PLLSOURCE_HSI: HSI oscillator clock selected as PLL clock entry + * @arg RCC_PLLSOURCE_HSE: HSE oscillator clock selected as PLL clock entry + * @note This clock source (RCC_PLLSource) is common for the main PLL and PLLI2S. + * @param __PLLM__ specifies the division factor for PLL VCO input clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 63. + * @note You have to set the PLLM parameter correctly to ensure that the VCO input + * frequency ranges from 1 to 2 MHz. It is recommended to select a frequency + * of 2 MHz to limit PLL jitter. + * @param __PLLN__ specifies the multiplication factor for PLL VCO output clock + * This parameter must be a number between Min_Data = 50 and Max_Data = 432. + * @note You have to set the PLLN parameter correctly to ensure that the VCO + * output frequency is between 100 and 432 MHz. + * + * @param __PLLP__ specifies the division factor for main system clock (SYSCLK) + * This parameter must be a number in the range {2, 4, 6, or 8}. + * + * @param __PLLQ__ specifies the division factor for OTG FS, SDIO and RNG clocks + * This parameter must be a number between Min_Data = 2 and Max_Data = 15. + * @note If the USB OTG FS is used in your application, you have to set the + * PLLQ parameter correctly to have 48 MHz clock for the USB. However, + * the SDIO and RNG need a frequency lower than or equal to 48 MHz to work + * correctly. + * + * @param __PLLR__ PLL division factor for I2S, SAI, SYSTEM, SPDIFRX clocks. + * This parameter must be a number between Min_Data = 2 and Max_Data = 7. + * @note This parameter is only available in STM32F446xx/STM32F469xx/STM32F479xx/ + STM32F412Zx/STM32F412Vx/STM32F412Rx/STM32F412Cx/STM32F413xx/STM32F423xx devices. + * + */ +#define __HAL_RCC_PLL_CONFIG(__RCC_PLLSource__, __PLLM__, __PLLN__, __PLLP__, __PLLQ__,__PLLR__) \ + (RCC->PLLCFGR = ((__RCC_PLLSource__) | (__PLLM__) | \ + ((__PLLN__) << RCC_PLLCFGR_PLLN_Pos) | \ + ((((__PLLP__) >> 1U) -1U) << RCC_PLLCFGR_PLLP_Pos) | \ + ((__PLLQ__) << RCC_PLLCFGR_PLLQ_Pos) | \ + ((__PLLR__) << RCC_PLLCFGR_PLLR_Pos))) +#else +/** @brief Macro to configure the main PLL clock source, multiplication and division factors. + * @note This function must be used only when the main PLL is disabled. + * @param __RCC_PLLSource__ specifies the PLL entry clock source. + * This parameter can be one of the following values: + * @arg RCC_PLLSOURCE_HSI: HSI oscillator clock selected as PLL clock entry + * @arg RCC_PLLSOURCE_HSE: HSE oscillator clock selected as PLL clock entry + * @note This clock source (RCC_PLLSource) is common for the main PLL and PLLI2S. + * @param __PLLM__ specifies the division factor for PLL VCO input clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 63. + * @note You have to set the PLLM parameter correctly to ensure that the VCO input + * frequency ranges from 1 to 2 MHz. It is recommended to select a frequency + * of 2 MHz to limit PLL jitter. + * @param __PLLN__ specifies the multiplication factor for PLL VCO output clock + * This parameter must be a number between Min_Data = 50 and Max_Data = 432 + * Except for STM32F411xE devices where Min_Data = 192. + * @note You have to set the PLLN parameter correctly to ensure that the VCO + * output frequency is between 100 and 432 MHz, Except for STM32F411xE devices + * where frequency is between 192 and 432 MHz. 
+ * @param __PLLP__ specifies the division factor for main system clock (SYSCLK) + * This parameter must be a number in the range {2, 4, 6, or 8}. + * + * @param __PLLQ__ specifies the division factor for OTG FS, SDIO and RNG clocks + * This parameter must be a number between Min_Data = 2 and Max_Data = 15. + * @note If the USB OTG FS is used in your application, you have to set the + * PLLQ parameter correctly to have 48 MHz clock for the USB. However, + * the SDIO and RNG need a frequency lower than or equal to 48 MHz to work + * correctly. + * + */ +#define __HAL_RCC_PLL_CONFIG(__RCC_PLLSource__, __PLLM__, __PLLN__, __PLLP__, __PLLQ__) \ + (RCC->PLLCFGR = (0x20000000U | (__RCC_PLLSource__) | (__PLLM__)| \ + ((__PLLN__) << RCC_PLLCFGR_PLLN_Pos) | \ + ((((__PLLP__) >> 1U) -1U) << RCC_PLLCFGR_PLLP_Pos) | \ + ((__PLLQ__) << RCC_PLLCFGR_PLLQ_Pos))) +#endif /* STM32F410xx || STM32F446xx || STM32F469xx || STM32F479xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ +/*----------------------------------------------------------------------------*/ + +/*----------------------------PLLI2S Configuration ---------------------------*/ +#if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx) || defined(STM32F417xx) || \ + defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || \ + defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) || defined(STM32F446xx) || \ + defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || \ + defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) + +/** @brief Macros to enable or disable the PLLI2S. + * @note The PLLI2S is disabled by hardware when entering STOP and STANDBY modes. + */ +#define __HAL_RCC_PLLI2S_ENABLE() (*(__IO uint32_t *) RCC_CR_PLLI2SON_BB = ENABLE) +#define __HAL_RCC_PLLI2S_DISABLE() (*(__IO uint32_t *) RCC_CR_PLLI2SON_BB = DISABLE) + +#endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx || STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || + STM32F401xC || STM32F401xE || STM32F411xE || STM32F446xx || STM32F469xx || STM32F479xx || STM32F412Zx || STM32F412Vx || + STM32F412Rx || STM32F412Cx */ +#if defined(STM32F446xx) +/** @brief Macro to configure the PLLI2S clock multiplication and division factors . + * @note This macro must be used only when the PLLI2S is disabled. + * @note PLLI2S clock source is common with the main PLL (configured in + * HAL_RCC_ClockConfig() API). + * @param __PLLI2SM__ specifies the division factor for PLLI2S VCO input clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 63. + * @note You have to set the PLLI2SM parameter correctly to ensure that the VCO input + * frequency ranges from 1 to 2 MHz. It is recommended to select a frequency + * of 1 MHz to limit PLLI2S jitter. + * + * @param __PLLI2SN__ specifies the multiplication factor for PLLI2S VCO output clock + * This parameter must be a number between Min_Data = 50 and Max_Data = 432. + * @note You have to set the PLLI2SN parameter correctly to ensure that the VCO + * output frequency is between Min_Data = 100 and Max_Data = 432 MHz. + * + * @param __PLLI2SP__ specifies division factor for SPDIFRX Clock. + * This parameter must be a number in the range {2, 4, 6, or 8}. 
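/* Illustrative sketch, not part of the committed header: a usage example of the
   five-parameter __HAL_RCC_PLL_CONFIG() form above, assuming
   #include "stm32f4xx_hal.h", an 8 MHz HSE and an STM32F407-class target of
   84 MHz SYSCLK with a 48 MHz PLL48 output. Devices covered by the #if branch
   take an extra __PLLR__ argument. Values are illustrative only. */
static void rccex_example_pll_setup(void)   /* hypothetical helper name */
{
  __HAL_RCC_HSE_CONFIG(RCC_HSE_ON);
  while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) == RESET) { }   /* wait for HSE         */
  __HAL_RCC_PLL_DISABLE();                                   /* PLL must be off here */
  while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) != RESET) { }
  /* VCO in = 8 MHz / 4 = 2 MHz, VCO out = 2 MHz * 168 = 336 MHz,
     SYSCLK = 336 / 4 = 84 MHz, PLL48CLK = 336 / 7 = 48 MHz */
  __HAL_RCC_PLL_CONFIG(RCC_PLLSOURCE_HSE, 4U, 168U, 4U, 7U);
  __HAL_RCC_PLL_ENABLE();
  while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) == RESET) { }   /* wait for lock        */
}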
+ * @note the PLLI2SP parameter is only available with STM32F446xx Devices + * + * @param __PLLI2SR__ specifies the division factor for I2S clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 7. + * @note You have to set the PLLI2SR parameter correctly to not exceed 192 MHz + * on the I2S clock frequency. + * + * @param __PLLI2SQ__ specifies the division factor for SAI clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 15. + */ +#define __HAL_RCC_PLLI2S_CONFIG(__PLLI2SM__, __PLLI2SN__, __PLLI2SP__, __PLLI2SQ__, __PLLI2SR__) \ + (RCC->PLLI2SCFGR = ((__PLLI2SM__) |\ + ((__PLLI2SN__) << RCC_PLLI2SCFGR_PLLI2SN_Pos) |\ + ((((__PLLI2SP__) >> 1U) -1U) << RCC_PLLI2SCFGR_PLLI2SP_Pos) |\ + ((__PLLI2SQ__) << RCC_PLLI2SCFGR_PLLI2SQ_Pos) |\ + ((__PLLI2SR__) << RCC_PLLI2SCFGR_PLLI2SR_Pos))) +#elif defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) ||\ + defined(STM32F413xx) || defined(STM32F423xx) +/** @brief Macro to configure the PLLI2S clock multiplication and division factors . + * @note This macro must be used only when the PLLI2S is disabled. + * @note PLLI2S clock source is common with the main PLL (configured in + * HAL_RCC_ClockConfig() API). + * @param __PLLI2SM__ specifies the division factor for PLLI2S VCO input clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 63. + * @note You have to set the PLLI2SM parameter correctly to ensure that the VCO input + * frequency ranges from 1 to 2 MHz. It is recommended to select a frequency + * of 1 MHz to limit PLLI2S jitter. + * + * @param __PLLI2SN__ specifies the multiplication factor for PLLI2S VCO output clock + * This parameter must be a number between Min_Data = 50 and Max_Data = 432. + * @note You have to set the PLLI2SN parameter correctly to ensure that the VCO + * output frequency is between Min_Data = 100 and Max_Data = 432 MHz. + * + * @param __PLLI2SR__ specifies the division factor for I2S clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 7. + * @note You have to set the PLLI2SR parameter correctly to not exceed 192 MHz + * on the I2S clock frequency. + * + * @param __PLLI2SQ__ specifies the division factor for SAI clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 15. + */ +#define __HAL_RCC_PLLI2S_CONFIG(__PLLI2SM__, __PLLI2SN__, __PLLI2SQ__, __PLLI2SR__) \ + (RCC->PLLI2SCFGR = ((__PLLI2SM__) |\ + ((__PLLI2SN__) << RCC_PLLI2SCFGR_PLLI2SN_Pos) |\ + ((__PLLI2SQ__) << RCC_PLLI2SCFGR_PLLI2SQ_Pos) |\ + ((__PLLI2SR__) << RCC_PLLI2SCFGR_PLLI2SR_Pos))) +#else +/** @brief Macro to configure the PLLI2S clock multiplication and division factors . + * @note This macro must be used only when the PLLI2S is disabled. + * @note PLLI2S clock source is common with the main PLL (configured in + * HAL_RCC_ClockConfig() API). + * @param __PLLI2SN__ specifies the multiplication factor for PLLI2S VCO output clock + * This parameter must be a number between Min_Data = 50 and Max_Data = 432. + * @note You have to set the PLLI2SN parameter correctly to ensure that the VCO + * output frequency is between Min_Data = 100 and Max_Data = 432 MHz. + * + * @param __PLLI2SR__ specifies the division factor for I2S clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 7. + * @note You have to set the PLLI2SR parameter correctly to not exceed 192 MHz + * on the I2S clock frequency. 
+ * + */ +#define __HAL_RCC_PLLI2S_CONFIG(__PLLI2SN__, __PLLI2SR__) \ + (RCC->PLLI2SCFGR = (((__PLLI2SN__) << RCC_PLLI2SCFGR_PLLI2SN_Pos) |\ + ((__PLLI2SR__) << RCC_PLLI2SCFGR_PLLI2SR_Pos))) +#endif /* STM32F446xx */ + +#if defined(STM32F411xE) +/** @brief Macro to configure the PLLI2S clock multiplication and division factors . + * @note This macro must be used only when the PLLI2S is disabled. + * @note This macro must be used only when the PLLI2S is disabled. + * @note PLLI2S clock source is common with the main PLL (configured in + * HAL_RCC_ClockConfig() API). + * @param __PLLI2SM__ specifies the division factor for PLLI2S VCO input clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 63. + * @note The PLLI2SM parameter is only used with STM32F411xE/STM32F410xx Devices + * @note You have to set the PLLI2SM parameter correctly to ensure that the VCO input + * frequency ranges from 1 to 2 MHz. It is recommended to select a frequency + * of 2 MHz to limit PLLI2S jitter. + * @param __PLLI2SN__ specifies the multiplication factor for PLLI2S VCO output clock + * This parameter must be a number between Min_Data = 192 and Max_Data = 432. + * @note You have to set the PLLI2SN parameter correctly to ensure that the VCO + * output frequency is between Min_Data = 192 and Max_Data = 432 MHz. + * @param __PLLI2SR__ specifies the division factor for I2S clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 7. + * @note You have to set the PLLI2SR parameter correctly to not exceed 192 MHz + * on the I2S clock frequency. + */ +#define __HAL_RCC_PLLI2S_I2SCLK_CONFIG(__PLLI2SM__, __PLLI2SN__, __PLLI2SR__) (RCC->PLLI2SCFGR = ((__PLLI2SM__) |\ + ((__PLLI2SN__) << RCC_PLLI2SCFGR_PLLI2SN_Pos) |\ + ((__PLLI2SR__) << RCC_PLLI2SCFGR_PLLI2SR_Pos))) +#endif /* STM32F411xE */ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) +/** @brief Macro used by the SAI HAL driver to configure the PLLI2S clock multiplication and division factors. + * @note This macro must be used only when the PLLI2S is disabled. + * @note PLLI2S clock source is common with the main PLL (configured in + * HAL_RCC_ClockConfig() API) + * @param __PLLI2SN__ specifies the multiplication factor for PLLI2S VCO output clock. + * This parameter must be a number between Min_Data = 50 and Max_Data = 432. + * @note You have to set the PLLI2SN parameter correctly to ensure that the VCO + * output frequency is between Min_Data = 100 and Max_Data = 432 MHz. + * @param __PLLI2SQ__ specifies the division factor for SAI1 clock. + * This parameter must be a number between Min_Data = 2 and Max_Data = 15. + * @note the PLLI2SQ parameter is only available with STM32F427xx/437xx/429xx/439xx/469xx/479xx + * Devices and can be configured using the __HAL_RCC_PLLI2S_PLLSAICLK_CONFIG() macro + * @param __PLLI2SR__ specifies the division factor for I2S clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 7. + * @note You have to set the PLLI2SR parameter correctly to not exceed 192 MHz + * on the I2S clock frequency. 
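/* Illustrative sketch, not part of the committed header: the two-parameter
   __HAL_RCC_PLLI2S_CONFIG() form above (STM32F405/F407-class parts), assuming
   #include "stm32f4xx_hal.h" and a 1 MHz PLLI2S VCO input; N = 192 and R = 2
   give a 96 MHz I2S clock, below the documented 192 MHz limit. */
static void rccex_example_plli2s_setup(void)   /* hypothetical helper name */
{
  __HAL_RCC_PLLI2S_DISABLE();                                  /* must be off to reconfigure */
  __HAL_RCC_PLLI2S_CONFIG(192U, 2U);
  __HAL_RCC_PLLI2S_ENABLE();
  while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) { }  /* wait for lock              */
}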
+ */ +#define __HAL_RCC_PLLI2S_SAICLK_CONFIG(__PLLI2SN__, __PLLI2SQ__, __PLLI2SR__) (RCC->PLLI2SCFGR = ((__PLLI2SN__) << 6U) |\ + ((__PLLI2SQ__) << 24U) |\ + ((__PLLI2SR__) << 28U)) +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ +/*----------------------------------------------------------------------------*/ + +/*------------------------------ PLLSAI Configuration ------------------------*/ +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) +/** @brief Macros to Enable or Disable the PLLISAI. + * @note The PLLSAI is only available with STM32F429x/439x Devices. + * @note The PLLSAI is disabled by hardware when entering STOP and STANDBY modes. + */ +#define __HAL_RCC_PLLSAI_ENABLE() (*(__IO uint32_t *) RCC_CR_PLLSAION_BB = ENABLE) +#define __HAL_RCC_PLLSAI_DISABLE() (*(__IO uint32_t *) RCC_CR_PLLSAION_BB = DISABLE) + +#if defined(STM32F446xx) +/** @brief Macro to configure the PLLSAI clock multiplication and division factors. + * + * @param __PLLSAIM__ specifies the division factor for PLLSAI VCO input clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 63. + * @note You have to set the PLLSAIM parameter correctly to ensure that the VCO input + * frequency ranges from 1 to 2 MHz. It is recommended to select a frequency + * of 1 MHz to limit PLLI2S jitter. + * @note The PLLSAIM parameter is only used with STM32F446xx Devices + * + * @param __PLLSAIN__ specifies the multiplication factor for PLLSAI VCO output clock. + * This parameter must be a number between Min_Data = 50 and Max_Data = 432. + * @note You have to set the PLLSAIN parameter correctly to ensure that the VCO + * output frequency is between Min_Data = 100 and Max_Data = 432 MHz. + * + * @param __PLLSAIP__ specifies division factor for OTG FS, SDIO and RNG clocks. + * This parameter must be a number in the range {2, 4, 6, or 8}. + * @note the PLLSAIP parameter is only available with STM32F446xx Devices + * + * @param __PLLSAIQ__ specifies the division factor for SAI clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 15. + * + * @param __PLLSAIR__ specifies the division factor for LTDC clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 7. + * @note the PLLI2SR parameter is only available with STM32F427/437/429/439xx Devices + */ +#define __HAL_RCC_PLLSAI_CONFIG(__PLLSAIM__, __PLLSAIN__, __PLLSAIP__, __PLLSAIQ__, __PLLSAIR__) \ + (RCC->PLLSAICFGR = ((__PLLSAIM__) | \ + ((__PLLSAIN__) << RCC_PLLSAICFGR_PLLSAIN_Pos) | \ + ((((__PLLSAIP__) >> 1U) -1U) << RCC_PLLSAICFGR_PLLSAIP_Pos) | \ + ((__PLLSAIQ__) << RCC_PLLSAICFGR_PLLSAIQ_Pos))) +#endif /* STM32F446xx */ + +#if defined(STM32F469xx) || defined(STM32F479xx) +/** @brief Macro to configure the PLLSAI clock multiplication and division factors. + * + * @param __PLLSAIN__ specifies the multiplication factor for PLLSAI VCO output clock. + * This parameter must be a number between Min_Data = 50 and Max_Data = 432. + * @note You have to set the PLLSAIN parameter correctly to ensure that the VCO + * output frequency is between Min_Data = 100 and Max_Data = 432 MHz. + * + * @param __PLLSAIP__ specifies division factor for SDIO and CLK48 clocks. + * This parameter must be a number in the range {2, 4, 6, or 8}. 
+ * + * @param __PLLSAIQ__ specifies the division factor for SAI clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 15. + * + * @param __PLLSAIR__ specifies the division factor for LTDC clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 7. + */ +#define __HAL_RCC_PLLSAI_CONFIG(__PLLSAIN__, __PLLSAIP__, __PLLSAIQ__, __PLLSAIR__) \ + (RCC->PLLSAICFGR = (((__PLLSAIN__) << RCC_PLLSAICFGR_PLLSAIN_Pos) |\ + ((((__PLLSAIP__) >> 1U) -1U) << RCC_PLLSAICFGR_PLLSAIP_Pos) |\ + ((__PLLSAIQ__) << RCC_PLLSAICFGR_PLLSAIQ_Pos) |\ + ((__PLLSAIR__) << RCC_PLLSAICFGR_PLLSAIR_Pos))) +#endif /* STM32F469xx || STM32F479xx */ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) +/** @brief Macro to configure the PLLSAI clock multiplication and division factors. + * + * @param __PLLSAIN__ specifies the multiplication factor for PLLSAI VCO output clock. + * This parameter must be a number between Min_Data = 50 and Max_Data = 432. + * @note You have to set the PLLSAIN parameter correctly to ensure that the VCO + * output frequency is between Min_Data = 100 and Max_Data = 432 MHz. + * + * @param __PLLSAIQ__ specifies the division factor for SAI clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 15. + * + * @param __PLLSAIR__ specifies the division factor for LTDC clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 7. + * @note the PLLI2SR parameter is only available with STM32F427/437/429/439xx Devices + */ +#define __HAL_RCC_PLLSAI_CONFIG(__PLLSAIN__, __PLLSAIQ__, __PLLSAIR__) \ + (RCC->PLLSAICFGR = (((__PLLSAIN__) << RCC_PLLSAICFGR_PLLSAIN_Pos) | \ + ((__PLLSAIQ__) << RCC_PLLSAICFGR_PLLSAIQ_Pos) | \ + ((__PLLSAIR__) << RCC_PLLSAICFGR_PLLSAIR_Pos))) +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx */ + +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F446xx || STM32F469xx || STM32F479xx */ +/*----------------------------------------------------------------------------*/ + +/*------------------- PLLSAI/PLLI2S Dividers Configuration -------------------*/ +#if defined(STM32F413xx) || defined(STM32F423xx) +/** @brief Macro to configure the SAI clock Divider coming from PLLI2S. + * @note This function must be called before enabling the PLLI2S. + * @param __PLLI2SDivR__ specifies the PLLI2S division factor for SAI1 clock. + * This parameter must be a number between 1 and 32. + * SAI1 clock frequency = f(PLLI2SR) / __PLLI2SDivR__ + */ +#define __HAL_RCC_PLLI2S_PLLSAICLKDIVR_CONFIG(__PLLI2SDivR__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_PLLI2SDIVR, (__PLLI2SDivR__)-1U)) + +/** @brief Macro to configure the SAI clock Divider coming from PLL. + * @param __PLLDivR__ specifies the PLL division factor for SAI1 clock. + * This parameter must be a number between 1 and 32. + * SAI1 clock frequency = f(PLLR) / __PLLDivR__ + */ +#define __HAL_RCC_PLL_PLLSAICLKDIVR_CONFIG(__PLLDivR__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_PLLDIVR, ((__PLLDivR__)-1U)<<8U)) +#endif /* STM32F413xx || STM32F423xx */ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F446xx) ||\ + defined(STM32F469xx) || defined(STM32F479xx) +/** @brief Macro to configure the SAI clock Divider coming from PLLI2S. + * @note This function must be called before enabling the PLLI2S. + * @param __PLLI2SDivQ__ specifies the PLLI2S division factor for SAI1 clock. + * This parameter must be a number between 1 and 32. 
+ * SAI1 clock frequency = f(PLLI2SQ) / __PLLI2SDivQ__ + */ +#define __HAL_RCC_PLLI2S_PLLSAICLKDIVQ_CONFIG(__PLLI2SDivQ__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_PLLI2SDIVQ, (__PLLI2SDivQ__)-1U)) + +/** @brief Macro to configure the SAI clock Divider coming from PLLSAI. + * @note This function must be called before enabling the PLLSAI. + * @param __PLLSAIDivQ__ specifies the PLLSAI division factor for SAI1 clock . + * This parameter must be a number between Min_Data = 1 and Max_Data = 32. + * SAI1 clock frequency = f(PLLSAIQ) / __PLLSAIDivQ__ + */ +#define __HAL_RCC_PLLSAI_PLLSAICLKDIVQ_CONFIG(__PLLSAIDivQ__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_PLLSAIDIVQ, ((__PLLSAIDivQ__)-1U)<<8U)) +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F446xx || STM32F469xx || STM32F479xx */ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) +/** @brief Macro to configure the LTDC clock Divider coming from PLLSAI. + * + * @note The LTDC peripheral is only available with STM32F427/437/429/439/469/479xx Devices. + * @note This function must be called before enabling the PLLSAI. + * @param __PLLSAIDivR__ specifies the PLLSAI division factor for LTDC clock . + * This parameter must be a number between Min_Data = 2 and Max_Data = 16. + * LTDC clock frequency = f(PLLSAIR) / __PLLSAIDivR__ + */ +#define __HAL_RCC_PLLSAI_PLLSAICLKDIVR_CONFIG(__PLLSAIDivR__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_PLLSAIDIVR, (__PLLSAIDivR__))) +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ +/*----------------------------------------------------------------------------*/ + +/*------------------------- Peripheral Clock selection -----------------------*/ +#if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx) || defined(STM32F417xx) ||\ + defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) ||\ + defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) || defined(STM32F469xx) ||\ + defined(STM32F479xx) +/** @brief Macro to configure the I2S clock source (I2SCLK). + * @note This function must be called before enabling the I2S APB clock. + * @param __SOURCE__ specifies the I2S clock source. + * This parameter can be one of the following values: + * @arg RCC_I2SCLKSOURCE_PLLI2S: PLLI2S clock used as I2S clock source. + * @arg RCC_I2SCLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin + * used as I2S clock source. + */ +#define __HAL_RCC_I2S_CONFIG(__SOURCE__) (MODIFY_REG(RCC->CFGR, RCC_CFGR_I2SSRC, (__SOURCE__))) + + +/** @brief Macro to get the I2S clock source (I2SCLK). + * @retval The clock source can be one of the following values: + * @arg @ref RCC_I2SCLKSOURCE_PLLI2S: PLLI2S clock used as I2S clock source. + * @arg @ref RCC_I2SCLKSOURCE_EXT External clock mapped on the I2S_CKIN pin + * used as I2S clock source + */ +#define __HAL_RCC_GET_I2S_SOURCE() ((uint32_t)(READ_BIT(RCC->CFGR, RCC_CFGR_I2SSRC))) +#endif /* STM32F40xxx || STM32F41xxx || STM32F42xxx || STM32F43xxx || STM32F469xx || STM32F479xx */ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) + +/** @brief Macro to configure SAI1BlockA clock source selection. + * @note The SAI peripheral is only available with STM32F427/437/429/439/469/479xx Devices. 
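/* Illustrative sketch, not part of the committed header: an LTDC pixel-clock setup
   on an STM32F429-class part with the PLLSAI macros above, assuming
   #include "stm32f4xx_hal.h" and a 1 MHz PLL VCO input: 1 MHz * 192 / R(4) / DIVR(8)
   = 6 MHz on LCD_CLK. Values are examples only. */
static void rccex_example_ltdc_clock(void)   /* hypothetical helper name */
{
  __HAL_RCC_PLLSAI_DISABLE();                                  /* PLLSAI must be off to reconfigure */
  __HAL_RCC_PLLSAI_CONFIG(192U, 7U, 4U);                       /* N = 192, Q = 7, R = 4 (F429 form) */
  __HAL_RCC_PLLSAI_PLLSAICLKDIVR_CONFIG(RCC_PLLSAIDIVR_8);     /* LCD_CLK = PLLSAI_R / 8            */
  __HAL_RCC_PLLSAI_ENABLE();
  while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLSAIRDY) == RESET) { }  /* wait for lock                     */
}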
+ * @note This function must be called before enabling PLLSAI, PLLI2S and + * the SAI clock. + * @param __SOURCE__ specifies the SAI Block A clock source. + * This parameter can be one of the following values: + * @arg RCC_SAIACLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used + * as SAI1 Block A clock. + * @arg RCC_SAIACLKSOURCE_PLLSAI: PLLISAI_Q clock divided by PLLSAIDIVQ used + * as SAI1 Block A clock. + * @arg RCC_SAIACLKSOURCE_Ext: External clock mapped on the I2S_CKIN pin + * used as SAI1 Block A clock. + */ +#define __HAL_RCC_SAI_BLOCKACLKSOURCE_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_SAI1ASRC, (__SOURCE__))) + +/** @brief Macro to configure SAI1BlockB clock source selection. + * @note The SAI peripheral is only available with STM32F427/437/429/439/469/479xx Devices. + * @note This function must be called before enabling PLLSAI, PLLI2S and + * the SAI clock. + * @param __SOURCE__ specifies the SAI Block B clock source. + * This parameter can be one of the following values: + * @arg RCC_SAIBCLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used + * as SAI1 Block B clock. + * @arg RCC_SAIBCLKSOURCE_PLLSAI: PLLISAI_Q clock divided by PLLSAIDIVQ used + * as SAI1 Block B clock. + * @arg RCC_SAIBCLKSOURCE_Ext: External clock mapped on the I2S_CKIN pin + * used as SAI1 Block B clock. + */ +#define __HAL_RCC_SAI_BLOCKBCLKSOURCE_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_SAI1BSRC, (__SOURCE__))) +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ + +#if defined(STM32F446xx) +/** @brief Macro to configure SAI1 clock source selection. + * @note This configuration is only available with STM32F446xx Devices. + * @note This function must be called before enabling PLL, PLLSAI, PLLI2S and + * the SAI clock. + * @param __SOURCE__ specifies the SAI1 clock source. + * This parameter can be one of the following values: + * @arg RCC_SAI1CLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used as SAI1 clock. + * @arg RCC_SAI1CLKSOURCE_PLLSAI: PLLISAI_Q clock divided by PLLSAIDIVQ used as SAI1 clock. + * @arg RCC_SAI1CLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as SAI1 clock. + * @arg RCC_SAI1CLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin used as SAI1 clock. + */ +#define __HAL_RCC_SAI1_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_SAI1SRC, (__SOURCE__))) + +/** @brief Macro to Get SAI1 clock source selection. + * @note This configuration is only available with STM32F446xx Devices. + * @retval The clock source can be one of the following values: + * @arg RCC_SAI1CLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used as SAI1 clock. + * @arg RCC_SAI1CLKSOURCE_PLLSAI: PLLISAI_Q clock divided by PLLSAIDIVQ used as SAI1 clock. + * @arg RCC_SAI1CLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as SAI1 clock. + * @arg RCC_SAI1CLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin used as SAI1 clock. + */ +#define __HAL_RCC_GET_SAI1_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_SAI1SRC)) + +/** @brief Macro to configure SAI2 clock source selection. + * @note This configuration is only available with STM32F446xx Devices. + * @note This function must be called before enabling PLL, PLLSAI, PLLI2S and + * the SAI clock. + * @param __SOURCE__ specifies the SAI2 clock source. + * This parameter can be one of the following values: + * @arg RCC_SAI2CLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used as SAI2 clock. 
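/* Illustrative sketch, not part of the committed header: SAI1 Block A kernel-clock
   selection on an STM32F427/F429-class part using the macros above, assuming PLLSAI
   is already locked; the /4 divider is an example only. */
static void rccex_example_sai_clock(void)   /* hypothetical helper name */
{
  __HAL_RCC_PLLSAI_PLLSAICLKDIVQ_CONFIG(4U);                        /* SAI1 clock = PLLSAI_Q / 4 */
  __HAL_RCC_SAI_BLOCKACLKSOURCE_CONFIG(RCC_SAIACLKSOURCE_PLLSAI);
}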
+ * @arg RCC_SAI2CLKSOURCE_PLLSAI: PLLISAI_Q clock divided by PLLSAIDIVQ used as SAI2 clock. + * @arg RCC_SAI2CLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as SAI2 clock. + * @arg RCC_SAI2CLKSOURCE_PLLSRC: HSI or HSE depending from PLL Source clock used as SAI2 clock. + */ +#define __HAL_RCC_SAI2_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_SAI2SRC, (__SOURCE__))) + +/** @brief Macro to Get SAI2 clock source selection. + * @note This configuration is only available with STM32F446xx Devices. + * @retval The clock source can be one of the following values: + * @arg RCC_SAI2CLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used as SAI2 clock. + * @arg RCC_SAI2CLKSOURCE_PLLSAI: PLLISAI_Q clock divided by PLLSAIDIVQ used as SAI2 clock. + * @arg RCC_SAI2CLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as SAI2 clock. + * @arg RCC_SAI2CLKSOURCE_PLLSRC: HSI or HSE depending from PLL Source clock used as SAI2 clock. + */ +#define __HAL_RCC_GET_SAI2_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_SAI2SRC)) + +/** @brief Macro to configure I2S APB1 clock source selection. + * @note This function must be called before enabling PLL, PLLI2S and the I2S clock. + * @param __SOURCE__ specifies the I2S APB1 clock source. + * This parameter can be one of the following values: + * @arg RCC_I2SAPB1CLKSOURCE_PLLI2S: PLLI2S VCO output clock divided by PLLI2SR used as I2S clock. + * @arg RCC_I2SAPB1CLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin used as I2S APB1 clock. + * @arg RCC_I2SAPB1CLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as I2S APB1 clock. + * @arg RCC_I2SAPB1CLKSOURCE_PLLSRC: HSI or HSE depending from PLL source Clock. + */ +#define __HAL_RCC_I2S_APB1_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_I2S1SRC, (__SOURCE__))) + +/** @brief Macro to Get I2S APB1 clock source selection. + * @retval The clock source can be one of the following values: + * @arg RCC_I2SAPB1CLKSOURCE_PLLI2S: PLLI2S VCO output clock divided by PLLI2SR used as I2S clock. + * @arg RCC_I2SAPB1CLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin used as I2S APB1 clock. + * @arg RCC_I2SAPB1CLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as I2S APB1 clock. + * @arg RCC_I2SAPB1CLKSOURCE_PLLSRC: HSI or HSE depending from PLL source Clock. + */ +#define __HAL_RCC_GET_I2S_APB1_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_I2S1SRC)) + +/** @brief Macro to configure I2S APB2 clock source selection. + * @note This function must be called before enabling PLL, PLLI2S and the I2S clock. + * @param __SOURCE__ specifies the SAI Block A clock source. + * This parameter can be one of the following values: + * @arg RCC_I2SAPB2CLKSOURCE_PLLI2S: PLLI2S VCO output clock divided by PLLI2SR used as I2S clock. + * @arg RCC_I2SAPB2CLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin used as I2S APB2 clock. + * @arg RCC_I2SAPB2CLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as I2S APB2 clock. + * @arg RCC_I2SAPB2CLKSOURCE_PLLSRC: HSI or HSE depending from PLL source Clock. + */ +#define __HAL_RCC_I2S_APB2_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_I2S2SRC, (__SOURCE__))) + +/** @brief Macro to Get I2S APB2 clock source selection. + * @retval The clock source can be one of the following values: + * @arg RCC_I2SAPB2CLKSOURCE_PLLI2S: PLLI2S VCO output clock divided by PLLI2SR used as I2S clock. + * @arg RCC_I2SAPB2CLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin used as I2S APB2 clock. + * @arg RCC_I2SAPB2CLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as I2S APB2 clock. 
+ * @arg RCC_I2SAPB2CLKSOURCE_PLLSRC: HSI or HSE depending from PLL source Clock. + */ +#define __HAL_RCC_GET_I2S_APB2_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_I2S2SRC)) + +/** @brief Macro to configure the CEC clock. + * @param __SOURCE__ specifies the CEC clock source. + * This parameter can be one of the following values: + * @arg RCC_CECCLKSOURCE_HSI: HSI selected as CEC clock + * @arg RCC_CECCLKSOURCE_LSE: LSE selected as CEC clock + */ +#define __HAL_RCC_CEC_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_CECSEL, (uint32_t)(__SOURCE__))) + +/** @brief Macro to Get the CEC clock. + * @retval The clock source can be one of the following values: + * @arg RCC_CECCLKSOURCE_HSI488: HSI selected as CEC clock + * @arg RCC_CECCLKSOURCE_LSE: LSE selected as CEC clock + */ +#define __HAL_RCC_GET_CEC_SOURCE() (READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_CECSEL)) + +/** @brief Macro to configure the FMPI2C1 clock. + * @param __SOURCE__ specifies the FMPI2C1 clock source. + * This parameter can be one of the following values: + * @arg RCC_FMPI2C1CLKSOURCE_PCLK1: PCLK1 selected as FMPI2C1 clock + * @arg RCC_FMPI2C1CLKSOURCE_SYSCLK: SYS clock selected as FMPI2C1 clock + * @arg RCC_FMPI2C1CLKSOURCE_HSI: HSI selected as FMPI2C1 clock + */ +#define __HAL_RCC_FMPI2C1_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_FMPI2C1SEL, (uint32_t)(__SOURCE__))) + +/** @brief Macro to Get the FMPI2C1 clock. + * @retval The clock source can be one of the following values: + * @arg RCC_FMPI2C1CLKSOURCE_PCLK1: PCLK1 selected as FMPI2C1 clock + * @arg RCC_FMPI2C1CLKSOURCE_SYSCLK: SYS clock selected as FMPI2C1 clock + * @arg RCC_FMPI2C1CLKSOURCE_HSI: HSI selected as FMPI2C1 clock + */ +#define __HAL_RCC_GET_FMPI2C1_SOURCE() (READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_FMPI2C1SEL)) + +/** @brief Macro to configure the CLK48 clock. + * @param __SOURCE__ specifies the CLK48 clock source. + * This parameter can be one of the following values: + * @arg RCC_CLK48CLKSOURCE_PLLQ: PLL VCO Output divided by PLLQ used as CLK48 clock. + * @arg RCC_CLK48CLKSOURCE_PLLSAIP: PLLSAI VCO Output divided by PLLSAIP used as CLK48 clock. + */ +#define __HAL_RCC_CLK48_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_CK48MSEL, (uint32_t)(__SOURCE__))) + +/** @brief Macro to Get the CLK48 clock. + * @retval The clock source can be one of the following values: + * @arg RCC_CLK48CLKSOURCE_PLLQ: PLL VCO Output divided by PLLQ used as CLK48 clock. + * @arg RCC_CLK48CLKSOURCE_PLLSAIP: PLLSAI VCO Output divided by PLLSAIP used as CLK48 clock. + */ +#define __HAL_RCC_GET_CLK48_SOURCE() (READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_CK48MSEL)) + +/** @brief Macro to configure the SDIO clock. + * @param __SOURCE__ specifies the SDIO clock source. + * This parameter can be one of the following values: + * @arg RCC_SDIOCLKSOURCE_CLK48: CLK48 output used as SDIO clock. + * @arg RCC_SDIOCLKSOURCE_SYSCLK: System clock output used as SDIO clock. + */ +#define __HAL_RCC_SDIO_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_SDIOSEL, (uint32_t)(__SOURCE__))) + +/** @brief Macro to Get the SDIO clock. + * @retval The clock source can be one of the following values: + * @arg RCC_SDIOCLKSOURCE_CLK48: CLK48 output used as SDIO clock. + * @arg RCC_SDIOCLKSOURCE_SYSCLK: System clock output used as SDIO clock. + */ +#define __HAL_RCC_GET_SDIO_SOURCE() (READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_SDIOSEL)) + +/** @brief Macro to configure the SPDIFRX clock. + * @param __SOURCE__ specifies the SPDIFRX clock source. 
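/* Illustrative sketch, not part of the committed header: CLK48/SDIO kernel-clock
   routing on an STM32F446 with the macros above, assuming the main PLL Q output is
   already running at 48 MHz. */
static void rccex_example_sdio_clock(void)   /* hypothetical helper name */
{
  __HAL_RCC_CLK48_CONFIG(RCC_CLK48CLKSOURCE_PLLQ);    /* CLK48 <- PLLQ  */
  __HAL_RCC_SDIO_CONFIG(RCC_SDIOCLKSOURCE_CLK48);     /* SDIO  <- CLK48 */
  if (__HAL_RCC_GET_SDIO_SOURCE() == RCC_SDIOCLKSOURCE_CLK48)
  {
    /* SDIO is now clocked from the 48 MHz domain */
  }
}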
+ * This parameter can be one of the following values: + * @arg RCC_SPDIFRXCLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as SPDIFRX clock. + * @arg RCC_SPDIFRXCLKSOURCE_PLLI2SP: PLLI2S VCO Output divided by PLLI2SP used as SPDIFRX clock. + */ +#define __HAL_RCC_SPDIFRX_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_SPDIFRXSEL, (uint32_t)(__SOURCE__))) + +/** @brief Macro to Get the SPDIFRX clock. + * @retval The clock source can be one of the following values: + * @arg RCC_SPDIFRXCLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as SPDIFRX clock. + * @arg RCC_SPDIFRXCLKSOURCE_PLLI2SP: PLLI2S VCO Output divided by PLLI2SP used as SPDIFRX clock. + */ +#define __HAL_RCC_GET_SPDIFRX_SOURCE() (READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_SPDIFRXSEL)) +#endif /* STM32F446xx */ + +#if defined(STM32F469xx) || defined(STM32F479xx) + +/** @brief Macro to configure the CLK48 clock. + * @param __SOURCE__ specifies the CLK48 clock source. + * This parameter can be one of the following values: + * @arg RCC_CLK48CLKSOURCE_PLLQ: PLL VCO Output divided by PLLQ used as CLK48 clock. + * @arg RCC_CLK48CLKSOURCE_PLLSAIP: PLLSAI VCO Output divided by PLLSAIP used as CLK48 clock. + */ +#define __HAL_RCC_CLK48_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_CK48MSEL, (uint32_t)(__SOURCE__))) + +/** @brief Macro to Get the CLK48 clock. + * @retval The clock source can be one of the following values: + * @arg RCC_CLK48CLKSOURCE_PLLQ: PLL VCO Output divided by PLLQ used as CLK48 clock. + * @arg RCC_CLK48CLKSOURCE_PLLSAIP: PLLSAI VCO Output divided by PLLSAIP used as CLK48 clock. + */ +#define __HAL_RCC_GET_CLK48_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_CK48MSEL)) + +/** @brief Macro to configure the SDIO clock. + * @param __SOURCE__ specifies the SDIO clock source. + * This parameter can be one of the following values: + * @arg RCC_SDIOCLKSOURCE_CLK48: CLK48 output used as SDIO clock. + * @arg RCC_SDIOCLKSOURCE_SYSCLK: System clock output used as SDIO clock. + */ +#define __HAL_RCC_SDIO_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_SDIOSEL, (uint32_t)(__SOURCE__))) + +/** @brief Macro to Get the SDIO clock. + * @retval The clock source can be one of the following values: + * @arg RCC_SDIOCLKSOURCE_CLK48: CLK48 output used as SDIO clock. + * @arg RCC_SDIOCLKSOURCE_SYSCLK: System clock output used as SDIO clock. + */ +#define __HAL_RCC_GET_SDIO_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_SDIOSEL)) + +/** @brief Macro to configure the DSI clock. + * @param __SOURCE__ specifies the DSI clock source. + * This parameter can be one of the following values: + * @arg RCC_DSICLKSOURCE_PLLR: PLLR output used as DSI clock. + * @arg RCC_DSICLKSOURCE_DSIPHY: DSI-PHY output used as DSI clock. + */ +#define __HAL_RCC_DSI_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_DSISEL, (uint32_t)(__SOURCE__))) + +/** @brief Macro to Get the DSI clock. + * @retval The clock source can be one of the following values: + * @arg RCC_DSICLKSOURCE_PLLR: PLLR output used as DSI clock. + * @arg RCC_DSICLKSOURCE_DSIPHY: DSI-PHY output used as DSI clock. + */ +#define __HAL_RCC_GET_DSI_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_DSISEL)) + +#endif /* STM32F469xx || STM32F479xx */ + +#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) ||\ + defined(STM32F413xx) || defined(STM32F423xx) +/** @brief Macro to configure the DFSDM1 clock. + * @param __DFSDM1_CLKSOURCE__ specifies the DFSDM1 clock source. 
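/* Illustrative sketch, not part of the committed header: DSI kernel-clock selection
   on an STM32F469/F479 with the macros above; the DSI-PHY byte clock is chosen
   purely as an example. */
static void rccex_example_dsi_clock(void)   /* hypothetical helper name */
{
  __HAL_RCC_DSI_CONFIG(RCC_DSICLKSOURCE_DSIPHY);
  if (__HAL_RCC_GET_DSI_SOURCE() == RCC_DSICLKSOURCE_DSIPHY)
  {
    /* DSI is clocked from the DSI-PHY output */
  }
}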
+ * This parameter can be one of the following values: + * @arg RCC_DFSDM1CLKSOURCE_PCLK2: PCLK2 clock used as kernel clock. + * @arg RCC_DFSDM1CLKSOURCE_SYSCLK: System clock used as kernel clock. + * @retval None + */ +#define __HAL_RCC_DFSDM1_CONFIG(__DFSDM1_CLKSOURCE__) MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_CKDFSDM1SEL, (__DFSDM1_CLKSOURCE__)) + +/** @brief Macro to get the DFSDM1 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_DFSDM1CLKSOURCE_PCLK2: PCLK2 clock used as kernel clock. + * @arg RCC_DFSDM1CLKSOURCE_SYSCLK: System clock used as kernel clock. + */ +#define __HAL_RCC_GET_DFSDM1_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_CKDFSDM1SEL))) + +/** @brief Macro to configure DFSDM1 Audio clock source selection. + * @note This configuration is only available with STM32F412Zx/STM32F412Vx/STM32F412Rx/STM32F412Cx/ + STM32F413xx/STM32F423xx Devices. + * @param __SOURCE__ specifies the DFSDM1 Audio clock source. + * This parameter can be one of the following values: + * @arg RCC_DFSDM1AUDIOCLKSOURCE_I2S1: CK_I2S_PCLK1 selected as audio clock + * @arg RCC_DFSDM1AUDIOCLKSOURCE_I2S2: CK_I2S_PCLK2 selected as audio clock + */ +#define __HAL_RCC_DFSDM1AUDIO_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_CKDFSDM1ASEL, (__SOURCE__))) + +/** @brief Macro to Get DFSDM1 Audio clock source selection. + * @note This configuration is only available with STM32F412Zx/STM32F412Vx/STM32F412Rx/STM32F412Cx/ + STM32F413xx/STM32F423xx Devices. + * @retval The clock source can be one of the following values: + * @arg RCC_DFSDM1AUDIOCLKSOURCE_I2S1: CK_I2S_PCLK1 selected as audio clock + * @arg RCC_DFSDM1AUDIOCLKSOURCE_I2S2: CK_I2S_PCLK2 selected as audio clock + */ +#define __HAL_RCC_GET_DFSDM1AUDIO_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_CKDFSDM1ASEL)) + +#if defined(STM32F413xx) || defined(STM32F423xx) +/** @brief Macro to configure the DFSDM2 clock. + * @param __DFSDM2_CLKSOURCE__ specifies the DFSDM1 clock source. + * This parameter can be one of the following values: + * @arg RCC_DFSDM2CLKSOURCE_PCLK2: PCLK2 clock used as kernel clock. + * @arg RCC_DFSDM2CLKSOURCE_SYSCLK: System clock used as kernel clock. + * @retval None + */ +#define __HAL_RCC_DFSDM2_CONFIG(__DFSDM2_CLKSOURCE__) MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_CKDFSDM1SEL, (__DFSDM2_CLKSOURCE__)) + +/** @brief Macro to get the DFSDM2 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_DFSDM2CLKSOURCE_PCLK2: PCLK2 clock used as kernel clock. + * @arg RCC_DFSDM2CLKSOURCE_SYSCLK: System clock used as kernel clock. + */ +#define __HAL_RCC_GET_DFSDM2_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_CKDFSDM1SEL))) + +/** @brief Macro to configure DFSDM1 Audio clock source selection. + * @note This configuration is only available with STM32F413xx/STM32F423xx Devices. + * @param __SOURCE__ specifies the DFSDM2 Audio clock source. + * This parameter can be one of the following values: + * @arg RCC_DFSDM2AUDIOCLKSOURCE_I2S1: CK_I2S_PCLK1 selected as audio clock + * @arg RCC_DFSDM2AUDIOCLKSOURCE_I2S2: CK_I2S_PCLK2 selected as audio clock + */ +#define __HAL_RCC_DFSDM2AUDIO_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_CKDFSDM2ASEL, (__SOURCE__))) + +/** @brief Macro to Get DFSDM2 Audio clock source selection. + * @note This configuration is only available with STM32F413xx/STM32F423xx Devices. 
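/* Illustrative sketch, not part of the committed header: DFSDM1 clock selection on
   an STM32F412/F413-class part with the macros above; both source choices are
   examples only. */
static void rccex_example_dfsdm1_clock(void)   /* hypothetical helper name */
{
  __HAL_RCC_DFSDM1_CONFIG(RCC_DFSDM1CLKSOURCE_PCLK2);           /* kernel clock from APB2         */
  __HAL_RCC_DFSDM1AUDIO_CONFIG(RCC_DFSDM1AUDIOCLKSOURCE_I2S1);  /* audio clock from the I2S1 path */
}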
+ * @retval The clock source can be one of the following values: + * @arg RCC_DFSDM2AUDIOCLKSOURCE_I2S1: CK_I2S_PCLK1 selected as audio clock + * @arg RCC_DFSDM2AUDIOCLKSOURCE_I2S2: CK_I2S_PCLK2 selected as audio clock + */ +#define __HAL_RCC_GET_DFSDM2AUDIO_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_CKDFSDM2ASEL)) + +/** @brief Macro to configure SAI1BlockA clock source selection. + * @note The SAI peripheral is only available with STM32F413xx/STM32F423xx Devices. + * @note This function must be called before enabling PLLSAI, PLLI2S and + * the SAI clock. + * @param __SOURCE__ specifies the SAI Block A clock source. + * This parameter can be one of the following values: + * @arg RCC_SAIACLKSOURCE_PLLI2SR: PLLI2S_R clock divided (R2) used as SAI1 Block A clock. + * @arg RCC_SAIACLKSOURCE_EXT: External clock mapped on the I2S_CKIN pinused as SAI1 Block A clock. + * @arg RCC_SAIACLKSOURCE_PLLR: PLL_R clock divided (R1) used as SAI1 Block A clock. + * @arg RCC_SAIACLKSOURCE_PLLSRC: HSI or HSE depending from PLL source Clock. + */ +#define __HAL_RCC_SAI_BLOCKACLKSOURCE_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_SAI1ASRC, (__SOURCE__))) + +/** @brief Macro to Get SAI1 BlockA clock source selection. + * @note This configuration is only available with STM32F413xx/STM32F423xx Devices. + * @retval The clock source can be one of the following values: + * @arg RCC_SAIACLKSOURCE_PLLI2SR: PLLI2S_R clock divided (R2) used as SAI1 Block A clock. + * @arg RCC_SAIACLKSOURCE_EXT: External clock mapped on the I2S_CKIN pinused as SAI1 Block A clock. + * @arg RCC_SAIACLKSOURCE_PLLR: PLL_R clock divided (R1) used as SAI1 Block A clock. + * @arg RCC_SAIACLKSOURCE_PLLSRC: HSI or HSE depending from PLL source Clock. + */ +#define __HAL_RCC_GET_SAI_BLOCKA_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_SAI1ASRC)) + +/** @brief Macro to configure SAI1 BlockB clock source selection. + * @note The SAI peripheral is only available with STM32F413xx/STM32F423xx Devices. + * @note This function must be called before enabling PLLSAI, PLLI2S and + * the SAI clock. + * @param __SOURCE__ specifies the SAI Block B clock source. + * This parameter can be one of the following values: + * @arg RCC_SAIBCLKSOURCE_PLLI2SR: PLLI2S_R clock divided (R2) used as SAI1 Block A clock. + * @arg RCC_SAIBCLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin used as SAI1 Block A clock. + * @arg RCC_SAIBCLKSOURCE_PLLR: PLL_R clock divided (R1) used as SAI1 Block A clock. + * @arg RCC_SAIBCLKSOURCE_PLLSRC: HSI or HSE depending from PLL source Clock. + */ +#define __HAL_RCC_SAI_BLOCKBCLKSOURCE_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_SAI1BSRC, (__SOURCE__))) + +/** @brief Macro to Get SAI1 BlockB clock source selection. + * @note This configuration is only available with STM32F413xx/STM32F423xx Devices. + * @retval The clock source can be one of the following values: + * @arg RCC_SAIBCLKSOURCE_PLLI2SR: PLLI2S_R clock divided (R2) used as SAI1 Block A clock. + * @arg RCC_SAIBCLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin used as SAI1 Block A clock. + * @arg RCC_SAIBCLKSOURCE_PLLR: PLL_R clock divided (R1) used as SAI1 Block A clock. + * @arg RCC_SAIBCLKSOURCE_PLLSRC: HSI or HSE depending from PLL source Clock. + */ +#define __HAL_RCC_GET_SAI_BLOCKB_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_SAI1BSRC)) + +/** @brief Macro to configure the LPTIM1 clock. + * @param __SOURCE__ specifies the LPTIM1 clock source. 
+ * This parameter can be one of the following values: + * @arg RCC_LPTIM1CLKSOURCE_PCLK1: PCLK1 selected as LPTIM1 clock + * @arg RCC_LPTIM1CLKSOURCE_HSI: HSI clock selected as LPTIM1 clock + * @arg RCC_LPTIM1CLKSOURCE_LSI: LSI selected as LPTIM1 clock + * @arg RCC_LPTIM1CLKSOURCE_LSE: LSE selected as LPTIM1 clock + */ +#define __HAL_RCC_LPTIM1_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_LPTIM1SEL, (uint32_t)(__SOURCE__))) + +/** @brief Macro to Get the LPTIM1 clock. + * @retval The clock source can be one of the following values: + * @arg RCC_LPTIM1CLKSOURCE_PCLK1: PCLK1 selected as LPTIM1 clock + * @arg RCC_LPTIM1CLKSOURCE_HSI: HSI clock selected as LPTIM1 clock + * @arg RCC_LPTIM1CLKSOURCE_LSI: LSI selected as LPTIM1 clock + * @arg RCC_LPTIM1CLKSOURCE_LSE: LSE selected as LPTIM1 clock + */ +#define __HAL_RCC_GET_LPTIM1_SOURCE() (READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_LPTIM1SEL)) +#endif /* STM32F413xx || STM32F423xx */ + +/** @brief Macro to configure I2S APB1 clock source selection. + * @param __SOURCE__ specifies the I2S APB1 clock source. + * This parameter can be one of the following values: + * @arg RCC_I2SAPB1CLKSOURCE_PLLI2S: PLLI2S VCO output clock divided by PLLI2SR. + * @arg RCC_I2SAPB1CLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin. + * @arg RCC_I2SAPB1CLKSOURCE_PLLR: PLL VCO Output divided by PLLR. + * @arg RCC_I2SAPB1CLKSOURCE_PLLSRC: HSI or HSE depending on the PLL source clock. + */ +#define __HAL_RCC_I2S_APB1_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_I2S1SRC, (__SOURCE__))) + +/** @brief Macro to Get I2S APB1 clock source selection. + * @retval The clock source can be one of the following values: + * @arg RCC_I2SAPB1CLKSOURCE_PLLI2S: PLLI2S VCO output clock divided by PLLI2SR. + * @arg RCC_I2SAPB1CLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin. + * @arg RCC_I2SAPB1CLKSOURCE_PLLR: PLL VCO Output divided by PLLR. + * @arg RCC_I2SAPB1CLKSOURCE_PLLSRC: HSI or HSE depending on the PLL source clock. + */ +#define __HAL_RCC_GET_I2S_APB1_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_I2S1SRC)) + +/** @brief Macro to configure I2S APB2 clock source selection. + * @param __SOURCE__ specifies the I2S APB2 clock source. + * This parameter can be one of the following values: + * @arg RCC_I2SAPB2CLKSOURCE_PLLI2S: PLLI2S VCO output clock divided by PLLI2SR. + * @arg RCC_I2SAPB2CLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin. + * @arg RCC_I2SAPB2CLKSOURCE_PLLR: PLL VCO Output divided by PLLR. + * @arg RCC_I2SAPB2CLKSOURCE_PLLSRC: HSI or HSE depending on the PLL source clock. + */ +#define __HAL_RCC_I2S_APB2_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_I2S2SRC, (__SOURCE__))) + +/** @brief Macro to Get I2S APB2 clock source selection. + * @retval The clock source can be one of the following values: + * @arg RCC_I2SAPB2CLKSOURCE_PLLI2S: PLLI2S VCO output clock divided by PLLI2SR. + * @arg RCC_I2SAPB2CLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin. + * @arg RCC_I2SAPB2CLKSOURCE_PLLR: PLL VCO Output divided by PLLR. + * @arg RCC_I2SAPB2CLKSOURCE_PLLSRC: HSI or HSE depending on the PLL source clock. + */ +#define __HAL_RCC_GET_I2S_APB2_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_I2S2SRC)) + +/** @brief Macro to configure the PLL I2S clock source (PLLI2SCLK). + * @note This macro must be called before enabling the I2S APB clock. + * @param __SOURCE__ specifies the I2S clock source. + * This parameter can be one of the following values: + * @arg RCC_PLLI2SCLKSOURCE_PLLSRC: HSI or HSE depending on the PLL source clock. 
+ * @arg RCC_PLLI2SCLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin + * used as I2S clock source. + */ +#define __HAL_RCC_PLL_I2S_CONFIG(__SOURCE__) (*(__IO uint32_t *) RCC_PLLI2SCFGR_PLLI2SSRC_BB = (__SOURCE__)) + +/** @brief Macro to configure the FMPI2C1 clock. + * @param __SOURCE__ specifies the FMPI2C1 clock source. + * This parameter can be one of the following values: + * @arg RCC_FMPI2C1CLKSOURCE_PCLK1: PCLK1 selected as FMPI2C1 clock + * @arg RCC_FMPI2C1CLKSOURCE_SYSCLK: SYS clock selected as FMPI2C1 clock + * @arg RCC_FMPI2C1CLKSOURCE_HSI: HSI selected as FMPI2C1 clock + */ +#define __HAL_RCC_FMPI2C1_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_FMPI2C1SEL, (uint32_t)(__SOURCE__))) + +/** @brief Macro to Get the FMPI2C1 clock. + * @retval The clock source can be one of the following values: + * @arg RCC_FMPI2C1CLKSOURCE_PCLK1: PCLK1 selected as FMPI2C1 clock + * @arg RCC_FMPI2C1CLKSOURCE_SYSCLK: SYS clock selected as FMPI2C1 clock + * @arg RCC_FMPI2C1CLKSOURCE_HSI: HSI selected as FMPI2C1 clock + */ +#define __HAL_RCC_GET_FMPI2C1_SOURCE() (READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_FMPI2C1SEL)) + +/** @brief Macro to configure the CLK48 clock. + * @param __SOURCE__ specifies the CLK48 clock source. + * This parameter can be one of the following values: + * @arg RCC_CLK48CLKSOURCE_PLLQ: PLL VCO Output divided by PLLQ used as CLK48 clock. + * @arg RCC_CLK48CLKSOURCE_PLLI2SQ: PLLI2S VCO Output divided by PLLI2SQ used as CLK48 clock. + */ +#define __HAL_RCC_CLK48_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_CK48MSEL, (uint32_t)(__SOURCE__))) + +/** @brief Macro to Get the CLK48 clock. + * @retval The clock source can be one of the following values: + * @arg RCC_CLK48CLKSOURCE_PLLQ: PLL VCO Output divided by PLLQ used as CLK48 clock. + * @arg RCC_CLK48CLKSOURCE_PLLI2SQ: PLLI2S VCO Output divided by PLLI2SQ used as CLK48 clock + */ +#define __HAL_RCC_GET_CLK48_SOURCE() (READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_CK48MSEL)) + +/** @brief Macro to configure the SDIO clock. + * @param __SOURCE__ specifies the SDIO clock source. + * This parameter can be one of the following values: + * @arg RCC_SDIOCLKSOURCE_CLK48: CLK48 output used as SDIO clock. + * @arg RCC_SDIOCLKSOURCE_SYSCLK: System clock output used as SDIO clock. + */ +#define __HAL_RCC_SDIO_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_SDIOSEL, (uint32_t)(__SOURCE__))) + +/** @brief Macro to Get the SDIO clock. + * @retval The clock source can be one of the following values: + * @arg RCC_SDIOCLKSOURCE_CLK48: CLK48 output used as SDIO clock. + * @arg RCC_SDIOCLKSOURCE_SYSCLK: System clock output used as SDIO clock. + */ +#define __HAL_RCC_GET_SDIO_SOURCE() (READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_SDIOSEL)) + +#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ + +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) +/** @brief Macro to configure I2S clock source selection. + * @param __SOURCE__ specifies the I2S clock source. + * This parameter can be one of the following values: + * @arg RCC_I2SAPBCLKSOURCE_PLLR: PLL VCO output clock divided by PLLR. + * @arg RCC_I2SAPBCLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin. + * @arg RCC_I2SAPBCLKSOURCE_PLLSRC: HSI/HSE depends on PLLSRC. + */ +#define __HAL_RCC_I2S_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_I2SSRC, (__SOURCE__))) + +/** @brief Macro to Get I2S clock source selection. 
+ * @retval The clock source can be one of the following values: + * @arg RCC_I2SAPBCLKSOURCE_PLLR: PLL VCO output clock divided by PLLR. + * @arg RCC_I2SAPBCLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin. + * @arg RCC_I2SAPBCLKSOURCE_PLLSRC: HSI/HSE depends on PLLSRC. + */ +#define __HAL_RCC_GET_I2S_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_I2SSRC)) + +/** @brief Macro to configure the FMPI2C1 clock. + * @param __SOURCE__ specifies the FMPI2C1 clock source. + * This parameter can be one of the following values: + * @arg RCC_FMPI2C1CLKSOURCE_PCLK1: PCLK1 selected as FMPI2C1 clock + * @arg RCC_FMPI2C1CLKSOURCE_SYSCLK: SYS clock selected as FMPI2C1 clock + * @arg RCC_FMPI2C1CLKSOURCE_HSI: HSI selected as FMPI2C1 clock + */ +#define __HAL_RCC_FMPI2C1_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_FMPI2C1SEL, (uint32_t)(__SOURCE__))) + +/** @brief Macro to Get the FMPI2C1 clock. + * @retval The clock source can be one of the following values: + * @arg RCC_FMPI2C1CLKSOURCE_PCLK1: PCLK1 selected as FMPI2C1 clock + * @arg RCC_FMPI2C1CLKSOURCE_SYSCLK: SYS clock selected as FMPI2C1 clock + * @arg RCC_FMPI2C1CLKSOURCE_HSI: HSI selected as FMPI2C1 clock + */ +#define __HAL_RCC_GET_FMPI2C1_SOURCE() (READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_FMPI2C1SEL)) + +/** @brief Macro to configure the LPTIM1 clock. + * @param __SOURCE__ specifies the LPTIM1 clock source. + * This parameter can be one of the following values: + * @arg RCC_LPTIM1CLKSOURCE_PCLK1: PCLK1 selected as LPTIM1 clock + * @arg RCC_LPTIM1CLKSOURCE_HSI: HSI clock selected as LPTIM1 clock + * @arg RCC_LPTIM1CLKSOURCE_LSI: LSI selected as LPTIM1 clock + * @arg RCC_LPTIM1CLKSOURCE_LSE: LSE selected as LPTIM1 clock + */ +#define __HAL_RCC_LPTIM1_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_LPTIM1SEL, (uint32_t)(__SOURCE__))) + +/** @brief Macro to Get the LPTIM1 clock. + * @retval The clock source can be one of the following values: + * @arg RCC_LPTIM1CLKSOURCE_PCLK1: PCLK1 selected as LPTIM1 clock + * @arg RCC_LPTIM1CLKSOURCE_HSI: HSI clock selected as LPTIM1 clock + * @arg RCC_LPTIM1CLKSOURCE_LSI: LSI selected as LPTIM1 clock + * @arg RCC_LPTIM1CLKSOURCE_LSE: LSE selected as LPTIM1 clock + */ +#define __HAL_RCC_GET_LPTIM1_SOURCE() (READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_LPTIM1SEL)) +#endif /* STM32F410Tx || STM32F410Cx || STM32F410Rx */ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) ||\ + defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F410Tx) || defined(STM32F410Cx) ||\ + defined(STM32F410Rx) || defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F469xx) ||\ + defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) ||\ + defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +/** @brief Macro to configure the Timers clocks prescalers + * @note This feature is only available with STM32F429x/439x Devices. + * @param __PRESC__ specifies the Timers clocks prescalers selection + * This parameter can be one of the following values: + * @arg RCC_TIMPRES_DESACTIVATED: The Timers kernels clocks prescaler is + * equal to HPRE if PPREx is corresponding to division by 1 or 2, + * else it is equal to [(HPRE * PPREx) / 2] if PPREx is corresponding to + * division by 4 or more. 
+ * @arg RCC_TIMPRES_ACTIVATED: The Timers kernels clocks prescaler is + * equal to HPRE if PPREx is corresponding to division by 1, 2 or 4, + * else it is equal to [(HPRE * PPREx) / 4] if PPREx is corresponding + * to division by 8 or more. + */ +#define __HAL_RCC_TIMCLKPRESCALER(__PRESC__) (*(__IO uint32_t *) RCC_DCKCFGR_TIMPRE_BB = (__PRESC__)) + +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F401xC || STM32F401xE || STM32F410xx || STM32F411xE ||\ + STM32F446xx || STM32F469xx || STM32F479xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx ||\ + STM32F423xx */ + +/*----------------------------------------------------------------------------*/ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) +/** @brief Enable PLLSAI_RDY interrupt. + */ +#define __HAL_RCC_PLLSAI_ENABLE_IT() (RCC->CIR |= (RCC_CIR_PLLSAIRDYIE)) + +/** @brief Disable PLLSAI_RDY interrupt. + */ +#define __HAL_RCC_PLLSAI_DISABLE_IT() (RCC->CIR &= ~(RCC_CIR_PLLSAIRDYIE)) + +/** @brief Clear the PLLSAI RDY interrupt pending bits. + */ +#define __HAL_RCC_PLLSAI_CLEAR_IT() (RCC->CIR |= (RCC_CIR_PLLSAIRDYF)) + +/** @brief Check whether the PLLSAI RDY interrupt has occurred or not. + * @retval The new state (TRUE or FALSE). + */ +#define __HAL_RCC_PLLSAI_GET_IT() ((RCC->CIR & (RCC_CIR_PLLSAIRDYIE)) == (RCC_CIR_PLLSAIRDYIE)) + +/** @brief Check whether the PLLSAI RDY flag is set or not. + * @retval The new state (TRUE or FALSE). + */ +#define __HAL_RCC_PLLSAI_GET_FLAG() ((RCC->CR & (RCC_CR_PLLSAIRDY)) == (RCC_CR_PLLSAIRDY)) + +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F446xx || STM32F469xx || STM32F479xx */ + +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) +/** @brief Macros to enable or disable the RCC MCO1 feature. + */ +#define __HAL_RCC_MCO1_ENABLE() (*(__IO uint32_t *) RCC_CFGR_MCO1EN_BB = ENABLE) +#define __HAL_RCC_MCO1_DISABLE() (*(__IO uint32_t *) RCC_CFGR_MCO1EN_BB = DISABLE) + +/** @brief Macros to enable or disable the RCC MCO2 feature. 
+ */ +#define __HAL_RCC_MCO2_ENABLE() (*(__IO uint32_t *) RCC_CFGR_MCO2EN_BB = ENABLE) +#define __HAL_RCC_MCO2_DISABLE() (*(__IO uint32_t *) RCC_CFGR_MCO2EN_BB = DISABLE) + +#endif /* STM32F410Tx || STM32F410Cx || STM32F410Rx */ + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup RCCEx_Exported_Functions + * @{ + */ + +/** @addtogroup RCCEx_Exported_Functions_Group1 + * @{ + */ +HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit); +void HAL_RCCEx_GetPeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit); + +uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk); + +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) || defined(STM32F411xE) ||\ + defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) ||\ + defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) ||\ + defined(STM32F423xx) +void HAL_RCCEx_SelectLSEMode(uint8_t Mode); +#endif /* STM32F410xx || STM32F411xE || STM32F446xx || STM32F469xx || STM32F479xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ +#if defined(RCC_PLLI2S_SUPPORT) +HAL_StatusTypeDef HAL_RCCEx_EnablePLLI2S(RCC_PLLI2SInitTypeDef *PLLI2SInit); +HAL_StatusTypeDef HAL_RCCEx_DisablePLLI2S(void); +#endif /* RCC_PLLI2S_SUPPORT */ +#if defined(RCC_PLLSAI_SUPPORT) +HAL_StatusTypeDef HAL_RCCEx_EnablePLLSAI(RCC_PLLSAIInitTypeDef *PLLSAIInit); +HAL_StatusTypeDef HAL_RCCEx_DisablePLLSAI(void); +#endif /* RCC_PLLSAI_SUPPORT */ +/** + * @} + */ + +/** + * @} + */ +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup RCCEx_Private_Constants RCCEx Private Constants + * @{ + */ + +/** @defgroup RCCEx_BitAddress_AliasRegion RCC BitAddress AliasRegion + * @brief RCC registers bit address in the alias region + * @{ + */ +/* --- CR Register ---*/ +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) ||\ + defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) +/* Alias word address of PLLSAION bit */ +#define RCC_PLLSAION_BIT_NUMBER 0x1CU +#define RCC_CR_PLLSAION_BB (PERIPH_BB_BASE + (RCC_CR_OFFSET * 32U) + (RCC_PLLSAION_BIT_NUMBER * 4U)) + +#define PLLSAI_TIMEOUT_VALUE 2U /* Timeout value fixed to 2 ms */ +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F446xx || STM32F469xx || STM32F479xx */ + +#if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx) || defined(STM32F417xx) || \ + defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || \ + defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) || defined(STM32F446xx) || \ + defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || \ + defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +/* Alias word address of PLLI2SON bit */ +#define RCC_PLLI2SON_BIT_NUMBER 0x1AU +#define RCC_CR_PLLI2SON_BB (PERIPH_BB_BASE + (RCC_CR_OFFSET * 32U) + (RCC_PLLI2SON_BIT_NUMBER * 4U)) +#endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx || STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || + STM32F401xC || STM32F401xE || STM32F411xE || STM32F446xx || 
STM32F469xx || STM32F479xx || STM32F412Zx || STM32F412Vx || + STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ + +/* --- DCKCFGR Register ---*/ +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) ||\ + defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) || defined(STM32F401xC) ||\ + defined(STM32F401xE) || defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F469xx) ||\ + defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) ||\ + defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +/* Alias word address of TIMPRE bit */ +#define RCC_DCKCFGR_OFFSET (RCC_OFFSET + 0x8CU) +#define RCC_TIMPRE_BIT_NUMBER 0x18U +#define RCC_DCKCFGR_TIMPRE_BB (PERIPH_BB_BASE + (RCC_DCKCFGR_OFFSET * 32U) + (RCC_TIMPRE_BIT_NUMBER * 4U)) +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F410xx || STM32F401xC ||\ + STM32F401xE || STM32F411xE || STM32F446xx || STM32F469xx || STM32F479xx || STM32F412Zx ||\ + STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ + +/* --- CFGR Register ---*/ +#define RCC_CFGR_OFFSET (RCC_OFFSET + 0x08U) +#if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx) || defined(STM32F417xx) || \ + defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || \ + defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) || defined(STM32F446xx) || \ + defined(STM32F469xx) || defined(STM32F479xx) +/* Alias word address of I2SSRC bit */ +#define RCC_I2SSRC_BIT_NUMBER 0x17U +#define RCC_CFGR_I2SSRC_BB (PERIPH_BB_BASE + (RCC_CFGR_OFFSET * 32U) + (RCC_I2SSRC_BIT_NUMBER * 4U)) + +#define PLLI2S_TIMEOUT_VALUE 2U /* Timeout value fixed to 2 ms */ +#endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx || STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || + STM32F401xC || STM32F401xE || STM32F411xE || STM32F446xx || STM32F469xx || STM32F479xx */ + +#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) ||\ + defined(STM32F413xx) || defined(STM32F423xx) +/* --- PLLI2SCFGR Register ---*/ +#define RCC_PLLI2SCFGR_OFFSET (RCC_OFFSET + 0x84U) +/* Alias word address of PLLI2SSRC bit */ +#define RCC_PLLI2SSRC_BIT_NUMBER 0x16U +#define RCC_PLLI2SCFGR_PLLI2SSRC_BB (PERIPH_BB_BASE\ + + (RCC_PLLI2SCFGR_OFFSET * 32U) + (RCC_PLLI2SSRC_BIT_NUMBER * 4U)) + +#define PLLI2S_TIMEOUT_VALUE 2U /* Timeout value fixed to 2 ms */ +#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx | STM32F423xx */ + +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) +/* Alias word address of MCO1EN bit */ +#define RCC_MCO1EN_BIT_NUMBER 0x8U +#define RCC_CFGR_MCO1EN_BB (PERIPH_BB_BASE + (RCC_CFGR_OFFSET * 32U) + (RCC_MCO1EN_BIT_NUMBER * 4U)) + +/* Alias word address of MCO2EN bit */ +#define RCC_MCO2EN_BIT_NUMBER 0x9U +#define RCC_CFGR_MCO2EN_BB (PERIPH_BB_BASE + (RCC_CFGR_OFFSET * 32U) + (RCC_MCO2EN_BIT_NUMBER * 4U)) +#endif /* STM32F410Tx || STM32F410Cx || STM32F410Rx */ + +#define PLL_TIMEOUT_VALUE 2U /* 2 ms */ +/** + * @} + */ + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup RCCEx_Private_Macros RCCEx Private Macros + * @{ + */ +/** @defgroup RCCEx_IS_RCC_Definitions RCC Private macros to check input parameters + * @{ + */ +#define IS_RCC_PLLN_VALUE(VALUE) ((50U <= (VALUE)) && ((VALUE) <= 432U)) +#define 
IS_RCC_PLLI2SN_VALUE(VALUE) ((50U <= (VALUE)) && ((VALUE) <= 432U)) + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx)|| defined(STM32F439xx) +#define IS_RCC_PERIPHCLOCK(SELECTION) ((1U <= (SELECTION)) && ((SELECTION) <= 0x0000007FU)) +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx */ + +#if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx)|| defined(STM32F417xx) +#define IS_RCC_PERIPHCLOCK(SELECTION) ((1U <= (SELECTION)) && ((SELECTION) <= 0x00000007U)) +#endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx */ + +#if defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) +#define IS_RCC_PERIPHCLOCK(SELECTION) ((1U <= (SELECTION)) && ((SELECTION) <= 0x0000000FU)) +#endif /* STM32F401xC || STM32F401xE || STM32F411xE */ + +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) +#define IS_RCC_PERIPHCLOCK(SELECTION) ((1U <= (SELECTION)) && ((SELECTION) <= 0x0000001FU)) +#endif /* STM32F410Tx || STM32F410Cx || STM32F410Rx */ + +#if defined(STM32F446xx) +#define IS_RCC_PERIPHCLOCK(SELECTION) ((1U <= (SELECTION)) && ((SELECTION) <= 0x00000FFFU)) +#endif /* STM32F446xx */ + +#if defined(STM32F469xx) || defined(STM32F479xx) +#define IS_RCC_PERIPHCLOCK(SELECTION) ((1U <= (SELECTION)) && ((SELECTION) <= 0x000001FFU)) +#endif /* STM32F469xx || STM32F479xx */ + +#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) +#define IS_RCC_PERIPHCLOCK(SELECTION) ((1U <= (SELECTION)) && ((SELECTION) <= 0x000003FFU)) +#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ + +#if defined(STM32F413xx) || defined(STM32F423xx) +#define IS_RCC_PERIPHCLOCK(SELECTION) ((1U <= (SELECTION)) && ((SELECTION) <= 0x00007FFFU)) +#endif /* STM32F413xx || STM32F423xx */ + +#define IS_RCC_PLLI2SR_VALUE(VALUE) ((2U <= (VALUE)) && ((VALUE) <= 7U)) + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx)|| defined(STM32F439xx) ||\ + defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) +#define IS_RCC_PLLI2SQ_VALUE(VALUE) ((2U <= (VALUE)) && ((VALUE) <= 15U)) + +#define IS_RCC_PLLSAIN_VALUE(VALUE) ((50U <= (VALUE)) && ((VALUE) <= 432U)) + +#define IS_RCC_PLLSAIQ_VALUE(VALUE) ((2U <= (VALUE)) && ((VALUE) <= 15U)) + +#define IS_RCC_PLLSAIR_VALUE(VALUE) ((2U <= (VALUE)) && ((VALUE) <= 7U)) + +#define IS_RCC_PLLSAI_DIVQ_VALUE(VALUE) ((1U <= (VALUE)) && ((VALUE) <= 32U)) + +#define IS_RCC_PLLI2S_DIVQ_VALUE(VALUE) ((1U <= (VALUE)) && ((VALUE) <= 32U)) + +#define IS_RCC_PLLSAI_DIVR_VALUE(VALUE) (((VALUE) == RCC_PLLSAIDIVR_2) ||\ + ((VALUE) == RCC_PLLSAIDIVR_4) ||\ + ((VALUE) == RCC_PLLSAIDIVR_8) ||\ + ((VALUE) == RCC_PLLSAIDIVR_16)) +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F446xx || STM32F469xx || STM32F479xx */ + +#if defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || \ + defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +#define IS_RCC_PLLI2SM_VALUE(VALUE) ((2U <= (VALUE)) && ((VALUE) <= 63U)) + +#define IS_RCC_LSE_MODE(MODE) (((MODE) == RCC_LSE_LOWPOWER_MODE) ||\ + ((MODE) == RCC_LSE_HIGHDRIVE_MODE)) +#endif /* STM32F411xE || STM32F446xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ + +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) +#define IS_RCC_PLLR_VALUE(VALUE) ((2U <= (VALUE)) && ((VALUE) <= 7U)) + +#define IS_RCC_LSE_MODE(MODE) (((MODE) == 
RCC_LSE_LOWPOWER_MODE) ||\ + ((MODE) == RCC_LSE_HIGHDRIVE_MODE)) + +#define IS_RCC_FMPI2C1CLKSOURCE(SOURCE) (((SOURCE) == RCC_FMPI2C1CLKSOURCE_PCLK1) ||\ + ((SOURCE) == RCC_FMPI2C1CLKSOURCE_SYSCLK) ||\ + ((SOURCE) == RCC_FMPI2C1CLKSOURCE_HSI)) + +#define IS_RCC_LPTIM1CLKSOURCE(SOURCE) (((SOURCE) == RCC_LPTIM1CLKSOURCE_PCLK1) ||\ + ((SOURCE) == RCC_LPTIM1CLKSOURCE_HSI) ||\ + ((SOURCE) == RCC_LPTIM1CLKSOURCE_LSI) ||\ + ((SOURCE) == RCC_LPTIM1CLKSOURCE_LSE)) + +#define IS_RCC_I2SAPBCLKSOURCE(SOURCE) (((SOURCE) == RCC_I2SAPBCLKSOURCE_PLLR) ||\ + ((SOURCE) == RCC_I2SAPBCLKSOURCE_EXT) ||\ + ((SOURCE) == RCC_I2SAPBCLKSOURCE_PLLSRC)) +#endif /* STM32F410Tx || STM32F410Cx || STM32F410Rx */ + +#if defined(STM32F446xx) +#define IS_RCC_PLLR_VALUE(VALUE) ((2U <= (VALUE)) && ((VALUE) <= 7U)) + +#define IS_RCC_PLLI2SP_VALUE(VALUE) (((VALUE) == RCC_PLLI2SP_DIV2) ||\ + ((VALUE) == RCC_PLLI2SP_DIV4) ||\ + ((VALUE) == RCC_PLLI2SP_DIV6) ||\ + ((VALUE) == RCC_PLLI2SP_DIV8)) + +#define IS_RCC_PLLSAIM_VALUE(VALUE) ((VALUE) <= 63U) + +#define IS_RCC_PLLSAIP_VALUE(VALUE) (((VALUE) == RCC_PLLSAIP_DIV2) ||\ + ((VALUE) == RCC_PLLSAIP_DIV4) ||\ + ((VALUE) == RCC_PLLSAIP_DIV6) ||\ + ((VALUE) == RCC_PLLSAIP_DIV8)) + +#define IS_RCC_SAI1CLKSOURCE(SOURCE) (((SOURCE) == RCC_SAI1CLKSOURCE_PLLSAI) ||\ + ((SOURCE) == RCC_SAI1CLKSOURCE_PLLI2S) ||\ + ((SOURCE) == RCC_SAI1CLKSOURCE_PLLR) ||\ + ((SOURCE) == RCC_SAI1CLKSOURCE_EXT)) + +#define IS_RCC_SAI2CLKSOURCE(SOURCE) (((SOURCE) == RCC_SAI2CLKSOURCE_PLLSAI) ||\ + ((SOURCE) == RCC_SAI2CLKSOURCE_PLLI2S) ||\ + ((SOURCE) == RCC_SAI2CLKSOURCE_PLLR) ||\ + ((SOURCE) == RCC_SAI2CLKSOURCE_PLLSRC)) + +#define IS_RCC_I2SAPB1CLKSOURCE(SOURCE) (((SOURCE) == RCC_I2SAPB1CLKSOURCE_PLLI2S) ||\ + ((SOURCE) == RCC_I2SAPB1CLKSOURCE_EXT) ||\ + ((SOURCE) == RCC_I2SAPB1CLKSOURCE_PLLR) ||\ + ((SOURCE) == RCC_I2SAPB1CLKSOURCE_PLLSRC)) + +#define IS_RCC_I2SAPB2CLKSOURCE(SOURCE) (((SOURCE) == RCC_I2SAPB2CLKSOURCE_PLLI2S) ||\ + ((SOURCE) == RCC_I2SAPB2CLKSOURCE_EXT) ||\ + ((SOURCE) == RCC_I2SAPB2CLKSOURCE_PLLR) ||\ + ((SOURCE) == RCC_I2SAPB2CLKSOURCE_PLLSRC)) + +#define IS_RCC_FMPI2C1CLKSOURCE(SOURCE) (((SOURCE) == RCC_FMPI2C1CLKSOURCE_PCLK1) ||\ + ((SOURCE) == RCC_FMPI2C1CLKSOURCE_SYSCLK) ||\ + ((SOURCE) == RCC_FMPI2C1CLKSOURCE_HSI)) + +#define IS_RCC_CECCLKSOURCE(SOURCE) (((SOURCE) == RCC_CECCLKSOURCE_HSI) ||\ + ((SOURCE) == RCC_CECCLKSOURCE_LSE)) + +#define IS_RCC_CLK48CLKSOURCE(SOURCE) (((SOURCE) == RCC_CLK48CLKSOURCE_PLLQ) ||\ + ((SOURCE) == RCC_CLK48CLKSOURCE_PLLSAIP)) + +#define IS_RCC_SDIOCLKSOURCE(SOURCE) (((SOURCE) == RCC_SDIOCLKSOURCE_CLK48) ||\ + ((SOURCE) == RCC_SDIOCLKSOURCE_SYSCLK)) + +#define IS_RCC_SPDIFRXCLKSOURCE(SOURCE) (((SOURCE) == RCC_SPDIFRXCLKSOURCE_PLLR) ||\ + ((SOURCE) == RCC_SPDIFRXCLKSOURCE_PLLI2SP)) +#endif /* STM32F446xx */ + +#if defined(STM32F469xx) || defined(STM32F479xx) +#define IS_RCC_PLLR_VALUE(VALUE) ((2U <= (VALUE)) && ((VALUE) <= 7U)) + +#define IS_RCC_PLLSAIP_VALUE(VALUE) (((VALUE) == RCC_PLLSAIP_DIV2) ||\ + ((VALUE) == RCC_PLLSAIP_DIV4) ||\ + ((VALUE) == RCC_PLLSAIP_DIV6) ||\ + ((VALUE) == RCC_PLLSAIP_DIV8)) + +#define IS_RCC_CLK48CLKSOURCE(SOURCE) (((SOURCE) == RCC_CLK48CLKSOURCE_PLLQ) ||\ + ((SOURCE) == RCC_CLK48CLKSOURCE_PLLSAIP)) + +#define IS_RCC_SDIOCLKSOURCE(SOURCE) (((SOURCE) == RCC_SDIOCLKSOURCE_CLK48) ||\ + ((SOURCE) == RCC_SDIOCLKSOURCE_SYSCLK)) + +#define IS_RCC_DSIBYTELANECLKSOURCE(SOURCE) (((SOURCE) == RCC_DSICLKSOURCE_PLLR) ||\ + ((SOURCE) == RCC_DSICLKSOURCE_DSIPHY)) + +#define IS_RCC_LSE_MODE(MODE) (((MODE) == RCC_LSE_LOWPOWER_MODE) ||\ + ((MODE) 
== RCC_LSE_HIGHDRIVE_MODE)) +#endif /* STM32F469xx || STM32F479xx */ + +#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) ||\ + defined(STM32F413xx) || defined(STM32F423xx) +#define IS_RCC_PLLI2SQ_VALUE(VALUE) ((2U <= (VALUE)) && ((VALUE) <= 15U)) + +#define IS_RCC_PLLR_VALUE(VALUE) ((2U <= (VALUE)) && ((VALUE) <= 7U)) + +#define IS_RCC_PLLI2SCLKSOURCE(__SOURCE__) (((__SOURCE__) == RCC_PLLI2SCLKSOURCE_PLLSRC) || \ + ((__SOURCE__) == RCC_PLLI2SCLKSOURCE_EXT)) + +#define IS_RCC_I2SAPB1CLKSOURCE(SOURCE) (((SOURCE) == RCC_I2SAPB1CLKSOURCE_PLLI2S) ||\ + ((SOURCE) == RCC_I2SAPB1CLKSOURCE_EXT) ||\ + ((SOURCE) == RCC_I2SAPB1CLKSOURCE_PLLR) ||\ + ((SOURCE) == RCC_I2SAPB1CLKSOURCE_PLLSRC)) + +#define IS_RCC_I2SAPB2CLKSOURCE(SOURCE) (((SOURCE) == RCC_I2SAPB2CLKSOURCE_PLLI2S) ||\ + ((SOURCE) == RCC_I2SAPB2CLKSOURCE_EXT) ||\ + ((SOURCE) == RCC_I2SAPB2CLKSOURCE_PLLR) ||\ + ((SOURCE) == RCC_I2SAPB2CLKSOURCE_PLLSRC)) + +#define IS_RCC_FMPI2C1CLKSOURCE(SOURCE) (((SOURCE) == RCC_FMPI2C1CLKSOURCE_PCLK1) ||\ + ((SOURCE) == RCC_FMPI2C1CLKSOURCE_SYSCLK) ||\ + ((SOURCE) == RCC_FMPI2C1CLKSOURCE_HSI)) + +#define IS_RCC_CLK48CLKSOURCE(SOURCE) (((SOURCE) == RCC_CLK48CLKSOURCE_PLLQ) ||\ + ((SOURCE) == RCC_CLK48CLKSOURCE_PLLI2SQ)) + +#define IS_RCC_SDIOCLKSOURCE(SOURCE) (((SOURCE) == RCC_SDIOCLKSOURCE_CLK48) ||\ + ((SOURCE) == RCC_SDIOCLKSOURCE_SYSCLK)) + +#define IS_RCC_DFSDM1CLKSOURCE(__SOURCE__) (((__SOURCE__) == RCC_DFSDM1CLKSOURCE_PCLK2) || \ + ((__SOURCE__) == RCC_DFSDM1CLKSOURCE_SYSCLK)) + +#define IS_RCC_DFSDM1AUDIOCLKSOURCE(__SOURCE__) (((__SOURCE__) == RCC_DFSDM1AUDIOCLKSOURCE_I2S1) || \ + ((__SOURCE__) == RCC_DFSDM1AUDIOCLKSOURCE_I2S2)) + +#if defined(STM32F413xx) || defined(STM32F423xx) +#define IS_RCC_DFSDM2CLKSOURCE(__SOURCE__) (((__SOURCE__) == RCC_DFSDM2CLKSOURCE_PCLK2) || \ + ((__SOURCE__) == RCC_DFSDM2CLKSOURCE_SYSCLK)) + +#define IS_RCC_DFSDM2AUDIOCLKSOURCE(__SOURCE__) (((__SOURCE__) == RCC_DFSDM2AUDIOCLKSOURCE_I2S1) || \ + ((__SOURCE__) == RCC_DFSDM2AUDIOCLKSOURCE_I2S2)) + +#define IS_RCC_LPTIM1CLKSOURCE(SOURCE) (((SOURCE) == RCC_LPTIM1CLKSOURCE_PCLK1) ||\ + ((SOURCE) == RCC_LPTIM1CLKSOURCE_HSI) ||\ + ((SOURCE) == RCC_LPTIM1CLKSOURCE_LSI) ||\ + ((SOURCE) == RCC_LPTIM1CLKSOURCE_LSE)) + +#define IS_RCC_SAIACLKSOURCE(SOURCE) (((SOURCE) == RCC_SAIACLKSOURCE_PLLI2SR) ||\ + ((SOURCE) == RCC_SAIACLKSOURCE_EXT) ||\ + ((SOURCE) == RCC_SAIACLKSOURCE_PLLR) ||\ + ((SOURCE) == RCC_SAIACLKSOURCE_PLLSRC)) + +#define IS_RCC_SAIBCLKSOURCE(SOURCE) (((SOURCE) == RCC_SAIBCLKSOURCE_PLLI2SR) ||\ + ((SOURCE) == RCC_SAIBCLKSOURCE_EXT) ||\ + ((SOURCE) == RCC_SAIBCLKSOURCE_PLLR) ||\ + ((SOURCE) == RCC_SAIBCLKSOURCE_PLLSRC)) + +#define IS_RCC_PLL_DIVR_VALUE(VALUE) ((1U <= (VALUE)) && ((VALUE) <= 32U)) + +#define IS_RCC_PLLI2S_DIVR_VALUE(VALUE) ((1U <= (VALUE)) && ((VALUE) <= 32U)) + +#endif /* STM32F413xx || STM32F423xx */ +#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ + +#if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx) || defined(STM32F417xx) || \ + defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || \ + defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) || defined(STM32F446xx) || \ + defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || \ + defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) + +#define IS_RCC_MCO2SOURCE(SOURCE) (((SOURCE) == 
RCC_MCO2SOURCE_SYSCLK) || ((SOURCE) == RCC_MCO2SOURCE_PLLI2SCLK)|| \ + ((SOURCE) == RCC_MCO2SOURCE_HSE) || ((SOURCE) == RCC_MCO2SOURCE_PLLCLK)) + +#endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx || STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || + STM32F401xC || STM32F401xE || STM32F411xE || STM32F446xx || STM32F469xx || STM32F479xx || STM32F412Zx || STM32F412Vx || \ + STM32F412Rx */ + +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) +#define IS_RCC_MCO2SOURCE(SOURCE) (((SOURCE) == RCC_MCO2SOURCE_SYSCLK) || ((SOURCE) == RCC_MCO2SOURCE_I2SCLK)|| \ + ((SOURCE) == RCC_MCO2SOURCE_HSE) || ((SOURCE) == RCC_MCO2SOURCE_PLLCLK)) +#endif /* STM32F410Tx || STM32F410Cx || STM32F410Rx */ +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_HAL_RCC_EX_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_spi.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_spi.h new file mode 100644 index 0000000..4e2de04 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_spi.h @@ -0,0 +1,729 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_spi.h + * @author MCD Application Team + * @brief Header file of SPI HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F4xx_HAL_SPI_H +#define STM32F4xx_HAL_SPI_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal_def.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @addtogroup SPI + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup SPI_Exported_Types SPI Exported Types + * @{ + */ + +/** + * @brief SPI Configuration Structure definition + */ +typedef struct +{ + uint32_t Mode; /*!< Specifies the SPI operating mode. + This parameter can be a value of @ref SPI_Mode */ + + uint32_t Direction; /*!< Specifies the SPI bidirectional mode state. + This parameter can be a value of @ref SPI_Direction */ + + uint32_t DataSize; /*!< Specifies the SPI data size. + This parameter can be a value of @ref SPI_Data_Size */ + + uint32_t CLKPolarity; /*!< Specifies the serial clock steady state. + This parameter can be a value of @ref SPI_Clock_Polarity */ + + uint32_t CLKPhase; /*!< Specifies the clock active edge for the bit capture. + This parameter can be a value of @ref SPI_Clock_Phase */ + + uint32_t NSS; /*!< Specifies whether the NSS signal is managed by + hardware (NSS pin) or by software using the SSI bit. + This parameter can be a value of @ref SPI_Slave_Select_management */ + + uint32_t BaudRatePrescaler; /*!< Specifies the Baud Rate prescaler value which will be + used to configure the transmit and receive SCK clock. + This parameter can be a value of @ref SPI_BaudRate_Prescaler + @note The communication clock is derived from the master + clock. 
The slave clock does not need to be set. */ + + uint32_t FirstBit; /*!< Specifies whether data transfers start from MSB or LSB bit. + This parameter can be a value of @ref SPI_MSB_LSB_transmission */ + + uint32_t TIMode; /*!< Specifies if the TI mode is enabled or not. + This parameter can be a value of @ref SPI_TI_mode */ + + uint32_t CRCCalculation; /*!< Specifies if the CRC calculation is enabled or not. + This parameter can be a value of @ref SPI_CRC_Calculation */ + + uint32_t CRCPolynomial; /*!< Specifies the polynomial used for the CRC calculation. + This parameter must be an odd number between Min_Data = 1 and Max_Data = 65535 */ +} SPI_InitTypeDef; + +/** + * @brief HAL SPI State structure definition + */ +typedef enum +{ + HAL_SPI_STATE_RESET = 0x00U, /*!< Peripheral not Initialized */ + HAL_SPI_STATE_READY = 0x01U, /*!< Peripheral Initialized and ready for use */ + HAL_SPI_STATE_BUSY = 0x02U, /*!< an internal process is ongoing */ + HAL_SPI_STATE_BUSY_TX = 0x03U, /*!< Data Transmission process is ongoing */ + HAL_SPI_STATE_BUSY_RX = 0x04U, /*!< Data Reception process is ongoing */ + HAL_SPI_STATE_BUSY_TX_RX = 0x05U, /*!< Data Transmission and Reception process is ongoing */ + HAL_SPI_STATE_ERROR = 0x06U, /*!< SPI error state */ + HAL_SPI_STATE_ABORT = 0x07U /*!< SPI abort is ongoing */ +} HAL_SPI_StateTypeDef; + +/** + * @brief SPI handle Structure definition + */ +typedef struct __SPI_HandleTypeDef +{ + SPI_TypeDef *Instance; /*!< SPI registers base address */ + + SPI_InitTypeDef Init; /*!< SPI communication parameters */ + + const uint8_t *pTxBuffPtr; /*!< Pointer to SPI Tx transfer Buffer */ + + uint16_t TxXferSize; /*!< SPI Tx Transfer size */ + + __IO uint16_t TxXferCount; /*!< SPI Tx Transfer Counter */ + + uint8_t *pRxBuffPtr; /*!< Pointer to SPI Rx transfer Buffer */ + + uint16_t RxXferSize; /*!< SPI Rx Transfer size */ + + __IO uint16_t RxXferCount; /*!< SPI Rx Transfer Counter */ + + void (*RxISR)(struct __SPI_HandleTypeDef *hspi); /*!< function pointer on Rx ISR */ + + void (*TxISR)(struct __SPI_HandleTypeDef *hspi); /*!< function pointer on Tx ISR */ + + DMA_HandleTypeDef *hdmatx; /*!< SPI Tx DMA Handle parameters */ + + DMA_HandleTypeDef *hdmarx; /*!< SPI Rx DMA Handle parameters */ + + HAL_LockTypeDef Lock; /*!< Locking object */ + + __IO HAL_SPI_StateTypeDef State; /*!< SPI communication state */ + + __IO uint32_t ErrorCode; /*!< SPI Error code */ + +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + void (* TxCpltCallback)(struct __SPI_HandleTypeDef *hspi); /*!< SPI Tx Completed callback */ + void (* RxCpltCallback)(struct __SPI_HandleTypeDef *hspi); /*!< SPI Rx Completed callback */ + void (* TxRxCpltCallback)(struct __SPI_HandleTypeDef *hspi); /*!< SPI TxRx Completed callback */ + void (* TxHalfCpltCallback)(struct __SPI_HandleTypeDef *hspi); /*!< SPI Tx Half Completed callback */ + void (* RxHalfCpltCallback)(struct __SPI_HandleTypeDef *hspi); /*!< SPI Rx Half Completed callback */ + void (* TxRxHalfCpltCallback)(struct __SPI_HandleTypeDef *hspi); /*!< SPI TxRx Half Completed callback */ + void (* ErrorCallback)(struct __SPI_HandleTypeDef *hspi); /*!< SPI Error callback */ + void (* AbortCpltCallback)(struct __SPI_HandleTypeDef *hspi); /*!< SPI Abort callback */ + void (* MspInitCallback)(struct __SPI_HandleTypeDef *hspi); /*!< SPI Msp Init callback */ + void (* MspDeInitCallback)(struct __SPI_HandleTypeDef *hspi); /*!< SPI Msp DeInit callback */ + +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +} SPI_HandleTypeDef; + +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) +/** + 
* @brief HAL SPI Callback ID enumeration definition + */ +typedef enum +{ + HAL_SPI_TX_COMPLETE_CB_ID = 0x00U, /*!< SPI Tx Completed callback ID */ + HAL_SPI_RX_COMPLETE_CB_ID = 0x01U, /*!< SPI Rx Completed callback ID */ + HAL_SPI_TX_RX_COMPLETE_CB_ID = 0x02U, /*!< SPI TxRx Completed callback ID */ + HAL_SPI_TX_HALF_COMPLETE_CB_ID = 0x03U, /*!< SPI Tx Half Completed callback ID */ + HAL_SPI_RX_HALF_COMPLETE_CB_ID = 0x04U, /*!< SPI Rx Half Completed callback ID */ + HAL_SPI_TX_RX_HALF_COMPLETE_CB_ID = 0x05U, /*!< SPI TxRx Half Completed callback ID */ + HAL_SPI_ERROR_CB_ID = 0x06U, /*!< SPI Error callback ID */ + HAL_SPI_ABORT_CB_ID = 0x07U, /*!< SPI Abort callback ID */ + HAL_SPI_MSPINIT_CB_ID = 0x08U, /*!< SPI Msp Init callback ID */ + HAL_SPI_MSPDEINIT_CB_ID = 0x09U /*!< SPI Msp DeInit callback ID */ + +} HAL_SPI_CallbackIDTypeDef; + +/** + * @brief HAL SPI Callback pointer definition + */ +typedef void (*pSPI_CallbackTypeDef)(SPI_HandleTypeDef *hspi); /*!< pointer to an SPI callback function */ + +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup SPI_Exported_Constants SPI Exported Constants + * @{ + */ + +/** @defgroup SPI_Error_Code SPI Error Code + * @{ + */ +#define HAL_SPI_ERROR_NONE (0x00000000U) /*!< No error */ +#define HAL_SPI_ERROR_MODF (0x00000001U) /*!< MODF error */ +#define HAL_SPI_ERROR_CRC (0x00000002U) /*!< CRC error */ +#define HAL_SPI_ERROR_OVR (0x00000004U) /*!< OVR error */ +#define HAL_SPI_ERROR_FRE (0x00000008U) /*!< FRE error */ +#define HAL_SPI_ERROR_DMA (0x00000010U) /*!< DMA transfer error */ +#define HAL_SPI_ERROR_FLAG (0x00000020U) /*!< Error on RXNE/TXE/BSY Flag */ +#define HAL_SPI_ERROR_ABORT (0x00000040U) /*!< Error during SPI Abort procedure */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) +#define HAL_SPI_ERROR_INVALID_CALLBACK (0x00000080U) /*!< Invalid Callback error */ +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +/** + * @} + */ + +/** @defgroup SPI_Mode SPI Mode + * @{ + */ +#define SPI_MODE_SLAVE (0x00000000U) +#define SPI_MODE_MASTER (SPI_CR1_MSTR | SPI_CR1_SSI) +/** + * @} + */ + +/** @defgroup SPI_Direction SPI Direction Mode + * @{ + */ +#define SPI_DIRECTION_2LINES (0x00000000U) +#define SPI_DIRECTION_2LINES_RXONLY SPI_CR1_RXONLY +#define SPI_DIRECTION_1LINE SPI_CR1_BIDIMODE +/** + * @} + */ + +/** @defgroup SPI_Data_Size SPI Data Size + * @{ + */ +#define SPI_DATASIZE_8BIT (0x00000000U) +#define SPI_DATASIZE_16BIT SPI_CR1_DFF +/** + * @} + */ + +/** @defgroup SPI_Clock_Polarity SPI Clock Polarity + * @{ + */ +#define SPI_POLARITY_LOW (0x00000000U) +#define SPI_POLARITY_HIGH SPI_CR1_CPOL +/** + * @} + */ + +/** @defgroup SPI_Clock_Phase SPI Clock Phase + * @{ + */ +#define SPI_PHASE_1EDGE (0x00000000U) +#define SPI_PHASE_2EDGE SPI_CR1_CPHA +/** + * @} + */ + +/** @defgroup SPI_Slave_Select_management SPI Slave Select Management + * @{ + */ +#define SPI_NSS_SOFT SPI_CR1_SSM +#define SPI_NSS_HARD_INPUT (0x00000000U) +#define SPI_NSS_HARD_OUTPUT (SPI_CR2_SSOE << 16U) +/** + * @} + */ + +/** @defgroup SPI_BaudRate_Prescaler SPI BaudRate Prescaler + * @{ + */ +#define SPI_BAUDRATEPRESCALER_2 (0x00000000U) +#define SPI_BAUDRATEPRESCALER_4 (SPI_CR1_BR_0) +#define SPI_BAUDRATEPRESCALER_8 (SPI_CR1_BR_1) +#define SPI_BAUDRATEPRESCALER_16 (SPI_CR1_BR_1 | SPI_CR1_BR_0) +#define SPI_BAUDRATEPRESCALER_32 (SPI_CR1_BR_2) +#define SPI_BAUDRATEPRESCALER_64 (SPI_CR1_BR_2 | SPI_CR1_BR_0) +#define SPI_BAUDRATEPRESCALER_128 (SPI_CR1_BR_2 | SPI_CR1_BR_1) 
+#define SPI_BAUDRATEPRESCALER_256 (SPI_CR1_BR_2 | SPI_CR1_BR_1 | SPI_CR1_BR_0) +/** + * @} + */ + +/** @defgroup SPI_MSB_LSB_transmission SPI MSB LSB Transmission + * @{ + */ +#define SPI_FIRSTBIT_MSB (0x00000000U) +#define SPI_FIRSTBIT_LSB SPI_CR1_LSBFIRST +/** + * @} + */ + +/** @defgroup SPI_TI_mode SPI TI Mode + * @{ + */ +#define SPI_TIMODE_DISABLE (0x00000000U) +#define SPI_TIMODE_ENABLE SPI_CR2_FRF +/** + * @} + */ + +/** @defgroup SPI_CRC_Calculation SPI CRC Calculation + * @{ + */ +#define SPI_CRCCALCULATION_DISABLE (0x00000000U) +#define SPI_CRCCALCULATION_ENABLE SPI_CR1_CRCEN +/** + * @} + */ + +/** @defgroup SPI_Interrupt_definition SPI Interrupt Definition + * @{ + */ +#define SPI_IT_TXE SPI_CR2_TXEIE +#define SPI_IT_RXNE SPI_CR2_RXNEIE +#define SPI_IT_ERR SPI_CR2_ERRIE +/** + * @} + */ + +/** @defgroup SPI_Flags_definition SPI Flags Definition + * @{ + */ +#define SPI_FLAG_RXNE SPI_SR_RXNE /* SPI status flag: Rx buffer not empty flag */ +#define SPI_FLAG_TXE SPI_SR_TXE /* SPI status flag: Tx buffer empty flag */ +#define SPI_FLAG_BSY SPI_SR_BSY /* SPI status flag: Busy flag */ +#define SPI_FLAG_CRCERR SPI_SR_CRCERR /* SPI Error flag: CRC error flag */ +#define SPI_FLAG_MODF SPI_SR_MODF /* SPI Error flag: Mode fault flag */ +#define SPI_FLAG_OVR SPI_SR_OVR /* SPI Error flag: Overrun flag */ +#define SPI_FLAG_FRE SPI_SR_FRE /* SPI Error flag: TI mode frame format error flag */ +#define SPI_FLAG_MASK (SPI_SR_RXNE | SPI_SR_TXE | SPI_SR_BSY | SPI_SR_CRCERR\ + | SPI_SR_MODF | SPI_SR_OVR | SPI_SR_FRE) +/** + * @} + */ + +/** + * @} + */ + +/* Exported macros -----------------------------------------------------------*/ +/** @defgroup SPI_Exported_Macros SPI Exported Macros + * @{ + */ + +/** @brief Reset SPI handle state. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. + * @retval None + */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) +#define __HAL_SPI_RESET_HANDLE_STATE(__HANDLE__) do{ \ + (__HANDLE__)->State = HAL_SPI_STATE_RESET; \ + (__HANDLE__)->MspInitCallback = NULL; \ + (__HANDLE__)->MspDeInitCallback = NULL; \ + } while(0) +#else +#define __HAL_SPI_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = HAL_SPI_STATE_RESET) +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + +/** @brief Enable the specified SPI interrupts. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. + * @param __INTERRUPT__ specifies the interrupt source to enable. + * This parameter can be one of the following values: + * @arg SPI_IT_TXE: Tx buffer empty interrupt enable + * @arg SPI_IT_RXNE: RX buffer not empty interrupt enable + * @arg SPI_IT_ERR: Error interrupt enable + * @retval None + */ +#define __HAL_SPI_ENABLE_IT(__HANDLE__, __INTERRUPT__) SET_BIT((__HANDLE__)->Instance->CR2, (__INTERRUPT__)) + +/** @brief Disable the specified SPI interrupts. + * @param __HANDLE__ specifies the SPI handle. + * This parameter can be SPIx where x: 1, 2, or 3 to select the SPI peripheral. + * @param __INTERRUPT__ specifies the interrupt source to disable. 
+ * This parameter can be one of the following values: + * @arg SPI_IT_TXE: Tx buffer empty interrupt enable + * @arg SPI_IT_RXNE: RX buffer not empty interrupt enable + * @arg SPI_IT_ERR: Error interrupt enable + * @retval None + */ +#define __HAL_SPI_DISABLE_IT(__HANDLE__, __INTERRUPT__) CLEAR_BIT((__HANDLE__)->Instance->CR2, (__INTERRUPT__)) + +/** @brief Check whether the specified SPI interrupt source is enabled or not. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. + * @param __INTERRUPT__ specifies the SPI interrupt source to check. + * This parameter can be one of the following values: + * @arg SPI_IT_TXE: Tx buffer empty interrupt enable + * @arg SPI_IT_RXNE: RX buffer not empty interrupt enable + * @arg SPI_IT_ERR: Error interrupt enable + * @retval The new state of __IT__ (TRUE or FALSE). + */ +#define __HAL_SPI_GET_IT_SOURCE(__HANDLE__, __INTERRUPT__) ((((__HANDLE__)->Instance->CR2\ + & (__INTERRUPT__)) == (__INTERRUPT__)) ? SET : RESET) + +/** @brief Check whether the specified SPI flag is set or not. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. + * @param __FLAG__ specifies the flag to check. + * This parameter can be one of the following values: + * @arg SPI_FLAG_RXNE: Receive buffer not empty flag + * @arg SPI_FLAG_TXE: Transmit buffer empty flag + * @arg SPI_FLAG_CRCERR: CRC error flag + * @arg SPI_FLAG_MODF: Mode fault flag + * @arg SPI_FLAG_OVR: Overrun flag + * @arg SPI_FLAG_BSY: Busy flag + * @arg SPI_FLAG_FRE: Frame format error flag + * @retval The new state of __FLAG__ (TRUE or FALSE). + */ +#define __HAL_SPI_GET_FLAG(__HANDLE__, __FLAG__) ((((__HANDLE__)->Instance->SR) & (__FLAG__)) == (__FLAG__)) + +/** @brief Clear the SPI CRCERR pending flag. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. + * @retval None + */ +#define __HAL_SPI_CLEAR_CRCERRFLAG(__HANDLE__) ((__HANDLE__)->Instance->SR = (uint16_t)(~SPI_FLAG_CRCERR)) + +/** @brief Clear the SPI MODF pending flag. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. + * @retval None + */ +#define __HAL_SPI_CLEAR_MODFFLAG(__HANDLE__) \ + do{ \ + __IO uint32_t tmpreg_modf = 0x00U; \ + tmpreg_modf = (__HANDLE__)->Instance->SR; \ + CLEAR_BIT((__HANDLE__)->Instance->CR1, SPI_CR1_SPE); \ + UNUSED(tmpreg_modf); \ + } while(0U) + +/** @brief Clear the SPI OVR pending flag. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. + * @retval None + */ +#define __HAL_SPI_CLEAR_OVRFLAG(__HANDLE__) \ + do{ \ + __IO uint32_t tmpreg_ovr = 0x00U; \ + tmpreg_ovr = (__HANDLE__)->Instance->DR; \ + tmpreg_ovr = (__HANDLE__)->Instance->SR; \ + UNUSED(tmpreg_ovr); \ + } while(0U) + +/** @brief Clear the SPI FRE pending flag. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. + * @retval None + */ +#define __HAL_SPI_CLEAR_FREFLAG(__HANDLE__) \ + do{ \ + __IO uint32_t tmpreg_fre = 0x00U; \ + tmpreg_fre = (__HANDLE__)->Instance->SR; \ + UNUSED(tmpreg_fre); \ + }while(0U) + +/** @brief Enable the SPI peripheral. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. 
+ * @retval None + */ +#define __HAL_SPI_ENABLE(__HANDLE__) SET_BIT((__HANDLE__)->Instance->CR1, SPI_CR1_SPE) + +/** @brief Disable the SPI peripheral. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. + * @retval None + */ +#define __HAL_SPI_DISABLE(__HANDLE__) CLEAR_BIT((__HANDLE__)->Instance->CR1, SPI_CR1_SPE) + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup SPI_Private_Macros SPI Private Macros + * @{ + */ + +/** @brief Set the SPI transmit-only mode. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. + * @retval None + */ +#define SPI_1LINE_TX(__HANDLE__) SET_BIT((__HANDLE__)->Instance->CR1, SPI_CR1_BIDIOE) + +/** @brief Set the SPI receive-only mode. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. + * @retval None + */ +#define SPI_1LINE_RX(__HANDLE__) CLEAR_BIT((__HANDLE__)->Instance->CR1, SPI_CR1_BIDIOE) + +/** @brief Reset the CRC calculation of the SPI. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. + * @retval None + */ +#define SPI_RESET_CRC(__HANDLE__) do{CLEAR_BIT((__HANDLE__)->Instance->CR1, SPI_CR1_CRCEN);\ + SET_BIT((__HANDLE__)->Instance->CR1, SPI_CR1_CRCEN);}while(0U) + +/** @brief Check whether the specified SPI flag is set or not. + * @param __SR__ copy of SPI SR register. + * @param __FLAG__ specifies the flag to check. + * This parameter can be one of the following values: + * @arg SPI_FLAG_RXNE: Receive buffer not empty flag + * @arg SPI_FLAG_TXE: Transmit buffer empty flag + * @arg SPI_FLAG_CRCERR: CRC error flag + * @arg SPI_FLAG_MODF: Mode fault flag + * @arg SPI_FLAG_OVR: Overrun flag + * @arg SPI_FLAG_BSY: Busy flag + * @arg SPI_FLAG_FRE: Frame format error flag + * @retval SET or RESET. + */ +#define SPI_CHECK_FLAG(__SR__, __FLAG__) ((((__SR__) & ((__FLAG__) & SPI_FLAG_MASK)) == \ + ((__FLAG__) & SPI_FLAG_MASK)) ? SET : RESET) + +/** @brief Check whether the specified SPI Interrupt is set or not. + * @param __CR2__ copy of SPI CR2 register. + * @param __INTERRUPT__ specifies the SPI interrupt source to check. + * This parameter can be one of the following values: + * @arg SPI_IT_TXE: Tx buffer empty interrupt enable + * @arg SPI_IT_RXNE: RX buffer not empty interrupt enable + * @arg SPI_IT_ERR: Error interrupt enable + * @retval SET or RESET. + */ +#define SPI_CHECK_IT_SOURCE(__CR2__, __INTERRUPT__) ((((__CR2__) & (__INTERRUPT__)) == \ + (__INTERRUPT__)) ? SET : RESET) + +/** @brief Checks if SPI Mode parameter is in allowed range. + * @param __MODE__ specifies the SPI Mode. + * This parameter can be a value of @ref SPI_Mode + * @retval None + */ +#define IS_SPI_MODE(__MODE__) (((__MODE__) == SPI_MODE_SLAVE) || \ + ((__MODE__) == SPI_MODE_MASTER)) + +/** @brief Checks if SPI Direction Mode parameter is in allowed range. + * @param __MODE__ specifies the SPI Direction Mode. + * This parameter can be a value of @ref SPI_Direction + * @retval None + */ +#define IS_SPI_DIRECTION(__MODE__) (((__MODE__) == SPI_DIRECTION_2LINES) || \ + ((__MODE__) == SPI_DIRECTION_2LINES_RXONLY) || \ + ((__MODE__) == SPI_DIRECTION_1LINE)) + +/** @brief Checks if SPI Direction Mode parameter is 2 lines. + * @param __MODE__ specifies the SPI Direction Mode. 
+ * @retval None + */ +#define IS_SPI_DIRECTION_2LINES(__MODE__) ((__MODE__) == SPI_DIRECTION_2LINES) + +/** @brief Checks if SPI Direction Mode parameter is 1 or 2 lines. + * @param __MODE__ specifies the SPI Direction Mode. + * @retval None + */ +#define IS_SPI_DIRECTION_2LINES_OR_1LINE(__MODE__) (((__MODE__) == SPI_DIRECTION_2LINES) || \ + ((__MODE__) == SPI_DIRECTION_1LINE)) + +/** @brief Checks if SPI Data Size parameter is in allowed range. + * @param __DATASIZE__ specifies the SPI Data Size. + * This parameter can be a value of @ref SPI_Data_Size + * @retval None + */ +#define IS_SPI_DATASIZE(__DATASIZE__) (((__DATASIZE__) == SPI_DATASIZE_16BIT) || \ + ((__DATASIZE__) == SPI_DATASIZE_8BIT)) + +/** @brief Checks if SPI Serial clock steady state parameter is in allowed range. + * @param __CPOL__ specifies the SPI serial clock steady state. + * This parameter can be a value of @ref SPI_Clock_Polarity + * @retval None + */ +#define IS_SPI_CPOL(__CPOL__) (((__CPOL__) == SPI_POLARITY_LOW) || \ + ((__CPOL__) == SPI_POLARITY_HIGH)) + +/** @brief Checks if SPI Clock Phase parameter is in allowed range. + * @param __CPHA__ specifies the SPI Clock Phase. + * This parameter can be a value of @ref SPI_Clock_Phase + * @retval None + */ +#define IS_SPI_CPHA(__CPHA__) (((__CPHA__) == SPI_PHASE_1EDGE) || \ + ((__CPHA__) == SPI_PHASE_2EDGE)) + +/** @brief Checks if SPI Slave Select parameter is in allowed range. + * @param __NSS__ specifies the SPI Slave Select management parameter. + * This parameter can be a value of @ref SPI_Slave_Select_management + * @retval None + */ +#define IS_SPI_NSS(__NSS__) (((__NSS__) == SPI_NSS_SOFT) || \ + ((__NSS__) == SPI_NSS_HARD_INPUT) || \ + ((__NSS__) == SPI_NSS_HARD_OUTPUT)) + +/** @brief Checks if SPI Baudrate prescaler parameter is in allowed range. + * @param __PRESCALER__ specifies the SPI Baudrate prescaler. + * This parameter can be a value of @ref SPI_BaudRate_Prescaler + * @retval None + */ +#define IS_SPI_BAUDRATE_PRESCALER(__PRESCALER__) (((__PRESCALER__) == SPI_BAUDRATEPRESCALER_2) || \ + ((__PRESCALER__) == SPI_BAUDRATEPRESCALER_4) || \ + ((__PRESCALER__) == SPI_BAUDRATEPRESCALER_8) || \ + ((__PRESCALER__) == SPI_BAUDRATEPRESCALER_16) || \ + ((__PRESCALER__) == SPI_BAUDRATEPRESCALER_32) || \ + ((__PRESCALER__) == SPI_BAUDRATEPRESCALER_64) || \ + ((__PRESCALER__) == SPI_BAUDRATEPRESCALER_128) || \ + ((__PRESCALER__) == SPI_BAUDRATEPRESCALER_256)) + +/** @brief Checks if SPI MSB LSB transmission parameter is in allowed range. + * @param __BIT__ specifies the SPI MSB LSB transmission (whether data transfer starts from MSB or LSB bit). + * This parameter can be a value of @ref SPI_MSB_LSB_transmission + * @retval None + */ +#define IS_SPI_FIRST_BIT(__BIT__) (((__BIT__) == SPI_FIRSTBIT_MSB) || \ + ((__BIT__) == SPI_FIRSTBIT_LSB)) + +/** @brief Checks if SPI TI mode parameter is in allowed range. + * @param __MODE__ specifies the SPI TI mode. + * This parameter can be a value of @ref SPI_TI_mode + * @retval None + */ +#define IS_SPI_TIMODE(__MODE__) (((__MODE__) == SPI_TIMODE_DISABLE) || \ + ((__MODE__) == SPI_TIMODE_ENABLE)) + +/** @brief Checks if SPI CRC calculation enabled state is in allowed range. + * @param __CALCULATION__ specifies the SPI CRC calculation enable state. 
+ * This parameter can be a value of @ref SPI_CRC_Calculation + * @retval None + */ +#define IS_SPI_CRC_CALCULATION(__CALCULATION__) (((__CALCULATION__) == SPI_CRCCALCULATION_DISABLE) || \ + ((__CALCULATION__) == SPI_CRCCALCULATION_ENABLE)) + +/** @brief Checks if SPI polynomial value to be used for the CRC calculation, is in allowed range. + * @param __POLYNOMIAL__ specifies the SPI polynomial value to be used for the CRC calculation. + * This parameter must be a number between Min_Data = 0 and Max_Data = 65535 + * @retval None + */ +#define IS_SPI_CRC_POLYNOMIAL(__POLYNOMIAL__) (((__POLYNOMIAL__) >= 0x1U) && \ + ((__POLYNOMIAL__) <= 0xFFFFU) && \ + (((__POLYNOMIAL__)&0x1U) != 0U)) + +/** @brief Checks if DMA handle is valid. + * @param __HANDLE__ specifies a DMA Handle. + * @retval None + */ +#define IS_SPI_DMA_HANDLE(__HANDLE__) ((__HANDLE__) != NULL) + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup SPI_Exported_Functions + * @{ + */ + +/** @addtogroup SPI_Exported_Functions_Group1 + * @{ + */ +/* Initialization/de-initialization functions ********************************/ +HAL_StatusTypeDef HAL_SPI_Init(SPI_HandleTypeDef *hspi); +HAL_StatusTypeDef HAL_SPI_DeInit(SPI_HandleTypeDef *hspi); +void HAL_SPI_MspInit(SPI_HandleTypeDef *hspi); +void HAL_SPI_MspDeInit(SPI_HandleTypeDef *hspi); + +/* Callbacks Register/UnRegister functions ***********************************/ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) +HAL_StatusTypeDef HAL_SPI_RegisterCallback(SPI_HandleTypeDef *hspi, HAL_SPI_CallbackIDTypeDef CallbackID, + pSPI_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_SPI_UnRegisterCallback(SPI_HandleTypeDef *hspi, HAL_SPI_CallbackIDTypeDef CallbackID); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +/** + * @} + */ + +/** @addtogroup SPI_Exported_Functions_Group2 + * @{ + */ +/* I/O operation functions ***************************************************/ +HAL_StatusTypeDef HAL_SPI_Transmit(SPI_HandleTypeDef *hspi, const uint8_t *pData, uint16_t Size, uint32_t Timeout); +HAL_StatusTypeDef HAL_SPI_Receive(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size, uint32_t Timeout); +HAL_StatusTypeDef HAL_SPI_TransmitReceive(SPI_HandleTypeDef *hspi, const uint8_t *pTxData, uint8_t *pRxData, + uint16_t Size, uint32_t Timeout); +HAL_StatusTypeDef HAL_SPI_Transmit_IT(SPI_HandleTypeDef *hspi, const uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_SPI_Receive_IT(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_SPI_TransmitReceive_IT(SPI_HandleTypeDef *hspi, const uint8_t *pTxData, uint8_t *pRxData, + uint16_t Size); +HAL_StatusTypeDef HAL_SPI_Transmit_DMA(SPI_HandleTypeDef *hspi, const uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_SPI_Receive_DMA(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_SPI_TransmitReceive_DMA(SPI_HandleTypeDef *hspi, const uint8_t *pTxData, uint8_t *pRxData, + uint16_t Size); +HAL_StatusTypeDef HAL_SPI_DMAPause(SPI_HandleTypeDef *hspi); +HAL_StatusTypeDef HAL_SPI_DMAResume(SPI_HandleTypeDef *hspi); +HAL_StatusTypeDef HAL_SPI_DMAStop(SPI_HandleTypeDef *hspi); +/* Transfer Abort functions */ +HAL_StatusTypeDef HAL_SPI_Abort(SPI_HandleTypeDef *hspi); +HAL_StatusTypeDef HAL_SPI_Abort_IT(SPI_HandleTypeDef *hspi); + +void HAL_SPI_IRQHandler(SPI_HandleTypeDef *hspi); +void HAL_SPI_TxCpltCallback(SPI_HandleTypeDef *hspi); +void HAL_SPI_RxCpltCallback(SPI_HandleTypeDef *hspi); +void HAL_SPI_TxRxCpltCallback(SPI_HandleTypeDef *hspi); 
+void HAL_SPI_TxHalfCpltCallback(SPI_HandleTypeDef *hspi); +void HAL_SPI_RxHalfCpltCallback(SPI_HandleTypeDef *hspi); +void HAL_SPI_TxRxHalfCpltCallback(SPI_HandleTypeDef *hspi); +void HAL_SPI_ErrorCallback(SPI_HandleTypeDef *hspi); +void HAL_SPI_AbortCpltCallback(SPI_HandleTypeDef *hspi); +/** + * @} + */ + +/** @addtogroup SPI_Exported_Functions_Group3 + * @{ + */ +/* Peripheral State and Error functions ***************************************/ +HAL_SPI_StateTypeDef HAL_SPI_GetState(const SPI_HandleTypeDef *hspi); +uint32_t HAL_SPI_GetError(const SPI_HandleTypeDef *hspi); +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F4xx_HAL_SPI_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_tim.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_tim.h new file mode 100644 index 0000000..1a8357c --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_tim.h @@ -0,0 +1,2155 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_tim.h + * @author MCD Application Team + * @brief Header file of TIM HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F4xx_HAL_TIM_H +#define STM32F4xx_HAL_TIM_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal_def.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @addtogroup TIM + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup TIM_Exported_Types TIM Exported Types + * @{ + */ + +/** + * @brief TIM Time base Configuration Structure definition + */ +typedef struct +{ + uint32_t Prescaler; /*!< Specifies the prescaler value used to divide the TIM clock. + This parameter can be a number between Min_Data = 0x0000 and Max_Data = 0xFFFF */ + + uint32_t CounterMode; /*!< Specifies the counter mode. + This parameter can be a value of @ref TIM_Counter_Mode */ + + uint32_t Period; /*!< Specifies the period value to be loaded into the active + Auto-Reload Register at the next update event. + This parameter can be a number between Min_Data = 0x0000 and Max_Data = 0xFFFF. */ + + uint32_t ClockDivision; /*!< Specifies the clock division. + This parameter can be a value of @ref TIM_ClockDivision */ + + uint32_t RepetitionCounter; /*!< Specifies the repetition counter value. Each time the RCR downcounter + reaches zero, an update event is generated and counting restarts + from the RCR value (N). + This means in PWM mode that (N+1) corresponds to: + - the number of PWM periods in edge-aligned mode + - the number of half PWM period in center-aligned mode + GP timers: this parameter must be a number between Min_Data = 0x00 and + Max_Data = 0xFF. + Advanced timers: this parameter must be a number between Min_Data = 0x0000 and + Max_Data = 0xFFFF. */ + + uint32_t AutoReloadPreload; /*!< Specifies the auto-reload preload. 
+ This parameter can be a value of @ref TIM_AutoReloadPreload */ +} TIM_Base_InitTypeDef; + +/** + * @brief TIM Output Compare Configuration Structure definition + */ +typedef struct +{ + uint32_t OCMode; /*!< Specifies the TIM mode. + This parameter can be a value of @ref TIM_Output_Compare_and_PWM_modes */ + + uint32_t Pulse; /*!< Specifies the pulse value to be loaded into the Capture Compare Register. + This parameter can be a number between Min_Data = 0x0000 and Max_Data = 0xFFFF */ + + uint32_t OCPolarity; /*!< Specifies the output polarity. + This parameter can be a value of @ref TIM_Output_Compare_Polarity */ + + uint32_t OCNPolarity; /*!< Specifies the complementary output polarity. + This parameter can be a value of @ref TIM_Output_Compare_N_Polarity + @note This parameter is valid only for timer instances supporting break feature. */ + + uint32_t OCFastMode; /*!< Specifies the Fast mode state. + This parameter can be a value of @ref TIM_Output_Fast_State + @note This parameter is valid only in PWM1 and PWM2 mode. */ + + + uint32_t OCIdleState; /*!< Specifies the TIM Output Compare pin state during Idle state. + This parameter can be a value of @ref TIM_Output_Compare_Idle_State + @note This parameter is valid only for timer instances supporting break feature. */ + + uint32_t OCNIdleState; /*!< Specifies the TIM Output Compare pin state during Idle state. + This parameter can be a value of @ref TIM_Output_Compare_N_Idle_State + @note This parameter is valid only for timer instances supporting break feature. */ +} TIM_OC_InitTypeDef; + +/** + * @brief TIM One Pulse Mode Configuration Structure definition + */ +typedef struct +{ + uint32_t OCMode; /*!< Specifies the TIM mode. + This parameter can be a value of @ref TIM_Output_Compare_and_PWM_modes */ + + uint32_t Pulse; /*!< Specifies the pulse value to be loaded into the Capture Compare Register. + This parameter can be a number between Min_Data = 0x0000 and Max_Data = 0xFFFF */ + + uint32_t OCPolarity; /*!< Specifies the output polarity. + This parameter can be a value of @ref TIM_Output_Compare_Polarity */ + + uint32_t OCNPolarity; /*!< Specifies the complementary output polarity. + This parameter can be a value of @ref TIM_Output_Compare_N_Polarity + @note This parameter is valid only for timer instances supporting break feature. */ + + uint32_t OCIdleState; /*!< Specifies the TIM Output Compare pin state during Idle state. + This parameter can be a value of @ref TIM_Output_Compare_Idle_State + @note This parameter is valid only for timer instances supporting break feature. */ + + uint32_t OCNIdleState; /*!< Specifies the TIM Output Compare pin state during Idle state. + This parameter can be a value of @ref TIM_Output_Compare_N_Idle_State + @note This parameter is valid only for timer instances supporting break feature. */ + + uint32_t ICPolarity; /*!< Specifies the active edge of the input signal. + This parameter can be a value of @ref TIM_Input_Capture_Polarity */ + + uint32_t ICSelection; /*!< Specifies the input. + This parameter can be a value of @ref TIM_Input_Capture_Selection */ + + uint32_t ICFilter; /*!< Specifies the input capture filter. + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ +} TIM_OnePulse_InitTypeDef; + +/** + * @brief TIM Input Capture Configuration Structure definition + */ +typedef struct +{ + uint32_t ICPolarity; /*!< Specifies the active edge of the input signal. 
+ This parameter can be a value of @ref TIM_Input_Capture_Polarity */ + + uint32_t ICSelection; /*!< Specifies the input. + This parameter can be a value of @ref TIM_Input_Capture_Selection */ + + uint32_t ICPrescaler; /*!< Specifies the Input Capture Prescaler. + This parameter can be a value of @ref TIM_Input_Capture_Prescaler */ + + uint32_t ICFilter; /*!< Specifies the input capture filter. + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ +} TIM_IC_InitTypeDef; + +/** + * @brief TIM Encoder Configuration Structure definition + */ +typedef struct +{ + uint32_t EncoderMode; /*!< Specifies the active edge of the input signal. + This parameter can be a value of @ref TIM_Encoder_Mode */ + + uint32_t IC1Polarity; /*!< Specifies the active edge of the input signal. + This parameter can be a value of @ref TIM_Encoder_Input_Polarity */ + + uint32_t IC1Selection; /*!< Specifies the input. + This parameter can be a value of @ref TIM_Input_Capture_Selection */ + + uint32_t IC1Prescaler; /*!< Specifies the Input Capture Prescaler. + This parameter can be a value of @ref TIM_Input_Capture_Prescaler */ + + uint32_t IC1Filter; /*!< Specifies the input capture filter. + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ + + uint32_t IC2Polarity; /*!< Specifies the active edge of the input signal. + This parameter can be a value of @ref TIM_Encoder_Input_Polarity */ + + uint32_t IC2Selection; /*!< Specifies the input. + This parameter can be a value of @ref TIM_Input_Capture_Selection */ + + uint32_t IC2Prescaler; /*!< Specifies the Input Capture Prescaler. + This parameter can be a value of @ref TIM_Input_Capture_Prescaler */ + + uint32_t IC2Filter; /*!< Specifies the input capture filter. + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ +} TIM_Encoder_InitTypeDef; + +/** + * @brief Clock Configuration Handle Structure definition + */ +typedef struct +{ + uint32_t ClockSource; /*!< TIM clock sources + This parameter can be a value of @ref TIM_Clock_Source */ + uint32_t ClockPolarity; /*!< TIM clock polarity + This parameter can be a value of @ref TIM_Clock_Polarity */ + uint32_t ClockPrescaler; /*!< TIM clock prescaler + This parameter can be a value of @ref TIM_Clock_Prescaler */ + uint32_t ClockFilter; /*!< TIM clock filter + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ +} TIM_ClockConfigTypeDef; + +/** + * @brief TIM Clear Input Configuration Handle Structure definition + */ +typedef struct +{ + uint32_t ClearInputState; /*!< TIM clear Input state + This parameter can be ENABLE or DISABLE */ + uint32_t ClearInputSource; /*!< TIM clear Input sources + This parameter can be a value of @ref TIM_ClearInput_Source */ + uint32_t ClearInputPolarity; /*!< TIM Clear Input polarity + This parameter can be a value of @ref TIM_ClearInput_Polarity */ + uint32_t ClearInputPrescaler; /*!< TIM Clear Input prescaler + This parameter must be 0: When OCRef clear feature is used with ETR source, + ETR prescaler must be off */ + uint32_t ClearInputFilter; /*!< TIM Clear Input filter + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ +} TIM_ClearInputConfigTypeDef; + +/** + * @brief TIM Master configuration Structure definition + */ +typedef struct +{ + uint32_t MasterOutputTrigger; /*!< Trigger output (TRGO) selection + This parameter can be a value of @ref TIM_Master_Mode_Selection */ + uint32_t MasterSlaveMode; /*!< Master/slave mode selection + This parameter can be a value of 
@ref TIM_Master_Slave_Mode + @note When the Master/slave mode is enabled, the effect of + an event on the trigger input (TRGI) is delayed to allow a + perfect synchronization between the current timer and its + slaves (through TRGO). It is not mandatory in case of timer + synchronization mode. */ +} TIM_MasterConfigTypeDef; + +/** + * @brief TIM Slave configuration Structure definition + */ +typedef struct +{ + uint32_t SlaveMode; /*!< Slave mode selection + This parameter can be a value of @ref TIM_Slave_Mode */ + uint32_t InputTrigger; /*!< Input Trigger source + This parameter can be a value of @ref TIM_Trigger_Selection */ + uint32_t TriggerPolarity; /*!< Input Trigger polarity + This parameter can be a value of @ref TIM_Trigger_Polarity */ + uint32_t TriggerPrescaler; /*!< Input trigger prescaler + This parameter can be a value of @ref TIM_Trigger_Prescaler */ + uint32_t TriggerFilter; /*!< Input trigger filter + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ + +} TIM_SlaveConfigTypeDef; + +/** + * @brief TIM Break input(s) and Dead time configuration Structure definition + * @note 2 break inputs can be configured (BKIN and BKIN2) with configurable + * filter and polarity. + */ +typedef struct +{ + uint32_t OffStateRunMode; /*!< TIM off state in run mode, This parameter can be a value of @ref TIM_OSSR_Off_State_Selection_for_Run_mode_state */ + + uint32_t OffStateIDLEMode; /*!< TIM off state in IDLE mode, This parameter can be a value of @ref TIM_OSSI_Off_State_Selection_for_Idle_mode_state */ + + uint32_t LockLevel; /*!< TIM Lock level, This parameter can be a value of @ref TIM_Lock_level */ + + uint32_t DeadTime; /*!< TIM dead Time, This parameter can be a number between Min_Data = 0x00 and Max_Data = 0xFF */ + + uint32_t BreakState; /*!< TIM Break State, This parameter can be a value of @ref TIM_Break_Input_enable_disable */ + + uint32_t BreakPolarity; /*!< TIM Break input polarity, This parameter can be a value of @ref TIM_Break_Polarity */ + + uint32_t BreakFilter; /*!< Specifies the break input filter.This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ + + uint32_t AutomaticOutput; /*!< TIM Automatic Output Enable state, This parameter can be a value of @ref TIM_AOE_Bit_Set_Reset */ + +} TIM_BreakDeadTimeConfigTypeDef; + +/** + * @brief HAL State structures definition + */ +typedef enum +{ + HAL_TIM_STATE_RESET = 0x00U, /*!< Peripheral not yet initialized or disabled */ + HAL_TIM_STATE_READY = 0x01U, /*!< Peripheral Initialized and ready for use */ + HAL_TIM_STATE_BUSY = 0x02U, /*!< An internal process is ongoing */ + HAL_TIM_STATE_TIMEOUT = 0x03U, /*!< Timeout state */ + HAL_TIM_STATE_ERROR = 0x04U /*!< Reception process is ongoing */ +} HAL_TIM_StateTypeDef; + +/** + * @brief TIM Channel States definition + */ +typedef enum +{ + HAL_TIM_CHANNEL_STATE_RESET = 0x00U, /*!< TIM Channel initial state */ + HAL_TIM_CHANNEL_STATE_READY = 0x01U, /*!< TIM Channel ready for use */ + HAL_TIM_CHANNEL_STATE_BUSY = 0x02U, /*!< An internal process is ongoing on the TIM channel */ +} HAL_TIM_ChannelStateTypeDef; + +/** + * @brief DMA Burst States definition + */ +typedef enum +{ + HAL_DMA_BURST_STATE_RESET = 0x00U, /*!< DMA Burst initial state */ + HAL_DMA_BURST_STATE_READY = 0x01U, /*!< DMA Burst ready for use */ + HAL_DMA_BURST_STATE_BUSY = 0x02U, /*!< Ongoing DMA Burst */ +} HAL_TIM_DMABurstStateTypeDef; + +/** + * @brief HAL Active channel structures definition + */ +typedef enum +{ + HAL_TIM_ACTIVE_CHANNEL_1 = 0x01U, /*!< The active 
channel is 1 */ + HAL_TIM_ACTIVE_CHANNEL_2 = 0x02U, /*!< The active channel is 2 */ + HAL_TIM_ACTIVE_CHANNEL_3 = 0x04U, /*!< The active channel is 3 */ + HAL_TIM_ACTIVE_CHANNEL_4 = 0x08U, /*!< The active channel is 4 */ + HAL_TIM_ACTIVE_CHANNEL_CLEARED = 0x00U /*!< All active channels cleared */ +} HAL_TIM_ActiveChannel; + +/** + * @brief TIM Time Base Handle Structure definition + */ +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) +typedef struct __TIM_HandleTypeDef +#else +typedef struct +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ +{ + TIM_TypeDef *Instance; /*!< Register base address */ + TIM_Base_InitTypeDef Init; /*!< TIM Time Base required parameters */ + HAL_TIM_ActiveChannel Channel; /*!< Active channel */ + DMA_HandleTypeDef *hdma[7]; /*!< DMA Handlers array + This array is accessed by a @ref DMA_Handle_index */ + HAL_LockTypeDef Lock; /*!< Locking object */ + __IO HAL_TIM_StateTypeDef State; /*!< TIM operation state */ + __IO HAL_TIM_ChannelStateTypeDef ChannelState[4]; /*!< TIM channel operation state */ + __IO HAL_TIM_ChannelStateTypeDef ChannelNState[4]; /*!< TIM complementary channel operation state */ + __IO HAL_TIM_DMABurstStateTypeDef DMABurstState; /*!< DMA burst operation state */ + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + void (* Base_MspInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Base Msp Init Callback */ + void (* Base_MspDeInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Base Msp DeInit Callback */ + void (* IC_MspInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM IC Msp Init Callback */ + void (* IC_MspDeInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM IC Msp DeInit Callback */ + void (* OC_MspInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM OC Msp Init Callback */ + void (* OC_MspDeInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM OC Msp DeInit Callback */ + void (* PWM_MspInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM PWM Msp Init Callback */ + void (* PWM_MspDeInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM PWM Msp DeInit Callback */ + void (* OnePulse_MspInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM One Pulse Msp Init Callback */ + void (* OnePulse_MspDeInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM One Pulse Msp DeInit Callback */ + void (* Encoder_MspInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Encoder Msp Init Callback */ + void (* Encoder_MspDeInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Encoder Msp DeInit Callback */ + void (* HallSensor_MspInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Hall Sensor Msp Init Callback */ + void (* HallSensor_MspDeInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Hall Sensor Msp DeInit Callback */ + void (* PeriodElapsedCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Period Elapsed Callback */ + void (* PeriodElapsedHalfCpltCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Period Elapsed half complete Callback */ + void (* TriggerCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Trigger Callback */ + void (* TriggerHalfCpltCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Trigger half complete Callback */ + void (* IC_CaptureCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Input Capture Callback */ + void (* IC_CaptureHalfCpltCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Input Capture half complete Callback */ + void (* OC_DelayElapsedCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Output Compare Delay Elapsed Callback */ + void (* 
PWM_PulseFinishedCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM PWM Pulse Finished Callback */ + void (* PWM_PulseFinishedHalfCpltCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM PWM Pulse Finished half complete Callback */ + void (* ErrorCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Error Callback */ + void (* CommutationCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Commutation Callback */ + void (* CommutationHalfCpltCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Commutation half complete Callback */ + void (* BreakCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Break Callback */ +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ +} TIM_HandleTypeDef; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) +/** + * @brief HAL TIM Callback ID enumeration definition + */ +typedef enum +{ + HAL_TIM_BASE_MSPINIT_CB_ID = 0x00U /*!< TIM Base MspInit Callback ID */ + , HAL_TIM_BASE_MSPDEINIT_CB_ID = 0x01U /*!< TIM Base MspDeInit Callback ID */ + , HAL_TIM_IC_MSPINIT_CB_ID = 0x02U /*!< TIM IC MspInit Callback ID */ + , HAL_TIM_IC_MSPDEINIT_CB_ID = 0x03U /*!< TIM IC MspDeInit Callback ID */ + , HAL_TIM_OC_MSPINIT_CB_ID = 0x04U /*!< TIM OC MspInit Callback ID */ + , HAL_TIM_OC_MSPDEINIT_CB_ID = 0x05U /*!< TIM OC MspDeInit Callback ID */ + , HAL_TIM_PWM_MSPINIT_CB_ID = 0x06U /*!< TIM PWM MspInit Callback ID */ + , HAL_TIM_PWM_MSPDEINIT_CB_ID = 0x07U /*!< TIM PWM MspDeInit Callback ID */ + , HAL_TIM_ONE_PULSE_MSPINIT_CB_ID = 0x08U /*!< TIM One Pulse MspInit Callback ID */ + , HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID = 0x09U /*!< TIM One Pulse MspDeInit Callback ID */ + , HAL_TIM_ENCODER_MSPINIT_CB_ID = 0x0AU /*!< TIM Encoder MspInit Callback ID */ + , HAL_TIM_ENCODER_MSPDEINIT_CB_ID = 0x0BU /*!< TIM Encoder MspDeInit Callback ID */ + , HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID = 0x0CU /*!< TIM Hall Sensor MspDeInit Callback ID */ + , HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID = 0x0DU /*!< TIM Hall Sensor MspDeInit Callback ID */ + , HAL_TIM_PERIOD_ELAPSED_CB_ID = 0x0EU /*!< TIM Period Elapsed Callback ID */ + , HAL_TIM_PERIOD_ELAPSED_HALF_CB_ID = 0x0FU /*!< TIM Period Elapsed half complete Callback ID */ + , HAL_TIM_TRIGGER_CB_ID = 0x10U /*!< TIM Trigger Callback ID */ + , HAL_TIM_TRIGGER_HALF_CB_ID = 0x11U /*!< TIM Trigger half complete Callback ID */ + , HAL_TIM_IC_CAPTURE_CB_ID = 0x12U /*!< TIM Input Capture Callback ID */ + , HAL_TIM_IC_CAPTURE_HALF_CB_ID = 0x13U /*!< TIM Input Capture half complete Callback ID */ + , HAL_TIM_OC_DELAY_ELAPSED_CB_ID = 0x14U /*!< TIM Output Compare Delay Elapsed Callback ID */ + , HAL_TIM_PWM_PULSE_FINISHED_CB_ID = 0x15U /*!< TIM PWM Pulse Finished Callback ID */ + , HAL_TIM_PWM_PULSE_FINISHED_HALF_CB_ID = 0x16U /*!< TIM PWM Pulse Finished half complete Callback ID */ + , HAL_TIM_ERROR_CB_ID = 0x17U /*!< TIM Error Callback ID */ + , HAL_TIM_COMMUTATION_CB_ID = 0x18U /*!< TIM Commutation Callback ID */ + , HAL_TIM_COMMUTATION_HALF_CB_ID = 0x19U /*!< TIM Commutation half complete Callback ID */ + , HAL_TIM_BREAK_CB_ID = 0x1AU /*!< TIM Break Callback ID */ +} HAL_TIM_CallbackIDTypeDef; + +/** + * @brief HAL TIM Callback pointer definition + */ +typedef void (*pTIM_CallbackTypeDef)(TIM_HandleTypeDef *htim); /*!< pointer to the TIM callback function */ + +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + +/** + * @} + */ +/* End of exported types -----------------------------------------------------*/ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup TIM_Exported_Constants TIM Exported Constants + * @{ + */ + +/** 
@defgroup TIM_ClearInput_Source TIM Clear Input Source + * @{ + */ +#define TIM_CLEARINPUTSOURCE_NONE 0x00000000U /*!< OCREF_CLR is disabled */ +#define TIM_CLEARINPUTSOURCE_ETR 0x00000001U /*!< OCREF_CLR is connected to ETRF input */ +/** + * @} + */ + +/** @defgroup TIM_DMA_Base_address TIM DMA Base Address + * @{ + */ +#define TIM_DMABASE_CR1 0x00000000U +#define TIM_DMABASE_CR2 0x00000001U +#define TIM_DMABASE_SMCR 0x00000002U +#define TIM_DMABASE_DIER 0x00000003U +#define TIM_DMABASE_SR 0x00000004U +#define TIM_DMABASE_EGR 0x00000005U +#define TIM_DMABASE_CCMR1 0x00000006U +#define TIM_DMABASE_CCMR2 0x00000007U +#define TIM_DMABASE_CCER 0x00000008U +#define TIM_DMABASE_CNT 0x00000009U +#define TIM_DMABASE_PSC 0x0000000AU +#define TIM_DMABASE_ARR 0x0000000BU +#define TIM_DMABASE_RCR 0x0000000CU +#define TIM_DMABASE_CCR1 0x0000000DU +#define TIM_DMABASE_CCR2 0x0000000EU +#define TIM_DMABASE_CCR3 0x0000000FU +#define TIM_DMABASE_CCR4 0x00000010U +#define TIM_DMABASE_BDTR 0x00000011U +#define TIM_DMABASE_DCR 0x00000012U +#define TIM_DMABASE_DMAR 0x00000013U +/** + * @} + */ + +/** @defgroup TIM_Event_Source TIM Event Source + * @{ + */ +#define TIM_EVENTSOURCE_UPDATE TIM_EGR_UG /*!< Reinitialize the counter and generates an update of the registers */ +#define TIM_EVENTSOURCE_CC1 TIM_EGR_CC1G /*!< A capture/compare event is generated on channel 1 */ +#define TIM_EVENTSOURCE_CC2 TIM_EGR_CC2G /*!< A capture/compare event is generated on channel 2 */ +#define TIM_EVENTSOURCE_CC3 TIM_EGR_CC3G /*!< A capture/compare event is generated on channel 3 */ +#define TIM_EVENTSOURCE_CC4 TIM_EGR_CC4G /*!< A capture/compare event is generated on channel 4 */ +#define TIM_EVENTSOURCE_COM TIM_EGR_COMG /*!< A commutation event is generated */ +#define TIM_EVENTSOURCE_TRIGGER TIM_EGR_TG /*!< A trigger event is generated */ +#define TIM_EVENTSOURCE_BREAK TIM_EGR_BG /*!< A break event is generated */ +/** + * @} + */ + +/** @defgroup TIM_Input_Channel_Polarity TIM Input Channel polarity + * @{ + */ +#define TIM_INPUTCHANNELPOLARITY_RISING 0x00000000U /*!< Polarity for TIx source */ +#define TIM_INPUTCHANNELPOLARITY_FALLING TIM_CCER_CC1P /*!< Polarity for TIx source */ +#define TIM_INPUTCHANNELPOLARITY_BOTHEDGE (TIM_CCER_CC1P | TIM_CCER_CC1NP) /*!< Polarity for TIx source */ +/** + * @} + */ + +/** @defgroup TIM_ETR_Polarity TIM ETR Polarity + * @{ + */ +#define TIM_ETRPOLARITY_INVERTED TIM_SMCR_ETP /*!< Polarity for ETR source */ +#define TIM_ETRPOLARITY_NONINVERTED 0x00000000U /*!< Polarity for ETR source */ +/** + * @} + */ + +/** @defgroup TIM_ETR_Prescaler TIM ETR Prescaler + * @{ + */ +#define TIM_ETRPRESCALER_DIV1 0x00000000U /*!< No prescaler is used */ +#define TIM_ETRPRESCALER_DIV2 TIM_SMCR_ETPS_0 /*!< ETR input source is divided by 2 */ +#define TIM_ETRPRESCALER_DIV4 TIM_SMCR_ETPS_1 /*!< ETR input source is divided by 4 */ +#define TIM_ETRPRESCALER_DIV8 TIM_SMCR_ETPS /*!< ETR input source is divided by 8 */ +/** + * @} + */ + +/** @defgroup TIM_Counter_Mode TIM Counter Mode + * @{ + */ +#define TIM_COUNTERMODE_UP 0x00000000U /*!< Counter used as up-counter */ +#define TIM_COUNTERMODE_DOWN TIM_CR1_DIR /*!< Counter used as down-counter */ +#define TIM_COUNTERMODE_CENTERALIGNED1 TIM_CR1_CMS_0 /*!< Center-aligned mode 1 */ +#define TIM_COUNTERMODE_CENTERALIGNED2 TIM_CR1_CMS_1 /*!< Center-aligned mode 2 */ +#define TIM_COUNTERMODE_CENTERALIGNED3 TIM_CR1_CMS /*!< Center-aligned mode 3 */ +/** + * @} + */ + +/** @defgroup TIM_ClockDivision TIM Clock Division + * @{ + */ +#define TIM_CLOCKDIVISION_DIV1 
0x00000000U /*!< Clock division: tDTS=tCK_INT */ +#define TIM_CLOCKDIVISION_DIV2 TIM_CR1_CKD_0 /*!< Clock division: tDTS=2*tCK_INT */ +#define TIM_CLOCKDIVISION_DIV4 TIM_CR1_CKD_1 /*!< Clock division: tDTS=4*tCK_INT */ +/** + * @} + */ + +/** @defgroup TIM_Output_Compare_State TIM Output Compare State + * @{ + */ +#define TIM_OUTPUTSTATE_DISABLE 0x00000000U /*!< Capture/Compare 1 output disabled */ +#define TIM_OUTPUTSTATE_ENABLE TIM_CCER_CC1E /*!< Capture/Compare 1 output enabled */ +/** + * @} + */ + +/** @defgroup TIM_AutoReloadPreload TIM Auto-Reload Preload + * @{ + */ +#define TIM_AUTORELOAD_PRELOAD_DISABLE 0x00000000U /*!< TIMx_ARR register is not buffered */ +#define TIM_AUTORELOAD_PRELOAD_ENABLE TIM_CR1_ARPE /*!< TIMx_ARR register is buffered */ + +/** + * @} + */ + +/** @defgroup TIM_Output_Fast_State TIM Output Fast State + * @{ + */ +#define TIM_OCFAST_DISABLE 0x00000000U /*!< Output Compare fast disable */ +#define TIM_OCFAST_ENABLE TIM_CCMR1_OC1FE /*!< Output Compare fast enable */ +/** + * @} + */ + +/** @defgroup TIM_Output_Compare_N_State TIM Complementary Output Compare State + * @{ + */ +#define TIM_OUTPUTNSTATE_DISABLE 0x00000000U /*!< OCxN is disabled */ +#define TIM_OUTPUTNSTATE_ENABLE TIM_CCER_CC1NE /*!< OCxN is enabled */ +/** + * @} + */ + +/** @defgroup TIM_Output_Compare_Polarity TIM Output Compare Polarity + * @{ + */ +#define TIM_OCPOLARITY_HIGH 0x00000000U /*!< Capture/Compare output polarity */ +#define TIM_OCPOLARITY_LOW TIM_CCER_CC1P /*!< Capture/Compare output polarity */ +/** + * @} + */ + +/** @defgroup TIM_Output_Compare_N_Polarity TIM Complementary Output Compare Polarity + * @{ + */ +#define TIM_OCNPOLARITY_HIGH 0x00000000U /*!< Capture/Compare complementary output polarity */ +#define TIM_OCNPOLARITY_LOW TIM_CCER_CC1NP /*!< Capture/Compare complementary output polarity */ +/** + * @} + */ + +/** @defgroup TIM_Output_Compare_Idle_State TIM Output Compare Idle State + * @{ + */ +#define TIM_OCIDLESTATE_SET TIM_CR2_OIS1 /*!< Output Idle state: OCx=1 when MOE=0 */ +#define TIM_OCIDLESTATE_RESET 0x00000000U /*!< Output Idle state: OCx=0 when MOE=0 */ +/** + * @} + */ + +/** @defgroup TIM_Output_Compare_N_Idle_State TIM Complementary Output Compare Idle State + * @{ + */ +#define TIM_OCNIDLESTATE_SET TIM_CR2_OIS1N /*!< Complementary output Idle state: OCxN=1 when MOE=0 */ +#define TIM_OCNIDLESTATE_RESET 0x00000000U /*!< Complementary output Idle state: OCxN=0 when MOE=0 */ +/** + * @} + */ + +/** @defgroup TIM_Input_Capture_Polarity TIM Input Capture Polarity + * @{ + */ +#define TIM_ICPOLARITY_RISING TIM_INPUTCHANNELPOLARITY_RISING /*!< Capture triggered by rising edge on timer input */ +#define TIM_ICPOLARITY_FALLING TIM_INPUTCHANNELPOLARITY_FALLING /*!< Capture triggered by falling edge on timer input */ +#define TIM_ICPOLARITY_BOTHEDGE TIM_INPUTCHANNELPOLARITY_BOTHEDGE /*!< Capture triggered by both rising and falling edges on timer input*/ +/** + * @} + */ + +/** @defgroup TIM_Encoder_Input_Polarity TIM Encoder Input Polarity + * @{ + */ +#define TIM_ENCODERINPUTPOLARITY_RISING TIM_INPUTCHANNELPOLARITY_RISING /*!< Encoder input with rising edge polarity */ +#define TIM_ENCODERINPUTPOLARITY_FALLING TIM_INPUTCHANNELPOLARITY_FALLING /*!< Encoder input with falling edge polarity */ +/** + * @} + */ + +/** @defgroup TIM_Input_Capture_Selection TIM Input Capture Selection + * @{ + */ +#define TIM_ICSELECTION_DIRECTTI TIM_CCMR1_CC1S_0 /*!< TIM Input 1, 2, 3 or 4 is selected to be connected to IC1, IC2, IC3 or IC4, respectively */ +#define 
TIM_ICSELECTION_INDIRECTTI TIM_CCMR1_CC1S_1 /*!< TIM Input 1, 2, 3 or 4 is selected to be connected to IC2, IC1, IC4 or IC3, respectively */
+#define TIM_ICSELECTION_TRC TIM_CCMR1_CC1S /*!< TIM Input 1, 2, 3 or 4 is selected to be connected to TRC */
+/**
+ * @}
+ */
+
+/** @defgroup TIM_Input_Capture_Prescaler TIM Input Capture Prescaler
+ * @{
+ */
+#define TIM_ICPSC_DIV1 0x00000000U /*!< Capture performed each time an edge is detected on the capture input */
+#define TIM_ICPSC_DIV2 TIM_CCMR1_IC1PSC_0 /*!< Capture performed once every 2 events */
+#define TIM_ICPSC_DIV4 TIM_CCMR1_IC1PSC_1 /*!< Capture performed once every 4 events */
+#define TIM_ICPSC_DIV8 TIM_CCMR1_IC1PSC /*!< Capture performed once every 8 events */
+/**
+ * @}
+ */
+
+/** @defgroup TIM_One_Pulse_Mode TIM One Pulse Mode
+ * @{
+ */
+#define TIM_OPMODE_SINGLE TIM_CR1_OPM /*!< Counter stops counting at the next update event */
+#define TIM_OPMODE_REPETITIVE 0x00000000U /*!< Counter is not stopped at update event */
+/**
+ * @}
+ */
+
+/** @defgroup TIM_Encoder_Mode TIM Encoder Mode
+ * @{
+ */
+#define TIM_ENCODERMODE_TI1 TIM_SMCR_SMS_0 /*!< Quadrature encoder mode 1, x2 mode, counts up/down on TI1FP1 edge depending on TI2FP2 level */
+#define TIM_ENCODERMODE_TI2 TIM_SMCR_SMS_1 /*!< Quadrature encoder mode 2, x2 mode, counts up/down on TI2FP2 edge depending on TI1FP1 level. */
+#define TIM_ENCODERMODE_TI12 (TIM_SMCR_SMS_1 | TIM_SMCR_SMS_0) /*!< Quadrature encoder mode 3, x4 mode, counts up/down on both TI1FP1 and TI2FP2 edges depending on the level of the other input. */
+/**
+ * @}
+ */
+
+/** @defgroup TIM_Interrupt_definition TIM interrupt Definition
+ * @{
+ */
+#define TIM_IT_UPDATE TIM_DIER_UIE /*!< Update interrupt */
+#define TIM_IT_CC1 TIM_DIER_CC1IE /*!< Capture/Compare 1 interrupt */
+#define TIM_IT_CC2 TIM_DIER_CC2IE /*!< Capture/Compare 2 interrupt */
+#define TIM_IT_CC3 TIM_DIER_CC3IE /*!< Capture/Compare 3 interrupt */
+#define TIM_IT_CC4 TIM_DIER_CC4IE /*!< Capture/Compare 4 interrupt */
+#define TIM_IT_COM TIM_DIER_COMIE /*!< Commutation interrupt */
+#define TIM_IT_TRIGGER TIM_DIER_TIE /*!< Trigger interrupt */
+#define TIM_IT_BREAK TIM_DIER_BIE /*!< Break interrupt */
+/**
+ * @}
+ */
+
+/** @defgroup TIM_Commutation_Source TIM Commutation Source
+ * @{
+ */
+#define TIM_COMMUTATION_TRGI TIM_CR2_CCUS /*!< When Capture/compare control bits are preloaded, they are updated by setting the COMG bit or when a rising edge occurs on the trigger input */
+#define TIM_COMMUTATION_SOFTWARE 0x00000000U /*!< When Capture/compare control bits are preloaded, they are updated by setting the COMG bit */
+/**
+ * @}
+ */
+
+/** @defgroup TIM_DMA_sources TIM DMA Sources
+ * @{
+ */
+#define TIM_DMA_UPDATE TIM_DIER_UDE /*!< DMA request is triggered by the update event */
+#define TIM_DMA_CC1 TIM_DIER_CC1DE /*!< DMA request is triggered by the capture/compare match 1 event */
+#define TIM_DMA_CC2 TIM_DIER_CC2DE /*!< DMA request is triggered by the capture/compare match 2 event */
+#define TIM_DMA_CC3 TIM_DIER_CC3DE /*!< DMA request is triggered by the capture/compare match 3 event */
+#define TIM_DMA_CC4 TIM_DIER_CC4DE /*!< DMA request is triggered by the capture/compare match 4 event */
+#define TIM_DMA_COM TIM_DIER_COMDE /*!< DMA request is triggered by the commutation event */
+#define TIM_DMA_TRIGGER TIM_DIER_TDE /*!< DMA request is triggered by the trigger event */
+/**
+ * @}
+ */
+
+/** @defgroup TIM_CC_DMA_Request CCx DMA request selection
+ * @{
+ */
+#define TIM_CCDMAREQUEST_CC 0x00000000U /*!<
CCx DMA request sent when capture or compare match event occurs */ +#define TIM_CCDMAREQUEST_UPDATE TIM_CR2_CCDS /*!< CCx DMA requests sent when update event occurs */ +/** + * @} + */ + +/** @defgroup TIM_Flag_definition TIM Flag Definition + * @{ + */ +#define TIM_FLAG_UPDATE TIM_SR_UIF /*!< Update interrupt flag */ +#define TIM_FLAG_CC1 TIM_SR_CC1IF /*!< Capture/Compare 1 interrupt flag */ +#define TIM_FLAG_CC2 TIM_SR_CC2IF /*!< Capture/Compare 2 interrupt flag */ +#define TIM_FLAG_CC3 TIM_SR_CC3IF /*!< Capture/Compare 3 interrupt flag */ +#define TIM_FLAG_CC4 TIM_SR_CC4IF /*!< Capture/Compare 4 interrupt flag */ +#define TIM_FLAG_COM TIM_SR_COMIF /*!< Commutation interrupt flag */ +#define TIM_FLAG_TRIGGER TIM_SR_TIF /*!< Trigger interrupt flag */ +#define TIM_FLAG_BREAK TIM_SR_BIF /*!< Break interrupt flag */ +#define TIM_FLAG_CC1OF TIM_SR_CC1OF /*!< Capture 1 overcapture flag */ +#define TIM_FLAG_CC2OF TIM_SR_CC2OF /*!< Capture 2 overcapture flag */ +#define TIM_FLAG_CC3OF TIM_SR_CC3OF /*!< Capture 3 overcapture flag */ +#define TIM_FLAG_CC4OF TIM_SR_CC4OF /*!< Capture 4 overcapture flag */ +/** + * @} + */ + +/** @defgroup TIM_Channel TIM Channel + * @{ + */ +#define TIM_CHANNEL_1 0x00000000U /*!< Capture/compare channel 1 identifier */ +#define TIM_CHANNEL_2 0x00000004U /*!< Capture/compare channel 2 identifier */ +#define TIM_CHANNEL_3 0x00000008U /*!< Capture/compare channel 3 identifier */ +#define TIM_CHANNEL_4 0x0000000CU /*!< Capture/compare channel 4 identifier */ +#define TIM_CHANNEL_ALL 0x0000003CU /*!< Global Capture/compare channel identifier */ +/** + * @} + */ + +/** @defgroup TIM_Clock_Source TIM Clock Source + * @{ + */ +#define TIM_CLOCKSOURCE_INTERNAL TIM_SMCR_ETPS_0 /*!< Internal clock source */ +#define TIM_CLOCKSOURCE_ETRMODE1 TIM_TS_ETRF /*!< External clock source mode 1 (ETRF) */ +#define TIM_CLOCKSOURCE_ETRMODE2 TIM_SMCR_ETPS_1 /*!< External clock source mode 2 */ +#define TIM_CLOCKSOURCE_TI1ED TIM_TS_TI1F_ED /*!< External clock source mode 1 (TTI1FP1 + edge detect.) */ +#define TIM_CLOCKSOURCE_TI1 TIM_TS_TI1FP1 /*!< External clock source mode 1 (TTI1FP1) */ +#define TIM_CLOCKSOURCE_TI2 TIM_TS_TI2FP2 /*!< External clock source mode 1 (TTI2FP2) */ +#define TIM_CLOCKSOURCE_ITR0 TIM_TS_ITR0 /*!< External clock source mode 1 (ITR0) */ +#define TIM_CLOCKSOURCE_ITR1 TIM_TS_ITR1 /*!< External clock source mode 1 (ITR1) */ +#define TIM_CLOCKSOURCE_ITR2 TIM_TS_ITR2 /*!< External clock source mode 1 (ITR2) */ +#define TIM_CLOCKSOURCE_ITR3 TIM_TS_ITR3 /*!< External clock source mode 1 (ITR3) */ +/** + * @} + */ + +/** @defgroup TIM_Clock_Polarity TIM Clock Polarity + * @{ + */ +#define TIM_CLOCKPOLARITY_INVERTED TIM_ETRPOLARITY_INVERTED /*!< Polarity for ETRx clock sources */ +#define TIM_CLOCKPOLARITY_NONINVERTED TIM_ETRPOLARITY_NONINVERTED /*!< Polarity for ETRx clock sources */ +#define TIM_CLOCKPOLARITY_RISING TIM_INPUTCHANNELPOLARITY_RISING /*!< Polarity for TIx clock sources */ +#define TIM_CLOCKPOLARITY_FALLING TIM_INPUTCHANNELPOLARITY_FALLING /*!< Polarity for TIx clock sources */ +#define TIM_CLOCKPOLARITY_BOTHEDGE TIM_INPUTCHANNELPOLARITY_BOTHEDGE /*!< Polarity for TIx clock sources */ +/** + * @} + */ + +/** @defgroup TIM_Clock_Prescaler TIM Clock Prescaler + * @{ + */ +#define TIM_CLOCKPRESCALER_DIV1 TIM_ETRPRESCALER_DIV1 /*!< No prescaler is used */ +#define TIM_CLOCKPRESCALER_DIV2 TIM_ETRPRESCALER_DIV2 /*!< Prescaler for External ETR Clock: Capture performed once every 2 events. 
*/ +#define TIM_CLOCKPRESCALER_DIV4 TIM_ETRPRESCALER_DIV4 /*!< Prescaler for External ETR Clock: Capture performed once every 4 events. */ +#define TIM_CLOCKPRESCALER_DIV8 TIM_ETRPRESCALER_DIV8 /*!< Prescaler for External ETR Clock: Capture performed once every 8 events. */ +/** + * @} + */ + +/** @defgroup TIM_ClearInput_Polarity TIM Clear Input Polarity + * @{ + */ +#define TIM_CLEARINPUTPOLARITY_INVERTED TIM_ETRPOLARITY_INVERTED /*!< Polarity for ETRx pin */ +#define TIM_CLEARINPUTPOLARITY_NONINVERTED TIM_ETRPOLARITY_NONINVERTED /*!< Polarity for ETRx pin */ +/** + * @} + */ + +/** @defgroup TIM_ClearInput_Prescaler TIM Clear Input Prescaler + * @{ + */ +#define TIM_CLEARINPUTPRESCALER_DIV1 TIM_ETRPRESCALER_DIV1 /*!< No prescaler is used */ +#define TIM_CLEARINPUTPRESCALER_DIV2 TIM_ETRPRESCALER_DIV2 /*!< Prescaler for External ETR pin: Capture performed once every 2 events. */ +#define TIM_CLEARINPUTPRESCALER_DIV4 TIM_ETRPRESCALER_DIV4 /*!< Prescaler for External ETR pin: Capture performed once every 4 events. */ +#define TIM_CLEARINPUTPRESCALER_DIV8 TIM_ETRPRESCALER_DIV8 /*!< Prescaler for External ETR pin: Capture performed once every 8 events. */ +/** + * @} + */ + +/** @defgroup TIM_OSSR_Off_State_Selection_for_Run_mode_state TIM OSSR OffState Selection for Run mode state + * @{ + */ +#define TIM_OSSR_ENABLE TIM_BDTR_OSSR /*!< When inactive, OC/OCN outputs are enabled (still controlled by the timer) */ +#define TIM_OSSR_DISABLE 0x00000000U /*!< When inactive, OC/OCN outputs are disabled (not controlled any longer by the timer) */ +/** + * @} + */ + +/** @defgroup TIM_OSSI_Off_State_Selection_for_Idle_mode_state TIM OSSI OffState Selection for Idle mode state + * @{ + */ +#define TIM_OSSI_ENABLE TIM_BDTR_OSSI /*!< When inactive, OC/OCN outputs are enabled (still controlled by the timer) */ +#define TIM_OSSI_DISABLE 0x00000000U /*!< When inactive, OC/OCN outputs are disabled (not controlled any longer by the timer) */ +/** + * @} + */ +/** @defgroup TIM_Lock_level TIM Lock level + * @{ + */ +#define TIM_LOCKLEVEL_OFF 0x00000000U /*!< LOCK OFF */ +#define TIM_LOCKLEVEL_1 TIM_BDTR_LOCK_0 /*!< LOCK Level 1 */ +#define TIM_LOCKLEVEL_2 TIM_BDTR_LOCK_1 /*!< LOCK Level 2 */ +#define TIM_LOCKLEVEL_3 TIM_BDTR_LOCK /*!< LOCK Level 3 */ +/** + * @} + */ + +/** @defgroup TIM_Break_Input_enable_disable TIM Break Input Enable + * @{ + */ +#define TIM_BREAK_ENABLE TIM_BDTR_BKE /*!< Break input BRK is enabled */ +#define TIM_BREAK_DISABLE 0x00000000U /*!< Break input BRK is disabled */ +/** + * @} + */ + +/** @defgroup TIM_Break_Polarity TIM Break Input Polarity + * @{ + */ +#define TIM_BREAKPOLARITY_LOW 0x00000000U /*!< Break input BRK is active low */ +#define TIM_BREAKPOLARITY_HIGH TIM_BDTR_BKP /*!< Break input BRK is active high */ +/** + * @} + */ + +/** @defgroup TIM_AOE_Bit_Set_Reset TIM Automatic Output Enable + * @{ + */ +#define TIM_AUTOMATICOUTPUT_DISABLE 0x00000000U /*!< MOE can be set only by software */ +#define TIM_AUTOMATICOUTPUT_ENABLE TIM_BDTR_AOE /*!< MOE can be set by software or automatically at the next update event (if none of the break inputs BRK and BRK2 is active) */ +/** + * @} + */ + +/** @defgroup TIM_Master_Mode_Selection TIM Master Mode Selection + * @{ + */ +#define TIM_TRGO_RESET 0x00000000U /*!< TIMx_EGR.UG bit is used as trigger output (TRGO) */ +#define TIM_TRGO_ENABLE TIM_CR2_MMS_0 /*!< TIMx_CR1.CEN bit is used as trigger output (TRGO) */ +#define TIM_TRGO_UPDATE TIM_CR2_MMS_1 /*!< Update event is used as trigger output (TRGO) */ +#define TIM_TRGO_OC1 
(TIM_CR2_MMS_1 | TIM_CR2_MMS_0) /*!< Capture or a compare match 1 is used as trigger output (TRGO) */ +#define TIM_TRGO_OC1REF TIM_CR2_MMS_2 /*!< OC1REF signal is used as trigger output (TRGO) */ +#define TIM_TRGO_OC2REF (TIM_CR2_MMS_2 | TIM_CR2_MMS_0) /*!< OC2REF signal is used as trigger output(TRGO) */ +#define TIM_TRGO_OC3REF (TIM_CR2_MMS_2 | TIM_CR2_MMS_1) /*!< OC3REF signal is used as trigger output(TRGO) */ +#define TIM_TRGO_OC4REF (TIM_CR2_MMS_2 | TIM_CR2_MMS_1 | TIM_CR2_MMS_0) /*!< OC4REF signal is used as trigger output(TRGO) */ +/** + * @} + */ + +/** @defgroup TIM_Master_Slave_Mode TIM Master/Slave Mode + * @{ + */ +#define TIM_MASTERSLAVEMODE_ENABLE TIM_SMCR_MSM /*!< No action */ +#define TIM_MASTERSLAVEMODE_DISABLE 0x00000000U /*!< Master/slave mode is selected */ +/** + * @} + */ + +/** @defgroup TIM_Slave_Mode TIM Slave mode + * @{ + */ +#define TIM_SLAVEMODE_DISABLE 0x00000000U /*!< Slave mode disabled */ +#define TIM_SLAVEMODE_RESET TIM_SMCR_SMS_2 /*!< Reset Mode */ +#define TIM_SLAVEMODE_GATED (TIM_SMCR_SMS_2 | TIM_SMCR_SMS_0) /*!< Gated Mode */ +#define TIM_SLAVEMODE_TRIGGER (TIM_SMCR_SMS_2 | TIM_SMCR_SMS_1) /*!< Trigger Mode */ +#define TIM_SLAVEMODE_EXTERNAL1 (TIM_SMCR_SMS_2 | TIM_SMCR_SMS_1 | TIM_SMCR_SMS_0) /*!< External Clock Mode 1 */ +/** + * @} + */ + +/** @defgroup TIM_Output_Compare_and_PWM_modes TIM Output Compare and PWM Modes + * @{ + */ +#define TIM_OCMODE_TIMING 0x00000000U /*!< Frozen */ +#define TIM_OCMODE_ACTIVE TIM_CCMR1_OC1M_0 /*!< Set channel to active level on match */ +#define TIM_OCMODE_INACTIVE TIM_CCMR1_OC1M_1 /*!< Set channel to inactive level on match */ +#define TIM_OCMODE_TOGGLE (TIM_CCMR1_OC1M_1 | TIM_CCMR1_OC1M_0) /*!< Toggle */ +#define TIM_OCMODE_PWM1 (TIM_CCMR1_OC1M_2 | TIM_CCMR1_OC1M_1) /*!< PWM mode 1 */ +#define TIM_OCMODE_PWM2 (TIM_CCMR1_OC1M_2 | TIM_CCMR1_OC1M_1 | TIM_CCMR1_OC1M_0) /*!< PWM mode 2 */ +#define TIM_OCMODE_FORCED_ACTIVE (TIM_CCMR1_OC1M_2 | TIM_CCMR1_OC1M_0) /*!< Force active level */ +#define TIM_OCMODE_FORCED_INACTIVE TIM_CCMR1_OC1M_2 /*!< Force inactive level */ +/** + * @} + */ + +/** @defgroup TIM_Trigger_Selection TIM Trigger Selection + * @{ + */ +#define TIM_TS_ITR0 0x00000000U /*!< Internal Trigger 0 (ITR0) */ +#define TIM_TS_ITR1 TIM_SMCR_TS_0 /*!< Internal Trigger 1 (ITR1) */ +#define TIM_TS_ITR2 TIM_SMCR_TS_1 /*!< Internal Trigger 2 (ITR2) */ +#define TIM_TS_ITR3 (TIM_SMCR_TS_0 | TIM_SMCR_TS_1) /*!< Internal Trigger 3 (ITR3) */ +#define TIM_TS_TI1F_ED TIM_SMCR_TS_2 /*!< TI1 Edge Detector (TI1F_ED) */ +#define TIM_TS_TI1FP1 (TIM_SMCR_TS_0 | TIM_SMCR_TS_2) /*!< Filtered Timer Input 1 (TI1FP1) */ +#define TIM_TS_TI2FP2 (TIM_SMCR_TS_1 | TIM_SMCR_TS_2) /*!< Filtered Timer Input 2 (TI2FP2) */ +#define TIM_TS_ETRF (TIM_SMCR_TS_0 | TIM_SMCR_TS_1 | TIM_SMCR_TS_2) /*!< Filtered External Trigger input (ETRF) */ +#define TIM_TS_NONE 0x0000FFFFU /*!< No trigger selected */ +/** + * @} + */ + +/** @defgroup TIM_Trigger_Polarity TIM Trigger Polarity + * @{ + */ +#define TIM_TRIGGERPOLARITY_INVERTED TIM_ETRPOLARITY_INVERTED /*!< Polarity for ETRx trigger sources */ +#define TIM_TRIGGERPOLARITY_NONINVERTED TIM_ETRPOLARITY_NONINVERTED /*!< Polarity for ETRx trigger sources */ +#define TIM_TRIGGERPOLARITY_RISING TIM_INPUTCHANNELPOLARITY_RISING /*!< Polarity for TIxFPx or TI1_ED trigger sources */ +#define TIM_TRIGGERPOLARITY_FALLING TIM_INPUTCHANNELPOLARITY_FALLING /*!< Polarity for TIxFPx or TI1_ED trigger sources */ +#define TIM_TRIGGERPOLARITY_BOTHEDGE TIM_INPUTCHANNELPOLARITY_BOTHEDGE /*!< Polarity for TIxFPx or TI1_ED 
trigger sources */ +/** + * @} + */ + +/** @defgroup TIM_Trigger_Prescaler TIM Trigger Prescaler + * @{ + */ +#define TIM_TRIGGERPRESCALER_DIV1 TIM_ETRPRESCALER_DIV1 /*!< No prescaler is used */ +#define TIM_TRIGGERPRESCALER_DIV2 TIM_ETRPRESCALER_DIV2 /*!< Prescaler for External ETR Trigger: Capture performed once every 2 events. */ +#define TIM_TRIGGERPRESCALER_DIV4 TIM_ETRPRESCALER_DIV4 /*!< Prescaler for External ETR Trigger: Capture performed once every 4 events. */ +#define TIM_TRIGGERPRESCALER_DIV8 TIM_ETRPRESCALER_DIV8 /*!< Prescaler for External ETR Trigger: Capture performed once every 8 events. */ +/** + * @} + */ + +/** @defgroup TIM_TI1_Selection TIM TI1 Input Selection + * @{ + */ +#define TIM_TI1SELECTION_CH1 0x00000000U /*!< The TIMx_CH1 pin is connected to TI1 input */ +#define TIM_TI1SELECTION_XORCOMBINATION TIM_CR2_TI1S /*!< The TIMx_CH1, CH2 and CH3 pins are connected to the TI1 input (XOR combination) */ +/** + * @} + */ + +/** @defgroup TIM_DMA_Burst_Length TIM DMA Burst Length + * @{ + */ +#define TIM_DMABURSTLENGTH_1TRANSFER 0x00000000U /*!< The transfer is done to 1 register starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_2TRANSFERS 0x00000100U /*!< The transfer is done to 2 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_3TRANSFERS 0x00000200U /*!< The transfer is done to 3 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_4TRANSFERS 0x00000300U /*!< The transfer is done to 4 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_5TRANSFERS 0x00000400U /*!< The transfer is done to 5 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_6TRANSFERS 0x00000500U /*!< The transfer is done to 6 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_7TRANSFERS 0x00000600U /*!< The transfer is done to 7 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_8TRANSFERS 0x00000700U /*!< The transfer is done to 8 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_9TRANSFERS 0x00000800U /*!< The transfer is done to 9 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_10TRANSFERS 0x00000900U /*!< The transfer is done to 10 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_11TRANSFERS 0x00000A00U /*!< The transfer is done to 11 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_12TRANSFERS 0x00000B00U /*!< The transfer is done to 12 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_13TRANSFERS 0x00000C00U /*!< The transfer is done to 13 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_14TRANSFERS 0x00000D00U /*!< The transfer is done to 14 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_15TRANSFERS 0x00000E00U /*!< The transfer is done to 15 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_16TRANSFERS 0x00000F00U /*!< The transfer is done to 16 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_17TRANSFERS 0x00001000U /*!< The transfer is done to 17 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_18TRANSFERS 0x00001100U /*!< The transfer is done to 18 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +/** + * @} + */ + +/** @defgroup DMA_Handle_index TIM DMA Handle Index + * @{ + */ +#define TIM_DMA_ID_UPDATE 
((uint16_t) 0x0000) /*!< Index of the DMA handle used for Update DMA requests */ +#define TIM_DMA_ID_CC1 ((uint16_t) 0x0001) /*!< Index of the DMA handle used for Capture/Compare 1 DMA requests */ +#define TIM_DMA_ID_CC2 ((uint16_t) 0x0002) /*!< Index of the DMA handle used for Capture/Compare 2 DMA requests */ +#define TIM_DMA_ID_CC3 ((uint16_t) 0x0003) /*!< Index of the DMA handle used for Capture/Compare 3 DMA requests */ +#define TIM_DMA_ID_CC4 ((uint16_t) 0x0004) /*!< Index of the DMA handle used for Capture/Compare 4 DMA requests */ +#define TIM_DMA_ID_COMMUTATION ((uint16_t) 0x0005) /*!< Index of the DMA handle used for Commutation DMA requests */ +#define TIM_DMA_ID_TRIGGER ((uint16_t) 0x0006) /*!< Index of the DMA handle used for Trigger DMA requests */ +/** + * @} + */ + +/** @defgroup Channel_CC_State TIM Capture/Compare Channel State + * @{ + */ +#define TIM_CCx_ENABLE 0x00000001U /*!< Input or output channel is enabled */ +#define TIM_CCx_DISABLE 0x00000000U /*!< Input or output channel is disabled */ +#define TIM_CCxN_ENABLE 0x00000004U /*!< Complementary output channel is enabled */ +#define TIM_CCxN_DISABLE 0x00000000U /*!< Complementary output channel is enabled */ +/** + * @} + */ + +/** + * @} + */ +/* End of exported constants -------------------------------------------------*/ + +/* Exported macros -----------------------------------------------------------*/ +/** @defgroup TIM_Exported_Macros TIM Exported Macros + * @{ + */ + +/** @brief Reset TIM handle state. + * @param __HANDLE__ TIM handle. + * @retval None + */ +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) +#define __HAL_TIM_RESET_HANDLE_STATE(__HANDLE__) do { \ + (__HANDLE__)->State = HAL_TIM_STATE_RESET; \ + (__HANDLE__)->ChannelState[0] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelState[1] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelState[2] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelState[3] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelNState[0] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelNState[1] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelNState[2] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelNState[3] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->DMABurstState = HAL_DMA_BURST_STATE_RESET; \ + (__HANDLE__)->Base_MspInitCallback = NULL; \ + (__HANDLE__)->Base_MspDeInitCallback = NULL; \ + (__HANDLE__)->IC_MspInitCallback = NULL; \ + (__HANDLE__)->IC_MspDeInitCallback = NULL; \ + (__HANDLE__)->OC_MspInitCallback = NULL; \ + (__HANDLE__)->OC_MspDeInitCallback = NULL; \ + (__HANDLE__)->PWM_MspInitCallback = NULL; \ + (__HANDLE__)->PWM_MspDeInitCallback = NULL; \ + (__HANDLE__)->OnePulse_MspInitCallback = NULL; \ + (__HANDLE__)->OnePulse_MspDeInitCallback = NULL; \ + (__HANDLE__)->Encoder_MspInitCallback = NULL; \ + (__HANDLE__)->Encoder_MspDeInitCallback = NULL; \ + (__HANDLE__)->HallSensor_MspInitCallback = NULL; \ + (__HANDLE__)->HallSensor_MspDeInitCallback = NULL; \ + } while(0) +#else +#define __HAL_TIM_RESET_HANDLE_STATE(__HANDLE__) do { \ + (__HANDLE__)->State = HAL_TIM_STATE_RESET; \ + (__HANDLE__)->ChannelState[0] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelState[1] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelState[2] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelState[3] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelNState[0] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelNState[1] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelNState[2] = 
HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelNState[3] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->DMABurstState = HAL_DMA_BURST_STATE_RESET; \ + } while(0) +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + +/** + * @brief Enable the TIM peripheral. + * @param __HANDLE__ TIM handle + * @retval None + */ +#define __HAL_TIM_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR1|=(TIM_CR1_CEN)) + +/** + * @brief Enable the TIM main Output. + * @param __HANDLE__ TIM handle + * @retval None + */ +#define __HAL_TIM_MOE_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->BDTR|=(TIM_BDTR_MOE)) + +/** + * @brief Disable the TIM peripheral. + * @param __HANDLE__ TIM handle + * @retval None + */ +#define __HAL_TIM_DISABLE(__HANDLE__) \ + do { \ + if (((__HANDLE__)->Instance->CCER & TIM_CCER_CCxE_MASK) == 0UL) \ + { \ + if(((__HANDLE__)->Instance->CCER & TIM_CCER_CCxNE_MASK) == 0UL) \ + { \ + (__HANDLE__)->Instance->CR1 &= ~(TIM_CR1_CEN); \ + } \ + } \ + } while(0) + +/** + * @brief Disable the TIM main Output. + * @param __HANDLE__ TIM handle + * @retval None + * @note The Main Output Enable of a timer instance is disabled only if all the CCx and CCxN channels have been + * disabled + */ +#define __HAL_TIM_MOE_DISABLE(__HANDLE__) \ + do { \ + if (((__HANDLE__)->Instance->CCER & TIM_CCER_CCxE_MASK) == 0UL) \ + { \ + if(((__HANDLE__)->Instance->CCER & TIM_CCER_CCxNE_MASK) == 0UL) \ + { \ + (__HANDLE__)->Instance->BDTR &= ~(TIM_BDTR_MOE); \ + } \ + } \ + } while(0) + +/** + * @brief Disable the TIM main Output. + * @param __HANDLE__ TIM handle + * @retval None + * @note The Main Output Enable of a timer instance is disabled unconditionally + */ +#define __HAL_TIM_MOE_DISABLE_UNCONDITIONALLY(__HANDLE__) (__HANDLE__)->Instance->BDTR &= ~(TIM_BDTR_MOE) + +/** @brief Enable the specified TIM interrupt. + * @param __HANDLE__ specifies the TIM Handle. + * @param __INTERRUPT__ specifies the TIM interrupt source to enable. + * This parameter can be one of the following values: + * @arg TIM_IT_UPDATE: Update interrupt + * @arg TIM_IT_CC1: Capture/Compare 1 interrupt + * @arg TIM_IT_CC2: Capture/Compare 2 interrupt + * @arg TIM_IT_CC3: Capture/Compare 3 interrupt + * @arg TIM_IT_CC4: Capture/Compare 4 interrupt + * @arg TIM_IT_COM: Commutation interrupt + * @arg TIM_IT_TRIGGER: Trigger interrupt + * @arg TIM_IT_BREAK: Break interrupt + * @retval None + */ +#define __HAL_TIM_ENABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->DIER |= (__INTERRUPT__)) + +/** @brief Disable the specified TIM interrupt. + * @param __HANDLE__ specifies the TIM Handle. + * @param __INTERRUPT__ specifies the TIM interrupt source to disable. + * This parameter can be one of the following values: + * @arg TIM_IT_UPDATE: Update interrupt + * @arg TIM_IT_CC1: Capture/Compare 1 interrupt + * @arg TIM_IT_CC2: Capture/Compare 2 interrupt + * @arg TIM_IT_CC3: Capture/Compare 3 interrupt + * @arg TIM_IT_CC4: Capture/Compare 4 interrupt + * @arg TIM_IT_COM: Commutation interrupt + * @arg TIM_IT_TRIGGER: Trigger interrupt + * @arg TIM_IT_BREAK: Break interrupt + * @retval None + */ +#define __HAL_TIM_DISABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->DIER &= ~(__INTERRUPT__)) + +/** @brief Enable the specified DMA request. + * @param __HANDLE__ specifies the TIM Handle. + * @param __DMA__ specifies the TIM DMA request to enable. 
+ * This parameter can be one of the following values: + * @arg TIM_DMA_UPDATE: Update DMA request + * @arg TIM_DMA_CC1: Capture/Compare 1 DMA request + * @arg TIM_DMA_CC2: Capture/Compare 2 DMA request + * @arg TIM_DMA_CC3: Capture/Compare 3 DMA request + * @arg TIM_DMA_CC4: Capture/Compare 4 DMA request + * @arg TIM_DMA_COM: Commutation DMA request + * @arg TIM_DMA_TRIGGER: Trigger DMA request + * @retval None + */ +#define __HAL_TIM_ENABLE_DMA(__HANDLE__, __DMA__) ((__HANDLE__)->Instance->DIER |= (__DMA__)) + +/** @brief Disable the specified DMA request. + * @param __HANDLE__ specifies the TIM Handle. + * @param __DMA__ specifies the TIM DMA request to disable. + * This parameter can be one of the following values: + * @arg TIM_DMA_UPDATE: Update DMA request + * @arg TIM_DMA_CC1: Capture/Compare 1 DMA request + * @arg TIM_DMA_CC2: Capture/Compare 2 DMA request + * @arg TIM_DMA_CC3: Capture/Compare 3 DMA request + * @arg TIM_DMA_CC4: Capture/Compare 4 DMA request + * @arg TIM_DMA_COM: Commutation DMA request + * @arg TIM_DMA_TRIGGER: Trigger DMA request + * @retval None + */ +#define __HAL_TIM_DISABLE_DMA(__HANDLE__, __DMA__) ((__HANDLE__)->Instance->DIER &= ~(__DMA__)) + +/** @brief Check whether the specified TIM interrupt flag is set or not. + * @param __HANDLE__ specifies the TIM Handle. + * @param __FLAG__ specifies the TIM interrupt flag to check. + * This parameter can be one of the following values: + * @arg TIM_FLAG_UPDATE: Update interrupt flag + * @arg TIM_FLAG_CC1: Capture/Compare 1 interrupt flag + * @arg TIM_FLAG_CC2: Capture/Compare 2 interrupt flag + * @arg TIM_FLAG_CC3: Capture/Compare 3 interrupt flag + * @arg TIM_FLAG_CC4: Capture/Compare 4 interrupt flag + * @arg TIM_FLAG_COM: Commutation interrupt flag + * @arg TIM_FLAG_TRIGGER: Trigger interrupt flag + * @arg TIM_FLAG_BREAK: Break interrupt flag + * @arg TIM_FLAG_CC1OF: Capture/Compare 1 overcapture flag + * @arg TIM_FLAG_CC2OF: Capture/Compare 2 overcapture flag + * @arg TIM_FLAG_CC3OF: Capture/Compare 3 overcapture flag + * @arg TIM_FLAG_CC4OF: Capture/Compare 4 overcapture flag + * @retval The new state of __FLAG__ (TRUE or FALSE). + */ +#define __HAL_TIM_GET_FLAG(__HANDLE__, __FLAG__) (((__HANDLE__)->Instance->SR &(__FLAG__)) == (__FLAG__)) + +/** @brief Clear the specified TIM interrupt flag. + * @param __HANDLE__ specifies the TIM Handle. + * @param __FLAG__ specifies the TIM interrupt flag to clear. + * This parameter can be one of the following values: + * @arg TIM_FLAG_UPDATE: Update interrupt flag + * @arg TIM_FLAG_CC1: Capture/Compare 1 interrupt flag + * @arg TIM_FLAG_CC2: Capture/Compare 2 interrupt flag + * @arg TIM_FLAG_CC3: Capture/Compare 3 interrupt flag + * @arg TIM_FLAG_CC4: Capture/Compare 4 interrupt flag + * @arg TIM_FLAG_COM: Commutation interrupt flag + * @arg TIM_FLAG_TRIGGER: Trigger interrupt flag + * @arg TIM_FLAG_BREAK: Break interrupt flag + * @arg TIM_FLAG_CC1OF: Capture/Compare 1 overcapture flag + * @arg TIM_FLAG_CC2OF: Capture/Compare 2 overcapture flag + * @arg TIM_FLAG_CC3OF: Capture/Compare 3 overcapture flag + * @arg TIM_FLAG_CC4OF: Capture/Compare 4 overcapture flag + * @retval The new state of __FLAG__ (TRUE or FALSE). + */ +#define __HAL_TIM_CLEAR_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->Instance->SR = ~(__FLAG__)) + +/** + * @brief Check whether the specified TIM interrupt source is enabled or not. + * @param __HANDLE__ TIM handle + * @param __INTERRUPT__ specifies the TIM interrupt source to check. 
+ * This parameter can be one of the following values: + * @arg TIM_IT_UPDATE: Update interrupt + * @arg TIM_IT_CC1: Capture/Compare 1 interrupt + * @arg TIM_IT_CC2: Capture/Compare 2 interrupt + * @arg TIM_IT_CC3: Capture/Compare 3 interrupt + * @arg TIM_IT_CC4: Capture/Compare 4 interrupt + * @arg TIM_IT_COM: Commutation interrupt + * @arg TIM_IT_TRIGGER: Trigger interrupt + * @arg TIM_IT_BREAK: Break interrupt + * @retval The state of TIM_IT (SET or RESET). + */ +#define __HAL_TIM_GET_IT_SOURCE(__HANDLE__, __INTERRUPT__) ((((__HANDLE__)->Instance->DIER & (__INTERRUPT__)) \ + == (__INTERRUPT__)) ? SET : RESET) + +/** @brief Clear the TIM interrupt pending bits. + * @param __HANDLE__ TIM handle + * @param __INTERRUPT__ specifies the interrupt pending bit to clear. + * This parameter can be one of the following values: + * @arg TIM_IT_UPDATE: Update interrupt + * @arg TIM_IT_CC1: Capture/Compare 1 interrupt + * @arg TIM_IT_CC2: Capture/Compare 2 interrupt + * @arg TIM_IT_CC3: Capture/Compare 3 interrupt + * @arg TIM_IT_CC4: Capture/Compare 4 interrupt + * @arg TIM_IT_COM: Commutation interrupt + * @arg TIM_IT_TRIGGER: Trigger interrupt + * @arg TIM_IT_BREAK: Break interrupt + * @retval None + */ +#define __HAL_TIM_CLEAR_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->SR = ~(__INTERRUPT__)) + +/** + * @brief Indicates whether or not the TIM Counter is used as downcounter. + * @param __HANDLE__ TIM handle. + * @retval False (Counter used as upcounter) or True (Counter used as downcounter) + * @note This macro is particularly useful to get the counting mode when the timer operates in Center-aligned mode + * or Encoder mode. + */ +#define __HAL_TIM_IS_TIM_COUNTING_DOWN(__HANDLE__) (((__HANDLE__)->Instance->CR1 &(TIM_CR1_DIR)) == (TIM_CR1_DIR)) + +/** + * @brief Set the TIM Prescaler on runtime. + * @param __HANDLE__ TIM handle. + * @param __PRESC__ specifies the Prescaler new value. + * @retval None + */ +#define __HAL_TIM_SET_PRESCALER(__HANDLE__, __PRESC__) ((__HANDLE__)->Instance->PSC = (__PRESC__)) + +/** + * @brief Set the TIM Counter Register value on runtime. + * @param __HANDLE__ TIM handle. + * @param __COUNTER__ specifies the Counter register new value. + * @retval None + */ +#define __HAL_TIM_SET_COUNTER(__HANDLE__, __COUNTER__) ((__HANDLE__)->Instance->CNT = (__COUNTER__)) + +/** + * @brief Get the TIM Counter Register value on runtime. + * @param __HANDLE__ TIM handle. + * @retval 16-bit or 32-bit value of the timer counter register (TIMx_CNT) + */ +#define __HAL_TIM_GET_COUNTER(__HANDLE__) ((__HANDLE__)->Instance->CNT) + +/** + * @brief Set the TIM Autoreload Register value on runtime without calling another time any Init function. + * @param __HANDLE__ TIM handle. + * @param __AUTORELOAD__ specifies the Counter register new value. + * @retval None + */ +#define __HAL_TIM_SET_AUTORELOAD(__HANDLE__, __AUTORELOAD__) \ + do{ \ + (__HANDLE__)->Instance->ARR = (__AUTORELOAD__); \ + (__HANDLE__)->Init.Period = (__AUTORELOAD__); \ + } while(0) + +/** + * @brief Get the TIM Autoreload Register value on runtime. + * @param __HANDLE__ TIM handle. + * @retval 16-bit or 32-bit value of the timer auto-reload register(TIMx_ARR) + */ +#define __HAL_TIM_GET_AUTORELOAD(__HANDLE__) ((__HANDLE__)->Instance->ARR) + +/** + * @brief Set the TIM Clock Division value on runtime without calling another time any Init function. + * @param __HANDLE__ TIM handle. + * @param __CKD__ specifies the clock division value. 
+ * This parameter can be one of the following value: + * @arg TIM_CLOCKDIVISION_DIV1: tDTS=tCK_INT + * @arg TIM_CLOCKDIVISION_DIV2: tDTS=2*tCK_INT + * @arg TIM_CLOCKDIVISION_DIV4: tDTS=4*tCK_INT + * @retval None + */ +#define __HAL_TIM_SET_CLOCKDIVISION(__HANDLE__, __CKD__) \ + do{ \ + (__HANDLE__)->Instance->CR1 &= (~TIM_CR1_CKD); \ + (__HANDLE__)->Instance->CR1 |= (__CKD__); \ + (__HANDLE__)->Init.ClockDivision = (__CKD__); \ + } while(0) + +/** + * @brief Get the TIM Clock Division value on runtime. + * @param __HANDLE__ TIM handle. + * @retval The clock division can be one of the following values: + * @arg TIM_CLOCKDIVISION_DIV1: tDTS=tCK_INT + * @arg TIM_CLOCKDIVISION_DIV2: tDTS=2*tCK_INT + * @arg TIM_CLOCKDIVISION_DIV4: tDTS=4*tCK_INT + */ +#define __HAL_TIM_GET_CLOCKDIVISION(__HANDLE__) ((__HANDLE__)->Instance->CR1 & TIM_CR1_CKD) + +/** + * @brief Set the TIM Input Capture prescaler on runtime without calling another time HAL_TIM_IC_ConfigChannel() + * function. + * @param __HANDLE__ TIM handle. + * @param __CHANNEL__ TIM Channels to be configured. + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @param __ICPSC__ specifies the Input Capture4 prescaler new value. + * This parameter can be one of the following values: + * @arg TIM_ICPSC_DIV1: no prescaler + * @arg TIM_ICPSC_DIV2: capture is done once every 2 events + * @arg TIM_ICPSC_DIV4: capture is done once every 4 events + * @arg TIM_ICPSC_DIV8: capture is done once every 8 events + * @retval None + */ +#define __HAL_TIM_SET_ICPRESCALER(__HANDLE__, __CHANNEL__, __ICPSC__) \ + do{ \ + TIM_RESET_ICPRESCALERVALUE((__HANDLE__), (__CHANNEL__)); \ + TIM_SET_ICPRESCALERVALUE((__HANDLE__), (__CHANNEL__), (__ICPSC__)); \ + } while(0) + +/** + * @brief Get the TIM Input Capture prescaler on runtime. + * @param __HANDLE__ TIM handle. + * @param __CHANNEL__ TIM Channels to be configured. + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: get input capture 1 prescaler value + * @arg TIM_CHANNEL_2: get input capture 2 prescaler value + * @arg TIM_CHANNEL_3: get input capture 3 prescaler value + * @arg TIM_CHANNEL_4: get input capture 4 prescaler value + * @retval The input capture prescaler can be one of the following values: + * @arg TIM_ICPSC_DIV1: no prescaler + * @arg TIM_ICPSC_DIV2: capture is done once every 2 events + * @arg TIM_ICPSC_DIV4: capture is done once every 4 events + * @arg TIM_ICPSC_DIV8: capture is done once every 8 events + */ +#define __HAL_TIM_GET_ICPRESCALER(__HANDLE__, __CHANNEL__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCMR1 & TIM_CCMR1_IC1PSC) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? (((__HANDLE__)->Instance->CCMR1 & TIM_CCMR1_IC2PSC) >> 8U) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCMR2 & TIM_CCMR2_IC3PSC) :\ + (((__HANDLE__)->Instance->CCMR2 & TIM_CCMR2_IC4PSC)) >> 8U) + +/** + * @brief Set the TIM Capture Compare Register value on runtime without calling another time ConfigChannel function. + * @param __HANDLE__ TIM handle. + * @param __CHANNEL__ TIM Channels to be configured. 
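+// Usage sketch (illustrative only; htim3 is an assumed handle with channel 1 in input capture mode):
+//   __HAL_TIM_SET_CLOCKDIVISION(&htim3, TIM_CLOCKDIVISION_DIV2);        // tDTS = 2 * tCK_INT
+//   __HAL_TIM_SET_ICPRESCALER(&htim3, TIM_CHANNEL_1, TIM_ICPSC_DIV8);   // capture every 8th event
+//   uint32_t icpsc = __HAL_TIM_GET_ICPRESCALER(&htim3, TIM_CHANNEL_1);  // read the setting back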
+ * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @param __COMPARE__ specifies the Capture Compare register new value. + * @retval None + */ +#define __HAL_TIM_SET_COMPARE(__HANDLE__, __CHANNEL__, __COMPARE__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCR1 = (__COMPARE__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCR2 = (__COMPARE__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCR3 = (__COMPARE__)) :\ + ((__HANDLE__)->Instance->CCR4 = (__COMPARE__))) + +/** + * @brief Get the TIM Capture Compare Register value on runtime. + * @param __HANDLE__ TIM handle. + * @param __CHANNEL__ TIM Channel associated with the capture compare register + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: get capture/compare 1 register value + * @arg TIM_CHANNEL_2: get capture/compare 2 register value + * @arg TIM_CHANNEL_3: get capture/compare 3 register value + * @arg TIM_CHANNEL_4: get capture/compare 4 register value + * @retval 16-bit or 32-bit value of the capture/compare register (TIMx_CCRy) + */ +#define __HAL_TIM_GET_COMPARE(__HANDLE__, __CHANNEL__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCR1) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCR2) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCR3) :\ + ((__HANDLE__)->Instance->CCR4)) + +/** + * @brief Set the TIM Output compare preload. + * @param __HANDLE__ TIM handle. + * @param __CHANNEL__ TIM Channels to be configured. + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval None + */ +#define __HAL_TIM_ENABLE_OCxPRELOAD(__HANDLE__, __CHANNEL__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCMR1 |= TIM_CCMR1_OC1PE) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCMR1 |= TIM_CCMR1_OC2PE) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCMR2 |= TIM_CCMR2_OC3PE) :\ + ((__HANDLE__)->Instance->CCMR2 |= TIM_CCMR2_OC4PE)) + +/** + * @brief Reset the TIM Output compare preload. + * @param __HANDLE__ TIM handle. + * @param __CHANNEL__ TIM Channels to be configured. + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval None + */ +#define __HAL_TIM_DISABLE_OCxPRELOAD(__HANDLE__, __CHANNEL__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCMR1 &= ~TIM_CCMR1_OC1PE) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCMR1 &= ~TIM_CCMR1_OC2PE) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCMR2 &= ~TIM_CCMR2_OC3PE) :\ + ((__HANDLE__)->Instance->CCMR2 &= ~TIM_CCMR2_OC4PE)) + +/** + * @brief Enable fast mode for a given channel. + * @param __HANDLE__ TIM handle. + * @param __CHANNEL__ TIM Channels to be configured. 
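+// Usage sketch (illustrative only; htim1 is an assumed handle running PWM on channel 1):
+//   __HAL_TIM_ENABLE_OCxPRELOAD(&htim1, TIM_CHANNEL_1);            // new CCR1 taken at the next update event
+//   __HAL_TIM_SET_COMPARE(&htim1, TIM_CHANNEL_1,
+//                         __HAL_TIM_GET_AUTORELOAD(&htim1) / 2U);  // roughly 50 % duty cycle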
+ * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @note When fast mode is enabled an active edge on the trigger input acts + * like a compare match on CCx output. Delay to sample the trigger + * input and to activate CCx output is reduced to 3 clock cycles. + * @note Fast mode acts only if the channel is configured in PWM1 or PWM2 mode. + * @retval None + */ +#define __HAL_TIM_ENABLE_OCxFAST(__HANDLE__, __CHANNEL__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCMR1 |= TIM_CCMR1_OC1FE) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCMR1 |= TIM_CCMR1_OC2FE) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCMR2 |= TIM_CCMR2_OC3FE) :\ + ((__HANDLE__)->Instance->CCMR2 |= TIM_CCMR2_OC4FE)) + +/** + * @brief Disable fast mode for a given channel. + * @param __HANDLE__ TIM handle. + * @param __CHANNEL__ TIM Channels to be configured. + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @note When fast mode is disabled CCx output behaves normally depending + * on counter and CCRx values even when the trigger is ON. The minimum + * delay to activate CCx output when an active edge occurs on the + * trigger input is 5 clock cycles. + * @retval None + */ +#define __HAL_TIM_DISABLE_OCxFAST(__HANDLE__, __CHANNEL__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCMR1 &= ~TIM_CCMR1_OC1FE) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCMR1 &= ~TIM_CCMR1_OC2FE) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCMR2 &= ~TIM_CCMR2_OC3FE) :\ + ((__HANDLE__)->Instance->CCMR2 &= ~TIM_CCMR2_OC4FE)) + +/** + * @brief Set the Update Request Source (URS) bit of the TIMx_CR1 register. + * @param __HANDLE__ TIM handle. + * @note When the URS bit of the TIMx_CR1 register is set, only counter + * overflow/underflow generates an update interrupt or DMA request (if + * enabled) + * @retval None + */ +#define __HAL_TIM_URS_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR1|= TIM_CR1_URS) + +/** + * @brief Reset the Update Request Source (URS) bit of the TIMx_CR1 register. + * @param __HANDLE__ TIM handle. + * @note When the URS bit of the TIMx_CR1 register is reset, any of the + * following events generate an update interrupt or DMA request (if + * enabled): + * _ Counter overflow underflow + * _ Setting the UG bit + * _ Update generation through the slave mode controller + * @retval None + */ +#define __HAL_TIM_URS_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR1&=~TIM_CR1_URS) + +/** + * @brief Set the TIM Capture x input polarity on runtime. + * @param __HANDLE__ TIM handle. + * @param __CHANNEL__ TIM Channels to be configured. 
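+// Usage sketch (illustrative only; htim1 is an assumed handle): with URS set, software UG events and
+// slave-mode-controller resets no longer raise the update interrupt/DMA request, only counter
+// overflow/underflow does.
+//   __HAL_TIM_URS_ENABLE(&htim1);
+//   __HAL_TIM_ENABLE_OCxFAST(&htim1, TIM_CHANNEL_1);   // effective only in PWM1/PWM2 mode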
+ * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @param __POLARITY__ Polarity for TIx source + * @arg TIM_INPUTCHANNELPOLARITY_RISING: Rising Edge + * @arg TIM_INPUTCHANNELPOLARITY_FALLING: Falling Edge + * @arg TIM_INPUTCHANNELPOLARITY_BOTHEDGE: Rising and Falling Edge + * @retval None + */ +#define __HAL_TIM_SET_CAPTUREPOLARITY(__HANDLE__, __CHANNEL__, __POLARITY__) \ + do{ \ + TIM_RESET_CAPTUREPOLARITY((__HANDLE__), (__CHANNEL__)); \ + TIM_SET_CAPTUREPOLARITY((__HANDLE__), (__CHANNEL__), (__POLARITY__)); \ + }while(0) + +/** @brief Select the Capture/compare DMA request source. + * @param __HANDLE__ specifies the TIM Handle. + * @param __CCDMA__ specifies Capture/compare DMA request source + * This parameter can be one of the following values: + * @arg TIM_CCDMAREQUEST_CC: CCx DMA request generated on Capture/Compare event + * @arg TIM_CCDMAREQUEST_UPDATE: CCx DMA request generated on Update event + * @retval None + */ +#define __HAL_TIM_SELECT_CCDMAREQUEST(__HANDLE__, __CCDMA__) \ + MODIFY_REG((__HANDLE__)->Instance->CR2, TIM_CR2_CCDS, (__CCDMA__)) + +/** + * @} + */ +/* End of exported macros ----------------------------------------------------*/ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup TIM_Private_Constants TIM Private Constants + * @{ + */ +/* The counter of a timer instance is disabled only if all the CCx and CCxN + channels have been disabled */ +#define TIM_CCER_CCxE_MASK ((uint32_t)(TIM_CCER_CC1E | TIM_CCER_CC2E | TIM_CCER_CC3E | TIM_CCER_CC4E)) +#define TIM_CCER_CCxNE_MASK ((uint32_t)(TIM_CCER_CC1NE | TIM_CCER_CC2NE | TIM_CCER_CC3NE)) +/** + * @} + */ +/* End of private constants --------------------------------------------------*/ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup TIM_Private_Macros TIM Private Macros + * @{ + */ +#define IS_TIM_CLEARINPUT_SOURCE(__MODE__) (((__MODE__) == TIM_CLEARINPUTSOURCE_NONE) || \ + ((__MODE__) == TIM_CLEARINPUTSOURCE_ETR)) + +#define IS_TIM_DMA_BASE(__BASE__) (((__BASE__) == TIM_DMABASE_CR1) || \ + ((__BASE__) == TIM_DMABASE_CR2) || \ + ((__BASE__) == TIM_DMABASE_SMCR) || \ + ((__BASE__) == TIM_DMABASE_DIER) || \ + ((__BASE__) == TIM_DMABASE_SR) || \ + ((__BASE__) == TIM_DMABASE_EGR) || \ + ((__BASE__) == TIM_DMABASE_CCMR1) || \ + ((__BASE__) == TIM_DMABASE_CCMR2) || \ + ((__BASE__) == TIM_DMABASE_CCER) || \ + ((__BASE__) == TIM_DMABASE_CNT) || \ + ((__BASE__) == TIM_DMABASE_PSC) || \ + ((__BASE__) == TIM_DMABASE_ARR) || \ + ((__BASE__) == TIM_DMABASE_RCR) || \ + ((__BASE__) == TIM_DMABASE_CCR1) || \ + ((__BASE__) == TIM_DMABASE_CCR2) || \ + ((__BASE__) == TIM_DMABASE_CCR3) || \ + ((__BASE__) == TIM_DMABASE_CCR4) || \ + ((__BASE__) == TIM_DMABASE_BDTR)) + +#define IS_TIM_EVENT_SOURCE(__SOURCE__) ((((__SOURCE__) & 0xFFFFFF00U) == 0x00000000U) && ((__SOURCE__) != 0x00000000U)) + +#define IS_TIM_COUNTER_MODE(__MODE__) (((__MODE__) == TIM_COUNTERMODE_UP) || \ + ((__MODE__) == TIM_COUNTERMODE_DOWN) || \ + ((__MODE__) == TIM_COUNTERMODE_CENTERALIGNED1) || \ + ((__MODE__) == TIM_COUNTERMODE_CENTERALIGNED2) || \ + ((__MODE__) == TIM_COUNTERMODE_CENTERALIGNED3)) + +#define IS_TIM_CLOCKDIVISION_DIV(__DIV__) (((__DIV__) == TIM_CLOCKDIVISION_DIV1) || \ + ((__DIV__) == TIM_CLOCKDIVISION_DIV2) || \ + ((__DIV__) == TIM_CLOCKDIVISION_DIV4)) + +#define 
IS_TIM_AUTORELOAD_PRELOAD(PRELOAD) (((PRELOAD) == TIM_AUTORELOAD_PRELOAD_DISABLE) || \ + ((PRELOAD) == TIM_AUTORELOAD_PRELOAD_ENABLE)) + +#define IS_TIM_FAST_STATE(__STATE__) (((__STATE__) == TIM_OCFAST_DISABLE) || \ + ((__STATE__) == TIM_OCFAST_ENABLE)) + +#define IS_TIM_OC_POLARITY(__POLARITY__) (((__POLARITY__) == TIM_OCPOLARITY_HIGH) || \ + ((__POLARITY__) == TIM_OCPOLARITY_LOW)) + +#define IS_TIM_OCN_POLARITY(__POLARITY__) (((__POLARITY__) == TIM_OCNPOLARITY_HIGH) || \ + ((__POLARITY__) == TIM_OCNPOLARITY_LOW)) + +#define IS_TIM_OCIDLE_STATE(__STATE__) (((__STATE__) == TIM_OCIDLESTATE_SET) || \ + ((__STATE__) == TIM_OCIDLESTATE_RESET)) + +#define IS_TIM_OCNIDLE_STATE(__STATE__) (((__STATE__) == TIM_OCNIDLESTATE_SET) || \ + ((__STATE__) == TIM_OCNIDLESTATE_RESET)) + +#define IS_TIM_ENCODERINPUT_POLARITY(__POLARITY__) (((__POLARITY__) == TIM_ENCODERINPUTPOLARITY_RISING) || \ + ((__POLARITY__) == TIM_ENCODERINPUTPOLARITY_FALLING)) + +#define IS_TIM_IC_POLARITY(__POLARITY__) (((__POLARITY__) == TIM_ICPOLARITY_RISING) || \ + ((__POLARITY__) == TIM_ICPOLARITY_FALLING) || \ + ((__POLARITY__) == TIM_ICPOLARITY_BOTHEDGE)) + +#define IS_TIM_IC_SELECTION(__SELECTION__) (((__SELECTION__) == TIM_ICSELECTION_DIRECTTI) || \ + ((__SELECTION__) == TIM_ICSELECTION_INDIRECTTI) || \ + ((__SELECTION__) == TIM_ICSELECTION_TRC)) + +#define IS_TIM_IC_PRESCALER(__PRESCALER__) (((__PRESCALER__) == TIM_ICPSC_DIV1) || \ + ((__PRESCALER__) == TIM_ICPSC_DIV2) || \ + ((__PRESCALER__) == TIM_ICPSC_DIV4) || \ + ((__PRESCALER__) == TIM_ICPSC_DIV8)) + +#define IS_TIM_OPM_MODE(__MODE__) (((__MODE__) == TIM_OPMODE_SINGLE) || \ + ((__MODE__) == TIM_OPMODE_REPETITIVE)) + +#define IS_TIM_ENCODER_MODE(__MODE__) (((__MODE__) == TIM_ENCODERMODE_TI1) || \ + ((__MODE__) == TIM_ENCODERMODE_TI2) || \ + ((__MODE__) == TIM_ENCODERMODE_TI12)) + +#define IS_TIM_DMA_SOURCE(__SOURCE__) ((((__SOURCE__) & 0xFFFF80FFU) == 0x00000000U) && ((__SOURCE__) != 0x00000000U)) + +#define IS_TIM_CHANNELS(__CHANNEL__) (((__CHANNEL__) == TIM_CHANNEL_1) || \ + ((__CHANNEL__) == TIM_CHANNEL_2) || \ + ((__CHANNEL__) == TIM_CHANNEL_3) || \ + ((__CHANNEL__) == TIM_CHANNEL_4) || \ + ((__CHANNEL__) == TIM_CHANNEL_ALL)) + +#define IS_TIM_OPM_CHANNELS(__CHANNEL__) (((__CHANNEL__) == TIM_CHANNEL_1) || \ + ((__CHANNEL__) == TIM_CHANNEL_2)) + +#define IS_TIM_PERIOD(__HANDLE__, __PERIOD__) ((IS_TIM_32B_COUNTER_INSTANCE(((__HANDLE__)->Instance)) == 0U) ? 
\ + (((__PERIOD__) > 0U) && ((__PERIOD__) <= 0x0000FFFFU)) : \ + ((__PERIOD__) > 0U)) + +#define IS_TIM_COMPLEMENTARY_CHANNELS(__CHANNEL__) (((__CHANNEL__) == TIM_CHANNEL_1) || \ + ((__CHANNEL__) == TIM_CHANNEL_2) || \ + ((__CHANNEL__) == TIM_CHANNEL_3)) + +#define IS_TIM_CLOCKSOURCE(__CLOCK__) (((__CLOCK__) == TIM_CLOCKSOURCE_INTERNAL) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_ETRMODE1) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_ETRMODE2) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_TI1ED) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_TI1) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_TI2) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_ITR0) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_ITR1) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_ITR2) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_ITR3)) + +#define IS_TIM_CLOCKPOLARITY(__POLARITY__) (((__POLARITY__) == TIM_CLOCKPOLARITY_INVERTED) || \ + ((__POLARITY__) == TIM_CLOCKPOLARITY_NONINVERTED) || \ + ((__POLARITY__) == TIM_CLOCKPOLARITY_RISING) || \ + ((__POLARITY__) == TIM_CLOCKPOLARITY_FALLING) || \ + ((__POLARITY__) == TIM_CLOCKPOLARITY_BOTHEDGE)) + +#define IS_TIM_CLOCKPRESCALER(__PRESCALER__) (((__PRESCALER__) == TIM_CLOCKPRESCALER_DIV1) || \ + ((__PRESCALER__) == TIM_CLOCKPRESCALER_DIV2) || \ + ((__PRESCALER__) == TIM_CLOCKPRESCALER_DIV4) || \ + ((__PRESCALER__) == TIM_CLOCKPRESCALER_DIV8)) + +#define IS_TIM_CLOCKFILTER(__ICFILTER__) ((__ICFILTER__) <= 0xFU) + +#define IS_TIM_CLEARINPUT_POLARITY(__POLARITY__) (((__POLARITY__) == TIM_CLEARINPUTPOLARITY_INVERTED) || \ + ((__POLARITY__) == TIM_CLEARINPUTPOLARITY_NONINVERTED)) + +#define IS_TIM_CLEARINPUT_PRESCALER(__PRESCALER__) (((__PRESCALER__) == TIM_CLEARINPUTPRESCALER_DIV1) || \ + ((__PRESCALER__) == TIM_CLEARINPUTPRESCALER_DIV2) || \ + ((__PRESCALER__) == TIM_CLEARINPUTPRESCALER_DIV4) || \ + ((__PRESCALER__) == TIM_CLEARINPUTPRESCALER_DIV8)) + +#define IS_TIM_CLEARINPUT_FILTER(__ICFILTER__) ((__ICFILTER__) <= 0xFU) + +#define IS_TIM_OSSR_STATE(__STATE__) (((__STATE__) == TIM_OSSR_ENABLE) || \ + ((__STATE__) == TIM_OSSR_DISABLE)) + +#define IS_TIM_OSSI_STATE(__STATE__) (((__STATE__) == TIM_OSSI_ENABLE) || \ + ((__STATE__) == TIM_OSSI_DISABLE)) + +#define IS_TIM_LOCK_LEVEL(__LEVEL__) (((__LEVEL__) == TIM_LOCKLEVEL_OFF) || \ + ((__LEVEL__) == TIM_LOCKLEVEL_1) || \ + ((__LEVEL__) == TIM_LOCKLEVEL_2) || \ + ((__LEVEL__) == TIM_LOCKLEVEL_3)) + +#define IS_TIM_BREAK_FILTER(__BRKFILTER__) ((__BRKFILTER__) <= 0xFUL) + +#define IS_TIM_BREAK_STATE(__STATE__) (((__STATE__) == TIM_BREAK_ENABLE) || \ + ((__STATE__) == TIM_BREAK_DISABLE)) + +#define IS_TIM_BREAK_POLARITY(__POLARITY__) (((__POLARITY__) == TIM_BREAKPOLARITY_LOW) || \ + ((__POLARITY__) == TIM_BREAKPOLARITY_HIGH)) + +#define IS_TIM_AUTOMATIC_OUTPUT_STATE(__STATE__) (((__STATE__) == TIM_AUTOMATICOUTPUT_ENABLE) || \ + ((__STATE__) == TIM_AUTOMATICOUTPUT_DISABLE)) + +#define IS_TIM_TRGO_SOURCE(__SOURCE__) (((__SOURCE__) == TIM_TRGO_RESET) || \ + ((__SOURCE__) == TIM_TRGO_ENABLE) || \ + ((__SOURCE__) == TIM_TRGO_UPDATE) || \ + ((__SOURCE__) == TIM_TRGO_OC1) || \ + ((__SOURCE__) == TIM_TRGO_OC1REF) || \ + ((__SOURCE__) == TIM_TRGO_OC2REF) || \ + ((__SOURCE__) == TIM_TRGO_OC3REF) || \ + ((__SOURCE__) == TIM_TRGO_OC4REF)) + +#define IS_TIM_MSM_STATE(__STATE__) (((__STATE__) == TIM_MASTERSLAVEMODE_ENABLE) || \ + ((__STATE__) == TIM_MASTERSLAVEMODE_DISABLE)) + +#define IS_TIM_SLAVE_MODE(__MODE__) (((__MODE__) == TIM_SLAVEMODE_DISABLE) || \ + ((__MODE__) == TIM_SLAVEMODE_RESET) || \ + ((__MODE__) == TIM_SLAVEMODE_GATED) || \ + ((__MODE__) == TIM_SLAVEMODE_TRIGGER) || \ + ((__MODE__) == 
TIM_SLAVEMODE_EXTERNAL1)) + +#define IS_TIM_PWM_MODE(__MODE__) (((__MODE__) == TIM_OCMODE_PWM1) || \ + ((__MODE__) == TIM_OCMODE_PWM2)) + +#define IS_TIM_OC_MODE(__MODE__) (((__MODE__) == TIM_OCMODE_TIMING) || \ + ((__MODE__) == TIM_OCMODE_ACTIVE) || \ + ((__MODE__) == TIM_OCMODE_INACTIVE) || \ + ((__MODE__) == TIM_OCMODE_TOGGLE) || \ + ((__MODE__) == TIM_OCMODE_FORCED_ACTIVE) || \ + ((__MODE__) == TIM_OCMODE_FORCED_INACTIVE)) + +#define IS_TIM_TRIGGER_SELECTION(__SELECTION__) (((__SELECTION__) == TIM_TS_ITR0) || \ + ((__SELECTION__) == TIM_TS_ITR1) || \ + ((__SELECTION__) == TIM_TS_ITR2) || \ + ((__SELECTION__) == TIM_TS_ITR3) || \ + ((__SELECTION__) == TIM_TS_TI1F_ED) || \ + ((__SELECTION__) == TIM_TS_TI1FP1) || \ + ((__SELECTION__) == TIM_TS_TI2FP2) || \ + ((__SELECTION__) == TIM_TS_ETRF)) + +#define IS_TIM_INTERNAL_TRIGGEREVENT_SELECTION(__SELECTION__) (((__SELECTION__) == TIM_TS_ITR0) || \ + ((__SELECTION__) == TIM_TS_ITR1) || \ + ((__SELECTION__) == TIM_TS_ITR2) || \ + ((__SELECTION__) == TIM_TS_ITR3) || \ + ((__SELECTION__) == TIM_TS_NONE)) + +#define IS_TIM_TRIGGERPOLARITY(__POLARITY__) (((__POLARITY__) == TIM_TRIGGERPOLARITY_INVERTED ) || \ + ((__POLARITY__) == TIM_TRIGGERPOLARITY_NONINVERTED) || \ + ((__POLARITY__) == TIM_TRIGGERPOLARITY_RISING ) || \ + ((__POLARITY__) == TIM_TRIGGERPOLARITY_FALLING ) || \ + ((__POLARITY__) == TIM_TRIGGERPOLARITY_BOTHEDGE )) + +#define IS_TIM_TRIGGERPRESCALER(__PRESCALER__) (((__PRESCALER__) == TIM_TRIGGERPRESCALER_DIV1) || \ + ((__PRESCALER__) == TIM_TRIGGERPRESCALER_DIV2) || \ + ((__PRESCALER__) == TIM_TRIGGERPRESCALER_DIV4) || \ + ((__PRESCALER__) == TIM_TRIGGERPRESCALER_DIV8)) + +#define IS_TIM_TRIGGERFILTER(__ICFILTER__) ((__ICFILTER__) <= 0xFU) + +#define IS_TIM_TI1SELECTION(__TI1SELECTION__) (((__TI1SELECTION__) == TIM_TI1SELECTION_CH1) || \ + ((__TI1SELECTION__) == TIM_TI1SELECTION_XORCOMBINATION)) + +#define IS_TIM_DMA_LENGTH(__LENGTH__) (((__LENGTH__) == TIM_DMABURSTLENGTH_1TRANSFER) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_2TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_3TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_4TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_5TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_6TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_7TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_8TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_9TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_10TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_11TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_12TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_13TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_14TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_15TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_16TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_17TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_18TRANSFERS)) + +#define IS_TIM_DMA_DATA_LENGTH(LENGTH) (((LENGTH) >= 0x1U) && ((LENGTH) < 0x10000U)) + +#define IS_TIM_IC_FILTER(__ICFILTER__) ((__ICFILTER__) <= 0xFU) + +#define IS_TIM_DEADTIME(__DEADTIME__) ((__DEADTIME__) <= 0xFFU) + +#define IS_TIM_SLAVEMODE_TRIGGER_ENABLED(__TRIGGER__) ((__TRIGGER__) == TIM_SLAVEMODE_TRIGGER) + +#define TIM_SET_ICPRESCALERVALUE(__HANDLE__, __CHANNEL__, __ICPSC__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCMR1 |= (__ICPSC__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCMR1 |= ((__ICPSC__) << 8U)) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? 
((__HANDLE__)->Instance->CCMR2 |= (__ICPSC__)) :\ + ((__HANDLE__)->Instance->CCMR2 |= ((__ICPSC__) << 8U))) + +#define TIM_RESET_ICPRESCALERVALUE(__HANDLE__, __CHANNEL__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCMR1 &= ~TIM_CCMR1_IC1PSC) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCMR1 &= ~TIM_CCMR1_IC2PSC) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCMR2 &= ~TIM_CCMR2_IC3PSC) :\ + ((__HANDLE__)->Instance->CCMR2 &= ~TIM_CCMR2_IC4PSC)) + +#define TIM_SET_CAPTUREPOLARITY(__HANDLE__, __CHANNEL__, __POLARITY__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCER |= (__POLARITY__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCER |= ((__POLARITY__) << 4U)) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCER |= ((__POLARITY__) << 8U)) :\ + ((__HANDLE__)->Instance->CCER |= (((__POLARITY__) << 12U)))) + +#define TIM_RESET_CAPTUREPOLARITY(__HANDLE__, __CHANNEL__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCER &= ~(TIM_CCER_CC1P | TIM_CCER_CC1NP)) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCER &= ~(TIM_CCER_CC2P | TIM_CCER_CC2NP)) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCER &= ~(TIM_CCER_CC3P | TIM_CCER_CC3NP)) :\ + ((__HANDLE__)->Instance->CCER &= ~(TIM_CCER_CC4P | TIM_CCER_CC4NP))) + +#define TIM_CHANNEL_STATE_GET(__HANDLE__, __CHANNEL__)\ + (((__CHANNEL__) == TIM_CHANNEL_1) ? (__HANDLE__)->ChannelState[0] :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? (__HANDLE__)->ChannelState[1] :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? (__HANDLE__)->ChannelState[2] :\ + (__HANDLE__)->ChannelState[3]) + +#define TIM_CHANNEL_STATE_SET(__HANDLE__, __CHANNEL__, __CHANNEL_STATE__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->ChannelState[0] = (__CHANNEL_STATE__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->ChannelState[1] = (__CHANNEL_STATE__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->ChannelState[2] = (__CHANNEL_STATE__)) :\ + ((__HANDLE__)->ChannelState[3] = (__CHANNEL_STATE__))) + +#define TIM_CHANNEL_STATE_SET_ALL(__HANDLE__, __CHANNEL_STATE__) do { \ + (__HANDLE__)->ChannelState[0] = (__CHANNEL_STATE__); \ + (__HANDLE__)->ChannelState[1] = (__CHANNEL_STATE__); \ + (__HANDLE__)->ChannelState[2] = (__CHANNEL_STATE__); \ + (__HANDLE__)->ChannelState[3] = (__CHANNEL_STATE__); \ + } while(0) + +#define TIM_CHANNEL_N_STATE_GET(__HANDLE__, __CHANNEL__)\ + (((__CHANNEL__) == TIM_CHANNEL_1) ? (__HANDLE__)->ChannelNState[0] :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? (__HANDLE__)->ChannelNState[1] :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? (__HANDLE__)->ChannelNState[2] :\ + (__HANDLE__)->ChannelNState[3]) + +#define TIM_CHANNEL_N_STATE_SET(__HANDLE__, __CHANNEL__, __CHANNEL_STATE__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->ChannelNState[0] = (__CHANNEL_STATE__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->ChannelNState[1] = (__CHANNEL_STATE__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? 
((__HANDLE__)->ChannelNState[2] = (__CHANNEL_STATE__)) :\ + ((__HANDLE__)->ChannelNState[3] = (__CHANNEL_STATE__))) + +#define TIM_CHANNEL_N_STATE_SET_ALL(__HANDLE__, __CHANNEL_STATE__) do { \ + (__HANDLE__)->ChannelNState[0] = \ + (__CHANNEL_STATE__); \ + (__HANDLE__)->ChannelNState[1] = \ + (__CHANNEL_STATE__); \ + (__HANDLE__)->ChannelNState[2] = \ + (__CHANNEL_STATE__); \ + (__HANDLE__)->ChannelNState[3] = \ + (__CHANNEL_STATE__); \ + } while(0) + +/** + * @} + */ +/* End of private macros -----------------------------------------------------*/ + +/* Include TIM HAL Extended module */ +#include "stm32f4xx_hal_tim_ex.h" + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup TIM_Exported_Functions TIM Exported Functions + * @{ + */ + +/** @addtogroup TIM_Exported_Functions_Group1 TIM Time Base functions + * @brief Time Base functions + * @{ + */ +/* Time Base functions ********************************************************/ +HAL_StatusTypeDef HAL_TIM_Base_Init(TIM_HandleTypeDef *htim); +HAL_StatusTypeDef HAL_TIM_Base_DeInit(TIM_HandleTypeDef *htim); +void HAL_TIM_Base_MspInit(TIM_HandleTypeDef *htim); +void HAL_TIM_Base_MspDeInit(TIM_HandleTypeDef *htim); +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIM_Base_Start(TIM_HandleTypeDef *htim); +HAL_StatusTypeDef HAL_TIM_Base_Stop(TIM_HandleTypeDef *htim); +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIM_Base_Start_IT(TIM_HandleTypeDef *htim); +HAL_StatusTypeDef HAL_TIM_Base_Stop_IT(TIM_HandleTypeDef *htim); +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_TIM_Base_Start_DMA(TIM_HandleTypeDef *htim, const uint32_t *pData, uint16_t Length); +HAL_StatusTypeDef HAL_TIM_Base_Stop_DMA(TIM_HandleTypeDef *htim); +/** + * @} + */ + +/** @addtogroup TIM_Exported_Functions_Group2 TIM Output Compare functions + * @brief TIM Output Compare functions + * @{ + */ +/* Timer Output Compare functions *********************************************/ +HAL_StatusTypeDef HAL_TIM_OC_Init(TIM_HandleTypeDef *htim); +HAL_StatusTypeDef HAL_TIM_OC_DeInit(TIM_HandleTypeDef *htim); +void HAL_TIM_OC_MspInit(TIM_HandleTypeDef *htim); +void HAL_TIM_OC_MspDeInit(TIM_HandleTypeDef *htim); +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIM_OC_Start(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_OC_Stop(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIM_OC_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_OC_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_TIM_OC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, const uint32_t *pData, + uint16_t Length); +HAL_StatusTypeDef HAL_TIM_OC_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel); +/** + * @} + */ + +/** @addtogroup TIM_Exported_Functions_Group3 TIM PWM functions + * @brief TIM PWM functions + * @{ + */ +/* Timer PWM functions ********************************************************/ +HAL_StatusTypeDef HAL_TIM_PWM_Init(TIM_HandleTypeDef *htim); +HAL_StatusTypeDef HAL_TIM_PWM_DeInit(TIM_HandleTypeDef *htim); +void HAL_TIM_PWM_MspInit(TIM_HandleTypeDef *htim); +void HAL_TIM_PWM_MspDeInit(TIM_HandleTypeDef *htim); +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIM_PWM_Start(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_PWM_Stop(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef 
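+// Usage sketch (illustrative only; htim2 and its Init fields are assumed to be filled in by the
+// application, and the TIM2 IRQ handler is assumed to call HAL_TIM_IRQHandler()):
+//   HAL_TIM_Base_Init(&htim2);
+//   HAL_TIM_Base_Start_IT(&htim2);   // update events now end up in HAL_TIM_PeriodElapsedCallback()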
HAL_TIM_PWM_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_PWM_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_TIM_PWM_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, const uint32_t *pData, + uint16_t Length); +HAL_StatusTypeDef HAL_TIM_PWM_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel); +/** + * @} + */ + +/** @addtogroup TIM_Exported_Functions_Group4 TIM Input Capture functions + * @brief TIM Input Capture functions + * @{ + */ +/* Timer Input Capture functions **********************************************/ +HAL_StatusTypeDef HAL_TIM_IC_Init(TIM_HandleTypeDef *htim); +HAL_StatusTypeDef HAL_TIM_IC_DeInit(TIM_HandleTypeDef *htim); +void HAL_TIM_IC_MspInit(TIM_HandleTypeDef *htim); +void HAL_TIM_IC_MspDeInit(TIM_HandleTypeDef *htim); +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIM_IC_Start(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_IC_Stop(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIM_IC_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_IC_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_TIM_IC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length); +HAL_StatusTypeDef HAL_TIM_IC_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel); +/** + * @} + */ + +/** @addtogroup TIM_Exported_Functions_Group5 TIM One Pulse functions + * @brief TIM One Pulse functions + * @{ + */ +/* Timer One Pulse functions **************************************************/ +HAL_StatusTypeDef HAL_TIM_OnePulse_Init(TIM_HandleTypeDef *htim, uint32_t OnePulseMode); +HAL_StatusTypeDef HAL_TIM_OnePulse_DeInit(TIM_HandleTypeDef *htim); +void HAL_TIM_OnePulse_MspInit(TIM_HandleTypeDef *htim); +void HAL_TIM_OnePulse_MspDeInit(TIM_HandleTypeDef *htim); +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIM_OnePulse_Start(TIM_HandleTypeDef *htim, uint32_t OutputChannel); +HAL_StatusTypeDef HAL_TIM_OnePulse_Stop(TIM_HandleTypeDef *htim, uint32_t OutputChannel); +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIM_OnePulse_Start_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel); +HAL_StatusTypeDef HAL_TIM_OnePulse_Stop_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel); +/** + * @} + */ + +/** @addtogroup TIM_Exported_Functions_Group6 TIM Encoder functions + * @brief TIM Encoder functions + * @{ + */ +/* Timer Encoder functions ****************************************************/ +HAL_StatusTypeDef HAL_TIM_Encoder_Init(TIM_HandleTypeDef *htim, const TIM_Encoder_InitTypeDef *sConfig); +HAL_StatusTypeDef HAL_TIM_Encoder_DeInit(TIM_HandleTypeDef *htim); +void HAL_TIM_Encoder_MspInit(TIM_HandleTypeDef *htim); +void HAL_TIM_Encoder_MspDeInit(TIM_HandleTypeDef *htim); +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIM_Encoder_Start(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_Encoder_Stop(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIM_Encoder_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_Encoder_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_TIM_Encoder_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData1, + uint32_t *pData2, uint16_t Length); +HAL_StatusTypeDef HAL_TIM_Encoder_Stop_DMA(TIM_HandleTypeDef *htim, 
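+// Usage sketch (illustrative only; htim3 is an assumed handle prepared with HAL_TIM_IC_Init() and
+// HAL_TIM_IC_ConfigChannel() for channel 1):
+//   __HAL_TIM_SET_CAPTUREPOLARITY(&htim3, TIM_CHANNEL_1, TIM_INPUTCHANNELPOLARITY_FALLING);
+//   HAL_TIM_IC_Start_IT(&htim3, TIM_CHANNEL_1);
+//   uint32_t edge = HAL_TIM_ReadCapturedValue(&htim3, TIM_CHANNEL_1);   // latest captured counter value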
uint32_t Channel); +/** + * @} + */ + +/** @addtogroup TIM_Exported_Functions_Group7 TIM IRQ handler management + * @brief IRQ handler management + * @{ + */ +/* Interrupt Handler functions ***********************************************/ +void HAL_TIM_IRQHandler(TIM_HandleTypeDef *htim); +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group8 TIM Peripheral Control functions + * @brief Peripheral Control functions + * @{ + */ +/* Control functions *********************************************************/ +HAL_StatusTypeDef HAL_TIM_OC_ConfigChannel(TIM_HandleTypeDef *htim, const TIM_OC_InitTypeDef *sConfig, + uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_PWM_ConfigChannel(TIM_HandleTypeDef *htim, const TIM_OC_InitTypeDef *sConfig, + uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_IC_ConfigChannel(TIM_HandleTypeDef *htim, const TIM_IC_InitTypeDef *sConfig, + uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_OnePulse_ConfigChannel(TIM_HandleTypeDef *htim, TIM_OnePulse_InitTypeDef *sConfig, + uint32_t OutputChannel, uint32_t InputChannel); +HAL_StatusTypeDef HAL_TIM_ConfigOCrefClear(TIM_HandleTypeDef *htim, + const TIM_ClearInputConfigTypeDef *sClearInputConfig, + uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_ConfigClockSource(TIM_HandleTypeDef *htim, const TIM_ClockConfigTypeDef *sClockSourceConfig); +HAL_StatusTypeDef HAL_TIM_ConfigTI1Input(TIM_HandleTypeDef *htim, uint32_t TI1_Selection); +HAL_StatusTypeDef HAL_TIM_SlaveConfigSynchro(TIM_HandleTypeDef *htim, const TIM_SlaveConfigTypeDef *sSlaveConfig); +HAL_StatusTypeDef HAL_TIM_SlaveConfigSynchro_IT(TIM_HandleTypeDef *htim, const TIM_SlaveConfigTypeDef *sSlaveConfig); +HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, const uint32_t *BurstBuffer, + uint32_t BurstLength); +HAL_StatusTypeDef HAL_TIM_DMABurst_MultiWriteStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, const uint32_t *BurstBuffer, + uint32_t BurstLength, uint32_t DataLength); +HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStop(TIM_HandleTypeDef *htim, uint32_t BurstRequestSrc); +HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, uint32_t *BurstBuffer, uint32_t BurstLength); +HAL_StatusTypeDef HAL_TIM_DMABurst_MultiReadStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, uint32_t *BurstBuffer, + uint32_t BurstLength, uint32_t DataLength); +HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStop(TIM_HandleTypeDef *htim, uint32_t BurstRequestSrc); +HAL_StatusTypeDef HAL_TIM_GenerateEvent(TIM_HandleTypeDef *htim, uint32_t EventSource); +uint32_t HAL_TIM_ReadCapturedValue(const TIM_HandleTypeDef *htim, uint32_t Channel); +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group9 TIM Callbacks functions + * @brief TIM Callbacks functions + * @{ + */ +/* Callback in non blocking modes (Interrupt and DMA) *************************/ +void HAL_TIM_PeriodElapsedCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_PeriodElapsedHalfCpltCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_OC_DelayElapsedCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_IC_CaptureCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_IC_CaptureHalfCpltCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_PWM_PulseFinishedCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_PWM_PulseFinishedHalfCpltCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_TriggerCallback(TIM_HandleTypeDef *htim); +void 
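+// Usage sketch (illustrative only): these callbacks have weak default implementations in the HAL
+// source, so an application simply overrides the ones it needs, e.g. for a timer started with
+// HAL_TIM_Base_Start_IT():
+//   void HAL_TIM_PeriodElapsedCallback(TIM_HandleTypeDef *htim)
+//   {
+//     if (htim->Instance == TIM2) { /* period elapsed on TIM2 */ }
+//   }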
HAL_TIM_TriggerHalfCpltCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_ErrorCallback(TIM_HandleTypeDef *htim); + +/* Callbacks Register/UnRegister functions ***********************************/ +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) +HAL_StatusTypeDef HAL_TIM_RegisterCallback(TIM_HandleTypeDef *htim, HAL_TIM_CallbackIDTypeDef CallbackID, + pTIM_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_TIM_UnRegisterCallback(TIM_HandleTypeDef *htim, HAL_TIM_CallbackIDTypeDef CallbackID); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group10 TIM Peripheral State functions + * @brief Peripheral State functions + * @{ + */ +/* Peripheral State functions ************************************************/ +HAL_TIM_StateTypeDef HAL_TIM_Base_GetState(const TIM_HandleTypeDef *htim); +HAL_TIM_StateTypeDef HAL_TIM_OC_GetState(const TIM_HandleTypeDef *htim); +HAL_TIM_StateTypeDef HAL_TIM_PWM_GetState(const TIM_HandleTypeDef *htim); +HAL_TIM_StateTypeDef HAL_TIM_IC_GetState(const TIM_HandleTypeDef *htim); +HAL_TIM_StateTypeDef HAL_TIM_OnePulse_GetState(const TIM_HandleTypeDef *htim); +HAL_TIM_StateTypeDef HAL_TIM_Encoder_GetState(const TIM_HandleTypeDef *htim); + +/* Peripheral Channel state functions ************************************************/ +HAL_TIM_ActiveChannel HAL_TIM_GetActiveChannel(const TIM_HandleTypeDef *htim); +HAL_TIM_ChannelStateTypeDef HAL_TIM_GetChannelState(const TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_TIM_DMABurstStateTypeDef HAL_TIM_DMABurstState(const TIM_HandleTypeDef *htim); +/** + * @} + */ + +/** + * @} + */ +/* End of exported functions -------------------------------------------------*/ + +/* Private functions----------------------------------------------------------*/ +/** @defgroup TIM_Private_Functions TIM Private Functions + * @{ + */ +void TIM_Base_SetConfig(TIM_TypeDef *TIMx, const TIM_Base_InitTypeDef *Structure); +void TIM_TI1_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, uint32_t TIM_ICFilter); +void TIM_OC2_SetConfig(TIM_TypeDef *TIMx, const TIM_OC_InitTypeDef *OC_Config); +void TIM_ETR_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ExtTRGPrescaler, + uint32_t TIM_ExtTRGPolarity, uint32_t ExtTRGFilter); + +void TIM_DMADelayPulseHalfCplt(DMA_HandleTypeDef *hdma); +void TIM_DMAError(DMA_HandleTypeDef *hdma); +void TIM_DMACaptureCplt(DMA_HandleTypeDef *hdma); +void TIM_DMACaptureHalfCplt(DMA_HandleTypeDef *hdma); +void TIM_CCxChannelCmd(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t ChannelState); + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) +void TIM_ResetCallback(TIM_HandleTypeDef *htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + +/** + * @} + */ +/* End of private functions --------------------------------------------------*/ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F4xx_HAL_TIM_H */ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_tim_ex.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_tim_ex.h new file mode 100644 index 0000000..561e9bb --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_tim_ex.h @@ -0,0 +1,355 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_tim_ex.h + * @author MCD Application Team + * @brief Header file of TIM HAL Extended module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. 
+ * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F4xx_HAL_TIM_EX_H +#define STM32F4xx_HAL_TIM_EX_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal_def.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @addtogroup TIMEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup TIMEx_Exported_Types TIM Extended Exported Types + * @{ + */ + +/** + * @brief TIM Hall sensor Configuration Structure definition + */ + +typedef struct +{ + uint32_t IC1Polarity; /*!< Specifies the active edge of the input signal. + This parameter can be a value of @ref TIM_Input_Capture_Polarity */ + + uint32_t IC1Prescaler; /*!< Specifies the Input Capture Prescaler. + This parameter can be a value of @ref TIM_Input_Capture_Prescaler */ + + uint32_t IC1Filter; /*!< Specifies the input capture filter. + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ + + uint32_t Commutation_Delay; /*!< Specifies the pulse value to be loaded into the Capture Compare Register. + This parameter can be a number between Min_Data = 0x0000 and Max_Data = 0xFFFF */ +} TIM_HallSensor_InitTypeDef; +/** + * @} + */ +/* End of exported types -----------------------------------------------------*/ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup TIMEx_Exported_Constants TIM Extended Exported Constants + * @{ + */ + +/** @defgroup TIMEx_Remap TIM Extended Remapping + * @{ + */ +#if defined (TIM2) +#if defined(TIM8) +#define TIM_TIM2_TIM8_TRGO 0x00000000U /*!< TIM2 ITR1 is connected to TIM8 TRGO */ +#endif /* TIM8 */ +#define TIM_TIM2_ETH_PTP TIM_OR_ITR1_RMP_0 /*!< TIM2 ITR1 is connected to PTP trigger output */ +#define TIM_TIM2_USBFS_SOF TIM_OR_ITR1_RMP_1 /*!< TIM2 ITR1 is connected to OTG FS SOF */ +#define TIM_TIM2_USBHS_SOF (TIM_OR_ITR1_RMP_1 | TIM_OR_ITR1_RMP_0) /*!< TIM2 ITR1 is connected to OTG HS SOF */ +#endif /* TIM2 */ + +#define TIM_TIM5_GPIO 0x00000000U /*!< TIM5 TI4 is connected to GPIO */ +#define TIM_TIM5_LSI TIM_OR_TI4_RMP_0 /*!< TIM5 TI4 is connected to LSI */ +#define TIM_TIM5_LSE TIM_OR_TI4_RMP_1 /*!< TIM5 TI4 is connected to LSE */ +#define TIM_TIM5_RTC (TIM_OR_TI4_RMP_1 | TIM_OR_TI4_RMP_0) /*!< TIM5 TI4 is connected to the RTC wakeup interrupt */ + +#define TIM_TIM11_GPIO 0x00000000U /*!< TIM11 TI1 is connected to GPIO */ +#define TIM_TIM11_HSE TIM_OR_TI1_RMP_1 /*!< TIM11 TI1 is connected to HSE_RTC clock */ +#if defined(SPDIFRX) +#define TIM_TIM11_SPDIFRX TIM_OR_TI1_RMP_0 /*!< TIM11 TI1 is connected to SPDIFRX_FRAME_SYNC */ +#endif /* SPDIFRX*/ + +#if defined(LPTIM_OR_TIM1_ITR2_RMP) && defined(LPTIM_OR_TIM5_ITR1_RMP) && defined(LPTIM_OR_TIM5_ITR1_RMP) +#define LPTIM_REMAP_MASK 0x10000000U + +#define TIM_TIM9_TIM3_TRGO LPTIM_REMAP_MASK /*!< TIM9 ITR1 is connected to TIM3 TRGO */ +#define TIM_TIM9_LPTIM (LPTIM_REMAP_MASK | LPTIM_OR_TIM9_ITR1_RMP) /*!< TIM9 ITR1 is connected to LPTIM1 output */ + +#define TIM_TIM5_TIM3_TRGO LPTIM_REMAP_MASK /*!< TIM5 ITR1 is connected to TIM3 TRGO */ +#define TIM_TIM5_LPTIM (LPTIM_REMAP_MASK | 
LPTIM_OR_TIM5_ITR1_RMP) /*!< TIM5 ITR1 is connected to LPTIM1 output */ + +#define TIM_TIM1_TIM3_TRGO LPTIM_REMAP_MASK /*!< TIM1 ITR2 is connected to TIM3 TRGO */ +#define TIM_TIM1_LPTIM (LPTIM_REMAP_MASK | LPTIM_OR_TIM1_ITR2_RMP) /*!< TIM1 ITR2 is connected to LPTIM1 output */ +#endif /* LPTIM_OR_TIM1_ITR2_RMP && LPTIM_OR_TIM5_ITR1_RMP && LPTIM_OR_TIM5_ITR1_RMP */ +/** + * @} + */ + +/** + * @} + */ +/* End of exported constants -------------------------------------------------*/ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup TIMEx_Exported_Macros TIM Extended Exported Macros + * @{ + */ + +/** + * @} + */ +/* End of exported macro -----------------------------------------------------*/ + +/* Private macro -------------------------------------------------------------*/ +/** @defgroup TIMEx_Private_Macros TIM Extended Private Macros + * @{ + */ +#if defined(SPDIFRX) +#define IS_TIM_REMAP(INSTANCE, TIM_REMAP) \ + ((((INSTANCE) == TIM2) && (((TIM_REMAP) == TIM_TIM2_TIM8_TRGO) || \ + ((TIM_REMAP) == TIM_TIM2_USBFS_SOF) || \ + ((TIM_REMAP) == TIM_TIM2_USBHS_SOF))) || \ + (((INSTANCE) == TIM5) && (((TIM_REMAP) == TIM_TIM5_GPIO) || \ + ((TIM_REMAP) == TIM_TIM5_LSI) || \ + ((TIM_REMAP) == TIM_TIM5_LSE) || \ + ((TIM_REMAP) == TIM_TIM5_RTC))) || \ + (((INSTANCE) == TIM11) && (((TIM_REMAP) == TIM_TIM11_GPIO) || \ + ((TIM_REMAP) == TIM_TIM11_SPDIFRX) || \ + ((TIM_REMAP) == TIM_TIM11_HSE)))) +#elif defined(TIM2) +#if defined(LPTIM_OR_TIM1_ITR2_RMP) && defined(LPTIM_OR_TIM5_ITR1_RMP) && defined(LPTIM_OR_TIM5_ITR1_RMP) +#define IS_TIM_REMAP(INSTANCE, TIM_REMAP) \ + ((((INSTANCE) == TIM2) && (((TIM_REMAP) == TIM_TIM2_TIM8_TRGO) || \ + ((TIM_REMAP) == TIM_TIM2_USBFS_SOF) || \ + ((TIM_REMAP) == TIM_TIM2_USBHS_SOF))) || \ + (((INSTANCE) == TIM5) && (((TIM_REMAP) == TIM_TIM5_GPIO) || \ + ((TIM_REMAP) == TIM_TIM5_LSI) || \ + ((TIM_REMAP) == TIM_TIM5_LSE) || \ + ((TIM_REMAP) == TIM_TIM5_RTC))) || \ + (((INSTANCE) == TIM11) && (((TIM_REMAP) == TIM_TIM11_GPIO) || \ + ((TIM_REMAP) == TIM_TIM11_HSE))) || \ + (((INSTANCE) == TIM1) && (((TIM_REMAP) == TIM_TIM1_TIM3_TRGO) || \ + ((TIM_REMAP) == TIM_TIM1_LPTIM))) || \ + (((INSTANCE) == TIM5) && (((TIM_REMAP) == TIM_TIM5_TIM3_TRGO) || \ + ((TIM_REMAP) == TIM_TIM5_LPTIM))) || \ + (((INSTANCE) == TIM9) && (((TIM_REMAP) == TIM_TIM9_TIM3_TRGO) || \ + ((TIM_REMAP) == TIM_TIM9_LPTIM)))) +#elif defined(TIM8) +#define IS_TIM_REMAP(INSTANCE, TIM_REMAP) \ + ((((INSTANCE) == TIM2) && (((TIM_REMAP) == TIM_TIM2_TIM8_TRGO) || \ + ((TIM_REMAP) == TIM_TIM2_USBFS_SOF) || \ + ((TIM_REMAP) == TIM_TIM2_USBHS_SOF))) || \ + (((INSTANCE) == TIM5) && (((TIM_REMAP) == TIM_TIM5_GPIO) || \ + ((TIM_REMAP) == TIM_TIM5_LSI) || \ + ((TIM_REMAP) == TIM_TIM5_LSE) || \ + ((TIM_REMAP) == TIM_TIM5_RTC))) || \ + (((INSTANCE) == TIM11) && (((TIM_REMAP) == TIM_TIM11_GPIO) || \ + ((TIM_REMAP) == TIM_TIM11_HSE)))) +#else +#define IS_TIM_REMAP(INSTANCE, TIM_REMAP) \ + ((((INSTANCE) == TIM2) && (((TIM_REMAP) == TIM_TIM2_ETH_PTP) || \ + ((TIM_REMAP) == TIM_TIM2_USBFS_SOF) || \ + ((TIM_REMAP) == TIM_TIM2_USBHS_SOF))) || \ + (((INSTANCE) == TIM5) && (((TIM_REMAP) == TIM_TIM5_GPIO) || \ + ((TIM_REMAP) == TIM_TIM5_LSI) || \ + ((TIM_REMAP) == TIM_TIM5_LSE) || \ + ((TIM_REMAP) == TIM_TIM5_RTC))) || \ + (((INSTANCE) == TIM11) && (((TIM_REMAP) == TIM_TIM11_GPIO) || \ + ((TIM_REMAP) == TIM_TIM11_HSE)))) +#endif /* LPTIM_OR_TIM1_ITR2_RMP && LPTIM_OR_TIM5_ITR1_RMP && LPTIM_OR_TIM5_ITR1_RMP */ +#else +#define IS_TIM_REMAP(INSTANCE, TIM_REMAP) \ + ((((INSTANCE) == TIM5) && 
(((TIM_REMAP) == TIM_TIM5_GPIO) || \ + ((TIM_REMAP) == TIM_TIM5_LSI) || \ + ((TIM_REMAP) == TIM_TIM5_LSE) || \ + ((TIM_REMAP) == TIM_TIM5_RTC))) || \ + (((INSTANCE) == TIM11) && (((TIM_REMAP) == TIM_TIM11_GPIO) || \ + ((TIM_REMAP) == TIM_TIM11_HSE)))) +#endif /* SPDIFRX */ + +/** + * @} + */ +/* End of private macro ------------------------------------------------------*/ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup TIMEx_Exported_Functions TIM Extended Exported Functions + * @{ + */ + +/** @addtogroup TIMEx_Exported_Functions_Group1 Extended Timer Hall Sensor functions + * @brief Timer Hall Sensor functions + * @{ + */ +/* Timer Hall Sensor functions **********************************************/ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Init(TIM_HandleTypeDef *htim, const TIM_HallSensor_InitTypeDef *sConfig); +HAL_StatusTypeDef HAL_TIMEx_HallSensor_DeInit(TIM_HandleTypeDef *htim); + +void HAL_TIMEx_HallSensor_MspInit(TIM_HandleTypeDef *htim); +void HAL_TIMEx_HallSensor_MspDeInit(TIM_HandleTypeDef *htim); + +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Start(TIM_HandleTypeDef *htim); +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop(TIM_HandleTypeDef *htim); +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Start_IT(TIM_HandleTypeDef *htim); +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop_IT(TIM_HandleTypeDef *htim); +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Start_DMA(TIM_HandleTypeDef *htim, uint32_t *pData, uint16_t Length); +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop_DMA(TIM_HandleTypeDef *htim); +/** + * @} + */ + +/** @addtogroup TIMEx_Exported_Functions_Group2 Extended Timer Complementary Output Compare functions + * @brief Timer Complementary Output Compare functions + * @{ + */ +/* Timer Complementary Output Compare functions *****************************/ +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Start(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIMEx_OCN_Stop(TIM_HandleTypeDef *htim, uint32_t Channel); + +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIMEx_OCN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel); + +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, const uint32_t *pData, + uint16_t Length); +HAL_StatusTypeDef HAL_TIMEx_OCN_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel); +/** + * @} + */ + +/** @addtogroup TIMEx_Exported_Functions_Group3 Extended Timer Complementary PWM functions + * @brief Timer Complementary PWM functions + * @{ + */ +/* Timer Complementary PWM functions ****************************************/ +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Start(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop(TIM_HandleTypeDef *htim, uint32_t Channel); + +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, const uint32_t *pData, + uint16_t Length); +HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel); +/** + * @} + */ + +/** @addtogroup TIMEx_Exported_Functions_Group4 Extended 
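+// Usage sketch (illustrative only; htim1 is an assumed handle with channel 1 configured through
+// HAL_TIM_PWM_ConfigChannel(), and the TIM_BreakDeadTimeConfigTypeDef field values are placeholders):
+//   TIM_BreakDeadTimeConfigTypeDef bdt = {0};
+//   bdt.OffStateRunMode  = TIM_OSSR_DISABLE;
+//   bdt.OffStateIDLEMode = TIM_OSSI_DISABLE;
+//   bdt.LockLevel        = TIM_LOCKLEVEL_OFF;
+//   bdt.DeadTime         = 100U;                       // 0..0xFF, see IS_TIM_DEADTIME()
+//   bdt.BreakState       = TIM_BREAK_DISABLE;
+//   bdt.BreakPolarity    = TIM_BREAKPOLARITY_HIGH;
+//   bdt.AutomaticOutput  = TIM_AUTOMATICOUTPUT_ENABLE;
+//   HAL_TIMEx_ConfigBreakDeadTime(&htim1, &bdt);        // declared below
+//   HAL_TIM_PWM_Start(&htim1, TIM_CHANNEL_1);           // main output CH1
+//   HAL_TIMEx_PWMN_Start(&htim1, TIM_CHANNEL_1);        // complementary output CH1N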
Timer Complementary One Pulse functions + * @brief Timer Complementary One Pulse functions + * @{ + */ +/* Timer Complementary One Pulse functions **********************************/ +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Start(TIM_HandleTypeDef *htim, uint32_t OutputChannel); +HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Stop(TIM_HandleTypeDef *htim, uint32_t OutputChannel); + +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Start_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel); +HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel); +/** + * @} + */ + +/** @addtogroup TIMEx_Exported_Functions_Group5 Extended Peripheral Control functions + * @brief Peripheral Control functions + * @{ + */ +/* Extended Control functions ************************************************/ +HAL_StatusTypeDef HAL_TIMEx_ConfigCommutEvent(TIM_HandleTypeDef *htim, uint32_t InputTrigger, + uint32_t CommutationSource); +HAL_StatusTypeDef HAL_TIMEx_ConfigCommutEvent_IT(TIM_HandleTypeDef *htim, uint32_t InputTrigger, + uint32_t CommutationSource); +HAL_StatusTypeDef HAL_TIMEx_ConfigCommutEvent_DMA(TIM_HandleTypeDef *htim, uint32_t InputTrigger, + uint32_t CommutationSource); +HAL_StatusTypeDef HAL_TIMEx_MasterConfigSynchronization(TIM_HandleTypeDef *htim, + const TIM_MasterConfigTypeDef *sMasterConfig); +HAL_StatusTypeDef HAL_TIMEx_ConfigBreakDeadTime(TIM_HandleTypeDef *htim, + const TIM_BreakDeadTimeConfigTypeDef *sBreakDeadTimeConfig); +HAL_StatusTypeDef HAL_TIMEx_RemapConfig(TIM_HandleTypeDef *htim, uint32_t Remap); +/** + * @} + */ + +/** @addtogroup TIMEx_Exported_Functions_Group6 Extended Callbacks functions + * @brief Extended Callbacks functions + * @{ + */ +/* Extended Callback **********************************************************/ +void HAL_TIMEx_CommutCallback(TIM_HandleTypeDef *htim); +void HAL_TIMEx_CommutHalfCpltCallback(TIM_HandleTypeDef *htim); +void HAL_TIMEx_BreakCallback(TIM_HandleTypeDef *htim); +/** + * @} + */ + +/** @addtogroup TIMEx_Exported_Functions_Group7 Extended Peripheral State functions + * @brief Extended Peripheral State functions + * @{ + */ +/* Extended Peripheral State functions ***************************************/ +HAL_TIM_StateTypeDef HAL_TIMEx_HallSensor_GetState(const TIM_HandleTypeDef *htim); +HAL_TIM_ChannelStateTypeDef HAL_TIMEx_GetChannelNState(const TIM_HandleTypeDef *htim, uint32_t ChannelN); +/** + * @} + */ + +/** + * @} + */ +/* End of exported functions -------------------------------------------------*/ + +/* Private functions----------------------------------------------------------*/ +/** @addtogroup TIMEx_Private_Functions TIM Extended Private Functions + * @{ + */ +void TIMEx_DMACommutationCplt(DMA_HandleTypeDef *hdma); +void TIMEx_DMACommutationHalfCplt(DMA_HandleTypeDef *hdma); +/** + * @} + */ +/* End of private functions --------------------------------------------------*/ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + + +#endif /* STM32F4xx_HAL_TIM_EX_H */ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_uart.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_uart.h new file mode 100644 index 0000000..e6ce82f --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_uart.h @@ -0,0 +1,909 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_uart.h + * @author MCD Application Team + * @brief Header file of UART HAL module. 
+ ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_HAL_UART_H +#define __STM32F4xx_HAL_UART_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal_def.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @addtogroup UART + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup UART_Exported_Types UART Exported Types + * @{ + */ + +/** + * @brief UART Init Structure definition + */ +typedef struct +{ + uint32_t BaudRate; /*!< This member configures the UART communication baud rate. + The baud rate is computed using the following formula: + - IntegerDivider = ((PCLKx) / (8 * (OVR8+1) * (huart->Init.BaudRate))) + - FractionalDivider = ((IntegerDivider - ((uint32_t) IntegerDivider)) * 8 * (OVR8+1)) + 0.5 + Where OVR8 is the "oversampling by 8 mode" configuration bit in the CR1 register. */ + + uint32_t WordLength; /*!< Specifies the number of data bits transmitted or received in a frame. + This parameter can be a value of @ref UART_Word_Length */ + + uint32_t StopBits; /*!< Specifies the number of stop bits transmitted. + This parameter can be a value of @ref UART_Stop_Bits */ + + uint32_t Parity; /*!< Specifies the parity mode. + This parameter can be a value of @ref UART_Parity + @note When parity is enabled, the computed parity is inserted + at the MSB position of the transmitted data (9th bit when + the word length is set to 9 data bits; 8th bit when the + word length is set to 8 data bits). */ + + uint32_t Mode; /*!< Specifies whether the Receive or Transmit mode is enabled or disabled. + This parameter can be a value of @ref UART_Mode */ + + uint32_t HwFlowCtl; /*!< Specifies whether the hardware flow control mode is enabled or disabled. + This parameter can be a value of @ref UART_Hardware_Flow_Control */ + + uint32_t OverSampling; /*!< Specifies whether the Over sampling 8 is enabled or disabled, to achieve higher speed (up to fPCLK/8). + This parameter can be a value of @ref UART_Over_Sampling */ +} UART_InitTypeDef; + +/** + * @brief HAL UART State structures definition + * @note HAL UART State value is a combination of 2 different substates: gState and RxState. + * - gState contains UART state information related to global Handle management + * and also information related to Tx operations. + * gState value coding follow below described bitmap : + * b7-b6 Error information + * 00 : No Error + * 01 : (Not Used) + * 10 : Timeout + * 11 : Error + * b5 Peripheral initialization status + * 0 : Reset (Peripheral not initialized) + * 1 : Init done (Peripheral initialized. 
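+// Usage sketch (illustrative only; huart2 is an assumed UART_HandleTypeDef, and the UART_WORDLENGTH_8B
+// family of constants plus HAL_UART_Init() are declared further down in this header):
+//   huart2.Instance          = USART2;
+//   huart2.Init.BaudRate     = 115200U;
+//   huart2.Init.WordLength   = UART_WORDLENGTH_8B;
+//   huart2.Init.StopBits     = UART_STOPBITS_1;
+//   huart2.Init.Parity       = UART_PARITY_NONE;
+//   huart2.Init.Mode         = UART_MODE_TX_RX;
+//   huart2.Init.HwFlowCtl    = UART_HWCONTROL_NONE;
+//   huart2.Init.OverSampling = UART_OVERSAMPLING_16;
+//   HAL_UART_Init(&huart2);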
HAL UART Init function already called) + * b4-b3 (not used) + * xx : Should be set to 00 + * b2 Intrinsic process state + * 0 : Ready + * 1 : Busy (Peripheral busy with some configuration or internal operations) + * b1 (not used) + * x : Should be set to 0 + * b0 Tx state + * 0 : Ready (no Tx operation ongoing) + * 1 : Busy (Tx operation ongoing) + * - RxState contains information related to Rx operations. + * RxState value coding follow below described bitmap : + * b7-b6 (not used) + * xx : Should be set to 00 + * b5 Peripheral initialization status + * 0 : Reset (Peripheral not initialized) + * 1 : Init done (Peripheral initialized) + * b4-b2 (not used) + * xxx : Should be set to 000 + * b1 Rx state + * 0 : Ready (no Rx operation ongoing) + * 1 : Busy (Rx operation ongoing) + * b0 (not used) + * x : Should be set to 0. + */ +typedef enum +{ + HAL_UART_STATE_RESET = 0x00U, /*!< Peripheral is not yet Initialized + Value is allowed for gState and RxState */ + HAL_UART_STATE_READY = 0x20U, /*!< Peripheral Initialized and ready for use + Value is allowed for gState and RxState */ + HAL_UART_STATE_BUSY = 0x24U, /*!< an internal process is ongoing + Value is allowed for gState only */ + HAL_UART_STATE_BUSY_TX = 0x21U, /*!< Data Transmission process is ongoing + Value is allowed for gState only */ + HAL_UART_STATE_BUSY_RX = 0x22U, /*!< Data Reception process is ongoing + Value is allowed for RxState only */ + HAL_UART_STATE_BUSY_TX_RX = 0x23U, /*!< Data Transmission and Reception process is ongoing + Not to be used for neither gState nor RxState. + Value is result of combination (Or) between gState and RxState values */ + HAL_UART_STATE_TIMEOUT = 0xA0U, /*!< Timeout state + Value is allowed for gState only */ + HAL_UART_STATE_ERROR = 0xE0U /*!< Error + Value is allowed for gState only */ +} HAL_UART_StateTypeDef; + +/** + * @brief HAL UART Reception type definition + * @note HAL UART Reception type value aims to identify which type of Reception is ongoing. + * This parameter can be a value of @ref UART_Reception_Type_Values : + * HAL_UART_RECEPTION_STANDARD = 0x00U, + * HAL_UART_RECEPTION_TOIDLE = 0x01U, + */ +typedef uint32_t HAL_UART_RxTypeTypeDef; + +/** + * @brief HAL UART Rx Event type definition + * @note HAL UART Rx Event type value aims to identify which type of Event has occurred + * leading to call of the RxEvent callback. 
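To make the init structure and state machine described above concrete, here is a minimal sketch of filling UART_InitTypeDef and bringing the peripheral from HAL_UART_STATE_RESET to HAL_UART_STATE_READY. The handle name, the USART2 instance and the 115200 baud value are illustrative assumptions; clock and GPIO setup is expected in HAL_UART_MspInit() as usual.

#include "stm32f4xx_hal.h"

UART_HandleTypeDef huart2;                        /* assumed handle name */

static void uart2_config(void)
{
  huart2.Instance          = USART2;              /* assumed instance */
  huart2.Init.BaudRate     = 115200U;             /* assumed baud rate */
  huart2.Init.WordLength   = UART_WORDLENGTH_8B;
  huart2.Init.StopBits     = UART_STOPBITS_1;
  huart2.Init.Parity       = UART_PARITY_NONE;
  huart2.Init.Mode         = UART_MODE_TX_RX;
  huart2.Init.HwFlowCtl    = UART_HWCONTROL_NONE;
  huart2.Init.OverSampling = UART_OVERSAMPLING_16;

  /* On success, gState and RxState both move to HAL_UART_STATE_READY. */
  if (HAL_UART_Init(&huart2) != HAL_OK)
  {
    while (1) { }                                 /* stay here on configuration error */
  }
}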
+ * This parameter can be a value of @ref UART_RxEvent_Type_Values : + * HAL_UART_RXEVENT_TC = 0x00U, + * HAL_UART_RXEVENT_HT = 0x01U, + * HAL_UART_RXEVENT_IDLE = 0x02U, + */ +typedef uint32_t HAL_UART_RxEventTypeTypeDef; + +/** + * @brief UART handle Structure definition + */ +typedef struct __UART_HandleTypeDef +{ + USART_TypeDef *Instance; /*!< UART registers base address */ + + UART_InitTypeDef Init; /*!< UART communication parameters */ + + const uint8_t *pTxBuffPtr; /*!< Pointer to UART Tx transfer Buffer */ + + uint16_t TxXferSize; /*!< UART Tx Transfer size */ + + __IO uint16_t TxXferCount; /*!< UART Tx Transfer Counter */ + + uint8_t *pRxBuffPtr; /*!< Pointer to UART Rx transfer Buffer */ + + uint16_t RxXferSize; /*!< UART Rx Transfer size */ + + __IO uint16_t RxXferCount; /*!< UART Rx Transfer Counter */ + + __IO HAL_UART_RxTypeTypeDef ReceptionType; /*!< Type of ongoing reception */ + + __IO HAL_UART_RxEventTypeTypeDef RxEventType; /*!< Type of Rx Event */ + + DMA_HandleTypeDef *hdmatx; /*!< UART Tx DMA Handle parameters */ + + DMA_HandleTypeDef *hdmarx; /*!< UART Rx DMA Handle parameters */ + + HAL_LockTypeDef Lock; /*!< Locking object */ + + __IO HAL_UART_StateTypeDef gState; /*!< UART state information related to global Handle management + and also related to Tx operations. + This parameter can be a value of @ref HAL_UART_StateTypeDef */ + + __IO HAL_UART_StateTypeDef RxState; /*!< UART state information related to Rx operations. + This parameter can be a value of @ref HAL_UART_StateTypeDef */ + + __IO uint32_t ErrorCode; /*!< UART Error code */ + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + void (* TxHalfCpltCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Tx Half Complete Callback */ + void (* TxCpltCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Tx Complete Callback */ + void (* RxHalfCpltCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Rx Half Complete Callback */ + void (* RxCpltCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Rx Complete Callback */ + void (* ErrorCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Error Callback */ + void (* AbortCpltCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Abort Complete Callback */ + void (* AbortTransmitCpltCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Abort Transmit Complete Callback */ + void (* AbortReceiveCpltCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Abort Receive Complete Callback */ + void (* WakeupCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Wakeup Callback */ + void (* RxEventCallback)(struct __UART_HandleTypeDef *huart, uint16_t Pos); /*!< UART Reception Event Callback */ + + void (* MspInitCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Msp Init callback */ + void (* MspDeInitCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Msp DeInit callback */ +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + +} UART_HandleTypeDef; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) +/** + * @brief HAL UART Callback ID enumeration definition + */ +typedef enum +{ + HAL_UART_TX_HALFCOMPLETE_CB_ID = 0x00U, /*!< UART Tx Half Complete Callback ID */ + HAL_UART_TX_COMPLETE_CB_ID = 0x01U, /*!< UART Tx Complete Callback ID */ + HAL_UART_RX_HALFCOMPLETE_CB_ID = 0x02U, /*!< UART Rx Half Complete Callback ID */ + HAL_UART_RX_COMPLETE_CB_ID = 0x03U, /*!< UART Rx Complete Callback ID */ + HAL_UART_ERROR_CB_ID = 0x04U, /*!< UART Error Callback ID */ + HAL_UART_ABORT_COMPLETE_CB_ID = 0x05U, /*!< UART Abort Complete Callback ID */ + 
HAL_UART_ABORT_TRANSMIT_COMPLETE_CB_ID = 0x06U, /*!< UART Abort Transmit Complete Callback ID */ + HAL_UART_ABORT_RECEIVE_COMPLETE_CB_ID = 0x07U, /*!< UART Abort Receive Complete Callback ID */ + HAL_UART_WAKEUP_CB_ID = 0x08U, /*!< UART Wakeup Callback ID */ + + HAL_UART_MSPINIT_CB_ID = 0x0BU, /*!< UART MspInit callback ID */ + HAL_UART_MSPDEINIT_CB_ID = 0x0CU /*!< UART MspDeInit callback ID */ + +} HAL_UART_CallbackIDTypeDef; + +/** + * @brief HAL UART Callback pointer definition + */ +typedef void (*pUART_CallbackTypeDef)(UART_HandleTypeDef *huart); /*!< pointer to an UART callback function */ +typedef void (*pUART_RxEventCallbackTypeDef)(struct __UART_HandleTypeDef *huart, uint16_t Pos); /*!< pointer to a UART Rx Event specific callback function */ + +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup UART_Exported_Constants UART Exported Constants + * @{ + */ + +/** @defgroup UART_Error_Code UART Error Code + * @{ + */ +#define HAL_UART_ERROR_NONE 0x00000000U /*!< No error */ +#define HAL_UART_ERROR_PE 0x00000001U /*!< Parity error */ +#define HAL_UART_ERROR_NE 0x00000002U /*!< Noise error */ +#define HAL_UART_ERROR_FE 0x00000004U /*!< Frame error */ +#define HAL_UART_ERROR_ORE 0x00000008U /*!< Overrun error */ +#define HAL_UART_ERROR_DMA 0x00000010U /*!< DMA transfer error */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) +#define HAL_UART_ERROR_INVALID_CALLBACK 0x00000020U /*!< Invalid Callback error */ +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +/** + * @} + */ + +/** @defgroup UART_Word_Length UART Word Length + * @{ + */ +#define UART_WORDLENGTH_8B 0x00000000U +#define UART_WORDLENGTH_9B ((uint32_t)USART_CR1_M) +/** + * @} + */ + +/** @defgroup UART_Stop_Bits UART Number of Stop Bits + * @{ + */ +#define UART_STOPBITS_1 0x00000000U +#define UART_STOPBITS_2 ((uint32_t)USART_CR2_STOP_1) +/** + * @} + */ + +/** @defgroup UART_Parity UART Parity + * @{ + */ +#define UART_PARITY_NONE 0x00000000U +#define UART_PARITY_EVEN ((uint32_t)USART_CR1_PCE) +#define UART_PARITY_ODD ((uint32_t)(USART_CR1_PCE | USART_CR1_PS)) +/** + * @} + */ + +/** @defgroup UART_Hardware_Flow_Control UART Hardware Flow Control + * @{ + */ +#define UART_HWCONTROL_NONE 0x00000000U +#define UART_HWCONTROL_RTS ((uint32_t)USART_CR3_RTSE) +#define UART_HWCONTROL_CTS ((uint32_t)USART_CR3_CTSE) +#define UART_HWCONTROL_RTS_CTS ((uint32_t)(USART_CR3_RTSE | USART_CR3_CTSE)) +/** + * @} + */ + +/** @defgroup UART_Mode UART Transfer Mode + * @{ + */ +#define UART_MODE_RX ((uint32_t)USART_CR1_RE) +#define UART_MODE_TX ((uint32_t)USART_CR1_TE) +#define UART_MODE_TX_RX ((uint32_t)(USART_CR1_TE | USART_CR1_RE)) +/** + * @} + */ + +/** @defgroup UART_State UART State + * @{ + */ +#define UART_STATE_DISABLE 0x00000000U +#define UART_STATE_ENABLE ((uint32_t)USART_CR1_UE) +/** + * @} + */ + +/** @defgroup UART_Over_Sampling UART Over Sampling + * @{ + */ +#define UART_OVERSAMPLING_16 0x00000000U +#define UART_OVERSAMPLING_8 ((uint32_t)USART_CR1_OVER8) +/** + * @} + */ + +/** @defgroup UART_LIN_Break_Detection_Length UART LIN Break Detection Length + * @{ + */ +#define UART_LINBREAKDETECTLENGTH_10B 0x00000000U +#define UART_LINBREAKDETECTLENGTH_11B ((uint32_t)USART_CR2_LBDL) +/** + * @} + */ + +/** @defgroup UART_WakeUp_functions UART Wakeup Functions + * @{ + */ +#define UART_WAKEUPMETHOD_IDLELINE 0x00000000U +#define UART_WAKEUPMETHOD_ADDRESSMARK ((uint32_t)USART_CR1_WAKE) +/** + * @} + */ + +/** @defgroup UART_Flags UART 
FLags + * Elements values convention: 0xXXXX + * - 0xXXXX : Flag mask in the SR register + * @{ + */ +#define UART_FLAG_CTS ((uint32_t)USART_SR_CTS) +#define UART_FLAG_LBD ((uint32_t)USART_SR_LBD) +#define UART_FLAG_TXE ((uint32_t)USART_SR_TXE) +#define UART_FLAG_TC ((uint32_t)USART_SR_TC) +#define UART_FLAG_RXNE ((uint32_t)USART_SR_RXNE) +#define UART_FLAG_IDLE ((uint32_t)USART_SR_IDLE) +#define UART_FLAG_ORE ((uint32_t)USART_SR_ORE) +#define UART_FLAG_NE ((uint32_t)USART_SR_NE) +#define UART_FLAG_FE ((uint32_t)USART_SR_FE) +#define UART_FLAG_PE ((uint32_t)USART_SR_PE) +/** + * @} + */ + +/** @defgroup UART_Interrupt_definition UART Interrupt Definitions + * Elements values convention: 0xY000XXXX + * - XXXX : Interrupt mask (16 bits) in the Y register + * - Y : Interrupt source register (2bits) + * - 0001: CR1 register + * - 0010: CR2 register + * - 0011: CR3 register + * @{ + */ + +#define UART_IT_PE ((uint32_t)(UART_CR1_REG_INDEX << 28U | USART_CR1_PEIE)) +#define UART_IT_TXE ((uint32_t)(UART_CR1_REG_INDEX << 28U | USART_CR1_TXEIE)) +#define UART_IT_TC ((uint32_t)(UART_CR1_REG_INDEX << 28U | USART_CR1_TCIE)) +#define UART_IT_RXNE ((uint32_t)(UART_CR1_REG_INDEX << 28U | USART_CR1_RXNEIE)) +#define UART_IT_IDLE ((uint32_t)(UART_CR1_REG_INDEX << 28U | USART_CR1_IDLEIE)) + +#define UART_IT_LBD ((uint32_t)(UART_CR2_REG_INDEX << 28U | USART_CR2_LBDIE)) + +#define UART_IT_CTS ((uint32_t)(UART_CR3_REG_INDEX << 28U | USART_CR3_CTSIE)) +#define UART_IT_ERR ((uint32_t)(UART_CR3_REG_INDEX << 28U | USART_CR3_EIE)) +/** + * @} + */ + +/** @defgroup UART_Reception_Type_Values UART Reception type values + * @{ + */ +#define HAL_UART_RECEPTION_STANDARD (0x00000000U) /*!< Standard reception */ +#define HAL_UART_RECEPTION_TOIDLE (0x00000001U) /*!< Reception till completion or IDLE event */ +/** + * @} + */ + +/** @defgroup UART_RxEvent_Type_Values UART RxEvent type values + * @{ + */ +#define HAL_UART_RXEVENT_TC (0x00000000U) /*!< RxEvent linked to Transfer Complete event */ +#define HAL_UART_RXEVENT_HT (0x00000001U) /*!< RxEvent linked to Half Transfer event */ +#define HAL_UART_RXEVENT_IDLE (0x00000002U) +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup UART_Exported_Macros UART Exported Macros + * @{ + */ + +/** @brief Reset UART handle gstate & RxState + * @param __HANDLE__ specifies the UART Handle. + * UART Handle selects the USARTx or UARTy peripheral + * (USART,UART availability and x,y values depending on device). + * @retval None + */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) +#define __HAL_UART_RESET_HANDLE_STATE(__HANDLE__) do{ \ + (__HANDLE__)->gState = HAL_UART_STATE_RESET; \ + (__HANDLE__)->RxState = HAL_UART_STATE_RESET; \ + (__HANDLE__)->MspInitCallback = NULL; \ + (__HANDLE__)->MspDeInitCallback = NULL; \ + } while(0U) +#else +#define __HAL_UART_RESET_HANDLE_STATE(__HANDLE__) do{ \ + (__HANDLE__)->gState = HAL_UART_STATE_RESET; \ + (__HANDLE__)->RxState = HAL_UART_STATE_RESET; \ + } while(0U) +#endif /*USE_HAL_UART_REGISTER_CALLBACKS */ + +/** @brief Flushes the UART DR register + * @param __HANDLE__ specifies the UART Handle. + * UART Handle selects the USARTx or UARTy peripheral + * (USART,UART availability and x,y values depending on device). + */ +#define __HAL_UART_FLUSH_DRREGISTER(__HANDLE__) ((__HANDLE__)->Instance->DR) + +/** @brief Checks whether the specified UART flag is set or not. + * @param __HANDLE__ specifies the UART Handle. 
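As a reading aid for the 0xY000XXXX convention used by the UART_IT_* definitions above, the following sketch unpacks one of them the same way the __HAL_UART_ENABLE_IT() macro further below does: bits 31:28 select CR1/CR2/CR3 and the low 16 bits carry the register mask. The choice of UART_IT_RXNE is purely illustrative.

#include "stm32f4xx_hal.h"

static void show_it_encoding(void)
{
  uint32_t it        = UART_IT_RXNE;              /* example interrupt definition */
  uint32_t reg_index = it >> 28U;                 /* 1 = CR1, 2 = CR2, 3 = CR3 (here: 1) */
  uint32_t bit_mask  = it & UART_IT_MASK;         /* here: USART_CR1_RXNEIE */

  (void)reg_index;                                /* values shown for illustration only */
  (void)bit_mask;
}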
+ * UART Handle selects the USARTx or UARTy peripheral + * (USART,UART availability and x,y values depending on device). + * @param __FLAG__ specifies the flag to check. + * This parameter can be one of the following values: + * @arg UART_FLAG_CTS: CTS Change flag (not available for UART4 and UART5) + * @arg UART_FLAG_LBD: LIN Break detection flag + * @arg UART_FLAG_TXE: Transmit data register empty flag + * @arg UART_FLAG_TC: Transmission Complete flag + * @arg UART_FLAG_RXNE: Receive data register not empty flag + * @arg UART_FLAG_IDLE: Idle Line detection flag + * @arg UART_FLAG_ORE: Overrun Error flag + * @arg UART_FLAG_NE: Noise Error flag + * @arg UART_FLAG_FE: Framing Error flag + * @arg UART_FLAG_PE: Parity Error flag + * @retval The new state of __FLAG__ (TRUE or FALSE). + */ +#define __HAL_UART_GET_FLAG(__HANDLE__, __FLAG__) (((__HANDLE__)->Instance->SR & (__FLAG__)) == (__FLAG__)) + +/** @brief Clears the specified UART pending flag. + * @param __HANDLE__ specifies the UART Handle. + * UART Handle selects the USARTx or UARTy peripheral + * (USART,UART availability and x,y values depending on device). + * @param __FLAG__ specifies the flag to check. + * This parameter can be any combination of the following values: + * @arg UART_FLAG_CTS: CTS Change flag (not available for UART4 and UART5). + * @arg UART_FLAG_LBD: LIN Break detection flag. + * @arg UART_FLAG_TC: Transmission Complete flag. + * @arg UART_FLAG_RXNE: Receive data register not empty flag. + * + * @note PE (Parity error), FE (Framing error), NE (Noise error), ORE (Overrun + * error) and IDLE (Idle line detected) flags are cleared by software + * sequence: a read operation to USART_SR register followed by a read + * operation to USART_DR register. + * @note RXNE flag can be also cleared by a read to the USART_DR register. + * @note TC flag can be also cleared by software sequence: a read operation to + * USART_SR register followed by a write operation to USART_DR register. + * @note TXE flag is cleared only by a write to the USART_DR register. + * + * @retval None + */ +#define __HAL_UART_CLEAR_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->Instance->SR = ~(__FLAG__)) + +/** @brief Clears the UART PE pending flag. + * @param __HANDLE__ specifies the UART Handle. + * UART Handle selects the USARTx or UARTy peripheral + * (USART,UART availability and x,y values depending on device). + * @retval None + */ +#define __HAL_UART_CLEAR_PEFLAG(__HANDLE__) \ + do{ \ + __IO uint32_t tmpreg = 0x00U; \ + tmpreg = (__HANDLE__)->Instance->SR; \ + tmpreg = (__HANDLE__)->Instance->DR; \ + UNUSED(tmpreg); \ + } while(0U) + +/** @brief Clears the UART FE pending flag. + * @param __HANDLE__ specifies the UART Handle. + * UART Handle selects the USARTx or UARTy peripheral + * (USART,UART availability and x,y values depending on device). + * @retval None + */ +#define __HAL_UART_CLEAR_FEFLAG(__HANDLE__) __HAL_UART_CLEAR_PEFLAG(__HANDLE__) + +/** @brief Clears the UART NE pending flag. + * @param __HANDLE__ specifies the UART Handle. + * UART Handle selects the USARTx or UARTy peripheral + * (USART,UART availability and x,y values depending on device). + * @retval None + */ +#define __HAL_UART_CLEAR_NEFLAG(__HANDLE__) __HAL_UART_CLEAR_PEFLAG(__HANDLE__) + +/** @brief Clears the UART ORE pending flag. + * @param __HANDLE__ specifies the UART Handle. + * UART Handle selects the USARTx or UARTy peripheral + * (USART,UART availability and x,y values depending on device). 
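The note above on clearing the PE/FE/NE/ORE/IDLE flags by a software sequence (read SR, then read DR) is easy to miss; below is a minimal sketch of an overrun check, assuming huart points at an already initialised handle.

/* Detect and clear an overrun condition in a polling context. */
static void uart_check_overrun(UART_HandleTypeDef *huart)
{
  if (__HAL_UART_GET_FLAG(huart, UART_FLAG_ORE))
  {
    /* __HAL_UART_CLEAR_OREFLAG() performs exactly the documented
       read-SR-then-read-DR sequence that clears the ORE flag. */
    __HAL_UART_CLEAR_OREFLAG(huart);
  }
}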
+ * @retval None + */ +#define __HAL_UART_CLEAR_OREFLAG(__HANDLE__) __HAL_UART_CLEAR_PEFLAG(__HANDLE__) + +/** @brief Clears the UART IDLE pending flag. + * @param __HANDLE__ specifies the UART Handle. + * UART Handle selects the USARTx or UARTy peripheral + * (USART,UART availability and x,y values depending on device). + * @retval None + */ +#define __HAL_UART_CLEAR_IDLEFLAG(__HANDLE__) __HAL_UART_CLEAR_PEFLAG(__HANDLE__) + +/** @brief Enable the specified UART interrupt. + * @param __HANDLE__ specifies the UART Handle. + * UART Handle selects the USARTx or UARTy peripheral + * (USART,UART availability and x,y values depending on device). + * @param __INTERRUPT__ specifies the UART interrupt source to enable. + * This parameter can be one of the following values: + * @arg UART_IT_CTS: CTS change interrupt + * @arg UART_IT_LBD: LIN Break detection interrupt + * @arg UART_IT_TXE: Transmit Data Register empty interrupt + * @arg UART_IT_TC: Transmission complete interrupt + * @arg UART_IT_RXNE: Receive Data register not empty interrupt + * @arg UART_IT_IDLE: Idle line detection interrupt + * @arg UART_IT_PE: Parity Error interrupt + * @arg UART_IT_ERR: Error interrupt(Frame error, noise error, overrun error) + * @retval None + */ +#define __HAL_UART_ENABLE_IT(__HANDLE__, __INTERRUPT__) ((((__INTERRUPT__) >> 28U) == UART_CR1_REG_INDEX)? ((__HANDLE__)->Instance->CR1 |= ((__INTERRUPT__) & UART_IT_MASK)): \ + (((__INTERRUPT__) >> 28U) == UART_CR2_REG_INDEX)? ((__HANDLE__)->Instance->CR2 |= ((__INTERRUPT__) & UART_IT_MASK)): \ + ((__HANDLE__)->Instance->CR3 |= ((__INTERRUPT__) & UART_IT_MASK))) + +/** @brief Disable the specified UART interrupt. + * @param __HANDLE__ specifies the UART Handle. + * UART Handle selects the USARTx or UARTy peripheral + * (USART,UART availability and x,y values depending on device). + * @param __INTERRUPT__ specifies the UART interrupt source to disable. + * This parameter can be one of the following values: + * @arg UART_IT_CTS: CTS change interrupt + * @arg UART_IT_LBD: LIN Break detection interrupt + * @arg UART_IT_TXE: Transmit Data Register empty interrupt + * @arg UART_IT_TC: Transmission complete interrupt + * @arg UART_IT_RXNE: Receive Data register not empty interrupt + * @arg UART_IT_IDLE: Idle line detection interrupt + * @arg UART_IT_PE: Parity Error interrupt + * @arg UART_IT_ERR: Error interrupt(Frame error, noise error, overrun error) + * @retval None + */ +#define __HAL_UART_DISABLE_IT(__HANDLE__, __INTERRUPT__) ((((__INTERRUPT__) >> 28U) == UART_CR1_REG_INDEX)? ((__HANDLE__)->Instance->CR1 &= ~((__INTERRUPT__) & UART_IT_MASK)): \ + (((__INTERRUPT__) >> 28U) == UART_CR2_REG_INDEX)? ((__HANDLE__)->Instance->CR2 &= ~((__INTERRUPT__) & UART_IT_MASK)): \ + ((__HANDLE__)->Instance->CR3 &= ~ ((__INTERRUPT__) & UART_IT_MASK))) + +/** @brief Checks whether the specified UART interrupt source is enabled or not. + * @param __HANDLE__ specifies the UART Handle. + * UART Handle selects the USARTx or UARTy peripheral + * (USART,UART availability and x,y values depending on device). + * @param __IT__ specifies the UART interrupt source to check. 
+ * This parameter can be one of the following values: + * @arg UART_IT_CTS: CTS change interrupt (not available for UART4 and UART5) + * @arg UART_IT_LBD: LIN Break detection interrupt + * @arg UART_IT_TXE: Transmit Data Register empty interrupt + * @arg UART_IT_TC: Transmission complete interrupt + * @arg UART_IT_RXNE: Receive Data register not empty interrupt + * @arg UART_IT_IDLE: Idle line detection interrupt + * @arg UART_IT_ERR: Error interrupt + * @retval The new state of __IT__ (TRUE or FALSE). + */ +#define __HAL_UART_GET_IT_SOURCE(__HANDLE__, __IT__) (((((__IT__) >> 28U) == UART_CR1_REG_INDEX)? (__HANDLE__)->Instance->CR1:(((((uint32_t)(__IT__)) >> 28U) == UART_CR2_REG_INDEX)? \ + (__HANDLE__)->Instance->CR2 : (__HANDLE__)->Instance->CR3)) & (((uint32_t)(__IT__)) & UART_IT_MASK)) + +/** @brief Enable CTS flow control + * @note This macro allows to enable CTS hardware flow control for a given UART instance, + * without need to call HAL_UART_Init() function. + * As involving direct access to UART registers, usage of this macro should be fully endorsed by user. + * @note As macro is expected to be used for modifying CTS Hw flow control feature activation, without need + * for USART instance Deinit/Init, following conditions for macro call should be fulfilled : + * - UART instance should have already been initialised (through call of HAL_UART_Init() ) + * - macro could only be called when corresponding UART instance is disabled (i.e __HAL_UART_DISABLE(__HANDLE__)) + * and should be followed by an Enable macro (i.e __HAL_UART_ENABLE(__HANDLE__)). + * @param __HANDLE__ specifies the UART Handle. + * The Handle Instance can be any USARTx (supporting the HW Flow control feature). + * It is used to select the USART peripheral (USART availability and x value depending on device). + * @retval None + */ +#define __HAL_UART_HWCONTROL_CTS_ENABLE(__HANDLE__) \ + do{ \ + ATOMIC_SET_BIT((__HANDLE__)->Instance->CR3, USART_CR3_CTSE); \ + (__HANDLE__)->Init.HwFlowCtl |= USART_CR3_CTSE; \ + } while(0U) + +/** @brief Disable CTS flow control + * @note This macro allows to disable CTS hardware flow control for a given UART instance, + * without need to call HAL_UART_Init() function. + * As involving direct access to UART registers, usage of this macro should be fully endorsed by user. + * @note As macro is expected to be used for modifying CTS Hw flow control feature activation, without need + * for USART instance Deinit/Init, following conditions for macro call should be fulfilled : + * - UART instance should have already been initialised (through call of HAL_UART_Init() ) + * - macro could only be called when corresponding UART instance is disabled (i.e __HAL_UART_DISABLE(__HANDLE__)) + * and should be followed by an Enable macro (i.e __HAL_UART_ENABLE(__HANDLE__)). + * @param __HANDLE__ specifies the UART Handle. + * The Handle Instance can be any USARTx (supporting the HW Flow control feature). + * It is used to select the USART peripheral (USART availability and x value depending on device). + * @retval None + */ +#define __HAL_UART_HWCONTROL_CTS_DISABLE(__HANDLE__) \ + do{ \ + ATOMIC_CLEAR_BIT((__HANDLE__)->Instance->CR3, USART_CR3_CTSE); \ + (__HANDLE__)->Init.HwFlowCtl &= ~(USART_CR3_CTSE); \ + } while(0U) + +/** @brief Enable RTS flow control + * This macro allows to enable RTS hardware flow control for a given UART instance, + * without need to call HAL_UART_Init() function. + * As involving direct access to UART registers, usage of this macro should be fully endorsed by user. 
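The CTS macros above come with a strict calling convention: the UART must already be initialised, and the bit may only be changed while the peripheral is disabled. A minimal sketch of that documented sequence, assuming huart points at an initialised UART whose instance supports hardware flow control:

/* Turn CTS hardware flow control on at run time, following the required sequence. */
static void uart_enable_cts(UART_HandleTypeDef *huart)
{
  __HAL_UART_DISABLE(huart);               /* the macro may only be used while the UART is disabled */
  __HAL_UART_HWCONTROL_CTS_ENABLE(huart);  /* sets CR3.CTSE and updates Init.HwFlowCtl */
  __HAL_UART_ENABLE(huart);                /* re-enable the peripheral afterwards */
}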
+ * @note As macro is expected to be used for modifying RTS Hw flow control feature activation, without need + * for USART instance Deinit/Init, following conditions for macro call should be fulfilled : + * - UART instance should have already been initialised (through call of HAL_UART_Init() ) + * - macro could only be called when corresponding UART instance is disabled (i.e __HAL_UART_DISABLE(__HANDLE__)) + * and should be followed by an Enable macro (i.e __HAL_UART_ENABLE(__HANDLE__)). + * @param __HANDLE__ specifies the UART Handle. + * The Handle Instance can be any USARTx (supporting the HW Flow control feature). + * It is used to select the USART peripheral (USART availability and x value depending on device). + * @retval None + */ +#define __HAL_UART_HWCONTROL_RTS_ENABLE(__HANDLE__) \ + do{ \ + ATOMIC_SET_BIT((__HANDLE__)->Instance->CR3, USART_CR3_RTSE); \ + (__HANDLE__)->Init.HwFlowCtl |= USART_CR3_RTSE; \ + } while(0U) + +/** @brief Disable RTS flow control + * This macro allows to disable RTS hardware flow control for a given UART instance, + * without need to call HAL_UART_Init() function. + * As involving direct access to UART registers, usage of this macro should be fully endorsed by user. + * @note As macro is expected to be used for modifying RTS Hw flow control feature activation, without need + * for USART instance Deinit/Init, following conditions for macro call should be fulfilled : + * - UART instance should have already been initialised (through call of HAL_UART_Init() ) + * - macro could only be called when corresponding UART instance is disabled (i.e __HAL_UART_DISABLE(__HANDLE__)) + * and should be followed by an Enable macro (i.e __HAL_UART_ENABLE(__HANDLE__)). + * @param __HANDLE__ specifies the UART Handle. + * The Handle Instance can be any USARTx (supporting the HW Flow control feature). + * It is used to select the USART peripheral (USART availability and x value depending on device). + * @retval None + */ +#define __HAL_UART_HWCONTROL_RTS_DISABLE(__HANDLE__) \ + do{ \ + ATOMIC_CLEAR_BIT((__HANDLE__)->Instance->CR3, USART_CR3_RTSE);\ + (__HANDLE__)->Init.HwFlowCtl &= ~(USART_CR3_RTSE); \ + } while(0U) + +/** @brief Macro to enable the UART's one bit sample method + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_ONE_BIT_SAMPLE_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR3|= USART_CR3_ONEBIT) + +/** @brief Macro to disable the UART's one bit sample method + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_ONE_BIT_SAMPLE_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR3\ + &= (uint16_t)~((uint16_t)USART_CR3_ONEBIT)) + +/** @brief Enable UART + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR1 |= USART_CR1_UE) + +/** @brief Disable UART + * @param __HANDLE__ specifies the UART Handle. 
+ * @retval None + */ +#define __HAL_UART_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR1 &= ~USART_CR1_UE) +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup UART_Exported_Functions + * @{ + */ + +/** @addtogroup UART_Exported_Functions_Group1 Initialization and de-initialization functions + * @{ + */ + +/* Initialization/de-initialization functions **********************************/ +HAL_StatusTypeDef HAL_UART_Init(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_HalfDuplex_Init(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_LIN_Init(UART_HandleTypeDef *huart, uint32_t BreakDetectLength); +HAL_StatusTypeDef HAL_MultiProcessor_Init(UART_HandleTypeDef *huart, uint8_t Address, uint32_t WakeUpMethod); +HAL_StatusTypeDef HAL_UART_DeInit(UART_HandleTypeDef *huart); +void HAL_UART_MspInit(UART_HandleTypeDef *huart); +void HAL_UART_MspDeInit(UART_HandleTypeDef *huart); + +/* Callbacks Register/UnRegister functions ***********************************/ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) +HAL_StatusTypeDef HAL_UART_RegisterCallback(UART_HandleTypeDef *huart, HAL_UART_CallbackIDTypeDef CallbackID, + pUART_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_UART_UnRegisterCallback(UART_HandleTypeDef *huart, HAL_UART_CallbackIDTypeDef CallbackID); + +HAL_StatusTypeDef HAL_UART_RegisterRxEventCallback(UART_HandleTypeDef *huart, pUART_RxEventCallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_UART_UnRegisterRxEventCallback(UART_HandleTypeDef *huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @addtogroup UART_Exported_Functions_Group2 IO operation functions + * @{ + */ + +/* IO operation functions *******************************************************/ +HAL_StatusTypeDef HAL_UART_Transmit(UART_HandleTypeDef *huart, const uint8_t *pData, uint16_t Size, uint32_t Timeout); +HAL_StatusTypeDef HAL_UART_Receive(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size, uint32_t Timeout); +HAL_StatusTypeDef HAL_UART_Transmit_IT(UART_HandleTypeDef *huart, const uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_UART_Receive_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_UART_Transmit_DMA(UART_HandleTypeDef *huart, const uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_UART_Receive_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_UART_DMAPause(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UART_DMAResume(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UART_DMAStop(UART_HandleTypeDef *huart); + +HAL_StatusTypeDef HAL_UARTEx_ReceiveToIdle(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size, uint16_t *RxLen, + uint32_t Timeout); +HAL_StatusTypeDef HAL_UARTEx_ReceiveToIdle_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_UARTEx_ReceiveToIdle_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size); + +HAL_UART_RxEventTypeTypeDef HAL_UARTEx_GetRxEventType(UART_HandleTypeDef *huart); + +/* Transfer Abort functions */ +HAL_StatusTypeDef HAL_UART_Abort(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UART_AbortTransmit(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UART_AbortReceive(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UART_Abort_IT(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UART_AbortTransmit_IT(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UART_AbortReceive_IT(UART_HandleTypeDef *huart); + +void HAL_UART_IRQHandler(UART_HandleTypeDef 
*huart); +void HAL_UART_TxCpltCallback(UART_HandleTypeDef *huart); +void HAL_UART_TxHalfCpltCallback(UART_HandleTypeDef *huart); +void HAL_UART_RxCpltCallback(UART_HandleTypeDef *huart); +void HAL_UART_RxHalfCpltCallback(UART_HandleTypeDef *huart); +void HAL_UART_ErrorCallback(UART_HandleTypeDef *huart); +void HAL_UART_AbortCpltCallback(UART_HandleTypeDef *huart); +void HAL_UART_AbortTransmitCpltCallback(UART_HandleTypeDef *huart); +void HAL_UART_AbortReceiveCpltCallback(UART_HandleTypeDef *huart); + +void HAL_UARTEx_RxEventCallback(UART_HandleTypeDef *huart, uint16_t Size); + +/** + * @} + */ + +/** @addtogroup UART_Exported_Functions_Group3 + * @{ + */ +/* Peripheral Control functions ************************************************/ +HAL_StatusTypeDef HAL_LIN_SendBreak(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_MultiProcessor_EnterMuteMode(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_MultiProcessor_ExitMuteMode(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_HalfDuplex_EnableTransmitter(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_HalfDuplex_EnableReceiver(UART_HandleTypeDef *huart); +/** + * @} + */ + +/** @addtogroup UART_Exported_Functions_Group4 + * @{ + */ +/* Peripheral State functions **************************************************/ +HAL_UART_StateTypeDef HAL_UART_GetState(const UART_HandleTypeDef *huart); +uint32_t HAL_UART_GetError(const UART_HandleTypeDef *huart); +/** + * @} + */ + +/** + * @} + */ +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup UART_Private_Constants UART Private Constants + * @{ + */ +/** @brief UART interruptions flag mask + * + */ +#define UART_IT_MASK 0x0000FFFFU + +#define UART_CR1_REG_INDEX 1U +#define UART_CR2_REG_INDEX 2U +#define UART_CR3_REG_INDEX 3U +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup UART_Private_Macros UART Private Macros + * @{ + */ +#define IS_UART_WORD_LENGTH(LENGTH) (((LENGTH) == UART_WORDLENGTH_8B) || \ + ((LENGTH) == UART_WORDLENGTH_9B)) +#define IS_UART_LIN_WORD_LENGTH(LENGTH) (((LENGTH) == UART_WORDLENGTH_8B)) +#define IS_UART_STOPBITS(STOPBITS) (((STOPBITS) == UART_STOPBITS_1) || \ + ((STOPBITS) == UART_STOPBITS_2)) +#define IS_UART_PARITY(PARITY) (((PARITY) == UART_PARITY_NONE) || \ + ((PARITY) == UART_PARITY_EVEN) || \ + ((PARITY) == UART_PARITY_ODD)) +#define IS_UART_HARDWARE_FLOW_CONTROL(CONTROL)\ + (((CONTROL) == UART_HWCONTROL_NONE) || \ + ((CONTROL) == UART_HWCONTROL_RTS) || \ + ((CONTROL) == UART_HWCONTROL_CTS) || \ + ((CONTROL) == UART_HWCONTROL_RTS_CTS)) +#define IS_UART_MODE(MODE) ((((MODE) & 0x0000FFF3U) == 0x00U) && ((MODE) != 0x00U)) +#define IS_UART_STATE(STATE) (((STATE) == UART_STATE_DISABLE) || \ + ((STATE) == UART_STATE_ENABLE)) +#define IS_UART_OVERSAMPLING(SAMPLING) (((SAMPLING) == UART_OVERSAMPLING_16) || \ + ((SAMPLING) == UART_OVERSAMPLING_8)) +#define IS_UART_LIN_OVERSAMPLING(SAMPLING) (((SAMPLING) == UART_OVERSAMPLING_16)) +#define IS_UART_LIN_BREAK_DETECT_LENGTH(LENGTH) (((LENGTH) == UART_LINBREAKDETECTLENGTH_10B) || \ + ((LENGTH) == UART_LINBREAKDETECTLENGTH_11B)) +#define IS_UART_WAKEUPMETHOD(WAKEUP) (((WAKEUP) == UART_WAKEUPMETHOD_IDLELINE) || \ + ((WAKEUP) == UART_WAKEUPMETHOD_ADDRESSMARK)) +#define IS_UART_BAUDRATE(BAUDRATE) ((BAUDRATE) <= 10500000U) +#define IS_UART_ADDRESS(ADDRESS) 
((ADDRESS) <= 0x0FU) + +#define UART_DIV_SAMPLING16(_PCLK_, _BAUD_) ((uint32_t)((((uint64_t)(_PCLK_))*25U)/(4U*((uint64_t)(_BAUD_))))) +#define UART_DIVMANT_SAMPLING16(_PCLK_, _BAUD_) (UART_DIV_SAMPLING16((_PCLK_), (_BAUD_))/100U) +#define UART_DIVFRAQ_SAMPLING16(_PCLK_, _BAUD_) ((((UART_DIV_SAMPLING16((_PCLK_), (_BAUD_)) - (UART_DIVMANT_SAMPLING16((_PCLK_), (_BAUD_)) * 100U)) * 16U)\ + + 50U) / 100U) +/* UART BRR = mantissa + overflow + fraction + = (UART DIVMANT << 4) + (UART DIVFRAQ & 0xF0) + (UART DIVFRAQ & 0x0FU) */ +#define UART_BRR_SAMPLING16(_PCLK_, _BAUD_) ((UART_DIVMANT_SAMPLING16((_PCLK_), (_BAUD_)) << 4U) + \ + (UART_DIVFRAQ_SAMPLING16((_PCLK_), (_BAUD_)) & 0xF0U) + \ + (UART_DIVFRAQ_SAMPLING16((_PCLK_), (_BAUD_)) & 0x0FU)) + +#define UART_DIV_SAMPLING8(_PCLK_, _BAUD_) ((uint32_t)((((uint64_t)(_PCLK_))*25U)/(2U*((uint64_t)(_BAUD_))))) +#define UART_DIVMANT_SAMPLING8(_PCLK_, _BAUD_) (UART_DIV_SAMPLING8((_PCLK_), (_BAUD_))/100U) +#define UART_DIVFRAQ_SAMPLING8(_PCLK_, _BAUD_) ((((UART_DIV_SAMPLING8((_PCLK_), (_BAUD_)) - (UART_DIVMANT_SAMPLING8((_PCLK_), (_BAUD_)) * 100U)) * 8U)\ + + 50U) / 100U) +/* UART BRR = mantissa + overflow + fraction + = (UART DIVMANT << 4) + ((UART DIVFRAQ & 0xF8) << 1) + (UART DIVFRAQ & 0x07U) */ +#define UART_BRR_SAMPLING8(_PCLK_, _BAUD_) ((UART_DIVMANT_SAMPLING8((_PCLK_), (_BAUD_)) << 4U) + \ + ((UART_DIVFRAQ_SAMPLING8((_PCLK_), (_BAUD_)) & 0xF8U) << 1U) + \ + (UART_DIVFRAQ_SAMPLING8((_PCLK_), (_BAUD_)) & 0x07U)) + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup UART_Private_Functions UART Private Functions + * @{ + */ + +HAL_StatusTypeDef UART_Start_Receive_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef UART_Start_Receive_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size); + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_HAL_UART_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_bus.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_bus.h new file mode 100644 index 0000000..ce19d4d --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_bus.h @@ -0,0 +1,2105 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_bus.h + * @author MCD Application Team + * @brief Header file of BUS LL module. + + @verbatim + ##### RCC Limitations ##### + ============================================================================== + [..] + A delay between an RCC peripheral clock enable and the effective peripheral + enabling should be taken into account in order to manage the peripheral read/write + from/to registers. + (+) This delay depends on the peripheral mapping. + (++) AHB & APB peripherals, 1 dummy read is necessary + + [..] + Workarounds: + (#) For AHB & APB peripherals, a dummy read to the peripheral register has been + inserted in each LL_{BUS}_GRP{x}_EnableClock() function. + + @endverbatim + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
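To see what the UART_BRR_SAMPLING16() chain above evaluates to, here is the arithmetic written out for an assumed PCLK of 42 MHz and 115200 baud with oversampling by 16; the numbers are illustrative, not a claim about any particular board.

#include "stm32f4xx_hal.h"

/* UART_DIV_SAMPLING16(42000000, 115200)  = (25 * 42000000) / (4 * 115200) = 2278   (100 * USARTDIV)
 * UART_DIVMANT_SAMPLING16(...)           = 2278 / 100                     = 22     (mantissa)
 * UART_DIVFRAQ_SAMPLING16(...)           = ((78 * 16) + 50) / 100         = 12     (rounded 4-bit fraction)
 * UART_BRR_SAMPLING16(...)               = (22 << 4) + 12                 = 0x16C
 * i.e. USARTDIV = 22 + 12/16 = 22.75, giving 42 MHz / (16 * 22.75) ~= 115385 baud (~0.16 % error). */
static const uint32_t example_brr = UART_BRR_SAMPLING16(42000000U, 115200U);   /* 0x16CU */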
+ ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_LL_BUS_H +#define __STM32F4xx_LL_BUS_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +#if defined(RCC) + +/** @defgroup BUS_LL BUS + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ +/** @defgroup BUS_LL_Exported_Constants BUS Exported Constants + * @{ + */ + +/** @defgroup BUS_LL_EC_AHB1_GRP1_PERIPH AHB1 GRP1 PERIPH + * @{ + */ +#define LL_AHB1_GRP1_PERIPH_ALL 0xFFFFFFFFU +#define LL_AHB1_GRP1_PERIPH_GPIOA RCC_AHB1ENR_GPIOAEN +#define LL_AHB1_GRP1_PERIPH_GPIOB RCC_AHB1ENR_GPIOBEN +#define LL_AHB1_GRP1_PERIPH_GPIOC RCC_AHB1ENR_GPIOCEN +#if defined(GPIOD) +#define LL_AHB1_GRP1_PERIPH_GPIOD RCC_AHB1ENR_GPIODEN +#endif /* GPIOD */ +#if defined(GPIOE) +#define LL_AHB1_GRP1_PERIPH_GPIOE RCC_AHB1ENR_GPIOEEN +#endif /* GPIOE */ +#if defined(GPIOF) +#define LL_AHB1_GRP1_PERIPH_GPIOF RCC_AHB1ENR_GPIOFEN +#endif /* GPIOF */ +#if defined(GPIOG) +#define LL_AHB1_GRP1_PERIPH_GPIOG RCC_AHB1ENR_GPIOGEN +#endif /* GPIOG */ +#if defined(GPIOH) +#define LL_AHB1_GRP1_PERIPH_GPIOH RCC_AHB1ENR_GPIOHEN +#endif /* GPIOH */ +#if defined(GPIOI) +#define LL_AHB1_GRP1_PERIPH_GPIOI RCC_AHB1ENR_GPIOIEN +#endif /* GPIOI */ +#if defined(GPIOJ) +#define LL_AHB1_GRP1_PERIPH_GPIOJ RCC_AHB1ENR_GPIOJEN +#endif /* GPIOJ */ +#if defined(GPIOK) +#define LL_AHB1_GRP1_PERIPH_GPIOK RCC_AHB1ENR_GPIOKEN +#endif /* GPIOK */ +#define LL_AHB1_GRP1_PERIPH_CRC RCC_AHB1ENR_CRCEN +#if defined(RCC_AHB1ENR_BKPSRAMEN) +#define LL_AHB1_GRP1_PERIPH_BKPSRAM RCC_AHB1ENR_BKPSRAMEN +#endif /* RCC_AHB1ENR_BKPSRAMEN */ +#if defined(RCC_AHB1ENR_CCMDATARAMEN) +#define LL_AHB1_GRP1_PERIPH_CCMDATARAM RCC_AHB1ENR_CCMDATARAMEN +#endif /* RCC_AHB1ENR_CCMDATARAMEN */ +#define LL_AHB1_GRP1_PERIPH_DMA1 RCC_AHB1ENR_DMA1EN +#define LL_AHB1_GRP1_PERIPH_DMA2 RCC_AHB1ENR_DMA2EN +#if defined(RCC_AHB1ENR_RNGEN) +#define LL_AHB1_GRP1_PERIPH_RNG RCC_AHB1ENR_RNGEN +#endif /* RCC_AHB1ENR_RNGEN */ +#if defined(DMA2D) +#define LL_AHB1_GRP1_PERIPH_DMA2D RCC_AHB1ENR_DMA2DEN +#endif /* DMA2D */ +#if defined(ETH) +#define LL_AHB1_GRP1_PERIPH_ETHMAC RCC_AHB1ENR_ETHMACEN +#define LL_AHB1_GRP1_PERIPH_ETHMACTX RCC_AHB1ENR_ETHMACTXEN +#define LL_AHB1_GRP1_PERIPH_ETHMACRX RCC_AHB1ENR_ETHMACRXEN +#define LL_AHB1_GRP1_PERIPH_ETHMACPTP RCC_AHB1ENR_ETHMACPTPEN +#endif /* ETH */ +#if defined(USB_OTG_HS) +#define LL_AHB1_GRP1_PERIPH_OTGHS RCC_AHB1ENR_OTGHSEN +#define LL_AHB1_GRP1_PERIPH_OTGHSULPI RCC_AHB1ENR_OTGHSULPIEN +#endif /* USB_OTG_HS */ +#define LL_AHB1_GRP1_PERIPH_FLITF RCC_AHB1LPENR_FLITFLPEN +#define LL_AHB1_GRP1_PERIPH_SRAM1 RCC_AHB1LPENR_SRAM1LPEN +#if defined(RCC_AHB1LPENR_SRAM2LPEN) +#define LL_AHB1_GRP1_PERIPH_SRAM2 RCC_AHB1LPENR_SRAM2LPEN +#endif /* RCC_AHB1LPENR_SRAM2LPEN */ +#if defined(RCC_AHB1LPENR_SRAM3LPEN) +#define LL_AHB1_GRP1_PERIPH_SRAM3 RCC_AHB1LPENR_SRAM3LPEN +#endif /* 
RCC_AHB1LPENR_SRAM3LPEN */ +/** + * @} + */ + +#if defined(RCC_AHB2_SUPPORT) +/** @defgroup BUS_LL_EC_AHB2_GRP1_PERIPH AHB2 GRP1 PERIPH + * @{ + */ +#define LL_AHB2_GRP1_PERIPH_ALL 0xFFFFFFFFU +#if defined(DCMI) +#define LL_AHB2_GRP1_PERIPH_DCMI RCC_AHB2ENR_DCMIEN +#endif /* DCMI */ +#if defined(CRYP) +#define LL_AHB2_GRP1_PERIPH_CRYP RCC_AHB2ENR_CRYPEN +#endif /* CRYP */ +#if defined(AES) +#define LL_AHB2_GRP1_PERIPH_AES RCC_AHB2ENR_AESEN +#endif /* AES */ +#if defined(HASH) +#define LL_AHB2_GRP1_PERIPH_HASH RCC_AHB2ENR_HASHEN +#endif /* HASH */ +#if defined(RCC_AHB2ENR_RNGEN) +#define LL_AHB2_GRP1_PERIPH_RNG RCC_AHB2ENR_RNGEN +#endif /* RCC_AHB2ENR_RNGEN */ +#if defined(USB_OTG_FS) +#define LL_AHB2_GRP1_PERIPH_OTGFS RCC_AHB2ENR_OTGFSEN +#endif /* USB_OTG_FS */ +/** + * @} + */ +#endif /* RCC_AHB2_SUPPORT */ + +#if defined(RCC_AHB3_SUPPORT) +/** @defgroup BUS_LL_EC_AHB3_GRP1_PERIPH AHB3 GRP1 PERIPH + * @{ + */ +#define LL_AHB3_GRP1_PERIPH_ALL 0xFFFFFFFFU +#if defined(FSMC_Bank1) +#define LL_AHB3_GRP1_PERIPH_FSMC RCC_AHB3ENR_FSMCEN +#endif /* FSMC_Bank1 */ +#if defined(FMC_Bank1) +#define LL_AHB3_GRP1_PERIPH_FMC RCC_AHB3ENR_FMCEN +#endif /* FMC_Bank1 */ +#if defined(QUADSPI) +#define LL_AHB3_GRP1_PERIPH_QSPI RCC_AHB3ENR_QSPIEN +#endif /* QUADSPI */ +/** + * @} + */ +#endif /* RCC_AHB3_SUPPORT */ + +/** @defgroup BUS_LL_EC_APB1_GRP1_PERIPH APB1 GRP1 PERIPH + * @{ + */ +#define LL_APB1_GRP1_PERIPH_ALL 0xFFFFFFFFU +#if defined(TIM2) +#define LL_APB1_GRP1_PERIPH_TIM2 RCC_APB1ENR_TIM2EN +#endif /* TIM2 */ +#if defined(TIM3) +#define LL_APB1_GRP1_PERIPH_TIM3 RCC_APB1ENR_TIM3EN +#endif /* TIM3 */ +#if defined(TIM4) +#define LL_APB1_GRP1_PERIPH_TIM4 RCC_APB1ENR_TIM4EN +#endif /* TIM4 */ +#define LL_APB1_GRP1_PERIPH_TIM5 RCC_APB1ENR_TIM5EN +#if defined(TIM6) +#define LL_APB1_GRP1_PERIPH_TIM6 RCC_APB1ENR_TIM6EN +#endif /* TIM6 */ +#if defined(TIM7) +#define LL_APB1_GRP1_PERIPH_TIM7 RCC_APB1ENR_TIM7EN +#endif /* TIM7 */ +#if defined(TIM12) +#define LL_APB1_GRP1_PERIPH_TIM12 RCC_APB1ENR_TIM12EN +#endif /* TIM12 */ +#if defined(TIM13) +#define LL_APB1_GRP1_PERIPH_TIM13 RCC_APB1ENR_TIM13EN +#endif /* TIM13 */ +#if defined(TIM14) +#define LL_APB1_GRP1_PERIPH_TIM14 RCC_APB1ENR_TIM14EN +#endif /* TIM14 */ +#if defined(LPTIM1) +#define LL_APB1_GRP1_PERIPH_LPTIM1 RCC_APB1ENR_LPTIM1EN +#endif /* LPTIM1 */ +#if defined(RCC_APB1ENR_RTCAPBEN) +#define LL_APB1_GRP1_PERIPH_RTCAPB RCC_APB1ENR_RTCAPBEN +#endif /* RCC_APB1ENR_RTCAPBEN */ +#define LL_APB1_GRP1_PERIPH_WWDG RCC_APB1ENR_WWDGEN +#if defined(SPI2) +#define LL_APB1_GRP1_PERIPH_SPI2 RCC_APB1ENR_SPI2EN +#endif /* SPI2 */ +#if defined(SPI3) +#define LL_APB1_GRP1_PERIPH_SPI3 RCC_APB1ENR_SPI3EN +#endif /* SPI3 */ +#if defined(SPDIFRX) +#define LL_APB1_GRP1_PERIPH_SPDIFRX RCC_APB1ENR_SPDIFRXEN +#endif /* SPDIFRX */ +#define LL_APB1_GRP1_PERIPH_USART2 RCC_APB1ENR_USART2EN +#if defined(USART3) +#define LL_APB1_GRP1_PERIPH_USART3 RCC_APB1ENR_USART3EN +#endif /* USART3 */ +#if defined(UART4) +#define LL_APB1_GRP1_PERIPH_UART4 RCC_APB1ENR_UART4EN +#endif /* UART4 */ +#if defined(UART5) +#define LL_APB1_GRP1_PERIPH_UART5 RCC_APB1ENR_UART5EN +#endif /* UART5 */ +#define LL_APB1_GRP1_PERIPH_I2C1 RCC_APB1ENR_I2C1EN +#define LL_APB1_GRP1_PERIPH_I2C2 RCC_APB1ENR_I2C2EN +#if defined(I2C3) +#define LL_APB1_GRP1_PERIPH_I2C3 RCC_APB1ENR_I2C3EN +#endif /* I2C3 */ +#if defined(FMPI2C1) +#define LL_APB1_GRP1_PERIPH_FMPI2C1 RCC_APB1ENR_FMPI2C1EN +#endif /* FMPI2C1 */ +#if defined(CAN1) +#define LL_APB1_GRP1_PERIPH_CAN1 RCC_APB1ENR_CAN1EN +#endif /* CAN1 */ +#if defined(CAN2) 
+#define LL_APB1_GRP1_PERIPH_CAN2 RCC_APB1ENR_CAN2EN +#endif /* CAN2 */ +#if defined(CAN3) +#define LL_APB1_GRP1_PERIPH_CAN3 RCC_APB1ENR_CAN3EN +#endif /* CAN3 */ +#if defined(CEC) +#define LL_APB1_GRP1_PERIPH_CEC RCC_APB1ENR_CECEN +#endif /* CEC */ +#define LL_APB1_GRP1_PERIPH_PWR RCC_APB1ENR_PWREN +#if defined(DAC1) +#define LL_APB1_GRP1_PERIPH_DAC1 RCC_APB1ENR_DACEN +#endif /* DAC1 */ +#if defined(UART7) +#define LL_APB1_GRP1_PERIPH_UART7 RCC_APB1ENR_UART7EN +#endif /* UART7 */ +#if defined(UART8) +#define LL_APB1_GRP1_PERIPH_UART8 RCC_APB1ENR_UART8EN +#endif /* UART8 */ +/** + * @} + */ + +/** @defgroup BUS_LL_EC_APB2_GRP1_PERIPH APB2 GRP1 PERIPH + * @{ + */ +#define LL_APB2_GRP1_PERIPH_ALL 0xFFFFFFFFU +#define LL_APB2_GRP1_PERIPH_TIM1 RCC_APB2ENR_TIM1EN +#if defined(TIM8) +#define LL_APB2_GRP1_PERIPH_TIM8 RCC_APB2ENR_TIM8EN +#endif /* TIM8 */ +#define LL_APB2_GRP1_PERIPH_USART1 RCC_APB2ENR_USART1EN +#if defined(USART6) +#define LL_APB2_GRP1_PERIPH_USART6 RCC_APB2ENR_USART6EN +#endif /* USART6 */ +#if defined(UART9) +#define LL_APB2_GRP1_PERIPH_UART9 RCC_APB2ENR_UART9EN +#endif /* UART9 */ +#if defined(UART10) +#define LL_APB2_GRP1_PERIPH_UART10 RCC_APB2ENR_UART10EN +#endif /* UART10 */ +#define LL_APB2_GRP1_PERIPH_ADC1 RCC_APB2ENR_ADC1EN +#if defined(ADC2) +#define LL_APB2_GRP1_PERIPH_ADC2 RCC_APB2ENR_ADC2EN +#endif /* ADC2 */ +#if defined(ADC3) +#define LL_APB2_GRP1_PERIPH_ADC3 RCC_APB2ENR_ADC3EN +#endif /* ADC3 */ +#if defined(SDIO) +#define LL_APB2_GRP1_PERIPH_SDIO RCC_APB2ENR_SDIOEN +#endif /* SDIO */ +#define LL_APB2_GRP1_PERIPH_SPI1 RCC_APB2ENR_SPI1EN +#if defined(SPI4) +#define LL_APB2_GRP1_PERIPH_SPI4 RCC_APB2ENR_SPI4EN +#endif /* SPI4 */ +#define LL_APB2_GRP1_PERIPH_SYSCFG RCC_APB2ENR_SYSCFGEN +#if defined(RCC_APB2ENR_EXTITEN) +#define LL_APB2_GRP1_PERIPH_EXTI RCC_APB2ENR_EXTITEN +#endif /* RCC_APB2ENR_EXTITEN */ +#define LL_APB2_GRP1_PERIPH_TIM9 RCC_APB2ENR_TIM9EN +#if defined(TIM10) +#define LL_APB2_GRP1_PERIPH_TIM10 RCC_APB2ENR_TIM10EN +#endif /* TIM10 */ +#define LL_APB2_GRP1_PERIPH_TIM11 RCC_APB2ENR_TIM11EN +#if defined(SPI5) +#define LL_APB2_GRP1_PERIPH_SPI5 RCC_APB2ENR_SPI5EN +#endif /* SPI5 */ +#if defined(SPI6) +#define LL_APB2_GRP1_PERIPH_SPI6 RCC_APB2ENR_SPI6EN +#endif /* SPI6 */ +#if defined(SAI1) +#define LL_APB2_GRP1_PERIPH_SAI1 RCC_APB2ENR_SAI1EN +#endif /* SAI1 */ +#if defined(SAI2) +#define LL_APB2_GRP1_PERIPH_SAI2 RCC_APB2ENR_SAI2EN +#endif /* SAI2 */ +#if defined(LTDC) +#define LL_APB2_GRP1_PERIPH_LTDC RCC_APB2ENR_LTDCEN +#endif /* LTDC */ +#if defined(DSI) +#define LL_APB2_GRP1_PERIPH_DSI RCC_APB2ENR_DSIEN +#endif /* DSI */ +#if defined(DFSDM1_Channel0) +#define LL_APB2_GRP1_PERIPH_DFSDM1 RCC_APB2ENR_DFSDM1EN +#endif /* DFSDM1_Channel0 */ +#if defined(DFSDM2_Channel0) +#define LL_APB2_GRP1_PERIPH_DFSDM2 RCC_APB2ENR_DFSDM2EN +#endif /* DFSDM2_Channel0 */ +#define LL_APB2_GRP1_PERIPH_ADC RCC_APB2RSTR_ADCRST +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ +/** @defgroup BUS_LL_Exported_Functions BUS Exported Functions + * @{ + */ + +/** @defgroup BUS_LL_EF_AHB1 AHB1 + * @{ + */ + +/** + * @brief Enable AHB1 peripherals clock. 
+ * @rmtoll AHB1ENR GPIOAEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR GPIOBEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR GPIOCEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR GPIODEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR GPIOEEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR GPIOFEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR GPIOGEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR GPIOHEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR GPIOIEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR GPIOJEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR GPIOKEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR CRCEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR BKPSRAMEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR CCMDATARAMEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR DMA1EN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR DMA2EN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR RNGEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR DMA2DEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR ETHMACEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR ETHMACTXEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR ETHMACRXEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR ETHMACPTPEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR OTGHSEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR OTGHSULPIEN LL_AHB1_GRP1_EnableClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOA + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOB + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOC + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOD (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOE (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOF (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOH (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOI (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOJ (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOK (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_CRC + * @arg @ref LL_AHB1_GRP1_PERIPH_BKPSRAM (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_CCMDATARAM (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA1 + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2 + * @arg @ref LL_AHB1_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2D (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMAC (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACTX (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACRX (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACPTP (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_OTGHS (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_OTGHSULPI (*) + * + * (*) value not defined in all devices. 
+ * @retval None + */ +__STATIC_INLINE void LL_AHB1_GRP1_EnableClock(uint32_t Periphs) +{ + __IO uint32_t tmpreg; + SET_BIT(RCC->AHB1ENR, Periphs); + /* Delay after an RCC peripheral clock enabling */ + tmpreg = READ_BIT(RCC->AHB1ENR, Periphs); + (void)tmpreg; +} + +/** + * @brief Check if AHB1 peripheral clock is enabled or not + * @rmtoll AHB1ENR GPIOAEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR GPIOBEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR GPIOCEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR GPIODEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR GPIOEEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR GPIOFEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR GPIOGEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR GPIOHEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR GPIOIEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR GPIOJEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR GPIOKEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR CRCEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR BKPSRAMEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR CCMDATARAMEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR DMA1EN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR DMA2EN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR RNGEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR DMA2DEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR ETHMACEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR ETHMACTXEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR ETHMACRXEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR ETHMACPTPEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR OTGHSEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR OTGHSULPIEN LL_AHB1_GRP1_IsEnabledClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOA + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOB + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOC + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOD (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOE (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOF (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOH (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOI (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOJ (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOK (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_CRC + * @arg @ref LL_AHB1_GRP1_PERIPH_BKPSRAM (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_CCMDATARAM (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA1 + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2 + * @arg @ref LL_AHB1_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2D (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMAC (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACTX (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACRX (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACPTP (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_OTGHS (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_OTGHSULPI (*) + * + * (*) value not defined in all devices. + * @retval State of Periphs (1 or 0). + */ +__STATIC_INLINE uint32_t LL_AHB1_GRP1_IsEnabledClock(uint32_t Periphs) +{ + return (READ_BIT(RCC->AHB1ENR, Periphs) == Periphs); +} + +/** + * @brief Disable AHB1 peripherals clock. 
+ * @rmtoll AHB1ENR GPIOAEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR GPIOBEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR GPIOCEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR GPIODEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR GPIOEEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR GPIOFEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR GPIOGEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR GPIOHEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR GPIOIEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR GPIOJEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR GPIOKEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR CRCEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR BKPSRAMEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR CCMDATARAMEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR DMA1EN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR DMA2EN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR RNGEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR DMA2DEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR ETHMACEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR ETHMACTXEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR ETHMACRXEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR ETHMACPTPEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR OTGHSEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR OTGHSULPIEN LL_AHB1_GRP1_DisableClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOA + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOB + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOC + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOD (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOE (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOF (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOH (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOI (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOJ (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOK (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_CRC + * @arg @ref LL_AHB1_GRP1_PERIPH_BKPSRAM (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_CCMDATARAM (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA1 + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2 + * @arg @ref LL_AHB1_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2D (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMAC (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACTX (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACRX (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACPTP (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_OTGHS (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_OTGHSULPI (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB1_GRP1_DisableClock(uint32_t Periphs) +{ + CLEAR_BIT(RCC->AHB1ENR, Periphs); +} + +/** + * @brief Force AHB1 peripherals reset. 
+ * @rmtoll AHB1RSTR GPIOARST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR GPIOBRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR GPIOCRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR GPIODRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR GPIOERST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR GPIOFRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR GPIOGRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR GPIOHRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR GPIOIRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR GPIOJRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR GPIOKRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR CRCRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR DMA1RST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR DMA2RST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR RNGRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR DMA2DRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR ETHMACRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR OTGHSRST LL_AHB1_GRP1_ForceReset + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB1_GRP1_PERIPH_ALL + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOA + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOB + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOC + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOD (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOE (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOF (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOH (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOI (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOJ (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOK (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_CRC + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA1 + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2 + * @arg @ref LL_AHB1_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2D (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMAC (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_OTGHS (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB1_GRP1_ForceReset(uint32_t Periphs) +{ + SET_BIT(RCC->AHB1RSTR, Periphs); +} + +/** + * @brief Release AHB1 peripherals reset. 
+ * @rmtoll AHB1RSTR GPIOARST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR GPIOBRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR GPIOCRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR GPIODRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR GPIOERST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR GPIOFRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR GPIOGRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR GPIOHRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR GPIOIRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR GPIOJRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR GPIOKRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR CRCRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR DMA1RST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR DMA2RST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR RNGRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR DMA2DRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR ETHMACRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR OTGHSRST LL_AHB1_GRP1_ReleaseReset + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB1_GRP1_PERIPH_ALL + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOA + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOB + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOC + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOD (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOE (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOF (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOH (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOI (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOJ (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOK (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_CRC + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA1 + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2 + * @arg @ref LL_AHB1_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2D (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMAC (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_OTGHS (*) + * + * (*) value not defined in all devices. 
+ * @retval None + */ +__STATIC_INLINE void LL_AHB1_GRP1_ReleaseReset(uint32_t Periphs) +{ + CLEAR_BIT(RCC->AHB1RSTR, Periphs); +} + +/** + * @brief Enable AHB1 peripheral clocks in low-power mode + * @rmtoll AHB1LPENR GPIOALPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR GPIOBLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR GPIOCLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR GPIODLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR GPIOELPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR GPIOFLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR GPIOGLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR GPIOHLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR GPIOILPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR GPIOJLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR GPIOKLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR CRCLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR BKPSRAMLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR FLITFLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR SRAM1LPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR SRAM2LPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR SRAM3LPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR BKPSRAMLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR DMA1LPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR DMA2LPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR DMA2DLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR RNGLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR ETHMACLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR ETHMACTXLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR ETHMACRXLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR ETHMACPTPLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR OTGHSLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR OTGHSULPILPEN LL_AHB1_GRP1_EnableClockLowPower + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOA + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOB + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOC + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOD (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOE (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOF (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOH (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOI (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOJ (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOK (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_CRC + * @arg @ref LL_AHB1_GRP1_PERIPH_BKPSRAM (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_FLITF + * @arg @ref LL_AHB1_GRP1_PERIPH_SRAM1 + * @arg @ref LL_AHB1_GRP1_PERIPH_SRAM2 (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_SRAM3 (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA1 + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2 + * @arg @ref LL_AHB1_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2D (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMAC (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACTX (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACRX (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACPTP (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_OTGHS (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_OTGHSULPI (*) + * + * (*) value not defined in all devices. 
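+ * @note   The AHB1LPENR bits only select which peripheral clocks remain active
+ *         while the core is in Sleep mode (after WFI/WFE); they have no effect
+ *         in Run mode. Illustrative sketch keeping GPIOA and DMA1 clocked
+ *         during Sleep:
+ *           LL_AHB1_GRP1_EnableClockLowPower(LL_AHB1_GRP1_PERIPH_GPIOA
+ *                                            | LL_AHB1_GRP1_PERIPH_DMA1);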
+ * @retval None + */ +__STATIC_INLINE void LL_AHB1_GRP1_EnableClockLowPower(uint32_t Periphs) +{ + __IO uint32_t tmpreg; + SET_BIT(RCC->AHB1LPENR, Periphs); + /* Delay after an RCC peripheral clock enabling */ + tmpreg = READ_BIT(RCC->AHB1LPENR, Periphs); + (void)tmpreg; +} + +/** + * @brief Disable AHB1 peripheral clocks in low-power mode + * @rmtoll AHB1LPENR GPIOALPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR GPIOBLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR GPIOCLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR GPIODLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR GPIOELPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR GPIOFLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR GPIOGLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR GPIOHLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR GPIOILPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR GPIOJLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR GPIOKLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR CRCLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR BKPSRAMLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR FLITFLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR SRAM1LPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR SRAM2LPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR SRAM3LPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR BKPSRAMLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR DMA1LPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR DMA2LPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR DMA2DLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR RNGLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR ETHMACLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR ETHMACTXLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR ETHMACRXLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR ETHMACPTPLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR OTGHSLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR OTGHSULPILPEN LL_AHB1_GRP1_DisableClockLowPower + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOA + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOB + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOC + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOD (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOE (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOF (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOH (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOI (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOJ (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOK (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_CRC + * @arg @ref LL_AHB1_GRP1_PERIPH_BKPSRAM (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_FLITF + * @arg @ref LL_AHB1_GRP1_PERIPH_SRAM1 + * @arg @ref LL_AHB1_GRP1_PERIPH_SRAM2 (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_SRAM3 (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA1 + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2 + * @arg @ref LL_AHB1_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2D (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMAC (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACTX (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACRX (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACPTP (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_OTGHS (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_OTGHSULPI (*) + * + * (*) value not defined in all devices. 
+ * @retval None + */ +__STATIC_INLINE void LL_AHB1_GRP1_DisableClockLowPower(uint32_t Periphs) +{ + CLEAR_BIT(RCC->AHB1LPENR, Periphs); +} + +/** + * @} + */ + +#if defined(RCC_AHB2_SUPPORT) +/** @defgroup BUS_LL_EF_AHB2 AHB2 + * @{ + */ + +/** + * @brief Enable AHB2 peripherals clock. + * @rmtoll AHB2ENR DCMIEN LL_AHB2_GRP1_EnableClock\n + * AHB2ENR CRYPEN LL_AHB2_GRP1_EnableClock\n + * AHB2ENR AESEN LL_AHB2_GRP1_EnableClock\n + * AHB2ENR HASHEN LL_AHB2_GRP1_EnableClock\n + * AHB2ENR RNGEN LL_AHB2_GRP1_EnableClock\n + * AHB2ENR OTGFSEN LL_AHB2_GRP1_EnableClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB2_GRP1_PERIPH_DCMI (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_CRYP (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_AES (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_HASH (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_OTGFS (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB2_GRP1_EnableClock(uint32_t Periphs) +{ + __IO uint32_t tmpreg; + SET_BIT(RCC->AHB2ENR, Periphs); + /* Delay after an RCC peripheral clock enabling */ + tmpreg = READ_BIT(RCC->AHB2ENR, Periphs); + (void)tmpreg; +} + +/** + * @brief Check if AHB2 peripheral clock is enabled or not + * @rmtoll AHB2ENR DCMIEN LL_AHB2_GRP1_IsEnabledClock\n + * AHB2ENR CRYPEN LL_AHB2_GRP1_IsEnabledClock\n + * AHB2ENR AESEN LL_AHB2_GRP1_IsEnabledClock\n + * AHB2ENR HASHEN LL_AHB2_GRP1_IsEnabledClock\n + * AHB2ENR RNGEN LL_AHB2_GRP1_IsEnabledClock\n + * AHB2ENR OTGFSEN LL_AHB2_GRP1_IsEnabledClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB2_GRP1_PERIPH_DCMI (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_CRYP (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_AES (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_HASH (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_OTGFS (*) + * + * (*) value not defined in all devices. + * @retval State of Periphs (1 or 0). + */ +__STATIC_INLINE uint32_t LL_AHB2_GRP1_IsEnabledClock(uint32_t Periphs) +{ + return (READ_BIT(RCC->AHB2ENR, Periphs) == Periphs); +} + +/** + * @brief Disable AHB2 peripherals clock. + * @rmtoll AHB2ENR DCMIEN LL_AHB2_GRP1_DisableClock\n + * AHB2ENR CRYPEN LL_AHB2_GRP1_DisableClock\n + * AHB2ENR AESEN LL_AHB2_GRP1_DisableClock\n + * AHB2ENR HASHEN LL_AHB2_GRP1_DisableClock\n + * AHB2ENR RNGEN LL_AHB2_GRP1_DisableClock\n + * AHB2ENR OTGFSEN LL_AHB2_GRP1_DisableClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB2_GRP1_PERIPH_DCMI (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_CRYP (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_AES (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_HASH (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_OTGFS (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB2_GRP1_DisableClock(uint32_t Periphs) +{ + CLEAR_BIT(RCC->AHB2ENR, Periphs); +} + +/** + * @brief Force AHB2 peripherals reset. 
+ * @rmtoll AHB2RSTR DCMIRST LL_AHB2_GRP1_ForceReset\n + * AHB2RSTR CRYPRST LL_AHB2_GRP1_ForceReset\n + * AHB2RSTR AESRST LL_AHB2_GRP1_ForceReset\n + * AHB2RSTR HASHRST LL_AHB2_GRP1_ForceReset\n + * AHB2RSTR RNGRST LL_AHB2_GRP1_ForceReset\n + * AHB2RSTR OTGFSRST LL_AHB2_GRP1_ForceReset + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB2_GRP1_PERIPH_ALL + * @arg @ref LL_AHB2_GRP1_PERIPH_DCMI (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_CRYP (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_AES (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_HASH (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_OTGFS (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB2_GRP1_ForceReset(uint32_t Periphs) +{ + SET_BIT(RCC->AHB2RSTR, Periphs); +} + +/** + * @brief Release AHB2 peripherals reset. + * @rmtoll AHB2RSTR DCMIRST LL_AHB2_GRP1_ReleaseReset\n + * AHB2RSTR CRYPRST LL_AHB2_GRP1_ReleaseReset\n + * AHB2RSTR AESRST LL_AHB2_GRP1_ReleaseReset\n + * AHB2RSTR HASHRST LL_AHB2_GRP1_ReleaseReset\n + * AHB2RSTR RNGRST LL_AHB2_GRP1_ReleaseReset\n + * AHB2RSTR OTGFSRST LL_AHB2_GRP1_ReleaseReset + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB2_GRP1_PERIPH_ALL + * @arg @ref LL_AHB2_GRP1_PERIPH_DCMI (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_CRYP (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_AES (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_HASH (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_OTGFS (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB2_GRP1_ReleaseReset(uint32_t Periphs) +{ + CLEAR_BIT(RCC->AHB2RSTR, Periphs); +} + +/** + * @brief Enable AHB2 peripheral clocks in low-power mode + * @rmtoll AHB2LPENR DCMILPEN LL_AHB2_GRP1_EnableClockLowPower\n + * AHB2LPENR CRYPLPEN LL_AHB2_GRP1_EnableClockLowPower\n + * AHB2LPENR AESLPEN LL_AHB2_GRP1_EnableClockLowPower\n + * AHB2LPENR HASHLPEN LL_AHB2_GRP1_EnableClockLowPower\n + * AHB2LPENR RNGLPEN LL_AHB2_GRP1_EnableClockLowPower\n + * AHB2LPENR OTGFSLPEN LL_AHB2_GRP1_EnableClockLowPower + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB2_GRP1_PERIPH_DCMI (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_CRYP (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_AES (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_HASH (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_OTGFS (*) + * + * (*) value not defined in all devices. 
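+ * @note   Illustrative sketch: on devices featuring the USB OTG FS controller,
+ *         keeping its clock enabled in Sleep mode lets the controller keep
+ *         running so its interrupts can wake the CPU:
+ *           LL_AHB2_GRP1_EnableClockLowPower(LL_AHB2_GRP1_PERIPH_OTGFS);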
+ * @retval None + */ +__STATIC_INLINE void LL_AHB2_GRP1_EnableClockLowPower(uint32_t Periphs) +{ + __IO uint32_t tmpreg; + SET_BIT(RCC->AHB2LPENR, Periphs); + /* Delay after an RCC peripheral clock enabling */ + tmpreg = READ_BIT(RCC->AHB2LPENR, Periphs); + (void)tmpreg; +} + +/** + * @brief Disable AHB2 peripheral clocks in low-power mode + * @rmtoll AHB2LPENR DCMILPEN LL_AHB2_GRP1_DisableClockLowPower\n + * AHB2LPENR CRYPLPEN LL_AHB2_GRP1_DisableClockLowPower\n + * AHB2LPENR AESLPEN LL_AHB2_GRP1_DisableClockLowPower\n + * AHB2LPENR HASHLPEN LL_AHB2_GRP1_DisableClockLowPower\n + * AHB2LPENR RNGLPEN LL_AHB2_GRP1_DisableClockLowPower\n + * AHB2LPENR OTGFSLPEN LL_AHB2_GRP1_DisableClockLowPower + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB2_GRP1_PERIPH_DCMI (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_CRYP (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_AES (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_HASH (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_OTGFS (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB2_GRP1_DisableClockLowPower(uint32_t Periphs) +{ + CLEAR_BIT(RCC->AHB2LPENR, Periphs); +} + +/** + * @} + */ +#endif /* RCC_AHB2_SUPPORT */ + +#if defined(RCC_AHB3_SUPPORT) +/** @defgroup BUS_LL_EF_AHB3 AHB3 + * @{ + */ + +/** + * @brief Enable AHB3 peripherals clock. + * @rmtoll AHB3ENR FMCEN LL_AHB3_GRP1_EnableClock\n + * AHB3ENR FSMCEN LL_AHB3_GRP1_EnableClock\n + * AHB3ENR QSPIEN LL_AHB3_GRP1_EnableClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB3_GRP1_PERIPH_FMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_FSMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_QSPI (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB3_GRP1_EnableClock(uint32_t Periphs) +{ + __IO uint32_t tmpreg; + SET_BIT(RCC->AHB3ENR, Periphs); + /* Delay after an RCC peripheral clock enabling */ + tmpreg = READ_BIT(RCC->AHB3ENR, Periphs); + (void)tmpreg; +} + +/** + * @brief Check if AHB3 peripheral clock is enabled or not + * @rmtoll AHB3ENR FMCEN LL_AHB3_GRP1_IsEnabledClock\n + * AHB3ENR FSMCEN LL_AHB3_GRP1_IsEnabledClock\n + * AHB3ENR QSPIEN LL_AHB3_GRP1_IsEnabledClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB3_GRP1_PERIPH_FMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_FSMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_QSPI (*) + * + * (*) value not defined in all devices. + * @retval State of Periphs (1 or 0). + */ +__STATIC_INLINE uint32_t LL_AHB3_GRP1_IsEnabledClock(uint32_t Periphs) +{ + return (READ_BIT(RCC->AHB3ENR, Periphs) == Periphs); +} + +/** + * @brief Disable AHB3 peripherals clock. + * @rmtoll AHB3ENR FMCEN LL_AHB3_GRP1_DisableClock\n + * AHB3ENR FSMCEN LL_AHB3_GRP1_DisableClock\n + * AHB3ENR QSPIEN LL_AHB3_GRP1_DisableClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB3_GRP1_PERIPH_FMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_FSMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_QSPI (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB3_GRP1_DisableClock(uint32_t Periphs) +{ + CLEAR_BIT(RCC->AHB3ENR, Periphs); +} + +/** + * @brief Force AHB3 peripherals reset. 
+ * @rmtoll AHB3RSTR FMCRST LL_AHB3_GRP1_ForceReset\n + * AHB3RSTR FSMCRST LL_AHB3_GRP1_ForceReset\n + * AHB3RSTR QSPIRST LL_AHB3_GRP1_ForceReset + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB3_GRP1_PERIPH_ALL + * @arg @ref LL_AHB3_GRP1_PERIPH_FMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_FSMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_QSPI (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB3_GRP1_ForceReset(uint32_t Periphs) +{ + SET_BIT(RCC->AHB3RSTR, Periphs); +} + +/** + * @brief Release AHB3 peripherals reset. + * @rmtoll AHB3RSTR FMCRST LL_AHB3_GRP1_ReleaseReset\n + * AHB3RSTR FSMCRST LL_AHB3_GRP1_ReleaseReset\n + * AHB3RSTR QSPIRST LL_AHB3_GRP1_ReleaseReset + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB2_GRP1_PERIPH_ALL + * @arg @ref LL_AHB3_GRP1_PERIPH_FMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_FSMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_QSPI (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB3_GRP1_ReleaseReset(uint32_t Periphs) +{ + CLEAR_BIT(RCC->AHB3RSTR, Periphs); +} + +/** + * @brief Enable AHB3 peripheral clocks in low-power mode + * @rmtoll AHB3LPENR FMCLPEN LL_AHB3_GRP1_EnableClockLowPower\n + * AHB3LPENR FSMCLPEN LL_AHB3_GRP1_EnableClockLowPower\n + * AHB3LPENR QSPILPEN LL_AHB3_GRP1_EnableClockLowPower + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB3_GRP1_PERIPH_FMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_FSMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_QSPI (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB3_GRP1_EnableClockLowPower(uint32_t Periphs) +{ + __IO uint32_t tmpreg; + SET_BIT(RCC->AHB3LPENR, Periphs); + /* Delay after an RCC peripheral clock enabling */ + tmpreg = READ_BIT(RCC->AHB3LPENR, Periphs); + (void)tmpreg; +} + +/** + * @brief Disable AHB3 peripheral clocks in low-power mode + * @rmtoll AHB3LPENR FMCLPEN LL_AHB3_GRP1_DisableClockLowPower\n + * AHB3LPENR FSMCLPEN LL_AHB3_GRP1_DisableClockLowPower\n + * AHB3LPENR QSPILPEN LL_AHB3_GRP1_DisableClockLowPower + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB3_GRP1_PERIPH_FMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_FSMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_QSPI (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB3_GRP1_DisableClockLowPower(uint32_t Periphs) +{ + CLEAR_BIT(RCC->AHB3LPENR, Periphs); +} + +/** + * @} + */ +#endif /* RCC_AHB3_SUPPORT */ + +/** @defgroup BUS_LL_EF_APB1 APB1 + * @{ + */ + +/** + * @brief Enable APB1 peripherals clock. 
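+ * @note   Several peripherals can be enabled in a single call by ORing their
+ *         masks; the function also reads the register back once as a short
+ *         delay after enabling the clock (see the function body), so the clock
+ *         is active before the peripheral is first accessed. Illustrative
+ *         sketch:
+ *           LL_APB1_GRP1_EnableClock(LL_APB1_GRP1_PERIPH_USART2
+ *                                    | LL_APB1_GRP1_PERIPH_I2C1);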
+ * @rmtoll APB1ENR TIM2EN LL_APB1_GRP1_EnableClock\n + * APB1ENR TIM3EN LL_APB1_GRP1_EnableClock\n + * APB1ENR TIM4EN LL_APB1_GRP1_EnableClock\n + * APB1ENR TIM5EN LL_APB1_GRP1_EnableClock\n + * APB1ENR TIM6EN LL_APB1_GRP1_EnableClock\n + * APB1ENR TIM7EN LL_APB1_GRP1_EnableClock\n + * APB1ENR TIM12EN LL_APB1_GRP1_EnableClock\n + * APB1ENR TIM13EN LL_APB1_GRP1_EnableClock\n + * APB1ENR TIM14EN LL_APB1_GRP1_EnableClock\n + * APB1ENR LPTIM1EN LL_APB1_GRP1_EnableClock\n + * APB1ENR WWDGEN LL_APB1_GRP1_EnableClock\n + * APB1ENR SPI2EN LL_APB1_GRP1_EnableClock\n + * APB1ENR SPI3EN LL_APB1_GRP1_EnableClock\n + * APB1ENR SPDIFRXEN LL_APB1_GRP1_EnableClock\n + * APB1ENR USART2EN LL_APB1_GRP1_EnableClock\n + * APB1ENR USART3EN LL_APB1_GRP1_EnableClock\n + * APB1ENR UART4EN LL_APB1_GRP1_EnableClock\n + * APB1ENR UART5EN LL_APB1_GRP1_EnableClock\n + * APB1ENR I2C1EN LL_APB1_GRP1_EnableClock\n + * APB1ENR I2C2EN LL_APB1_GRP1_EnableClock\n + * APB1ENR I2C3EN LL_APB1_GRP1_EnableClock\n + * APB1ENR FMPI2C1EN LL_APB1_GRP1_EnableClock\n + * APB1ENR CAN1EN LL_APB1_GRP1_EnableClock\n + * APB1ENR CAN2EN LL_APB1_GRP1_EnableClock\n + * APB1ENR CAN3EN LL_APB1_GRP1_EnableClock\n + * APB1ENR CECEN LL_APB1_GRP1_EnableClock\n + * APB1ENR PWREN LL_APB1_GRP1_EnableClock\n + * APB1ENR DACEN LL_APB1_GRP1_EnableClock\n + * APB1ENR UART7EN LL_APB1_GRP1_EnableClock\n + * APB1ENR UART8EN LL_APB1_GRP1_EnableClock\n + * APB1ENR RTCAPBEN LL_APB1_GRP1_EnableClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB1_GRP1_PERIPH_TIM2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM5 + * @arg @ref LL_APB1_GRP1_PERIPH_TIM6 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM12 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM13 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM14 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_LPTIM1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_WWDG + * @arg @ref LL_APB1_GRP1_PERIPH_SPI2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPI3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPDIFRX (*) + * @arg @ref LL_APB1_GRP1_PERIPH_USART2 + * @arg @ref LL_APB1_GRP1_PERIPH_USART3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART5 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_I2C1 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C2 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_FMPI2C1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CEC (*) + * @arg @ref LL_APB1_GRP1_PERIPH_PWR + * @arg @ref LL_APB1_GRP1_PERIPH_DAC1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART8 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_RTCAPB (*) + * + * (*) value not defined in all devices. 
+ * @retval None + */ +__STATIC_INLINE void LL_APB1_GRP1_EnableClock(uint32_t Periphs) +{ + __IO uint32_t tmpreg; + SET_BIT(RCC->APB1ENR, Periphs); + /* Delay after an RCC peripheral clock enabling */ + tmpreg = READ_BIT(RCC->APB1ENR, Periphs); + (void)tmpreg; +} + +/** + * @brief Check if APB1 peripheral clock is enabled or not + * @rmtoll APB1ENR TIM2EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR TIM3EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR TIM4EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR TIM5EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR TIM6EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR TIM7EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR TIM12EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR TIM13EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR TIM14EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR LPTIM1EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR WWDGEN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR SPI2EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR SPI3EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR SPDIFRXEN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR USART2EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR USART3EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR UART4EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR UART5EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR I2C1EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR I2C2EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR I2C3EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR FMPI2C1EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR CAN1EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR CAN2EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR CAN3EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR CECEN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR PWREN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR DACEN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR UART7EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR UART8EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR RTCAPBEN LL_APB1_GRP1_IsEnabledClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB1_GRP1_PERIPH_TIM2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM5 + * @arg @ref LL_APB1_GRP1_PERIPH_TIM6 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM12 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM13 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM14 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_LPTIM1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_WWDG + * @arg @ref LL_APB1_GRP1_PERIPH_SPI2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPI3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPDIFRX (*) + * @arg @ref LL_APB1_GRP1_PERIPH_USART2 + * @arg @ref LL_APB1_GRP1_PERIPH_USART3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART5 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_I2C1 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C2 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_FMPI2C1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CEC (*) + * @arg @ref LL_APB1_GRP1_PERIPH_PWR + * @arg @ref LL_APB1_GRP1_PERIPH_DAC1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART8 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_RTCAPB (*) + * + * (*) value not defined in all devices. + * @retval State of Periphs (1 or 0). + */ +__STATIC_INLINE uint32_t LL_APB1_GRP1_IsEnabledClock(uint32_t Periphs) +{ + return (READ_BIT(RCC->APB1ENR, Periphs) == Periphs); +} + +/** + * @brief Disable APB1 peripherals clock. 
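+ * @note   Gating the clock of an unused peripheral reduces power consumption;
+ *         the peripheral should normally be idle (or disabled) before its
+ *         clock is removed. Illustrative sketch:
+ *           LL_APB1_GRP1_DisableClock(LL_APB1_GRP1_PERIPH_I2C2);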
+ * @rmtoll APB1ENR TIM2EN LL_APB1_GRP1_DisableClock\n + * APB1ENR TIM3EN LL_APB1_GRP1_DisableClock\n + * APB1ENR TIM4EN LL_APB1_GRP1_DisableClock\n + * APB1ENR TIM5EN LL_APB1_GRP1_DisableClock\n + * APB1ENR TIM6EN LL_APB1_GRP1_DisableClock\n + * APB1ENR TIM7EN LL_APB1_GRP1_DisableClock\n + * APB1ENR TIM12EN LL_APB1_GRP1_DisableClock\n + * APB1ENR TIM13EN LL_APB1_GRP1_DisableClock\n + * APB1ENR TIM14EN LL_APB1_GRP1_DisableClock\n + * APB1ENR LPTIM1EN LL_APB1_GRP1_DisableClock\n + * APB1ENR WWDGEN LL_APB1_GRP1_DisableClock\n + * APB1ENR SPI2EN LL_APB1_GRP1_DisableClock\n + * APB1ENR SPI3EN LL_APB1_GRP1_DisableClock\n + * APB1ENR SPDIFRXEN LL_APB1_GRP1_DisableClock\n + * APB1ENR USART2EN LL_APB1_GRP1_DisableClock\n + * APB1ENR USART3EN LL_APB1_GRP1_DisableClock\n + * APB1ENR UART4EN LL_APB1_GRP1_DisableClock\n + * APB1ENR UART5EN LL_APB1_GRP1_DisableClock\n + * APB1ENR I2C1EN LL_APB1_GRP1_DisableClock\n + * APB1ENR I2C2EN LL_APB1_GRP1_DisableClock\n + * APB1ENR I2C3EN LL_APB1_GRP1_DisableClock\n + * APB1ENR FMPI2C1EN LL_APB1_GRP1_DisableClock\n + * APB1ENR CAN1EN LL_APB1_GRP1_DisableClock\n + * APB1ENR CAN2EN LL_APB1_GRP1_DisableClock\n + * APB1ENR CAN3EN LL_APB1_GRP1_DisableClock\n + * APB1ENR CECEN LL_APB1_GRP1_DisableClock\n + * APB1ENR PWREN LL_APB1_GRP1_DisableClock\n + * APB1ENR DACEN LL_APB1_GRP1_DisableClock\n + * APB1ENR UART7EN LL_APB1_GRP1_DisableClock\n + * APB1ENR UART8EN LL_APB1_GRP1_DisableClock\n + * APB1ENR RTCAPBEN LL_APB1_GRP1_DisableClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB1_GRP1_PERIPH_TIM2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM5 + * @arg @ref LL_APB1_GRP1_PERIPH_TIM6 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM12 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM13 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM14 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_LPTIM1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_WWDG + * @arg @ref LL_APB1_GRP1_PERIPH_SPI2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPI3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPDIFRX (*) + * @arg @ref LL_APB1_GRP1_PERIPH_USART2 + * @arg @ref LL_APB1_GRP1_PERIPH_USART3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART5 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_I2C1 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C2 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_FMPI2C1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CEC (*) + * @arg @ref LL_APB1_GRP1_PERIPH_PWR + * @arg @ref LL_APB1_GRP1_PERIPH_DAC1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART8 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_RTCAPB (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_APB1_GRP1_DisableClock(uint32_t Periphs) +{ + CLEAR_BIT(RCC->APB1ENR, Periphs); +} + +/** + * @brief Force APB1 peripherals reset. 
+ * @rmtoll APB1RSTR TIM2RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR TIM3RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR TIM4RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR TIM5RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR TIM6RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR TIM7RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR TIM12RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR TIM13RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR TIM14RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR LPTIM1RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR WWDGRST LL_APB1_GRP1_ForceReset\n + * APB1RSTR SPI2RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR SPI3RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR SPDIFRXRST LL_APB1_GRP1_ForceReset\n + * APB1RSTR USART2RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR USART3RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR UART4RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR UART5RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR I2C1RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR I2C2RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR I2C3RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR FMPI2C1RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR CAN1RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR CAN2RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR CAN3RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR CECRST LL_APB1_GRP1_ForceReset\n + * APB1RSTR PWRRST LL_APB1_GRP1_ForceReset\n + * APB1RSTR DACRST LL_APB1_GRP1_ForceReset\n + * APB1RSTR UART7RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR UART8RST LL_APB1_GRP1_ForceReset + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB1_GRP1_PERIPH_TIM2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM5 + * @arg @ref LL_APB1_GRP1_PERIPH_TIM6 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM12 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM13 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM14 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_LPTIM1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_WWDG + * @arg @ref LL_APB1_GRP1_PERIPH_SPI2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPI3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPDIFRX (*) + * @arg @ref LL_APB1_GRP1_PERIPH_USART2 + * @arg @ref LL_APB1_GRP1_PERIPH_USART3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART5 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_I2C1 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C2 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_FMPI2C1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CEC (*) + * @arg @ref LL_APB1_GRP1_PERIPH_PWR + * @arg @ref LL_APB1_GRP1_PERIPH_DAC1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART8 (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_APB1_GRP1_ForceReset(uint32_t Periphs) +{ + SET_BIT(RCC->APB1RSTR, Periphs); +} + +/** + * @brief Release APB1 peripherals reset. 
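+ * @note   Forcing and then releasing the reset returns the peripheral's
+ *         registers to their reset values without touching its clock enable
+ *         bit. Illustrative sketch restoring USART2 to its default state:
+ *           LL_APB1_GRP1_ForceReset(LL_APB1_GRP1_PERIPH_USART2);
+ *           LL_APB1_GRP1_ReleaseReset(LL_APB1_GRP1_PERIPH_USART2);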
+ * @rmtoll APB1RSTR TIM2RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR TIM3RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR TIM4RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR TIM5RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR TIM6RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR TIM7RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR TIM12RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR TIM13RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR TIM14RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR LPTIM1RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR WWDGRST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR SPI2RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR SPI3RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR SPDIFRXRST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR USART2RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR USART3RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR UART4RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR UART5RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR I2C1RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR I2C2RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR I2C3RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR FMPI2C1RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR CAN1RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR CAN2RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR CAN3RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR CECRST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR PWRRST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR DACRST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR UART7RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR UART8RST LL_APB1_GRP1_ReleaseReset + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB1_GRP1_PERIPH_TIM2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM5 + * @arg @ref LL_APB1_GRP1_PERIPH_TIM6 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM12 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM13 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM14 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_LPTIM1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_WWDG + * @arg @ref LL_APB1_GRP1_PERIPH_SPI2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPI3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPDIFRX (*) + * @arg @ref LL_APB1_GRP1_PERIPH_USART2 + * @arg @ref LL_APB1_GRP1_PERIPH_USART3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART5 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_I2C1 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C2 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_FMPI2C1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CEC (*) + * @arg @ref LL_APB1_GRP1_PERIPH_PWR + * @arg @ref LL_APB1_GRP1_PERIPH_DAC1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART8 (*) + * + * (*) value not defined in all devices. 
+ * @retval None + */ +__STATIC_INLINE void LL_APB1_GRP1_ReleaseReset(uint32_t Periphs) +{ + CLEAR_BIT(RCC->APB1RSTR, Periphs); +} + +/** + * @brief Enable APB1 peripheral clocks in low-power mode + * @rmtoll APB1LPENR TIM2LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR TIM3LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR TIM4LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR TIM5LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR TIM6LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR TIM7LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR TIM12LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR TIM13LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR TIM14LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR LPTIM1LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR WWDGLPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR SPI2LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR SPI3LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR SPDIFRXLPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR USART2LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR USART3LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR UART4LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR UART5LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR I2C1LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR I2C2LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR I2C3LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR FMPI2C1LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR CAN1LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR CAN2LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR CAN3LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR CECLPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR PWRLPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR DACLPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR UART7LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR UART8LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR RTCAPBLPEN LL_APB1_GRP1_EnableClockLowPower + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB1_GRP1_PERIPH_TIM2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM5 + * @arg @ref LL_APB1_GRP1_PERIPH_TIM6 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM12 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM13 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM14 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_LPTIM1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_WWDG + * @arg @ref LL_APB1_GRP1_PERIPH_SPI2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPI3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPDIFRX (*) + * @arg @ref LL_APB1_GRP1_PERIPH_USART2 + * @arg @ref LL_APB1_GRP1_PERIPH_USART3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART5 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_I2C1 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C2 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_FMPI2C1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CEC (*) + * @arg @ref LL_APB1_GRP1_PERIPH_PWR + * @arg @ref LL_APB1_GRP1_PERIPH_DAC1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART8 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_RTCAPB (*) + * + * (*) value not defined in all devices. 
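+ * @note   On STM32F4 devices the APB1LPENR bits are generally set after reset
+ *         (check the RCC register reset values in the reference manual), so in
+ *         practice this function is mostly used to re-enable a Sleep-mode clock
+ *         previously turned off with LL_APB1_GRP1_DisableClockLowPower, e.g.:
+ *           LL_APB1_GRP1_EnableClockLowPower(LL_APB1_GRP1_PERIPH_USART2);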
+ * @retval None + */ +__STATIC_INLINE void LL_APB1_GRP1_EnableClockLowPower(uint32_t Periphs) +{ + __IO uint32_t tmpreg; + SET_BIT(RCC->APB1LPENR, Periphs); + /* Delay after an RCC peripheral clock enabling */ + tmpreg = READ_BIT(RCC->APB1LPENR, Periphs); + (void)tmpreg; +} + +/** + * @brief Disable APB1 peripheral clocks in low-power mode + * @rmtoll APB1LPENR TIM2LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR TIM3LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR TIM4LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR TIM5LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR TIM6LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR TIM7LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR TIM12LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR TIM13LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR TIM14LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR LPTIM1LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR WWDGLPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR SPI2LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR SPI3LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR SPDIFRXLPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR USART2LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR USART3LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR UART4LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR UART5LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR I2C1LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR I2C2LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR I2C3LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR FMPI2C1LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR CAN1LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR CAN2LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR CAN3LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR CECLPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR PWRLPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR DACLPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR UART7LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR UART8LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR RTCAPBLPEN LL_APB1_GRP1_DisableClockLowPower + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB1_GRP1_PERIPH_TIM2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM5 + * @arg @ref LL_APB1_GRP1_PERIPH_TIM6 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM12 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM13 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM14 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_LPTIM1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_WWDG + * @arg @ref LL_APB1_GRP1_PERIPH_SPI2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPI3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPDIFRX (*) + * @arg @ref LL_APB1_GRP1_PERIPH_USART2 + * @arg @ref LL_APB1_GRP1_PERIPH_USART3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART5 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_I2C1 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C2 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_FMPI2C1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CEC (*) + * @arg @ref LL_APB1_GRP1_PERIPH_PWR + * @arg @ref LL_APB1_GRP1_PERIPH_DAC1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART8 
(*) + * @arg @ref LL_APB1_GRP1_PERIPH_RTCAPB (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_APB1_GRP1_DisableClockLowPower(uint32_t Periphs) +{ + CLEAR_BIT(RCC->APB1LPENR, Periphs); +} + +/** + * @} + */ + +/** @defgroup BUS_LL_EF_APB2 APB2 + * @{ + */ + +/** + * @brief Enable APB2 peripherals clock. + * @rmtoll APB2ENR TIM1EN LL_APB2_GRP1_EnableClock\n + * APB2ENR TIM8EN LL_APB2_GRP1_EnableClock\n + * APB2ENR USART1EN LL_APB2_GRP1_EnableClock\n + * APB2ENR USART6EN LL_APB2_GRP1_EnableClock\n + * APB2ENR UART9EN LL_APB2_GRP1_EnableClock\n + * APB2ENR UART10EN LL_APB2_GRP1_EnableClock\n + * APB2ENR ADC1EN LL_APB2_GRP1_EnableClock\n + * APB2ENR ADC2EN LL_APB2_GRP1_EnableClock\n + * APB2ENR ADC3EN LL_APB2_GRP1_EnableClock\n + * APB2ENR SDIOEN LL_APB2_GRP1_EnableClock\n + * APB2ENR SPI1EN LL_APB2_GRP1_EnableClock\n + * APB2ENR SPI4EN LL_APB2_GRP1_EnableClock\n + * APB2ENR SYSCFGEN LL_APB2_GRP1_EnableClock\n + * APB2ENR EXTITEN LL_APB2_GRP1_EnableClock\n + * APB2ENR TIM9EN LL_APB2_GRP1_EnableClock\n + * APB2ENR TIM10EN LL_APB2_GRP1_EnableClock\n + * APB2ENR TIM11EN LL_APB2_GRP1_EnableClock\n + * APB2ENR SPI5EN LL_APB2_GRP1_EnableClock\n + * APB2ENR SPI6EN LL_APB2_GRP1_EnableClock\n + * APB2ENR SAI1EN LL_APB2_GRP1_EnableClock\n + * APB2ENR SAI2EN LL_APB2_GRP1_EnableClock\n + * APB2ENR LTDCEN LL_APB2_GRP1_EnableClock\n + * APB2ENR DSIEN LL_APB2_GRP1_EnableClock\n + * APB2ENR DFSDM1EN LL_APB2_GRP1_EnableClock\n + * APB2ENR DFSDM2EN LL_APB2_GRP1_EnableClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB2_GRP1_PERIPH_TIM1 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM8 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_USART1 + * @arg @ref LL_APB2_GRP1_PERIPH_USART6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART9 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_ADC1 + * @arg @ref LL_APB2_GRP1_PERIPH_ADC2 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_ADC3 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SDIO (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI1 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI4 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SYSCFG + * @arg @ref LL_APB2_GRP1_PERIPH_EXTI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM9 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM11 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI5 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI2 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_LTDC (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DSI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM2 (*) + + * + * (*) value not defined in all devices. 
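+ * @note   Illustrative sketch: the SYSCFG clock must be enabled before the
+ *         SYSCFG registers (for instance the EXTI line source selection) are
+ *         accessed:
+ *           LL_APB2_GRP1_EnableClock(LL_APB2_GRP1_PERIPH_SYSCFG);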
+ * @retval None + */ +__STATIC_INLINE void LL_APB2_GRP1_EnableClock(uint32_t Periphs) +{ + __IO uint32_t tmpreg; + SET_BIT(RCC->APB2ENR, Periphs); + /* Delay after an RCC peripheral clock enabling */ + tmpreg = READ_BIT(RCC->APB2ENR, Periphs); + (void)tmpreg; +} + +/** + * @brief Check if APB2 peripheral clock is enabled or not + * @rmtoll APB2ENR TIM1EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR TIM8EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR USART1EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR USART6EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR UART9EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR UART10EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR ADC1EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR ADC2EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR ADC3EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR SDIOEN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR SPI1EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR SPI4EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR SYSCFGEN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR EXTITEN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR TIM9EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR TIM10EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR TIM11EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR SPI5EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR SPI6EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR SAI1EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR SAI2EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR LTDCEN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR DSIEN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR DFSDM1EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR DFSDM2EN LL_APB2_GRP1_IsEnabledClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB2_GRP1_PERIPH_TIM1 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM8 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_USART1 + * @arg @ref LL_APB2_GRP1_PERIPH_USART6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART9 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_ADC1 + * @arg @ref LL_APB2_GRP1_PERIPH_ADC2 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_ADC3 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SDIO (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI1 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI4 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SYSCFG + * @arg @ref LL_APB2_GRP1_PERIPH_EXTI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM9 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM11 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI5 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI2 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_LTDC (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DSI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM2 (*) + * + * (*) value not defined in all devices. + * @retval State of Periphs (1 or 0). + */ +__STATIC_INLINE uint32_t LL_APB2_GRP1_IsEnabledClock(uint32_t Periphs) +{ + return (READ_BIT(RCC->APB2ENR, Periphs) == Periphs); +} + +/** + * @brief Disable APB2 peripherals clock. 
+ * @rmtoll APB2ENR TIM1EN LL_APB2_GRP1_DisableClock\n + * APB2ENR TIM8EN LL_APB2_GRP1_DisableClock\n + * APB2ENR USART1EN LL_APB2_GRP1_DisableClock\n + * APB2ENR USART6EN LL_APB2_GRP1_DisableClock\n + * APB2ENR UART9EN LL_APB2_GRP1_DisableClock\n + * APB2ENR UART10EN LL_APB2_GRP1_DisableClock\n + * APB2ENR ADC1EN LL_APB2_GRP1_DisableClock\n + * APB2ENR ADC2EN LL_APB2_GRP1_DisableClock\n + * APB2ENR ADC3EN LL_APB2_GRP1_DisableClock\n + * APB2ENR SDIOEN LL_APB2_GRP1_DisableClock\n + * APB2ENR SPI1EN LL_APB2_GRP1_DisableClock\n + * APB2ENR SPI4EN LL_APB2_GRP1_DisableClock\n + * APB2ENR SYSCFGEN LL_APB2_GRP1_DisableClock\n + * APB2ENR EXTITEN LL_APB2_GRP1_DisableClock\n + * APB2ENR TIM9EN LL_APB2_GRP1_DisableClock\n + * APB2ENR TIM10EN LL_APB2_GRP1_DisableClock\n + * APB2ENR TIM11EN LL_APB2_GRP1_DisableClock\n + * APB2ENR SPI5EN LL_APB2_GRP1_DisableClock\n + * APB2ENR SPI6EN LL_APB2_GRP1_DisableClock\n + * APB2ENR SAI1EN LL_APB2_GRP1_DisableClock\n + * APB2ENR SAI2EN LL_APB2_GRP1_DisableClock\n + * APB2ENR LTDCEN LL_APB2_GRP1_DisableClock\n + * APB2ENR DSIEN LL_APB2_GRP1_DisableClock\n + * APB2ENR DFSDM1EN LL_APB2_GRP1_DisableClock\n + * APB2ENR DFSDM2EN LL_APB2_GRP1_DisableClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB2_GRP1_PERIPH_TIM1 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM8 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_USART1 + * @arg @ref LL_APB2_GRP1_PERIPH_USART6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART9 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_ADC1 + * @arg @ref LL_APB2_GRP1_PERIPH_ADC2 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_ADC3 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SDIO (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI1 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI4 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SYSCFG + * @arg @ref LL_APB2_GRP1_PERIPH_EXTI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM9 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM11 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI5 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI2 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_LTDC (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DSI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM2 (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_APB2_GRP1_DisableClock(uint32_t Periphs) +{ + CLEAR_BIT(RCC->APB2ENR, Periphs); +} + +/** + * @brief Force APB2 peripherals reset. 
+ * @rmtoll APB2RSTR TIM1RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR TIM8RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR USART1RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR USART6RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR UART9RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR UART10RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR ADCRST LL_APB2_GRP1_ForceReset\n + * APB2RSTR SDIORST LL_APB2_GRP1_ForceReset\n + * APB2RSTR SPI1RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR SPI4RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR SYSCFGRST LL_APB2_GRP1_ForceReset\n + * APB2RSTR TIM9RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR TIM10RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR TIM11RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR SPI5RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR SPI6RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR SAI1RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR SAI2RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR LTDCRST LL_APB2_GRP1_ForceReset\n + * APB2RSTR DSIRST LL_APB2_GRP1_ForceReset\n + * APB2RSTR DFSDM1RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR DFSDM2RST LL_APB2_GRP1_ForceReset + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB2_GRP1_PERIPH_ALL + * @arg @ref LL_APB2_GRP1_PERIPH_TIM1 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM8 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_USART1 + * @arg @ref LL_APB2_GRP1_PERIPH_USART6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART9 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_ADC + * @arg @ref LL_APB2_GRP1_PERIPH_SDIO (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI1 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI4 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SYSCFG + * @arg @ref LL_APB2_GRP1_PERIPH_TIM9 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM11 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI5 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI2 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_LTDC (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DSI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM2 (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_APB2_GRP1_ForceReset(uint32_t Periphs) +{ + SET_BIT(RCC->APB2RSTR, Periphs); +} + +/** + * @brief Release APB2 peripherals reset. 
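+ * @note   The ADC reset control is a single ADCRST bit: on devices with more
+ *         than one ADC, forcing LL_APB2_GRP1_PERIPH_ADC resets all ADC
+ *         instances at once. Illustrative sketch:
+ *           LL_APB2_GRP1_ForceReset(LL_APB2_GRP1_PERIPH_ADC);
+ *           LL_APB2_GRP1_ReleaseReset(LL_APB2_GRP1_PERIPH_ADC);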
+ * @rmtoll APB2RSTR TIM1RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR TIM8RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR USART1RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR USART6RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR UART9RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR UART10RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR ADCRST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR SDIORST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR SPI1RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR SPI4RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR SYSCFGRST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR TIM9RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR TIM10RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR TIM11RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR SPI5RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR SPI6RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR SAI1RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR SAI2RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR LTDCRST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR DSIRST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR DFSDM1RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR DFSDM2RST LL_APB2_GRP1_ReleaseReset + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB2_GRP1_PERIPH_ALL + * @arg @ref LL_APB2_GRP1_PERIPH_TIM1 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM8 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_USART1 + * @arg @ref LL_APB2_GRP1_PERIPH_USART6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART9 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_ADC + * @arg @ref LL_APB2_GRP1_PERIPH_SDIO (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI1 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI4 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SYSCFG + * @arg @ref LL_APB2_GRP1_PERIPH_EXTI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM9 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM11 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI5 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI2 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_LTDC (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DSI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM2 (*) + * + * (*) value not defined in all devices. 
+ * @retval None + */ +__STATIC_INLINE void LL_APB2_GRP1_ReleaseReset(uint32_t Periphs) +{ + CLEAR_BIT(RCC->APB2RSTR, Periphs); +} + +/** + * @brief Enable APB2 peripheral clocks in low-power mode + * @rmtoll APB2LPENR TIM1LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR TIM8LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR USART1LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR USART6LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR UART9LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR UART10LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR ADC1LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR ADC2LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR ADC3LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR SDIOLPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR SPI1LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR SPI4LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR SYSCFGLPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR EXTITLPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR TIM9LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR TIM10LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR TIM11LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR SPI5LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR SPI6LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR SAI1LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR SAI2LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR LTDCLPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR DSILPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR DFSDM1LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR DSILPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR DFSDM2LPEN LL_APB2_GRP1_EnableClockLowPower + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB2_GRP1_PERIPH_TIM1 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM8 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_USART1 + * @arg @ref LL_APB2_GRP1_PERIPH_USART6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART9 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_ADC1 + * @arg @ref LL_APB2_GRP1_PERIPH_ADC2 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_ADC3 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SDIO (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI1 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI4 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SYSCFG + * @arg @ref LL_APB2_GRP1_PERIPH_EXTI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM9 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM11 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI5 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI2 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_LTDC (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DSI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM2 (*) + * + * (*) value not defined in all devices. 
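+ * @note   Illustrative sketch: keeping TIM1 clocked in Sleep mode lets a PWM
+ *         output keep running while the CPU waits for an interrupt:
+ *           LL_APB2_GRP1_EnableClockLowPower(LL_APB2_GRP1_PERIPH_TIM1);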
+ * @retval None + */ +__STATIC_INLINE void LL_APB2_GRP1_EnableClockLowPower(uint32_t Periphs) +{ + __IO uint32_t tmpreg; + SET_BIT(RCC->APB2LPENR, Periphs); + /* Delay after an RCC peripheral clock enabling */ + tmpreg = READ_BIT(RCC->APB2LPENR, Periphs); + (void)tmpreg; +} + +/** + * @brief Disable APB2 peripheral clocks in low-power mode + * @rmtoll APB2LPENR TIM1LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR TIM8LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR USART1LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR USART6LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR UART9LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR UART10LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR ADC1LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR ADC2LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR ADC3LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR SDIOLPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR SPI1LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR SPI4LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR SYSCFGLPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR EXTITLPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR TIM9LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR TIM10LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR TIM11LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR SPI5LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR SPI6LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR SAI1LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR SAI2LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR LTDCLPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR DSILPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR DFSDM1LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR DSILPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR DFSDM2LPEN LL_APB2_GRP1_DisableClockLowPower + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB2_GRP1_PERIPH_TIM1 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM8 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_USART1 + * @arg @ref LL_APB2_GRP1_PERIPH_USART6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART9 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_ADC1 + * @arg @ref LL_APB2_GRP1_PERIPH_ADC2 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_ADC3 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SDIO (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI1 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI4 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SYSCFG + * @arg @ref LL_APB2_GRP1_PERIPH_EXTI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM9 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM11 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI5 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI2 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_LTDC (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DSI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM2 (*) + * + * (*) value not defined in all devices. 
+ * @retval None + */ +__STATIC_INLINE void LL_APB2_GRP1_DisableClockLowPower(uint32_t Periphs) +{ + CLEAR_BIT(RCC->APB2LPENR, Periphs); +} + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* defined(RCC) */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_LL_BUS_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_cortex.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_cortex.h new file mode 100644 index 0000000..9a183ea --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_cortex.h @@ -0,0 +1,647 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_cortex.h + * @author MCD Application Team + * @brief Header file of CORTEX LL module. + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + The LL CORTEX driver contains a set of generic APIs that can be + used by user: + (+) SYSTICK configuration used by LL_mDelay and LL_Init1msTick + functions + (+) Low power mode configuration (SCB register of Cortex-MCU) + (+) MPU API to configure and enable regions + (MPU services provided only on some devices) + (+) API to access to MCU info (CPUID register) + (+) API to enable fault handler (SHCSR accesses) + + @endverbatim + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_LL_CORTEX_H +#define __STM32F4xx_LL_CORTEX_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +/** @defgroup CORTEX_LL CORTEX + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ + +/* Private constants ---------------------------------------------------------*/ + +/* Private macros ------------------------------------------------------------*/ + +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ +/** @defgroup CORTEX_LL_Exported_Constants CORTEX Exported Constants + * @{ + */ + +/** @defgroup CORTEX_LL_EC_CLKSOURCE_HCLK SYSTICK Clock Source + * @{ + */ +#define LL_SYSTICK_CLKSOURCE_HCLK_DIV8 0x00000000U /*!< AHB clock divided by 8 selected as SysTick clock source.*/ +#define LL_SYSTICK_CLKSOURCE_HCLK SysTick_CTRL_CLKSOURCE_Msk /*!< AHB clock selected as SysTick clock source. 
*/ +/** + * @} + */ + +/** @defgroup CORTEX_LL_EC_FAULT Handler Fault type + * @{ + */ +#define LL_HANDLER_FAULT_USG SCB_SHCSR_USGFAULTENA_Msk /*!< Usage fault */ +#define LL_HANDLER_FAULT_BUS SCB_SHCSR_BUSFAULTENA_Msk /*!< Bus fault */ +#define LL_HANDLER_FAULT_MEM SCB_SHCSR_MEMFAULTENA_Msk /*!< Memory management fault */ +/** + * @} + */ + +#if __MPU_PRESENT + +/** @defgroup CORTEX_LL_EC_CTRL_HFNMI_PRIVDEF MPU Control + * @{ + */ +#define LL_MPU_CTRL_HFNMI_PRIVDEF_NONE 0x00000000U /*!< Disable NMI and privileged SW access */ +#define LL_MPU_CTRL_HARDFAULT_NMI MPU_CTRL_HFNMIENA_Msk /*!< Enables the operation of MPU during hard fault, NMI, and FAULTMASK handlers */ +#define LL_MPU_CTRL_PRIVILEGED_DEFAULT MPU_CTRL_PRIVDEFENA_Msk /*!< Enable privileged software access to default memory map */ +#define LL_MPU_CTRL_HFNMI_PRIVDEF (MPU_CTRL_HFNMIENA_Msk | MPU_CTRL_PRIVDEFENA_Msk) /*!< Enable NMI and privileged SW access */ +/** + * @} + */ + +/** @defgroup CORTEX_LL_EC_REGION MPU Region Number + * @{ + */ +#define LL_MPU_REGION_NUMBER0 0x00U /*!< REGION Number 0 */ +#define LL_MPU_REGION_NUMBER1 0x01U /*!< REGION Number 1 */ +#define LL_MPU_REGION_NUMBER2 0x02U /*!< REGION Number 2 */ +#define LL_MPU_REGION_NUMBER3 0x03U /*!< REGION Number 3 */ +#define LL_MPU_REGION_NUMBER4 0x04U /*!< REGION Number 4 */ +#define LL_MPU_REGION_NUMBER5 0x05U /*!< REGION Number 5 */ +#define LL_MPU_REGION_NUMBER6 0x06U /*!< REGION Number 6 */ +#define LL_MPU_REGION_NUMBER7 0x07U /*!< REGION Number 7 */ +/** + * @} + */ + +/** @defgroup CORTEX_LL_EC_REGION_SIZE MPU Region Size + * @{ + */ +#define LL_MPU_REGION_SIZE_32B (0x04U << MPU_RASR_SIZE_Pos) /*!< 32B Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_64B (0x05U << MPU_RASR_SIZE_Pos) /*!< 64B Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_128B (0x06U << MPU_RASR_SIZE_Pos) /*!< 128B Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_256B (0x07U << MPU_RASR_SIZE_Pos) /*!< 256B Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_512B (0x08U << MPU_RASR_SIZE_Pos) /*!< 512B Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_1KB (0x09U << MPU_RASR_SIZE_Pos) /*!< 1KB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_2KB (0x0AU << MPU_RASR_SIZE_Pos) /*!< 2KB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_4KB (0x0BU << MPU_RASR_SIZE_Pos) /*!< 4KB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_8KB (0x0CU << MPU_RASR_SIZE_Pos) /*!< 8KB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_16KB (0x0DU << MPU_RASR_SIZE_Pos) /*!< 16KB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_32KB (0x0EU << MPU_RASR_SIZE_Pos) /*!< 32KB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_64KB (0x0FU << MPU_RASR_SIZE_Pos) /*!< 64KB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_128KB (0x10U << MPU_RASR_SIZE_Pos) /*!< 128KB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_256KB (0x11U << MPU_RASR_SIZE_Pos) /*!< 256KB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_512KB (0x12U << MPU_RASR_SIZE_Pos) /*!< 512KB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_1MB (0x13U << MPU_RASR_SIZE_Pos) /*!< 1MB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_2MB (0x14U << MPU_RASR_SIZE_Pos) /*!< 2MB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_4MB (0x15U << MPU_RASR_SIZE_Pos) /*!< 4MB Size of the MPU protection region */ +#define 
LL_MPU_REGION_SIZE_8MB (0x16U << MPU_RASR_SIZE_Pos) /*!< 8MB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_16MB (0x17U << MPU_RASR_SIZE_Pos) /*!< 16MB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_32MB (0x18U << MPU_RASR_SIZE_Pos) /*!< 32MB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_64MB (0x19U << MPU_RASR_SIZE_Pos) /*!< 64MB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_128MB (0x1AU << MPU_RASR_SIZE_Pos) /*!< 128MB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_256MB (0x1BU << MPU_RASR_SIZE_Pos) /*!< 256MB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_512MB (0x1CU << MPU_RASR_SIZE_Pos) /*!< 512MB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_1GB (0x1DU << MPU_RASR_SIZE_Pos) /*!< 1GB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_2GB (0x1EU << MPU_RASR_SIZE_Pos) /*!< 2GB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_4GB (0x1FU << MPU_RASR_SIZE_Pos) /*!< 4GB Size of the MPU protection region */ +/** + * @} + */ + +/** @defgroup CORTEX_LL_EC_REGION_PRIVILEDGES MPU Region Privileges + * @{ + */ +#define LL_MPU_REGION_NO_ACCESS (0x00U << MPU_RASR_AP_Pos) /*!< No access*/ +#define LL_MPU_REGION_PRIV_RW (0x01U << MPU_RASR_AP_Pos) /*!< RW privileged (privileged access only)*/ +#define LL_MPU_REGION_PRIV_RW_URO (0x02U << MPU_RASR_AP_Pos) /*!< RW privileged - RO user (Write in a user program generates a fault) */ +#define LL_MPU_REGION_FULL_ACCESS (0x03U << MPU_RASR_AP_Pos) /*!< RW privileged & user (Full access) */ +#define LL_MPU_REGION_PRIV_RO (0x05U << MPU_RASR_AP_Pos) /*!< RO privileged (privileged read only)*/ +#define LL_MPU_REGION_PRIV_RO_URO (0x06U << MPU_RASR_AP_Pos) /*!< RO privileged & user (read only) */ +/** + * @} + */ + +/** @defgroup CORTEX_LL_EC_TEX MPU TEX Level + * @{ + */ +#define LL_MPU_TEX_LEVEL0 (0x00U << MPU_RASR_TEX_Pos) /*!< b000 for TEX bits */ +#define LL_MPU_TEX_LEVEL1 (0x01U << MPU_RASR_TEX_Pos) /*!< b001 for TEX bits */ +#define LL_MPU_TEX_LEVEL2 (0x02U << MPU_RASR_TEX_Pos) /*!< b010 for TEX bits */ +#define LL_MPU_TEX_LEVEL4 (0x04U << MPU_RASR_TEX_Pos) /*!< b100 for TEX bits */ +/** + * @} + */ + +/** @defgroup CORTEX_LL_EC_INSTRUCTION_ACCESS MPU Instruction Access + * @{ + */ +#define LL_MPU_INSTRUCTION_ACCESS_ENABLE 0x00U /*!< Instruction fetches enabled */ +#define LL_MPU_INSTRUCTION_ACCESS_DISABLE MPU_RASR_XN_Msk /*!< Instruction fetches disabled*/ +/** + * @} + */ + +/** @defgroup CORTEX_LL_EC_SHAREABLE_ACCESS MPU Shareable Access + * @{ + */ +#define LL_MPU_ACCESS_SHAREABLE MPU_RASR_S_Msk /*!< Shareable memory attribute */ +#define LL_MPU_ACCESS_NOT_SHAREABLE 0x00U /*!< Not Shareable memory attribute */ +/** + * @} + */ + +/** @defgroup CORTEX_LL_EC_CACHEABLE_ACCESS MPU Cacheable Access + * @{ + */ +#define LL_MPU_ACCESS_CACHEABLE MPU_RASR_C_Msk /*!< Cacheable memory attribute */ +#define LL_MPU_ACCESS_NOT_CACHEABLE 0x00U /*!< Not Cacheable memory attribute */ +/** + * @} + */ + +/** @defgroup CORTEX_LL_EC_BUFFERABLE_ACCESS MPU Bufferable Access + * @{ + */ +#define LL_MPU_ACCESS_BUFFERABLE MPU_RASR_B_Msk /*!< Bufferable memory attribute */ +#define LL_MPU_ACCESS_NOT_BUFFERABLE 0x00U /*!< Not Bufferable memory attribute */ +/** + * @} + */ +#endif /* __MPU_PRESENT */ +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup CORTEX_LL_Exported_Functions 
CORTEX Exported Functions + * @{ + */ + +/** @defgroup CORTEX_LL_EF_SYSTICK SYSTICK + * @{ + */ + +/** + * @brief This function checks if the Systick counter flag is active or not. + * @note It can be used in timeout function on application side. + * @rmtoll STK_CTRL COUNTFLAG LL_SYSTICK_IsActiveCounterFlag + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SYSTICK_IsActiveCounterFlag(void) +{ + return ((SysTick->CTRL & SysTick_CTRL_COUNTFLAG_Msk) == (SysTick_CTRL_COUNTFLAG_Msk)); +} + +/** + * @brief Configures the SysTick clock source + * @rmtoll STK_CTRL CLKSOURCE LL_SYSTICK_SetClkSource + * @param Source This parameter can be one of the following values: + * @arg @ref LL_SYSTICK_CLKSOURCE_HCLK_DIV8 + * @arg @ref LL_SYSTICK_CLKSOURCE_HCLK + * @retval None + */ +__STATIC_INLINE void LL_SYSTICK_SetClkSource(uint32_t Source) +{ + if (Source == LL_SYSTICK_CLKSOURCE_HCLK) + { + SET_BIT(SysTick->CTRL, LL_SYSTICK_CLKSOURCE_HCLK); + } + else + { + CLEAR_BIT(SysTick->CTRL, LL_SYSTICK_CLKSOURCE_HCLK); + } +} + +/** + * @brief Get the SysTick clock source + * @rmtoll STK_CTRL CLKSOURCE LL_SYSTICK_GetClkSource + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSTICK_CLKSOURCE_HCLK_DIV8 + * @arg @ref LL_SYSTICK_CLKSOURCE_HCLK + */ +__STATIC_INLINE uint32_t LL_SYSTICK_GetClkSource(void) +{ + return READ_BIT(SysTick->CTRL, LL_SYSTICK_CLKSOURCE_HCLK); +} + +/** + * @brief Enable SysTick exception request + * @rmtoll STK_CTRL TICKINT LL_SYSTICK_EnableIT + * @retval None + */ +__STATIC_INLINE void LL_SYSTICK_EnableIT(void) +{ + SET_BIT(SysTick->CTRL, SysTick_CTRL_TICKINT_Msk); +} + +/** + * @brief Disable SysTick exception request + * @rmtoll STK_CTRL TICKINT LL_SYSTICK_DisableIT + * @retval None + */ +__STATIC_INLINE void LL_SYSTICK_DisableIT(void) +{ + CLEAR_BIT(SysTick->CTRL, SysTick_CTRL_TICKINT_Msk); +} + +/** + * @brief Checks if the SYSTICK interrupt is enabled or disabled. + * @rmtoll STK_CTRL TICKINT LL_SYSTICK_IsEnabledIT + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SYSTICK_IsEnabledIT(void) +{ + return (READ_BIT(SysTick->CTRL, SysTick_CTRL_TICKINT_Msk) == (SysTick_CTRL_TICKINT_Msk)); +} + +/** + * @} + */ + +/** @defgroup CORTEX_LL_EF_LOW_POWER_MODE LOW POWER MODE + * @{ + */ + +/** + * @brief Processor uses sleep as its low power mode + * @rmtoll SCB_SCR SLEEPDEEP LL_LPM_EnableSleep + * @retval None + */ +__STATIC_INLINE void LL_LPM_EnableSleep(void) +{ + /* Clear SLEEPDEEP bit of Cortex System Control Register */ + CLEAR_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SLEEPDEEP_Msk)); +} + +/** + * @brief Processor uses deep sleep as its low power mode + * @rmtoll SCB_SCR SLEEPDEEP LL_LPM_EnableDeepSleep + * @retval None + */ +__STATIC_INLINE void LL_LPM_EnableDeepSleep(void) +{ + /* Set SLEEPDEEP bit of Cortex System Control Register */ + SET_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SLEEPDEEP_Msk)); +} + +/** + * @brief Configures sleep-on-exit when returning from Handler mode to Thread mode. + * @note Setting this bit to 1 enables an interrupt-driven application to avoid returning to an + * empty main application. + * @rmtoll SCB_SCR SLEEPONEXIT LL_LPM_EnableSleepOnExit + * @retval None + */ +__STATIC_INLINE void LL_LPM_EnableSleepOnExit(void) +{ + /* Set SLEEPONEXIT bit of Cortex System Control Register */ + SET_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SLEEPONEXIT_Msk)); +} + +/** + * @brief Do not sleep when returning to Thread mode. 
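+ * @note  A minimal sketch of the sleep-on-exit pattern (assumes the __WFI() CMSIS intrinsic
+ *        and that the waking interrupts are already configured):
+ * @code
+ *   LL_LPM_EnableSleep();         // SLEEPDEEP cleared: plain Sleep mode
+ *   LL_LPM_EnableSleepOnExit();   // drop back to Sleep automatically after each ISR
+ *   __WFI();                      // enter Sleep until an interrupt occurs
+ *   // ...
+ *   LL_LPM_DisableSleepOnExit();  // stay in Thread mode after ISRs again
+ * @endcode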
+ * @rmtoll SCB_SCR SLEEPONEXIT LL_LPM_DisableSleepOnExit + * @retval None + */ +__STATIC_INLINE void LL_LPM_DisableSleepOnExit(void) +{ + /* Clear SLEEPONEXIT bit of Cortex System Control Register */ + CLEAR_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SLEEPONEXIT_Msk)); +} + +/** + * @brief Enabled events and all interrupts, including disabled interrupts, can wakeup the + * processor. + * @rmtoll SCB_SCR SEVEONPEND LL_LPM_EnableEventOnPend + * @retval None + */ +__STATIC_INLINE void LL_LPM_EnableEventOnPend(void) +{ + /* Set SEVEONPEND bit of Cortex System Control Register */ + SET_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SEVONPEND_Msk)); +} + +/** + * @brief Only enabled interrupts or events can wakeup the processor, disabled interrupts are + * excluded + * @rmtoll SCB_SCR SEVEONPEND LL_LPM_DisableEventOnPend + * @retval None + */ +__STATIC_INLINE void LL_LPM_DisableEventOnPend(void) +{ + /* Clear SEVEONPEND bit of Cortex System Control Register */ + CLEAR_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SEVONPEND_Msk)); +} + +/** + * @brief Clear pending events. + * @retval None + */ +__STATIC_INLINE void LL_LPM_ClearEvent(void) +{ + __SEV(); + __WFE(); +} + +/** + * @} + */ + +/** @defgroup CORTEX_LL_EF_HANDLER HANDLER + * @{ + */ + +/** + * @brief Enable a fault in System handler control register (SHCSR) + * @rmtoll SCB_SHCSR MEMFAULTENA LL_HANDLER_EnableFault + * @param Fault This parameter can be a combination of the following values: + * @arg @ref LL_HANDLER_FAULT_USG + * @arg @ref LL_HANDLER_FAULT_BUS + * @arg @ref LL_HANDLER_FAULT_MEM + * @retval None + */ +__STATIC_INLINE void LL_HANDLER_EnableFault(uint32_t Fault) +{ + /* Enable the system handler fault */ + SET_BIT(SCB->SHCSR, Fault); +} + +/** + * @brief Disable a fault in System handler control register (SHCSR) + * @rmtoll SCB_SHCSR MEMFAULTENA LL_HANDLER_DisableFault + * @param Fault This parameter can be a combination of the following values: + * @arg @ref LL_HANDLER_FAULT_USG + * @arg @ref LL_HANDLER_FAULT_BUS + * @arg @ref LL_HANDLER_FAULT_MEM + * @retval None + */ +__STATIC_INLINE void LL_HANDLER_DisableFault(uint32_t Fault) +{ + /* Disable the system handler fault */ + CLEAR_BIT(SCB->SHCSR, Fault); +} + +/** + * @} + */ + +/** @defgroup CORTEX_LL_EF_MCU_INFO MCU INFO + * @{ + */ + +/** + * @brief Get Implementer code + * @rmtoll SCB_CPUID IMPLEMENTER LL_CPUID_GetImplementer + * @retval Value should be equal to 0x41 for ARM + */ +__STATIC_INLINE uint32_t LL_CPUID_GetImplementer(void) +{ + return (uint32_t)(READ_BIT(SCB->CPUID, SCB_CPUID_IMPLEMENTER_Msk) >> SCB_CPUID_IMPLEMENTER_Pos); +} + +/** + * @brief Get Variant number (The r value in the rnpn product revision identifier) + * @rmtoll SCB_CPUID VARIANT LL_CPUID_GetVariant + * @retval Value between 0 and 255 (0x0: revision 0) + */ +__STATIC_INLINE uint32_t LL_CPUID_GetVariant(void) +{ + return (uint32_t)(READ_BIT(SCB->CPUID, SCB_CPUID_VARIANT_Msk) >> SCB_CPUID_VARIANT_Pos); +} + +/** + * @brief Get Constant number + * @rmtoll SCB_CPUID ARCHITECTURE LL_CPUID_GetConstant + * @retval Value should be equal to 0xF for Cortex-M4 devices + */ +__STATIC_INLINE uint32_t LL_CPUID_GetConstant(void) +{ + return (uint32_t)(READ_BIT(SCB->CPUID, SCB_CPUID_ARCHITECTURE_Msk) >> SCB_CPUID_ARCHITECTURE_Pos); +} + +/** + * @brief Get Part number + * @rmtoll SCB_CPUID PARTNO LL_CPUID_GetParNo + * @retval Value should be equal to 0xC24 for Cortex-M4 + */ +__STATIC_INLINE uint32_t LL_CPUID_GetParNo(void) +{ + return (uint32_t)(READ_BIT(SCB->CPUID, SCB_CPUID_PARTNO_Msk) >> SCB_CPUID_PARTNO_Pos); +} + +/** + * @brief Get 
Revision number (The p value in the rnpn product revision identifier, indicates patch release) + * @rmtoll SCB_CPUID REVISION LL_CPUID_GetRevision + * @retval Value between 0 and 255 (0x1: patch 1) + */ +__STATIC_INLINE uint32_t LL_CPUID_GetRevision(void) +{ + return (uint32_t)(READ_BIT(SCB->CPUID, SCB_CPUID_REVISION_Msk) >> SCB_CPUID_REVISION_Pos); +} + +/** + * @} + */ + +#if __MPU_PRESENT +/** @defgroup CORTEX_LL_EF_MPU MPU + * @{ + */ + +/** + * @brief Enable MPU with input options + * @rmtoll MPU_CTRL ENABLE LL_MPU_Enable + * @param Options This parameter can be one of the following values: + * @arg @ref LL_MPU_CTRL_HFNMI_PRIVDEF_NONE + * @arg @ref LL_MPU_CTRL_HARDFAULT_NMI + * @arg @ref LL_MPU_CTRL_PRIVILEGED_DEFAULT + * @arg @ref LL_MPU_CTRL_HFNMI_PRIVDEF + * @retval None + */ +__STATIC_INLINE void LL_MPU_Enable(uint32_t Options) +{ + /* Enable the MPU*/ + WRITE_REG(MPU->CTRL, (MPU_CTRL_ENABLE_Msk | Options)); + /* Ensure MPU settings take effects */ + __DSB(); + /* Sequence instruction fetches using update settings */ + __ISB(); +} + +/** + * @brief Disable MPU + * @rmtoll MPU_CTRL ENABLE LL_MPU_Disable + * @retval None + */ +__STATIC_INLINE void LL_MPU_Disable(void) +{ + /* Make sure outstanding transfers are done */ + __DMB(); + /* Disable MPU*/ + WRITE_REG(MPU->CTRL, 0U); +} + +/** + * @brief Check if MPU is enabled or not + * @rmtoll MPU_CTRL ENABLE LL_MPU_IsEnabled + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_MPU_IsEnabled(void) +{ + return (READ_BIT(MPU->CTRL, MPU_CTRL_ENABLE_Msk) == (MPU_CTRL_ENABLE_Msk)); +} + +/** + * @brief Enable a MPU region + * @rmtoll MPU_RASR ENABLE LL_MPU_EnableRegion + * @param Region This parameter can be one of the following values: + * @arg @ref LL_MPU_REGION_NUMBER0 + * @arg @ref LL_MPU_REGION_NUMBER1 + * @arg @ref LL_MPU_REGION_NUMBER2 + * @arg @ref LL_MPU_REGION_NUMBER3 + * @arg @ref LL_MPU_REGION_NUMBER4 + * @arg @ref LL_MPU_REGION_NUMBER5 + * @arg @ref LL_MPU_REGION_NUMBER6 + * @arg @ref LL_MPU_REGION_NUMBER7 + * @retval None + */ +__STATIC_INLINE void LL_MPU_EnableRegion(uint32_t Region) +{ + /* Set Region number */ + WRITE_REG(MPU->RNR, Region); + /* Enable the MPU region */ + SET_BIT(MPU->RASR, MPU_RASR_ENABLE_Msk); +} + +/** + * @brief Configure and enable a region + * @rmtoll MPU_RNR REGION LL_MPU_ConfigRegion\n + * MPU_RBAR REGION LL_MPU_ConfigRegion\n + * MPU_RBAR ADDR LL_MPU_ConfigRegion\n + * MPU_RASR XN LL_MPU_ConfigRegion\n + * MPU_RASR AP LL_MPU_ConfigRegion\n + * MPU_RASR S LL_MPU_ConfigRegion\n + * MPU_RASR C LL_MPU_ConfigRegion\n + * MPU_RASR B LL_MPU_ConfigRegion\n + * MPU_RASR SIZE LL_MPU_ConfigRegion + * @param Region This parameter can be one of the following values: + * @arg @ref LL_MPU_REGION_NUMBER0 + * @arg @ref LL_MPU_REGION_NUMBER1 + * @arg @ref LL_MPU_REGION_NUMBER2 + * @arg @ref LL_MPU_REGION_NUMBER3 + * @arg @ref LL_MPU_REGION_NUMBER4 + * @arg @ref LL_MPU_REGION_NUMBER5 + * @arg @ref LL_MPU_REGION_NUMBER6 + * @arg @ref LL_MPU_REGION_NUMBER7 + * @param Address Value of region base address + * @param SubRegionDisable Sub-region disable value between Min_Data = 0x00 and Max_Data = 0xFF + * @param Attributes This parameter can be a combination of the following values: + * @arg @ref LL_MPU_REGION_SIZE_32B or @ref LL_MPU_REGION_SIZE_64B or @ref LL_MPU_REGION_SIZE_128B or @ref LL_MPU_REGION_SIZE_256B or @ref LL_MPU_REGION_SIZE_512B + * or @ref LL_MPU_REGION_SIZE_1KB or @ref LL_MPU_REGION_SIZE_2KB or @ref LL_MPU_REGION_SIZE_4KB or @ref LL_MPU_REGION_SIZE_8KB or @ref LL_MPU_REGION_SIZE_16KB + * or 
@ref LL_MPU_REGION_SIZE_32KB or @ref LL_MPU_REGION_SIZE_64KB or @ref LL_MPU_REGION_SIZE_128KB or @ref LL_MPU_REGION_SIZE_256KB or @ref LL_MPU_REGION_SIZE_512KB + * or @ref LL_MPU_REGION_SIZE_1MB or @ref LL_MPU_REGION_SIZE_2MB or @ref LL_MPU_REGION_SIZE_4MB or @ref LL_MPU_REGION_SIZE_8MB or @ref LL_MPU_REGION_SIZE_16MB + * or @ref LL_MPU_REGION_SIZE_32MB or @ref LL_MPU_REGION_SIZE_64MB or @ref LL_MPU_REGION_SIZE_128MB or @ref LL_MPU_REGION_SIZE_256MB or @ref LL_MPU_REGION_SIZE_512MB + * or @ref LL_MPU_REGION_SIZE_1GB or @ref LL_MPU_REGION_SIZE_2GB or @ref LL_MPU_REGION_SIZE_4GB + * @arg @ref LL_MPU_REGION_NO_ACCESS or @ref LL_MPU_REGION_PRIV_RW or @ref LL_MPU_REGION_PRIV_RW_URO or @ref LL_MPU_REGION_FULL_ACCESS + * or @ref LL_MPU_REGION_PRIV_RO or @ref LL_MPU_REGION_PRIV_RO_URO + * @arg @ref LL_MPU_TEX_LEVEL0 or @ref LL_MPU_TEX_LEVEL1 or @ref LL_MPU_TEX_LEVEL2 or @ref LL_MPU_TEX_LEVEL4 + * @arg @ref LL_MPU_INSTRUCTION_ACCESS_ENABLE or @ref LL_MPU_INSTRUCTION_ACCESS_DISABLE + * @arg @ref LL_MPU_ACCESS_SHAREABLE or @ref LL_MPU_ACCESS_NOT_SHAREABLE + * @arg @ref LL_MPU_ACCESS_CACHEABLE or @ref LL_MPU_ACCESS_NOT_CACHEABLE + * @arg @ref LL_MPU_ACCESS_BUFFERABLE or @ref LL_MPU_ACCESS_NOT_BUFFERABLE + * @retval None + */ +__STATIC_INLINE void LL_MPU_ConfigRegion(uint32_t Region, uint32_t SubRegionDisable, uint32_t Address, uint32_t Attributes) +{ + /* Set Region number */ + WRITE_REG(MPU->RNR, Region); + /* Set base address */ + WRITE_REG(MPU->RBAR, (Address & 0xFFFFFFE0U)); + /* Configure MPU */ + WRITE_REG(MPU->RASR, (MPU_RASR_ENABLE_Msk | Attributes | (SubRegionDisable << MPU_RASR_SRD_Pos))); +} + +/** + * @brief Disable a region + * @rmtoll MPU_RNR REGION LL_MPU_DisableRegion\n + * MPU_RASR ENABLE LL_MPU_DisableRegion + * @param Region This parameter can be one of the following values: + * @arg @ref LL_MPU_REGION_NUMBER0 + * @arg @ref LL_MPU_REGION_NUMBER1 + * @arg @ref LL_MPU_REGION_NUMBER2 + * @arg @ref LL_MPU_REGION_NUMBER3 + * @arg @ref LL_MPU_REGION_NUMBER4 + * @arg @ref LL_MPU_REGION_NUMBER5 + * @arg @ref LL_MPU_REGION_NUMBER6 + * @arg @ref LL_MPU_REGION_NUMBER7 + * @retval None + */ +__STATIC_INLINE void LL_MPU_DisableRegion(uint32_t Region) +{ + /* Set Region number */ + WRITE_REG(MPU->RNR, Region); + /* Disable the MPU region */ + CLEAR_BIT(MPU->RASR, MPU_RASR_ENABLE_Msk); +} + +/** + * @} + */ + +#endif /* __MPU_PRESENT */ +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_LL_CORTEX_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_dma.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_dma.h new file mode 100644 index 0000000..c0182de --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_dma.h @@ -0,0 +1,2868 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_dma.h + * @author MCD Application Team + * @brief Header file of DMA LL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_LL_DMA_H +#define __STM32F4xx_LL_DMA_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +#if defined (DMA1) || defined (DMA2) + +/** @defgroup DMA_LL DMA + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/** @defgroup DMA_LL_Private_Variables DMA Private Variables + * @{ + */ +/* Array used to get the DMA stream register offset versus stream index LL_DMA_STREAM_x */ +static const uint8_t STREAM_OFFSET_TAB[] = +{ + (uint8_t)(DMA1_Stream0_BASE - DMA1_BASE), + (uint8_t)(DMA1_Stream1_BASE - DMA1_BASE), + (uint8_t)(DMA1_Stream2_BASE - DMA1_BASE), + (uint8_t)(DMA1_Stream3_BASE - DMA1_BASE), + (uint8_t)(DMA1_Stream4_BASE - DMA1_BASE), + (uint8_t)(DMA1_Stream5_BASE - DMA1_BASE), + (uint8_t)(DMA1_Stream6_BASE - DMA1_BASE), + (uint8_t)(DMA1_Stream7_BASE - DMA1_BASE) +}; + +/** + * @} + */ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup DMA_LL_Private_Constants DMA Private Constants + * @{ + */ +/** + * @} + */ + + +/* Private macros ------------------------------------------------------------*/ +/* Exported types ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup DMA_LL_ES_INIT DMA Exported Init structure + * @{ + */ +typedef struct +{ + uint32_t PeriphOrM2MSrcAddress; /*!< Specifies the peripheral base address for DMA transfer + or as Source base address in case of memory to memory transfer direction. + + This parameter must be a value between Min_Data = 0 and Max_Data = 0xFFFFFFFF. */ + + uint32_t MemoryOrM2MDstAddress; /*!< Specifies the memory base address for DMA transfer + or as Destination base address in case of memory to memory transfer direction. + + This parameter must be a value between Min_Data = 0 and Max_Data = 0xFFFFFFFF. */ + + uint32_t Direction; /*!< Specifies if the data will be transferred from memory to peripheral, + from memory to memory or from peripheral to memory. + This parameter can be a value of @ref DMA_LL_EC_DIRECTION + + This feature can be modified afterwards using unitary function @ref LL_DMA_SetDataTransferDirection(). */ + + uint32_t Mode; /*!< Specifies the normal or circular operation mode. + This parameter can be a value of @ref DMA_LL_EC_MODE + @note The circular buffer mode cannot be used if the memory to memory + data transfer direction is configured on the selected Stream + + This feature can be modified afterwards using unitary function @ref LL_DMA_SetMode(). */ + + uint32_t PeriphOrM2MSrcIncMode; /*!< Specifies whether the Peripheral address or Source address in case of memory to memory transfer direction + is incremented or not. + This parameter can be a value of @ref DMA_LL_EC_PERIPH + + This feature can be modified afterwards using unitary function @ref LL_DMA_SetPeriphIncMode(). */ + + uint32_t MemoryOrM2MDstIncMode; /*!< Specifies whether the Memory address or Destination address in case of memory to memory transfer direction + is incremented or not. 
+ This parameter can be a value of @ref DMA_LL_EC_MEMORY + + This feature can be modified afterwards using unitary function @ref LL_DMA_SetMemoryIncMode(). */ + + uint32_t PeriphOrM2MSrcDataSize; /*!< Specifies the Peripheral data size alignment or Source data size alignment (byte, half word, word) + in case of memory to memory transfer direction. + This parameter can be a value of @ref DMA_LL_EC_PDATAALIGN + + This feature can be modified afterwards using unitary function @ref LL_DMA_SetPeriphSize(). */ + + uint32_t MemoryOrM2MDstDataSize; /*!< Specifies the Memory data size alignment or Destination data size alignment (byte, half word, word) + in case of memory to memory transfer direction. + This parameter can be a value of @ref DMA_LL_EC_MDATAALIGN + + This feature can be modified afterwards using unitary function @ref LL_DMA_SetMemorySize(). */ + + uint32_t NbData; /*!< Specifies the number of data to transfer, in data unit. + The data unit is equal to the source buffer configuration set in PeripheralSize + or MemorySize parameters depending on the transfer direction. + This parameter must be a value between Min_Data = 0 and Max_Data = 0x0000FFFF + + This feature can be modified afterwards using unitary function @ref LL_DMA_SetDataLength(). */ + + uint32_t Channel; /*!< Specifies the peripheral channel. + This parameter can be a value of @ref DMA_LL_EC_CHANNEL + + This feature can be modified afterwards using unitary function @ref LL_DMA_SetChannelSelection(). */ + + uint32_t Priority; /*!< Specifies the channel priority level. + This parameter can be a value of @ref DMA_LL_EC_PRIORITY + + This feature can be modified afterwards using unitary function @ref LL_DMA_SetStreamPriorityLevel(). */ + + uint32_t FIFOMode; /*!< Specifies if the FIFO mode or Direct mode will be used for the specified stream. + This parameter can be a value of @ref DMA_LL_FIFOMODE + @note The Direct mode (FIFO mode disabled) cannot be used if the + memory-to-memory data transfer is configured on the selected stream + + This feature can be modified afterwards using unitary functions @ref LL_DMA_EnableFifoMode() or @ref LL_DMA_DisableFifoMode(). */ + + uint32_t FIFOThreshold; /*!< Specifies the FIFO threshold level. + This parameter can be a value of @ref DMA_LL_EC_FIFOTHRESHOLD + + This feature can be modified afterwards using unitary function @ref LL_DMA_SetFIFOThreshold(). */ + + uint32_t MemBurst; /*!< Specifies the Burst transfer configuration for the memory transfers. + It specifies the amount of data to be transferred in a single non interruptible + transaction. + This parameter can be a value of @ref DMA_LL_EC_MBURST + @note The burst mode is possible only if the address Increment mode is enabled. + + This feature can be modified afterwards using unitary function @ref LL_DMA_SetMemoryBurstxfer(). */ + + uint32_t PeriphBurst; /*!< Specifies the Burst transfer configuration for the peripheral transfers. + It specifies the amount of data to be transferred in a single non interruptible + transaction. + This parameter can be a value of @ref DMA_LL_EC_PBURST + @note The burst mode is possible only if the address Increment mode is enabled. + + This feature can be modified afterwards using unitary function @ref LL_DMA_SetPeriphBurstxfer(). 
*/ + +} LL_DMA_InitTypeDef; +/** + * @} + */ +#endif /*USE_FULL_LL_DRIVER*/ +/* Exported constants --------------------------------------------------------*/ +/** @defgroup DMA_LL_Exported_Constants DMA Exported Constants + * @{ + */ + +/** @defgroup DMA_LL_EC_STREAM STREAM + * @{ + */ +#define LL_DMA_STREAM_0 0x00000000U +#define LL_DMA_STREAM_1 0x00000001U +#define LL_DMA_STREAM_2 0x00000002U +#define LL_DMA_STREAM_3 0x00000003U +#define LL_DMA_STREAM_4 0x00000004U +#define LL_DMA_STREAM_5 0x00000005U +#define LL_DMA_STREAM_6 0x00000006U +#define LL_DMA_STREAM_7 0x00000007U +#define LL_DMA_STREAM_ALL 0xFFFF0000U +/** + * @} + */ + +/** @defgroup DMA_LL_EC_DIRECTION DIRECTION + * @{ + */ +#define LL_DMA_DIRECTION_PERIPH_TO_MEMORY 0x00000000U /*!< Peripheral to memory direction */ +#define LL_DMA_DIRECTION_MEMORY_TO_PERIPH DMA_SxCR_DIR_0 /*!< Memory to peripheral direction */ +#define LL_DMA_DIRECTION_MEMORY_TO_MEMORY DMA_SxCR_DIR_1 /*!< Memory to memory direction */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_MODE MODE + * @{ + */ +#define LL_DMA_MODE_NORMAL 0x00000000U /*!< Normal Mode */ +#define LL_DMA_MODE_CIRCULAR DMA_SxCR_CIRC /*!< Circular Mode */ +#define LL_DMA_MODE_PFCTRL DMA_SxCR_PFCTRL /*!< Peripheral flow control mode */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_DOUBLEBUFFER_MODE DOUBLEBUFFER MODE + * @{ + */ +#define LL_DMA_DOUBLEBUFFER_MODE_DISABLE 0x00000000U /*!< Disable double buffering mode */ +#define LL_DMA_DOUBLEBUFFER_MODE_ENABLE DMA_SxCR_DBM /*!< Enable double buffering mode */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_PERIPH PERIPH + * @{ + */ +#define LL_DMA_PERIPH_NOINCREMENT 0x00000000U /*!< Peripheral increment mode Disable */ +#define LL_DMA_PERIPH_INCREMENT DMA_SxCR_PINC /*!< Peripheral increment mode Enable */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_MEMORY MEMORY + * @{ + */ +#define LL_DMA_MEMORY_NOINCREMENT 0x00000000U /*!< Memory increment mode Disable */ +#define LL_DMA_MEMORY_INCREMENT DMA_SxCR_MINC /*!< Memory increment mode Enable */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_PDATAALIGN PDATAALIGN + * @{ + */ +#define LL_DMA_PDATAALIGN_BYTE 0x00000000U /*!< Peripheral data alignment : Byte */ +#define LL_DMA_PDATAALIGN_HALFWORD DMA_SxCR_PSIZE_0 /*!< Peripheral data alignment : HalfWord */ +#define LL_DMA_PDATAALIGN_WORD DMA_SxCR_PSIZE_1 /*!< Peripheral data alignment : Word */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_MDATAALIGN MDATAALIGN + * @{ + */ +#define LL_DMA_MDATAALIGN_BYTE 0x00000000U /*!< Memory data alignment : Byte */ +#define LL_DMA_MDATAALIGN_HALFWORD DMA_SxCR_MSIZE_0 /*!< Memory data alignment : HalfWord */ +#define LL_DMA_MDATAALIGN_WORD DMA_SxCR_MSIZE_1 /*!< Memory data alignment : Word */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_OFFSETSIZE OFFSETSIZE + * @{ + */ +#define LL_DMA_OFFSETSIZE_PSIZE 0x00000000U /*!< Peripheral increment offset size is linked to the PSIZE */ +#define LL_DMA_OFFSETSIZE_FIXEDTO4 DMA_SxCR_PINCOS /*!< Peripheral increment offset size is fixed to 4 (32-bit alignment) */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_PRIORITY PRIORITY + * @{ + */ +#define LL_DMA_PRIORITY_LOW 0x00000000U /*!< Priority level : Low */ +#define LL_DMA_PRIORITY_MEDIUM DMA_SxCR_PL_0 /*!< Priority level : Medium */ +#define LL_DMA_PRIORITY_HIGH DMA_SxCR_PL_1 /*!< Priority level : High */ +#define LL_DMA_PRIORITY_VERYHIGH DMA_SxCR_PL /*!< Priority level : Very_High */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_CHANNEL CHANNEL + * @{ + */ +#define LL_DMA_CHANNEL_0 0x00000000U /* Select Channel0 of DMA Instance */ +#define 
LL_DMA_CHANNEL_1 DMA_SxCR_CHSEL_0 /* Select Channel1 of DMA Instance */ +#define LL_DMA_CHANNEL_2 DMA_SxCR_CHSEL_1 /* Select Channel2 of DMA Instance */ +#define LL_DMA_CHANNEL_3 (DMA_SxCR_CHSEL_0 | DMA_SxCR_CHSEL_1) /* Select Channel3 of DMA Instance */ +#define LL_DMA_CHANNEL_4 DMA_SxCR_CHSEL_2 /* Select Channel4 of DMA Instance */ +#define LL_DMA_CHANNEL_5 (DMA_SxCR_CHSEL_2 | DMA_SxCR_CHSEL_0) /* Select Channel5 of DMA Instance */ +#define LL_DMA_CHANNEL_6 (DMA_SxCR_CHSEL_2 | DMA_SxCR_CHSEL_1) /* Select Channel6 of DMA Instance */ +#define LL_DMA_CHANNEL_7 (DMA_SxCR_CHSEL_2 | DMA_SxCR_CHSEL_1 | DMA_SxCR_CHSEL_0) /* Select Channel7 of DMA Instance */ +#if defined (DMA_SxCR_CHSEL_3) +#define LL_DMA_CHANNEL_8 DMA_SxCR_CHSEL_3 /* Select Channel8 of DMA Instance */ +#define LL_DMA_CHANNEL_9 (DMA_SxCR_CHSEL_3 | DMA_SxCR_CHSEL_0) /* Select Channel9 of DMA Instance */ +#define LL_DMA_CHANNEL_10 (DMA_SxCR_CHSEL_3 | DMA_SxCR_CHSEL_1) /* Select Channel10 of DMA Instance */ +#define LL_DMA_CHANNEL_11 (DMA_SxCR_CHSEL_3 | DMA_SxCR_CHSEL_1 | DMA_SxCR_CHSEL_0) /* Select Channel11 of DMA Instance */ +#define LL_DMA_CHANNEL_12 (DMA_SxCR_CHSEL_3 | DMA_SxCR_CHSEL_2) /* Select Channel12 of DMA Instance */ +#define LL_DMA_CHANNEL_13 (DMA_SxCR_CHSEL_3 | DMA_SxCR_CHSEL_2 | DMA_SxCR_CHSEL_0) /* Select Channel13 of DMA Instance */ +#define LL_DMA_CHANNEL_14 (DMA_SxCR_CHSEL_3 | DMA_SxCR_CHSEL_2 | DMA_SxCR_CHSEL_1) /* Select Channel14 of DMA Instance */ +#define LL_DMA_CHANNEL_15 (DMA_SxCR_CHSEL_3 | DMA_SxCR_CHSEL_2 | DMA_SxCR_CHSEL_1 | DMA_SxCR_CHSEL_0) /* Select Channel15 of DMA Instance */ +#endif /* DMA_SxCR_CHSEL_3 */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_MBURST MBURST + * @{ + */ +#define LL_DMA_MBURST_SINGLE 0x00000000U /*!< Memory burst single transfer configuration */ +#define LL_DMA_MBURST_INC4 DMA_SxCR_MBURST_0 /*!< Memory burst of 4 beats transfer configuration */ +#define LL_DMA_MBURST_INC8 DMA_SxCR_MBURST_1 /*!< Memory burst of 8 beats transfer configuration */ +#define LL_DMA_MBURST_INC16 (DMA_SxCR_MBURST_0 | DMA_SxCR_MBURST_1) /*!< Memory burst of 16 beats transfer configuration */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_PBURST PBURST + * @{ + */ +#define LL_DMA_PBURST_SINGLE 0x00000000U /*!< Peripheral burst single transfer configuration */ +#define LL_DMA_PBURST_INC4 DMA_SxCR_PBURST_0 /*!< Peripheral burst of 4 beats transfer configuration */ +#define LL_DMA_PBURST_INC8 DMA_SxCR_PBURST_1 /*!< Peripheral burst of 8 beats transfer configuration */ +#define LL_DMA_PBURST_INC16 (DMA_SxCR_PBURST_0 | DMA_SxCR_PBURST_1) /*!< Peripheral burst of 16 beats transfer configuration */ +/** + * @} + */ + +/** @defgroup DMA_LL_FIFOMODE DMA_LL_FIFOMODE + * @{ + */ +#define LL_DMA_FIFOMODE_DISABLE 0x00000000U /*!< FIFO mode disable (direct mode is enabled) */ +#define LL_DMA_FIFOMODE_ENABLE DMA_SxFCR_DMDIS /*!< FIFO mode enable */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_FIFOSTATUS_0 FIFOSTATUS 0 + * @{ + */ +#define LL_DMA_FIFOSTATUS_0_25 0x00000000U /*!< 0 < fifo_level < 1/4 */ +#define LL_DMA_FIFOSTATUS_25_50 DMA_SxFCR_FS_0 /*!< 1/4 < fifo_level < 1/2 */ +#define LL_DMA_FIFOSTATUS_50_75 DMA_SxFCR_FS_1 /*!< 1/2 < fifo_level < 3/4 */ +#define LL_DMA_FIFOSTATUS_75_100 (DMA_SxFCR_FS_1 | DMA_SxFCR_FS_0) /*!< 3/4 < fifo_level < full */ +#define LL_DMA_FIFOSTATUS_EMPTY DMA_SxFCR_FS_2 /*!< FIFO is empty */ +#define LL_DMA_FIFOSTATUS_FULL (DMA_SxFCR_FS_2 | DMA_SxFCR_FS_0) /*!< FIFO is full */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_FIFOTHRESHOLD FIFOTHRESHOLD + * @{ + */ +#define 
LL_DMA_FIFOTHRESHOLD_1_4 0x00000000U /*!< FIFO threshold 1 quart full configuration */ +#define LL_DMA_FIFOTHRESHOLD_1_2 DMA_SxFCR_FTH_0 /*!< FIFO threshold half full configuration */ +#define LL_DMA_FIFOTHRESHOLD_3_4 DMA_SxFCR_FTH_1 /*!< FIFO threshold 3 quarts full configuration */ +#define LL_DMA_FIFOTHRESHOLD_FULL DMA_SxFCR_FTH /*!< FIFO threshold full configuration */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_CURRENTTARGETMEM CURRENTTARGETMEM + * @{ + */ +#define LL_DMA_CURRENTTARGETMEM0 0x00000000U /*!< Set CurrentTarget Memory to Memory 0 */ +#define LL_DMA_CURRENTTARGETMEM1 DMA_SxCR_CT /*!< Set CurrentTarget Memory to Memory 1 */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup DMA_LL_Exported_Macros DMA Exported Macros + * @{ + */ + +/** @defgroup DMA_LL_EM_WRITE_READ Common Write and read registers macros + * @{ + */ +/** + * @brief Write a value in DMA register + * @param __INSTANCE__ DMA Instance + * @param __REG__ Register to be written + * @param __VALUE__ Value to be written in the register + * @retval None + */ +#define LL_DMA_WriteReg(__INSTANCE__, __REG__, __VALUE__) WRITE_REG(__INSTANCE__->__REG__, (__VALUE__)) + +/** + * @brief Read a value in DMA register + * @param __INSTANCE__ DMA Instance + * @param __REG__ Register to be read + * @retval Register value + */ +#define LL_DMA_ReadReg(__INSTANCE__, __REG__) READ_REG(__INSTANCE__->__REG__) +/** + * @} + */ + +/** @defgroup DMA_LL_EM_CONVERT_DMAxCHANNELy Convert DMAxStreamy + * @{ + */ +/** + * @brief Convert DMAx_Streamy into DMAx + * @param __STREAM_INSTANCE__ DMAx_Streamy + * @retval DMAx + */ +#define __LL_DMA_GET_INSTANCE(__STREAM_INSTANCE__) \ +(((uint32_t)(__STREAM_INSTANCE__) > ((uint32_t)DMA1_Stream7)) ? DMA2 : DMA1) + +/** + * @brief Convert DMAx_Streamy into LL_DMA_STREAM_y + * @param __STREAM_INSTANCE__ DMAx_Streamy + * @retval LL_DMA_CHANNEL_y + */ +#define __LL_DMA_GET_STREAM(__STREAM_INSTANCE__) \ +(((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA1_Stream0)) ? LL_DMA_STREAM_0 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA2_Stream0)) ? LL_DMA_STREAM_0 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA1_Stream1)) ? LL_DMA_STREAM_1 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA2_Stream1)) ? LL_DMA_STREAM_1 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA1_Stream2)) ? LL_DMA_STREAM_2 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA2_Stream2)) ? LL_DMA_STREAM_2 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA1_Stream3)) ? LL_DMA_STREAM_3 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA2_Stream3)) ? LL_DMA_STREAM_3 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA1_Stream4)) ? LL_DMA_STREAM_4 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA2_Stream4)) ? LL_DMA_STREAM_4 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA1_Stream5)) ? LL_DMA_STREAM_5 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA2_Stream5)) ? LL_DMA_STREAM_5 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA1_Stream6)) ? LL_DMA_STREAM_6 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA2_Stream6)) ? 
LL_DMA_STREAM_6 : \ + LL_DMA_STREAM_7) + +/** + * @brief Convert DMA Instance DMAx and LL_DMA_STREAM_y into DMAx_Streamy + * @param __DMA_INSTANCE__ DMAx + * @param __STREAM__ LL_DMA_STREAM_y + * @retval DMAx_Streamy + */ +#define __LL_DMA_GET_STREAM_INSTANCE(__DMA_INSTANCE__, __STREAM__) \ +((((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA1)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_0))) ? DMA1_Stream0 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA2)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_0))) ? DMA2_Stream0 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA1)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_1))) ? DMA1_Stream1 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA2)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_1))) ? DMA2_Stream1 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA1)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_2))) ? DMA1_Stream2 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA2)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_2))) ? DMA2_Stream2 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA1)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_3))) ? DMA1_Stream3 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA2)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_3))) ? DMA2_Stream3 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA1)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_4))) ? DMA1_Stream4 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA2)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_4))) ? DMA2_Stream4 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA1)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_5))) ? DMA1_Stream5 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA2)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_5))) ? DMA2_Stream5 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA1)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_6))) ? DMA1_Stream6 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA2)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_6))) ? DMA2_Stream6 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA1)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_7))) ? DMA1_Stream7 : \ + DMA2_Stream7) + +/** + * @} + */ + +/** + * @} + */ + + +/* Exported functions --------------------------------------------------------*/ + /** @defgroup DMA_LL_Exported_Functions DMA Exported Functions + * @{ + */ + +/** @defgroup DMA_LL_EF_Configuration Configuration + * @{ + */ +/** + * @brief Enable DMA stream. + * @rmtoll CR EN LL_DMA_EnableStream + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_EnableStream(DMA_TypeDef *DMAx, uint32_t Stream) +{ + SET_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_EN); +} + +/** + * @brief Disable DMA stream. 
+ * @rmtoll CR EN LL_DMA_DisableStream + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_DisableStream(DMA_TypeDef *DMAx, uint32_t Stream) +{ + CLEAR_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_EN); +} + +/** + * @brief Check if DMA stream is enabled or disabled. + * @rmtoll CR EN LL_DMA_IsEnabledStream + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsEnabledStream(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_EN) == (DMA_SxCR_EN)); +} + +/** + * @brief Configure all parameters linked to DMA transfer. + * @rmtoll CR DIR LL_DMA_ConfigTransfer\n + * CR CIRC LL_DMA_ConfigTransfer\n + * CR PINC LL_DMA_ConfigTransfer\n + * CR MINC LL_DMA_ConfigTransfer\n + * CR PSIZE LL_DMA_ConfigTransfer\n + * CR MSIZE LL_DMA_ConfigTransfer\n + * CR PL LL_DMA_ConfigTransfer\n + * CR PFCTRL LL_DMA_ConfigTransfer + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param Configuration This parameter must be a combination of all the following values: + * @arg @ref LL_DMA_DIRECTION_PERIPH_TO_MEMORY or @ref LL_DMA_DIRECTION_MEMORY_TO_PERIPH or @ref LL_DMA_DIRECTION_MEMORY_TO_MEMORY + * @arg @ref LL_DMA_MODE_NORMAL or @ref LL_DMA_MODE_CIRCULAR or @ref LL_DMA_MODE_PFCTRL + * @arg @ref LL_DMA_PERIPH_INCREMENT or @ref LL_DMA_PERIPH_NOINCREMENT + * @arg @ref LL_DMA_MEMORY_INCREMENT or @ref LL_DMA_MEMORY_NOINCREMENT + * @arg @ref LL_DMA_PDATAALIGN_BYTE or @ref LL_DMA_PDATAALIGN_HALFWORD or @ref LL_DMA_PDATAALIGN_WORD + * @arg @ref LL_DMA_MDATAALIGN_BYTE or @ref LL_DMA_MDATAALIGN_HALFWORD or @ref LL_DMA_MDATAALIGN_WORD + * @arg @ref LL_DMA_PRIORITY_LOW or @ref LL_DMA_PRIORITY_MEDIUM or @ref LL_DMA_PRIORITY_HIGH or @ref LL_DMA_PRIORITY_VERYHIGH + *@retval None + */ +__STATIC_INLINE void LL_DMA_ConfigTransfer(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t Configuration) +{ + MODIFY_REG(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, + DMA_SxCR_DIR | DMA_SxCR_CIRC | DMA_SxCR_PINC | DMA_SxCR_MINC | DMA_SxCR_PSIZE | DMA_SxCR_MSIZE | DMA_SxCR_PL | DMA_SxCR_PFCTRL, + Configuration); +} + +/** + * @brief Set Data transfer direction (read from peripheral or from memory). 
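+ * @note  Illustrative sketch of a complete stream setup (memory-to-peripheral; the DMA instance,
+ *        stream, channel and data length below are arbitrary, and the peripheral/memory addresses
+ *        are configured separately):
+ * @code
+ *   LL_DMA_ConfigTransfer(DMA2, LL_DMA_STREAM_0,
+ *                         LL_DMA_DIRECTION_MEMORY_TO_PERIPH | LL_DMA_MODE_NORMAL |
+ *                         LL_DMA_PERIPH_NOINCREMENT | LL_DMA_MEMORY_INCREMENT |
+ *                         LL_DMA_PDATAALIGN_BYTE | LL_DMA_MDATAALIGN_BYTE |
+ *                         LL_DMA_PRIORITY_HIGH);
+ *   LL_DMA_SetChannelSelection(DMA2, LL_DMA_STREAM_0, LL_DMA_CHANNEL_3);
+ *   LL_DMA_SetDataLength(DMA2, LL_DMA_STREAM_0, 64U);
+ *   LL_DMA_EnableStream(DMA2, LL_DMA_STREAM_0);
+ * @endcode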
+ * @rmtoll CR DIR LL_DMA_SetDataTransferDirection + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param Direction This parameter can be one of the following values: + * @arg @ref LL_DMA_DIRECTION_PERIPH_TO_MEMORY + * @arg @ref LL_DMA_DIRECTION_MEMORY_TO_PERIPH + * @arg @ref LL_DMA_DIRECTION_MEMORY_TO_MEMORY + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetDataTransferDirection(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t Direction) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_DIR, Direction); +} + +/** + * @brief Get Data transfer direction (read from peripheral or from memory). + * @rmtoll CR DIR LL_DMA_GetDataTransferDirection + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_DIRECTION_PERIPH_TO_MEMORY + * @arg @ref LL_DMA_DIRECTION_MEMORY_TO_PERIPH + * @arg @ref LL_DMA_DIRECTION_MEMORY_TO_MEMORY + */ +__STATIC_INLINE uint32_t LL_DMA_GetDataTransferDirection(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_DIR)); +} + +/** + * @brief Set DMA mode normal, circular or peripheral flow control. + * @rmtoll CR CIRC LL_DMA_SetMode\n + * CR PFCTRL LL_DMA_SetMode + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param Mode This parameter can be one of the following values: + * @arg @ref LL_DMA_MODE_NORMAL + * @arg @ref LL_DMA_MODE_CIRCULAR + * @arg @ref LL_DMA_MODE_PFCTRL + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetMode(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t Mode) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_CIRC | DMA_SxCR_PFCTRL, Mode); +} + +/** + * @brief Get DMA mode normal, circular or peripheral flow control. 
+ * @rmtoll CR CIRC LL_DMA_GetMode\n + * CR PFCTRL LL_DMA_GetMode + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_MODE_NORMAL + * @arg @ref LL_DMA_MODE_CIRCULAR + * @arg @ref LL_DMA_MODE_PFCTRL + */ +__STATIC_INLINE uint32_t LL_DMA_GetMode(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_CIRC | DMA_SxCR_PFCTRL)); +} + +/** + * @brief Set Peripheral increment mode. + * @rmtoll CR PINC LL_DMA_SetPeriphIncMode + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param IncrementMode This parameter can be one of the following values: + * @arg @ref LL_DMA_PERIPH_NOINCREMENT + * @arg @ref LL_DMA_PERIPH_INCREMENT + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetPeriphIncMode(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t IncrementMode) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_PINC, IncrementMode); +} + +/** + * @brief Get Peripheral increment mode. + * @rmtoll CR PINC LL_DMA_GetPeriphIncMode + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_PERIPH_NOINCREMENT + * @arg @ref LL_DMA_PERIPH_INCREMENT + */ +__STATIC_INLINE uint32_t LL_DMA_GetPeriphIncMode(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_PINC)); +} + +/** + * @brief Set Memory increment mode. + * @rmtoll CR MINC LL_DMA_SetMemoryIncMode + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param IncrementMode This parameter can be one of the following values: + * @arg @ref LL_DMA_MEMORY_NOINCREMENT + * @arg @ref LL_DMA_MEMORY_INCREMENT + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetMemoryIncMode(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t IncrementMode) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_MINC, IncrementMode); +} + +/** + * @brief Get Memory increment mode. 
+ * @rmtoll CR MINC LL_DMA_GetMemoryIncMode + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_MEMORY_NOINCREMENT + * @arg @ref LL_DMA_MEMORY_INCREMENT + */ +__STATIC_INLINE uint32_t LL_DMA_GetMemoryIncMode(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_MINC)); +} + +/** + * @brief Set Peripheral size. + * @rmtoll CR PSIZE LL_DMA_SetPeriphSize + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param Size This parameter can be one of the following values: + * @arg @ref LL_DMA_PDATAALIGN_BYTE + * @arg @ref LL_DMA_PDATAALIGN_HALFWORD + * @arg @ref LL_DMA_PDATAALIGN_WORD + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetPeriphSize(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t Size) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_PSIZE, Size); +} + +/** + * @brief Get Peripheral size. + * @rmtoll CR PSIZE LL_DMA_GetPeriphSize + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_PDATAALIGN_BYTE + * @arg @ref LL_DMA_PDATAALIGN_HALFWORD + * @arg @ref LL_DMA_PDATAALIGN_WORD + */ +__STATIC_INLINE uint32_t LL_DMA_GetPeriphSize(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_PSIZE)); +} + +/** + * @brief Set Memory size. + * @rmtoll CR MSIZE LL_DMA_SetMemorySize + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param Size This parameter can be one of the following values: + * @arg @ref LL_DMA_MDATAALIGN_BYTE + * @arg @ref LL_DMA_MDATAALIGN_HALFWORD + * @arg @ref LL_DMA_MDATAALIGN_WORD + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetMemorySize(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t Size) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_MSIZE, Size); +} + +/** + * @brief Get Memory size. 
+ * @rmtoll CR MSIZE LL_DMA_GetMemorySize + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_MDATAALIGN_BYTE + * @arg @ref LL_DMA_MDATAALIGN_HALFWORD + * @arg @ref LL_DMA_MDATAALIGN_WORD + */ +__STATIC_INLINE uint32_t LL_DMA_GetMemorySize(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_MSIZE)); +} + +/** + * @brief Set Peripheral increment offset size. + * @rmtoll CR PINCOS LL_DMA_SetIncOffsetSize + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param OffsetSize This parameter can be one of the following values: + * @arg @ref LL_DMA_OFFSETSIZE_PSIZE + * @arg @ref LL_DMA_OFFSETSIZE_FIXEDTO4 + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetIncOffsetSize(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t OffsetSize) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_PINCOS, OffsetSize); +} + +/** + * @brief Get Peripheral increment offset size. + * @rmtoll CR PINCOS LL_DMA_GetIncOffsetSize + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_OFFSETSIZE_PSIZE + * @arg @ref LL_DMA_OFFSETSIZE_FIXEDTO4 + */ +__STATIC_INLINE uint32_t LL_DMA_GetIncOffsetSize(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_PINCOS)); +} + +/** + * @brief Set Stream priority level. + * @rmtoll CR PL LL_DMA_SetStreamPriorityLevel + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param Priority This parameter can be one of the following values: + * @arg @ref LL_DMA_PRIORITY_LOW + * @arg @ref LL_DMA_PRIORITY_MEDIUM + * @arg @ref LL_DMA_PRIORITY_HIGH + * @arg @ref LL_DMA_PRIORITY_VERYHIGH + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetStreamPriorityLevel(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t Priority) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_PL, Priority); +} + +/** + * @brief Get Stream priority level. 
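+ * @note  Minimal sketch (illustrative only; the instance and stream are arbitrary): raise the
+ *        software priority of a stream before enabling it.
+ * @code
+ * LL_DMA_SetStreamPriorityLevel(DMA2, LL_DMA_STREAM_0, LL_DMA_PRIORITY_HIGH);
+ * @endcode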
+ * @rmtoll CR PL LL_DMA_GetStreamPriorityLevel + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_PRIORITY_LOW + * @arg @ref LL_DMA_PRIORITY_MEDIUM + * @arg @ref LL_DMA_PRIORITY_HIGH + * @arg @ref LL_DMA_PRIORITY_VERYHIGH + */ +__STATIC_INLINE uint32_t LL_DMA_GetStreamPriorityLevel(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_PL)); +} + +/** + * @brief Set Number of data to transfer. + * @rmtoll NDTR NDT LL_DMA_SetDataLength + * @note This action has no effect if + * stream is enabled. + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param NbData Between 0 to 0xFFFFFFFF + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetDataLength(DMA_TypeDef* DMAx, uint32_t Stream, uint32_t NbData) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->NDTR, DMA_SxNDT, NbData); +} + +/** + * @brief Get Number of data to transfer. + * @rmtoll NDTR NDT LL_DMA_GetDataLength + * @note Once the stream is enabled, the return value indicate the + * remaining bytes to be transmitted. + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Between 0 to 0xFFFFFFFF + */ +__STATIC_INLINE uint32_t LL_DMA_GetDataLength(DMA_TypeDef* DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->NDTR, DMA_SxNDT)); +} + +/** + * @brief Select Channel number associated to the Stream. + * @rmtoll CR CHSEL LL_DMA_SetChannelSelection + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_DMA_CHANNEL_0 + * @arg @ref LL_DMA_CHANNEL_1 + * @arg @ref LL_DMA_CHANNEL_2 + * @arg @ref LL_DMA_CHANNEL_3 + * @arg @ref LL_DMA_CHANNEL_4 + * @arg @ref LL_DMA_CHANNEL_5 + * @arg @ref LL_DMA_CHANNEL_6 + * @arg @ref LL_DMA_CHANNEL_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetChannelSelection(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t Channel) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_CHSEL, Channel); +} + +/** + * @brief Get the Channel number associated to the Stream. 
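+ * @note  Usage sketch (added for illustration): the channel number that selects a given
+ *        peripheral request is device specific (see the request mapping table in the
+ *        reference manual); DMA1 stream 6 / channel 4 below is only an example. The data
+ *        length must be written while the stream is disabled.
+ * @code
+ * LL_DMA_SetChannelSelection(DMA1, LL_DMA_STREAM_6, LL_DMA_CHANNEL_4);
+ * LL_DMA_SetDataLength(DMA1, LL_DMA_STREAM_6, 64U);
+ * @endcode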
+ * @rmtoll CR CHSEL LL_DMA_GetChannelSelection + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_CHANNEL_0 + * @arg @ref LL_DMA_CHANNEL_1 + * @arg @ref LL_DMA_CHANNEL_2 + * @arg @ref LL_DMA_CHANNEL_3 + * @arg @ref LL_DMA_CHANNEL_4 + * @arg @ref LL_DMA_CHANNEL_5 + * @arg @ref LL_DMA_CHANNEL_6 + * @arg @ref LL_DMA_CHANNEL_7 + */ +__STATIC_INLINE uint32_t LL_DMA_GetChannelSelection(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_CHSEL)); +} + +/** + * @brief Set Memory burst transfer configuration. + * @rmtoll CR MBURST LL_DMA_SetMemoryBurstxfer + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param Mburst This parameter can be one of the following values: + * @arg @ref LL_DMA_MBURST_SINGLE + * @arg @ref LL_DMA_MBURST_INC4 + * @arg @ref LL_DMA_MBURST_INC8 + * @arg @ref LL_DMA_MBURST_INC16 + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetMemoryBurstxfer(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t Mburst) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_MBURST, Mburst); +} + +/** + * @brief Get Memory burst transfer configuration. + * @rmtoll CR MBURST LL_DMA_GetMemoryBurstxfer + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_MBURST_SINGLE + * @arg @ref LL_DMA_MBURST_INC4 + * @arg @ref LL_DMA_MBURST_INC8 + * @arg @ref LL_DMA_MBURST_INC16 + */ +__STATIC_INLINE uint32_t LL_DMA_GetMemoryBurstxfer(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_MBURST)); +} + +/** + * @brief Set Peripheral burst transfer configuration. 
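+ * @note  Illustrative sketch (instance and stream are arbitrary): 4-beat memory bursts with
+ *        single peripheral accesses. Burst transfers are only meaningful when the FIFO is
+ *        enabled (direct mode disabled).
+ * @code
+ * LL_DMA_SetMemoryBurstxfer(DMA2, LL_DMA_STREAM_0, LL_DMA_MBURST_INC4);
+ * LL_DMA_SetPeriphBurstxfer(DMA2, LL_DMA_STREAM_0, LL_DMA_PBURST_SINGLE);
+ * @endcode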
+ * @rmtoll CR PBURST LL_DMA_SetPeriphBurstxfer + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param Pburst This parameter can be one of the following values: + * @arg @ref LL_DMA_PBURST_SINGLE + * @arg @ref LL_DMA_PBURST_INC4 + * @arg @ref LL_DMA_PBURST_INC8 + * @arg @ref LL_DMA_PBURST_INC16 + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetPeriphBurstxfer(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t Pburst) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_PBURST, Pburst); +} + +/** + * @brief Get Peripheral burst transfer configuration. + * @rmtoll CR PBURST LL_DMA_GetPeriphBurstxfer + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_PBURST_SINGLE + * @arg @ref LL_DMA_PBURST_INC4 + * @arg @ref LL_DMA_PBURST_INC8 + * @arg @ref LL_DMA_PBURST_INC16 + */ +__STATIC_INLINE uint32_t LL_DMA_GetPeriphBurstxfer(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_PBURST)); +} + +/** + * @brief Set Current target (only in double buffer mode) to Memory 1 or Memory 0. + * @rmtoll CR CT LL_DMA_SetCurrentTargetMem + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param CurrentMemory This parameter can be one of the following values: + * @arg @ref LL_DMA_CURRENTTARGETMEM0 + * @arg @ref LL_DMA_CURRENTTARGETMEM1 + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetCurrentTargetMem(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t CurrentMemory) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_CT, CurrentMemory); +} + +/** + * @brief Get Current target (only in double buffer mode). + * @rmtoll CR CT LL_DMA_GetCurrentTargetMem + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_CURRENTTARGETMEM0 + * @arg @ref LL_DMA_CURRENTTARGETMEM1 + */ +__STATIC_INLINE uint32_t LL_DMA_GetCurrentTargetMem(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_CT)); +} + +/** + * @brief Enable the double buffer mode. 
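+ * @note  Double buffer setup sketch (added for illustration): buf0 and buf1 are hypothetical
+ *        application buffers, the instance and stream are arbitrary, and the stream is
+ *        assumed to be disabled while the addresses and mode are programmed.
+ * @code
+ * LL_DMA_SetMemoryAddress(DMA2, LL_DMA_STREAM_0, (uint32_t)buf0);
+ * LL_DMA_SetMemory1Address(DMA2, LL_DMA_STREAM_0, (uint32_t)buf1);
+ * LL_DMA_SetCurrentTargetMem(DMA2, LL_DMA_STREAM_0, LL_DMA_CURRENTTARGETMEM0);
+ * LL_DMA_EnableDoubleBufferMode(DMA2, LL_DMA_STREAM_0);
+ * @endcode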
+ * @rmtoll CR DBM LL_DMA_EnableDoubleBufferMode + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_EnableDoubleBufferMode(DMA_TypeDef *DMAx, uint32_t Stream) +{ + SET_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_DBM); +} + +/** + * @brief Disable the double buffer mode. + * @rmtoll CR DBM LL_DMA_DisableDoubleBufferMode + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_DisableDoubleBufferMode(DMA_TypeDef *DMAx, uint32_t Stream) +{ + CLEAR_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_DBM); +} + +/** + * @brief Get FIFO status. + * @rmtoll FCR FS LL_DMA_GetFIFOStatus + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_FIFOSTATUS_0_25 + * @arg @ref LL_DMA_FIFOSTATUS_25_50 + * @arg @ref LL_DMA_FIFOSTATUS_50_75 + * @arg @ref LL_DMA_FIFOSTATUS_75_100 + * @arg @ref LL_DMA_FIFOSTATUS_EMPTY + * @arg @ref LL_DMA_FIFOSTATUS_FULL + */ +__STATIC_INLINE uint32_t LL_DMA_GetFIFOStatus(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->FCR, DMA_SxFCR_FS)); +} + +/** + * @brief Disable Fifo mode. + * @rmtoll FCR DMDIS LL_DMA_DisableFifoMode + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_DisableFifoMode(DMA_TypeDef *DMAx, uint32_t Stream) +{ + CLEAR_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->FCR, DMA_SxFCR_DMDIS); +} + +/** + * @brief Enable Fifo mode. + * @rmtoll FCR DMDIS LL_DMA_EnableFifoMode + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_EnableFifoMode(DMA_TypeDef *DMAx, uint32_t Stream) +{ + SET_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->FCR, DMA_SxFCR_DMDIS); +} + +/** + * @brief Select FIFO threshold. 
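+ * @note  Illustrative sketch (instance and stream are arbitrary): enable FIFO mode and use a
+ *        full-FIFO threshold before enabling the stream. LL_DMA_ConfigFifo() combines both
+ *        writes into a single call.
+ * @code
+ * LL_DMA_EnableFifoMode(DMA2, LL_DMA_STREAM_0);
+ * LL_DMA_SetFIFOThreshold(DMA2, LL_DMA_STREAM_0, LL_DMA_FIFOTHRESHOLD_FULL);
+ * @endcode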
+ * @rmtoll FCR FTH LL_DMA_SetFIFOThreshold + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param Threshold This parameter can be one of the following values: + * @arg @ref LL_DMA_FIFOTHRESHOLD_1_4 + * @arg @ref LL_DMA_FIFOTHRESHOLD_1_2 + * @arg @ref LL_DMA_FIFOTHRESHOLD_3_4 + * @arg @ref LL_DMA_FIFOTHRESHOLD_FULL + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetFIFOThreshold(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t Threshold) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->FCR, DMA_SxFCR_FTH, Threshold); +} + +/** + * @brief Get FIFO threshold. + * @rmtoll FCR FTH LL_DMA_GetFIFOThreshold + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_FIFOTHRESHOLD_1_4 + * @arg @ref LL_DMA_FIFOTHRESHOLD_1_2 + * @arg @ref LL_DMA_FIFOTHRESHOLD_3_4 + * @arg @ref LL_DMA_FIFOTHRESHOLD_FULL + */ +__STATIC_INLINE uint32_t LL_DMA_GetFIFOThreshold(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->FCR, DMA_SxFCR_FTH)); +} + +/** + * @brief Configure the FIFO . + * @rmtoll FCR FTH LL_DMA_ConfigFifo\n + * FCR DMDIS LL_DMA_ConfigFifo + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param FifoMode This parameter can be one of the following values: + * @arg @ref LL_DMA_FIFOMODE_ENABLE + * @arg @ref LL_DMA_FIFOMODE_DISABLE + * @param FifoThreshold This parameter can be one of the following values: + * @arg @ref LL_DMA_FIFOTHRESHOLD_1_4 + * @arg @ref LL_DMA_FIFOTHRESHOLD_1_2 + * @arg @ref LL_DMA_FIFOTHRESHOLD_3_4 + * @arg @ref LL_DMA_FIFOTHRESHOLD_FULL + * @retval None + */ +__STATIC_INLINE void LL_DMA_ConfigFifo(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t FifoMode, uint32_t FifoThreshold) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->FCR, DMA_SxFCR_FTH|DMA_SxFCR_DMDIS, FifoMode|FifoThreshold); +} + +/** + * @brief Configure the Source and Destination addresses. + * @note This API must not be called when the DMA stream is enabled. 
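+ * @note  Usage sketch (added for illustration): memory-to-peripheral transfer from a
+ *        hypothetical byte array tx_buf to the USART2 data register; the instance, stream
+ *        and peripheral are arbitrary examples.
+ * @code
+ * LL_DMA_ConfigAddresses(DMA1, LL_DMA_STREAM_6,
+ *                        (uint32_t)tx_buf, (uint32_t)&USART2->DR,
+ *                        LL_DMA_DIRECTION_MEMORY_TO_PERIPH);
+ * LL_DMA_SetDataLength(DMA1, LL_DMA_STREAM_6, sizeof(tx_buf));
+ * @endcode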
+ * @rmtoll M0AR M0A LL_DMA_ConfigAddresses\n + * PAR PA LL_DMA_ConfigAddresses + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param SrcAddress Between 0 to 0xFFFFFFFF + * @param DstAddress Between 0 to 0xFFFFFFFF + * @param Direction This parameter can be one of the following values: + * @arg @ref LL_DMA_DIRECTION_PERIPH_TO_MEMORY + * @arg @ref LL_DMA_DIRECTION_MEMORY_TO_PERIPH + * @arg @ref LL_DMA_DIRECTION_MEMORY_TO_MEMORY + * @retval None + */ +__STATIC_INLINE void LL_DMA_ConfigAddresses(DMA_TypeDef* DMAx, uint32_t Stream, uint32_t SrcAddress, uint32_t DstAddress, uint32_t Direction) +{ + /* Direction Memory to Periph */ + if (Direction == LL_DMA_DIRECTION_MEMORY_TO_PERIPH) + { + WRITE_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->M0AR, SrcAddress); + WRITE_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->PAR, DstAddress); + } + /* Direction Periph to Memory and Memory to Memory */ + else + { + WRITE_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->PAR, SrcAddress); + WRITE_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->M0AR, DstAddress); + } +} + +/** + * @brief Set the Memory address. + * @rmtoll M0AR M0A LL_DMA_SetMemoryAddress + * @note Interface used for direction LL_DMA_DIRECTION_PERIPH_TO_MEMORY or LL_DMA_DIRECTION_MEMORY_TO_PERIPH only. + * @note This API must not be called when the DMA channel is enabled. + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param MemoryAddress Between 0 to 0xFFFFFFFF + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetMemoryAddress(DMA_TypeDef* DMAx, uint32_t Stream, uint32_t MemoryAddress) +{ + WRITE_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->M0AR, MemoryAddress); +} + +/** + * @brief Set the Peripheral address. + * @rmtoll PAR PA LL_DMA_SetPeriphAddress + * @note Interface used for direction LL_DMA_DIRECTION_PERIPH_TO_MEMORY or LL_DMA_DIRECTION_MEMORY_TO_PERIPH only. + * @note This API must not be called when the DMA channel is enabled. + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param PeriphAddress Between 0 to 0xFFFFFFFF + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetPeriphAddress(DMA_TypeDef* DMAx, uint32_t Stream, uint32_t PeriphAddress) +{ + WRITE_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->PAR, PeriphAddress); +} + +/** + * @brief Get the Memory address. + * @rmtoll M0AR M0A LL_DMA_GetMemoryAddress + * @note Interface used for direction LL_DMA_DIRECTION_PERIPH_TO_MEMORY or LL_DMA_DIRECTION_MEMORY_TO_PERIPH only. 
+ * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Between 0 to 0xFFFFFFFF + */ +__STATIC_INLINE uint32_t LL_DMA_GetMemoryAddress(DMA_TypeDef* DMAx, uint32_t Stream) +{ + return (READ_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->M0AR)); +} + +/** + * @brief Get the Peripheral address. + * @rmtoll PAR PA LL_DMA_GetPeriphAddress + * @note Interface used for direction LL_DMA_DIRECTION_PERIPH_TO_MEMORY or LL_DMA_DIRECTION_MEMORY_TO_PERIPH only. + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Between 0 to 0xFFFFFFFF + */ +__STATIC_INLINE uint32_t LL_DMA_GetPeriphAddress(DMA_TypeDef* DMAx, uint32_t Stream) +{ + return (READ_REG(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->PAR)); +} + +/** + * @brief Set the Memory to Memory Source address. + * @rmtoll PAR PA LL_DMA_SetM2MSrcAddress + * @note Interface used for direction LL_DMA_DIRECTION_MEMORY_TO_MEMORY only. + * @note This API must not be called when the DMA channel is enabled. + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param MemoryAddress Between 0 to 0xFFFFFFFF + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetM2MSrcAddress(DMA_TypeDef* DMAx, uint32_t Stream, uint32_t MemoryAddress) +{ + WRITE_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->PAR, MemoryAddress); +} + +/** + * @brief Set the Memory to Memory Destination address. + * @rmtoll M0AR M0A LL_DMA_SetM2MDstAddress + * @note Interface used for direction LL_DMA_DIRECTION_MEMORY_TO_MEMORY only. + * @note This API must not be called when the DMA channel is enabled. + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param MemoryAddress Between 0 to 0xFFFFFFFF + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetM2MDstAddress(DMA_TypeDef* DMAx, uint32_t Stream, uint32_t MemoryAddress) + { + WRITE_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->M0AR, MemoryAddress); + } + +/** + * @brief Get the Memory to Memory Source address. + * @rmtoll PAR PA LL_DMA_GetM2MSrcAddress + * @note Interface used for direction LL_DMA_DIRECTION_MEMORY_TO_MEMORY only. 
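+ * @note  Illustrative sketch (src_buf and dst_buf are hypothetical buffers): in the
+ *        memory-to-memory direction the peripheral port is the source and memory 0 is the
+ *        destination; on this family memory-to-memory transfers are handled by DMA2 only.
+ * @code
+ * LL_DMA_SetM2MSrcAddress(DMA2, LL_DMA_STREAM_0, (uint32_t)src_buf);
+ * LL_DMA_SetM2MDstAddress(DMA2, LL_DMA_STREAM_0, (uint32_t)dst_buf);
+ * @endcode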
+ * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Between 0 to 0xFFFFFFFF + */ +__STATIC_INLINE uint32_t LL_DMA_GetM2MSrcAddress(DMA_TypeDef* DMAx, uint32_t Stream) + { + return (READ_REG(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->PAR)); + } + +/** + * @brief Get the Memory to Memory Destination address. + * @rmtoll M0AR M0A LL_DMA_GetM2MDstAddress + * @note Interface used for direction LL_DMA_DIRECTION_MEMORY_TO_MEMORY only. + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Between 0 to 0xFFFFFFFF + */ +__STATIC_INLINE uint32_t LL_DMA_GetM2MDstAddress(DMA_TypeDef* DMAx, uint32_t Stream) +{ + return (READ_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->M0AR)); +} + +/** + * @brief Set Memory 1 address (used in case of Double buffer mode). + * @rmtoll M1AR M1A LL_DMA_SetMemory1Address + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param Address Between 0 to 0xFFFFFFFF + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetMemory1Address(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t Address) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->M1AR, DMA_SxM1AR_M1A, Address); +} + +/** + * @brief Get Memory 1 address (used in case of Double buffer mode). + * @rmtoll M1AR M1A LL_DMA_GetMemory1Address + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Between 0 to 0xFFFFFFFF + */ +__STATIC_INLINE uint32_t LL_DMA_GetMemory1Address(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->M1AR); +} + +/** + * @} + */ + +/** @defgroup DMA_LL_EF_FLAG_Management FLAG_Management + * @{ + */ + +/** + * @brief Get Stream 0 half transfer flag. + * @rmtoll LISR HTIF0 LL_DMA_IsActiveFlag_HT0 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_HT0(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->LISR ,DMA_LISR_HTIF0)==(DMA_LISR_HTIF0)); +} + +/** + * @brief Get Stream 1 half transfer flag. + * @rmtoll LISR HTIF1 LL_DMA_IsActiveFlag_HT1 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). 
+ */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_HT1(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->LISR ,DMA_LISR_HTIF1)==(DMA_LISR_HTIF1)); +} + +/** + * @brief Get Stream 2 half transfer flag. + * @rmtoll LISR HTIF2 LL_DMA_IsActiveFlag_HT2 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_HT2(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->LISR ,DMA_LISR_HTIF2)==(DMA_LISR_HTIF2)); +} + +/** + * @brief Get Stream 3 half transfer flag. + * @rmtoll LISR HTIF3 LL_DMA_IsActiveFlag_HT3 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_HT3(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->LISR ,DMA_LISR_HTIF3)==(DMA_LISR_HTIF3)); +} + +/** + * @brief Get Stream 4 half transfer flag. + * @rmtoll HISR HTIF4 LL_DMA_IsActiveFlag_HT4 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_HT4(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->HISR ,DMA_HISR_HTIF4)==(DMA_HISR_HTIF4)); +} + +/** + * @brief Get Stream 5 half transfer flag. + * @rmtoll HISR HTIF0 LL_DMA_IsActiveFlag_HT5 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_HT5(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->HISR ,DMA_HISR_HTIF5)==(DMA_HISR_HTIF5)); +} + +/** + * @brief Get Stream 6 half transfer flag. + * @rmtoll HISR HTIF6 LL_DMA_IsActiveFlag_HT6 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_HT6(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->HISR ,DMA_HISR_HTIF6)==(DMA_HISR_HTIF6)); +} + +/** + * @brief Get Stream 7 half transfer flag. + * @rmtoll HISR HTIF7 LL_DMA_IsActiveFlag_HT7 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_HT7(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->HISR ,DMA_HISR_HTIF7)==(DMA_HISR_HTIF7)); +} + +/** + * @brief Get Stream 0 transfer complete flag. + * @rmtoll LISR TCIF0 LL_DMA_IsActiveFlag_TC0 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TC0(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->LISR ,DMA_LISR_TCIF0)==(DMA_LISR_TCIF0)); +} + +/** + * @brief Get Stream 1 transfer complete flag. + * @rmtoll LISR TCIF1 LL_DMA_IsActiveFlag_TC1 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TC1(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->LISR ,DMA_LISR_TCIF1)==(DMA_LISR_TCIF1)); +} + +/** + * @brief Get Stream 2 transfer complete flag. + * @rmtoll LISR TCIF2 LL_DMA_IsActiveFlag_TC2 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TC2(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->LISR ,DMA_LISR_TCIF2)==(DMA_LISR_TCIF2)); +} + +/** + * @brief Get Stream 3 transfer complete flag. + * @rmtoll LISR TCIF3 LL_DMA_IsActiveFlag_TC3 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TC3(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->LISR ,DMA_LISR_TCIF3)==(DMA_LISR_TCIF3)); +} + +/** + * @brief Get Stream 4 transfer complete flag. + * @rmtoll HISR TCIF4 LL_DMA_IsActiveFlag_TC4 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). 
+ */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TC4(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->HISR ,DMA_HISR_TCIF4)==(DMA_HISR_TCIF4)); +} + +/** + * @brief Get Stream 5 transfer complete flag. + * @rmtoll HISR TCIF0 LL_DMA_IsActiveFlag_TC5 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TC5(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->HISR ,DMA_HISR_TCIF5)==(DMA_HISR_TCIF5)); +} + +/** + * @brief Get Stream 6 transfer complete flag. + * @rmtoll HISR TCIF6 LL_DMA_IsActiveFlag_TC6 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TC6(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->HISR ,DMA_HISR_TCIF6)==(DMA_HISR_TCIF6)); +} + +/** + * @brief Get Stream 7 transfer complete flag. + * @rmtoll HISR TCIF7 LL_DMA_IsActiveFlag_TC7 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TC7(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->HISR ,DMA_HISR_TCIF7)==(DMA_HISR_TCIF7)); +} + +/** + * @brief Get Stream 0 transfer error flag. + * @rmtoll LISR TEIF0 LL_DMA_IsActiveFlag_TE0 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TE0(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->LISR ,DMA_LISR_TEIF0)==(DMA_LISR_TEIF0)); +} + +/** + * @brief Get Stream 1 transfer error flag. + * @rmtoll LISR TEIF1 LL_DMA_IsActiveFlag_TE1 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TE1(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->LISR ,DMA_LISR_TEIF1)==(DMA_LISR_TEIF1)); +} + +/** + * @brief Get Stream 2 transfer error flag. + * @rmtoll LISR TEIF2 LL_DMA_IsActiveFlag_TE2 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TE2(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->LISR ,DMA_LISR_TEIF2)==(DMA_LISR_TEIF2)); +} + +/** + * @brief Get Stream 3 transfer error flag. + * @rmtoll LISR TEIF3 LL_DMA_IsActiveFlag_TE3 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TE3(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->LISR ,DMA_LISR_TEIF3)==(DMA_LISR_TEIF3)); +} + +/** + * @brief Get Stream 4 transfer error flag. + * @rmtoll HISR TEIF4 LL_DMA_IsActiveFlag_TE4 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TE4(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->HISR ,DMA_HISR_TEIF4)==(DMA_HISR_TEIF4)); +} + +/** + * @brief Get Stream 5 transfer error flag. + * @rmtoll HISR TEIF0 LL_DMA_IsActiveFlag_TE5 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TE5(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->HISR ,DMA_HISR_TEIF5)==(DMA_HISR_TEIF5)); +} + +/** + * @brief Get Stream 6 transfer error flag. + * @rmtoll HISR TEIF6 LL_DMA_IsActiveFlag_TE6 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TE6(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->HISR ,DMA_HISR_TEIF6)==(DMA_HISR_TEIF6)); +} + +/** + * @brief Get Stream 7 transfer error flag. + * @rmtoll HISR TEIF7 LL_DMA_IsActiveFlag_TE7 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). 
+ */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TE7(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->HISR ,DMA_HISR_TEIF7)==(DMA_HISR_TEIF7)); +} + +/** + * @brief Get Stream 0 direct mode error flag. + * @rmtoll LISR DMEIF0 LL_DMA_IsActiveFlag_DME0 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_DME0(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->LISR ,DMA_LISR_DMEIF0)==(DMA_LISR_DMEIF0)); +} + +/** + * @brief Get Stream 1 direct mode error flag. + * @rmtoll LISR DMEIF1 LL_DMA_IsActiveFlag_DME1 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_DME1(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->LISR ,DMA_LISR_DMEIF1)==(DMA_LISR_DMEIF1)); +} + +/** + * @brief Get Stream 2 direct mode error flag. + * @rmtoll LISR DMEIF2 LL_DMA_IsActiveFlag_DME2 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_DME2(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->LISR ,DMA_LISR_DMEIF2)==(DMA_LISR_DMEIF2)); +} + +/** + * @brief Get Stream 3 direct mode error flag. + * @rmtoll LISR DMEIF3 LL_DMA_IsActiveFlag_DME3 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_DME3(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->LISR ,DMA_LISR_DMEIF3)==(DMA_LISR_DMEIF3)); +} + +/** + * @brief Get Stream 4 direct mode error flag. + * @rmtoll HISR DMEIF4 LL_DMA_IsActiveFlag_DME4 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_DME4(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->HISR ,DMA_HISR_DMEIF4)==(DMA_HISR_DMEIF4)); +} + +/** + * @brief Get Stream 5 direct mode error flag. + * @rmtoll HISR DMEIF0 LL_DMA_IsActiveFlag_DME5 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_DME5(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->HISR ,DMA_HISR_DMEIF5)==(DMA_HISR_DMEIF5)); +} + +/** + * @brief Get Stream 6 direct mode error flag. + * @rmtoll HISR DMEIF6 LL_DMA_IsActiveFlag_DME6 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_DME6(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->HISR ,DMA_HISR_DMEIF6)==(DMA_HISR_DMEIF6)); +} + +/** + * @brief Get Stream 7 direct mode error flag. + * @rmtoll HISR DMEIF7 LL_DMA_IsActiveFlag_DME7 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_DME7(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->HISR ,DMA_HISR_DMEIF7)==(DMA_HISR_DMEIF7)); +} + +/** + * @brief Get Stream 0 FIFO error flag. + * @rmtoll LISR FEIF0 LL_DMA_IsActiveFlag_FE0 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_FE0(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->LISR ,DMA_LISR_FEIF0)==(DMA_LISR_FEIF0)); +} + +/** + * @brief Get Stream 1 FIFO error flag. + * @rmtoll LISR FEIF1 LL_DMA_IsActiveFlag_FE1 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_FE1(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->LISR ,DMA_LISR_FEIF1)==(DMA_LISR_FEIF1)); +} + +/** + * @brief Get Stream 2 FIFO error flag. + * @rmtoll LISR FEIF2 LL_DMA_IsActiveFlag_FE2 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). 
+ */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_FE2(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->LISR ,DMA_LISR_FEIF2)==(DMA_LISR_FEIF2)); +} + +/** + * @brief Get Stream 3 FIFO error flag. + * @rmtoll LISR FEIF3 LL_DMA_IsActiveFlag_FE3 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_FE3(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->LISR ,DMA_LISR_FEIF3)==(DMA_LISR_FEIF3)); +} + +/** + * @brief Get Stream 4 FIFO error flag. + * @rmtoll HISR FEIF4 LL_DMA_IsActiveFlag_FE4 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_FE4(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->HISR ,DMA_HISR_FEIF4)==(DMA_HISR_FEIF4)); +} + +/** + * @brief Get Stream 5 FIFO error flag. + * @rmtoll HISR FEIF0 LL_DMA_IsActiveFlag_FE5 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_FE5(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->HISR ,DMA_HISR_FEIF5)==(DMA_HISR_FEIF5)); +} + +/** + * @brief Get Stream 6 FIFO error flag. + * @rmtoll HISR FEIF6 LL_DMA_IsActiveFlag_FE6 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_FE6(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->HISR ,DMA_HISR_FEIF6)==(DMA_HISR_FEIF6)); +} + +/** + * @brief Get Stream 7 FIFO error flag. + * @rmtoll HISR FEIF7 LL_DMA_IsActiveFlag_FE7 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_FE7(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->HISR ,DMA_HISR_FEIF7)==(DMA_HISR_FEIF7)); +} + +/** + * @brief Clear Stream 0 half transfer flag. + * @rmtoll LIFCR CHTIF0 LL_DMA_ClearFlag_HT0 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_HT0(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CHTIF0); +} + +/** + * @brief Clear Stream 1 half transfer flag. + * @rmtoll LIFCR CHTIF1 LL_DMA_ClearFlag_HT1 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_HT1(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CHTIF1); +} + +/** + * @brief Clear Stream 2 half transfer flag. + * @rmtoll LIFCR CHTIF2 LL_DMA_ClearFlag_HT2 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_HT2(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CHTIF2); +} + +/** + * @brief Clear Stream 3 half transfer flag. + * @rmtoll LIFCR CHTIF3 LL_DMA_ClearFlag_HT3 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_HT3(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CHTIF3); +} + +/** + * @brief Clear Stream 4 half transfer flag. + * @rmtoll HIFCR CHTIF4 LL_DMA_ClearFlag_HT4 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_HT4(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CHTIF4); +} + +/** + * @brief Clear Stream 5 half transfer flag. + * @rmtoll HIFCR CHTIF5 LL_DMA_ClearFlag_HT5 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_HT5(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CHTIF5); +} + +/** + * @brief Clear Stream 6 half transfer flag. 
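+ * @note  General flag-handling sketch (added for illustration; the instance and stream are
+ *        arbitrary): busy-wait for a transfer-complete event, then clear the flag before the
+ *        next transfer. The status flags are write-one-to-clear through LIFCR/HIFCR.
+ * @code
+ * while (!LL_DMA_IsActiveFlag_TC0(DMA2))
+ * {
+ *   /* optionally check LL_DMA_IsActiveFlag_TE0(DMA2) here and abort on error */
+ * }
+ * LL_DMA_ClearFlag_TC0(DMA2);
+ * @endcode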
+ * @rmtoll HIFCR CHTIF6 LL_DMA_ClearFlag_HT6 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_HT6(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CHTIF6); +} + +/** + * @brief Clear Stream 7 half transfer flag. + * @rmtoll HIFCR CHTIF7 LL_DMA_ClearFlag_HT7 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_HT7(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CHTIF7); +} + +/** + * @brief Clear Stream 0 transfer complete flag. + * @rmtoll LIFCR CTCIF0 LL_DMA_ClearFlag_TC0 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TC0(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CTCIF0); +} + +/** + * @brief Clear Stream 1 transfer complete flag. + * @rmtoll LIFCR CTCIF1 LL_DMA_ClearFlag_TC1 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TC1(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CTCIF1); +} + +/** + * @brief Clear Stream 2 transfer complete flag. + * @rmtoll LIFCR CTCIF2 LL_DMA_ClearFlag_TC2 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TC2(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CTCIF2); +} + +/** + * @brief Clear Stream 3 transfer complete flag. + * @rmtoll LIFCR CTCIF3 LL_DMA_ClearFlag_TC3 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TC3(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CTCIF3); +} + +/** + * @brief Clear Stream 4 transfer complete flag. + * @rmtoll HIFCR CTCIF4 LL_DMA_ClearFlag_TC4 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TC4(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CTCIF4); +} + +/** + * @brief Clear Stream 5 transfer complete flag. + * @rmtoll HIFCR CTCIF5 LL_DMA_ClearFlag_TC5 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TC5(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CTCIF5); +} + +/** + * @brief Clear Stream 6 transfer complete flag. + * @rmtoll HIFCR CTCIF6 LL_DMA_ClearFlag_TC6 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TC6(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CTCIF6); +} + +/** + * @brief Clear Stream 7 transfer complete flag. + * @rmtoll HIFCR CTCIF7 LL_DMA_ClearFlag_TC7 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TC7(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CTCIF7); +} + +/** + * @brief Clear Stream 0 transfer error flag. + * @rmtoll LIFCR CTEIF0 LL_DMA_ClearFlag_TE0 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TE0(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CTEIF0); +} + +/** + * @brief Clear Stream 1 transfer error flag. + * @rmtoll LIFCR CTEIF1 LL_DMA_ClearFlag_TE1 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TE1(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CTEIF1); +} + +/** + * @brief Clear Stream 2 transfer error flag. + * @rmtoll LIFCR CTEIF2 LL_DMA_ClearFlag_TE2 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TE2(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CTEIF2); +} + +/** + * @brief Clear Stream 3 transfer error flag. 
+ * @rmtoll LIFCR CTEIF3 LL_DMA_ClearFlag_TE3 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TE3(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CTEIF3); +} + +/** + * @brief Clear Stream 4 transfer error flag. + * @rmtoll HIFCR CTEIF4 LL_DMA_ClearFlag_TE4 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TE4(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CTEIF4); +} + +/** + * @brief Clear Stream 5 transfer error flag. + * @rmtoll HIFCR CTEIF5 LL_DMA_ClearFlag_TE5 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TE5(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CTEIF5); +} + +/** + * @brief Clear Stream 6 transfer error flag. + * @rmtoll HIFCR CTEIF6 LL_DMA_ClearFlag_TE6 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TE6(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CTEIF6); +} + +/** + * @brief Clear Stream 7 transfer error flag. + * @rmtoll HIFCR CTEIF7 LL_DMA_ClearFlag_TE7 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TE7(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CTEIF7); +} + +/** + * @brief Clear Stream 0 direct mode error flag. + * @rmtoll LIFCR CDMEIF0 LL_DMA_ClearFlag_DME0 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_DME0(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CDMEIF0); +} + +/** + * @brief Clear Stream 1 direct mode error flag. + * @rmtoll LIFCR CDMEIF1 LL_DMA_ClearFlag_DME1 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_DME1(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CDMEIF1); +} + +/** + * @brief Clear Stream 2 direct mode error flag. + * @rmtoll LIFCR CDMEIF2 LL_DMA_ClearFlag_DME2 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_DME2(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CDMEIF2); +} + +/** + * @brief Clear Stream 3 direct mode error flag. + * @rmtoll LIFCR CDMEIF3 LL_DMA_ClearFlag_DME3 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_DME3(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CDMEIF3); +} + +/** + * @brief Clear Stream 4 direct mode error flag. + * @rmtoll HIFCR CDMEIF4 LL_DMA_ClearFlag_DME4 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_DME4(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CDMEIF4); +} + +/** + * @brief Clear Stream 5 direct mode error flag. + * @rmtoll HIFCR CDMEIF5 LL_DMA_ClearFlag_DME5 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_DME5(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CDMEIF5); +} + +/** + * @brief Clear Stream 6 direct mode error flag. + * @rmtoll HIFCR CDMEIF6 LL_DMA_ClearFlag_DME6 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_DME6(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CDMEIF6); +} + +/** + * @brief Clear Stream 7 direct mode error flag. + * @rmtoll HIFCR CDMEIF7 LL_DMA_ClearFlag_DME7 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_DME7(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CDMEIF7); +} + +/** + * @brief Clear Stream 0 FIFO error flag. 
+ * @rmtoll LIFCR CFEIF0 LL_DMA_ClearFlag_FE0 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_FE0(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CFEIF0); +} + +/** + * @brief Clear Stream 1 FIFO error flag. + * @rmtoll LIFCR CFEIF1 LL_DMA_ClearFlag_FE1 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_FE1(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CFEIF1); +} + +/** + * @brief Clear Stream 2 FIFO error flag. + * @rmtoll LIFCR CFEIF2 LL_DMA_ClearFlag_FE2 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_FE2(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CFEIF2); +} + +/** + * @brief Clear Stream 3 FIFO error flag. + * @rmtoll LIFCR CFEIF3 LL_DMA_ClearFlag_FE3 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_FE3(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CFEIF3); +} + +/** + * @brief Clear Stream 4 FIFO error flag. + * @rmtoll HIFCR CFEIF4 LL_DMA_ClearFlag_FE4 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_FE4(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CFEIF4); +} + +/** + * @brief Clear Stream 5 FIFO error flag. + * @rmtoll HIFCR CFEIF5 LL_DMA_ClearFlag_FE5 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_FE5(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CFEIF5); +} + +/** + * @brief Clear Stream 6 FIFO error flag. + * @rmtoll HIFCR CFEIF6 LL_DMA_ClearFlag_FE6 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_FE6(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CFEIF6); +} + +/** + * @brief Clear Stream 7 FIFO error flag. + * @rmtoll HIFCR CFEIF7 LL_DMA_ClearFlag_FE7 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_FE7(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CFEIF7); +} + +/** + * @} + */ + +/** @defgroup DMA_LL_EF_IT_Management IT_Management + * @{ + */ + +/** + * @brief Enable Half transfer interrupt. + * @rmtoll CR HTIE LL_DMA_EnableIT_HT + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_EnableIT_HT(DMA_TypeDef *DMAx, uint32_t Stream) +{ + SET_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_HTIE); +} + +/** + * @brief Enable Transfer error interrupt. + * @rmtoll CR TEIE LL_DMA_EnableIT_TE + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_EnableIT_TE(DMA_TypeDef *DMAx, uint32_t Stream) +{ + SET_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_TEIE); +} + +/** + * @brief Enable Transfer complete interrupt. 
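+ * @note  Usage sketch (added for illustration; the instance and stream are arbitrary): enable
+ *        the transfer-complete and transfer-error interrupts for a stream. The matching NVIC
+ *        line (e.g. DMA2_Stream0_IRQn) must also be enabled separately, which is not shown.
+ * @code
+ * LL_DMA_EnableIT_TC(DMA2, LL_DMA_STREAM_0);
+ * LL_DMA_EnableIT_TE(DMA2, LL_DMA_STREAM_0);
+ * @endcode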
+ * @rmtoll CR TCIE LL_DMA_EnableIT_TC + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_EnableIT_TC(DMA_TypeDef *DMAx, uint32_t Stream) +{ + SET_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_TCIE); +} + +/** + * @brief Enable Direct mode error interrupt. + * @rmtoll CR DMEIE LL_DMA_EnableIT_DME + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_EnableIT_DME(DMA_TypeDef *DMAx, uint32_t Stream) +{ + SET_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_DMEIE); +} + +/** + * @brief Enable FIFO error interrupt. + * @rmtoll FCR FEIE LL_DMA_EnableIT_FE + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_EnableIT_FE(DMA_TypeDef *DMAx, uint32_t Stream) +{ + SET_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->FCR, DMA_SxFCR_FEIE); +} + +/** + * @brief Disable Half transfer interrupt. + * @rmtoll CR HTIE LL_DMA_DisableIT_HT + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_DisableIT_HT(DMA_TypeDef *DMAx, uint32_t Stream) +{ + CLEAR_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_HTIE); +} + +/** + * @brief Disable Transfer error interrupt. + * @rmtoll CR TEIE LL_DMA_DisableIT_TE + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_DisableIT_TE(DMA_TypeDef *DMAx, uint32_t Stream) +{ + CLEAR_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_TEIE); +} + +/** + * @brief Disable Transfer complete interrupt. 
+ * @rmtoll CR TCIE LL_DMA_DisableIT_TC + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_DisableIT_TC(DMA_TypeDef *DMAx, uint32_t Stream) +{ + CLEAR_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_TCIE); +} + +/** + * @brief Disable Direct mode error interrupt. + * @rmtoll CR DMEIE LL_DMA_DisableIT_DME + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_DisableIT_DME(DMA_TypeDef *DMAx, uint32_t Stream) +{ + CLEAR_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_DMEIE); +} + +/** + * @brief Disable FIFO error interrupt. + * @rmtoll FCR FEIE LL_DMA_DisableIT_FE + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_DisableIT_FE(DMA_TypeDef *DMAx, uint32_t Stream) +{ + CLEAR_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->FCR, DMA_SxFCR_FEIE); +} + +/** + * @brief Check if Half transfer interrupt is enabled. + * @rmtoll CR HTIE LL_DMA_IsEnabledIT_HT + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsEnabledIT_HT(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_HTIE) == DMA_SxCR_HTIE); +} + +/** + * @brief Check if Transfer error nterrup is enabled. + * @rmtoll CR TEIE LL_DMA_IsEnabledIT_TE + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsEnabledIT_TE(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_TEIE) == DMA_SxCR_TEIE); +} + +/** + * @brief Check if Transfer complete interrupt is enabled. 
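The per-stream interrupt helpers above (LL_DMA_EnableIT_xx / LL_DMA_DisableIT_xx / LL_DMA_IsEnabledIT_xx) all resolve the stream register block through STREAM_OFFSET_TAB, so any LL_DMA_STREAM_x constant can be passed. A minimal usage sketch follows, assuming DMA2 Stream 5 has already been configured for a transfer; DMA2_Stream5_IRQn comes from the CMSIS device header, and LL_DMA_IsActiveFlag_TC5 / LL_DMA_IsActiveFlag_FE5 / LL_DMA_ClearFlag_TC5 are the status and clear counterparts from the flag-management section earlier in this header (same pattern as the CFEIFx helpers shown above).

#include "stm32f4xx_ll_dma.h"

static void dma2_stream5_irq_setup(void)
{
  /* Unmask transfer-complete, transfer-error and FIFO-error interrupts */
  LL_DMA_EnableIT_TC(DMA2, LL_DMA_STREAM_5);
  LL_DMA_EnableIT_TE(DMA2, LL_DMA_STREAM_5);
  LL_DMA_EnableIT_FE(DMA2, LL_DMA_STREAM_5);

  NVIC_SetPriority(DMA2_Stream5_IRQn, 5U);
  NVIC_EnableIRQ(DMA2_Stream5_IRQn);
}

void DMA2_Stream5_IRQHandler(void)
{
  /* Flags are cleared by writing 1 to the corresponding LIFCR/HIFCR bit */
  if (LL_DMA_IsEnabledIT_TC(DMA2, LL_DMA_STREAM_5) && LL_DMA_IsActiveFlag_TC5(DMA2))
  {
    LL_DMA_ClearFlag_TC5(DMA2);
    /* ... signal the application that the transfer finished ... */
  }
  if (LL_DMA_IsActiveFlag_FE5(DMA2))
  {
    LL_DMA_ClearFlag_FE5(DMA2);   /* FIFO error: clear and handle it */
  }
}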
+ * @rmtoll CR TCIE LL_DMA_IsEnabledIT_TC + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsEnabledIT_TC(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_TCIE) == DMA_SxCR_TCIE); +} + +/** + * @brief Check if Direct mode error interrupt is enabled. + * @rmtoll CR DMEIE LL_DMA_IsEnabledIT_DME + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsEnabledIT_DME(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_DMEIE) == DMA_SxCR_DMEIE); +} + +/** + * @brief Check if FIFO error interrupt is enabled. + * @rmtoll FCR FEIE LL_DMA_IsEnabledIT_FE + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsEnabledIT_FE(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->FCR, DMA_SxFCR_FEIE) == DMA_SxFCR_FEIE); +} + +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup DMA_LL_EF_Init Initialization and de-initialization functions + * @{ + */ + +uint32_t LL_DMA_Init(DMA_TypeDef *DMAx, uint32_t Stream, LL_DMA_InitTypeDef *DMA_InitStruct); +uint32_t LL_DMA_DeInit(DMA_TypeDef *DMAx, uint32_t Stream); +void LL_DMA_StructInit(LL_DMA_InitTypeDef *DMA_InitStruct); + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* DMA1 || DMA2 */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_LL_DMA_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_exti.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_exti.h new file mode 100644 index 0000000..65ab691 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_exti.h @@ -0,0 +1,954 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_exti.h + * @author MCD Application Team + * @brief Header file of EXTI LL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. 
+ * If no LICENSE file comes with this software, it is provided AS-IS.Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_LL_EXTI_H +#define __STM32F4xx_LL_EXTI_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +#if defined (EXTI) + +/** @defgroup EXTI_LL EXTI + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private Macros ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup EXTI_LL_Private_Macros EXTI Private Macros + * @{ + */ +/** + * @} + */ +#endif /*USE_FULL_LL_DRIVER*/ +/* Exported types ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup EXTI_LL_ES_INIT EXTI Exported Init structure + * @{ + */ +typedef struct +{ + + uint32_t Line_0_31; /*!< Specifies the EXTI lines to be enabled or disabled for Lines in range 0 to 31 + This parameter can be any combination of @ref EXTI_LL_EC_LINE */ + + FunctionalState LineCommand; /*!< Specifies the new state of the selected EXTI lines. + This parameter can be set either to ENABLE or DISABLE */ + + uint8_t Mode; /*!< Specifies the mode for the EXTI lines. + This parameter can be a value of @ref EXTI_LL_EC_MODE. */ + + uint8_t Trigger; /*!< Specifies the trigger signal active edge for the EXTI lines. + This parameter can be a value of @ref EXTI_LL_EC_TRIGGER. 
*/ +} LL_EXTI_InitTypeDef; + +/** + * @} + */ +#endif /*USE_FULL_LL_DRIVER*/ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup EXTI_LL_Exported_Constants EXTI Exported Constants + * @{ + */ + +/** @defgroup EXTI_LL_EC_LINE LINE + * @{ + */ +#define LL_EXTI_LINE_0 EXTI_IMR_IM0 /*!< Extended line 0 */ +#define LL_EXTI_LINE_1 EXTI_IMR_IM1 /*!< Extended line 1 */ +#define LL_EXTI_LINE_2 EXTI_IMR_IM2 /*!< Extended line 2 */ +#define LL_EXTI_LINE_3 EXTI_IMR_IM3 /*!< Extended line 3 */ +#define LL_EXTI_LINE_4 EXTI_IMR_IM4 /*!< Extended line 4 */ +#define LL_EXTI_LINE_5 EXTI_IMR_IM5 /*!< Extended line 5 */ +#define LL_EXTI_LINE_6 EXTI_IMR_IM6 /*!< Extended line 6 */ +#define LL_EXTI_LINE_7 EXTI_IMR_IM7 /*!< Extended line 7 */ +#define LL_EXTI_LINE_8 EXTI_IMR_IM8 /*!< Extended line 8 */ +#define LL_EXTI_LINE_9 EXTI_IMR_IM9 /*!< Extended line 9 */ +#define LL_EXTI_LINE_10 EXTI_IMR_IM10 /*!< Extended line 10 */ +#define LL_EXTI_LINE_11 EXTI_IMR_IM11 /*!< Extended line 11 */ +#define LL_EXTI_LINE_12 EXTI_IMR_IM12 /*!< Extended line 12 */ +#define LL_EXTI_LINE_13 EXTI_IMR_IM13 /*!< Extended line 13 */ +#define LL_EXTI_LINE_14 EXTI_IMR_IM14 /*!< Extended line 14 */ +#define LL_EXTI_LINE_15 EXTI_IMR_IM15 /*!< Extended line 15 */ +#if defined(EXTI_IMR_IM16) +#define LL_EXTI_LINE_16 EXTI_IMR_IM16 /*!< Extended line 16 */ +#endif +#define LL_EXTI_LINE_17 EXTI_IMR_IM17 /*!< Extended line 17 */ +#if defined(EXTI_IMR_IM18) +#define LL_EXTI_LINE_18 EXTI_IMR_IM18 /*!< Extended line 18 */ +#endif +#define LL_EXTI_LINE_19 EXTI_IMR_IM19 /*!< Extended line 19 */ +#if defined(EXTI_IMR_IM20) +#define LL_EXTI_LINE_20 EXTI_IMR_IM20 /*!< Extended line 20 */ +#endif +#if defined(EXTI_IMR_IM21) +#define LL_EXTI_LINE_21 EXTI_IMR_IM21 /*!< Extended line 21 */ +#endif +#if defined(EXTI_IMR_IM22) +#define LL_EXTI_LINE_22 EXTI_IMR_IM22 /*!< Extended line 22 */ +#endif +#if defined(EXTI_IMR_IM23) +#define LL_EXTI_LINE_23 EXTI_IMR_IM23 /*!< Extended line 23 */ +#endif +#if defined(EXTI_IMR_IM24) +#define LL_EXTI_LINE_24 EXTI_IMR_IM24 /*!< Extended line 24 */ +#endif +#if defined(EXTI_IMR_IM25) +#define LL_EXTI_LINE_25 EXTI_IMR_IM25 /*!< Extended line 25 */ +#endif +#if defined(EXTI_IMR_IM26) +#define LL_EXTI_LINE_26 EXTI_IMR_IM26 /*!< Extended line 26 */ +#endif +#if defined(EXTI_IMR_IM27) +#define LL_EXTI_LINE_27 EXTI_IMR_IM27 /*!< Extended line 27 */ +#endif +#if defined(EXTI_IMR_IM28) +#define LL_EXTI_LINE_28 EXTI_IMR_IM28 /*!< Extended line 28 */ +#endif +#if defined(EXTI_IMR_IM29) +#define LL_EXTI_LINE_29 EXTI_IMR_IM29 /*!< Extended line 29 */ +#endif +#if defined(EXTI_IMR_IM30) +#define LL_EXTI_LINE_30 EXTI_IMR_IM30 /*!< Extended line 30 */ +#endif +#if defined(EXTI_IMR_IM31) +#define LL_EXTI_LINE_31 EXTI_IMR_IM31 /*!< Extended line 31 */ +#endif +#define LL_EXTI_LINE_ALL_0_31 EXTI_IMR_IM /*!< All Extended line not reserved*/ + + +#define LL_EXTI_LINE_ALL ((uint32_t)0xFFFFFFFFU) /*!< All Extended line */ + +#if defined(USE_FULL_LL_DRIVER) +#define LL_EXTI_LINE_NONE ((uint32_t)0x00000000U) /*!< None Extended line */ +#endif /*USE_FULL_LL_DRIVER*/ + +/** + * @} + */ +#if defined(USE_FULL_LL_DRIVER) + +/** @defgroup EXTI_LL_EC_MODE Mode + * @{ + */ +#define LL_EXTI_MODE_IT ((uint8_t)0x00U) /*!< Interrupt Mode */ +#define LL_EXTI_MODE_EVENT ((uint8_t)0x01U) /*!< Event Mode */ +#define LL_EXTI_MODE_IT_EVENT ((uint8_t)0x02U) /*!< Interrupt & Event Mode */ +/** + * @} + */ + +/** @defgroup EXTI_LL_EC_TRIGGER Edge Trigger + * @{ + */ +#define LL_EXTI_TRIGGER_NONE ((uint8_t)0x00U) 
/*!< No Trigger Mode */ +#define LL_EXTI_TRIGGER_RISING ((uint8_t)0x01U) /*!< Trigger Rising Mode */ +#define LL_EXTI_TRIGGER_FALLING ((uint8_t)0x02U) /*!< Trigger Falling Mode */ +#define LL_EXTI_TRIGGER_RISING_FALLING ((uint8_t)0x03U) /*!< Trigger Rising & Falling Mode */ + +/** + * @} + */ + + +#endif /*USE_FULL_LL_DRIVER*/ + + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup EXTI_LL_Exported_Macros EXTI Exported Macros + * @{ + */ + +/** @defgroup EXTI_LL_EM_WRITE_READ Common Write and read registers Macros + * @{ + */ + +/** + * @brief Write a value in EXTI register + * @param __REG__ Register to be written + * @param __VALUE__ Value to be written in the register + * @retval None + */ +#define LL_EXTI_WriteReg(__REG__, __VALUE__) WRITE_REG(EXTI->__REG__, (__VALUE__)) + +/** + * @brief Read a value in EXTI register + * @param __REG__ Register to be read + * @retval Register value + */ +#define LL_EXTI_ReadReg(__REG__) READ_REG(EXTI->__REG__) +/** + * @} + */ + + +/** + * @} + */ + + + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup EXTI_LL_Exported_Functions EXTI Exported Functions + * @{ + */ +/** @defgroup EXTI_LL_EF_IT_Management IT_Management + * @{ + */ + +/** + * @brief Enable ExtiLine Interrupt request for Lines in range 0 to 31 + * @note The reset value for the direct or internal lines (see RM) + * is set to 1 in order to enable the interrupt by default. + * Bits are set automatically at Power on. + * @rmtoll IMR IMx LL_EXTI_EnableIT_0_31 + * @param ExtiLine This parameter can be one of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_17 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @arg @ref LL_EXTI_LINE_23(*) + * @arg @ref LL_EXTI_LINE_ALL_0_31 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval None + */ +__STATIC_INLINE void LL_EXTI_EnableIT_0_31(uint32_t ExtiLine) +{ + SET_BIT(EXTI->IMR, ExtiLine); +} + +/** + * @brief Disable ExtiLine Interrupt request for Lines in range 0 to 31 + * @note The reset value for the direct or internal lines (see RM) + * is set to 1 in order to enable the interrupt by default. + * Bits are set automatically at Power on. 
+ * @rmtoll IMR IMx LL_EXTI_DisableIT_0_31 + * @param ExtiLine This parameter can be one of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_17 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @arg @ref LL_EXTI_LINE_23(*) + * @arg @ref LL_EXTI_LINE_ALL_0_31 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval None + */ +__STATIC_INLINE void LL_EXTI_DisableIT_0_31(uint32_t ExtiLine) +{ + CLEAR_BIT(EXTI->IMR, ExtiLine); +} + + +/** + * @brief Indicate if ExtiLine Interrupt request is enabled for Lines in range 0 to 31 + * @note The reset value for the direct or internal lines (see RM) + * is set to 1 in order to enable the interrupt by default. + * Bits are set automatically at Power on. + * @rmtoll IMR IMx LL_EXTI_IsEnabledIT_0_31 + * @param ExtiLine This parameter can be one of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_17 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @arg @ref LL_EXTI_LINE_23(*) + * @arg @ref LL_EXTI_LINE_ALL_0_31 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval State of bit (1 or 0). 
+ */ +__STATIC_INLINE uint32_t LL_EXTI_IsEnabledIT_0_31(uint32_t ExtiLine) +{ + return (READ_BIT(EXTI->IMR, ExtiLine) == (ExtiLine)); +} + + +/** + * @} + */ + +/** @defgroup EXTI_LL_EF_Event_Management Event_Management + * @{ + */ + +/** + * @brief Enable ExtiLine Event request for Lines in range 0 to 31 + * @rmtoll EMR EMx LL_EXTI_EnableEvent_0_31 + * @param ExtiLine This parameter can be one of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_17 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @arg @ref LL_EXTI_LINE_23(*) + * @arg @ref LL_EXTI_LINE_ALL_0_31 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval None + */ +__STATIC_INLINE void LL_EXTI_EnableEvent_0_31(uint32_t ExtiLine) +{ + SET_BIT(EXTI->EMR, ExtiLine); + +} + + +/** + * @brief Disable ExtiLine Event request for Lines in range 0 to 31 + * @rmtoll EMR EMx LL_EXTI_DisableEvent_0_31 + * @param ExtiLine This parameter can be one of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_17 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @arg @ref LL_EXTI_LINE_23(*) + * @arg @ref LL_EXTI_LINE_ALL_0_31 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval None + */ +__STATIC_INLINE void LL_EXTI_DisableEvent_0_31(uint32_t ExtiLine) +{ + CLEAR_BIT(EXTI->EMR, ExtiLine); +} + + +/** + * @brief Indicate if ExtiLine Event request is enabled for Lines in range 0 to 31 + * @rmtoll EMR EMx LL_EXTI_IsEnabledEvent_0_31 + * @param ExtiLine This parameter can be one of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_17 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @arg @ref LL_EXTI_LINE_23(*) + * 
@arg @ref LL_EXTI_LINE_ALL_0_31 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_EXTI_IsEnabledEvent_0_31(uint32_t ExtiLine) +{ + return (READ_BIT(EXTI->EMR, ExtiLine) == (ExtiLine)); + +} + + +/** + * @} + */ + +/** @defgroup EXTI_LL_EF_Rising_Trigger_Management Rising_Trigger_Management + * @{ + */ + +/** + * @brief Enable ExtiLine Rising Edge Trigger for Lines in range 0 to 31 + * @note The configurable wakeup lines are edge-triggered. No glitch must be + * generated on these lines. If a rising edge on a configurable interrupt + * line occurs during a write operation in the EXTI_RTSR register, the + * pending bit is not set. + * Rising and falling edge triggers can be set for + * the same interrupt line. In this case, both generate a trigger + * condition. + * @rmtoll RTSR RTx LL_EXTI_EnableRisingTrig_0_31 + * @param ExtiLine This parameter can be a combination of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval None + */ +__STATIC_INLINE void LL_EXTI_EnableRisingTrig_0_31(uint32_t ExtiLine) +{ + SET_BIT(EXTI->RTSR, ExtiLine); + +} + + +/** + * @brief Disable ExtiLine Rising Edge Trigger for Lines in range 0 to 31 + * @note The configurable wakeup lines are edge-triggered. No glitch must be + * generated on these lines. If a rising edge on a configurable interrupt + * line occurs during a write operation in the EXTI_RTSR register, the + * pending bit is not set. + * Rising and falling edge triggers can be set for + * the same interrupt line. In this case, both generate a trigger + * condition. 
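A short configuration sketch combining the trigger and interrupt-mask helpers (illustrative only: EXTI line 0 and EXTI0_IRQn are example choices, and routing the desired GPIO pin to the line through SYSCFG is assumed to be done elsewhere):

#include "stm32f4xx_ll_exti.h"

static void exti_line0_rising_irq_setup(void)
{
  /* Trigger on rising edges and unmask the line 0 interrupt */
  LL_EXTI_EnableRisingTrig_0_31(LL_EXTI_LINE_0);
  LL_EXTI_EnableIT_0_31(LL_EXTI_LINE_0);

  NVIC_SetPriority(EXTI0_IRQn, 6U);
  NVIC_EnableIRQ(EXTI0_IRQn);
}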
+ * @rmtoll RTSR RTx LL_EXTI_DisableRisingTrig_0_31 + * @param ExtiLine This parameter can be a combination of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval None + */ +__STATIC_INLINE void LL_EXTI_DisableRisingTrig_0_31(uint32_t ExtiLine) +{ + CLEAR_BIT(EXTI->RTSR, ExtiLine); + +} + + +/** + * @brief Check if rising edge trigger is enabled for Lines in range 0 to 31 + * @rmtoll RTSR RTx LL_EXTI_IsEnabledRisingTrig_0_31 + * @param ExtiLine This parameter can be a combination of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_EXTI_IsEnabledRisingTrig_0_31(uint32_t ExtiLine) +{ + return (READ_BIT(EXTI->RTSR, ExtiLine) == (ExtiLine)); +} + + +/** + * @} + */ + +/** @defgroup EXTI_LL_EF_Falling_Trigger_Management Falling_Trigger_Management + * @{ + */ + +/** + * @brief Enable ExtiLine Falling Edge Trigger for Lines in range 0 to 31 + * @note The configurable wakeup lines are edge-triggered. No glitch must be + * generated on these lines. If a falling edge on a configurable interrupt + * line occurs during a write operation in the EXTI_FTSR register, the + * pending bit is not set. + * Rising and falling edge triggers can be set for + * the same interrupt line. In this case, both generate a trigger + * condition. 
+ * @rmtoll FTSR FTx LL_EXTI_EnableFallingTrig_0_31 + * @param ExtiLine This parameter can be a combination of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval None + */ +__STATIC_INLINE void LL_EXTI_EnableFallingTrig_0_31(uint32_t ExtiLine) +{ + SET_BIT(EXTI->FTSR, ExtiLine); +} + + +/** + * @brief Disable ExtiLine Falling Edge Trigger for Lines in range 0 to 31 + * @note The configurable wakeup lines are edge-triggered. No glitch must be + * generated on these lines. If a Falling edge on a configurable interrupt + * line occurs during a write operation in the EXTI_FTSR register, the + * pending bit is not set. + * Rising and falling edge triggers can be set for the same interrupt line. + * In this case, both generate a trigger condition. + * @rmtoll FTSR FTx LL_EXTI_DisableFallingTrig_0_31 + * @param ExtiLine This parameter can be a combination of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval None + */ +__STATIC_INLINE void LL_EXTI_DisableFallingTrig_0_31(uint32_t ExtiLine) +{ + CLEAR_BIT(EXTI->FTSR, ExtiLine); +} + + +/** + * @brief Check if falling edge trigger is enabled for Lines in range 0 to 31 + * @rmtoll FTSR FTx LL_EXTI_IsEnabledFallingTrig_0_31 + * @param ExtiLine This parameter can be a combination of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * 
@retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_EXTI_IsEnabledFallingTrig_0_31(uint32_t ExtiLine) +{ + return (READ_BIT(EXTI->FTSR, ExtiLine) == (ExtiLine)); +} + + +/** + * @} + */ + +/** @defgroup EXTI_LL_EF_Software_Interrupt_Management Software_Interrupt_Management + * @{ + */ + +/** + * @brief Generate a software Interrupt Event for Lines in range 0 to 31 + * @note If the interrupt is enabled on this line in the EXTI_IMR, writing a 1 to + * this bit when it is at '0' sets the corresponding pending bit in EXTI_PR + * resulting in an interrupt request generation. + * This bit is cleared by clearing the corresponding bit in the EXTI_PR + * register (by writing a 1 into the bit) + * @rmtoll SWIER SWIx LL_EXTI_GenerateSWI_0_31 + * @param ExtiLine This parameter can be a combination of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval None + */ +__STATIC_INLINE void LL_EXTI_GenerateSWI_0_31(uint32_t ExtiLine) +{ + SET_BIT(EXTI->SWIER, ExtiLine); +} + + +/** + * @} + */ + +/** @defgroup EXTI_LL_EF_Flag_Management Flag_Management + * @{ + */ + +/** + * @brief Check if the ExtLine Flag is set or not for Lines in range 0 to 31 + * @note This bit is set when the selected edge event arrives on the interrupt + * line. This bit is cleared by writing a 1 to the bit. + * @rmtoll PR PIFx LL_EXTI_IsActiveFlag_0_31 + * @param ExtiLine This parameter can be a combination of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_EXTI_IsActiveFlag_0_31(uint32_t ExtiLine) +{ + return (READ_BIT(EXTI->PR, ExtiLine) == (ExtiLine)); +} + + +/** + * @brief Read ExtLine Combination Flag for Lines in range 0 to 31 + * @note This bit is set when the selected edge event arrives on the interrupt + * line. This bit is cleared by writing a 1 to the bit. 
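The pending-flag helpers are typically paired inside an interrupt handler. A minimal sketch, assuming EXTI line 13 was configured for interrupts as in the earlier sketch; it uses LL_EXTI_ClearFlag_0_31, which is defined just below, and EXTI15_10_IRQHandler / EXTI15_10_IRQn from the CMSIS startup and device files:

void EXTI15_10_IRQHandler(void)
{
  /* The PR bit is set on the selected edge and cleared by writing 1 to it */
  if (LL_EXTI_IsActiveFlag_0_31(LL_EXTI_LINE_13))
  {
    LL_EXTI_ClearFlag_0_31(LL_EXTI_LINE_13);
    /* ... application callback for the pin mapped to line 13 ... */
  }
}

/* For test purposes the same pending bit can be raised from software: */
static void exti_line13_soft_trigger(void)
{
  LL_EXTI_GenerateSWI_0_31(LL_EXTI_LINE_13);
}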
+ * @rmtoll PR PIFx LL_EXTI_ReadFlag_0_31 + * @param ExtiLine This parameter can be a combination of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval @note This bit is set when the selected edge event arrives on the interrupt + */ +__STATIC_INLINE uint32_t LL_EXTI_ReadFlag_0_31(uint32_t ExtiLine) +{ + return (uint32_t)(READ_BIT(EXTI->PR, ExtiLine)); +} + + +/** + * @brief Clear ExtLine Flags for Lines in range 0 to 31 + * @note This bit is set when the selected edge event arrives on the interrupt + * line. This bit is cleared by writing a 1 to the bit. + * @rmtoll PR PIFx LL_EXTI_ClearFlag_0_31 + * @param ExtiLine This parameter can be a combination of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval None + */ +__STATIC_INLINE void LL_EXTI_ClearFlag_0_31(uint32_t ExtiLine) +{ + WRITE_REG(EXTI->PR, ExtiLine); +} + + +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup EXTI_LL_EF_Init Initialization and de-initialization functions + * @{ + */ + +uint32_t LL_EXTI_Init(LL_EXTI_InitTypeDef *EXTI_InitStruct); +uint32_t LL_EXTI_DeInit(void); +void LL_EXTI_StructInit(LL_EXTI_InitTypeDef *EXTI_InitStruct); + + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* EXTI */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_LL_EXTI_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_gpio.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_gpio.h new file mode 100644 index 0000000..6bee7fd --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_gpio.h @@ -0,0 +1,981 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_gpio.h + * @author MCD Application Team + * @brief Header file of GPIO LL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. 
+ * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_LL_GPIO_H +#define __STM32F4xx_LL_GPIO_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +#if defined (GPIOA) || defined (GPIOB) || defined (GPIOC) || defined (GPIOD) || defined (GPIOE) || defined (GPIOF) || defined (GPIOG) || defined (GPIOH) || defined (GPIOI) || defined (GPIOJ) || defined (GPIOK) + +/** @defgroup GPIO_LL GPIO + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup GPIO_LL_Private_Macros GPIO Private Macros + * @{ + */ + +/** + * @} + */ +#endif /*USE_FULL_LL_DRIVER*/ + +/* Exported types ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup GPIO_LL_ES_INIT GPIO Exported Init structures + * @{ + */ + +/** + * @brief LL GPIO Init Structure definition + */ +typedef struct +{ + uint32_t Pin; /*!< Specifies the GPIO pins to be configured. + This parameter can be any value of @ref GPIO_LL_EC_PIN */ + + uint32_t Mode; /*!< Specifies the operating mode for the selected pins. + This parameter can be a value of @ref GPIO_LL_EC_MODE. + + GPIO HW configuration can be modified afterwards using unitary function @ref LL_GPIO_SetPinMode().*/ + + uint32_t Speed; /*!< Specifies the speed for the selected pins. + This parameter can be a value of @ref GPIO_LL_EC_SPEED. + + GPIO HW configuration can be modified afterwards using unitary function @ref LL_GPIO_SetPinSpeed().*/ + + uint32_t OutputType; /*!< Specifies the operating output type for the selected pins. + This parameter can be a value of @ref GPIO_LL_EC_OUTPUT. + + GPIO HW configuration can be modified afterwards using unitary function @ref LL_GPIO_SetPinOutputType().*/ + + uint32_t Pull; /*!< Specifies the operating Pull-up/Pull down for the selected pins. + This parameter can be a value of @ref GPIO_LL_EC_PULL. + + GPIO HW configuration can be modified afterwards using unitary function @ref LL_GPIO_SetPinPull().*/ + + uint32_t Alternate; /*!< Specifies the Peripheral to be connected to the selected pins. + This parameter can be a value of @ref GPIO_LL_EC_AF. 
+ + GPIO HW configuration can be modified afterwards using unitary function @ref LL_GPIO_SetAFPin_0_7() and LL_GPIO_SetAFPin_8_15().*/ +} LL_GPIO_InitTypeDef; + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup GPIO_LL_Exported_Constants GPIO Exported Constants + * @{ + */ + +/** @defgroup GPIO_LL_EC_PIN PIN + * @{ + */ +#define LL_GPIO_PIN_0 GPIO_BSRR_BS_0 /*!< Select pin 0 */ +#define LL_GPIO_PIN_1 GPIO_BSRR_BS_1 /*!< Select pin 1 */ +#define LL_GPIO_PIN_2 GPIO_BSRR_BS_2 /*!< Select pin 2 */ +#define LL_GPIO_PIN_3 GPIO_BSRR_BS_3 /*!< Select pin 3 */ +#define LL_GPIO_PIN_4 GPIO_BSRR_BS_4 /*!< Select pin 4 */ +#define LL_GPIO_PIN_5 GPIO_BSRR_BS_5 /*!< Select pin 5 */ +#define LL_GPIO_PIN_6 GPIO_BSRR_BS_6 /*!< Select pin 6 */ +#define LL_GPIO_PIN_7 GPIO_BSRR_BS_7 /*!< Select pin 7 */ +#define LL_GPIO_PIN_8 GPIO_BSRR_BS_8 /*!< Select pin 8 */ +#define LL_GPIO_PIN_9 GPIO_BSRR_BS_9 /*!< Select pin 9 */ +#define LL_GPIO_PIN_10 GPIO_BSRR_BS_10 /*!< Select pin 10 */ +#define LL_GPIO_PIN_11 GPIO_BSRR_BS_11 /*!< Select pin 11 */ +#define LL_GPIO_PIN_12 GPIO_BSRR_BS_12 /*!< Select pin 12 */ +#define LL_GPIO_PIN_13 GPIO_BSRR_BS_13 /*!< Select pin 13 */ +#define LL_GPIO_PIN_14 GPIO_BSRR_BS_14 /*!< Select pin 14 */ +#define LL_GPIO_PIN_15 GPIO_BSRR_BS_15 /*!< Select pin 15 */ +#define LL_GPIO_PIN_ALL (GPIO_BSRR_BS_0 | GPIO_BSRR_BS_1 | GPIO_BSRR_BS_2 | \ + GPIO_BSRR_BS_3 | GPIO_BSRR_BS_4 | GPIO_BSRR_BS_5 | \ + GPIO_BSRR_BS_6 | GPIO_BSRR_BS_7 | GPIO_BSRR_BS_8 | \ + GPIO_BSRR_BS_9 | GPIO_BSRR_BS_10 | GPIO_BSRR_BS_11 | \ + GPIO_BSRR_BS_12 | GPIO_BSRR_BS_13 | GPIO_BSRR_BS_14 | \ + GPIO_BSRR_BS_15) /*!< Select all pins */ +/** + * @} + */ + +/** @defgroup GPIO_LL_EC_MODE Mode + * @{ + */ +#define LL_GPIO_MODE_INPUT (0x00000000U) /*!< Select input mode */ +#define LL_GPIO_MODE_OUTPUT GPIO_MODER_MODER0_0 /*!< Select output mode */ +#define LL_GPIO_MODE_ALTERNATE GPIO_MODER_MODER0_1 /*!< Select alternate function mode */ +#define LL_GPIO_MODE_ANALOG GPIO_MODER_MODER0 /*!< Select analog mode */ +/** + * @} + */ + +/** @defgroup GPIO_LL_EC_OUTPUT Output Type + * @{ + */ +#define LL_GPIO_OUTPUT_PUSHPULL (0x00000000U) /*!< Select push-pull as output type */ +#define LL_GPIO_OUTPUT_OPENDRAIN GPIO_OTYPER_OT_0 /*!< Select open-drain as output type */ +/** + * @} + */ + +/** @defgroup GPIO_LL_EC_SPEED Output Speed + * @{ + */ +#define LL_GPIO_SPEED_FREQ_LOW (0x00000000U) /*!< Select I/O low output speed */ +#define LL_GPIO_SPEED_FREQ_MEDIUM GPIO_OSPEEDER_OSPEEDR0_0 /*!< Select I/O medium output speed */ +#define LL_GPIO_SPEED_FREQ_HIGH GPIO_OSPEEDER_OSPEEDR0_1 /*!< Select I/O fast output speed */ +#define LL_GPIO_SPEED_FREQ_VERY_HIGH GPIO_OSPEEDER_OSPEEDR0 /*!< Select I/O high output speed */ +/** + * @} + */ + +/** @defgroup GPIO_LL_EC_PULL Pull Up Pull Down + * @{ + */ +#define LL_GPIO_PULL_NO (0x00000000U) /*!< Select I/O no pull */ +#define LL_GPIO_PULL_UP GPIO_PUPDR_PUPDR0_0 /*!< Select I/O pull up */ +#define LL_GPIO_PULL_DOWN GPIO_PUPDR_PUPDR0_1 /*!< Select I/O pull down */ +/** + * @} + */ + +/** @defgroup GPIO_LL_EC_AF Alternate Function + * @{ + */ +#define LL_GPIO_AF_0 (0x0000000U) /*!< Select alternate function 0 */ +#define LL_GPIO_AF_1 (0x0000001U) /*!< Select alternate function 1 */ +#define LL_GPIO_AF_2 (0x0000002U) /*!< Select alternate function 2 */ +#define LL_GPIO_AF_3 (0x0000003U) /*!< Select alternate function 3 */ +#define LL_GPIO_AF_4 (0x0000004U) /*!< Select alternate function 4 */ +#define 
LL_GPIO_AF_5 (0x0000005U) /*!< Select alternate function 5 */ +#define LL_GPIO_AF_6 (0x0000006U) /*!< Select alternate function 6 */ +#define LL_GPIO_AF_7 (0x0000007U) /*!< Select alternate function 7 */ +#define LL_GPIO_AF_8 (0x0000008U) /*!< Select alternate function 8 */ +#define LL_GPIO_AF_9 (0x0000009U) /*!< Select alternate function 9 */ +#define LL_GPIO_AF_10 (0x000000AU) /*!< Select alternate function 10 */ +#define LL_GPIO_AF_11 (0x000000BU) /*!< Select alternate function 11 */ +#define LL_GPIO_AF_12 (0x000000CU) /*!< Select alternate function 12 */ +#define LL_GPIO_AF_13 (0x000000DU) /*!< Select alternate function 13 */ +#define LL_GPIO_AF_14 (0x000000EU) /*!< Select alternate function 14 */ +#define LL_GPIO_AF_15 (0x000000FU) /*!< Select alternate function 15 */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup GPIO_LL_Exported_Macros GPIO Exported Macros + * @{ + */ + +/** @defgroup GPIO_LL_EM_WRITE_READ Common Write and read registers Macros + * @{ + */ + +/** + * @brief Write a value in GPIO register + * @param __INSTANCE__ GPIO Instance + * @param __REG__ Register to be written + * @param __VALUE__ Value to be written in the register + * @retval None + */ +#define LL_GPIO_WriteReg(__INSTANCE__, __REG__, __VALUE__) WRITE_REG(__INSTANCE__->__REG__, (__VALUE__)) + +/** + * @brief Read a value in GPIO register + * @param __INSTANCE__ GPIO Instance + * @param __REG__ Register to be read + * @retval Register value + */ +#define LL_GPIO_ReadReg(__INSTANCE__, __REG__) READ_REG(__INSTANCE__->__REG__) +/** + * @} + */ + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup GPIO_LL_Exported_Functions GPIO Exported Functions + * @{ + */ + +/** @defgroup GPIO_LL_EF_Port_Configuration Port Configuration + * @{ + */ + +/** + * @brief Configure gpio mode for a dedicated pin on dedicated port. + * @note I/O mode can be Input mode, General purpose output, Alternate function mode or Analog. + * @note Warning: only one pin can be passed as parameter. + * @rmtoll MODER MODEy LL_GPIO_SetPinMode + * @param GPIOx GPIO Port + * @param Pin This parameter can be one of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @param Mode This parameter can be one of the following values: + * @arg @ref LL_GPIO_MODE_INPUT + * @arg @ref LL_GPIO_MODE_OUTPUT + * @arg @ref LL_GPIO_MODE_ALTERNATE + * @arg @ref LL_GPIO_MODE_ANALOG + * @retval None + */ +__STATIC_INLINE void LL_GPIO_SetPinMode(GPIO_TypeDef *GPIOx, uint32_t Pin, uint32_t Mode) +{ + MODIFY_REG(GPIOx->MODER, (GPIO_MODER_MODER0 << (POSITION_VAL(Pin) * 2U)), (Mode << (POSITION_VAL(Pin) * 2U))); +} + +/** + * @brief Return gpio mode for a dedicated pin on dedicated port. + * @note I/O mode can be Input mode, General purpose output, Alternate function mode or Analog. + * @note Warning: only one pin can be passed as parameter. 
+ * @rmtoll MODER MODEy LL_GPIO_GetPinMode + * @param GPIOx GPIO Port + * @param Pin This parameter can be one of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @retval Returned value can be one of the following values: + * @arg @ref LL_GPIO_MODE_INPUT + * @arg @ref LL_GPIO_MODE_OUTPUT + * @arg @ref LL_GPIO_MODE_ALTERNATE + * @arg @ref LL_GPIO_MODE_ANALOG + */ +__STATIC_INLINE uint32_t LL_GPIO_GetPinMode(GPIO_TypeDef *GPIOx, uint32_t Pin) +{ + return (uint32_t)(READ_BIT(GPIOx->MODER, + (GPIO_MODER_MODER0 << (POSITION_VAL(Pin) * 2U))) >> (POSITION_VAL(Pin) * 2U)); +} + +/** + * @brief Configure gpio output type for several pins on dedicated port. + * @note Output type as to be set when gpio pin is in output or + * alternate modes. Possible type are Push-pull or Open-drain. + * @rmtoll OTYPER OTy LL_GPIO_SetPinOutputType + * @param GPIOx GPIO Port + * @param PinMask This parameter can be a combination of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @arg @ref LL_GPIO_PIN_ALL + * @param OutputType This parameter can be one of the following values: + * @arg @ref LL_GPIO_OUTPUT_PUSHPULL + * @arg @ref LL_GPIO_OUTPUT_OPENDRAIN + * @retval None + */ +__STATIC_INLINE void LL_GPIO_SetPinOutputType(GPIO_TypeDef *GPIOx, uint32_t PinMask, uint32_t OutputType) +{ + MODIFY_REG(GPIOx->OTYPER, PinMask, (PinMask * OutputType)); +} + +/** + * @brief Return gpio output type for several pins on dedicated port. + * @note Output type as to be set when gpio pin is in output or + * alternate modes. Possible type are Push-pull or Open-drain. + * @note Warning: only one pin can be passed as parameter. + * @rmtoll OTYPER OTy LL_GPIO_GetPinOutputType + * @param GPIOx GPIO Port + * @param Pin This parameter can be one of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @arg @ref LL_GPIO_PIN_ALL + * @retval Returned value can be one of the following values: + * @arg @ref LL_GPIO_OUTPUT_PUSHPULL + * @arg @ref LL_GPIO_OUTPUT_OPENDRAIN + */ +__STATIC_INLINE uint32_t LL_GPIO_GetPinOutputType(GPIO_TypeDef *GPIOx, uint32_t Pin) +{ + return (uint32_t)(READ_BIT(GPIOx->OTYPER, Pin) >> POSITION_VAL(Pin)); +} + +/** + * @brief Configure gpio speed for a dedicated pin on dedicated port. + * @note I/O speed can be Low, Medium, Fast or High speed. 
+ * @note Warning: only one pin can be passed as parameter. + * @note Refer to datasheet for frequency specifications and the power + * supply and load conditions for each speed. + * @rmtoll OSPEEDR OSPEEDy LL_GPIO_SetPinSpeed + * @param GPIOx GPIO Port + * @param Pin This parameter can be one of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @param Speed This parameter can be one of the following values: + * @arg @ref LL_GPIO_SPEED_FREQ_LOW + * @arg @ref LL_GPIO_SPEED_FREQ_MEDIUM + * @arg @ref LL_GPIO_SPEED_FREQ_HIGH + * @arg @ref LL_GPIO_SPEED_FREQ_VERY_HIGH + * @retval None + */ +__STATIC_INLINE void LL_GPIO_SetPinSpeed(GPIO_TypeDef *GPIOx, uint32_t Pin, uint32_t Speed) +{ + MODIFY_REG(GPIOx->OSPEEDR, (GPIO_OSPEEDER_OSPEEDR0 << (POSITION_VAL(Pin) * 2U)), + (Speed << (POSITION_VAL(Pin) * 2U))); +} + +/** + * @brief Return gpio speed for a dedicated pin on dedicated port. + * @note I/O speed can be Low, Medium, Fast or High speed. + * @note Warning: only one pin can be passed as parameter. + * @note Refer to datasheet for frequency specifications and the power + * supply and load conditions for each speed. + * @rmtoll OSPEEDR OSPEEDy LL_GPIO_GetPinSpeed + * @param GPIOx GPIO Port + * @param Pin This parameter can be one of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @retval Returned value can be one of the following values: + * @arg @ref LL_GPIO_SPEED_FREQ_LOW + * @arg @ref LL_GPIO_SPEED_FREQ_MEDIUM + * @arg @ref LL_GPIO_SPEED_FREQ_HIGH + * @arg @ref LL_GPIO_SPEED_FREQ_VERY_HIGH + */ +__STATIC_INLINE uint32_t LL_GPIO_GetPinSpeed(GPIO_TypeDef *GPIOx, uint32_t Pin) +{ + return (uint32_t)(READ_BIT(GPIOx->OSPEEDR, + (GPIO_OSPEEDER_OSPEEDR0 << (POSITION_VAL(Pin) * 2U))) >> (POSITION_VAL(Pin) * 2U)); +} + +/** + * @brief Configure gpio pull-up or pull-down for a dedicated pin on a dedicated port. + * @note Warning: only one pin can be passed as parameter. 
+ * @rmtoll PUPDR PUPDy LL_GPIO_SetPinPull + * @param GPIOx GPIO Port + * @param Pin This parameter can be one of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @param Pull This parameter can be one of the following values: + * @arg @ref LL_GPIO_PULL_NO + * @arg @ref LL_GPIO_PULL_UP + * @arg @ref LL_GPIO_PULL_DOWN + * @retval None + */ +__STATIC_INLINE void LL_GPIO_SetPinPull(GPIO_TypeDef *GPIOx, uint32_t Pin, uint32_t Pull) +{ + MODIFY_REG(GPIOx->PUPDR, (GPIO_PUPDR_PUPDR0 << (POSITION_VAL(Pin) * 2U)), (Pull << (POSITION_VAL(Pin) * 2U))); +} + +/** + * @brief Return gpio pull-up or pull-down for a dedicated pin on a dedicated port + * @note Warning: only one pin can be passed as parameter. + * @rmtoll PUPDR PUPDy LL_GPIO_GetPinPull + * @param GPIOx GPIO Port + * @param Pin This parameter can be one of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @retval Returned value can be one of the following values: + * @arg @ref LL_GPIO_PULL_NO + * @arg @ref LL_GPIO_PULL_UP + * @arg @ref LL_GPIO_PULL_DOWN + */ +__STATIC_INLINE uint32_t LL_GPIO_GetPinPull(GPIO_TypeDef *GPIOx, uint32_t Pin) +{ + return (uint32_t)(READ_BIT(GPIOx->PUPDR, + (GPIO_PUPDR_PUPDR0 << (POSITION_VAL(Pin) * 2U))) >> (POSITION_VAL(Pin) * 2U)); +} + +/** + * @brief Configure gpio alternate function of a dedicated pin from 0 to 7 for a dedicated port. + * @note Possible values are from AF0 to AF15 depending on target. + * @note Warning: only one pin can be passed as parameter. + * @rmtoll AFRL AFSELy LL_GPIO_SetAFPin_0_7 + * @param GPIOx GPIO Port + * @param Pin This parameter can be one of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @param Alternate This parameter can be one of the following values: + * @arg @ref LL_GPIO_AF_0 + * @arg @ref LL_GPIO_AF_1 + * @arg @ref LL_GPIO_AF_2 + * @arg @ref LL_GPIO_AF_3 + * @arg @ref LL_GPIO_AF_4 + * @arg @ref LL_GPIO_AF_5 + * @arg @ref LL_GPIO_AF_6 + * @arg @ref LL_GPIO_AF_7 + * @arg @ref LL_GPIO_AF_8 + * @arg @ref LL_GPIO_AF_9 + * @arg @ref LL_GPIO_AF_10 + * @arg @ref LL_GPIO_AF_11 + * @arg @ref LL_GPIO_AF_12 + * @arg @ref LL_GPIO_AF_13 + * @arg @ref LL_GPIO_AF_14 + * @arg @ref LL_GPIO_AF_15 + * @retval None + */ +__STATIC_INLINE void LL_GPIO_SetAFPin_0_7(GPIO_TypeDef *GPIOx, uint32_t Pin, uint32_t Alternate) +{ + MODIFY_REG(GPIOx->AFR[0], (GPIO_AFRL_AFSEL0 << (POSITION_VAL(Pin) * 4U)), + (Alternate << (POSITION_VAL(Pin) * 4U))); +} + +/** + * @brief Return gpio alternate function of a dedicated pin from 0 to 7 for a dedicated port. 
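A combined sketch for the unitary pin-configuration functions above (illustrative: PA5 on AF5 is only an example mapping, and the GPIOA peripheral clock is assumed to be enabled beforehand via the RCC/bus LL driver). When USE_FULL_LL_DRIVER is set, the same configuration can also be expressed through the LL_GPIO_InitTypeDef structure defined earlier in this header.

#include "stm32f4xx_ll_gpio.h"

static void pa5_af5_pin_setup(void)
{
  /* GPIOA clock is assumed to be enabled already (RCC/bus LL driver) */
  LL_GPIO_SetPinMode(GPIOA, LL_GPIO_PIN_5, LL_GPIO_MODE_ALTERNATE);
  LL_GPIO_SetAFPin_0_7(GPIOA, LL_GPIO_PIN_5, LL_GPIO_AF_5);
  LL_GPIO_SetPinSpeed(GPIOA, LL_GPIO_PIN_5, LL_GPIO_SPEED_FREQ_VERY_HIGH);
  LL_GPIO_SetPinOutputType(GPIOA, LL_GPIO_PIN_5, LL_GPIO_OUTPUT_PUSHPULL);
  LL_GPIO_SetPinPull(GPIOA, LL_GPIO_PIN_5, LL_GPIO_PULL_NO);
}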
+ * @rmtoll AFRL AFSELy LL_GPIO_GetAFPin_0_7 + * @param GPIOx GPIO Port + * @param Pin This parameter can be one of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_GPIO_AF_0 + * @arg @ref LL_GPIO_AF_1 + * @arg @ref LL_GPIO_AF_2 + * @arg @ref LL_GPIO_AF_3 + * @arg @ref LL_GPIO_AF_4 + * @arg @ref LL_GPIO_AF_5 + * @arg @ref LL_GPIO_AF_6 + * @arg @ref LL_GPIO_AF_7 + * @arg @ref LL_GPIO_AF_8 + * @arg @ref LL_GPIO_AF_9 + * @arg @ref LL_GPIO_AF_10 + * @arg @ref LL_GPIO_AF_11 + * @arg @ref LL_GPIO_AF_12 + * @arg @ref LL_GPIO_AF_13 + * @arg @ref LL_GPIO_AF_14 + * @arg @ref LL_GPIO_AF_15 + */ +__STATIC_INLINE uint32_t LL_GPIO_GetAFPin_0_7(GPIO_TypeDef *GPIOx, uint32_t Pin) +{ + return (uint32_t)(READ_BIT(GPIOx->AFR[0], + (GPIO_AFRL_AFSEL0 << (POSITION_VAL(Pin) * 4U))) >> (POSITION_VAL(Pin) * 4U)); +} + +/** + * @brief Configure gpio alternate function of a dedicated pin from 8 to 15 for a dedicated port. + * @note Possible values are from AF0 to AF15 depending on target. + * @note Warning: only one pin can be passed as parameter. + * @rmtoll AFRH AFSELy LL_GPIO_SetAFPin_8_15 + * @param GPIOx GPIO Port + * @param Pin This parameter can be one of the following values: + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @param Alternate This parameter can be one of the following values: + * @arg @ref LL_GPIO_AF_0 + * @arg @ref LL_GPIO_AF_1 + * @arg @ref LL_GPIO_AF_2 + * @arg @ref LL_GPIO_AF_3 + * @arg @ref LL_GPIO_AF_4 + * @arg @ref LL_GPIO_AF_5 + * @arg @ref LL_GPIO_AF_6 + * @arg @ref LL_GPIO_AF_7 + * @arg @ref LL_GPIO_AF_8 + * @arg @ref LL_GPIO_AF_9 + * @arg @ref LL_GPIO_AF_10 + * @arg @ref LL_GPIO_AF_11 + * @arg @ref LL_GPIO_AF_12 + * @arg @ref LL_GPIO_AF_13 + * @arg @ref LL_GPIO_AF_14 + * @arg @ref LL_GPIO_AF_15 + * @retval None + */ +__STATIC_INLINE void LL_GPIO_SetAFPin_8_15(GPIO_TypeDef *GPIOx, uint32_t Pin, uint32_t Alternate) +{ + MODIFY_REG(GPIOx->AFR[1], (GPIO_AFRH_AFSEL8 << (POSITION_VAL(Pin >> 8U) * 4U)), + (Alternate << (POSITION_VAL(Pin >> 8U) * 4U))); +} + +/** + * @brief Return gpio alternate function of a dedicated pin from 8 to 15 for a dedicated port. + * @note Possible values are from AF0 to AF15 depending on target. 
+ * @rmtoll AFRH AFSELy LL_GPIO_GetAFPin_8_15 + * @param GPIOx GPIO Port + * @param Pin This parameter can be one of the following values: + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @retval Returned value can be one of the following values: + * @arg @ref LL_GPIO_AF_0 + * @arg @ref LL_GPIO_AF_1 + * @arg @ref LL_GPIO_AF_2 + * @arg @ref LL_GPIO_AF_3 + * @arg @ref LL_GPIO_AF_4 + * @arg @ref LL_GPIO_AF_5 + * @arg @ref LL_GPIO_AF_6 + * @arg @ref LL_GPIO_AF_7 + * @arg @ref LL_GPIO_AF_8 + * @arg @ref LL_GPIO_AF_9 + * @arg @ref LL_GPIO_AF_10 + * @arg @ref LL_GPIO_AF_11 + * @arg @ref LL_GPIO_AF_12 + * @arg @ref LL_GPIO_AF_13 + * @arg @ref LL_GPIO_AF_14 + * @arg @ref LL_GPIO_AF_15 + */ +__STATIC_INLINE uint32_t LL_GPIO_GetAFPin_8_15(GPIO_TypeDef *GPIOx, uint32_t Pin) +{ + return (uint32_t)(READ_BIT(GPIOx->AFR[1], + (GPIO_AFRH_AFSEL8 << (POSITION_VAL(Pin >> 8U) * 4U))) >> (POSITION_VAL(Pin >> 8U) * 4U)); +} + + +/** + * @brief Lock configuration of several pins for a dedicated port. + * @note When the lock sequence has been applied on a port bit, the + * value of this port bit can no longer be modified until the + * next reset. + * @note Each lock bit freezes a specific configuration register + * (control and alternate function registers). + * @rmtoll LCKR LCKK LL_GPIO_LockPin + * @param GPIOx GPIO Port + * @param PinMask This parameter can be a combination of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @arg @ref LL_GPIO_PIN_ALL + * @retval None + */ +__STATIC_INLINE void LL_GPIO_LockPin(GPIO_TypeDef *GPIOx, uint32_t PinMask) +{ + __IO uint32_t temp; + WRITE_REG(GPIOx->LCKR, GPIO_LCKR_LCKK | PinMask); + WRITE_REG(GPIOx->LCKR, PinMask); + WRITE_REG(GPIOx->LCKR, GPIO_LCKR_LCKK | PinMask); + temp = READ_REG(GPIOx->LCKR); + (void) temp; +} + +/** + * @brief Return 1 if all pins passed as parameter, of a dedicated port, are locked. else Return 0. + * @rmtoll LCKR LCKy LL_GPIO_IsPinLocked + * @param GPIOx GPIO Port + * @param PinMask This parameter can be a combination of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @arg @ref LL_GPIO_PIN_ALL + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_GPIO_IsPinLocked(GPIO_TypeDef *GPIOx, uint32_t PinMask) +{ + return (READ_BIT(GPIOx->LCKR, PinMask) == (PinMask)); +} + +/** + * @brief Return 1 if one of the pin of a dedicated port is locked. else return 0. + * @rmtoll LCKR LCKK LL_GPIO_IsAnyPinLocked + * @param GPIOx GPIO Port + * @retval State of bit (1 or 0). 
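+ * @note  Illustrative usage sketch (PA5 is an arbitrary assumption): apply the
+ *        lock sequence with LL_GPIO_LockPin() above, then verify that the pin
+ *        configuration is frozen.
+ * @code
+ *        LL_GPIO_LockPin(GPIOA, LL_GPIO_PIN_5);
+ *        uint32_t pa5_locked = LL_GPIO_IsPinLocked(GPIOA, LL_GPIO_PIN_5);
+ * @endcode
+ * @note  pa5_locked reads 1 once the LCKR write sequence has succeeded.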
+ */ +__STATIC_INLINE uint32_t LL_GPIO_IsAnyPinLocked(GPIO_TypeDef *GPIOx) +{ + return (READ_BIT(GPIOx->LCKR, GPIO_LCKR_LCKK) == (GPIO_LCKR_LCKK)); +} + +/** + * @} + */ + +/** @defgroup GPIO_LL_EF_Data_Access Data Access + * @{ + */ + +/** + * @brief Return full input data register value for a dedicated port. + * @rmtoll IDR IDy LL_GPIO_ReadInputPort + * @param GPIOx GPIO Port + * @retval Input data register value of port + */ +__STATIC_INLINE uint32_t LL_GPIO_ReadInputPort(GPIO_TypeDef *GPIOx) +{ + return (uint32_t)(READ_REG(GPIOx->IDR)); +} + +/** + * @brief Return if input data level for several pins of dedicated port is high or low. + * @rmtoll IDR IDy LL_GPIO_IsInputPinSet + * @param GPIOx GPIO Port + * @param PinMask This parameter can be a combination of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @arg @ref LL_GPIO_PIN_ALL + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_GPIO_IsInputPinSet(GPIO_TypeDef *GPIOx, uint32_t PinMask) +{ + return (READ_BIT(GPIOx->IDR, PinMask) == (PinMask)); +} + +/** + * @brief Write output data register for the port. + * @rmtoll ODR ODy LL_GPIO_WriteOutputPort + * @param GPIOx GPIO Port + * @param PortValue Level value for each pin of the port + * @retval None + */ +__STATIC_INLINE void LL_GPIO_WriteOutputPort(GPIO_TypeDef *GPIOx, uint32_t PortValue) +{ + WRITE_REG(GPIOx->ODR, PortValue); +} + +/** + * @brief Return full output data register value for a dedicated port. + * @rmtoll ODR ODy LL_GPIO_ReadOutputPort + * @param GPIOx GPIO Port + * @retval Output data register value of port + */ +__STATIC_INLINE uint32_t LL_GPIO_ReadOutputPort(GPIO_TypeDef *GPIOx) +{ + return (uint32_t)(READ_REG(GPIOx->ODR)); +} + +/** + * @brief Return if input data level for several pins of dedicated port is high or low. + * @rmtoll ODR ODy LL_GPIO_IsOutputPinSet + * @param GPIOx GPIO Port + * @param PinMask This parameter can be a combination of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @arg @ref LL_GPIO_PIN_ALL + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_GPIO_IsOutputPinSet(GPIO_TypeDef *GPIOx, uint32_t PinMask) +{ + return (READ_BIT(GPIOx->ODR, PinMask) == (PinMask)); +} + +/** + * @brief Set several pins to high level on dedicated gpio port. 
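+ * @note  Illustrative usage sketch (GPIOB pin 0 is an arbitrary assumption):
+ *        writes go through BSRR, so set and reset are atomic and need no
+ *        read-modify-write of ODR.
+ * @code
+ *        LL_GPIO_SetOutputPin(GPIOB, LL_GPIO_PIN_0);
+ *        LL_GPIO_ResetOutputPin(GPIOB, LL_GPIO_PIN_0);
+ * @endcode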
+ * @rmtoll BSRR BSy LL_GPIO_SetOutputPin + * @param GPIOx GPIO Port + * @param PinMask This parameter can be a combination of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @arg @ref LL_GPIO_PIN_ALL + * @retval None + */ +__STATIC_INLINE void LL_GPIO_SetOutputPin(GPIO_TypeDef *GPIOx, uint32_t PinMask) +{ + WRITE_REG(GPIOx->BSRR, PinMask); +} + +/** + * @brief Set several pins to low level on dedicated gpio port. + * @rmtoll BSRR BRy LL_GPIO_ResetOutputPin + * @param GPIOx GPIO Port + * @param PinMask This parameter can be a combination of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @arg @ref LL_GPIO_PIN_ALL + * @retval None + */ +__STATIC_INLINE void LL_GPIO_ResetOutputPin(GPIO_TypeDef *GPIOx, uint32_t PinMask) +{ + WRITE_REG(GPIOx->BSRR, (PinMask << 16)); +} + +/** + * @brief Toggle data value for several pin of dedicated port. + * @rmtoll ODR ODy LL_GPIO_TogglePin + * @param GPIOx GPIO Port + * @param PinMask This parameter can be a combination of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @arg @ref LL_GPIO_PIN_ALL + * @retval None + */ +__STATIC_INLINE void LL_GPIO_TogglePin(GPIO_TypeDef *GPIOx, uint32_t PinMask) +{ + uint32_t odr = READ_REG(GPIOx->ODR); + WRITE_REG(GPIOx->BSRR, ((odr & PinMask) << 16u) | (~odr & PinMask)); +} + +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup GPIO_LL_EF_Init Initialization and de-initialization functions + * @{ + */ + +ErrorStatus LL_GPIO_DeInit(GPIO_TypeDef *GPIOx); +ErrorStatus LL_GPIO_Init(GPIO_TypeDef *GPIOx, LL_GPIO_InitTypeDef *GPIO_InitStruct); +void LL_GPIO_StructInit(LL_GPIO_InitTypeDef *GPIO_InitStruct); + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* defined (GPIOA) || defined (GPIOB) || defined (GPIOC) || defined (GPIOD) || defined (GPIOE) || defined (GPIOF) || defined (GPIOG) || defined (GPIOH) || defined (GPIOI) || defined (GPIOJ) || defined (GPIOK) */ +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_LL_GPIO_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_pwr.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_pwr.h new file mode 100644 index 0000000..ea23dc5 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_pwr.h @@ -0,0 +1,985 
@@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_pwr.h + * @author MCD Application Team + * @brief Header file of PWR LL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_LL_PWR_H +#define __STM32F4xx_LL_PWR_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +#if defined(PWR) + +/** @defgroup PWR_LL PWR + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ +/** @defgroup PWR_LL_Exported_Constants PWR Exported Constants + * @{ + */ + +/** @defgroup PWR_LL_EC_CLEAR_FLAG Clear Flags Defines + * @brief Flags defines which can be used with LL_PWR_WriteReg function + * @{ + */ +#define LL_PWR_CR_CSBF PWR_CR_CSBF /*!< Clear standby flag */ +#define LL_PWR_CR_CWUF PWR_CR_CWUF /*!< Clear wakeup flag */ +/** + * @} + */ + +/** @defgroup PWR_LL_EC_GET_FLAG Get Flags Defines + * @brief Flags defines which can be used with LL_PWR_ReadReg function + * @{ + */ +#define LL_PWR_CSR_WUF PWR_CSR_WUF /*!< Wakeup flag */ +#define LL_PWR_CSR_SBF PWR_CSR_SBF /*!< Standby flag */ +#define LL_PWR_CSR_PVDO PWR_CSR_PVDO /*!< Power voltage detector output flag */ +#define LL_PWR_CSR_VOS PWR_CSR_VOSRDY /*!< Voltage scaling select flag */ +#if defined(PWR_CSR_EWUP) +#define LL_PWR_CSR_EWUP1 PWR_CSR_EWUP /*!< Enable WKUP pin */ +#elif defined(PWR_CSR_EWUP1) +#define LL_PWR_CSR_EWUP1 PWR_CSR_EWUP1 /*!< Enable WKUP pin 1 */ +#endif /* PWR_CSR_EWUP */ +#if defined(PWR_CSR_EWUP2) +#define LL_PWR_CSR_EWUP2 PWR_CSR_EWUP2 /*!< Enable WKUP pin 2 */ +#endif /* PWR_CSR_EWUP2 */ +#if defined(PWR_CSR_EWUP3) +#define LL_PWR_CSR_EWUP3 PWR_CSR_EWUP3 /*!< Enable WKUP pin 3 */ +#endif /* PWR_CSR_EWUP3 */ +/** + * @} + */ + +/** @defgroup PWR_LL_EC_REGU_VOLTAGE Regulator Voltage + * @{ + */ +#if defined(PWR_CR_VOS_0) +#define LL_PWR_REGU_VOLTAGE_SCALE3 (PWR_CR_VOS_0) +#define LL_PWR_REGU_VOLTAGE_SCALE2 (PWR_CR_VOS_1) +#define LL_PWR_REGU_VOLTAGE_SCALE1 (PWR_CR_VOS_0 | PWR_CR_VOS_1) /* The SCALE1 is not available for STM32F401xx devices */ +#else +#define LL_PWR_REGU_VOLTAGE_SCALE1 (PWR_CR_VOS) +#define LL_PWR_REGU_VOLTAGE_SCALE2 0x00000000U +#endif /* PWR_CR_VOS_0 */ +/** + * @} + */ + +/** @defgroup PWR_LL_EC_MODE_PWR Mode Power + * @{ + */ +#define LL_PWR_MODE_STOP_MAINREGU 0x00000000U /*!< Enter Stop mode when the CPU enters deepsleep */ +#define LL_PWR_MODE_STOP_LPREGU (PWR_CR_LPDS) /*!< Enter Stop mode (with low power Regulator ON) when the CPU enters 
deepsleep */ +#if defined(PWR_CR_MRUDS) && defined(PWR_CR_LPUDS) && defined(PWR_CR_FPDS) +#define LL_PWR_MODE_STOP_MAINREGU_UNDERDRIVE (PWR_CR_MRUDS | PWR_CR_FPDS) /*!< Enter Stop mode (with main Regulator in under-drive mode) when the CPU enters deepsleep */ +#define LL_PWR_MODE_STOP_LPREGU_UNDERDRIVE (PWR_CR_LPDS | PWR_CR_LPUDS | PWR_CR_FPDS) /*!< Enter Stop mode (with low power Regulator in under-drive mode) when the CPU enters deepsleep */ +#endif /* PWR_CR_MRUDS && PWR_CR_LPUDS && PWR_CR_FPDS */ +#if defined(PWR_CR_MRLVDS) && defined(PWR_CR_LPLVDS) && defined(PWR_CR_FPDS) +#define LL_PWR_MODE_STOP_MAINREGU_DEEPSLEEP (PWR_CR_MRLVDS | PWR_CR_FPDS) /*!< Enter Stop mode (with main Regulator in Deep Sleep mode) when the CPU enters deepsleep */ +#define LL_PWR_MODE_STOP_LPREGU_DEEPSLEEP (PWR_CR_LPDS | PWR_CR_LPLVDS | PWR_CR_FPDS) /*!< Enter Stop mode (with low power Regulator in Deep Sleep mode) when the CPU enters deepsleep */ +#endif /* PWR_CR_MRLVDS && PWR_CR_LPLVDS && PWR_CR_FPDS */ +#define LL_PWR_MODE_STANDBY (PWR_CR_PDDS) /*!< Enter Standby mode when the CPU enters deepsleep */ +/** + * @} + */ + +/** @defgroup PWR_LL_EC_REGU_MODE_DS_MODE Regulator Mode In Deep Sleep Mode + * @{ + */ +#define LL_PWR_REGU_DSMODE_MAIN 0x00000000U /*!< Voltage Regulator in main mode during deepsleep mode */ +#define LL_PWR_REGU_DSMODE_LOW_POWER (PWR_CR_LPDS) /*!< Voltage Regulator in low-power mode during deepsleep mode */ +/** + * @} + */ + +/** @defgroup PWR_LL_EC_PVDLEVEL Power Voltage Detector Level + * @{ + */ +#define LL_PWR_PVDLEVEL_0 (PWR_CR_PLS_LEV0) /*!< Voltage threshold detected by PVD 2.2 V */ +#define LL_PWR_PVDLEVEL_1 (PWR_CR_PLS_LEV1) /*!< Voltage threshold detected by PVD 2.3 V */ +#define LL_PWR_PVDLEVEL_2 (PWR_CR_PLS_LEV2) /*!< Voltage threshold detected by PVD 2.4 V */ +#define LL_PWR_PVDLEVEL_3 (PWR_CR_PLS_LEV3) /*!< Voltage threshold detected by PVD 2.5 V */ +#define LL_PWR_PVDLEVEL_4 (PWR_CR_PLS_LEV4) /*!< Voltage threshold detected by PVD 2.6 V */ +#define LL_PWR_PVDLEVEL_5 (PWR_CR_PLS_LEV5) /*!< Voltage threshold detected by PVD 2.7 V */ +#define LL_PWR_PVDLEVEL_6 (PWR_CR_PLS_LEV6) /*!< Voltage threshold detected by PVD 2.8 V */ +#define LL_PWR_PVDLEVEL_7 (PWR_CR_PLS_LEV7) /*!< Voltage threshold detected by PVD 2.9 V */ +/** + * @} + */ +/** @defgroup PWR_LL_EC_WAKEUP_PIN Wakeup Pins + * @{ + */ +#if defined(PWR_CSR_EWUP) +#define LL_PWR_WAKEUP_PIN1 (PWR_CSR_EWUP) /*!< WKUP pin : PA0 */ +#endif /* PWR_CSR_EWUP */ +#if defined(PWR_CSR_EWUP1) +#define LL_PWR_WAKEUP_PIN1 (PWR_CSR_EWUP1) /*!< WKUP pin 1 : PA0 */ +#endif /* PWR_CSR_EWUP1 */ +#if defined(PWR_CSR_EWUP2) +#define LL_PWR_WAKEUP_PIN2 (PWR_CSR_EWUP2) /*!< WKUP pin 2 : PC0 or PC13 according to device */ +#endif /* PWR_CSR_EWUP2 */ +#if defined(PWR_CSR_EWUP3) +#define LL_PWR_WAKEUP_PIN3 (PWR_CSR_EWUP3) /*!< WKUP pin 3 : PC1 */ +#endif /* PWR_CSR_EWUP3 */ +/** + * @} + */ + +/** + * @} + */ + + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup PWR_LL_Exported_Macros PWR Exported Macros + * @{ + */ + +/** @defgroup PWR_LL_EM_WRITE_READ Common write and read registers Macros + * @{ + */ + +/** + * @brief Write a value in PWR register + * @param __REG__ Register to be written + * @param __VALUE__ Value to be written in the register + * @retval None + */ +#define LL_PWR_WriteReg(__REG__, __VALUE__) WRITE_REG(PWR->__REG__, (__VALUE__)) + +/** + * @brief Read a value in PWR register + * @param __REG__ Register to be read + * @retval Register value + */ +#define LL_PWR_ReadReg(__REG__) 
READ_REG(PWR->__REG__) +/** + * @} + */ + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup PWR_LL_Exported_Functions PWR Exported Functions + * @{ + */ + +/** @defgroup PWR_LL_EF_Configuration Configuration + * @{ + */ +#if defined(PWR_CR_FISSR) +/** + * @brief Enable FLASH interface STOP while system Run is ON + * @rmtoll CR FISSR LL_PWR_EnableFLASHInterfaceSTOP + * @note This mode is enabled only with STOP low power mode. + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableFLASHInterfaceSTOP(void) +{ + SET_BIT(PWR->CR, PWR_CR_FISSR); +} + +/** + * @brief Disable FLASH Interface STOP while system Run is ON + * @rmtoll CR FISSR LL_PWR_DisableFLASHInterfaceSTOP + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableFLASHInterfaceSTOP(void) +{ + CLEAR_BIT(PWR->CR, PWR_CR_FISSR); +} + +/** + * @brief Check if FLASH Interface STOP while system Run feature is enabled + * @rmtoll CR FISSR LL_PWR_IsEnabledFLASHInterfaceSTOP + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledFLASHInterfaceSTOP(void) +{ + return (READ_BIT(PWR->CR, PWR_CR_FISSR) == (PWR_CR_FISSR)); +} +#endif /* PWR_CR_FISSR */ + +#if defined(PWR_CR_FMSSR) +/** + * @brief Enable FLASH Memory STOP while system Run is ON + * @rmtoll CR FMSSR LL_PWR_EnableFLASHMemorySTOP + * @note This mode is enabled only with STOP low power mode. + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableFLASHMemorySTOP(void) +{ + SET_BIT(PWR->CR, PWR_CR_FMSSR); +} + +/** + * @brief Disable FLASH Memory STOP while system Run is ON + * @rmtoll CR FMSSR LL_PWR_DisableFLASHMemorySTOP + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableFLASHMemorySTOP(void) +{ + CLEAR_BIT(PWR->CR, PWR_CR_FMSSR); +} + +/** + * @brief Check if FLASH Memory STOP while system Run feature is enabled + * @rmtoll CR FMSSR LL_PWR_IsEnabledFLASHMemorySTOP + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledFLASHMemorySTOP(void) +{ + return (READ_BIT(PWR->CR, PWR_CR_FMSSR) == (PWR_CR_FMSSR)); +} +#endif /* PWR_CR_FMSSR */ +#if defined(PWR_CR_UDEN) +/** + * @brief Enable Under Drive Mode + * @rmtoll CR UDEN LL_PWR_EnableUnderDriveMode + * @note This mode is enabled only with STOP low power mode. + * In this mode, the 1.2V domain is preserved in reduced leakage mode. This + * mode is only available when the main Regulator or the low power Regulator + * is in low voltage mode. + * @note If the Under-drive mode was enabled, it is automatically disabled after + * exiting Stop mode. + * When the voltage Regulator operates in Under-drive mode, an additional + * startup delay is induced when waking up from Stop mode. + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableUnderDriveMode(void) +{ + SET_BIT(PWR->CR, PWR_CR_UDEN); +} + +/** + * @brief Disable Under Drive Mode + * @rmtoll CR UDEN LL_PWR_DisableUnderDriveMode + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableUnderDriveMode(void) +{ + CLEAR_BIT(PWR->CR, PWR_CR_UDEN); +} + +/** + * @brief Check if Under Drive Mode is enabled + * @rmtoll CR UDEN LL_PWR_IsEnabledUnderDriveMode + * @retval State of bit (1 or 0). 
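+ * @note  Illustrative usage sketch (only meaningful on devices with MRUDS/LPUDS):
+ *        arm under-drive, then select a Stop mode that uses it with
+ *        LL_PWR_SetPowerMode(), declared later in this header. The actual Stop
+ *        entry (SLEEPDEEP + WFI) is handled by the Cortex-M layer and not shown.
+ * @code
+ *        LL_PWR_EnableUnderDriveMode();
+ *        LL_PWR_SetPowerMode(LL_PWR_MODE_STOP_MAINREGU_UNDERDRIVE);
+ * @endcode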
+ */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledUnderDriveMode(void) +{ + return (READ_BIT(PWR->CR, PWR_CR_UDEN) == (PWR_CR_UDEN)); +} +#endif /* PWR_CR_UDEN */ + +#if defined(PWR_CR_ODSWEN) +/** + * @brief Enable Over drive switching + * @rmtoll CR ODSWEN LL_PWR_EnableOverDriveSwitching + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableOverDriveSwitching(void) +{ + SET_BIT(PWR->CR, PWR_CR_ODSWEN); +} + +/** + * @brief Disable Over drive switching + * @rmtoll CR ODSWEN LL_PWR_DisableOverDriveSwitching + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableOverDriveSwitching(void) +{ + CLEAR_BIT(PWR->CR, PWR_CR_ODSWEN); +} + +/** + * @brief Check if Over drive switching is enabled + * @rmtoll CR ODSWEN LL_PWR_IsEnabledOverDriveSwitching + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledOverDriveSwitching(void) +{ + return (READ_BIT(PWR->CR, PWR_CR_ODSWEN) == (PWR_CR_ODSWEN)); +} +#endif /* PWR_CR_ODSWEN */ +#if defined(PWR_CR_ODEN) +/** + * @brief Enable Over drive Mode + * @rmtoll CR ODEN LL_PWR_EnableOverDriveMode + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableOverDriveMode(void) +{ + SET_BIT(PWR->CR, PWR_CR_ODEN); +} + +/** + * @brief Disable Over drive Mode + * @rmtoll CR ODEN LL_PWR_DisableOverDriveMode + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableOverDriveMode(void) +{ + CLEAR_BIT(PWR->CR, PWR_CR_ODEN); +} + +/** + * @brief Check if Over drive switching is enabled + * @rmtoll CR ODEN LL_PWR_IsEnabledOverDriveMode + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledOverDriveMode(void) +{ + return (READ_BIT(PWR->CR, PWR_CR_ODEN) == (PWR_CR_ODEN)); +} +#endif /* PWR_CR_ODEN */ +#if defined(PWR_CR_MRUDS) +/** + * @brief Enable Main Regulator in deepsleep under-drive Mode + * @rmtoll CR MRUDS LL_PWR_EnableMainRegulatorDeepSleepUDMode + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableMainRegulatorDeepSleepUDMode(void) +{ + SET_BIT(PWR->CR, PWR_CR_MRUDS); +} + +/** + * @brief Disable Main Regulator in deepsleep under-drive Mode + * @rmtoll CR MRUDS LL_PWR_DisableMainRegulatorDeepSleepUDMode + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableMainRegulatorDeepSleepUDMode(void) +{ + CLEAR_BIT(PWR->CR, PWR_CR_MRUDS); +} + +/** + * @brief Check if Main Regulator in deepsleep under-drive Mode is enabled + * @rmtoll CR MRUDS LL_PWR_IsEnabledMainRegulatorDeepSleepUDMode + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledMainRegulatorDeepSleepUDMode(void) +{ + return (READ_BIT(PWR->CR, PWR_CR_MRUDS) == (PWR_CR_MRUDS)); +} +#endif /* PWR_CR_MRUDS */ + +#if defined(PWR_CR_LPUDS) +/** + * @brief Enable Low Power Regulator in deepsleep under-drive Mode + * @rmtoll CR LPUDS LL_PWR_EnableLowPowerRegulatorDeepSleepUDMode + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableLowPowerRegulatorDeepSleepUDMode(void) +{ + SET_BIT(PWR->CR, PWR_CR_LPUDS); +} + +/** + * @brief Disable Low Power Regulator in deepsleep under-drive Mode + * @rmtoll CR LPUDS LL_PWR_DisableLowPowerRegulatorDeepSleepUDMode + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableLowPowerRegulatorDeepSleepUDMode(void) +{ + CLEAR_BIT(PWR->CR, PWR_CR_LPUDS); +} + +/** + * @brief Check if Low Power Regulator in deepsleep under-drive Mode is enabled + * @rmtoll CR LPUDS LL_PWR_IsEnabledLowPowerRegulatorDeepSleepUDMode + * @retval State of bit (1 or 0). 
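+ * @note  Illustrative sketch of the over-drive activation sequence for the
+ *        ODEN/ODSWEN controls declared above (example only; clock and voltage
+ *        scaling prerequisites are not shown, and the ready-flag helpers are
+ *        declared later in this header).
+ * @code
+ *        LL_PWR_EnableOverDriveMode();
+ *        while (LL_PWR_IsActiveFlag_OD() == 0U) {}
+ *        LL_PWR_EnableOverDriveSwitching();
+ *        while (LL_PWR_IsActiveFlag_ODSW() == 0U) {}
+ * @endcode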
+ */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledLowPowerRegulatorDeepSleepUDMode(void) +{ + return (READ_BIT(PWR->CR, PWR_CR_LPUDS) == (PWR_CR_LPUDS)); +} +#endif /* PWR_CR_LPUDS */ + +#if defined(PWR_CR_MRLVDS) +/** + * @brief Enable Main Regulator low voltage Mode + * @rmtoll CR MRLVDS LL_PWR_EnableMainRegulatorLowVoltageMode + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableMainRegulatorLowVoltageMode(void) +{ + SET_BIT(PWR->CR, PWR_CR_MRLVDS); +} + +/** + * @brief Disable Main Regulator low voltage Mode + * @rmtoll CR MRLVDS LL_PWR_DisableMainRegulatorLowVoltageMode + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableMainRegulatorLowVoltageMode(void) +{ + CLEAR_BIT(PWR->CR, PWR_CR_MRLVDS); +} + +/** + * @brief Check if Main Regulator low voltage Mode is enabled + * @rmtoll CR MRLVDS LL_PWR_IsEnabledMainRegulatorLowVoltageMode + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledMainRegulatorLowVoltageMode(void) +{ + return (READ_BIT(PWR->CR, PWR_CR_MRLVDS) == (PWR_CR_MRLVDS)); +} +#endif /* PWR_CR_MRLVDS */ + +#if defined(PWR_CR_LPLVDS) +/** + * @brief Enable Low Power Regulator low voltage Mode + * @rmtoll CR LPLVDS LL_PWR_EnableLowPowerRegulatorLowVoltageMode + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableLowPowerRegulatorLowVoltageMode(void) +{ + SET_BIT(PWR->CR, PWR_CR_LPLVDS); +} + +/** + * @brief Disable Low Power Regulator low voltage Mode + * @rmtoll CR LPLVDS LL_PWR_DisableLowPowerRegulatorLowVoltageMode + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableLowPowerRegulatorLowVoltageMode(void) +{ + CLEAR_BIT(PWR->CR, PWR_CR_LPLVDS); +} + +/** + * @brief Check if Low Power Regulator low voltage Mode is enabled + * @rmtoll CR LPLVDS LL_PWR_IsEnabledLowPowerRegulatorLowVoltageMode + * @retval State of bit (1 or 0). 
+ */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledLowPowerRegulatorLowVoltageMode(void) +{ + return (READ_BIT(PWR->CR, PWR_CR_LPLVDS) == (PWR_CR_LPLVDS)); +} +#endif /* PWR_CR_LPLVDS */ +/** + * @brief Set the main internal Regulator output voltage + * @rmtoll CR VOS LL_PWR_SetRegulVoltageScaling + * @param VoltageScaling This parameter can be one of the following values: + * @arg @ref LL_PWR_REGU_VOLTAGE_SCALE1 (*) + * @arg @ref LL_PWR_REGU_VOLTAGE_SCALE2 + * @arg @ref LL_PWR_REGU_VOLTAGE_SCALE3 + * (*) LL_PWR_REGU_VOLTAGE_SCALE1 is not available for STM32F401xx devices + * @retval None + */ +__STATIC_INLINE void LL_PWR_SetRegulVoltageScaling(uint32_t VoltageScaling) +{ + MODIFY_REG(PWR->CR, PWR_CR_VOS, VoltageScaling); +} + +/** + * @brief Get the main internal Regulator output voltage + * @rmtoll CR VOS LL_PWR_GetRegulVoltageScaling + * @retval Returned value can be one of the following values: + * @arg @ref LL_PWR_REGU_VOLTAGE_SCALE1 (*) + * @arg @ref LL_PWR_REGU_VOLTAGE_SCALE2 + * @arg @ref LL_PWR_REGU_VOLTAGE_SCALE3 + * (*) LL_PWR_REGU_VOLTAGE_SCALE1 is not available for STM32F401xx devices + */ +__STATIC_INLINE uint32_t LL_PWR_GetRegulVoltageScaling(void) +{ + return (uint32_t)(READ_BIT(PWR->CR, PWR_CR_VOS)); +} +/** + * @brief Enable the Flash Power Down in Stop Mode + * @rmtoll CR FPDS LL_PWR_EnableFlashPowerDown + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableFlashPowerDown(void) +{ + SET_BIT(PWR->CR, PWR_CR_FPDS); +} + +/** + * @brief Disable the Flash Power Down in Stop Mode + * @rmtoll CR FPDS LL_PWR_DisableFlashPowerDown + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableFlashPowerDown(void) +{ + CLEAR_BIT(PWR->CR, PWR_CR_FPDS); +} + +/** + * @brief Check if the Flash Power Down in Stop Mode is enabled + * @rmtoll CR FPDS LL_PWR_IsEnabledFlashPowerDown + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledFlashPowerDown(void) +{ + return (READ_BIT(PWR->CR, PWR_CR_FPDS) == (PWR_CR_FPDS)); +} + +/** + * @brief Enable access to the backup domain + * @rmtoll CR DBP LL_PWR_EnableBkUpAccess + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableBkUpAccess(void) +{ + SET_BIT(PWR->CR, PWR_CR_DBP); +} + +/** + * @brief Disable access to the backup domain + * @rmtoll CR DBP LL_PWR_DisableBkUpAccess + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableBkUpAccess(void) +{ + CLEAR_BIT(PWR->CR, PWR_CR_DBP); +} + +/** + * @brief Check if the backup domain is enabled + * @rmtoll CR DBP LL_PWR_IsEnabledBkUpAccess + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledBkUpAccess(void) +{ + return (READ_BIT(PWR->CR, PWR_CR_DBP) == (PWR_CR_DBP)); +} +/** + * @brief Enable the backup Regulator + * @rmtoll CSR BRE LL_PWR_EnableBkUpRegulator + * @note The BRE bit of the PWR_CSR register is protected against parasitic write access. + * The LL_PWR_EnableBkUpAccess() must be called before using this API. + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableBkUpRegulator(void) +{ + SET_BIT(PWR->CSR, PWR_CSR_BRE); +} + +/** + * @brief Disable the backup Regulator + * @rmtoll CSR BRE LL_PWR_DisableBkUpRegulator + * @note The BRE bit of the PWR_CSR register is protected against parasitic write access. + * The LL_PWR_EnableBkUpAccess() must be called before using this API. 
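+ * @note  Illustrative enable-side sketch: unlock the backup domain first, then
+ *        wait for the backup regulator ready flag (declared later in this header).
+ * @code
+ *        LL_PWR_EnableBkUpAccess();
+ *        LL_PWR_EnableBkUpRegulator();
+ *        while (LL_PWR_IsActiveFlag_BRR() == 0U) {}
+ * @endcode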
+ * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableBkUpRegulator(void) +{ + CLEAR_BIT(PWR->CSR, PWR_CSR_BRE); +} + +/** + * @brief Check if the backup Regulator is enabled + * @rmtoll CSR BRE LL_PWR_IsEnabledBkUpRegulator + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledBkUpRegulator(void) +{ + return (READ_BIT(PWR->CSR, PWR_CSR_BRE) == (PWR_CSR_BRE)); +} + +/** + * @brief Set voltage Regulator mode during deep sleep mode + * @rmtoll CR LPDS LL_PWR_SetRegulModeDS + * @param RegulMode This parameter can be one of the following values: + * @arg @ref LL_PWR_REGU_DSMODE_MAIN + * @arg @ref LL_PWR_REGU_DSMODE_LOW_POWER + * @retval None + */ +__STATIC_INLINE void LL_PWR_SetRegulModeDS(uint32_t RegulMode) +{ + MODIFY_REG(PWR->CR, PWR_CR_LPDS, RegulMode); +} + +/** + * @brief Get voltage Regulator mode during deep sleep mode + * @rmtoll CR LPDS LL_PWR_GetRegulModeDS + * @retval Returned value can be one of the following values: + * @arg @ref LL_PWR_REGU_DSMODE_MAIN + * @arg @ref LL_PWR_REGU_DSMODE_LOW_POWER + */ +__STATIC_INLINE uint32_t LL_PWR_GetRegulModeDS(void) +{ + return (uint32_t)(READ_BIT(PWR->CR, PWR_CR_LPDS)); +} + +/** + * @brief Set Power Down mode when CPU enters deepsleep + * @rmtoll CR PDDS LL_PWR_SetPowerMode\n + * @rmtoll CR MRUDS LL_PWR_SetPowerMode\n + * @rmtoll CR LPUDS LL_PWR_SetPowerMode\n + * @rmtoll CR FPDS LL_PWR_SetPowerMode\n + * @rmtoll CR MRLVDS LL_PWR_SetPowerMode\n + * @rmtoll CR LPlVDS LL_PWR_SetPowerMode\n + * @rmtoll CR FPDS LL_PWR_SetPowerMode\n + * @rmtoll CR LPDS LL_PWR_SetPowerMode + * @param PDMode This parameter can be one of the following values: + * @arg @ref LL_PWR_MODE_STOP_MAINREGU + * @arg @ref LL_PWR_MODE_STOP_LPREGU + * @arg @ref LL_PWR_MODE_STOP_MAINREGU_UNDERDRIVE (*) + * @arg @ref LL_PWR_MODE_STOP_LPREGU_UNDERDRIVE (*) + * @arg @ref LL_PWR_MODE_STOP_MAINREGU_DEEPSLEEP (*) + * @arg @ref LL_PWR_MODE_STOP_LPREGU_DEEPSLEEP (*) + * + * (*) not available on all devices + * @arg @ref LL_PWR_MODE_STANDBY + * @retval None + */ +__STATIC_INLINE void LL_PWR_SetPowerMode(uint32_t PDMode) +{ +#if defined(PWR_CR_MRUDS) && defined(PWR_CR_LPUDS) && defined(PWR_CR_FPDS) + MODIFY_REG(PWR->CR, (PWR_CR_PDDS | PWR_CR_LPDS | PWR_CR_FPDS | PWR_CR_LPUDS | PWR_CR_MRUDS), PDMode); +#elif defined(PWR_CR_MRLVDS) && defined(PWR_CR_LPLVDS) && defined(PWR_CR_FPDS) + MODIFY_REG(PWR->CR, (PWR_CR_PDDS | PWR_CR_LPDS | PWR_CR_FPDS | PWR_CR_LPLVDS | PWR_CR_MRLVDS), PDMode); +#else + MODIFY_REG(PWR->CR, (PWR_CR_PDDS| PWR_CR_LPDS), PDMode); +#endif /* PWR_CR_MRUDS && PWR_CR_LPUDS && PWR_CR_FPDS */ +} + +/** + * @brief Get Power Down mode when CPU enters deepsleep + * @rmtoll CR PDDS LL_PWR_GetPowerMode\n + * @rmtoll CR MRUDS LL_PWR_GetPowerMode\n + * @rmtoll CR LPUDS LL_PWR_GetPowerMode\n + * @rmtoll CR FPDS LL_PWR_GetPowerMode\n + * @rmtoll CR MRLVDS LL_PWR_GetPowerMode\n + * @rmtoll CR LPLVDS LL_PWR_GetPowerMode\n + * @rmtoll CR FPDS LL_PWR_GetPowerMode\n + * @rmtoll CR LPDS LL_PWR_GetPowerMode + * @retval Returned value can be one of the following values: + * @arg @ref LL_PWR_MODE_STOP_MAINREGU + * @arg @ref LL_PWR_MODE_STOP_LPREGU + * @arg @ref LL_PWR_MODE_STOP_MAINREGU_UNDERDRIVE (*) + * @arg @ref LL_PWR_MODE_STOP_LPREGU_UNDERDRIVE (*) + * @arg @ref LL_PWR_MODE_STOP_MAINREGU_DEEPSLEEP (*) + * @arg @ref LL_PWR_MODE_STOP_LPREGU_DEEPSLEEP (*) + * + * (*) not available on all devices + * @arg @ref LL_PWR_MODE_STANDBY + */ +__STATIC_INLINE uint32_t LL_PWR_GetPowerMode(void) +{ +#if defined(PWR_CR_MRUDS) && defined(PWR_CR_LPUDS) && 
defined(PWR_CR_FPDS) + return (uint32_t)(READ_BIT(PWR->CR, (PWR_CR_PDDS | PWR_CR_LPDS | PWR_CR_FPDS | PWR_CR_LPUDS | PWR_CR_MRUDS))); +#elif defined(PWR_CR_MRLVDS) && defined(PWR_CR_LPLVDS) && defined(PWR_CR_FPDS) + return (uint32_t)(READ_BIT(PWR->CR, (PWR_CR_PDDS | PWR_CR_LPDS | PWR_CR_FPDS | PWR_CR_LPLVDS | PWR_CR_MRLVDS))); +#else + return (uint32_t)(READ_BIT(PWR->CR, (PWR_CR_PDDS| PWR_CR_LPDS))); +#endif /* PWR_CR_MRUDS && PWR_CR_LPUDS && PWR_CR_FPDS */ +} + +/** + * @brief Configure the voltage threshold detected by the Power Voltage Detector + * @rmtoll CR PLS LL_PWR_SetPVDLevel + * @param PVDLevel This parameter can be one of the following values: + * @arg @ref LL_PWR_PVDLEVEL_0 + * @arg @ref LL_PWR_PVDLEVEL_1 + * @arg @ref LL_PWR_PVDLEVEL_2 + * @arg @ref LL_PWR_PVDLEVEL_3 + * @arg @ref LL_PWR_PVDLEVEL_4 + * @arg @ref LL_PWR_PVDLEVEL_5 + * @arg @ref LL_PWR_PVDLEVEL_6 + * @arg @ref LL_PWR_PVDLEVEL_7 + * @retval None + */ +__STATIC_INLINE void LL_PWR_SetPVDLevel(uint32_t PVDLevel) +{ + MODIFY_REG(PWR->CR, PWR_CR_PLS, PVDLevel); +} + +/** + * @brief Get the voltage threshold detection + * @rmtoll CR PLS LL_PWR_GetPVDLevel + * @retval Returned value can be one of the following values: + * @arg @ref LL_PWR_PVDLEVEL_0 + * @arg @ref LL_PWR_PVDLEVEL_1 + * @arg @ref LL_PWR_PVDLEVEL_2 + * @arg @ref LL_PWR_PVDLEVEL_3 + * @arg @ref LL_PWR_PVDLEVEL_4 + * @arg @ref LL_PWR_PVDLEVEL_5 + * @arg @ref LL_PWR_PVDLEVEL_6 + * @arg @ref LL_PWR_PVDLEVEL_7 + */ +__STATIC_INLINE uint32_t LL_PWR_GetPVDLevel(void) +{ + return (uint32_t)(READ_BIT(PWR->CR, PWR_CR_PLS)); +} + +/** + * @brief Enable Power Voltage Detector + * @rmtoll CR PVDE LL_PWR_EnablePVD + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnablePVD(void) +{ + SET_BIT(PWR->CR, PWR_CR_PVDE); +} + +/** + * @brief Disable Power Voltage Detector + * @rmtoll CR PVDE LL_PWR_DisablePVD + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisablePVD(void) +{ + CLEAR_BIT(PWR->CR, PWR_CR_PVDE); +} + +/** + * @brief Check if Power Voltage Detector is enabled + * @rmtoll CR PVDE LL_PWR_IsEnabledPVD + * @retval State of bit (1 or 0). 
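+ * @note  Illustrative usage sketch (the 2.7 V level is an arbitrary choice):
+ *        program the threshold, enable the detector, then read the PVDO flag.
+ * @code
+ *        LL_PWR_SetPVDLevel(LL_PWR_PVDLEVEL_5);
+ *        LL_PWR_EnablePVD();
+ *        uint32_t vdd_below_threshold = LL_PWR_IsActiveFlag_PVDO();
+ * @endcode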
+ */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledPVD(void) +{ + return (READ_BIT(PWR->CR, PWR_CR_PVDE) == (PWR_CR_PVDE)); +} + +/** + * @brief Enable the WakeUp PINx functionality + * @rmtoll CSR EWUP LL_PWR_EnableWakeUpPin\n + * @rmtoll CSR EWUP1 LL_PWR_EnableWakeUpPin\n + * @rmtoll CSR EWUP2 LL_PWR_EnableWakeUpPin\n + * @rmtoll CSR EWUP3 LL_PWR_EnableWakeUpPin + * @param WakeUpPin This parameter can be one of the following values: + * @arg @ref LL_PWR_WAKEUP_PIN1 + * @arg @ref LL_PWR_WAKEUP_PIN2 (*) + * @arg @ref LL_PWR_WAKEUP_PIN3 (*) + * + * (*) not available on all devices + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableWakeUpPin(uint32_t WakeUpPin) +{ + SET_BIT(PWR->CSR, WakeUpPin); +} + +/** + * @brief Disable the WakeUp PINx functionality + * @rmtoll CSR EWUP LL_PWR_DisableWakeUpPin\n + * @rmtoll CSR EWUP1 LL_PWR_DisableWakeUpPin\n + * @rmtoll CSR EWUP2 LL_PWR_DisableWakeUpPin\n + * @rmtoll CSR EWUP3 LL_PWR_DisableWakeUpPin + * @param WakeUpPin This parameter can be one of the following values: + * @arg @ref LL_PWR_WAKEUP_PIN1 + * @arg @ref LL_PWR_WAKEUP_PIN2 (*) + * @arg @ref LL_PWR_WAKEUP_PIN3 (*) + * + * (*) not available on all devices + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableWakeUpPin(uint32_t WakeUpPin) +{ + CLEAR_BIT(PWR->CSR, WakeUpPin); +} + +/** + * @brief Check if the WakeUp PINx functionality is enabled + * @rmtoll CSR EWUP LL_PWR_IsEnabledWakeUpPin\n + * @rmtoll CSR EWUP1 LL_PWR_IsEnabledWakeUpPin\n + * @rmtoll CSR EWUP2 LL_PWR_IsEnabledWakeUpPin\n + * @rmtoll CSR EWUP3 LL_PWR_IsEnabledWakeUpPin + * @param WakeUpPin This parameter can be one of the following values: + * @arg @ref LL_PWR_WAKEUP_PIN1 + * @arg @ref LL_PWR_WAKEUP_PIN2 (*) + * @arg @ref LL_PWR_WAKEUP_PIN3 (*) + * + * (*) not available on all devices + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledWakeUpPin(uint32_t WakeUpPin) +{ + return (READ_BIT(PWR->CSR, WakeUpPin) == (WakeUpPin)); +} + + +/** + * @} + */ + +/** @defgroup PWR_LL_EF_FLAG_Management FLAG_Management + * @{ + */ + +/** + * @brief Get Wake-up Flag + * @rmtoll CSR WUF LL_PWR_IsActiveFlag_WU + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsActiveFlag_WU(void) +{ + return (READ_BIT(PWR->CSR, PWR_CSR_WUF) == (PWR_CSR_WUF)); +} + +/** + * @brief Get Standby Flag + * @rmtoll CSR SBF LL_PWR_IsActiveFlag_SB + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsActiveFlag_SB(void) +{ + return (READ_BIT(PWR->CSR, PWR_CSR_SBF) == (PWR_CSR_SBF)); +} + +/** + * @brief Get Backup Regulator ready Flag + * @rmtoll CSR BRR LL_PWR_IsActiveFlag_BRR + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsActiveFlag_BRR(void) +{ + return (READ_BIT(PWR->CSR, PWR_CSR_BRR) == (PWR_CSR_BRR)); +} +/** + * @brief Indicate whether VDD voltage is below the selected PVD threshold + * @rmtoll CSR PVDO LL_PWR_IsActiveFlag_PVDO + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsActiveFlag_PVDO(void) +{ + return (READ_BIT(PWR->CSR, PWR_CSR_PVDO) == (PWR_CSR_PVDO)); +} + +/** + * @brief Indicate whether the Regulator is ready in the selected voltage range or if its output voltage is still changing to the required voltage level + * @rmtoll CSR VOS LL_PWR_IsActiveFlag_VOS + * @retval State of bit (1 or 0). 
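+ * @note  Illustrative usage sketch: after changing the scale with
+ *        LL_PWR_SetRegulVoltageScaling(), poll this flag before raising the
+ *        system clock (SCALE1 is not available on STM32F401xx devices).
+ * @code
+ *        LL_PWR_SetRegulVoltageScaling(LL_PWR_REGU_VOLTAGE_SCALE1);
+ *        while (LL_PWR_IsActiveFlag_VOS() == 0U) {}
+ * @endcode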
+ */ +__STATIC_INLINE uint32_t LL_PWR_IsActiveFlag_VOS(void) +{ + return (READ_BIT(PWR->CSR, LL_PWR_CSR_VOS) == (LL_PWR_CSR_VOS)); +} +#if defined(PWR_CR_ODEN) +/** + * @brief Indicate whether the Over-Drive mode is ready or not + * @rmtoll CSR ODRDY LL_PWR_IsActiveFlag_OD + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsActiveFlag_OD(void) +{ + return (READ_BIT(PWR->CSR, PWR_CSR_ODRDY) == (PWR_CSR_ODRDY)); +} +#endif /* PWR_CR_ODEN */ + +#if defined(PWR_CR_ODSWEN) +/** + * @brief Indicate whether the Over-Drive mode switching is ready or not + * @rmtoll CSR ODSWRDY LL_PWR_IsActiveFlag_ODSW + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsActiveFlag_ODSW(void) +{ + return (READ_BIT(PWR->CSR, PWR_CSR_ODSWRDY) == (PWR_CSR_ODSWRDY)); +} +#endif /* PWR_CR_ODSWEN */ + +#if defined(PWR_CR_UDEN) +/** + * @brief Indicate whether the Under-Drive mode is ready or not + * @rmtoll CSR UDRDY LL_PWR_IsActiveFlag_UD + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsActiveFlag_UD(void) +{ + return (READ_BIT(PWR->CSR, PWR_CSR_UDRDY) == (PWR_CSR_UDRDY)); +} +#endif /* PWR_CR_UDEN */ +/** + * @brief Clear Standby Flag + * @rmtoll CR CSBF LL_PWR_ClearFlag_SB + * @retval None + */ +__STATIC_INLINE void LL_PWR_ClearFlag_SB(void) +{ + SET_BIT(PWR->CR, PWR_CR_CSBF); +} + +/** + * @brief Clear Wake-up Flags + * @rmtoll CR CWUF LL_PWR_ClearFlag_WU + * @retval None + */ +__STATIC_INLINE void LL_PWR_ClearFlag_WU(void) +{ + SET_BIT(PWR->CR, PWR_CR_CWUF); +} +#if defined(PWR_CSR_UDRDY) +/** + * @brief Clear Under-Drive ready Flag + * @rmtoll CSR UDRDY LL_PWR_ClearFlag_UD + * @retval None + */ +__STATIC_INLINE void LL_PWR_ClearFlag_UD(void) +{ + WRITE_REG(PWR->CSR, PWR_CSR_UDRDY); +} +#endif /* PWR_CSR_UDRDY */ + +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup PWR_LL_EF_Init De-initialization function + * @{ + */ +ErrorStatus LL_PWR_DeInit(void); +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* defined(PWR) */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_LL_PWR_H */ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_rcc.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_rcc.h new file mode 100644 index 0000000..796f06d --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_rcc.h @@ -0,0 +1,7101 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_rcc.h + * @author MCD Application Team + * @brief Header file of RCC LL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_LL_RCC_H +#define __STM32F4xx_LL_RCC_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +#if defined(RCC) + +/** @defgroup RCC_LL RCC + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/** @defgroup RCC_LL_Private_Variables RCC Private Variables + * @{ + */ + +#if defined(RCC_DCKCFGR_PLLSAIDIVR) +static const uint8_t aRCC_PLLSAIDIVRPrescTable[4] = {2, 4, 8, 16}; +#endif /* RCC_DCKCFGR_PLLSAIDIVR */ + +/** + * @} + */ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup RCC_LL_Private_Macros RCC Private Macros + * @{ + */ +/** + * @} + */ +#endif /*USE_FULL_LL_DRIVER*/ +/* Exported types ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup RCC_LL_Exported_Types RCC Exported Types + * @{ + */ + +/** @defgroup LL_ES_CLOCK_FREQ Clocks Frequency Structure + * @{ + */ + +/** + * @brief RCC Clocks Frequency Structure + */ +typedef struct +{ + uint32_t SYSCLK_Frequency; /*!< SYSCLK clock frequency */ + uint32_t HCLK_Frequency; /*!< HCLK clock frequency */ + uint32_t PCLK1_Frequency; /*!< PCLK1 clock frequency */ + uint32_t PCLK2_Frequency; /*!< PCLK2 clock frequency */ +} LL_RCC_ClocksTypeDef; + +/** + * @} + */ + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup RCC_LL_Exported_Constants RCC Exported Constants + * @{ + */ + +/** @defgroup RCC_LL_EC_OSC_VALUES Oscillator Values adaptation + * @brief Defines used to adapt values of different oscillators + * @note These values could be modified in the user environment according to + * HW set-up. 
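+ * @note  Illustrative override sketch: each default below is wrapped in
+ *        #if !defined, so a board with, for example, an 8 MHz crystal only needs
+ *        the symbol defined earlier (typically as a compiler -D option):
+ * @code
+ *        #define HSE_VALUE 8000000U
+ * @endcode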
+ * @{ + */ +#if !defined (HSE_VALUE) +#define HSE_VALUE 25000000U /*!< Value of the HSE oscillator in Hz */ +#endif /* HSE_VALUE */ + +#if !defined (HSI_VALUE) +#define HSI_VALUE 16000000U /*!< Value of the HSI oscillator in Hz */ +#endif /* HSI_VALUE */ + +#if !defined (LSE_VALUE) +#define LSE_VALUE 32768U /*!< Value of the LSE oscillator in Hz */ +#endif /* LSE_VALUE */ + +#if !defined (LSI_VALUE) +#define LSI_VALUE 32000U /*!< Value of the LSI oscillator in Hz */ +#endif /* LSI_VALUE */ + +#if !defined (EXTERNAL_CLOCK_VALUE) +#define EXTERNAL_CLOCK_VALUE 12288000U /*!< Value of the I2S_CKIN external oscillator in Hz */ +#endif /* EXTERNAL_CLOCK_VALUE */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_CLEAR_FLAG Clear Flags Defines + * @brief Flags defines which can be used with LL_RCC_WriteReg function + * @{ + */ +#define LL_RCC_CIR_LSIRDYC RCC_CIR_LSIRDYC /*!< LSI Ready Interrupt Clear */ +#define LL_RCC_CIR_LSERDYC RCC_CIR_LSERDYC /*!< LSE Ready Interrupt Clear */ +#define LL_RCC_CIR_HSIRDYC RCC_CIR_HSIRDYC /*!< HSI Ready Interrupt Clear */ +#define LL_RCC_CIR_HSERDYC RCC_CIR_HSERDYC /*!< HSE Ready Interrupt Clear */ +#define LL_RCC_CIR_PLLRDYC RCC_CIR_PLLRDYC /*!< PLL Ready Interrupt Clear */ +#if defined(RCC_PLLI2S_SUPPORT) +#define LL_RCC_CIR_PLLI2SRDYC RCC_CIR_PLLI2SRDYC /*!< PLLI2S Ready Interrupt Clear */ +#endif /* RCC_PLLI2S_SUPPORT */ +#if defined(RCC_PLLSAI_SUPPORT) +#define LL_RCC_CIR_PLLSAIRDYC RCC_CIR_PLLSAIRDYC /*!< PLLSAI Ready Interrupt Clear */ +#endif /* RCC_PLLSAI_SUPPORT */ +#define LL_RCC_CIR_CSSC RCC_CIR_CSSC /*!< Clock Security System Interrupt Clear */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_GET_FLAG Get Flags Defines + * @brief Flags defines which can be used with LL_RCC_ReadReg function + * @{ + */ +#define LL_RCC_CIR_LSIRDYF RCC_CIR_LSIRDYF /*!< LSI Ready Interrupt flag */ +#define LL_RCC_CIR_LSERDYF RCC_CIR_LSERDYF /*!< LSE Ready Interrupt flag */ +#define LL_RCC_CIR_HSIRDYF RCC_CIR_HSIRDYF /*!< HSI Ready Interrupt flag */ +#define LL_RCC_CIR_HSERDYF RCC_CIR_HSERDYF /*!< HSE Ready Interrupt flag */ +#define LL_RCC_CIR_PLLRDYF RCC_CIR_PLLRDYF /*!< PLL Ready Interrupt flag */ +#if defined(RCC_PLLI2S_SUPPORT) +#define LL_RCC_CIR_PLLI2SRDYF RCC_CIR_PLLI2SRDYF /*!< PLLI2S Ready Interrupt flag */ +#endif /* RCC_PLLI2S_SUPPORT */ +#if defined(RCC_PLLSAI_SUPPORT) +#define LL_RCC_CIR_PLLSAIRDYF RCC_CIR_PLLSAIRDYF /*!< PLLSAI Ready Interrupt flag */ +#endif /* RCC_PLLSAI_SUPPORT */ +#define LL_RCC_CIR_CSSF RCC_CIR_CSSF /*!< Clock Security System Interrupt flag */ +#define LL_RCC_CSR_LPWRRSTF RCC_CSR_LPWRRSTF /*!< Low-Power reset flag */ +#define LL_RCC_CSR_PINRSTF RCC_CSR_PINRSTF /*!< PIN reset flag */ +#define LL_RCC_CSR_PORRSTF RCC_CSR_PORRSTF /*!< POR/PDR reset flag */ +#define LL_RCC_CSR_SFTRSTF RCC_CSR_SFTRSTF /*!< Software Reset flag */ +#define LL_RCC_CSR_IWDGRSTF RCC_CSR_IWDGRSTF /*!< Independent Watchdog reset flag */ +#define LL_RCC_CSR_WWDGRSTF RCC_CSR_WWDGRSTF /*!< Window watchdog reset flag */ +#if defined(RCC_CSR_BORRSTF) +#define LL_RCC_CSR_BORRSTF RCC_CSR_BORRSTF /*!< BOR reset flag */ +#endif /* RCC_CSR_BORRSTF */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_IT IT Defines + * @brief IT defines which can be used with LL_RCC_ReadReg and LL_RCC_WriteReg functions + * @{ + */ +#define LL_RCC_CIR_LSIRDYIE RCC_CIR_LSIRDYIE /*!< LSI Ready Interrupt Enable */ +#define LL_RCC_CIR_LSERDYIE RCC_CIR_LSERDYIE /*!< LSE Ready Interrupt Enable */ +#define LL_RCC_CIR_HSIRDYIE RCC_CIR_HSIRDYIE /*!< HSI Ready Interrupt Enable */ +#define LL_RCC_CIR_HSERDYIE 
RCC_CIR_HSERDYIE /*!< HSE Ready Interrupt Enable */ +#define LL_RCC_CIR_PLLRDYIE RCC_CIR_PLLRDYIE /*!< PLL Ready Interrupt Enable */ +#if defined(RCC_PLLI2S_SUPPORT) +#define LL_RCC_CIR_PLLI2SRDYIE RCC_CIR_PLLI2SRDYIE /*!< PLLI2S Ready Interrupt Enable */ +#endif /* RCC_PLLI2S_SUPPORT */ +#if defined(RCC_PLLSAI_SUPPORT) +#define LL_RCC_CIR_PLLSAIRDYIE RCC_CIR_PLLSAIRDYIE /*!< PLLSAI Ready Interrupt Enable */ +#endif /* RCC_PLLSAI_SUPPORT */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_SYS_CLKSOURCE System clock switch + * @{ + */ +#define LL_RCC_SYS_CLKSOURCE_HSI RCC_CFGR_SW_HSI /*!< HSI selection as system clock */ +#define LL_RCC_SYS_CLKSOURCE_HSE RCC_CFGR_SW_HSE /*!< HSE selection as system clock */ +#define LL_RCC_SYS_CLKSOURCE_PLL RCC_CFGR_SW_PLL /*!< PLL selection as system clock */ +#if defined(RCC_CFGR_SW_PLLR) +#define LL_RCC_SYS_CLKSOURCE_PLLR RCC_CFGR_SW_PLLR /*!< PLLR selection as system clock */ +#endif /* RCC_CFGR_SW_PLLR */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_SYS_CLKSOURCE_STATUS System clock switch status + * @{ + */ +#define LL_RCC_SYS_CLKSOURCE_STATUS_HSI RCC_CFGR_SWS_HSI /*!< HSI used as system clock */ +#define LL_RCC_SYS_CLKSOURCE_STATUS_HSE RCC_CFGR_SWS_HSE /*!< HSE used as system clock */ +#define LL_RCC_SYS_CLKSOURCE_STATUS_PLL RCC_CFGR_SWS_PLL /*!< PLL used as system clock */ +#if defined(RCC_PLLR_SYSCLK_SUPPORT) +#define LL_RCC_SYS_CLKSOURCE_STATUS_PLLR RCC_CFGR_SWS_PLLR /*!< PLLR used as system clock */ +#endif /* RCC_PLLR_SYSCLK_SUPPORT */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_SYSCLK_DIV AHB prescaler + * @{ + */ +#define LL_RCC_SYSCLK_DIV_1 RCC_CFGR_HPRE_DIV1 /*!< SYSCLK not divided */ +#define LL_RCC_SYSCLK_DIV_2 RCC_CFGR_HPRE_DIV2 /*!< SYSCLK divided by 2 */ +#define LL_RCC_SYSCLK_DIV_4 RCC_CFGR_HPRE_DIV4 /*!< SYSCLK divided by 4 */ +#define LL_RCC_SYSCLK_DIV_8 RCC_CFGR_HPRE_DIV8 /*!< SYSCLK divided by 8 */ +#define LL_RCC_SYSCLK_DIV_16 RCC_CFGR_HPRE_DIV16 /*!< SYSCLK divided by 16 */ +#define LL_RCC_SYSCLK_DIV_64 RCC_CFGR_HPRE_DIV64 /*!< SYSCLK divided by 64 */ +#define LL_RCC_SYSCLK_DIV_128 RCC_CFGR_HPRE_DIV128 /*!< SYSCLK divided by 128 */ +#define LL_RCC_SYSCLK_DIV_256 RCC_CFGR_HPRE_DIV256 /*!< SYSCLK divided by 256 */ +#define LL_RCC_SYSCLK_DIV_512 RCC_CFGR_HPRE_DIV512 /*!< SYSCLK divided by 512 */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_APB1_DIV APB low-speed prescaler (APB1) + * @{ + */ +#define LL_RCC_APB1_DIV_1 RCC_CFGR_PPRE1_DIV1 /*!< HCLK not divided */ +#define LL_RCC_APB1_DIV_2 RCC_CFGR_PPRE1_DIV2 /*!< HCLK divided by 2 */ +#define LL_RCC_APB1_DIV_4 RCC_CFGR_PPRE1_DIV4 /*!< HCLK divided by 4 */ +#define LL_RCC_APB1_DIV_8 RCC_CFGR_PPRE1_DIV8 /*!< HCLK divided by 8 */ +#define LL_RCC_APB1_DIV_16 RCC_CFGR_PPRE1_DIV16 /*!< HCLK divided by 16 */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_APB2_DIV APB high-speed prescaler (APB2) + * @{ + */ +#define LL_RCC_APB2_DIV_1 RCC_CFGR_PPRE2_DIV1 /*!< HCLK not divided */ +#define LL_RCC_APB2_DIV_2 RCC_CFGR_PPRE2_DIV2 /*!< HCLK divided by 2 */ +#define LL_RCC_APB2_DIV_4 RCC_CFGR_PPRE2_DIV4 /*!< HCLK divided by 4 */ +#define LL_RCC_APB2_DIV_8 RCC_CFGR_PPRE2_DIV8 /*!< HCLK divided by 8 */ +#define LL_RCC_APB2_DIV_16 RCC_CFGR_PPRE2_DIV16 /*!< HCLK divided by 16 */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_MCOxSOURCE MCO source selection + * @{ + */ +#define LL_RCC_MCO1SOURCE_HSI (uint32_t)(RCC_CFGR_MCO1|0x00000000U) /*!< HSI selection as MCO1 source */ +#define LL_RCC_MCO1SOURCE_LSE (uint32_t)(RCC_CFGR_MCO1|(RCC_CFGR_MCO1_0 >> 16U)) /*!< LSE selection as MCO1 source */ +#define 
LL_RCC_MCO1SOURCE_HSE (uint32_t)(RCC_CFGR_MCO1|(RCC_CFGR_MCO1_1 >> 16U)) /*!< HSE selection as MCO1 source */ +#define LL_RCC_MCO1SOURCE_PLLCLK (uint32_t)(RCC_CFGR_MCO1|((RCC_CFGR_MCO1_1|RCC_CFGR_MCO1_0) >> 16U)) /*!< PLLCLK selection as MCO1 source */ +#if defined(RCC_CFGR_MCO2) +#define LL_RCC_MCO2SOURCE_SYSCLK (uint32_t)(RCC_CFGR_MCO2|0x00000000U) /*!< SYSCLK selection as MCO2 source */ +#define LL_RCC_MCO2SOURCE_PLLI2S (uint32_t)(RCC_CFGR_MCO2|(RCC_CFGR_MCO2_0 >> 16U)) /*!< PLLI2S selection as MCO2 source */ +#define LL_RCC_MCO2SOURCE_HSE (uint32_t)(RCC_CFGR_MCO2|(RCC_CFGR_MCO2_1 >> 16U)) /*!< HSE selection as MCO2 source */ +#define LL_RCC_MCO2SOURCE_PLLCLK (uint32_t)(RCC_CFGR_MCO2|((RCC_CFGR_MCO2_1|RCC_CFGR_MCO2_0) >> 16U)) /*!< PLLCLK selection as MCO2 source */ +#endif /* RCC_CFGR_MCO2 */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_MCOx_DIV MCO prescaler + * @{ + */ +#define LL_RCC_MCO1_DIV_1 (uint32_t)(RCC_CFGR_MCO1PRE|0x00000000U) /*!< MCO1 not divided */ +#define LL_RCC_MCO1_DIV_2 (uint32_t)(RCC_CFGR_MCO1PRE|(RCC_CFGR_MCO1PRE_2 >> 16U)) /*!< MCO1 divided by 2 */ +#define LL_RCC_MCO1_DIV_3 (uint32_t)(RCC_CFGR_MCO1PRE|((RCC_CFGR_MCO1PRE_2|RCC_CFGR_MCO1PRE_0) >> 16U)) /*!< MCO1 divided by 3 */ +#define LL_RCC_MCO1_DIV_4 (uint32_t)(RCC_CFGR_MCO1PRE|((RCC_CFGR_MCO1PRE_2|RCC_CFGR_MCO1PRE_1) >> 16U)) /*!< MCO1 divided by 4 */ +#define LL_RCC_MCO1_DIV_5 (uint32_t)(RCC_CFGR_MCO1PRE|(RCC_CFGR_MCO1PRE >> 16U)) /*!< MCO1 divided by 5 */ +#if defined(RCC_CFGR_MCO2PRE) +#define LL_RCC_MCO2_DIV_1 (uint32_t)(RCC_CFGR_MCO2PRE|0x00000000U) /*!< MCO2 not divided */ +#define LL_RCC_MCO2_DIV_2 (uint32_t)(RCC_CFGR_MCO2PRE|(RCC_CFGR_MCO2PRE_2 >> 16U)) /*!< MCO2 divided by 2 */ +#define LL_RCC_MCO2_DIV_3 (uint32_t)(RCC_CFGR_MCO2PRE|((RCC_CFGR_MCO2PRE_2|RCC_CFGR_MCO2PRE_0) >> 16U)) /*!< MCO2 divided by 3 */ +#define LL_RCC_MCO2_DIV_4 (uint32_t)(RCC_CFGR_MCO2PRE|((RCC_CFGR_MCO2PRE_2|RCC_CFGR_MCO2PRE_1) >> 16U)) /*!< MCO2 divided by 4 */ +#define LL_RCC_MCO2_DIV_5 (uint32_t)(RCC_CFGR_MCO2PRE|(RCC_CFGR_MCO2PRE >> 16U)) /*!< MCO2 divided by 5 */ +#endif /* RCC_CFGR_MCO2PRE */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_RTC_HSEDIV HSE prescaler for RTC clock + * @{ + */ +#define LL_RCC_RTC_NOCLOCK 0x00000000U /*!< HSE not divided */ +#define LL_RCC_RTC_HSE_DIV_2 RCC_CFGR_RTCPRE_1 /*!< HSE clock divided by 2 */ +#define LL_RCC_RTC_HSE_DIV_3 (RCC_CFGR_RTCPRE_1|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 3 */ +#define LL_RCC_RTC_HSE_DIV_4 RCC_CFGR_RTCPRE_2 /*!< HSE clock divided by 4 */ +#define LL_RCC_RTC_HSE_DIV_5 (RCC_CFGR_RTCPRE_2|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 5 */ +#define LL_RCC_RTC_HSE_DIV_6 (RCC_CFGR_RTCPRE_2|RCC_CFGR_RTCPRE_1) /*!< HSE clock divided by 6 */ +#define LL_RCC_RTC_HSE_DIV_7 (RCC_CFGR_RTCPRE_2|RCC_CFGR_RTCPRE_1|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 7 */ +#define LL_RCC_RTC_HSE_DIV_8 RCC_CFGR_RTCPRE_3 /*!< HSE clock divided by 8 */ +#define LL_RCC_RTC_HSE_DIV_9 (RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 9 */ +#define LL_RCC_RTC_HSE_DIV_10 (RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_1) /*!< HSE clock divided by 10 */ +#define LL_RCC_RTC_HSE_DIV_11 (RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_1|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 11 */ +#define LL_RCC_RTC_HSE_DIV_12 (RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_2) /*!< HSE clock divided by 12 */ +#define LL_RCC_RTC_HSE_DIV_13 (RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_2|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 13 */ +#define LL_RCC_RTC_HSE_DIV_14 (RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_2|RCC_CFGR_RTCPRE_1) /*!< HSE clock divided 
by 14 */ +#define LL_RCC_RTC_HSE_DIV_15 (RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_2|RCC_CFGR_RTCPRE_1|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 15 */ +#define LL_RCC_RTC_HSE_DIV_16 RCC_CFGR_RTCPRE_4 /*!< HSE clock divided by 16 */ +#define LL_RCC_RTC_HSE_DIV_17 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 17 */ +#define LL_RCC_RTC_HSE_DIV_18 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_1) /*!< HSE clock divided by 18 */ +#define LL_RCC_RTC_HSE_DIV_19 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_1|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 19 */ +#define LL_RCC_RTC_HSE_DIV_20 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_2) /*!< HSE clock divided by 20 */ +#define LL_RCC_RTC_HSE_DIV_21 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_2|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 21 */ +#define LL_RCC_RTC_HSE_DIV_22 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_2|RCC_CFGR_RTCPRE_1) /*!< HSE clock divided by 22 */ +#define LL_RCC_RTC_HSE_DIV_23 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_2|RCC_CFGR_RTCPRE_1|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 23 */ +#define LL_RCC_RTC_HSE_DIV_24 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_3) /*!< HSE clock divided by 24 */ +#define LL_RCC_RTC_HSE_DIV_25 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 25 */ +#define LL_RCC_RTC_HSE_DIV_26 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_1) /*!< HSE clock divided by 26 */ +#define LL_RCC_RTC_HSE_DIV_27 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_1|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 27 */ +#define LL_RCC_RTC_HSE_DIV_28 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_2) /*!< HSE clock divided by 28 */ +#define LL_RCC_RTC_HSE_DIV_29 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_2|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 29 */ +#define LL_RCC_RTC_HSE_DIV_30 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_2|RCC_CFGR_RTCPRE_1) /*!< HSE clock divided by 30 */ +#define LL_RCC_RTC_HSE_DIV_31 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_2|RCC_CFGR_RTCPRE_1|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 31 */ +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup RCC_LL_EC_PERIPH_FREQUENCY Peripheral clock frequency + * @{ + */ +#define LL_RCC_PERIPH_FREQUENCY_NO 0x00000000U /*!< No clock enabled for the peripheral */ +#define LL_RCC_PERIPH_FREQUENCY_NA 0xFFFFFFFFU /*!< Frequency cannot be provided as external clock */ +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +#if defined(FMPI2C1) +/** @defgroup RCC_LL_EC_FMPI2C1_CLKSOURCE Peripheral FMPI2C clock source selection + * @{ + */ +#define LL_RCC_FMPI2C1_CLKSOURCE_PCLK1 0x00000000U /*!< PCLK1 clock used as FMPI2C1 clock source */ +#define LL_RCC_FMPI2C1_CLKSOURCE_SYSCLK RCC_DCKCFGR2_FMPI2C1SEL_0 /*!< SYSCLK clock used as FMPI2C1 clock source */ +#define LL_RCC_FMPI2C1_CLKSOURCE_HSI RCC_DCKCFGR2_FMPI2C1SEL_1 /*!< HSI clock used as FMPI2C1 clock source */ +/** + * @} + */ +#endif /* FMPI2C1 */ + +#if defined(LPTIM1) +/** @defgroup RCC_LL_EC_LPTIM1_CLKSOURCE Peripheral LPTIM clock source selection + * @{ + */ +#define LL_RCC_LPTIM1_CLKSOURCE_PCLK1 0x00000000U /*!< PCLK1 clock used as LPTIM1 clock */ +#define LL_RCC_LPTIM1_CLKSOURCE_HSI RCC_DCKCFGR2_LPTIM1SEL_0 /*!< LSI oscillator clock used as LPTIM1 clock */ +#define LL_RCC_LPTIM1_CLKSOURCE_LSI RCC_DCKCFGR2_LPTIM1SEL_1 /*!< HSI oscillator clock used as LPTIM1 clock */ +#define LL_RCC_LPTIM1_CLKSOURCE_LSE (uint32_t)(RCC_DCKCFGR2_LPTIM1SEL_1 | RCC_DCKCFGR2_LPTIM1SEL_0) /*!< LSE oscillator clock used as LPTIM1 clock */ +/** + * @} + */ +#endif /* LPTIM1 */ + +#if 
defined(SAI1) +/** @defgroup RCC_LL_EC_SAIx_CLKSOURCE Peripheral SAI clock source selection + * @{ + */ +#if defined(RCC_DCKCFGR_SAI1SRC) +#define LL_RCC_SAI1_CLKSOURCE_PLLSAI (uint32_t)(RCC_DCKCFGR_SAI1SRC | 0x00000000U) /*!< PLLSAI clock used as SAI1 clock source */ +#define LL_RCC_SAI1_CLKSOURCE_PLLI2S (uint32_t)(RCC_DCKCFGR_SAI1SRC | (RCC_DCKCFGR_SAI1SRC_0 >> 16)) /*!< PLLI2S clock used as SAI1 clock source */ +#define LL_RCC_SAI1_CLKSOURCE_PLL (uint32_t)(RCC_DCKCFGR_SAI1SRC | (RCC_DCKCFGR_SAI1SRC_1 >> 16)) /*!< PLL clock used as SAI1 clock source */ +#define LL_RCC_SAI1_CLKSOURCE_PIN (uint32_t)(RCC_DCKCFGR_SAI1SRC | (RCC_DCKCFGR_SAI1SRC >> 16)) /*!< External pin clock used as SAI1 clock source */ +#endif /* RCC_DCKCFGR_SAI1SRC */ +#if defined(RCC_DCKCFGR_SAI2SRC) +#define LL_RCC_SAI2_CLKSOURCE_PLLSAI (uint32_t)(RCC_DCKCFGR_SAI2SRC | 0x00000000U) /*!< PLLSAI clock used as SAI2 clock source */ +#define LL_RCC_SAI2_CLKSOURCE_PLLI2S (uint32_t)(RCC_DCKCFGR_SAI2SRC | (RCC_DCKCFGR_SAI2SRC_0 >> 16)) /*!< PLLI2S clock used as SAI2 clock source */ +#define LL_RCC_SAI2_CLKSOURCE_PLL (uint32_t)(RCC_DCKCFGR_SAI2SRC | (RCC_DCKCFGR_SAI2SRC_1 >> 16)) /*!< PLL clock used as SAI2 clock source */ +#define LL_RCC_SAI2_CLKSOURCE_PLLSRC (uint32_t)(RCC_DCKCFGR_SAI2SRC | (RCC_DCKCFGR_SAI2SRC >> 16)) /*!< PLL Main clock used as SAI2 clock source */ +#endif /* RCC_DCKCFGR_SAI2SRC */ +#if defined(RCC_DCKCFGR_SAI1ASRC) +#if defined(RCC_SAI1A_PLLSOURCE_SUPPORT) +#define LL_RCC_SAI1_A_CLKSOURCE_PLLI2S (uint32_t)(RCC_DCKCFGR_SAI1ASRC | 0x00000000U) /*!< PLLI2S clock used as SAI1 block A clock source */ +#define LL_RCC_SAI1_A_CLKSOURCE_PIN (uint32_t)(RCC_DCKCFGR_SAI1ASRC | (RCC_DCKCFGR_SAI1ASRC_0 >> 16)) /*!< External pin used as SAI1 block A clock source */ +#define LL_RCC_SAI1_A_CLKSOURCE_PLL (uint32_t)(RCC_DCKCFGR_SAI1ASRC | (RCC_DCKCFGR_SAI1ASRC_1 >> 16)) /*!< PLL clock used as SAI1 block A clock source */ +#define LL_RCC_SAI1_A_CLKSOURCE_PLLSRC (uint32_t)(RCC_DCKCFGR_SAI1ASRC | (RCC_DCKCFGR_SAI1ASRC >> 16)) /*!< PLL Main clock used as SAI1 block A clock source */ +#else +#define LL_RCC_SAI1_A_CLKSOURCE_PLLSAI (uint32_t)(RCC_DCKCFGR_SAI1ASRC | 0x00000000U) /*!< PLLSAI clock used as SAI1 block A clock source */ +#define LL_RCC_SAI1_A_CLKSOURCE_PLLI2S (uint32_t)(RCC_DCKCFGR_SAI1ASRC | (RCC_DCKCFGR_SAI1ASRC_0 >> 16)) /*!< PLLI2S clock used as SAI1 block A clock source */ +#define LL_RCC_SAI1_A_CLKSOURCE_PIN (uint32_t)(RCC_DCKCFGR_SAI1ASRC | (RCC_DCKCFGR_SAI1ASRC_1 >> 16)) /*!< External pin clock used as SAI1 block A clock source */ +#endif /* RCC_SAI1A_PLLSOURCE_SUPPORT */ +#endif /* RCC_DCKCFGR_SAI1ASRC */ +#if defined(RCC_DCKCFGR_SAI1BSRC) +#if defined(RCC_SAI1B_PLLSOURCE_SUPPORT) +#define LL_RCC_SAI1_B_CLKSOURCE_PLLI2S (uint32_t)(RCC_DCKCFGR_SAI1BSRC | 0x00000000U) /*!< PLLI2S clock used as SAI1 block B clock source */ +#define LL_RCC_SAI1_B_CLKSOURCE_PIN (uint32_t)(RCC_DCKCFGR_SAI1BSRC | (RCC_DCKCFGR_SAI1BSRC_0 >> 16)) /*!< External pin used as SAI1 block B clock source */ +#define LL_RCC_SAI1_B_CLKSOURCE_PLL (uint32_t)(RCC_DCKCFGR_SAI1BSRC | (RCC_DCKCFGR_SAI1BSRC_1 >> 16)) /*!< PLL clock used as SAI1 block B clock source */ +#define LL_RCC_SAI1_B_CLKSOURCE_PLLSRC (uint32_t)(RCC_DCKCFGR_SAI1BSRC | (RCC_DCKCFGR_SAI1BSRC >> 16)) /*!< PLL Main clock used as SAI1 block B clock source */ +#else +#define LL_RCC_SAI1_B_CLKSOURCE_PLLSAI (uint32_t)(RCC_DCKCFGR_SAI1BSRC | 0x00000000U) /*!< PLLSAI clock used as SAI1 block B clock source */ +#define LL_RCC_SAI1_B_CLKSOURCE_PLLI2S (uint32_t)(RCC_DCKCFGR_SAI1BSRC | 
(RCC_DCKCFGR_SAI1BSRC_0 >> 16)) /*!< PLLI2S clock used as SAI1 block B clock source */ +#define LL_RCC_SAI1_B_CLKSOURCE_PIN (uint32_t)(RCC_DCKCFGR_SAI1BSRC | (RCC_DCKCFGR_SAI1BSRC_1 >> 16)) /*!< External pin clock used as SAI1 block B clock source */ +#endif /* RCC_SAI1B_PLLSOURCE_SUPPORT */ +#endif /* RCC_DCKCFGR_SAI1BSRC */ +/** + * @} + */ +#endif /* SAI1 */ + +#if defined(RCC_DCKCFGR_SDIOSEL) || defined(RCC_DCKCFGR2_SDIOSEL) +/** @defgroup RCC_LL_EC_SDIOx_CLKSOURCE Peripheral SDIO clock source selection + * @{ + */ +#define LL_RCC_SDIO_CLKSOURCE_PLL48CLK 0x00000000U /*!< PLL 48M domain clock used as SDIO clock */ +#if defined(RCC_DCKCFGR_SDIOSEL) +#define LL_RCC_SDIO_CLKSOURCE_SYSCLK RCC_DCKCFGR_SDIOSEL /*!< System clock clock used as SDIO clock */ +#else +#define LL_RCC_SDIO_CLKSOURCE_SYSCLK RCC_DCKCFGR2_SDIOSEL /*!< System clock clock used as SDIO clock */ +#endif /* RCC_DCKCFGR_SDIOSEL */ +/** + * @} + */ +#endif /* RCC_DCKCFGR_SDIOSEL || RCC_DCKCFGR2_SDIOSEL */ + +#if defined(DSI) +/** @defgroup RCC_LL_EC_DSI_CLKSOURCE Peripheral DSI clock source selection + * @{ + */ +#define LL_RCC_DSI_CLKSOURCE_PHY 0x00000000U /*!< DSI-PHY clock used as DSI byte lane clock source */ +#define LL_RCC_DSI_CLKSOURCE_PLL RCC_DCKCFGR_DSISEL /*!< PLL clock used as DSI byte lane clock source */ +/** + * @} + */ +#endif /* DSI */ + +#if defined(CEC) +/** @defgroup RCC_LL_EC_CEC_CLKSOURCE Peripheral CEC clock source selection + * @{ + */ +#define LL_RCC_CEC_CLKSOURCE_HSI_DIV488 0x00000000U /*!< HSI oscillator clock divided by 488 used as CEC clock */ +#define LL_RCC_CEC_CLKSOURCE_LSE RCC_DCKCFGR2_CECSEL /*!< LSE oscillator clock used as CEC clock */ +/** + * @} + */ +#endif /* CEC */ + +/** @defgroup RCC_LL_EC_I2S1_CLKSOURCE Peripheral I2S clock source selection + * @{ + */ +#if defined(RCC_CFGR_I2SSRC) +#define LL_RCC_I2S1_CLKSOURCE_PLLI2S 0x00000000U /*!< I2S oscillator clock used as I2S1 clock */ +#define LL_RCC_I2S1_CLKSOURCE_PIN RCC_CFGR_I2SSRC /*!< External pin clock used as I2S1 clock */ +#endif /* RCC_CFGR_I2SSRC */ +#if defined(RCC_DCKCFGR_I2SSRC) +#define LL_RCC_I2S1_CLKSOURCE_PLL (uint32_t)(RCC_DCKCFGR_I2SSRC | 0x00000000U) /*!< PLL clock used as I2S1 clock source */ +#define LL_RCC_I2S1_CLKSOURCE_PIN (uint32_t)(RCC_DCKCFGR_I2SSRC | (RCC_DCKCFGR_I2SSRC_0 >> 16)) /*!< External pin used as I2S1 clock source */ +#define LL_RCC_I2S1_CLKSOURCE_PLLSRC (uint32_t)(RCC_DCKCFGR_I2SSRC | (RCC_DCKCFGR_I2SSRC_1 >> 16)) /*!< PLL Main clock used as I2S1 clock source */ +#endif /* RCC_DCKCFGR_I2SSRC */ +#if defined(RCC_DCKCFGR_I2S1SRC) +#define LL_RCC_I2S1_CLKSOURCE_PLLI2S (uint32_t)(RCC_DCKCFGR_I2S1SRC | 0x00000000U) /*!< PLLI2S clock used as I2S1 clock source */ +#define LL_RCC_I2S1_CLKSOURCE_PIN (uint32_t)(RCC_DCKCFGR_I2S1SRC | (RCC_DCKCFGR_I2S1SRC_0 >> 16)) /*!< External pin used as I2S1 clock source */ +#define LL_RCC_I2S1_CLKSOURCE_PLL (uint32_t)(RCC_DCKCFGR_I2S1SRC | (RCC_DCKCFGR_I2S1SRC_1 >> 16)) /*!< PLL clock used as I2S1 clock source */ +#define LL_RCC_I2S1_CLKSOURCE_PLLSRC (uint32_t)(RCC_DCKCFGR_I2S1SRC | (RCC_DCKCFGR_I2S1SRC >> 16)) /*!< PLL Main clock used as I2S1 clock source */ +#endif /* RCC_DCKCFGR_I2S1SRC */ +#if defined(RCC_DCKCFGR_I2S2SRC) +#define LL_RCC_I2S2_CLKSOURCE_PLLI2S (uint32_t)(RCC_DCKCFGR_I2S2SRC | 0x00000000U) /*!< PLLI2S clock used as I2S2 clock source */ +#define LL_RCC_I2S2_CLKSOURCE_PIN (uint32_t)(RCC_DCKCFGR_I2S2SRC | (RCC_DCKCFGR_I2S2SRC_0 >> 16)) /*!< External pin used as I2S2 clock source */ +#define LL_RCC_I2S2_CLKSOURCE_PLL (uint32_t)(RCC_DCKCFGR_I2S2SRC | 
(RCC_DCKCFGR_I2S2SRC_1 >> 16)) /*!< PLL clock used as I2S2 clock source */ +#define LL_RCC_I2S2_CLKSOURCE_PLLSRC (uint32_t)(RCC_DCKCFGR_I2S2SRC | (RCC_DCKCFGR_I2S2SRC >> 16)) /*!< PLL Main clock used as I2S2 clock source */ +#endif /* RCC_DCKCFGR_I2S2SRC */ +/** + * @} + */ + +#if defined(RCC_DCKCFGR_CK48MSEL) || defined(RCC_DCKCFGR2_CK48MSEL) +/** @defgroup RCC_LL_EC_CK48M_CLKSOURCE Peripheral 48Mhz domain clock source selection + * @{ + */ +#if defined(RCC_DCKCFGR_CK48MSEL) +#define LL_RCC_CK48M_CLKSOURCE_PLL 0x00000000U /*!< PLL oscillator clock used as 48Mhz domain clock */ +#define LL_RCC_CK48M_CLKSOURCE_PLLSAI RCC_DCKCFGR_CK48MSEL /*!< PLLSAI oscillator clock used as 48Mhz domain clock */ +#endif /* RCC_DCKCFGR_CK48MSEL */ +#if defined(RCC_DCKCFGR2_CK48MSEL) +#define LL_RCC_CK48M_CLKSOURCE_PLL 0x00000000U /*!< PLL oscillator clock used as 48Mhz domain clock */ +#if defined(RCC_PLLSAI_SUPPORT) +#define LL_RCC_CK48M_CLKSOURCE_PLLSAI RCC_DCKCFGR2_CK48MSEL /*!< PLLSAI oscillator clock used as 48Mhz domain clock */ +#endif /* RCC_PLLSAI_SUPPORT */ +#if defined(RCC_PLLI2SCFGR_PLLI2SQ) && !defined(RCC_DCKCFGR_PLLI2SDIVQ) +#define LL_RCC_CK48M_CLKSOURCE_PLLI2S RCC_DCKCFGR2_CK48MSEL /*!< PLLI2S oscillator clock used as 48Mhz domain clock */ +#endif /* RCC_PLLI2SCFGR_PLLI2SQ && !RCC_DCKCFGR_PLLI2SDIVQ */ +#endif /* RCC_DCKCFGR2_CK48MSEL */ +/** + * @} + */ + +#if defined(RNG) +/** @defgroup RCC_LL_EC_RNG_CLKSOURCE Peripheral RNG clock source selection + * @{ + */ +#define LL_RCC_RNG_CLKSOURCE_PLL LL_RCC_CK48M_CLKSOURCE_PLL /*!< PLL clock used as RNG clock source */ +#if defined(RCC_PLLSAI_SUPPORT) +#define LL_RCC_RNG_CLKSOURCE_PLLSAI LL_RCC_CK48M_CLKSOURCE_PLLSAI /*!< PLLSAI clock used as RNG clock source */ +#endif /* RCC_PLLSAI_SUPPORT */ +#if defined(RCC_PLLI2SCFGR_PLLI2SQ) && !defined(RCC_DCKCFGR_PLLI2SDIVQ) +#define LL_RCC_RNG_CLKSOURCE_PLLI2S LL_RCC_CK48M_CLKSOURCE_PLLI2S /*!< PLLI2S clock used as RNG clock source */ +#endif /* RCC_PLLI2SCFGR_PLLI2SQ && !RCC_DCKCFGR_PLLI2SDIVQ */ +/** + * @} + */ +#endif /* RNG */ + +#if defined(USB_OTG_FS) || defined(USB_OTG_HS) +/** @defgroup RCC_LL_EC_USB_CLKSOURCE Peripheral USB clock source selection + * @{ + */ +#define LL_RCC_USB_CLKSOURCE_PLL LL_RCC_CK48M_CLKSOURCE_PLL /*!< PLL clock used as USB clock source */ +#if defined(RCC_PLLSAI_SUPPORT) +#define LL_RCC_USB_CLKSOURCE_PLLSAI LL_RCC_CK48M_CLKSOURCE_PLLSAI /*!< PLLSAI clock used as USB clock source */ +#endif /* RCC_PLLSAI_SUPPORT */ +#if defined(RCC_PLLI2SCFGR_PLLI2SQ) && !defined(RCC_DCKCFGR_PLLI2SDIVQ) +#define LL_RCC_USB_CLKSOURCE_PLLI2S LL_RCC_CK48M_CLKSOURCE_PLLI2S /*!< PLLI2S clock used as USB clock source */ +#endif /* RCC_PLLI2SCFGR_PLLI2SQ && !RCC_DCKCFGR_PLLI2SDIVQ */ +/** + * @} + */ +#endif /* USB_OTG_FS || USB_OTG_HS */ + +#endif /* RCC_DCKCFGR_CK48MSEL || RCC_DCKCFGR2_CK48MSEL */ + +#if defined(DFSDM1_Channel0) || defined(DFSDM2_Channel0) +/** @defgroup RCC_LL_EC_DFSDM1_AUDIO_CLKSOURCE Peripheral DFSDM Audio clock source selection + * @{ + */ +#define LL_RCC_DFSDM1_AUDIO_CLKSOURCE_I2S1 (uint32_t)(RCC_DCKCFGR_CKDFSDM1ASEL | 0x00000000U) /*!< I2S1 clock used as DFSDM1 Audio clock source */ +#define LL_RCC_DFSDM1_AUDIO_CLKSOURCE_I2S2 (uint32_t)(RCC_DCKCFGR_CKDFSDM1ASEL | (RCC_DCKCFGR_CKDFSDM1ASEL << 16)) /*!< I2S2 clock used as DFSDM1 Audio clock source */ +#if defined(DFSDM2_Channel0) +#define LL_RCC_DFSDM2_AUDIO_CLKSOURCE_I2S1 (uint32_t)(RCC_DCKCFGR_CKDFSDM2ASEL | 0x00000000U) /*!< I2S1 clock used as DFSDM2 Audio clock source */ +#define LL_RCC_DFSDM2_AUDIO_CLKSOURCE_I2S2 
(uint32_t)(RCC_DCKCFGR_CKDFSDM2ASEL | (RCC_DCKCFGR_CKDFSDM2ASEL << 16)) /*!< I2S2 clock used as DFSDM2 Audio clock source */ +#endif /* DFSDM2_Channel0 */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_DFSDM1_CLKSOURCE Peripheral DFSDM clock source selection + * @{ + */ +#define LL_RCC_DFSDM1_CLKSOURCE_PCLK2 0x00000000U /*!< PCLK2 clock used as DFSDM1 clock */ +#define LL_RCC_DFSDM1_CLKSOURCE_SYSCLK RCC_DCKCFGR_CKDFSDM1SEL /*!< System clock used as DFSDM1 clock */ +#if defined(DFSDM2_Channel0) +#define LL_RCC_DFSDM2_CLKSOURCE_PCLK2 0x00000000U /*!< PCLK2 clock used as DFSDM2 clock */ +#define LL_RCC_DFSDM2_CLKSOURCE_SYSCLK RCC_DCKCFGR_CKDFSDM1SEL /*!< System clock used as DFSDM2 clock */ +#endif /* DFSDM2_Channel0 */ +/** + * @} + */ +#endif /* DFSDM1_Channel0 || DFSDM2_Channel0 */ + +#if defined(FMPI2C1) +/** @defgroup RCC_LL_EC_FMPI2C1 Peripheral FMPI2C get clock source + * @{ + */ +#define LL_RCC_FMPI2C1_CLKSOURCE RCC_DCKCFGR2_FMPI2C1SEL /*!< FMPI2C1 Clock source selection */ +/** + * @} + */ +#endif /* FMPI2C1 */ + +#if defined(SPDIFRX) +/** @defgroup RCC_LL_EC_SPDIFRX_CLKSOURCE Peripheral SPDIFRX clock source selection + * @{ + */ +#define LL_RCC_SPDIFRX1_CLKSOURCE_PLL 0x00000000U /*!< PLL clock used as SPDIFRX clock source */ +#define LL_RCC_SPDIFRX1_CLKSOURCE_PLLI2S RCC_DCKCFGR2_SPDIFRXSEL /*!< PLLI2S clock used as SPDIFRX clock source */ +/** + * @} + */ +#endif /* SPDIFRX */ + +#if defined(LPTIM1) +/** @defgroup RCC_LL_EC_LPTIM1 Peripheral LPTIM get clock source + * @{ + */ +#define LL_RCC_LPTIM1_CLKSOURCE RCC_DCKCFGR2_LPTIM1SEL /*!< LPTIM1 Clock source selection */ +/** + * @} + */ +#endif /* LPTIM1 */ + +#if defined(SAI1) +/** @defgroup RCC_LL_EC_SAIx Peripheral SAI get clock source + * @{ + */ +#if defined(RCC_DCKCFGR_SAI1ASRC) +#define LL_RCC_SAI1_A_CLKSOURCE RCC_DCKCFGR_SAI1ASRC /*!< SAI1 block A Clock source selection */ +#endif /* RCC_DCKCFGR_SAI1ASRC */ +#if defined(RCC_DCKCFGR_SAI1BSRC) +#define LL_RCC_SAI1_B_CLKSOURCE RCC_DCKCFGR_SAI1BSRC /*!< SAI1 block B Clock source selection */ +#endif /* RCC_DCKCFGR_SAI1BSRC */ +#if defined(RCC_DCKCFGR_SAI1SRC) +#define LL_RCC_SAI1_CLKSOURCE RCC_DCKCFGR_SAI1SRC /*!< SAI1 Clock source selection */ +#endif /* RCC_DCKCFGR_SAI1SRC */ +#if defined(RCC_DCKCFGR_SAI2SRC) +#define LL_RCC_SAI2_CLKSOURCE RCC_DCKCFGR_SAI2SRC /*!< SAI2 Clock source selection */ +#endif /* RCC_DCKCFGR_SAI2SRC */ +/** + * @} + */ +#endif /* SAI1 */ + +#if defined(SDIO) +/** @defgroup RCC_LL_EC_SDIOx Peripheral SDIO get clock source + * @{ + */ +#if defined(RCC_DCKCFGR_SDIOSEL) +#define LL_RCC_SDIO_CLKSOURCE RCC_DCKCFGR_SDIOSEL /*!< SDIO Clock source selection */ +#elif defined(RCC_DCKCFGR2_SDIOSEL) +#define LL_RCC_SDIO_CLKSOURCE RCC_DCKCFGR2_SDIOSEL /*!< SDIO Clock source selection */ +#else +#define LL_RCC_SDIO_CLKSOURCE RCC_PLLCFGR_PLLQ /*!< SDIO Clock source selection */ +#endif /* RCC_DCKCFGR_SDIOSEL */ +/** + * @} + */ +#endif /* SDIO */ + +#if defined(RCC_DCKCFGR_CK48MSEL) || defined(RCC_DCKCFGR2_CK48MSEL) +/** @defgroup RCC_LL_EC_CK48M Peripheral CK48M get clock source + * @{ + */ +#if defined(RCC_DCKCFGR_CK48MSEL) +#define LL_RCC_CK48M_CLKSOURCE RCC_DCKCFGR_CK48MSEL /*!< CK48M Domain clock source selection */ +#endif /* RCC_DCKCFGR_CK48MSEL */ +#if defined(RCC_DCKCFGR2_CK48MSEL) +#define LL_RCC_CK48M_CLKSOURCE RCC_DCKCFGR2_CK48MSEL /*!< CK48M Domain clock source selection */ +#endif /* RCC_DCKCFGR_CK48MSEL */ +/** + * @} + */ +#endif /* RCC_DCKCFGR_CK48MSEL || RCC_DCKCFGR2_CK48MSEL */ + +#if defined(RNG) +/** @defgroup RCC_LL_EC_RNG Peripheral RNG get clock 
source + * @{ + */ +#if defined(RCC_DCKCFGR_CK48MSEL) || defined(RCC_DCKCFGR2_CK48MSEL) +#define LL_RCC_RNG_CLKSOURCE LL_RCC_CK48M_CLKSOURCE /*!< RNG Clock source selection */ +#else +#define LL_RCC_RNG_CLKSOURCE RCC_PLLCFGR_PLLQ /*!< RNG Clock source selection */ +#endif /* RCC_DCKCFGR_CK48MSEL || RCC_DCKCFGR2_CK48MSEL */ +/** + * @} + */ +#endif /* RNG */ + +#if defined(USB_OTG_FS) || defined(USB_OTG_HS) +/** @defgroup RCC_LL_EC_USB Peripheral USB get clock source + * @{ + */ +#if defined(RCC_DCKCFGR_CK48MSEL) || defined(RCC_DCKCFGR2_CK48MSEL) +#define LL_RCC_USB_CLKSOURCE LL_RCC_CK48M_CLKSOURCE /*!< USB Clock source selection */ +#else +#define LL_RCC_USB_CLKSOURCE RCC_PLLCFGR_PLLQ /*!< USB Clock source selection */ +#endif /* RCC_DCKCFGR_CK48MSEL || RCC_DCKCFGR2_CK48MSEL */ +/** + * @} + */ +#endif /* USB_OTG_FS || USB_OTG_HS */ + +#if defined(CEC) +/** @defgroup RCC_LL_EC_CEC Peripheral CEC get clock source + * @{ + */ +#define LL_RCC_CEC_CLKSOURCE RCC_DCKCFGR2_CECSEL /*!< CEC Clock source selection */ +/** + * @} + */ +#endif /* CEC */ + +/** @defgroup RCC_LL_EC_I2S1 Peripheral I2S get clock source + * @{ + */ +#if defined(RCC_CFGR_I2SSRC) +#define LL_RCC_I2S1_CLKSOURCE RCC_CFGR_I2SSRC /*!< I2S1 Clock source selection */ +#endif /* RCC_CFGR_I2SSRC */ +#if defined(RCC_DCKCFGR_I2SSRC) +#define LL_RCC_I2S1_CLKSOURCE RCC_DCKCFGR_I2SSRC /*!< I2S1 Clock source selection */ +#endif /* RCC_DCKCFGR_I2SSRC */ +#if defined(RCC_DCKCFGR_I2S1SRC) +#define LL_RCC_I2S1_CLKSOURCE RCC_DCKCFGR_I2S1SRC /*!< I2S1 Clock source selection */ +#endif /* RCC_DCKCFGR_I2S1SRC */ +#if defined(RCC_DCKCFGR_I2S2SRC) +#define LL_RCC_I2S2_CLKSOURCE RCC_DCKCFGR_I2S2SRC /*!< I2S2 Clock source selection */ +#endif /* RCC_DCKCFGR_I2S2SRC */ +/** + * @} + */ + +#if defined(DFSDM1_Channel0) || defined(DFSDM2_Channel0) +/** @defgroup RCC_LL_EC_DFSDM_AUDIO Peripheral DFSDM Audio get clock source + * @{ + */ +#define LL_RCC_DFSDM1_AUDIO_CLKSOURCE RCC_DCKCFGR_CKDFSDM1ASEL /*!< DFSDM1 Audio Clock source selection */ +#if defined(DFSDM2_Channel0) +#define LL_RCC_DFSDM2_AUDIO_CLKSOURCE RCC_DCKCFGR_CKDFSDM2ASEL /*!< DFSDM2 Audio Clock source selection */ +#endif /* DFSDM2_Channel0 */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_DFSDM Peripheral DFSDM get clock source + * @{ + */ +#define LL_RCC_DFSDM1_CLKSOURCE RCC_DCKCFGR_CKDFSDM1SEL /*!< DFSDM1 Clock source selection */ +#if defined(DFSDM2_Channel0) +#define LL_RCC_DFSDM2_CLKSOURCE RCC_DCKCFGR_CKDFSDM1SEL /*!< DFSDM2 Clock source selection */ +#endif /* DFSDM2_Channel0 */ +/** + * @} + */ +#endif /* DFSDM1_Channel0 || DFSDM2_Channel0 */ + +#if defined(SPDIFRX) +/** @defgroup RCC_LL_EC_SPDIFRX Peripheral SPDIFRX get clock source + * @{ + */ +#define LL_RCC_SPDIFRX1_CLKSOURCE RCC_DCKCFGR2_SPDIFRXSEL /*!< SPDIFRX Clock source selection */ +/** + * @} + */ +#endif /* SPDIFRX */ + +#if defined(DSI) +/** @defgroup RCC_LL_EC_DSI Peripheral DSI get clock source + * @{ + */ +#define LL_RCC_DSI_CLKSOURCE RCC_DCKCFGR_DSISEL /*!< DSI Clock source selection */ +/** + * @} + */ +#endif /* DSI */ + +#if defined(LTDC) +/** @defgroup RCC_LL_EC_LTDC Peripheral LTDC get clock source + * @{ + */ +#define LL_RCC_LTDC_CLKSOURCE RCC_DCKCFGR_PLLSAIDIVR /*!< LTDC Clock source selection */ +/** + * @} + */ +#endif /* LTDC */ + + +/** @defgroup RCC_LL_EC_RTC_CLKSOURCE RTC clock source selection + * @{ + */ +#define LL_RCC_RTC_CLKSOURCE_NONE 0x00000000U /*!< No clock used as RTC clock */ +#define LL_RCC_RTC_CLKSOURCE_LSE RCC_BDCR_RTCSEL_0 /*!< LSE oscillator clock used as RTC clock */ +#define 
LL_RCC_RTC_CLKSOURCE_LSI RCC_BDCR_RTCSEL_1 /*!< LSI oscillator clock used as RTC clock */ +#define LL_RCC_RTC_CLKSOURCE_HSE RCC_BDCR_RTCSEL /*!< HSE oscillator clock divided by HSE prescaler used as RTC clock */ +/** + * @} + */ + +#if defined(RCC_DCKCFGR_TIMPRE) +/** @defgroup RCC_LL_EC_TIM_CLKPRESCALER Timers clocks prescalers selection + * @{ + */ +#define LL_RCC_TIM_PRESCALER_TWICE 0x00000000U /*!< Timers clock to twice PCLK */ +#define LL_RCC_TIM_PRESCALER_FOUR_TIMES RCC_DCKCFGR_TIMPRE /*!< Timers clock to four time PCLK */ +/** + * @} + */ +#endif /* RCC_DCKCFGR_TIMPRE */ + +/** @defgroup RCC_LL_EC_PLLSOURCE PLL, PLLI2S and PLLSAI entry clock source + * @{ + */ +#define LL_RCC_PLLSOURCE_HSI RCC_PLLCFGR_PLLSRC_HSI /*!< HSI16 clock selected as PLL entry clock source */ +#define LL_RCC_PLLSOURCE_HSE RCC_PLLCFGR_PLLSRC_HSE /*!< HSE clock selected as PLL entry clock source */ +#if defined(RCC_PLLI2SCFGR_PLLI2SSRC) +#define LL_RCC_PLLI2SSOURCE_PIN (RCC_PLLI2SCFGR_PLLI2SSRC | 0x80U) /*!< I2S External pin input clock selected as PLLI2S entry clock source */ +#endif /* RCC_PLLI2SCFGR_PLLI2SSRC */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_PLLM_DIV PLL, PLLI2S and PLLSAI division factor + * @{ + */ +#define LL_RCC_PLLM_DIV_2 (RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 2 */ +#define LL_RCC_PLLM_DIV_3 (RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 3 */ +#define LL_RCC_PLLM_DIV_4 (RCC_PLLCFGR_PLLM_2) /*!< PLL, PLLI2S and PLLSAI division factor by 4 */ +#define LL_RCC_PLLM_DIV_5 (RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 5 */ +#define LL_RCC_PLLM_DIV_6 (RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 6 */ +#define LL_RCC_PLLM_DIV_7 (RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 7 */ +#define LL_RCC_PLLM_DIV_8 (RCC_PLLCFGR_PLLM_3) /*!< PLL, PLLI2S and PLLSAI division factor by 8 */ +#define LL_RCC_PLLM_DIV_9 (RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 9 */ +#define LL_RCC_PLLM_DIV_10 (RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 10 */ +#define LL_RCC_PLLM_DIV_11 (RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 11 */ +#define LL_RCC_PLLM_DIV_12 (RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2) /*!< PLL, PLLI2S and PLLSAI division factor by 12 */ +#define LL_RCC_PLLM_DIV_13 (RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 13 */ +#define LL_RCC_PLLM_DIV_14 (RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 14 */ +#define LL_RCC_PLLM_DIV_15 (RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 15 */ +#define LL_RCC_PLLM_DIV_16 (RCC_PLLCFGR_PLLM_4) /*!< PLL, PLLI2S and PLLSAI division factor by 16 */ +#define LL_RCC_PLLM_DIV_17 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 17 */ +#define LL_RCC_PLLM_DIV_18 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 18 */ +#define LL_RCC_PLLM_DIV_19 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 19 */ +#define LL_RCC_PLLM_DIV_20 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_2) /*!< 
PLL, PLLI2S and PLLSAI division factor by 20 */ +#define LL_RCC_PLLM_DIV_21 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 21 */ +#define LL_RCC_PLLM_DIV_22 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 22 */ +#define LL_RCC_PLLM_DIV_23 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 23 */ +#define LL_RCC_PLLM_DIV_24 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3) /*!< PLL, PLLI2S and PLLSAI division factor by 24 */ +#define LL_RCC_PLLM_DIV_25 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 25 */ +#define LL_RCC_PLLM_DIV_26 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 26 */ +#define LL_RCC_PLLM_DIV_27 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 27 */ +#define LL_RCC_PLLM_DIV_28 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2) /*!< PLL, PLLI2S and PLLSAI division factor by 28 */ +#define LL_RCC_PLLM_DIV_29 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 29 */ +#define LL_RCC_PLLM_DIV_30 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 30 */ +#define LL_RCC_PLLM_DIV_31 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 31 */ +#define LL_RCC_PLLM_DIV_32 (RCC_PLLCFGR_PLLM_5) /*!< PLL, PLLI2S and PLLSAI division factor by 32 */ +#define LL_RCC_PLLM_DIV_33 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 33 */ +#define LL_RCC_PLLM_DIV_34 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 34 */ +#define LL_RCC_PLLM_DIV_35 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 35 */ +#define LL_RCC_PLLM_DIV_36 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_2) /*!< PLL, PLLI2S and PLLSAI division factor by 36 */ +#define LL_RCC_PLLM_DIV_37 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 37 */ +#define LL_RCC_PLLM_DIV_38 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 38 */ +#define LL_RCC_PLLM_DIV_39 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 39 */ +#define LL_RCC_PLLM_DIV_40 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_3) /*!< PLL, PLLI2S and PLLSAI division factor by 40 */ +#define LL_RCC_PLLM_DIV_41 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 41 */ +#define LL_RCC_PLLM_DIV_42 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 42 */ +#define LL_RCC_PLLM_DIV_43 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 43 */ +#define LL_RCC_PLLM_DIV_44 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2) /*!< PLL, PLLI2S and PLLSAI division factor by 44 */ +#define LL_RCC_PLLM_DIV_45 
(RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 45 */ +#define LL_RCC_PLLM_DIV_46 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 46 */ +#define LL_RCC_PLLM_DIV_47 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 47 */ +#define LL_RCC_PLLM_DIV_48 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4) /*!< PLL, PLLI2S and PLLSAI division factor by 48 */ +#define LL_RCC_PLLM_DIV_49 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 49 */ +#define LL_RCC_PLLM_DIV_50 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 50 */ +#define LL_RCC_PLLM_DIV_51 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 51 */ +#define LL_RCC_PLLM_DIV_52 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_2) /*!< PLL, PLLI2S and PLLSAI division factor by 52 */ +#define LL_RCC_PLLM_DIV_53 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 53 */ +#define LL_RCC_PLLM_DIV_54 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 54 */ +#define LL_RCC_PLLM_DIV_55 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 55 */ +#define LL_RCC_PLLM_DIV_56 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3) /*!< PLL, PLLI2S and PLLSAI division factor by 56 */ +#define LL_RCC_PLLM_DIV_57 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 57 */ +#define LL_RCC_PLLM_DIV_58 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 58 */ +#define LL_RCC_PLLM_DIV_59 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 59 */ +#define LL_RCC_PLLM_DIV_60 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2) /*!< PLL, PLLI2S and PLLSAI division factor by 60 */ +#define LL_RCC_PLLM_DIV_61 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 61 */ +#define LL_RCC_PLLM_DIV_62 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 62 */ +#define LL_RCC_PLLM_DIV_63 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 63 */ +/** + * @} + */ + +#if defined(RCC_PLLCFGR_PLLR) +/** @defgroup RCC_LL_EC_PLLR_DIV PLL division factor (PLLR) + * @{ + */ +#define LL_RCC_PLLR_DIV_2 (RCC_PLLCFGR_PLLR_1) /*!< Main PLL division factor for PLLCLK (system clock) by 2 */ +#define LL_RCC_PLLR_DIV_3 (RCC_PLLCFGR_PLLR_1|RCC_PLLCFGR_PLLR_0) /*!< Main PLL division factor for PLLCLK (system clock) by 3 */ +#define LL_RCC_PLLR_DIV_4 (RCC_PLLCFGR_PLLR_2) /*!< Main 
PLL division factor for PLLCLK (system clock) by 4 */ +#define LL_RCC_PLLR_DIV_5 (RCC_PLLCFGR_PLLR_2|RCC_PLLCFGR_PLLR_0) /*!< Main PLL division factor for PLLCLK (system clock) by 5 */ +#define LL_RCC_PLLR_DIV_6 (RCC_PLLCFGR_PLLR_2|RCC_PLLCFGR_PLLR_1) /*!< Main PLL division factor for PLLCLK (system clock) by 6 */ +#define LL_RCC_PLLR_DIV_7 (RCC_PLLCFGR_PLLR) /*!< Main PLL division factor for PLLCLK (system clock) by 7 */ +/** + * @} + */ +#endif /* RCC_PLLCFGR_PLLR */ + +#if defined(RCC_DCKCFGR_PLLDIVR) +/** @defgroup RCC_LL_EC_PLLDIVR PLLDIVR division factor (PLLDIVR) + * @{ + */ +#define LL_RCC_PLLDIVR_DIV_1 (RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 1 */ +#define LL_RCC_PLLDIVR_DIV_2 (RCC_DCKCFGR_PLLDIVR_1) /*!< PLL division factor for PLLDIVR output by 2 */ +#define LL_RCC_PLLDIVR_DIV_3 (RCC_DCKCFGR_PLLDIVR_1 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 3 */ +#define LL_RCC_PLLDIVR_DIV_4 (RCC_DCKCFGR_PLLDIVR_2) /*!< PLL division factor for PLLDIVR output by 4 */ +#define LL_RCC_PLLDIVR_DIV_5 (RCC_DCKCFGR_PLLDIVR_2 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 5 */ +#define LL_RCC_PLLDIVR_DIV_6 (RCC_DCKCFGR_PLLDIVR_2 | RCC_DCKCFGR_PLLDIVR_1) /*!< PLL division factor for PLLDIVR output by 6 */ +#define LL_RCC_PLLDIVR_DIV_7 (RCC_DCKCFGR_PLLDIVR_2 | RCC_DCKCFGR_PLLDIVR_1 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 7 */ +#define LL_RCC_PLLDIVR_DIV_8 (RCC_DCKCFGR_PLLDIVR_3) /*!< PLL division factor for PLLDIVR output by 8 */ +#define LL_RCC_PLLDIVR_DIV_9 (RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 9 */ +#define LL_RCC_PLLDIVR_DIV_10 (RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_1) /*!< PLL division factor for PLLDIVR output by 10 */ +#define LL_RCC_PLLDIVR_DIV_11 (RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_1 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 11 */ +#define LL_RCC_PLLDIVR_DIV_12 (RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_2) /*!< PLL division factor for PLLDIVR output by 12 */ +#define LL_RCC_PLLDIVR_DIV_13 (RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_2 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 13 */ +#define LL_RCC_PLLDIVR_DIV_14 (RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_2 | RCC_DCKCFGR_PLLDIVR_1) /*!< PLL division factor for PLLDIVR output by 14 */ +#define LL_RCC_PLLDIVR_DIV_15 (RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_2 | RCC_DCKCFGR_PLLDIVR_1 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 15 */ +#define LL_RCC_PLLDIVR_DIV_16 (RCC_DCKCFGR_PLLDIVR_4) /*!< PLL division factor for PLLDIVR output by 16 */ +#define LL_RCC_PLLDIVR_DIV_17 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 17 */ +#define LL_RCC_PLLDIVR_DIV_18 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_1) /*!< PLL division factor for PLLDIVR output by 18 */ +#define LL_RCC_PLLDIVR_DIV_19 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_1 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 19 */ +#define LL_RCC_PLLDIVR_DIV_20 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_2) /*!< PLL division factor for PLLDIVR output by 20 */ +#define LL_RCC_PLLDIVR_DIV_21 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_2 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 21 */ +#define LL_RCC_PLLDIVR_DIV_22 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_2 | RCC_DCKCFGR_PLLDIVR_1) /*!< PLL division factor for PLLDIVR 
output by 22 */ +#define LL_RCC_PLLDIVR_DIV_23 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_2 | RCC_DCKCFGR_PLLDIVR_1 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 23 */ +#define LL_RCC_PLLDIVR_DIV_24 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_3) /*!< PLL division factor for PLLDIVR output by 24 */ +#define LL_RCC_PLLDIVR_DIV_25 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 25 */ +#define LL_RCC_PLLDIVR_DIV_26 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_1) /*!< PLL division factor for PLLDIVR output by 26 */ +#define LL_RCC_PLLDIVR_DIV_27 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_1 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 27 */ +#define LL_RCC_PLLDIVR_DIV_28 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_2) /*!< PLL division factor for PLLDIVR output by 28 */ +#define LL_RCC_PLLDIVR_DIV_29 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_2 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 29 */ +#define LL_RCC_PLLDIVR_DIV_30 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_2 | RCC_DCKCFGR_PLLDIVR_1) /*!< PLL division factor for PLLDIVR output by 30 */ +#define LL_RCC_PLLDIVR_DIV_31 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_2 | RCC_DCKCFGR_PLLDIVR_1 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 31 */ +/** + * @} + */ +#endif /* RCC_DCKCFGR_PLLDIVR */ + +/** @defgroup RCC_LL_EC_PLLP_DIV PLL division factor (PLLP) + * @{ + */ +#define LL_RCC_PLLP_DIV_2 0x00000000U /*!< Main PLL division factor for PLLP output by 2 */ +#define LL_RCC_PLLP_DIV_4 RCC_PLLCFGR_PLLP_0 /*!< Main PLL division factor for PLLP output by 4 */ +#define LL_RCC_PLLP_DIV_6 RCC_PLLCFGR_PLLP_1 /*!< Main PLL division factor for PLLP output by 6 */ +#define LL_RCC_PLLP_DIV_8 (RCC_PLLCFGR_PLLP_1 | RCC_PLLCFGR_PLLP_0) /*!< Main PLL division factor for PLLP output by 8 */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_PLLQ_DIV PLL division factor (PLLQ) + * @{ + */ +#define LL_RCC_PLLQ_DIV_2 RCC_PLLCFGR_PLLQ_1 /*!< Main PLL division factor for PLLQ output by 2 */ +#define LL_RCC_PLLQ_DIV_3 (RCC_PLLCFGR_PLLQ_1|RCC_PLLCFGR_PLLQ_0) /*!< Main PLL division factor for PLLQ output by 3 */ +#define LL_RCC_PLLQ_DIV_4 RCC_PLLCFGR_PLLQ_2 /*!< Main PLL division factor for PLLQ output by 4 */ +#define LL_RCC_PLLQ_DIV_5 (RCC_PLLCFGR_PLLQ_2|RCC_PLLCFGR_PLLQ_0) /*!< Main PLL division factor for PLLQ output by 5 */ +#define LL_RCC_PLLQ_DIV_6 (RCC_PLLCFGR_PLLQ_2|RCC_PLLCFGR_PLLQ_1) /*!< Main PLL division factor for PLLQ output by 6 */ +#define LL_RCC_PLLQ_DIV_7 (RCC_PLLCFGR_PLLQ_2|RCC_PLLCFGR_PLLQ_1|RCC_PLLCFGR_PLLQ_0) /*!< Main PLL division factor for PLLQ output by 7 */ +#define LL_RCC_PLLQ_DIV_8 RCC_PLLCFGR_PLLQ_3 /*!< Main PLL division factor for PLLQ output by 8 */ +#define LL_RCC_PLLQ_DIV_9 (RCC_PLLCFGR_PLLQ_3|RCC_PLLCFGR_PLLQ_0) /*!< Main PLL division factor for PLLQ output by 9 */ +#define LL_RCC_PLLQ_DIV_10 (RCC_PLLCFGR_PLLQ_3|RCC_PLLCFGR_PLLQ_1) /*!< Main PLL division factor for PLLQ output by 10 */ +#define LL_RCC_PLLQ_DIV_11 (RCC_PLLCFGR_PLLQ_3|RCC_PLLCFGR_PLLQ_1|RCC_PLLCFGR_PLLQ_0) /*!< Main PLL division factor for PLLQ output by 11 */ +#define LL_RCC_PLLQ_DIV_12 (RCC_PLLCFGR_PLLQ_3|RCC_PLLCFGR_PLLQ_2) /*!< Main PLL division factor for PLLQ output by 12 */ +#define LL_RCC_PLLQ_DIV_13 
(RCC_PLLCFGR_PLLQ_3|RCC_PLLCFGR_PLLQ_2|RCC_PLLCFGR_PLLQ_0) /*!< Main PLL division factor for PLLQ output by 13 */ +#define LL_RCC_PLLQ_DIV_14 (RCC_PLLCFGR_PLLQ_3|RCC_PLLCFGR_PLLQ_2|RCC_PLLCFGR_PLLQ_1) /*!< Main PLL division factor for PLLQ output by 14 */ +#define LL_RCC_PLLQ_DIV_15 (RCC_PLLCFGR_PLLQ_3|RCC_PLLCFGR_PLLQ_2|RCC_PLLCFGR_PLLQ_1|RCC_PLLCFGR_PLLQ_0) /*!< Main PLL division factor for PLLQ output by 15 */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_PLL_SPRE_SEL PLL Spread Spectrum Selection + * @{ + */ +#define LL_RCC_SPREAD_SELECT_CENTER 0x00000000U /*!< PLL center spread spectrum selection */ +#define LL_RCC_SPREAD_SELECT_DOWN RCC_SSCGR_SPREADSEL /*!< PLL down spread spectrum selection */ +/** + * @} + */ + +#if defined(RCC_PLLI2S_SUPPORT) +/** @defgroup RCC_LL_EC_PLLI2SM PLLI2SM division factor (PLLI2SM) + * @{ + */ +#if defined(RCC_PLLI2SCFGR_PLLI2SM) +#define LL_RCC_PLLI2SM_DIV_2 (RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 2 */ +#define LL_RCC_PLLI2SM_DIV_3 (RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 3 */ +#define LL_RCC_PLLI2SM_DIV_4 (RCC_PLLI2SCFGR_PLLI2SM_2) /*!< PLLI2S division factor for PLLI2SM output by 4 */ +#define LL_RCC_PLLI2SM_DIV_5 (RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 5 */ +#define LL_RCC_PLLI2SM_DIV_6 (RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 6 */ +#define LL_RCC_PLLI2SM_DIV_7 (RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 7 */ +#define LL_RCC_PLLI2SM_DIV_8 (RCC_PLLI2SCFGR_PLLI2SM_3) /*!< PLLI2S division factor for PLLI2SM output by 8 */ +#define LL_RCC_PLLI2SM_DIV_9 (RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 9 */ +#define LL_RCC_PLLI2SM_DIV_10 (RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 10 */ +#define LL_RCC_PLLI2SM_DIV_11 (RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 11 */ +#define LL_RCC_PLLI2SM_DIV_12 (RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2) /*!< PLLI2S division factor for PLLI2SM output by 12 */ +#define LL_RCC_PLLI2SM_DIV_13 (RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 13 */ +#define LL_RCC_PLLI2SM_DIV_14 (RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 14 */ +#define LL_RCC_PLLI2SM_DIV_15 (RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 15 */ +#define LL_RCC_PLLI2SM_DIV_16 (RCC_PLLI2SCFGR_PLLI2SM_4) /*!< PLLI2S division factor for PLLI2SM output by 16 */ +#define LL_RCC_PLLI2SM_DIV_17 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 17 */ +#define LL_RCC_PLLI2SM_DIV_18 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 18 */ +#define LL_RCC_PLLI2SM_DIV_19 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 19 */ +#define LL_RCC_PLLI2SM_DIV_20 (RCC_PLLI2SCFGR_PLLI2SM_4 | 
RCC_PLLI2SCFGR_PLLI2SM_2) /*!< PLLI2S division factor for PLLI2SM output by 20 */ +#define LL_RCC_PLLI2SM_DIV_21 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 21 */ +#define LL_RCC_PLLI2SM_DIV_22 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 22 */ +#define LL_RCC_PLLI2SM_DIV_23 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 23 */ +#define LL_RCC_PLLI2SM_DIV_24 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3) /*!< PLLI2S division factor for PLLI2SM output by 24 */ +#define LL_RCC_PLLI2SM_DIV_25 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 25 */ +#define LL_RCC_PLLI2SM_DIV_26 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 26 */ +#define LL_RCC_PLLI2SM_DIV_27 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 27 */ +#define LL_RCC_PLLI2SM_DIV_28 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2) /*!< PLLI2S division factor for PLLI2SM output by 28 */ +#define LL_RCC_PLLI2SM_DIV_29 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 29 */ +#define LL_RCC_PLLI2SM_DIV_30 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 30 */ +#define LL_RCC_PLLI2SM_DIV_31 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 31 */ +#define LL_RCC_PLLI2SM_DIV_32 (RCC_PLLI2SCFGR_PLLI2SM_5) /*!< PLLI2S division factor for PLLI2SM output by 32 */ +#define LL_RCC_PLLI2SM_DIV_33 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 33 */ +#define LL_RCC_PLLI2SM_DIV_34 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 34 */ +#define LL_RCC_PLLI2SM_DIV_35 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 35 */ +#define LL_RCC_PLLI2SM_DIV_36 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_2) /*!< PLLI2S division factor for PLLI2SM output by 36 */ +#define LL_RCC_PLLI2SM_DIV_37 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 37 */ +#define LL_RCC_PLLI2SM_DIV_38 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 38 */ +#define LL_RCC_PLLI2SM_DIV_39 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 39 */ +#define LL_RCC_PLLI2SM_DIV_40 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_3) /*!< PLLI2S division factor for PLLI2SM output by 40 */ +#define LL_RCC_PLLI2SM_DIV_41 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_0) 
/*!< PLLI2S division factor for PLLI2SM output by 41 */ +#define LL_RCC_PLLI2SM_DIV_42 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 42 */ +#define LL_RCC_PLLI2SM_DIV_43 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 43 */ +#define LL_RCC_PLLI2SM_DIV_44 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2) /*!< PLLI2S division factor for PLLI2SM output by 44 */ +#define LL_RCC_PLLI2SM_DIV_45 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 45 */ +#define LL_RCC_PLLI2SM_DIV_46 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 46 */ +#define LL_RCC_PLLI2SM_DIV_47 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 47 */ +#define LL_RCC_PLLI2SM_DIV_48 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4) /*!< PLLI2S division factor for PLLI2SM output by 48 */ +#define LL_RCC_PLLI2SM_DIV_49 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 49 */ +#define LL_RCC_PLLI2SM_DIV_50 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 50 */ +#define LL_RCC_PLLI2SM_DIV_51 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 51 */ +#define LL_RCC_PLLI2SM_DIV_52 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_2) /*!< PLLI2S division factor for PLLI2SM output by 52 */ +#define LL_RCC_PLLI2SM_DIV_53 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 53 */ +#define LL_RCC_PLLI2SM_DIV_54 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 54 */ +#define LL_RCC_PLLI2SM_DIV_55 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 55 */ +#define LL_RCC_PLLI2SM_DIV_56 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3) /*!< PLLI2S division factor for PLLI2SM output by 56 */ +#define LL_RCC_PLLI2SM_DIV_57 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 57 */ +#define LL_RCC_PLLI2SM_DIV_58 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 58 */ +#define LL_RCC_PLLI2SM_DIV_59 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 59 */ +#define LL_RCC_PLLI2SM_DIV_60 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | 
RCC_PLLI2SCFGR_PLLI2SM_2) /*!< PLLI2S division factor for PLLI2SM output by 60 */ +#define LL_RCC_PLLI2SM_DIV_61 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 61 */ +#define LL_RCC_PLLI2SM_DIV_62 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 62 */ +#define LL_RCC_PLLI2SM_DIV_63 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 63 */ +#else +#define LL_RCC_PLLI2SM_DIV_2 LL_RCC_PLLM_DIV_2 /*!< PLLI2S division factor for PLLI2SM output by 2 */ +#define LL_RCC_PLLI2SM_DIV_3 LL_RCC_PLLM_DIV_3 /*!< PLLI2S division factor for PLLI2SM output by 3 */ +#define LL_RCC_PLLI2SM_DIV_4 LL_RCC_PLLM_DIV_4 /*!< PLLI2S division factor for PLLI2SM output by 4 */ +#define LL_RCC_PLLI2SM_DIV_5 LL_RCC_PLLM_DIV_5 /*!< PLLI2S division factor for PLLI2SM output by 5 */ +#define LL_RCC_PLLI2SM_DIV_6 LL_RCC_PLLM_DIV_6 /*!< PLLI2S division factor for PLLI2SM output by 6 */ +#define LL_RCC_PLLI2SM_DIV_7 LL_RCC_PLLM_DIV_7 /*!< PLLI2S division factor for PLLI2SM output by 7 */ +#define LL_RCC_PLLI2SM_DIV_8 LL_RCC_PLLM_DIV_8 /*!< PLLI2S division factor for PLLI2SM output by 8 */ +#define LL_RCC_PLLI2SM_DIV_9 LL_RCC_PLLM_DIV_9 /*!< PLLI2S division factor for PLLI2SM output by 9 */ +#define LL_RCC_PLLI2SM_DIV_10 LL_RCC_PLLM_DIV_10 /*!< PLLI2S division factor for PLLI2SM output by 10 */ +#define LL_RCC_PLLI2SM_DIV_11 LL_RCC_PLLM_DIV_11 /*!< PLLI2S division factor for PLLI2SM output by 11 */ +#define LL_RCC_PLLI2SM_DIV_12 LL_RCC_PLLM_DIV_12 /*!< PLLI2S division factor for PLLI2SM output by 12 */ +#define LL_RCC_PLLI2SM_DIV_13 LL_RCC_PLLM_DIV_13 /*!< PLLI2S division factor for PLLI2SM output by 13 */ +#define LL_RCC_PLLI2SM_DIV_14 LL_RCC_PLLM_DIV_14 /*!< PLLI2S division factor for PLLI2SM output by 14 */ +#define LL_RCC_PLLI2SM_DIV_15 LL_RCC_PLLM_DIV_15 /*!< PLLI2S division factor for PLLI2SM output by 15 */ +#define LL_RCC_PLLI2SM_DIV_16 LL_RCC_PLLM_DIV_16 /*!< PLLI2S division factor for PLLI2SM output by 16 */ +#define LL_RCC_PLLI2SM_DIV_17 LL_RCC_PLLM_DIV_17 /*!< PLLI2S division factor for PLLI2SM output by 17 */ +#define LL_RCC_PLLI2SM_DIV_18 LL_RCC_PLLM_DIV_18 /*!< PLLI2S division factor for PLLI2SM output by 18 */ +#define LL_RCC_PLLI2SM_DIV_19 LL_RCC_PLLM_DIV_19 /*!< PLLI2S division factor for PLLI2SM output by 19 */ +#define LL_RCC_PLLI2SM_DIV_20 LL_RCC_PLLM_DIV_20 /*!< PLLI2S division factor for PLLI2SM output by 20 */ +#define LL_RCC_PLLI2SM_DIV_21 LL_RCC_PLLM_DIV_21 /*!< PLLI2S division factor for PLLI2SM output by 21 */ +#define LL_RCC_PLLI2SM_DIV_22 LL_RCC_PLLM_DIV_22 /*!< PLLI2S division factor for PLLI2SM output by 22 */ +#define LL_RCC_PLLI2SM_DIV_23 LL_RCC_PLLM_DIV_23 /*!< PLLI2S division factor for PLLI2SM output by 23 */ +#define LL_RCC_PLLI2SM_DIV_24 LL_RCC_PLLM_DIV_24 /*!< PLLI2S division factor for PLLI2SM output by 24 */ +#define LL_RCC_PLLI2SM_DIV_25 LL_RCC_PLLM_DIV_25 /*!< PLLI2S division factor for PLLI2SM output by 25 */ +#define LL_RCC_PLLI2SM_DIV_26 LL_RCC_PLLM_DIV_26 /*!< PLLI2S division factor for PLLI2SM output by 26 */ +#define LL_RCC_PLLI2SM_DIV_27 LL_RCC_PLLM_DIV_27 /*!< PLLI2S division factor for PLLI2SM output by 27 */ +#define LL_RCC_PLLI2SM_DIV_28 
LL_RCC_PLLM_DIV_28 /*!< PLLI2S division factor for PLLI2SM output by 28 */ +#define LL_RCC_PLLI2SM_DIV_29 LL_RCC_PLLM_DIV_29 /*!< PLLI2S division factor for PLLI2SM output by 29 */ +#define LL_RCC_PLLI2SM_DIV_30 LL_RCC_PLLM_DIV_30 /*!< PLLI2S division factor for PLLI2SM output by 30 */ +#define LL_RCC_PLLI2SM_DIV_31 LL_RCC_PLLM_DIV_31 /*!< PLLI2S division factor for PLLI2SM output by 31 */ +#define LL_RCC_PLLI2SM_DIV_32 LL_RCC_PLLM_DIV_32 /*!< PLLI2S division factor for PLLI2SM output by 32 */ +#define LL_RCC_PLLI2SM_DIV_33 LL_RCC_PLLM_DIV_33 /*!< PLLI2S division factor for PLLI2SM output by 33 */ +#define LL_RCC_PLLI2SM_DIV_34 LL_RCC_PLLM_DIV_34 /*!< PLLI2S division factor for PLLI2SM output by 34 */ +#define LL_RCC_PLLI2SM_DIV_35 LL_RCC_PLLM_DIV_35 /*!< PLLI2S division factor for PLLI2SM output by 35 */ +#define LL_RCC_PLLI2SM_DIV_36 LL_RCC_PLLM_DIV_36 /*!< PLLI2S division factor for PLLI2SM output by 36 */ +#define LL_RCC_PLLI2SM_DIV_37 LL_RCC_PLLM_DIV_37 /*!< PLLI2S division factor for PLLI2SM output by 37 */ +#define LL_RCC_PLLI2SM_DIV_38 LL_RCC_PLLM_DIV_38 /*!< PLLI2S division factor for PLLI2SM output by 38 */ +#define LL_RCC_PLLI2SM_DIV_39 LL_RCC_PLLM_DIV_39 /*!< PLLI2S division factor for PLLI2SM output by 39 */ +#define LL_RCC_PLLI2SM_DIV_40 LL_RCC_PLLM_DIV_40 /*!< PLLI2S division factor for PLLI2SM output by 40 */ +#define LL_RCC_PLLI2SM_DIV_41 LL_RCC_PLLM_DIV_41 /*!< PLLI2S division factor for PLLI2SM output by 41 */ +#define LL_RCC_PLLI2SM_DIV_42 LL_RCC_PLLM_DIV_42 /*!< PLLI2S division factor for PLLI2SM output by 42 */ +#define LL_RCC_PLLI2SM_DIV_43 LL_RCC_PLLM_DIV_43 /*!< PLLI2S division factor for PLLI2SM output by 43 */ +#define LL_RCC_PLLI2SM_DIV_44 LL_RCC_PLLM_DIV_44 /*!< PLLI2S division factor for PLLI2SM output by 44 */ +#define LL_RCC_PLLI2SM_DIV_45 LL_RCC_PLLM_DIV_45 /*!< PLLI2S division factor for PLLI2SM output by 45 */ +#define LL_RCC_PLLI2SM_DIV_46 LL_RCC_PLLM_DIV_46 /*!< PLLI2S division factor for PLLI2SM output by 46 */ +#define LL_RCC_PLLI2SM_DIV_47 LL_RCC_PLLM_DIV_47 /*!< PLLI2S division factor for PLLI2SM output by 47 */ +#define LL_RCC_PLLI2SM_DIV_48 LL_RCC_PLLM_DIV_48 /*!< PLLI2S division factor for PLLI2SM output by 48 */ +#define LL_RCC_PLLI2SM_DIV_49 LL_RCC_PLLM_DIV_49 /*!< PLLI2S division factor for PLLI2SM output by 49 */ +#define LL_RCC_PLLI2SM_DIV_50 LL_RCC_PLLM_DIV_50 /*!< PLLI2S division factor for PLLI2SM output by 50 */ +#define LL_RCC_PLLI2SM_DIV_51 LL_RCC_PLLM_DIV_51 /*!< PLLI2S division factor for PLLI2SM output by 51 */ +#define LL_RCC_PLLI2SM_DIV_52 LL_RCC_PLLM_DIV_52 /*!< PLLI2S division factor for PLLI2SM output by 52 */ +#define LL_RCC_PLLI2SM_DIV_53 LL_RCC_PLLM_DIV_53 /*!< PLLI2S division factor for PLLI2SM output by 53 */ +#define LL_RCC_PLLI2SM_DIV_54 LL_RCC_PLLM_DIV_54 /*!< PLLI2S division factor for PLLI2SM output by 54 */ +#define LL_RCC_PLLI2SM_DIV_55 LL_RCC_PLLM_DIV_55 /*!< PLLI2S division factor for PLLI2SM output by 55 */ +#define LL_RCC_PLLI2SM_DIV_56 LL_RCC_PLLM_DIV_56 /*!< PLLI2S division factor for PLLI2SM output by 56 */ +#define LL_RCC_PLLI2SM_DIV_57 LL_RCC_PLLM_DIV_57 /*!< PLLI2S division factor for PLLI2SM output by 57 */ +#define LL_RCC_PLLI2SM_DIV_58 LL_RCC_PLLM_DIV_58 /*!< PLLI2S division factor for PLLI2SM output by 58 */ +#define LL_RCC_PLLI2SM_DIV_59 LL_RCC_PLLM_DIV_59 /*!< PLLI2S division factor for PLLI2SM output by 59 */ +#define LL_RCC_PLLI2SM_DIV_60 LL_RCC_PLLM_DIV_60 /*!< PLLI2S division factor for PLLI2SM output by 60 */ +#define LL_RCC_PLLI2SM_DIV_61 LL_RCC_PLLM_DIV_61 /*!< PLLI2S division factor for 
PLLI2SM output by 61 */ +#define LL_RCC_PLLI2SM_DIV_62 LL_RCC_PLLM_DIV_62 /*!< PLLI2S division factor for PLLI2SM output by 62 */ +#define LL_RCC_PLLI2SM_DIV_63 LL_RCC_PLLM_DIV_63 /*!< PLLI2S division factor for PLLI2SM output by 63 */ +#endif /* RCC_PLLI2SCFGR_PLLI2SM */ +/** + * @} + */ + +#if defined(RCC_PLLI2SCFGR_PLLI2SQ) +/** @defgroup RCC_LL_EC_PLLI2SQ PLLI2SQ division factor (PLLI2SQ) + * @{ + */ +#define LL_RCC_PLLI2SQ_DIV_2 RCC_PLLI2SCFGR_PLLI2SQ_1 /*!< PLLI2S division factor for PLLI2SQ output by 2 */ +#define LL_RCC_PLLI2SQ_DIV_3 (RCC_PLLI2SCFGR_PLLI2SQ_1 | RCC_PLLI2SCFGR_PLLI2SQ_0) /*!< PLLI2S division factor for PLLI2SQ output by 3 */ +#define LL_RCC_PLLI2SQ_DIV_4 RCC_PLLI2SCFGR_PLLI2SQ_2 /*!< PLLI2S division factor for PLLI2SQ output by 4 */ +#define LL_RCC_PLLI2SQ_DIV_5 (RCC_PLLI2SCFGR_PLLI2SQ_2 | RCC_PLLI2SCFGR_PLLI2SQ_0) /*!< PLLI2S division factor for PLLI2SQ output by 5 */ +#define LL_RCC_PLLI2SQ_DIV_6 (RCC_PLLI2SCFGR_PLLI2SQ_2 | RCC_PLLI2SCFGR_PLLI2SQ_1) /*!< PLLI2S division factor for PLLI2SQ output by 6 */ +#define LL_RCC_PLLI2SQ_DIV_7 (RCC_PLLI2SCFGR_PLLI2SQ_2 | RCC_PLLI2SCFGR_PLLI2SQ_1 | RCC_PLLI2SCFGR_PLLI2SQ_0) /*!< PLLI2S division factor for PLLI2SQ output by 7 */ +#define LL_RCC_PLLI2SQ_DIV_8 RCC_PLLI2SCFGR_PLLI2SQ_3 /*!< PLLI2S division factor for PLLI2SQ output by 8 */ +#define LL_RCC_PLLI2SQ_DIV_9 (RCC_PLLI2SCFGR_PLLI2SQ_3 | RCC_PLLI2SCFGR_PLLI2SQ_0) /*!< PLLI2S division factor for PLLI2SQ output by 9 */ +#define LL_RCC_PLLI2SQ_DIV_10 (RCC_PLLI2SCFGR_PLLI2SQ_3 | RCC_PLLI2SCFGR_PLLI2SQ_1) /*!< PLLI2S division factor for PLLI2SQ output by 10 */ +#define LL_RCC_PLLI2SQ_DIV_11 (RCC_PLLI2SCFGR_PLLI2SQ_3 | RCC_PLLI2SCFGR_PLLI2SQ_1 | RCC_PLLI2SCFGR_PLLI2SQ_0) /*!< PLLI2S division factor for PLLI2SQ output by 11 */ +#define LL_RCC_PLLI2SQ_DIV_12 (RCC_PLLI2SCFGR_PLLI2SQ_3 | RCC_PLLI2SCFGR_PLLI2SQ_2) /*!< PLLI2S division factor for PLLI2SQ output by 12 */ +#define LL_RCC_PLLI2SQ_DIV_13 (RCC_PLLI2SCFGR_PLLI2SQ_3 | RCC_PLLI2SCFGR_PLLI2SQ_2 | RCC_PLLI2SCFGR_PLLI2SQ_0) /*!< PLLI2S division factor for PLLI2SQ output by 13 */ +#define LL_RCC_PLLI2SQ_DIV_14 (RCC_PLLI2SCFGR_PLLI2SQ_3 | RCC_PLLI2SCFGR_PLLI2SQ_2 | RCC_PLLI2SCFGR_PLLI2SQ_1) /*!< PLLI2S division factor for PLLI2SQ output by 14 */ +#define LL_RCC_PLLI2SQ_DIV_15 (RCC_PLLI2SCFGR_PLLI2SQ_3 | RCC_PLLI2SCFGR_PLLI2SQ_2 | RCC_PLLI2SCFGR_PLLI2SQ_1 | RCC_PLLI2SCFGR_PLLI2SQ_0) /*!< PLLI2S division factor for PLLI2SQ output by 15 */ +/** + * @} + */ +#endif /* RCC_PLLI2SCFGR_PLLI2SQ */ + +#if defined(RCC_DCKCFGR_PLLI2SDIVQ) +/** @defgroup RCC_LL_EC_PLLI2SDIVQ PLLI2SDIVQ division factor (PLLI2SDIVQ) + * @{ + */ +#define LL_RCC_PLLI2SDIVQ_DIV_1 0x00000000U /*!< PLLI2S division factor for PLLI2SDIVQ output by 1 */ +#define LL_RCC_PLLI2SDIVQ_DIV_2 RCC_DCKCFGR_PLLI2SDIVQ_0 /*!< PLLI2S division factor for PLLI2SDIVQ output by 2 */ +#define LL_RCC_PLLI2SDIVQ_DIV_3 RCC_DCKCFGR_PLLI2SDIVQ_1 /*!< PLLI2S division factor for PLLI2SDIVQ output by 3 */ +#define LL_RCC_PLLI2SDIVQ_DIV_4 (RCC_DCKCFGR_PLLI2SDIVQ_1 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 4 */ +#define LL_RCC_PLLI2SDIVQ_DIV_5 RCC_DCKCFGR_PLLI2SDIVQ_2 /*!< PLLI2S division factor for PLLI2SDIVQ output by 5 */ +#define LL_RCC_PLLI2SDIVQ_DIV_6 (RCC_DCKCFGR_PLLI2SDIVQ_2 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 6 */ +#define LL_RCC_PLLI2SDIVQ_DIV_7 (RCC_DCKCFGR_PLLI2SDIVQ_2 | RCC_DCKCFGR_PLLI2SDIVQ_1) /*!< PLLI2S division factor for PLLI2SDIVQ output by 7 */ +#define LL_RCC_PLLI2SDIVQ_DIV_8 
(RCC_DCKCFGR_PLLI2SDIVQ_2 | RCC_DCKCFGR_PLLI2SDIVQ_1 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 8 */ +#define LL_RCC_PLLI2SDIVQ_DIV_9 RCC_DCKCFGR_PLLI2SDIVQ_3 /*!< PLLI2S division factor for PLLI2SDIVQ output by 9 */ +#define LL_RCC_PLLI2SDIVQ_DIV_10 (RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 10 */ +#define LL_RCC_PLLI2SDIVQ_DIV_11 (RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_1) /*!< PLLI2S division factor for PLLI2SDIVQ output by 11 */ +#define LL_RCC_PLLI2SDIVQ_DIV_12 (RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_1 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 12 */ +#define LL_RCC_PLLI2SDIVQ_DIV_13 (RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_2) /*!< PLLI2S division factor for PLLI2SDIVQ output by 13 */ +#define LL_RCC_PLLI2SDIVQ_DIV_14 (RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_2 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 14 */ +#define LL_RCC_PLLI2SDIVQ_DIV_15 (RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_2 | RCC_DCKCFGR_PLLI2SDIVQ_1) /*!< PLLI2S division factor for PLLI2SDIVQ output by 15 */ +#define LL_RCC_PLLI2SDIVQ_DIV_16 (RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_2 | RCC_DCKCFGR_PLLI2SDIVQ_1 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 16 */ +#define LL_RCC_PLLI2SDIVQ_DIV_17 RCC_DCKCFGR_PLLI2SDIVQ_4 /*!< PLLI2S division factor for PLLI2SDIVQ output by 17 */ +#define LL_RCC_PLLI2SDIVQ_DIV_18 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 18 */ +#define LL_RCC_PLLI2SDIVQ_DIV_19 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_1) /*!< PLLI2S division factor for PLLI2SDIVQ output by 19 */ +#define LL_RCC_PLLI2SDIVQ_DIV_20 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_1 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 20 */ +#define LL_RCC_PLLI2SDIVQ_DIV_21 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_2) /*!< PLLI2S division factor for PLLI2SDIVQ output by 21 */ +#define LL_RCC_PLLI2SDIVQ_DIV_22 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_2 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 22 */ +#define LL_RCC_PLLI2SDIVQ_DIV_23 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_2 | RCC_DCKCFGR_PLLI2SDIVQ_1) /*!< PLLI2S division factor for PLLI2SDIVQ output by 23 */ +#define LL_RCC_PLLI2SDIVQ_DIV_24 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_2 | RCC_DCKCFGR_PLLI2SDIVQ_1 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 24 */ +#define LL_RCC_PLLI2SDIVQ_DIV_25 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_3) /*!< PLLI2S division factor for PLLI2SDIVQ output by 25 */ +#define LL_RCC_PLLI2SDIVQ_DIV_26 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 26 */ +#define LL_RCC_PLLI2SDIVQ_DIV_27 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_1) /*!< PLLI2S division factor for PLLI2SDIVQ output by 27 */ +#define LL_RCC_PLLI2SDIVQ_DIV_28 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_1 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 28 */ +#define LL_RCC_PLLI2SDIVQ_DIV_29 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_2) /*!< PLLI2S division factor for PLLI2SDIVQ 
output by 29 */ +#define LL_RCC_PLLI2SDIVQ_DIV_30 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_2 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 30 */ +#define LL_RCC_PLLI2SDIVQ_DIV_31 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_2 | RCC_DCKCFGR_PLLI2SDIVQ_1) /*!< PLLI2S division factor for PLLI2SDIVQ output by 31 */ +#define LL_RCC_PLLI2SDIVQ_DIV_32 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_2 | RCC_DCKCFGR_PLLI2SDIVQ_1 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 32 */ +/** + * @} + */ +#endif /* RCC_DCKCFGR_PLLI2SDIVQ */ + +#if defined(RCC_DCKCFGR_PLLI2SDIVR) +/** @defgroup RCC_LL_EC_PLLI2SDIVR PLLI2SDIVR division factor (PLLI2SDIVR) + * @{ + */ +#define LL_RCC_PLLI2SDIVR_DIV_1 (RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 1 */ +#define LL_RCC_PLLI2SDIVR_DIV_2 (RCC_DCKCFGR_PLLI2SDIVR_1) /*!< PLLI2S division factor for PLLI2SDIVR output by 2 */ +#define LL_RCC_PLLI2SDIVR_DIV_3 (RCC_DCKCFGR_PLLI2SDIVR_1 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 3 */ +#define LL_RCC_PLLI2SDIVR_DIV_4 (RCC_DCKCFGR_PLLI2SDIVR_2) /*!< PLLI2S division factor for PLLI2SDIVR output by 4 */ +#define LL_RCC_PLLI2SDIVR_DIV_5 (RCC_DCKCFGR_PLLI2SDIVR_2 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 5 */ +#define LL_RCC_PLLI2SDIVR_DIV_6 (RCC_DCKCFGR_PLLI2SDIVR_2 | RCC_DCKCFGR_PLLI2SDIVR_1) /*!< PLLI2S division factor for PLLI2SDIVR output by 6 */ +#define LL_RCC_PLLI2SDIVR_DIV_7 (RCC_DCKCFGR_PLLI2SDIVR_2 | RCC_DCKCFGR_PLLI2SDIVR_1 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 7 */ +#define LL_RCC_PLLI2SDIVR_DIV_8 (RCC_DCKCFGR_PLLI2SDIVR_3) /*!< PLLI2S division factor for PLLI2SDIVR output by 8 */ +#define LL_RCC_PLLI2SDIVR_DIV_9 (RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 9 */ +#define LL_RCC_PLLI2SDIVR_DIV_10 (RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_1) /*!< PLLI2S division factor for PLLI2SDIVR output by 10 */ +#define LL_RCC_PLLI2SDIVR_DIV_11 (RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_1 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 11 */ +#define LL_RCC_PLLI2SDIVR_DIV_12 (RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_2) /*!< PLLI2S division factor for PLLI2SDIVR output by 12 */ +#define LL_RCC_PLLI2SDIVR_DIV_13 (RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_2 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 13 */ +#define LL_RCC_PLLI2SDIVR_DIV_14 (RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_2 | RCC_DCKCFGR_PLLI2SDIVR_1) /*!< PLLI2S division factor for PLLI2SDIVR output by 14 */ +#define LL_RCC_PLLI2SDIVR_DIV_15 (RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_2 | RCC_DCKCFGR_PLLI2SDIVR_1 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 15 */ +#define LL_RCC_PLLI2SDIVR_DIV_16 (RCC_DCKCFGR_PLLI2SDIVR_4) /*!< PLLI2S division factor for PLLI2SDIVR output by 16 */ +#define LL_RCC_PLLI2SDIVR_DIV_17 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 17 */ +#define LL_RCC_PLLI2SDIVR_DIV_18 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_1) /*!< PLLI2S division factor for PLLI2SDIVR output by 18 */ +#define LL_RCC_PLLI2SDIVR_DIV_19 (RCC_DCKCFGR_PLLI2SDIVR_4 | 
RCC_DCKCFGR_PLLI2SDIVR_1 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 19 */ +#define LL_RCC_PLLI2SDIVR_DIV_20 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_2) /*!< PLLI2S division factor for PLLI2SDIVR output by 20 */ +#define LL_RCC_PLLI2SDIVR_DIV_21 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_2 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 21 */ +#define LL_RCC_PLLI2SDIVR_DIV_22 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_2 | RCC_DCKCFGR_PLLI2SDIVR_1) /*!< PLLI2S division factor for PLLI2SDIVR output by 22 */ +#define LL_RCC_PLLI2SDIVR_DIV_23 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_2 | RCC_DCKCFGR_PLLI2SDIVR_1 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 23 */ +#define LL_RCC_PLLI2SDIVR_DIV_24 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_3) /*!< PLLI2S division factor for PLLI2SDIVR output by 24 */ +#define LL_RCC_PLLI2SDIVR_DIV_25 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 25 */ +#define LL_RCC_PLLI2SDIVR_DIV_26 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_1) /*!< PLLI2S division factor for PLLI2SDIVR output by 26 */ +#define LL_RCC_PLLI2SDIVR_DIV_27 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_1 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 27 */ +#define LL_RCC_PLLI2SDIVR_DIV_28 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_2) /*!< PLLI2S division factor for PLLI2SDIVR output by 28 */ +#define LL_RCC_PLLI2SDIVR_DIV_29 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_2 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 29 */ +#define LL_RCC_PLLI2SDIVR_DIV_30 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_2 | RCC_DCKCFGR_PLLI2SDIVR_1) /*!< PLLI2S division factor for PLLI2SDIVR output by 30 */ +#define LL_RCC_PLLI2SDIVR_DIV_31 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_2 | RCC_DCKCFGR_PLLI2SDIVR_1 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 31 */ +/** + * @} + */ +#endif /* RCC_DCKCFGR_PLLI2SDIVR */ + +/** @defgroup RCC_LL_EC_PLLI2SR PLLI2SR division factor (PLLI2SR) + * @{ + */ +#define LL_RCC_PLLI2SR_DIV_2 RCC_PLLI2SCFGR_PLLI2SR_1 /*!< PLLI2S division factor for PLLI2SR output by 2 */ +#define LL_RCC_PLLI2SR_DIV_3 (RCC_PLLI2SCFGR_PLLI2SR_1 | RCC_PLLI2SCFGR_PLLI2SR_0) /*!< PLLI2S division factor for PLLI2SR output by 3 */ +#define LL_RCC_PLLI2SR_DIV_4 RCC_PLLI2SCFGR_PLLI2SR_2 /*!< PLLI2S division factor for PLLI2SR output by 4 */ +#define LL_RCC_PLLI2SR_DIV_5 (RCC_PLLI2SCFGR_PLLI2SR_2 | RCC_PLLI2SCFGR_PLLI2SR_0) /*!< PLLI2S division factor for PLLI2SR output by 5 */ +#define LL_RCC_PLLI2SR_DIV_6 (RCC_PLLI2SCFGR_PLLI2SR_2 | RCC_PLLI2SCFGR_PLLI2SR_1) /*!< PLLI2S division factor for PLLI2SR output by 6 */ +#define LL_RCC_PLLI2SR_DIV_7 (RCC_PLLI2SCFGR_PLLI2SR_2 | RCC_PLLI2SCFGR_PLLI2SR_1 | RCC_PLLI2SCFGR_PLLI2SR_0) /*!< PLLI2S division factor for PLLI2SR output by 7 */ +/** + * @} + */ + +#if defined(RCC_PLLI2SCFGR_PLLI2SP) +/** @defgroup RCC_LL_EC_PLLI2SP PLLI2SP division factor (PLLI2SP) + * @{ + */ +#define LL_RCC_PLLI2SP_DIV_2 0x00000000U /*!< PLLI2S division factor for PLLI2SP output by 2 */ +#define LL_RCC_PLLI2SP_DIV_4 RCC_PLLI2SCFGR_PLLI2SP_0 /*!< PLLI2S division 
factor for PLLI2SP output by 4 */ +#define LL_RCC_PLLI2SP_DIV_6 RCC_PLLI2SCFGR_PLLI2SP_1 /*!< PLLI2S division factor for PLLI2SP output by 6 */ +#define LL_RCC_PLLI2SP_DIV_8 (RCC_PLLI2SCFGR_PLLI2SP_1 | RCC_PLLI2SCFGR_PLLI2SP_0) /*!< PLLI2S division factor for PLLI2SP output by 8 */ +/** + * @} + */ +#endif /* RCC_PLLI2SCFGR_PLLI2SP */ +#endif /* RCC_PLLI2S_SUPPORT */ + +#if defined(RCC_PLLSAI_SUPPORT) +/** @defgroup RCC_LL_EC_PLLSAIM PLLSAIM division factor (PLLSAIM or PLLM) + * @{ + */ +#if defined(RCC_PLLSAICFGR_PLLSAIM) +#define LL_RCC_PLLSAIM_DIV_2 (RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 2 */ +#define LL_RCC_PLLSAIM_DIV_3 (RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 3 */ +#define LL_RCC_PLLSAIM_DIV_4 (RCC_PLLSAICFGR_PLLSAIM_2) /*!< PLLSAI division factor for PLLSAIM output by 4 */ +#define LL_RCC_PLLSAIM_DIV_5 (RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 5 */ +#define LL_RCC_PLLSAIM_DIV_6 (RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 6 */ +#define LL_RCC_PLLSAIM_DIV_7 (RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 7 */ +#define LL_RCC_PLLSAIM_DIV_8 (RCC_PLLSAICFGR_PLLSAIM_3) /*!< PLLSAI division factor for PLLSAIM output by 8 */ +#define LL_RCC_PLLSAIM_DIV_9 (RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 9 */ +#define LL_RCC_PLLSAIM_DIV_10 (RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 10 */ +#define LL_RCC_PLLSAIM_DIV_11 (RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 11 */ +#define LL_RCC_PLLSAIM_DIV_12 (RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2) /*!< PLLSAI division factor for PLLSAIM output by 12 */ +#define LL_RCC_PLLSAIM_DIV_13 (RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 13 */ +#define LL_RCC_PLLSAIM_DIV_14 (RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 14 */ +#define LL_RCC_PLLSAIM_DIV_15 (RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 15 */ +#define LL_RCC_PLLSAIM_DIV_16 (RCC_PLLSAICFGR_PLLSAIM_4) /*!< PLLSAI division factor for PLLSAIM output by 16 */ +#define LL_RCC_PLLSAIM_DIV_17 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 17 */ +#define LL_RCC_PLLSAIM_DIV_18 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 18 */ +#define LL_RCC_PLLSAIM_DIV_19 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 19 */ +#define LL_RCC_PLLSAIM_DIV_20 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_2) /*!< PLLSAI division factor for PLLSAIM output by 20 */ +#define LL_RCC_PLLSAIM_DIV_21 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 21 */ +#define LL_RCC_PLLSAIM_DIV_22 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1) 
/*!< PLLSAI division factor for PLLSAIM output by 22 */ +#define LL_RCC_PLLSAIM_DIV_23 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 23 */ +#define LL_RCC_PLLSAIM_DIV_24 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3) /*!< PLLSAI division factor for PLLSAIM output by 24 */ +#define LL_RCC_PLLSAIM_DIV_25 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 25 */ +#define LL_RCC_PLLSAIM_DIV_26 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 26 */ +#define LL_RCC_PLLSAIM_DIV_27 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 27 */ +#define LL_RCC_PLLSAIM_DIV_28 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2) /*!< PLLSAI division factor for PLLSAIM output by 28 */ +#define LL_RCC_PLLSAIM_DIV_29 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 29 */ +#define LL_RCC_PLLSAIM_DIV_30 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 30 */ +#define LL_RCC_PLLSAIM_DIV_31 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 31 */ +#define LL_RCC_PLLSAIM_DIV_32 (RCC_PLLSAICFGR_PLLSAIM_5) /*!< PLLSAI division factor for PLLSAIM output by 32 */ +#define LL_RCC_PLLSAIM_DIV_33 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 33 */ +#define LL_RCC_PLLSAIM_DIV_34 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 34 */ +#define LL_RCC_PLLSAIM_DIV_35 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 35 */ +#define LL_RCC_PLLSAIM_DIV_36 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_2) /*!< PLLSAI division factor for PLLSAIM output by 36 */ +#define LL_RCC_PLLSAIM_DIV_37 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 37 */ +#define LL_RCC_PLLSAIM_DIV_38 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 38 */ +#define LL_RCC_PLLSAIM_DIV_39 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 39 */ +#define LL_RCC_PLLSAIM_DIV_40 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_3) /*!< PLLSAI division factor for PLLSAIM output by 40 */ +#define LL_RCC_PLLSAIM_DIV_41 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 41 */ +#define LL_RCC_PLLSAIM_DIV_42 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 42 */ +#define LL_RCC_PLLSAIM_DIV_43 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) 
/*!< PLLSAI division factor for PLLSAIM output by 43 */ +#define LL_RCC_PLLSAIM_DIV_44 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2) /*!< PLLSAI division factor for PLLSAIM output by 44 */ +#define LL_RCC_PLLSAIM_DIV_45 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 45 */ +#define LL_RCC_PLLSAIM_DIV_46 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 46 */ +#define LL_RCC_PLLSAIM_DIV_47 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 47 */ +#define LL_RCC_PLLSAIM_DIV_48 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4) /*!< PLLSAI division factor for PLLSAIM output by 48 */ +#define LL_RCC_PLLSAIM_DIV_49 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 49 */ +#define LL_RCC_PLLSAIM_DIV_50 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 50 */ +#define LL_RCC_PLLSAIM_DIV_51 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 51 */ +#define LL_RCC_PLLSAIM_DIV_52 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_2) /*!< PLLSAI division factor for PLLSAIM output by 52 */ +#define LL_RCC_PLLSAIM_DIV_53 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 53 */ +#define LL_RCC_PLLSAIM_DIV_54 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 54 */ +#define LL_RCC_PLLSAIM_DIV_55 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 55 */ +#define LL_RCC_PLLSAIM_DIV_56 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3) /*!< PLLSAI division factor for PLLSAIM output by 56 */ +#define LL_RCC_PLLSAIM_DIV_57 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 57 */ +#define LL_RCC_PLLSAIM_DIV_58 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 58 */ +#define LL_RCC_PLLSAIM_DIV_59 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 59 */ +#define LL_RCC_PLLSAIM_DIV_60 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2) /*!< PLLSAI division factor for PLLSAIM output by 60 */ +#define LL_RCC_PLLSAIM_DIV_61 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 61 */ +#define LL_RCC_PLLSAIM_DIV_62 (RCC_PLLSAICFGR_PLLSAIM_5 | 
RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 62 */ +#define LL_RCC_PLLSAIM_DIV_63 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 63 */ +#else +#define LL_RCC_PLLSAIM_DIV_2 LL_RCC_PLLM_DIV_2 /*!< PLLSAI division factor for PLLSAIM output by 2 */ +#define LL_RCC_PLLSAIM_DIV_3 LL_RCC_PLLM_DIV_3 /*!< PLLSAI division factor for PLLSAIM output by 3 */ +#define LL_RCC_PLLSAIM_DIV_4 LL_RCC_PLLM_DIV_4 /*!< PLLSAI division factor for PLLSAIM output by 4 */ +#define LL_RCC_PLLSAIM_DIV_5 LL_RCC_PLLM_DIV_5 /*!< PLLSAI division factor for PLLSAIM output by 5 */ +#define LL_RCC_PLLSAIM_DIV_6 LL_RCC_PLLM_DIV_6 /*!< PLLSAI division factor for PLLSAIM output by 6 */ +#define LL_RCC_PLLSAIM_DIV_7 LL_RCC_PLLM_DIV_7 /*!< PLLSAI division factor for PLLSAIM output by 7 */ +#define LL_RCC_PLLSAIM_DIV_8 LL_RCC_PLLM_DIV_8 /*!< PLLSAI division factor for PLLSAIM output by 8 */ +#define LL_RCC_PLLSAIM_DIV_9 LL_RCC_PLLM_DIV_9 /*!< PLLSAI division factor for PLLSAIM output by 9 */ +#define LL_RCC_PLLSAIM_DIV_10 LL_RCC_PLLM_DIV_10 /*!< PLLSAI division factor for PLLSAIM output by 10 */ +#define LL_RCC_PLLSAIM_DIV_11 LL_RCC_PLLM_DIV_11 /*!< PLLSAI division factor for PLLSAIM output by 11 */ +#define LL_RCC_PLLSAIM_DIV_12 LL_RCC_PLLM_DIV_12 /*!< PLLSAI division factor for PLLSAIM output by 12 */ +#define LL_RCC_PLLSAIM_DIV_13 LL_RCC_PLLM_DIV_13 /*!< PLLSAI division factor for PLLSAIM output by 13 */ +#define LL_RCC_PLLSAIM_DIV_14 LL_RCC_PLLM_DIV_14 /*!< PLLSAI division factor for PLLSAIM output by 14 */ +#define LL_RCC_PLLSAIM_DIV_15 LL_RCC_PLLM_DIV_15 /*!< PLLSAI division factor for PLLSAIM output by 15 */ +#define LL_RCC_PLLSAIM_DIV_16 LL_RCC_PLLM_DIV_16 /*!< PLLSAI division factor for PLLSAIM output by 16 */ +#define LL_RCC_PLLSAIM_DIV_17 LL_RCC_PLLM_DIV_17 /*!< PLLSAI division factor for PLLSAIM output by 17 */ +#define LL_RCC_PLLSAIM_DIV_18 LL_RCC_PLLM_DIV_18 /*!< PLLSAI division factor for PLLSAIM output by 18 */ +#define LL_RCC_PLLSAIM_DIV_19 LL_RCC_PLLM_DIV_19 /*!< PLLSAI division factor for PLLSAIM output by 19 */ +#define LL_RCC_PLLSAIM_DIV_20 LL_RCC_PLLM_DIV_20 /*!< PLLSAI division factor for PLLSAIM output by 20 */ +#define LL_RCC_PLLSAIM_DIV_21 LL_RCC_PLLM_DIV_21 /*!< PLLSAI division factor for PLLSAIM output by 21 */ +#define LL_RCC_PLLSAIM_DIV_22 LL_RCC_PLLM_DIV_22 /*!< PLLSAI division factor for PLLSAIM output by 22 */ +#define LL_RCC_PLLSAIM_DIV_23 LL_RCC_PLLM_DIV_23 /*!< PLLSAI division factor for PLLSAIM output by 23 */ +#define LL_RCC_PLLSAIM_DIV_24 LL_RCC_PLLM_DIV_24 /*!< PLLSAI division factor for PLLSAIM output by 24 */ +#define LL_RCC_PLLSAIM_DIV_25 LL_RCC_PLLM_DIV_25 /*!< PLLSAI division factor for PLLSAIM output by 25 */ +#define LL_RCC_PLLSAIM_DIV_26 LL_RCC_PLLM_DIV_26 /*!< PLLSAI division factor for PLLSAIM output by 26 */ +#define LL_RCC_PLLSAIM_DIV_27 LL_RCC_PLLM_DIV_27 /*!< PLLSAI division factor for PLLSAIM output by 27 */ +#define LL_RCC_PLLSAIM_DIV_28 LL_RCC_PLLM_DIV_28 /*!< PLLSAI division factor for PLLSAIM output by 28 */ +#define LL_RCC_PLLSAIM_DIV_29 LL_RCC_PLLM_DIV_29 /*!< PLLSAI division factor for PLLSAIM output by 29 */ +#define LL_RCC_PLLSAIM_DIV_30 LL_RCC_PLLM_DIV_30 /*!< PLLSAI division factor for PLLSAIM output by 30 */ +#define LL_RCC_PLLSAIM_DIV_31 LL_RCC_PLLM_DIV_31 /*!< PLLSAI division factor for 
PLLSAIM output by 31 */ +#define LL_RCC_PLLSAIM_DIV_32 LL_RCC_PLLM_DIV_32 /*!< PLLSAI division factor for PLLSAIM output by 32 */ +#define LL_RCC_PLLSAIM_DIV_33 LL_RCC_PLLM_DIV_33 /*!< PLLSAI division factor for PLLSAIM output by 33 */ +#define LL_RCC_PLLSAIM_DIV_34 LL_RCC_PLLM_DIV_34 /*!< PLLSAI division factor for PLLSAIM output by 34 */ +#define LL_RCC_PLLSAIM_DIV_35 LL_RCC_PLLM_DIV_35 /*!< PLLSAI division factor for PLLSAIM output by 35 */ +#define LL_RCC_PLLSAIM_DIV_36 LL_RCC_PLLM_DIV_36 /*!< PLLSAI division factor for PLLSAIM output by 36 */ +#define LL_RCC_PLLSAIM_DIV_37 LL_RCC_PLLM_DIV_37 /*!< PLLSAI division factor for PLLSAIM output by 37 */ +#define LL_RCC_PLLSAIM_DIV_38 LL_RCC_PLLM_DIV_38 /*!< PLLSAI division factor for PLLSAIM output by 38 */ +#define LL_RCC_PLLSAIM_DIV_39 LL_RCC_PLLM_DIV_39 /*!< PLLSAI division factor for PLLSAIM output by 39 */ +#define LL_RCC_PLLSAIM_DIV_40 LL_RCC_PLLM_DIV_40 /*!< PLLSAI division factor for PLLSAIM output by 40 */ +#define LL_RCC_PLLSAIM_DIV_41 LL_RCC_PLLM_DIV_41 /*!< PLLSAI division factor for PLLSAIM output by 41 */ +#define LL_RCC_PLLSAIM_DIV_42 LL_RCC_PLLM_DIV_42 /*!< PLLSAI division factor for PLLSAIM output by 42 */ +#define LL_RCC_PLLSAIM_DIV_43 LL_RCC_PLLM_DIV_43 /*!< PLLSAI division factor for PLLSAIM output by 43 */ +#define LL_RCC_PLLSAIM_DIV_44 LL_RCC_PLLM_DIV_44 /*!< PLLSAI division factor for PLLSAIM output by 44 */ +#define LL_RCC_PLLSAIM_DIV_45 LL_RCC_PLLM_DIV_45 /*!< PLLSAI division factor for PLLSAIM output by 45 */ +#define LL_RCC_PLLSAIM_DIV_46 LL_RCC_PLLM_DIV_46 /*!< PLLSAI division factor for PLLSAIM output by 46 */ +#define LL_RCC_PLLSAIM_DIV_47 LL_RCC_PLLM_DIV_47 /*!< PLLSAI division factor for PLLSAIM output by 47 */ +#define LL_RCC_PLLSAIM_DIV_48 LL_RCC_PLLM_DIV_48 /*!< PLLSAI division factor for PLLSAIM output by 48 */ +#define LL_RCC_PLLSAIM_DIV_49 LL_RCC_PLLM_DIV_49 /*!< PLLSAI division factor for PLLSAIM output by 49 */ +#define LL_RCC_PLLSAIM_DIV_50 LL_RCC_PLLM_DIV_50 /*!< PLLSAI division factor for PLLSAIM output by 50 */ +#define LL_RCC_PLLSAIM_DIV_51 LL_RCC_PLLM_DIV_51 /*!< PLLSAI division factor for PLLSAIM output by 51 */ +#define LL_RCC_PLLSAIM_DIV_52 LL_RCC_PLLM_DIV_52 /*!< PLLSAI division factor for PLLSAIM output by 52 */ +#define LL_RCC_PLLSAIM_DIV_53 LL_RCC_PLLM_DIV_53 /*!< PLLSAI division factor for PLLSAIM output by 53 */ +#define LL_RCC_PLLSAIM_DIV_54 LL_RCC_PLLM_DIV_54 /*!< PLLSAI division factor for PLLSAIM output by 54 */ +#define LL_RCC_PLLSAIM_DIV_55 LL_RCC_PLLM_DIV_55 /*!< PLLSAI division factor for PLLSAIM output by 55 */ +#define LL_RCC_PLLSAIM_DIV_56 LL_RCC_PLLM_DIV_56 /*!< PLLSAI division factor for PLLSAIM output by 56 */ +#define LL_RCC_PLLSAIM_DIV_57 LL_RCC_PLLM_DIV_57 /*!< PLLSAI division factor for PLLSAIM output by 57 */ +#define LL_RCC_PLLSAIM_DIV_58 LL_RCC_PLLM_DIV_58 /*!< PLLSAI division factor for PLLSAIM output by 58 */ +#define LL_RCC_PLLSAIM_DIV_59 LL_RCC_PLLM_DIV_59 /*!< PLLSAI division factor for PLLSAIM output by 59 */ +#define LL_RCC_PLLSAIM_DIV_60 LL_RCC_PLLM_DIV_60 /*!< PLLSAI division factor for PLLSAIM output by 60 */ +#define LL_RCC_PLLSAIM_DIV_61 LL_RCC_PLLM_DIV_61 /*!< PLLSAI division factor for PLLSAIM output by 61 */ +#define LL_RCC_PLLSAIM_DIV_62 LL_RCC_PLLM_DIV_62 /*!< PLLSAI division factor for PLLSAIM output by 62 */ +#define LL_RCC_PLLSAIM_DIV_63 LL_RCC_PLLM_DIV_63 /*!< PLLSAI division factor for PLLSAIM output by 63 */ +#endif /* RCC_PLLSAICFGR_PLLSAIM */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_PLLSAIQ PLLSAIQ division factor (PLLSAIQ) + * @{ + */ 
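The LL_RCC_PLLSAIM_DIV_x constants in the table above (and the LL_RCC_PLLM_DIV_x values they alias to on devices without a dedicated PLLSAIM field) place the numeric factor x directly in the divider bit field, so the factor can be recovered by masking and shifting; other divider fields such as PLLI2SDIVQ or PLLSAIP use their own encodings. A minimal sketch of that decode, assuming a device whose RCC header provides the dedicated PLLSAIM field; the helper name is illustrative only, not part of this driver:

    #include "stm32f4xx_ll_rcc.h"   /* pulls in the device header with the RCC bit definitions */

    /* Illustrative only: extract the numeric division factor from an
       LL_RCC_PLLSAIM_DIV_x constant (e.g. returns 8 for LL_RCC_PLLSAIM_DIV_8). */
    static inline uint32_t pllsaim_div_factor(uint32_t div_const)
    {
      return (div_const & RCC_PLLSAICFGR_PLLSAIM) >> RCC_PLLSAICFGR_PLLSAIM_Pos;
    }
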
+#define LL_RCC_PLLSAIQ_DIV_2 RCC_PLLSAICFGR_PLLSAIQ_1 /*!< PLLSAI division factor for PLLSAIQ output by 2 */ +#define LL_RCC_PLLSAIQ_DIV_3 (RCC_PLLSAICFGR_PLLSAIQ_1 | RCC_PLLSAICFGR_PLLSAIQ_0) /*!< PLLSAI division factor for PLLSAIQ output by 3 */ +#define LL_RCC_PLLSAIQ_DIV_4 RCC_PLLSAICFGR_PLLSAIQ_2 /*!< PLLSAI division factor for PLLSAIQ output by 4 */ +#define LL_RCC_PLLSAIQ_DIV_5 (RCC_PLLSAICFGR_PLLSAIQ_2 | RCC_PLLSAICFGR_PLLSAIQ_0) /*!< PLLSAI division factor for PLLSAIQ output by 5 */ +#define LL_RCC_PLLSAIQ_DIV_6 (RCC_PLLSAICFGR_PLLSAIQ_2 | RCC_PLLSAICFGR_PLLSAIQ_1) /*!< PLLSAI division factor for PLLSAIQ output by 6 */ +#define LL_RCC_PLLSAIQ_DIV_7 (RCC_PLLSAICFGR_PLLSAIQ_2 | RCC_PLLSAICFGR_PLLSAIQ_1 | RCC_PLLSAICFGR_PLLSAIQ_0) /*!< PLLSAI division factor for PLLSAIQ output by 7 */ +#define LL_RCC_PLLSAIQ_DIV_8 RCC_PLLSAICFGR_PLLSAIQ_3 /*!< PLLSAI division factor for PLLSAIQ output by 8 */ +#define LL_RCC_PLLSAIQ_DIV_9 (RCC_PLLSAICFGR_PLLSAIQ_3 | RCC_PLLSAICFGR_PLLSAIQ_0) /*!< PLLSAI division factor for PLLSAIQ output by 9 */ +#define LL_RCC_PLLSAIQ_DIV_10 (RCC_PLLSAICFGR_PLLSAIQ_3 | RCC_PLLSAICFGR_PLLSAIQ_1) /*!< PLLSAI division factor for PLLSAIQ output by 10 */ +#define LL_RCC_PLLSAIQ_DIV_11 (RCC_PLLSAICFGR_PLLSAIQ_3 | RCC_PLLSAICFGR_PLLSAIQ_1 | RCC_PLLSAICFGR_PLLSAIQ_0) /*!< PLLSAI division factor for PLLSAIQ output by 11 */ +#define LL_RCC_PLLSAIQ_DIV_12 (RCC_PLLSAICFGR_PLLSAIQ_3 | RCC_PLLSAICFGR_PLLSAIQ_2) /*!< PLLSAI division factor for PLLSAIQ output by 12 */ +#define LL_RCC_PLLSAIQ_DIV_13 (RCC_PLLSAICFGR_PLLSAIQ_3 | RCC_PLLSAICFGR_PLLSAIQ_2 | RCC_PLLSAICFGR_PLLSAIQ_0) /*!< PLLSAI division factor for PLLSAIQ output by 13 */ +#define LL_RCC_PLLSAIQ_DIV_14 (RCC_PLLSAICFGR_PLLSAIQ_3 | RCC_PLLSAICFGR_PLLSAIQ_2 | RCC_PLLSAICFGR_PLLSAIQ_1) /*!< PLLSAI division factor for PLLSAIQ output by 14 */ +#define LL_RCC_PLLSAIQ_DIV_15 (RCC_PLLSAICFGR_PLLSAIQ_3 | RCC_PLLSAICFGR_PLLSAIQ_2 | RCC_PLLSAICFGR_PLLSAIQ_1 | RCC_PLLSAICFGR_PLLSAIQ_0) /*!< PLLSAI division factor for PLLSAIQ output by 15 */ +/** + * @} + */ + +#if defined(RCC_DCKCFGR_PLLSAIDIVQ) +/** @defgroup RCC_LL_EC_PLLSAIDIVQ PLLSAIDIVQ division factor (PLLSAIDIVQ) + * @{ + */ +#define LL_RCC_PLLSAIDIVQ_DIV_1 0x00000000U /*!< PLLSAI division factor for PLLSAIDIVQ output by 1 */ +#define LL_RCC_PLLSAIDIVQ_DIV_2 RCC_DCKCFGR_PLLSAIDIVQ_0 /*!< PLLSAI division factor for PLLSAIDIVQ output by 2 */ +#define LL_RCC_PLLSAIDIVQ_DIV_3 RCC_DCKCFGR_PLLSAIDIVQ_1 /*!< PLLSAI division factor for PLLSAIDIVQ output by 3 */ +#define LL_RCC_PLLSAIDIVQ_DIV_4 (RCC_DCKCFGR_PLLSAIDIVQ_1 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 4 */ +#define LL_RCC_PLLSAIDIVQ_DIV_5 RCC_DCKCFGR_PLLSAIDIVQ_2 /*!< PLLSAI division factor for PLLSAIDIVQ output by 5 */ +#define LL_RCC_PLLSAIDIVQ_DIV_6 (RCC_DCKCFGR_PLLSAIDIVQ_2 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 6 */ +#define LL_RCC_PLLSAIDIVQ_DIV_7 (RCC_DCKCFGR_PLLSAIDIVQ_2 | RCC_DCKCFGR_PLLSAIDIVQ_1) /*!< PLLSAI division factor for PLLSAIDIVQ output by 7 */ +#define LL_RCC_PLLSAIDIVQ_DIV_8 (RCC_DCKCFGR_PLLSAIDIVQ_2 | RCC_DCKCFGR_PLLSAIDIVQ_1 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 8 */ +#define LL_RCC_PLLSAIDIVQ_DIV_9 RCC_DCKCFGR_PLLSAIDIVQ_3 /*!< PLLSAI division factor for PLLSAIDIVQ output by 9 */ +#define LL_RCC_PLLSAIDIVQ_DIV_10 (RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 10 */ +#define LL_RCC_PLLSAIDIVQ_DIV_11 
(RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_1) /*!< PLLSAI division factor for PLLSAIDIVQ output by 11 */ +#define LL_RCC_PLLSAIDIVQ_DIV_12 (RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_1 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 12 */ +#define LL_RCC_PLLSAIDIVQ_DIV_13 (RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_2) /*!< PLLSAI division factor for PLLSAIDIVQ output by 13 */ +#define LL_RCC_PLLSAIDIVQ_DIV_14 (RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_2 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 14 */ +#define LL_RCC_PLLSAIDIVQ_DIV_15 (RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_2 | RCC_DCKCFGR_PLLSAIDIVQ_1) /*!< PLLSAI division factor for PLLSAIDIVQ output by 15 */ +#define LL_RCC_PLLSAIDIVQ_DIV_16 (RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_2 | RCC_DCKCFGR_PLLSAIDIVQ_1 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 16 */ +#define LL_RCC_PLLSAIDIVQ_DIV_17 RCC_DCKCFGR_PLLSAIDIVQ_4 /*!< PLLSAI division factor for PLLSAIDIVQ output by 17 */ +#define LL_RCC_PLLSAIDIVQ_DIV_18 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 18 */ +#define LL_RCC_PLLSAIDIVQ_DIV_19 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_1) /*!< PLLSAI division factor for PLLSAIDIVQ output by 19 */ +#define LL_RCC_PLLSAIDIVQ_DIV_20 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_1 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 20 */ +#define LL_RCC_PLLSAIDIVQ_DIV_21 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_2) /*!< PLLSAI division factor for PLLSAIDIVQ output by 21 */ +#define LL_RCC_PLLSAIDIVQ_DIV_22 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_2 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 22 */ +#define LL_RCC_PLLSAIDIVQ_DIV_23 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_2 | RCC_DCKCFGR_PLLSAIDIVQ_1) /*!< PLLSAI division factor for PLLSAIDIVQ output by 23 */ +#define LL_RCC_PLLSAIDIVQ_DIV_24 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_2 | RCC_DCKCFGR_PLLSAIDIVQ_1 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 24 */ +#define LL_RCC_PLLSAIDIVQ_DIV_25 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_3) /*!< PLLSAI division factor for PLLSAIDIVQ output by 25 */ +#define LL_RCC_PLLSAIDIVQ_DIV_26 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 26 */ +#define LL_RCC_PLLSAIDIVQ_DIV_27 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_1) /*!< PLLSAI division factor for PLLSAIDIVQ output by 27 */ +#define LL_RCC_PLLSAIDIVQ_DIV_28 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_1 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 28 */ +#define LL_RCC_PLLSAIDIVQ_DIV_29 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_2) /*!< PLLSAI division factor for PLLSAIDIVQ output by 29 */ +#define LL_RCC_PLLSAIDIVQ_DIV_30 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_2 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 30 */ +#define LL_RCC_PLLSAIDIVQ_DIV_31 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_2 | RCC_DCKCFGR_PLLSAIDIVQ_1) /*!< PLLSAI division factor for PLLSAIDIVQ output by 31 */ +#define 
LL_RCC_PLLSAIDIVQ_DIV_32 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_2 | RCC_DCKCFGR_PLLSAIDIVQ_1 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 32 */ +/** + * @} + */ +#endif /* RCC_DCKCFGR_PLLSAIDIVQ */ + +#if defined(RCC_PLLSAICFGR_PLLSAIR) +/** @defgroup RCC_LL_EC_PLLSAIR PLLSAIR division factor (PLLSAIR) + * @{ + */ +#define LL_RCC_PLLSAIR_DIV_2 RCC_PLLSAICFGR_PLLSAIR_1 /*!< PLLSAI division factor for PLLSAIR output by 2 */ +#define LL_RCC_PLLSAIR_DIV_3 (RCC_PLLSAICFGR_PLLSAIR_1 | RCC_PLLSAICFGR_PLLSAIR_0) /*!< PLLSAI division factor for PLLSAIR output by 3 */ +#define LL_RCC_PLLSAIR_DIV_4 RCC_PLLSAICFGR_PLLSAIR_2 /*!< PLLSAI division factor for PLLSAIR output by 4 */ +#define LL_RCC_PLLSAIR_DIV_5 (RCC_PLLSAICFGR_PLLSAIR_2 | RCC_PLLSAICFGR_PLLSAIR_0) /*!< PLLSAI division factor for PLLSAIR output by 5 */ +#define LL_RCC_PLLSAIR_DIV_6 (RCC_PLLSAICFGR_PLLSAIR_2 | RCC_PLLSAICFGR_PLLSAIR_1) /*!< PLLSAI division factor for PLLSAIR output by 6 */ +#define LL_RCC_PLLSAIR_DIV_7 (RCC_PLLSAICFGR_PLLSAIR_2 | RCC_PLLSAICFGR_PLLSAIR_1 | RCC_PLLSAICFGR_PLLSAIR_0) /*!< PLLSAI division factor for PLLSAIR output by 7 */ +/** + * @} + */ +#endif /* RCC_PLLSAICFGR_PLLSAIR */ + +#if defined(RCC_DCKCFGR_PLLSAIDIVR) +/** @defgroup RCC_LL_EC_PLLSAIDIVR PLLSAIDIVR division factor (PLLSAIDIVR) + * @{ + */ +#define LL_RCC_PLLSAIDIVR_DIV_2 0x00000000U /*!< PLLSAI division factor for PLLSAIDIVR output by 2 */ +#define LL_RCC_PLLSAIDIVR_DIV_4 RCC_DCKCFGR_PLLSAIDIVR_0 /*!< PLLSAI division factor for PLLSAIDIVR output by 4 */ +#define LL_RCC_PLLSAIDIVR_DIV_8 RCC_DCKCFGR_PLLSAIDIVR_1 /*!< PLLSAI division factor for PLLSAIDIVR output by 8 */ +#define LL_RCC_PLLSAIDIVR_DIV_16 (RCC_DCKCFGR_PLLSAIDIVR_1 | RCC_DCKCFGR_PLLSAIDIVR_0) /*!< PLLSAI division factor for PLLSAIDIVR output by 16 */ +/** + * @} + */ +#endif /* RCC_DCKCFGR_PLLSAIDIVR */ + +#if defined(RCC_PLLSAICFGR_PLLSAIP) +/** @defgroup RCC_LL_EC_PLLSAIP PLLSAIP division factor (PLLSAIP) + * @{ + */ +#define LL_RCC_PLLSAIP_DIV_2 0x00000000U /*!< PLLSAI division factor for PLLSAIP output by 2 */ +#define LL_RCC_PLLSAIP_DIV_4 RCC_PLLSAICFGR_PLLSAIP_0 /*!< PLLSAI division factor for PLLSAIP output by 4 */ +#define LL_RCC_PLLSAIP_DIV_6 RCC_PLLSAICFGR_PLLSAIP_1 /*!< PLLSAI division factor for PLLSAIP output by 6 */ +#define LL_RCC_PLLSAIP_DIV_8 (RCC_PLLSAICFGR_PLLSAIP_1 | RCC_PLLSAICFGR_PLLSAIP_0) /*!< PLLSAI division factor for PLLSAIP output by 8 */ +/** + * @} + */ +#endif /* RCC_PLLSAICFGR_PLLSAIP */ +#endif /* RCC_PLLSAI_SUPPORT */ +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup RCC_LL_Exported_Macros RCC Exported Macros + * @{ + */ + +/** @defgroup RCC_LL_EM_WRITE_READ Common Write and read registers Macros + * @{ + */ + +/** + * @brief Write a value in RCC register + * @param __REG__ Register to be written + * @param __VALUE__ Value to be written in the register + * @retval None + */ +#define LL_RCC_WriteReg(__REG__, __VALUE__) WRITE_REG(RCC->__REG__, (__VALUE__)) + +/** + * @brief Read a value in RCC register + * @param __REG__ Register to be read + * @retval Register value + */ +#define LL_RCC_ReadReg(__REG__) READ_REG(RCC->__REG__) +/** + * @} + */ + +/** @defgroup RCC_LL_EM_CALC_FREQ Calculate frequencies + * @{ + */ + +/** + * @brief Helper macro to calculate the PLLCLK frequency on system domain + * @note ex: @ref __LL_RCC_CALC_PLLCLK_FREQ (HSE_VALUE,@ref LL_RCC_PLL_GetDivider (), + * @ref LL_RCC_PLL_GetN (), @ref 
LL_RCC_PLL_GetP ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param __PLLN__ Between 50/192(*) and 432 + * + * (*) value not defined in all devices. 
+ * @param __PLLP__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLP_DIV_2 + * @arg @ref LL_RCC_PLLP_DIV_4 + * @arg @ref LL_RCC_PLLP_DIV_6 + * @arg @ref LL_RCC_PLLP_DIV_8 + * @retval PLL clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PLLCLK_FREQ(__INPUTFREQ__, __PLLM__, __PLLN__, __PLLP__) ((__INPUTFREQ__) / (__PLLM__) * (__PLLN__) / \ + ((((__PLLP__) >> RCC_PLLCFGR_PLLP_Pos ) + 1U) * 2U)) + +#if defined(RCC_PLLR_SYSCLK_SUPPORT) +/** + * @brief Helper macro to calculate the PLLRCLK frequency on system domain + * @note ex: @ref __LL_RCC_CALC_PLLRCLK_FREQ (HSE_VALUE,@ref LL_RCC_PLL_GetDivider (), + * @ref LL_RCC_PLL_GetN (), @ref LL_RCC_PLL_GetR ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param __PLLN__ Between 50 and 432 + * @param __PLLR__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLR_DIV_2 + * @arg @ref LL_RCC_PLLR_DIV_3 + * @arg @ref LL_RCC_PLLR_DIV_4 + * @arg @ref LL_RCC_PLLR_DIV_5 + * @arg @ref LL_RCC_PLLR_DIV_6 + * @arg @ref LL_RCC_PLLR_DIV_7 + * @retval PLL clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PLLRCLK_FREQ(__INPUTFREQ__, __PLLM__, __PLLN__, __PLLR__) ((__INPUTFREQ__) / (__PLLM__) * (__PLLN__) / \ + ((__PLLR__) >> RCC_PLLCFGR_PLLR_Pos )) + +#endif /* RCC_PLLR_SYSCLK_SUPPORT */ + +/** + * @brief Helper macro to calculate the PLLCLK frequency used on 48M domain + * @note ex: @ref __LL_RCC_CALC_PLLCLK_48M_FREQ 
(HSE_VALUE,@ref LL_RCC_PLL_GetDivider (), + * @ref LL_RCC_PLL_GetN (), @ref LL_RCC_PLL_GetQ ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param __PLLN__ Between 50/192(*) and 432 + * + * (*) value not defined in all devices. 
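As a concrete instance of the system-domain helper __LL_RCC_CALC_PLLCLK_FREQ defined above, a sketch assuming the common 8 MHz HSE, M = 8, N = 336, P = 2 configuration (illustrative values, not taken from this project):

    #include "stm32f4xx_ll_rcc.h"

    /* 8 MHz / 8 * 336 / 2 = 168 MHz on the system domain. */
    uint32_t sysclk = __LL_RCC_CALC_PLLCLK_FREQ(8000000U,
                                                LL_RCC_PLLM_DIV_8,
                                                336U,
                                                LL_RCC_PLLP_DIV_2);   /* 168000000 */
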
+ * @param __PLLQ__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLQ_DIV_2 + * @arg @ref LL_RCC_PLLQ_DIV_3 + * @arg @ref LL_RCC_PLLQ_DIV_4 + * @arg @ref LL_RCC_PLLQ_DIV_5 + * @arg @ref LL_RCC_PLLQ_DIV_6 + * @arg @ref LL_RCC_PLLQ_DIV_7 + * @arg @ref LL_RCC_PLLQ_DIV_8 + * @arg @ref LL_RCC_PLLQ_DIV_9 + * @arg @ref LL_RCC_PLLQ_DIV_10 + * @arg @ref LL_RCC_PLLQ_DIV_11 + * @arg @ref LL_RCC_PLLQ_DIV_12 + * @arg @ref LL_RCC_PLLQ_DIV_13 + * @arg @ref LL_RCC_PLLQ_DIV_14 + * @arg @ref LL_RCC_PLLQ_DIV_15 + * @retval PLL clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PLLCLK_48M_FREQ(__INPUTFREQ__, __PLLM__, __PLLN__, __PLLQ__) ((__INPUTFREQ__) / (__PLLM__) * (__PLLN__) / \ + ((__PLLQ__) >> RCC_PLLCFGR_PLLQ_Pos )) + +#if defined(DSI) +/** + * @brief Helper macro to calculate the PLLCLK frequency used on DSI + * @note ex: @ref __LL_RCC_CALC_PLLCLK_DSI_FREQ (HSE_VALUE, @ref LL_RCC_PLL_GetDivider (), + * @ref LL_RCC_PLL_GetN (), @ref LL_RCC_PLL_GetR ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param __PLLN__ Between 50 and 432 + * @param __PLLR__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLR_DIV_2 + * @arg @ref LL_RCC_PLLR_DIV_3 + * @arg @ref LL_RCC_PLLR_DIV_4 + * @arg @ref LL_RCC_PLLR_DIV_5 + * @arg @ref LL_RCC_PLLR_DIV_6 + * @arg @ref LL_RCC_PLLR_DIV_7 + * @retval PLL clock frequency (in Hz) + */ +#define 
__LL_RCC_CALC_PLLCLK_DSI_FREQ(__INPUTFREQ__, __PLLM__, __PLLN__, __PLLR__) ((__INPUTFREQ__) / (__PLLM__) * (__PLLN__) / \ + ((__PLLR__) >> RCC_PLLCFGR_PLLR_Pos )) +#endif /* DSI */ + +#if defined(RCC_PLLR_I2S_CLKSOURCE_SUPPORT) +/** + * @brief Helper macro to calculate the PLLCLK frequency used on I2S + * @note ex: @ref __LL_RCC_CALC_PLLCLK_I2S_FREQ (HSE_VALUE, @ref LL_RCC_PLL_GetDivider (), + * @ref LL_RCC_PLL_GetN (), @ref LL_RCC_PLL_GetR ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param __PLLN__ Between 50 and 432 + * @param __PLLR__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLR_DIV_2 + * @arg @ref LL_RCC_PLLR_DIV_3 + * @arg @ref LL_RCC_PLLR_DIV_4 + * @arg @ref LL_RCC_PLLR_DIV_5 + * @arg @ref LL_RCC_PLLR_DIV_6 + * @arg @ref LL_RCC_PLLR_DIV_7 + * @retval PLL clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PLLCLK_I2S_FREQ(__INPUTFREQ__, __PLLM__, __PLLN__, __PLLR__) ((__INPUTFREQ__) / (__PLLM__) * (__PLLN__) / \ + ((__PLLR__) >> RCC_PLLCFGR_PLLR_Pos )) +#endif /* RCC_PLLR_I2S_CLKSOURCE_SUPPORT */ + +#if defined(SPDIFRX) +/** + * @brief Helper macro to calculate the PLLCLK frequency used on SPDIFRX + * @note ex: @ref __LL_RCC_CALC_PLLCLK_SPDIFRX_FREQ (HSE_VALUE, @ref LL_RCC_PLL_GetDivider (), + * @ref LL_RCC_PLL_GetN (), @ref LL_RCC_PLL_GetR ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of 
the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param __PLLN__ Between 50 and 432 + * @param __PLLR__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLR_DIV_2 + * @arg @ref LL_RCC_PLLR_DIV_3 + * @arg @ref LL_RCC_PLLR_DIV_4 + * @arg @ref LL_RCC_PLLR_DIV_5 + * @arg @ref LL_RCC_PLLR_DIV_6 + * @arg @ref LL_RCC_PLLR_DIV_7 + * @retval PLL clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PLLCLK_SPDIFRX_FREQ(__INPUTFREQ__, __PLLM__, __PLLN__, __PLLR__) ((__INPUTFREQ__) / (__PLLM__) * (__PLLN__) / \ + ((__PLLR__) >> RCC_PLLCFGR_PLLR_Pos )) +#endif /* SPDIFRX */ + +#if defined(RCC_PLLCFGR_PLLR) +#if defined(SAI1) +/** + * @brief Helper macro to calculate the PLLCLK frequency used on SAI + * @note ex: @ref __LL_RCC_CALC_PLLCLK_SAI_FREQ (HSE_VALUE, @ref LL_RCC_PLL_GetDivider (), + * @ref LL_RCC_PLL_GetN (), @ref LL_RCC_PLL_GetR (), @ref LL_RCC_PLL_GetDIVR ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref 
LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param __PLLN__ Between 50 and 432 + * @param __PLLR__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLR_DIV_2 + * @arg @ref LL_RCC_PLLR_DIV_3 + * @arg @ref LL_RCC_PLLR_DIV_4 + * @arg @ref LL_RCC_PLLR_DIV_5 + * @arg @ref LL_RCC_PLLR_DIV_6 + * @arg @ref LL_RCC_PLLR_DIV_7 + * @param __PLLDIVR__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLDIVR_DIV_1 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_2 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_3 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_4 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_5 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_6 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_7 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_8 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_9 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_10 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_11 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_12 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_13 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_14 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_15 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_16 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_17 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_18 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_19 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_20 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_21 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_22 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_23 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_24 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_25 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_26 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_27 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_28 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_29 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_30 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_31 (*) + * + * (*) value not defined in all devices. 
+ * @retval PLL clock frequency (in Hz) + */ +#if defined(RCC_DCKCFGR_PLLDIVR) +#define __LL_RCC_CALC_PLLCLK_SAI_FREQ(__INPUTFREQ__, __PLLM__, __PLLN__, __PLLR__, __PLLDIVR__) (((__INPUTFREQ__) / (__PLLM__) * (__PLLN__) / \ + ((__PLLR__) >> RCC_PLLCFGR_PLLR_Pos )) / ((__PLLDIVR__) >> RCC_DCKCFGR_PLLDIVR_Pos )) +#else +#define __LL_RCC_CALC_PLLCLK_SAI_FREQ(__INPUTFREQ__, __PLLM__, __PLLN__, __PLLR__) ((__INPUTFREQ__) / (__PLLM__) * (__PLLN__) / \ + ((__PLLR__) >> RCC_PLLCFGR_PLLR_Pos )) +#endif /* RCC_DCKCFGR_PLLDIVR */ +#endif /* SAI1 */ +#endif /* RCC_PLLCFGR_PLLR */ + +#if defined(RCC_PLLSAI_SUPPORT) +/** + * @brief Helper macro to calculate the PLLSAI frequency used for SAI domain + * @note ex: @ref __LL_RCC_CALC_PLLSAI_SAI_FREQ (HSE_VALUE,@ref LL_RCC_PLLSAI_GetDivider (), + * @ref LL_RCC_PLLSAI_GetN (), @ref LL_RCC_PLLSAI_GetQ (), @ref LL_RCC_PLLSAI_GetDIVQ ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIM_DIV_2 + * @arg @ref LL_RCC_PLLSAIM_DIV_3 + * @arg @ref LL_RCC_PLLSAIM_DIV_4 + * @arg @ref LL_RCC_PLLSAIM_DIV_5 + * @arg @ref LL_RCC_PLLSAIM_DIV_6 + * @arg @ref LL_RCC_PLLSAIM_DIV_7 + * @arg @ref LL_RCC_PLLSAIM_DIV_8 + * @arg @ref LL_RCC_PLLSAIM_DIV_9 + * @arg @ref LL_RCC_PLLSAIM_DIV_10 + * @arg @ref LL_RCC_PLLSAIM_DIV_11 + * @arg @ref LL_RCC_PLLSAIM_DIV_12 + * @arg @ref LL_RCC_PLLSAIM_DIV_13 + * @arg @ref LL_RCC_PLLSAIM_DIV_14 + * @arg @ref LL_RCC_PLLSAIM_DIV_15 + * @arg @ref LL_RCC_PLLSAIM_DIV_16 + * @arg @ref LL_RCC_PLLSAIM_DIV_17 + * @arg @ref LL_RCC_PLLSAIM_DIV_18 + * @arg @ref LL_RCC_PLLSAIM_DIV_19 + * @arg @ref LL_RCC_PLLSAIM_DIV_20 + * @arg @ref LL_RCC_PLLSAIM_DIV_21 + * @arg @ref LL_RCC_PLLSAIM_DIV_22 + * @arg @ref LL_RCC_PLLSAIM_DIV_23 + * @arg @ref LL_RCC_PLLSAIM_DIV_24 + * @arg @ref LL_RCC_PLLSAIM_DIV_25 + * @arg @ref LL_RCC_PLLSAIM_DIV_26 + * @arg @ref LL_RCC_PLLSAIM_DIV_27 + * @arg @ref LL_RCC_PLLSAIM_DIV_28 + * @arg @ref LL_RCC_PLLSAIM_DIV_29 + * @arg @ref LL_RCC_PLLSAIM_DIV_30 + * @arg @ref LL_RCC_PLLSAIM_DIV_31 + * @arg @ref LL_RCC_PLLSAIM_DIV_32 + * @arg @ref LL_RCC_PLLSAIM_DIV_33 + * @arg @ref LL_RCC_PLLSAIM_DIV_34 + * @arg @ref LL_RCC_PLLSAIM_DIV_35 + * @arg @ref LL_RCC_PLLSAIM_DIV_36 + * @arg @ref LL_RCC_PLLSAIM_DIV_37 + * @arg @ref LL_RCC_PLLSAIM_DIV_38 + * @arg @ref LL_RCC_PLLSAIM_DIV_39 + * @arg @ref LL_RCC_PLLSAIM_DIV_40 + * @arg @ref LL_RCC_PLLSAIM_DIV_41 + * @arg @ref LL_RCC_PLLSAIM_DIV_42 + * @arg @ref LL_RCC_PLLSAIM_DIV_43 + * @arg @ref LL_RCC_PLLSAIM_DIV_44 + * @arg @ref LL_RCC_PLLSAIM_DIV_45 + * @arg @ref LL_RCC_PLLSAIM_DIV_46 + * @arg @ref LL_RCC_PLLSAIM_DIV_47 + * @arg @ref LL_RCC_PLLSAIM_DIV_48 + * @arg @ref LL_RCC_PLLSAIM_DIV_49 + * @arg @ref LL_RCC_PLLSAIM_DIV_50 + * @arg @ref LL_RCC_PLLSAIM_DIV_51 + * @arg @ref LL_RCC_PLLSAIM_DIV_52 + * @arg @ref LL_RCC_PLLSAIM_DIV_53 + * @arg @ref LL_RCC_PLLSAIM_DIV_54 + * @arg @ref LL_RCC_PLLSAIM_DIV_55 + * @arg @ref LL_RCC_PLLSAIM_DIV_56 + * @arg @ref LL_RCC_PLLSAIM_DIV_57 + * @arg @ref LL_RCC_PLLSAIM_DIV_58 + * @arg @ref LL_RCC_PLLSAIM_DIV_59 + * @arg @ref LL_RCC_PLLSAIM_DIV_60 + * @arg @ref LL_RCC_PLLSAIM_DIV_61 + * @arg @ref LL_RCC_PLLSAIM_DIV_62 + * @arg @ref LL_RCC_PLLSAIM_DIV_63 + * @param __PLLSAIN__ Between 49/50(*) and 432 + * + * (*) value not defined in all devices. 
+ * @param __PLLSAIQ__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIQ_DIV_2 + * @arg @ref LL_RCC_PLLSAIQ_DIV_3 + * @arg @ref LL_RCC_PLLSAIQ_DIV_4 + * @arg @ref LL_RCC_PLLSAIQ_DIV_5 + * @arg @ref LL_RCC_PLLSAIQ_DIV_6 + * @arg @ref LL_RCC_PLLSAIQ_DIV_7 + * @arg @ref LL_RCC_PLLSAIQ_DIV_8 + * @arg @ref LL_RCC_PLLSAIQ_DIV_9 + * @arg @ref LL_RCC_PLLSAIQ_DIV_10 + * @arg @ref LL_RCC_PLLSAIQ_DIV_11 + * @arg @ref LL_RCC_PLLSAIQ_DIV_12 + * @arg @ref LL_RCC_PLLSAIQ_DIV_13 + * @arg @ref LL_RCC_PLLSAIQ_DIV_14 + * @arg @ref LL_RCC_PLLSAIQ_DIV_15 + * @param __PLLSAIDIVQ__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_1 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_2 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_3 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_4 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_5 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_6 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_7 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_8 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_9 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_10 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_11 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_12 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_13 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_14 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_15 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_16 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_17 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_18 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_19 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_20 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_21 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_22 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_23 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_24 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_25 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_26 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_27 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_28 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_29 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_30 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_31 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_32 + * @retval PLLSAI clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PLLSAI_SAI_FREQ(__INPUTFREQ__, __PLLM__, __PLLSAIN__, __PLLSAIQ__, __PLLSAIDIVQ__) (((__INPUTFREQ__) / (__PLLM__)) * (__PLLSAIN__) / \ + (((__PLLSAIQ__) >> RCC_PLLSAICFGR_PLLSAIQ_Pos) * (((__PLLSAIDIVQ__) >> RCC_DCKCFGR_PLLSAIDIVQ_Pos) + 1U))) + +#if defined(RCC_PLLSAICFGR_PLLSAIP) +/** + * @brief Helper macro to calculate the PLLSAI frequency used on 48Mhz domain + * @note ex: @ref __LL_RCC_CALC_PLLSAI_48M_FREQ (HSE_VALUE,@ref LL_RCC_PLLSAI_GetDivider (), + * @ref LL_RCC_PLLSAI_GetN (), @ref LL_RCC_PLLSAI_GetP ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIM_DIV_2 + * @arg @ref LL_RCC_PLLSAIM_DIV_3 + * @arg @ref LL_RCC_PLLSAIM_DIV_4 + * @arg @ref LL_RCC_PLLSAIM_DIV_5 + * @arg @ref LL_RCC_PLLSAIM_DIV_6 + * @arg @ref LL_RCC_PLLSAIM_DIV_7 + * @arg @ref LL_RCC_PLLSAIM_DIV_8 + * @arg @ref LL_RCC_PLLSAIM_DIV_9 + * @arg @ref LL_RCC_PLLSAIM_DIV_10 + * @arg @ref LL_RCC_PLLSAIM_DIV_11 + * @arg @ref LL_RCC_PLLSAIM_DIV_12 + * @arg @ref LL_RCC_PLLSAIM_DIV_13 + * @arg @ref LL_RCC_PLLSAIM_DIV_14 + * @arg @ref LL_RCC_PLLSAIM_DIV_15 + * @arg @ref LL_RCC_PLLSAIM_DIV_16 + * @arg @ref LL_RCC_PLLSAIM_DIV_17 + * @arg @ref LL_RCC_PLLSAIM_DIV_18 + * @arg @ref LL_RCC_PLLSAIM_DIV_19 + * @arg @ref LL_RCC_PLLSAIM_DIV_20 + * @arg @ref LL_RCC_PLLSAIM_DIV_21 + * @arg @ref LL_RCC_PLLSAIM_DIV_22 + * @arg @ref LL_RCC_PLLSAIM_DIV_23 + * @arg @ref LL_RCC_PLLSAIM_DIV_24 + * @arg @ref LL_RCC_PLLSAIM_DIV_25 + * @arg @ref LL_RCC_PLLSAIM_DIV_26 + * @arg @ref LL_RCC_PLLSAIM_DIV_27 + * @arg 
@ref LL_RCC_PLLSAIM_DIV_28 + * @arg @ref LL_RCC_PLLSAIM_DIV_29 + * @arg @ref LL_RCC_PLLSAIM_DIV_30 + * @arg @ref LL_RCC_PLLSAIM_DIV_31 + * @arg @ref LL_RCC_PLLSAIM_DIV_32 + * @arg @ref LL_RCC_PLLSAIM_DIV_33 + * @arg @ref LL_RCC_PLLSAIM_DIV_34 + * @arg @ref LL_RCC_PLLSAIM_DIV_35 + * @arg @ref LL_RCC_PLLSAIM_DIV_36 + * @arg @ref LL_RCC_PLLSAIM_DIV_37 + * @arg @ref LL_RCC_PLLSAIM_DIV_38 + * @arg @ref LL_RCC_PLLSAIM_DIV_39 + * @arg @ref LL_RCC_PLLSAIM_DIV_40 + * @arg @ref LL_RCC_PLLSAIM_DIV_41 + * @arg @ref LL_RCC_PLLSAIM_DIV_42 + * @arg @ref LL_RCC_PLLSAIM_DIV_43 + * @arg @ref LL_RCC_PLLSAIM_DIV_44 + * @arg @ref LL_RCC_PLLSAIM_DIV_45 + * @arg @ref LL_RCC_PLLSAIM_DIV_46 + * @arg @ref LL_RCC_PLLSAIM_DIV_47 + * @arg @ref LL_RCC_PLLSAIM_DIV_48 + * @arg @ref LL_RCC_PLLSAIM_DIV_49 + * @arg @ref LL_RCC_PLLSAIM_DIV_50 + * @arg @ref LL_RCC_PLLSAIM_DIV_51 + * @arg @ref LL_RCC_PLLSAIM_DIV_52 + * @arg @ref LL_RCC_PLLSAIM_DIV_53 + * @arg @ref LL_RCC_PLLSAIM_DIV_54 + * @arg @ref LL_RCC_PLLSAIM_DIV_55 + * @arg @ref LL_RCC_PLLSAIM_DIV_56 + * @arg @ref LL_RCC_PLLSAIM_DIV_57 + * @arg @ref LL_RCC_PLLSAIM_DIV_58 + * @arg @ref LL_RCC_PLLSAIM_DIV_59 + * @arg @ref LL_RCC_PLLSAIM_DIV_60 + * @arg @ref LL_RCC_PLLSAIM_DIV_61 + * @arg @ref LL_RCC_PLLSAIM_DIV_62 + * @arg @ref LL_RCC_PLLSAIM_DIV_63 + * @param __PLLSAIN__ Between 50 and 432 + * @param __PLLSAIP__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIP_DIV_2 + * @arg @ref LL_RCC_PLLSAIP_DIV_4 + * @arg @ref LL_RCC_PLLSAIP_DIV_6 + * @arg @ref LL_RCC_PLLSAIP_DIV_8 + * @retval PLLSAI clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PLLSAI_48M_FREQ(__INPUTFREQ__, __PLLM__, __PLLSAIN__, __PLLSAIP__) (((__INPUTFREQ__) / (__PLLM__)) * (__PLLSAIN__) / \ + ((((__PLLSAIP__) >> RCC_PLLSAICFGR_PLLSAIP_Pos) + 1U) * 2U)) +#endif /* RCC_PLLSAICFGR_PLLSAIP */ + +#if defined(LTDC) +/** + * @brief Helper macro to calculate the PLLSAI frequency used for LTDC domain + * @note ex: @ref __LL_RCC_CALC_PLLSAI_LTDC_FREQ (HSE_VALUE,@ref LL_RCC_PLLSAI_GetDivider (), + * @ref LL_RCC_PLLSAI_GetN (), @ref LL_RCC_PLLSAI_GetR (), @ref LL_RCC_PLLSAI_GetDIVR ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIM_DIV_2 + * @arg @ref LL_RCC_PLLSAIM_DIV_3 + * @arg @ref LL_RCC_PLLSAIM_DIV_4 + * @arg @ref LL_RCC_PLLSAIM_DIV_5 + * @arg @ref LL_RCC_PLLSAIM_DIV_6 + * @arg @ref LL_RCC_PLLSAIM_DIV_7 + * @arg @ref LL_RCC_PLLSAIM_DIV_8 + * @arg @ref LL_RCC_PLLSAIM_DIV_9 + * @arg @ref LL_RCC_PLLSAIM_DIV_10 + * @arg @ref LL_RCC_PLLSAIM_DIV_11 + * @arg @ref LL_RCC_PLLSAIM_DIV_12 + * @arg @ref LL_RCC_PLLSAIM_DIV_13 + * @arg @ref LL_RCC_PLLSAIM_DIV_14 + * @arg @ref LL_RCC_PLLSAIM_DIV_15 + * @arg @ref LL_RCC_PLLSAIM_DIV_16 + * @arg @ref LL_RCC_PLLSAIM_DIV_17 + * @arg @ref LL_RCC_PLLSAIM_DIV_18 + * @arg @ref LL_RCC_PLLSAIM_DIV_19 + * @arg @ref LL_RCC_PLLSAIM_DIV_20 + * @arg @ref LL_RCC_PLLSAIM_DIV_21 + * @arg @ref LL_RCC_PLLSAIM_DIV_22 + * @arg @ref LL_RCC_PLLSAIM_DIV_23 + * @arg @ref LL_RCC_PLLSAIM_DIV_24 + * @arg @ref LL_RCC_PLLSAIM_DIV_25 + * @arg @ref LL_RCC_PLLSAIM_DIV_26 + * @arg @ref LL_RCC_PLLSAIM_DIV_27 + * @arg @ref LL_RCC_PLLSAIM_DIV_28 + * @arg @ref LL_RCC_PLLSAIM_DIV_29 + * @arg @ref LL_RCC_PLLSAIM_DIV_30 + * @arg @ref LL_RCC_PLLSAIM_DIV_31 + * @arg @ref LL_RCC_PLLSAIM_DIV_32 + * @arg @ref LL_RCC_PLLSAIM_DIV_33 + * @arg @ref LL_RCC_PLLSAIM_DIV_34 + * @arg @ref LL_RCC_PLLSAIM_DIV_35 + * @arg @ref LL_RCC_PLLSAIM_DIV_36 + * @arg @ref LL_RCC_PLLSAIM_DIV_37 + * 
@arg @ref LL_RCC_PLLSAIM_DIV_38 + * @arg @ref LL_RCC_PLLSAIM_DIV_39 + * @arg @ref LL_RCC_PLLSAIM_DIV_40 + * @arg @ref LL_RCC_PLLSAIM_DIV_41 + * @arg @ref LL_RCC_PLLSAIM_DIV_42 + * @arg @ref LL_RCC_PLLSAIM_DIV_43 + * @arg @ref LL_RCC_PLLSAIM_DIV_44 + * @arg @ref LL_RCC_PLLSAIM_DIV_45 + * @arg @ref LL_RCC_PLLSAIM_DIV_46 + * @arg @ref LL_RCC_PLLSAIM_DIV_47 + * @arg @ref LL_RCC_PLLSAIM_DIV_48 + * @arg @ref LL_RCC_PLLSAIM_DIV_49 + * @arg @ref LL_RCC_PLLSAIM_DIV_50 + * @arg @ref LL_RCC_PLLSAIM_DIV_51 + * @arg @ref LL_RCC_PLLSAIM_DIV_52 + * @arg @ref LL_RCC_PLLSAIM_DIV_53 + * @arg @ref LL_RCC_PLLSAIM_DIV_54 + * @arg @ref LL_RCC_PLLSAIM_DIV_55 + * @arg @ref LL_RCC_PLLSAIM_DIV_56 + * @arg @ref LL_RCC_PLLSAIM_DIV_57 + * @arg @ref LL_RCC_PLLSAIM_DIV_58 + * @arg @ref LL_RCC_PLLSAIM_DIV_59 + * @arg @ref LL_RCC_PLLSAIM_DIV_60 + * @arg @ref LL_RCC_PLLSAIM_DIV_61 + * @arg @ref LL_RCC_PLLSAIM_DIV_62 + * @arg @ref LL_RCC_PLLSAIM_DIV_63 + * @param __PLLSAIN__ Between 49/50(*) and 432 + * + * (*) value not defined in all devices. + * @param __PLLSAIR__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIR_DIV_2 + * @arg @ref LL_RCC_PLLSAIR_DIV_3 + * @arg @ref LL_RCC_PLLSAIR_DIV_4 + * @arg @ref LL_RCC_PLLSAIR_DIV_5 + * @arg @ref LL_RCC_PLLSAIR_DIV_6 + * @arg @ref LL_RCC_PLLSAIR_DIV_7 + * @param __PLLSAIDIVR__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIDIVR_DIV_2 + * @arg @ref LL_RCC_PLLSAIDIVR_DIV_4 + * @arg @ref LL_RCC_PLLSAIDIVR_DIV_8 + * @arg @ref LL_RCC_PLLSAIDIVR_DIV_16 + * @retval PLLSAI clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PLLSAI_LTDC_FREQ(__INPUTFREQ__, __PLLM__, __PLLSAIN__, __PLLSAIR__, __PLLSAIDIVR__) (((__INPUTFREQ__) / (__PLLM__)) * (__PLLSAIN__) / \ + (((__PLLSAIR__) >> RCC_PLLSAICFGR_PLLSAIR_Pos) * (aRCC_PLLSAIDIVRPrescTable[(__PLLSAIDIVR__) >> RCC_DCKCFGR_PLLSAIDIVR_Pos]))) +#endif /* LTDC */ +#endif /* RCC_PLLSAI_SUPPORT */ + +#if defined(RCC_PLLI2S_SUPPORT) +#if defined(RCC_DCKCFGR_PLLI2SDIVQ) || defined(RCC_DCKCFGR_PLLI2SDIVR) +/** + * @brief Helper macro to calculate the PLLI2S frequency used for SAI domain + * @note ex: @ref __LL_RCC_CALC_PLLI2S_SAI_FREQ (HSE_VALUE,@ref LL_RCC_PLLI2S_GetDivider (), + * @ref LL_RCC_PLLI2S_GetN (), @ref LL_RCC_PLLI2S_GetQ (), @ref LL_RCC_PLLI2S_GetDIVQ ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SM_DIV_2 + * @arg @ref LL_RCC_PLLI2SM_DIV_3 + * @arg @ref LL_RCC_PLLI2SM_DIV_4 + * @arg @ref LL_RCC_PLLI2SM_DIV_5 + * @arg @ref LL_RCC_PLLI2SM_DIV_6 + * @arg @ref LL_RCC_PLLI2SM_DIV_7 + * @arg @ref LL_RCC_PLLI2SM_DIV_8 + * @arg @ref LL_RCC_PLLI2SM_DIV_9 + * @arg @ref LL_RCC_PLLI2SM_DIV_10 + * @arg @ref LL_RCC_PLLI2SM_DIV_11 + * @arg @ref LL_RCC_PLLI2SM_DIV_12 + * @arg @ref LL_RCC_PLLI2SM_DIV_13 + * @arg @ref LL_RCC_PLLI2SM_DIV_14 + * @arg @ref LL_RCC_PLLI2SM_DIV_15 + * @arg @ref LL_RCC_PLLI2SM_DIV_16 + * @arg @ref LL_RCC_PLLI2SM_DIV_17 + * @arg @ref LL_RCC_PLLI2SM_DIV_18 + * @arg @ref LL_RCC_PLLI2SM_DIV_19 + * @arg @ref LL_RCC_PLLI2SM_DIV_20 + * @arg @ref LL_RCC_PLLI2SM_DIV_21 + * @arg @ref LL_RCC_PLLI2SM_DIV_22 + * @arg @ref LL_RCC_PLLI2SM_DIV_23 + * @arg @ref LL_RCC_PLLI2SM_DIV_24 + * @arg @ref LL_RCC_PLLI2SM_DIV_25 + * @arg @ref LL_RCC_PLLI2SM_DIV_26 + * @arg @ref LL_RCC_PLLI2SM_DIV_27 + * @arg @ref LL_RCC_PLLI2SM_DIV_28 + * @arg @ref LL_RCC_PLLI2SM_DIV_29 + * @arg @ref LL_RCC_PLLI2SM_DIV_30 + * @arg @ref LL_RCC_PLLI2SM_DIV_31 + * @arg @ref LL_RCC_PLLI2SM_DIV_32 + * 
@arg @ref LL_RCC_PLLI2SM_DIV_33 + * @arg @ref LL_RCC_PLLI2SM_DIV_34 + * @arg @ref LL_RCC_PLLI2SM_DIV_35 + * @arg @ref LL_RCC_PLLI2SM_DIV_36 + * @arg @ref LL_RCC_PLLI2SM_DIV_37 + * @arg @ref LL_RCC_PLLI2SM_DIV_38 + * @arg @ref LL_RCC_PLLI2SM_DIV_39 + * @arg @ref LL_RCC_PLLI2SM_DIV_40 + * @arg @ref LL_RCC_PLLI2SM_DIV_41 + * @arg @ref LL_RCC_PLLI2SM_DIV_42 + * @arg @ref LL_RCC_PLLI2SM_DIV_43 + * @arg @ref LL_RCC_PLLI2SM_DIV_44 + * @arg @ref LL_RCC_PLLI2SM_DIV_45 + * @arg @ref LL_RCC_PLLI2SM_DIV_46 + * @arg @ref LL_RCC_PLLI2SM_DIV_47 + * @arg @ref LL_RCC_PLLI2SM_DIV_48 + * @arg @ref LL_RCC_PLLI2SM_DIV_49 + * @arg @ref LL_RCC_PLLI2SM_DIV_50 + * @arg @ref LL_RCC_PLLI2SM_DIV_51 + * @arg @ref LL_RCC_PLLI2SM_DIV_52 + * @arg @ref LL_RCC_PLLI2SM_DIV_53 + * @arg @ref LL_RCC_PLLI2SM_DIV_54 + * @arg @ref LL_RCC_PLLI2SM_DIV_55 + * @arg @ref LL_RCC_PLLI2SM_DIV_56 + * @arg @ref LL_RCC_PLLI2SM_DIV_57 + * @arg @ref LL_RCC_PLLI2SM_DIV_58 + * @arg @ref LL_RCC_PLLI2SM_DIV_59 + * @arg @ref LL_RCC_PLLI2SM_DIV_60 + * @arg @ref LL_RCC_PLLI2SM_DIV_61 + * @arg @ref LL_RCC_PLLI2SM_DIV_62 + * @arg @ref LL_RCC_PLLI2SM_DIV_63 + * @param __PLLI2SN__ Between 50/192(*) and 432 + * + * (*) value not defined in all devices. + * @param __PLLI2SQ_R__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SQ_DIV_2 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_3 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_4 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_5 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_6 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_7 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_8 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_9 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_10 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_11 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_12 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_13 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_14 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_15 (*) + * @arg @ref LL_RCC_PLLI2SR_DIV_2 (*) + * @arg @ref LL_RCC_PLLI2SR_DIV_3 (*) + * @arg @ref LL_RCC_PLLI2SR_DIV_4 (*) + * @arg @ref LL_RCC_PLLI2SR_DIV_5 (*) + * @arg @ref LL_RCC_PLLI2SR_DIV_6 (*) + * @arg @ref LL_RCC_PLLI2SR_DIV_7 (*) + * + * (*) value not defined in all devices. 
+ * @param __PLLI2SDIVQ_R__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_1 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_2 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_3 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_4 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_5 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_6 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_7 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_8 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_9 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_10 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_11 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_12 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_13 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_14 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_15 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_16 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_17 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_18 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_19 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_20 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_21 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_22 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_23 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_24 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_25 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_26 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_27 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_28 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_29 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_30 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_31 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_32 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_1 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_2 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_3 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_4 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_5 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_6 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_7 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_8 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_9 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_10 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_11 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_12 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_13 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_14 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_15 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_16 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_17 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_18 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_19 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_20 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_21 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_22 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_23 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_24 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_25 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_26 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_27 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_28 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_29 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_30 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_31 (*) + * + * (*) value not defined in all devices. 
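+ * @note Worked example (illustrative; values are assumptions): with an 8 MHz HSE,
+ *       __PLLM__ = LL_RCC_PLLI2SM_DIV_8, __PLLI2SN__ = 192, __PLLI2SQ_R__ = LL_RCC_PLLI2SQ_DIV_4
+ *       and __PLLI2SDIVQ_R__ = LL_RCC_PLLI2SDIVQ_DIV_1, the macro computes
+ *       (8 MHz / 8) * 192 / (4 * 1) = 48 MHz.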
+ * @retval PLLI2S clock frequency (in Hz) + */ +#if defined(RCC_DCKCFGR_PLLI2SDIVQ) +#define __LL_RCC_CALC_PLLI2S_SAI_FREQ(__INPUTFREQ__, __PLLM__, __PLLI2SN__, __PLLI2SQ_R__, __PLLI2SDIVQ_R__) (((__INPUTFREQ__) / (__PLLM__)) * (__PLLI2SN__) / \ + (((__PLLI2SQ_R__) >> RCC_PLLI2SCFGR_PLLI2SQ_Pos) * (((__PLLI2SDIVQ_R__) >> RCC_DCKCFGR_PLLI2SDIVQ_Pos) + 1U))) +#else +#define __LL_RCC_CALC_PLLI2S_SAI_FREQ(__INPUTFREQ__, __PLLM__, __PLLI2SN__, __PLLI2SQ_R__, __PLLI2SDIVQ_R__) (((__INPUTFREQ__) / (__PLLM__)) * (__PLLI2SN__) / \ + (((__PLLI2SQ_R__) >> RCC_PLLI2SCFGR_PLLI2SR_Pos) * ((__PLLI2SDIVQ_R__) >> RCC_DCKCFGR_PLLI2SDIVR_Pos))) + +#endif /* RCC_DCKCFGR_PLLI2SDIVQ */ +#endif /* RCC_DCKCFGR_PLLI2SDIVQ || RCC_DCKCFGR_PLLI2SDIVR */ + +#if defined(SPDIFRX) +/** + * @brief Helper macro to calculate the PLLI2S frequency used on SPDIFRX domain + * @note ex: @ref __LL_RCC_CALC_PLLI2S_SPDIFRX_FREQ (HSE_VALUE,@ref LL_RCC_PLLI2S_GetDivider (), + * @ref LL_RCC_PLLI2S_GetN (), @ref LL_RCC_PLLI2S_GetP ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SM_DIV_2 + * @arg @ref LL_RCC_PLLI2SM_DIV_3 + * @arg @ref LL_RCC_PLLI2SM_DIV_4 + * @arg @ref LL_RCC_PLLI2SM_DIV_5 + * @arg @ref LL_RCC_PLLI2SM_DIV_6 + * @arg @ref LL_RCC_PLLI2SM_DIV_7 + * @arg @ref LL_RCC_PLLI2SM_DIV_8 + * @arg @ref LL_RCC_PLLI2SM_DIV_9 + * @arg @ref LL_RCC_PLLI2SM_DIV_10 + * @arg @ref LL_RCC_PLLI2SM_DIV_11 + * @arg @ref LL_RCC_PLLI2SM_DIV_12 + * @arg @ref LL_RCC_PLLI2SM_DIV_13 + * @arg @ref LL_RCC_PLLI2SM_DIV_14 + * @arg @ref LL_RCC_PLLI2SM_DIV_15 + * @arg @ref LL_RCC_PLLI2SM_DIV_16 + * @arg @ref LL_RCC_PLLI2SM_DIV_17 + * @arg @ref LL_RCC_PLLI2SM_DIV_18 + * @arg @ref LL_RCC_PLLI2SM_DIV_19 + * @arg @ref LL_RCC_PLLI2SM_DIV_20 + * @arg @ref LL_RCC_PLLI2SM_DIV_21 + * @arg @ref LL_RCC_PLLI2SM_DIV_22 + * @arg @ref LL_RCC_PLLI2SM_DIV_23 + * @arg @ref LL_RCC_PLLI2SM_DIV_24 + * @arg @ref LL_RCC_PLLI2SM_DIV_25 + * @arg @ref LL_RCC_PLLI2SM_DIV_26 + * @arg @ref LL_RCC_PLLI2SM_DIV_27 + * @arg @ref LL_RCC_PLLI2SM_DIV_28 + * @arg @ref LL_RCC_PLLI2SM_DIV_29 + * @arg @ref LL_RCC_PLLI2SM_DIV_30 + * @arg @ref LL_RCC_PLLI2SM_DIV_31 + * @arg @ref LL_RCC_PLLI2SM_DIV_32 + * @arg @ref LL_RCC_PLLI2SM_DIV_33 + * @arg @ref LL_RCC_PLLI2SM_DIV_34 + * @arg @ref LL_RCC_PLLI2SM_DIV_35 + * @arg @ref LL_RCC_PLLI2SM_DIV_36 + * @arg @ref LL_RCC_PLLI2SM_DIV_37 + * @arg @ref LL_RCC_PLLI2SM_DIV_38 + * @arg @ref LL_RCC_PLLI2SM_DIV_39 + * @arg @ref LL_RCC_PLLI2SM_DIV_40 + * @arg @ref LL_RCC_PLLI2SM_DIV_41 + * @arg @ref LL_RCC_PLLI2SM_DIV_42 + * @arg @ref LL_RCC_PLLI2SM_DIV_43 + * @arg @ref LL_RCC_PLLI2SM_DIV_44 + * @arg @ref LL_RCC_PLLI2SM_DIV_45 + * @arg @ref LL_RCC_PLLI2SM_DIV_46 + * @arg @ref LL_RCC_PLLI2SM_DIV_47 + * @arg @ref LL_RCC_PLLI2SM_DIV_48 + * @arg @ref LL_RCC_PLLI2SM_DIV_49 + * @arg @ref LL_RCC_PLLI2SM_DIV_50 + * @arg @ref LL_RCC_PLLI2SM_DIV_51 + * @arg @ref LL_RCC_PLLI2SM_DIV_52 + * @arg @ref LL_RCC_PLLI2SM_DIV_53 + * @arg @ref LL_RCC_PLLI2SM_DIV_54 + * @arg @ref LL_RCC_PLLI2SM_DIV_55 + * @arg @ref LL_RCC_PLLI2SM_DIV_56 + * @arg @ref LL_RCC_PLLI2SM_DIV_57 + * @arg @ref LL_RCC_PLLI2SM_DIV_58 + * @arg @ref LL_RCC_PLLI2SM_DIV_59 + * @arg @ref LL_RCC_PLLI2SM_DIV_60 + * @arg @ref LL_RCC_PLLI2SM_DIV_61 + * @arg @ref LL_RCC_PLLI2SM_DIV_62 + * @arg @ref LL_RCC_PLLI2SM_DIV_63 + * @param __PLLI2SN__ Between 50 and 432 + * @param __PLLI2SP__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SP_DIV_2 + * @arg @ref LL_RCC_PLLI2SP_DIV_4 + * 
@arg @ref LL_RCC_PLLI2SP_DIV_6 + * @arg @ref LL_RCC_PLLI2SP_DIV_8 + * @retval PLLI2S clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PLLI2S_SPDIFRX_FREQ(__INPUTFREQ__, __PLLM__, __PLLI2SN__, __PLLI2SP__) (((__INPUTFREQ__) / (__PLLM__)) * (__PLLI2SN__) / \ + ((((__PLLI2SP__) >> RCC_PLLI2SCFGR_PLLI2SP_Pos) + 1U) * 2U)) + +#endif /* SPDIFRX */ + +/** + * @brief Helper macro to calculate the PLLI2S frequency used for I2S domain + * @note ex: @ref __LL_RCC_CALC_PLLI2S_I2S_FREQ (HSE_VALUE,@ref LL_RCC_PLLI2S_GetDivider (), + * @ref LL_RCC_PLLI2S_GetN (), @ref LL_RCC_PLLI2S_GetR ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SM_DIV_2 + * @arg @ref LL_RCC_PLLI2SM_DIV_3 + * @arg @ref LL_RCC_PLLI2SM_DIV_4 + * @arg @ref LL_RCC_PLLI2SM_DIV_5 + * @arg @ref LL_RCC_PLLI2SM_DIV_6 + * @arg @ref LL_RCC_PLLI2SM_DIV_7 + * @arg @ref LL_RCC_PLLI2SM_DIV_8 + * @arg @ref LL_RCC_PLLI2SM_DIV_9 + * @arg @ref LL_RCC_PLLI2SM_DIV_10 + * @arg @ref LL_RCC_PLLI2SM_DIV_11 + * @arg @ref LL_RCC_PLLI2SM_DIV_12 + * @arg @ref LL_RCC_PLLI2SM_DIV_13 + * @arg @ref LL_RCC_PLLI2SM_DIV_14 + * @arg @ref LL_RCC_PLLI2SM_DIV_15 + * @arg @ref LL_RCC_PLLI2SM_DIV_16 + * @arg @ref LL_RCC_PLLI2SM_DIV_17 + * @arg @ref LL_RCC_PLLI2SM_DIV_18 + * @arg @ref LL_RCC_PLLI2SM_DIV_19 + * @arg @ref LL_RCC_PLLI2SM_DIV_20 + * @arg @ref LL_RCC_PLLI2SM_DIV_21 + * @arg @ref LL_RCC_PLLI2SM_DIV_22 + * @arg @ref LL_RCC_PLLI2SM_DIV_23 + * @arg @ref LL_RCC_PLLI2SM_DIV_24 + * @arg @ref LL_RCC_PLLI2SM_DIV_25 + * @arg @ref LL_RCC_PLLI2SM_DIV_26 + * @arg @ref LL_RCC_PLLI2SM_DIV_27 + * @arg @ref LL_RCC_PLLI2SM_DIV_28 + * @arg @ref LL_RCC_PLLI2SM_DIV_29 + * @arg @ref LL_RCC_PLLI2SM_DIV_30 + * @arg @ref LL_RCC_PLLI2SM_DIV_31 + * @arg @ref LL_RCC_PLLI2SM_DIV_32 + * @arg @ref LL_RCC_PLLI2SM_DIV_33 + * @arg @ref LL_RCC_PLLI2SM_DIV_34 + * @arg @ref LL_RCC_PLLI2SM_DIV_35 + * @arg @ref LL_RCC_PLLI2SM_DIV_36 + * @arg @ref LL_RCC_PLLI2SM_DIV_37 + * @arg @ref LL_RCC_PLLI2SM_DIV_38 + * @arg @ref LL_RCC_PLLI2SM_DIV_39 + * @arg @ref LL_RCC_PLLI2SM_DIV_40 + * @arg @ref LL_RCC_PLLI2SM_DIV_41 + * @arg @ref LL_RCC_PLLI2SM_DIV_42 + * @arg @ref LL_RCC_PLLI2SM_DIV_43 + * @arg @ref LL_RCC_PLLI2SM_DIV_44 + * @arg @ref LL_RCC_PLLI2SM_DIV_45 + * @arg @ref LL_RCC_PLLI2SM_DIV_46 + * @arg @ref LL_RCC_PLLI2SM_DIV_47 + * @arg @ref LL_RCC_PLLI2SM_DIV_48 + * @arg @ref LL_RCC_PLLI2SM_DIV_49 + * @arg @ref LL_RCC_PLLI2SM_DIV_50 + * @arg @ref LL_RCC_PLLI2SM_DIV_51 + * @arg @ref LL_RCC_PLLI2SM_DIV_52 + * @arg @ref LL_RCC_PLLI2SM_DIV_53 + * @arg @ref LL_RCC_PLLI2SM_DIV_54 + * @arg @ref LL_RCC_PLLI2SM_DIV_55 + * @arg @ref LL_RCC_PLLI2SM_DIV_56 + * @arg @ref LL_RCC_PLLI2SM_DIV_57 + * @arg @ref LL_RCC_PLLI2SM_DIV_58 + * @arg @ref LL_RCC_PLLI2SM_DIV_59 + * @arg @ref LL_RCC_PLLI2SM_DIV_60 + * @arg @ref LL_RCC_PLLI2SM_DIV_61 + * @arg @ref LL_RCC_PLLI2SM_DIV_62 + * @arg @ref LL_RCC_PLLI2SM_DIV_63 + * @param __PLLI2SN__ Between 50/192(*) and 432 + * + * (*) value not defined in all devices. 
+ * @param __PLLI2SR__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SR_DIV_2 + * @arg @ref LL_RCC_PLLI2SR_DIV_3 + * @arg @ref LL_RCC_PLLI2SR_DIV_4 + * @arg @ref LL_RCC_PLLI2SR_DIV_5 + * @arg @ref LL_RCC_PLLI2SR_DIV_6 + * @arg @ref LL_RCC_PLLI2SR_DIV_7 + * @retval PLLI2S clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PLLI2S_I2S_FREQ(__INPUTFREQ__, __PLLM__, __PLLI2SN__, __PLLI2SR__) (((__INPUTFREQ__) / (__PLLM__)) * (__PLLI2SN__) / \ + ((__PLLI2SR__) >> RCC_PLLI2SCFGR_PLLI2SR_Pos)) + +#if defined(RCC_PLLI2SCFGR_PLLI2SQ) && !defined(RCC_DCKCFGR_PLLI2SDIVQ) +/** + * @brief Helper macro to calculate the PLLI2S frequency used for 48Mhz domain + * @note ex: @ref __LL_RCC_CALC_PLLI2S_48M_FREQ (HSE_VALUE,@ref LL_RCC_PLLI2S_GetDivider (), + * @ref LL_RCC_PLLI2S_GetN (), @ref LL_RCC_PLLI2S_GetQ ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SM_DIV_2 + * @arg @ref LL_RCC_PLLI2SM_DIV_3 + * @arg @ref LL_RCC_PLLI2SM_DIV_4 + * @arg @ref LL_RCC_PLLI2SM_DIV_5 + * @arg @ref LL_RCC_PLLI2SM_DIV_6 + * @arg @ref LL_RCC_PLLI2SM_DIV_7 + * @arg @ref LL_RCC_PLLI2SM_DIV_8 + * @arg @ref LL_RCC_PLLI2SM_DIV_9 + * @arg @ref LL_RCC_PLLI2SM_DIV_10 + * @arg @ref LL_RCC_PLLI2SM_DIV_11 + * @arg @ref LL_RCC_PLLI2SM_DIV_12 + * @arg @ref LL_RCC_PLLI2SM_DIV_13 + * @arg @ref LL_RCC_PLLI2SM_DIV_14 + * @arg @ref LL_RCC_PLLI2SM_DIV_15 + * @arg @ref LL_RCC_PLLI2SM_DIV_16 + * @arg @ref LL_RCC_PLLI2SM_DIV_17 + * @arg @ref LL_RCC_PLLI2SM_DIV_18 + * @arg @ref LL_RCC_PLLI2SM_DIV_19 + * @arg @ref LL_RCC_PLLI2SM_DIV_20 + * @arg @ref LL_RCC_PLLI2SM_DIV_21 + * @arg @ref LL_RCC_PLLI2SM_DIV_22 + * @arg @ref LL_RCC_PLLI2SM_DIV_23 + * @arg @ref LL_RCC_PLLI2SM_DIV_24 + * @arg @ref LL_RCC_PLLI2SM_DIV_25 + * @arg @ref LL_RCC_PLLI2SM_DIV_26 + * @arg @ref LL_RCC_PLLI2SM_DIV_27 + * @arg @ref LL_RCC_PLLI2SM_DIV_28 + * @arg @ref LL_RCC_PLLI2SM_DIV_29 + * @arg @ref LL_RCC_PLLI2SM_DIV_30 + * @arg @ref LL_RCC_PLLI2SM_DIV_31 + * @arg @ref LL_RCC_PLLI2SM_DIV_32 + * @arg @ref LL_RCC_PLLI2SM_DIV_33 + * @arg @ref LL_RCC_PLLI2SM_DIV_34 + * @arg @ref LL_RCC_PLLI2SM_DIV_35 + * @arg @ref LL_RCC_PLLI2SM_DIV_36 + * @arg @ref LL_RCC_PLLI2SM_DIV_37 + * @arg @ref LL_RCC_PLLI2SM_DIV_38 + * @arg @ref LL_RCC_PLLI2SM_DIV_39 + * @arg @ref LL_RCC_PLLI2SM_DIV_40 + * @arg @ref LL_RCC_PLLI2SM_DIV_41 + * @arg @ref LL_RCC_PLLI2SM_DIV_42 + * @arg @ref LL_RCC_PLLI2SM_DIV_43 + * @arg @ref LL_RCC_PLLI2SM_DIV_44 + * @arg @ref LL_RCC_PLLI2SM_DIV_45 + * @arg @ref LL_RCC_PLLI2SM_DIV_46 + * @arg @ref LL_RCC_PLLI2SM_DIV_47 + * @arg @ref LL_RCC_PLLI2SM_DIV_48 + * @arg @ref LL_RCC_PLLI2SM_DIV_49 + * @arg @ref LL_RCC_PLLI2SM_DIV_50 + * @arg @ref LL_RCC_PLLI2SM_DIV_51 + * @arg @ref LL_RCC_PLLI2SM_DIV_52 + * @arg @ref LL_RCC_PLLI2SM_DIV_53 + * @arg @ref LL_RCC_PLLI2SM_DIV_54 + * @arg @ref LL_RCC_PLLI2SM_DIV_55 + * @arg @ref LL_RCC_PLLI2SM_DIV_56 + * @arg @ref LL_RCC_PLLI2SM_DIV_57 + * @arg @ref LL_RCC_PLLI2SM_DIV_58 + * @arg @ref LL_RCC_PLLI2SM_DIV_59 + * @arg @ref LL_RCC_PLLI2SM_DIV_60 + * @arg @ref LL_RCC_PLLI2SM_DIV_61 + * @arg @ref LL_RCC_PLLI2SM_DIV_62 + * @arg @ref LL_RCC_PLLI2SM_DIV_63 + * @param __PLLI2SN__ Between 50 and 432 + * @param __PLLI2SQ__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SQ_DIV_2 + * @arg @ref LL_RCC_PLLI2SQ_DIV_3 + * @arg @ref LL_RCC_PLLI2SQ_DIV_4 + * @arg @ref LL_RCC_PLLI2SQ_DIV_5 + * @arg @ref LL_RCC_PLLI2SQ_DIV_6 + * @arg @ref LL_RCC_PLLI2SQ_DIV_7 + * @arg @ref 
LL_RCC_PLLI2SQ_DIV_8 + * @arg @ref LL_RCC_PLLI2SQ_DIV_9 + * @arg @ref LL_RCC_PLLI2SQ_DIV_10 + * @arg @ref LL_RCC_PLLI2SQ_DIV_11 + * @arg @ref LL_RCC_PLLI2SQ_DIV_12 + * @arg @ref LL_RCC_PLLI2SQ_DIV_13 + * @arg @ref LL_RCC_PLLI2SQ_DIV_14 + * @arg @ref LL_RCC_PLLI2SQ_DIV_15 + * @retval PLLI2S clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PLLI2S_48M_FREQ(__INPUTFREQ__, __PLLM__, __PLLI2SN__, __PLLI2SQ__) (((__INPUTFREQ__) / (__PLLM__)) * (__PLLI2SN__) / \ + ((__PLLI2SQ__) >> RCC_PLLI2SCFGR_PLLI2SQ_Pos)) + +#endif /* RCC_PLLI2SCFGR_PLLI2SQ && !RCC_DCKCFGR_PLLI2SDIVQ */ +#endif /* RCC_PLLI2S_SUPPORT */ + +/** + * @brief Helper macro to calculate the HCLK frequency + * @param __SYSCLKFREQ__ SYSCLK frequency (based on HSE/HSI/PLLCLK) + * @param __AHBPRESCALER__ This parameter can be one of the following values: + * @arg @ref LL_RCC_SYSCLK_DIV_1 + * @arg @ref LL_RCC_SYSCLK_DIV_2 + * @arg @ref LL_RCC_SYSCLK_DIV_4 + * @arg @ref LL_RCC_SYSCLK_DIV_8 + * @arg @ref LL_RCC_SYSCLK_DIV_16 + * @arg @ref LL_RCC_SYSCLK_DIV_64 + * @arg @ref LL_RCC_SYSCLK_DIV_128 + * @arg @ref LL_RCC_SYSCLK_DIV_256 + * @arg @ref LL_RCC_SYSCLK_DIV_512 + * @retval HCLK clock frequency (in Hz) + */ +#define __LL_RCC_CALC_HCLK_FREQ(__SYSCLKFREQ__, __AHBPRESCALER__) ((__SYSCLKFREQ__) >> AHBPrescTable[((__AHBPRESCALER__) &\ + RCC_CFGR_HPRE) >> RCC_CFGR_HPRE_Pos]) + +/** + * @brief Helper macro to calculate the PCLK1 frequency (ABP1) + * @param __HCLKFREQ__ HCLK frequency + * @param __APB1PRESCALER__ This parameter can be one of the following values: + * @arg @ref LL_RCC_APB1_DIV_1 + * @arg @ref LL_RCC_APB1_DIV_2 + * @arg @ref LL_RCC_APB1_DIV_4 + * @arg @ref LL_RCC_APB1_DIV_8 + * @arg @ref LL_RCC_APB1_DIV_16 + * @retval PCLK1 clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PCLK1_FREQ(__HCLKFREQ__, __APB1PRESCALER__) ((__HCLKFREQ__) >> APBPrescTable[(__APB1PRESCALER__) >> RCC_CFGR_PPRE1_Pos]) + +/** + * @brief Helper macro to calculate the PCLK2 frequency (ABP2) + * @param __HCLKFREQ__ HCLK frequency + * @param __APB2PRESCALER__ This parameter can be one of the following values: + * @arg @ref LL_RCC_APB2_DIV_1 + * @arg @ref LL_RCC_APB2_DIV_2 + * @arg @ref LL_RCC_APB2_DIV_4 + * @arg @ref LL_RCC_APB2_DIV_8 + * @arg @ref LL_RCC_APB2_DIV_16 + * @retval PCLK2 clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PCLK2_FREQ(__HCLKFREQ__, __APB2PRESCALER__) ((__HCLKFREQ__) >> APBPrescTable[(__APB2PRESCALER__) >> RCC_CFGR_PPRE2_Pos]) + +/** + * @} + */ + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup RCC_LL_Exported_Functions RCC Exported Functions + * @{ + */ + +/** @defgroup RCC_LL_EF_HSE HSE + * @{ + */ + +/** + * @brief Enable the Clock Security System. 
+ * @rmtoll CR CSSON LL_RCC_HSE_EnableCSS + * @retval None + */ +__STATIC_INLINE void LL_RCC_HSE_EnableCSS(void) +{ + SET_BIT(RCC->CR, RCC_CR_CSSON); +} + +/** + * @brief Enable HSE external oscillator (HSE Bypass) + * @rmtoll CR HSEBYP LL_RCC_HSE_EnableBypass + * @retval None + */ +__STATIC_INLINE void LL_RCC_HSE_EnableBypass(void) +{ + SET_BIT(RCC->CR, RCC_CR_HSEBYP); +} + +/** + * @brief Disable HSE external oscillator (HSE Bypass) + * @rmtoll CR HSEBYP LL_RCC_HSE_DisableBypass + * @retval None + */ +__STATIC_INLINE void LL_RCC_HSE_DisableBypass(void) +{ + CLEAR_BIT(RCC->CR, RCC_CR_HSEBYP); +} + +/** + * @brief Enable HSE crystal oscillator (HSE ON) + * @rmtoll CR HSEON LL_RCC_HSE_Enable + * @retval None + */ +__STATIC_INLINE void LL_RCC_HSE_Enable(void) +{ + SET_BIT(RCC->CR, RCC_CR_HSEON); +} + +/** + * @brief Disable HSE crystal oscillator (HSE ON) + * @rmtoll CR HSEON LL_RCC_HSE_Disable + * @retval None + */ +__STATIC_INLINE void LL_RCC_HSE_Disable(void) +{ + CLEAR_BIT(RCC->CR, RCC_CR_HSEON); +} + +/** + * @brief Check if HSE oscillator Ready + * @rmtoll CR HSERDY LL_RCC_HSE_IsReady + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_HSE_IsReady(void) +{ + return (READ_BIT(RCC->CR, RCC_CR_HSERDY) == (RCC_CR_HSERDY)); +} + +/** + * @} + */ + +/** @defgroup RCC_LL_EF_HSI HSI + * @{ + */ + +/** + * @brief Enable HSI oscillator + * @rmtoll CR HSION LL_RCC_HSI_Enable + * @retval None + */ +__STATIC_INLINE void LL_RCC_HSI_Enable(void) +{ + SET_BIT(RCC->CR, RCC_CR_HSION); +} + +/** + * @brief Disable HSI oscillator + * @rmtoll CR HSION LL_RCC_HSI_Disable + * @retval None + */ +__STATIC_INLINE void LL_RCC_HSI_Disable(void) +{ + CLEAR_BIT(RCC->CR, RCC_CR_HSION); +} + +/** + * @brief Check if HSI clock is ready + * @rmtoll CR HSIRDY LL_RCC_HSI_IsReady + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_HSI_IsReady(void) +{ + return (READ_BIT(RCC->CR, RCC_CR_HSIRDY) == (RCC_CR_HSIRDY)); +} + +/** + * @brief Get HSI Calibration value + * @note When HSITRIM is written, HSICAL is updated with the sum of + * HSITRIM and the factory trim value + * @rmtoll CR HSICAL LL_RCC_HSI_GetCalibration + * @retval Between Min_Data = 0x00 and Max_Data = 0xFF + */ +__STATIC_INLINE uint32_t LL_RCC_HSI_GetCalibration(void) +{ + return (uint32_t)(READ_BIT(RCC->CR, RCC_CR_HSICAL) >> RCC_CR_HSICAL_Pos); +} + +/** + * @brief Set HSI Calibration trimming + * @note user-programmable trimming value that is added to the HSICAL + * @note Default value is 16, which, when added to the HSICAL value, + * should trim the HSI to 16 MHz +/- 1 % + * @rmtoll CR HSITRIM LL_RCC_HSI_SetCalibTrimming + * @param Value Between Min_Data = 0 and Max_Data = 31 + * @retval None + */ +__STATIC_INLINE void LL_RCC_HSI_SetCalibTrimming(uint32_t Value) +{ + MODIFY_REG(RCC->CR, RCC_CR_HSITRIM, Value << RCC_CR_HSITRIM_Pos); +} + +/** + * @brief Get HSI Calibration trimming + * @rmtoll CR HSITRIM LL_RCC_HSI_GetCalibTrimming + * @retval Between Min_Data = 0 and Max_Data = 31 + */ +__STATIC_INLINE uint32_t LL_RCC_HSI_GetCalibTrimming(void) +{ + return (uint32_t)(READ_BIT(RCC->CR, RCC_CR_HSITRIM) >> RCC_CR_HSITRIM_Pos); +} + +/** + * @} + */ + +/** @defgroup RCC_LL_EF_LSE LSE + * @{ + */ + +/** + * @brief Enable Low Speed External (LSE) crystal. + * @rmtoll BDCR LSEON LL_RCC_LSE_Enable + * @retval None + */ +__STATIC_INLINE void LL_RCC_LSE_Enable(void) +{ + SET_BIT(RCC->BDCR, RCC_BDCR_LSEON); +} + +/** + * @brief Disable Low Speed External (LSE) crystal. 
+ * @rmtoll BDCR LSEON LL_RCC_LSE_Disable + * @retval None + */ +__STATIC_INLINE void LL_RCC_LSE_Disable(void) +{ + CLEAR_BIT(RCC->BDCR, RCC_BDCR_LSEON); +} + +/** + * @brief Enable external clock source (LSE bypass). + * @rmtoll BDCR LSEBYP LL_RCC_LSE_EnableBypass + * @retval None + */ +__STATIC_INLINE void LL_RCC_LSE_EnableBypass(void) +{ + SET_BIT(RCC->BDCR, RCC_BDCR_LSEBYP); +} + +/** + * @brief Disable external clock source (LSE bypass). + * @rmtoll BDCR LSEBYP LL_RCC_LSE_DisableBypass + * @retval None + */ +__STATIC_INLINE void LL_RCC_LSE_DisableBypass(void) +{ + CLEAR_BIT(RCC->BDCR, RCC_BDCR_LSEBYP); +} + +/** + * @brief Check if LSE oscillator Ready + * @rmtoll BDCR LSERDY LL_RCC_LSE_IsReady + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_LSE_IsReady(void) +{ + return (READ_BIT(RCC->BDCR, RCC_BDCR_LSERDY) == (RCC_BDCR_LSERDY)); +} + +#if defined(RCC_BDCR_LSEMOD) +/** + * @brief Enable LSE high drive mode. + * @note LSE high drive mode can be enabled only when the LSE clock is disabled + * @rmtoll BDCR LSEMOD LL_RCC_LSE_EnableHighDriveMode + * @retval None + */ +__STATIC_INLINE void LL_RCC_LSE_EnableHighDriveMode(void) +{ + SET_BIT(RCC->BDCR, RCC_BDCR_LSEMOD); +} + +/** + * @brief Disable LSE high drive mode. + * @note LSE high drive mode can be disabled only when the LSE clock is disabled + * @rmtoll BDCR LSEMOD LL_RCC_LSE_DisableHighDriveMode + * @retval None + */ +__STATIC_INLINE void LL_RCC_LSE_DisableHighDriveMode(void) +{ + CLEAR_BIT(RCC->BDCR, RCC_BDCR_LSEMOD); +} +#endif /* RCC_BDCR_LSEMOD */ + +/** + * @} + */ + +/** @defgroup RCC_LL_EF_LSI LSI + * @{ + */ + +/** + * @brief Enable LSI Oscillator + * @rmtoll CSR LSION LL_RCC_LSI_Enable + * @retval None + */ +__STATIC_INLINE void LL_RCC_LSI_Enable(void) +{ + SET_BIT(RCC->CSR, RCC_CSR_LSION); +} + +/** + * @brief Disable LSI Oscillator + * @rmtoll CSR LSION LL_RCC_LSI_Disable + * @retval None + */ +__STATIC_INLINE void LL_RCC_LSI_Disable(void) +{ + CLEAR_BIT(RCC->CSR, RCC_CSR_LSION); +} + +/** + * @brief Check if LSI is Ready + * @rmtoll CSR LSIRDY LL_RCC_LSI_IsReady + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_LSI_IsReady(void) +{ + return (READ_BIT(RCC->CSR, RCC_CSR_LSIRDY) == (RCC_CSR_LSIRDY)); +} + +/** + * @} + */ + +/** @defgroup RCC_LL_EF_System System + * @{ + */ + +/** + * @brief Configure the system clock source + * @rmtoll CFGR SW LL_RCC_SetSysClkSource + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_SYS_CLKSOURCE_HSI + * @arg @ref LL_RCC_SYS_CLKSOURCE_HSE + * @arg @ref LL_RCC_SYS_CLKSOURCE_PLL + * @arg @ref LL_RCC_SYS_CLKSOURCE_PLLR (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetSysClkSource(uint32_t Source) +{ + MODIFY_REG(RCC->CFGR, RCC_CFGR_SW, Source); +} + +/** + * @brief Get the system clock source + * @rmtoll CFGR SWS LL_RCC_GetSysClkSource + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_SYS_CLKSOURCE_STATUS_HSI + * @arg @ref LL_RCC_SYS_CLKSOURCE_STATUS_HSE + * @arg @ref LL_RCC_SYS_CLKSOURCE_STATUS_PLL + * @arg @ref LL_RCC_SYS_CLKSOURCE_STATUS_PLLR (*) + * + * (*) value not defined in all devices. 
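+ * @note Illustrative usage (a common pattern, not mandated by this header): after calling
+ *       LL_RCC_SetSysClkSource(LL_RCC_SYS_CLKSOURCE_PLL), poll until
+ *       LL_RCC_GetSysClkSource() == LL_RCC_SYS_CLKSOURCE_STATUS_PLL before relying on the new SYSCLK.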
+ */ +__STATIC_INLINE uint32_t LL_RCC_GetSysClkSource(void) +{ + return (uint32_t)(READ_BIT(RCC->CFGR, RCC_CFGR_SWS)); +} + +/** + * @brief Set AHB prescaler + * @rmtoll CFGR HPRE LL_RCC_SetAHBPrescaler + * @param Prescaler This parameter can be one of the following values: + * @arg @ref LL_RCC_SYSCLK_DIV_1 + * @arg @ref LL_RCC_SYSCLK_DIV_2 + * @arg @ref LL_RCC_SYSCLK_DIV_4 + * @arg @ref LL_RCC_SYSCLK_DIV_8 + * @arg @ref LL_RCC_SYSCLK_DIV_16 + * @arg @ref LL_RCC_SYSCLK_DIV_64 + * @arg @ref LL_RCC_SYSCLK_DIV_128 + * @arg @ref LL_RCC_SYSCLK_DIV_256 + * @arg @ref LL_RCC_SYSCLK_DIV_512 + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetAHBPrescaler(uint32_t Prescaler) +{ + MODIFY_REG(RCC->CFGR, RCC_CFGR_HPRE, Prescaler); +} + +/** + * @brief Set APB1 prescaler + * @rmtoll CFGR PPRE1 LL_RCC_SetAPB1Prescaler + * @param Prescaler This parameter can be one of the following values: + * @arg @ref LL_RCC_APB1_DIV_1 + * @arg @ref LL_RCC_APB1_DIV_2 + * @arg @ref LL_RCC_APB1_DIV_4 + * @arg @ref LL_RCC_APB1_DIV_8 + * @arg @ref LL_RCC_APB1_DIV_16 + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetAPB1Prescaler(uint32_t Prescaler) +{ + MODIFY_REG(RCC->CFGR, RCC_CFGR_PPRE1, Prescaler); +} + +/** + * @brief Set APB2 prescaler + * @rmtoll CFGR PPRE2 LL_RCC_SetAPB2Prescaler + * @param Prescaler This parameter can be one of the following values: + * @arg @ref LL_RCC_APB2_DIV_1 + * @arg @ref LL_RCC_APB2_DIV_2 + * @arg @ref LL_RCC_APB2_DIV_4 + * @arg @ref LL_RCC_APB2_DIV_8 + * @arg @ref LL_RCC_APB2_DIV_16 + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetAPB2Prescaler(uint32_t Prescaler) +{ + MODIFY_REG(RCC->CFGR, RCC_CFGR_PPRE2, Prescaler); +} + +/** + * @brief Get AHB prescaler + * @rmtoll CFGR HPRE LL_RCC_GetAHBPrescaler + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_SYSCLK_DIV_1 + * @arg @ref LL_RCC_SYSCLK_DIV_2 + * @arg @ref LL_RCC_SYSCLK_DIV_4 + * @arg @ref LL_RCC_SYSCLK_DIV_8 + * @arg @ref LL_RCC_SYSCLK_DIV_16 + * @arg @ref LL_RCC_SYSCLK_DIV_64 + * @arg @ref LL_RCC_SYSCLK_DIV_128 + * @arg @ref LL_RCC_SYSCLK_DIV_256 + * @arg @ref LL_RCC_SYSCLK_DIV_512 + */ +__STATIC_INLINE uint32_t LL_RCC_GetAHBPrescaler(void) +{ + return (uint32_t)(READ_BIT(RCC->CFGR, RCC_CFGR_HPRE)); +} + +/** + * @brief Get APB1 prescaler + * @rmtoll CFGR PPRE1 LL_RCC_GetAPB1Prescaler + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_APB1_DIV_1 + * @arg @ref LL_RCC_APB1_DIV_2 + * @arg @ref LL_RCC_APB1_DIV_4 + * @arg @ref LL_RCC_APB1_DIV_8 + * @arg @ref LL_RCC_APB1_DIV_16 + */ +__STATIC_INLINE uint32_t LL_RCC_GetAPB1Prescaler(void) +{ + return (uint32_t)(READ_BIT(RCC->CFGR, RCC_CFGR_PPRE1)); +} + +/** + * @brief Get APB2 prescaler + * @rmtoll CFGR PPRE2 LL_RCC_GetAPB2Prescaler + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_APB2_DIV_1 + * @arg @ref LL_RCC_APB2_DIV_2 + * @arg @ref LL_RCC_APB2_DIV_4 + * @arg @ref LL_RCC_APB2_DIV_8 + * @arg @ref LL_RCC_APB2_DIV_16 + */ +__STATIC_INLINE uint32_t LL_RCC_GetAPB2Prescaler(void) +{ + return (uint32_t)(READ_BIT(RCC->CFGR, RCC_CFGR_PPRE2)); +} + +/** + * @} + */ + +/** @defgroup RCC_LL_EF_MCO MCO + * @{ + */ + +#if defined(RCC_CFGR_MCO1EN) +/** + * @brief Enable MCO1 output + * @rmtoll CFGR RCC_CFGR_MCO1EN LL_RCC_MCO1_Enable + * @retval None + */ +__STATIC_INLINE void LL_RCC_MCO1_Enable(void) +{ + SET_BIT(RCC->CFGR, RCC_CFGR_MCO1EN); +} + +/** + * @brief Disable MCO1 output + * @rmtoll CFGR RCC_CFGR_MCO1EN LL_RCC_MCO1_Disable + * @retval None + */ +__STATIC_INLINE void 
LL_RCC_MCO1_Disable(void) +{ + CLEAR_BIT(RCC->CFGR, RCC_CFGR_MCO1EN); +} +#endif /* RCC_CFGR_MCO1EN */ + +#if defined(RCC_CFGR_MCO2EN) +/** + * @brief Enable MCO2 output + * @rmtoll CFGR RCC_CFGR_MCO2EN LL_RCC_MCO2_Enable + * @retval None + */ +__STATIC_INLINE void LL_RCC_MCO2_Enable(void) +{ + SET_BIT(RCC->CFGR, RCC_CFGR_MCO2EN); +} + +/** + * @brief Disable MCO2 output + * @rmtoll CFGR RCC_CFGR_MCO2EN LL_RCC_MCO2_Disable + * @retval None + */ +__STATIC_INLINE void LL_RCC_MCO2_Disable(void) +{ + CLEAR_BIT(RCC->CFGR, RCC_CFGR_MCO2EN); +} +#endif /* RCC_CFGR_MCO2EN */ + +/** + * @brief Configure MCOx + * @rmtoll CFGR MCO1 LL_RCC_ConfigMCO\n + * CFGR MCO1PRE LL_RCC_ConfigMCO\n + * CFGR MCO2 LL_RCC_ConfigMCO\n + * CFGR MCO2PRE LL_RCC_ConfigMCO + * @param MCOxSource This parameter can be one of the following values: + * @arg @ref LL_RCC_MCO1SOURCE_HSI + * @arg @ref LL_RCC_MCO1SOURCE_LSE + * @arg @ref LL_RCC_MCO1SOURCE_HSE + * @arg @ref LL_RCC_MCO1SOURCE_PLLCLK + * @arg @ref LL_RCC_MCO2SOURCE_SYSCLK + * @arg @ref LL_RCC_MCO2SOURCE_PLLI2S + * @arg @ref LL_RCC_MCO2SOURCE_HSE + * @arg @ref LL_RCC_MCO2SOURCE_PLLCLK + * @param MCOxPrescaler This parameter can be one of the following values: + * @arg @ref LL_RCC_MCO1_DIV_1 + * @arg @ref LL_RCC_MCO1_DIV_2 + * @arg @ref LL_RCC_MCO1_DIV_3 + * @arg @ref LL_RCC_MCO1_DIV_4 + * @arg @ref LL_RCC_MCO1_DIV_5 + * @arg @ref LL_RCC_MCO2_DIV_1 + * @arg @ref LL_RCC_MCO2_DIV_2 + * @arg @ref LL_RCC_MCO2_DIV_3 + * @arg @ref LL_RCC_MCO2_DIV_4 + * @arg @ref LL_RCC_MCO2_DIV_5 + * @retval None + */ +__STATIC_INLINE void LL_RCC_ConfigMCO(uint32_t MCOxSource, uint32_t MCOxPrescaler) +{ + MODIFY_REG(RCC->CFGR, (MCOxSource & 0xFFFF0000U) | (MCOxPrescaler & 0xFFFF0000U), (MCOxSource << 16U) | (MCOxPrescaler << 16U)); +} + +/** + * @} + */ + +/** @defgroup RCC_LL_EF_Peripheral_Clock_Source Peripheral Clock Source + * @{ + */ +#if defined(FMPI2C1) +/** + * @brief Configure FMPI2C clock source + * @rmtoll DCKCFGR2 FMPI2C1SEL LL_RCC_SetFMPI2CClockSource + * @param FMPI2CxSource This parameter can be one of the following values: + * @arg @ref LL_RCC_FMPI2C1_CLKSOURCE_PCLK1 + * @arg @ref LL_RCC_FMPI2C1_CLKSOURCE_SYSCLK + * @arg @ref LL_RCC_FMPI2C1_CLKSOURCE_HSI + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetFMPI2CClockSource(uint32_t FMPI2CxSource) +{ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_FMPI2C1SEL, FMPI2CxSource); +} +#endif /* FMPI2C1 */ + +#if defined(LPTIM1) +/** + * @brief Configure LPTIMx clock source + * @rmtoll DCKCFGR2 LPTIM1SEL LL_RCC_SetLPTIMClockSource + * @param LPTIMxSource This parameter can be one of the following values: + * @arg @ref LL_RCC_LPTIM1_CLKSOURCE_PCLK1 + * @arg @ref LL_RCC_LPTIM1_CLKSOURCE_HSI + * @arg @ref LL_RCC_LPTIM1_CLKSOURCE_LSI + * @arg @ref LL_RCC_LPTIM1_CLKSOURCE_LSE + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetLPTIMClockSource(uint32_t LPTIMxSource) +{ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_LPTIM1SEL, LPTIMxSource); +} +#endif /* LPTIM1 */ + +#if defined(SAI1) +/** + * @brief Configure SAIx clock source + * @rmtoll DCKCFGR SAI1SRC LL_RCC_SetSAIClockSource\n + * DCKCFGR SAI2SRC LL_RCC_SetSAIClockSource\n + * DCKCFGR SAI1ASRC LL_RCC_SetSAIClockSource\n + * DCKCFGR SAI1BSRC LL_RCC_SetSAIClockSource + * @param SAIxSource This parameter can be one of the following values: + * @arg @ref LL_RCC_SAI1_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_SAI1_CLKSOURCE_PLLI2S (*) + * @arg @ref LL_RCC_SAI1_CLKSOURCE_PLL (*) + * @arg @ref LL_RCC_SAI1_CLKSOURCE_PIN (*) + * @arg @ref LL_RCC_SAI2_CLKSOURCE_PLLSAI (*) + * @arg @ref 
LL_RCC_SAI2_CLKSOURCE_PLLI2S (*) + * @arg @ref LL_RCC_SAI2_CLKSOURCE_PLL (*) + * @arg @ref LL_RCC_SAI2_CLKSOURCE_PLLSRC (*) + * @arg @ref LL_RCC_SAI1_A_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_SAI1_A_CLKSOURCE_PLLI2S (*) + * @arg @ref LL_RCC_SAI1_A_CLKSOURCE_PIN (*) + * @arg @ref LL_RCC_SAI1_A_CLKSOURCE_PLL (*) + * @arg @ref LL_RCC_SAI1_A_CLKSOURCE_PLLSRC (*) + * @arg @ref LL_RCC_SAI1_B_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_SAI1_B_CLKSOURCE_PLLI2S (*) + * @arg @ref LL_RCC_SAI1_B_CLKSOURCE_PIN (*) + * @arg @ref LL_RCC_SAI1_B_CLKSOURCE_PLL (*) + * @arg @ref LL_RCC_SAI1_B_CLKSOURCE_PLLSRC (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetSAIClockSource(uint32_t SAIxSource) +{ + MODIFY_REG(RCC->DCKCFGR, (SAIxSource & 0xFFFF0000U), (SAIxSource << 16U)); +} +#endif /* SAI1 */ + +#if defined(RCC_DCKCFGR_SDIOSEL) || defined(RCC_DCKCFGR2_SDIOSEL) +/** + * @brief Configure SDIO clock source + * @rmtoll DCKCFGR SDIOSEL LL_RCC_SetSDIOClockSource\n + * DCKCFGR2 SDIOSEL LL_RCC_SetSDIOClockSource + * @param SDIOxSource This parameter can be one of the following values: + * @arg @ref LL_RCC_SDIO_CLKSOURCE_PLL48CLK + * @arg @ref LL_RCC_SDIO_CLKSOURCE_SYSCLK + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetSDIOClockSource(uint32_t SDIOxSource) +{ +#if defined(RCC_DCKCFGR_SDIOSEL) + MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_SDIOSEL, SDIOxSource); +#else + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_SDIOSEL, SDIOxSource); +#endif /* RCC_DCKCFGR_SDIOSEL */ +} +#endif /* RCC_DCKCFGR_SDIOSEL || RCC_DCKCFGR2_SDIOSEL */ + +#if defined(RCC_DCKCFGR_CK48MSEL) || defined(RCC_DCKCFGR2_CK48MSEL) +/** + * @brief Configure 48Mhz domain clock source + * @rmtoll DCKCFGR CK48MSEL LL_RCC_SetCK48MClockSource\n + * DCKCFGR2 CK48MSEL LL_RCC_SetCK48MClockSource + * @param CK48MxSource This parameter can be one of the following values: + * @arg @ref LL_RCC_CK48M_CLKSOURCE_PLL + * @arg @ref LL_RCC_CK48M_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_CK48M_CLKSOURCE_PLLI2S (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetCK48MClockSource(uint32_t CK48MxSource) +{ +#if defined(RCC_DCKCFGR_CK48MSEL) + MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_CK48MSEL, CK48MxSource); +#else + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_CK48MSEL, CK48MxSource); +#endif /* RCC_DCKCFGR_CK48MSEL */ +} + +#if defined(RNG) +/** + * @brief Configure RNG clock source + * @rmtoll DCKCFGR CK48MSEL LL_RCC_SetRNGClockSource\n + * DCKCFGR2 CK48MSEL LL_RCC_SetRNGClockSource + * @param RNGxSource This parameter can be one of the following values: + * @arg @ref LL_RCC_RNG_CLKSOURCE_PLL + * @arg @ref LL_RCC_RNG_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_RNG_CLKSOURCE_PLLI2S (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetRNGClockSource(uint32_t RNGxSource) +{ +#if defined(RCC_DCKCFGR_CK48MSEL) + MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_CK48MSEL, RNGxSource); +#else + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_CK48MSEL, RNGxSource); +#endif /* RCC_DCKCFGR_CK48MSEL */ +} +#endif /* RNG */ + +#if defined(USB_OTG_FS) || defined(USB_OTG_HS) +/** + * @brief Configure USB clock source + * @rmtoll DCKCFGR CK48MSEL LL_RCC_SetUSBClockSource\n + * DCKCFGR2 CK48MSEL LL_RCC_SetUSBClockSource + * @param USBxSource This parameter can be one of the following values: + * @arg @ref LL_RCC_USB_CLKSOURCE_PLL + * @arg @ref LL_RCC_USB_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_USB_CLKSOURCE_PLLI2S (*) + * + * (*) value not defined in all devices. 
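+ * @note Illustrative usage (assumed clock configuration): LL_RCC_SetUSBClockSource(LL_RCC_USB_CLKSOURCE_PLL)
+ *       selects the main PLL 48 MHz output; the application must ensure that this output actually
+ *       runs at 48 MHz for correct USB operation.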
+ * @retval None + */ +__STATIC_INLINE void LL_RCC_SetUSBClockSource(uint32_t USBxSource) +{ +#if defined(RCC_DCKCFGR_CK48MSEL) + MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_CK48MSEL, USBxSource); +#else + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_CK48MSEL, USBxSource); +#endif /* RCC_DCKCFGR_CK48MSEL */ +} +#endif /* USB_OTG_FS || USB_OTG_HS */ +#endif /* RCC_DCKCFGR_CK48MSEL || RCC_DCKCFGR2_CK48MSEL */ + +#if defined(CEC) +/** + * @brief Configure CEC clock source + * @rmtoll DCKCFGR2 CECSEL LL_RCC_SetCECClockSource + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_CEC_CLKSOURCE_HSI_DIV488 + * @arg @ref LL_RCC_CEC_CLKSOURCE_LSE + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetCECClockSource(uint32_t Source) +{ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_CECSEL, Source); +} +#endif /* CEC */ + +/** + * @brief Configure I2S clock source + * @rmtoll CFGR I2SSRC LL_RCC_SetI2SClockSource\n + * DCKCFGR I2SSRC LL_RCC_SetI2SClockSource\n + * DCKCFGR I2S1SRC LL_RCC_SetI2SClockSource\n + * DCKCFGR I2S2SRC LL_RCC_SetI2SClockSource + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_I2S1_CLKSOURCE_PLLI2S (*) + * @arg @ref LL_RCC_I2S1_CLKSOURCE_PIN + * @arg @ref LL_RCC_I2S1_CLKSOURCE_PLL (*) + * @arg @ref LL_RCC_I2S1_CLKSOURCE_PLLSRC (*) + * @arg @ref LL_RCC_I2S2_CLKSOURCE_PLLI2S (*) + * @arg @ref LL_RCC_I2S2_CLKSOURCE_PIN (*) + * @arg @ref LL_RCC_I2S2_CLKSOURCE_PLL (*) + * @arg @ref LL_RCC_I2S2_CLKSOURCE_PLLSRC (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetI2SClockSource(uint32_t Source) +{ +#if defined(RCC_CFGR_I2SSRC) + MODIFY_REG(RCC->CFGR, RCC_CFGR_I2SSRC, Source); +#else + MODIFY_REG(RCC->DCKCFGR, (Source & 0xFFFF0000U), (Source << 16U)); +#endif /* RCC_CFGR_I2SSRC */ +} + +#if defined(DSI) +/** + * @brief Configure DSI clock source + * @rmtoll DCKCFGR DSISEL LL_RCC_SetDSIClockSource + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_DSI_CLKSOURCE_PHY + * @arg @ref LL_RCC_DSI_CLKSOURCE_PLL + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetDSIClockSource(uint32_t Source) +{ + MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_DSISEL, Source); +} +#endif /* DSI */ + +#if defined(DFSDM1_Channel0) +/** + * @brief Configure DFSDM Audio clock source + * @rmtoll DCKCFGR CKDFSDM1ASEL LL_RCC_SetDFSDMAudioClockSource\n + * DCKCFGR CKDFSDM2ASEL LL_RCC_SetDFSDMAudioClockSource + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_DFSDM1_AUDIO_CLKSOURCE_I2S1 + * @arg @ref LL_RCC_DFSDM1_AUDIO_CLKSOURCE_I2S2 + * @arg @ref LL_RCC_DFSDM2_AUDIO_CLKSOURCE_I2S1 (*) + * @arg @ref LL_RCC_DFSDM2_AUDIO_CLKSOURCE_I2S2 (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetDFSDMAudioClockSource(uint32_t Source) +{ + MODIFY_REG(RCC->DCKCFGR, (Source & 0x0000FFFFU), (Source >> 16U)); +} + +/** + * @brief Configure DFSDM Kernel clock source + * @rmtoll DCKCFGR CKDFSDM1SEL LL_RCC_SetDFSDMClockSource + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_DFSDM1_CLKSOURCE_PCLK2 + * @arg @ref LL_RCC_DFSDM1_CLKSOURCE_SYSCLK + * @arg @ref LL_RCC_DFSDM2_CLKSOURCE_PCLK2 (*) + * @arg @ref LL_RCC_DFSDM2_CLKSOURCE_SYSCLK (*) + * + * (*) value not defined in all devices. 
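+ * @note Illustrative usage (assumed): LL_RCC_SetDFSDMClockSource(LL_RCC_DFSDM1_CLKSOURCE_SYSCLK)
+ *       clocks the DFSDM1 kernel from SYSCLK rather than PCLK2.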
+ * @retval None + */ +__STATIC_INLINE void LL_RCC_SetDFSDMClockSource(uint32_t Source) +{ + MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_CKDFSDM1SEL, Source); +} +#endif /* DFSDM1_Channel0 */ + +#if defined(SPDIFRX) +/** + * @brief Configure SPDIFRX clock source + * @rmtoll DCKCFGR2 SPDIFRXSEL LL_RCC_SetSPDIFRXClockSource + * @param SPDIFRXxSource This parameter can be one of the following values: + * @arg @ref LL_RCC_SPDIFRX1_CLKSOURCE_PLL + * @arg @ref LL_RCC_SPDIFRX1_CLKSOURCE_PLLI2S + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetSPDIFRXClockSource(uint32_t SPDIFRXxSource) +{ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_SPDIFRXSEL, SPDIFRXxSource); +} +#endif /* SPDIFRX */ + +#if defined(FMPI2C1) +/** + * @brief Get FMPI2C clock source + * @rmtoll DCKCFGR2 FMPI2C1SEL LL_RCC_GetFMPI2CClockSource + * @param FMPI2Cx This parameter can be one of the following values: + * @arg @ref LL_RCC_FMPI2C1_CLKSOURCE + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_FMPI2C1_CLKSOURCE_PCLK1 + * @arg @ref LL_RCC_FMPI2C1_CLKSOURCE_SYSCLK + * @arg @ref LL_RCC_FMPI2C1_CLKSOURCE_HSI + */ +__STATIC_INLINE uint32_t LL_RCC_GetFMPI2CClockSource(uint32_t FMPI2Cx) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR2, FMPI2Cx)); +} +#endif /* FMPI2C1 */ + +#if defined(LPTIM1) +/** + * @brief Get LPTIMx clock source + * @rmtoll DCKCFGR2 LPTIM1SEL LL_RCC_GetLPTIMClockSource + * @param LPTIMx This parameter can be one of the following values: + * @arg @ref LL_RCC_LPTIM1_CLKSOURCE + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_LPTIM1_CLKSOURCE_PCLK1 + * @arg @ref LL_RCC_LPTIM1_CLKSOURCE_HSI + * @arg @ref LL_RCC_LPTIM1_CLKSOURCE_LSI + * @arg @ref LL_RCC_LPTIM1_CLKSOURCE_LSE + */ +__STATIC_INLINE uint32_t LL_RCC_GetLPTIMClockSource(uint32_t LPTIMx) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_LPTIM1SEL)); +} +#endif /* LPTIM1 */ + +#if defined(SAI1) +/** + * @brief Get SAIx clock source + * @rmtoll DCKCFGR SAI1SEL LL_RCC_GetSAIClockSource\n + * DCKCFGR SAI2SEL LL_RCC_GetSAIClockSource\n + * DCKCFGR SAI1ASRC LL_RCC_GetSAIClockSource\n + * DCKCFGR SAI1BSRC LL_RCC_GetSAIClockSource + * @param SAIx This parameter can be one of the following values: + * @arg @ref LL_RCC_SAI1_CLKSOURCE (*) + * @arg @ref LL_RCC_SAI2_CLKSOURCE (*) + * @arg @ref LL_RCC_SAI1_A_CLKSOURCE (*) + * @arg @ref LL_RCC_SAI1_B_CLKSOURCE (*) + * + * (*) value not defined in all devices. + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_SAI1_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_SAI1_CLKSOURCE_PLLI2S (*) + * @arg @ref LL_RCC_SAI1_CLKSOURCE_PLL (*) + * @arg @ref LL_RCC_SAI1_CLKSOURCE_PIN (*) + * @arg @ref LL_RCC_SAI2_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_SAI2_CLKSOURCE_PLLI2S (*) + * @arg @ref LL_RCC_SAI2_CLKSOURCE_PLL (*) + * @arg @ref LL_RCC_SAI2_CLKSOURCE_PLLSRC (*) + * @arg @ref LL_RCC_SAI1_A_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_SAI1_A_CLKSOURCE_PLLI2S (*) + * @arg @ref LL_RCC_SAI1_A_CLKSOURCE_PIN (*) + * @arg @ref LL_RCC_SAI1_A_CLKSOURCE_PLL (*) + * @arg @ref LL_RCC_SAI1_A_CLKSOURCE_PLLSRC (*) + * @arg @ref LL_RCC_SAI1_B_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_SAI1_B_CLKSOURCE_PLLI2S (*) + * @arg @ref LL_RCC_SAI1_B_CLKSOURCE_PIN (*) + * @arg @ref LL_RCC_SAI1_B_CLKSOURCE_PLL (*) + * @arg @ref LL_RCC_SAI1_B_CLKSOURCE_PLLSRC (*) + * + * (*) value not defined in all devices. 
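+ * @note Illustrative usage (assumed device support): the value returned by
+ *       LL_RCC_GetSAIClockSource(LL_RCC_SAI1_A_CLKSOURCE) can be compared against the
+ *       LL_RCC_SAI1_A_CLKSOURCE_x constants listed above.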
+ */ +__STATIC_INLINE uint32_t LL_RCC_GetSAIClockSource(uint32_t SAIx) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR, SAIx) >> 16U | SAIx); +} +#endif /* SAI1 */ + +#if defined(RCC_DCKCFGR_SDIOSEL) || defined(RCC_DCKCFGR2_SDIOSEL) +/** + * @brief Get SDIOx clock source + * @rmtoll DCKCFGR SDIOSEL LL_RCC_GetSDIOClockSource\n + * DCKCFGR2 SDIOSEL LL_RCC_GetSDIOClockSource + * @param SDIOx This parameter can be one of the following values: + * @arg @ref LL_RCC_SDIO_CLKSOURCE + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_SDIO_CLKSOURCE_PLL48CLK + * @arg @ref LL_RCC_SDIO_CLKSOURCE_SYSCLK + */ +__STATIC_INLINE uint32_t LL_RCC_GetSDIOClockSource(uint32_t SDIOx) +{ +#if defined(RCC_DCKCFGR_SDIOSEL) + return (uint32_t)(READ_BIT(RCC->DCKCFGR, SDIOx)); +#else + return (uint32_t)(READ_BIT(RCC->DCKCFGR2, SDIOx)); +#endif /* RCC_DCKCFGR_SDIOSEL */ +} +#endif /* RCC_DCKCFGR_SDIOSEL || RCC_DCKCFGR2_SDIOSEL */ + +#if defined(RCC_DCKCFGR_CK48MSEL) || defined(RCC_DCKCFGR2_CK48MSEL) +/** + * @brief Get 48Mhz domain clock source + * @rmtoll DCKCFGR CK48MSEL LL_RCC_GetCK48MClockSource\n + * DCKCFGR2 CK48MSEL LL_RCC_GetCK48MClockSource + * @param CK48Mx This parameter can be one of the following values: + * @arg @ref LL_RCC_CK48M_CLKSOURCE + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_CK48M_CLKSOURCE_PLL + * @arg @ref LL_RCC_CK48M_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_CK48M_CLKSOURCE_PLLI2S (*) + * + * (*) value not defined in all devices. + */ +__STATIC_INLINE uint32_t LL_RCC_GetCK48MClockSource(uint32_t CK48Mx) +{ +#if defined(RCC_DCKCFGR_CK48MSEL) + return (uint32_t)(READ_BIT(RCC->DCKCFGR, CK48Mx)); +#else + return (uint32_t)(READ_BIT(RCC->DCKCFGR2, CK48Mx)); +#endif /* RCC_DCKCFGR_CK48MSEL */ +} + +#if defined(RNG) +/** + * @brief Get RNGx clock source + * @rmtoll DCKCFGR CK48MSEL LL_RCC_GetRNGClockSource\n + * DCKCFGR2 CK48MSEL LL_RCC_GetRNGClockSource + * @param RNGx This parameter can be one of the following values: + * @arg @ref LL_RCC_RNG_CLKSOURCE + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_RNG_CLKSOURCE_PLL + * @arg @ref LL_RCC_RNG_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_RNG_CLKSOURCE_PLLI2S (*) + * + * (*) value not defined in all devices. + */ +__STATIC_INLINE uint32_t LL_RCC_GetRNGClockSource(uint32_t RNGx) +{ +#if defined(RCC_DCKCFGR_CK48MSEL) + return (uint32_t)(READ_BIT(RCC->DCKCFGR, RNGx)); +#else + return (uint32_t)(READ_BIT(RCC->DCKCFGR2, RNGx)); +#endif /* RCC_DCKCFGR_CK48MSEL */ +} +#endif /* RNG */ + +#if defined(USB_OTG_FS) || defined(USB_OTG_HS) +/** + * @brief Get USBx clock source + * @rmtoll DCKCFGR CK48MSEL LL_RCC_GetUSBClockSource\n + * DCKCFGR2 CK48MSEL LL_RCC_GetUSBClockSource + * @param USBx This parameter can be one of the following values: + * @arg @ref LL_RCC_USB_CLKSOURCE + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_USB_CLKSOURCE_PLL + * @arg @ref LL_RCC_USB_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_USB_CLKSOURCE_PLLI2S (*) + * + * (*) value not defined in all devices. 
+ */ +__STATIC_INLINE uint32_t LL_RCC_GetUSBClockSource(uint32_t USBx) +{ +#if defined(RCC_DCKCFGR_CK48MSEL) + return (uint32_t)(READ_BIT(RCC->DCKCFGR, USBx)); +#else + return (uint32_t)(READ_BIT(RCC->DCKCFGR2, USBx)); +#endif /* RCC_DCKCFGR_CK48MSEL */ +} +#endif /* USB_OTG_FS || USB_OTG_HS */ +#endif /* RCC_DCKCFGR_CK48MSEL || RCC_DCKCFGR2_CK48MSEL */ + +#if defined(CEC) +/** + * @brief Get CEC Clock Source + * @rmtoll DCKCFGR2 CECSEL LL_RCC_GetCECClockSource + * @param CECx This parameter can be one of the following values: + * @arg @ref LL_RCC_CEC_CLKSOURCE + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_CEC_CLKSOURCE_HSI_DIV488 + * @arg @ref LL_RCC_CEC_CLKSOURCE_LSE + */ +__STATIC_INLINE uint32_t LL_RCC_GetCECClockSource(uint32_t CECx) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR2, CECx)); +} +#endif /* CEC */ + +/** + * @brief Get I2S Clock Source + * @rmtoll CFGR I2SSRC LL_RCC_GetI2SClockSource\n + * DCKCFGR I2SSRC LL_RCC_GetI2SClockSource\n + * DCKCFGR I2S1SRC LL_RCC_GetI2SClockSource\n + * DCKCFGR I2S2SRC LL_RCC_GetI2SClockSource + * @param I2Sx This parameter can be one of the following values: + * @arg @ref LL_RCC_I2S1_CLKSOURCE + * @arg @ref LL_RCC_I2S2_CLKSOURCE (*) + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_I2S1_CLKSOURCE_PLLI2S (*) + * @arg @ref LL_RCC_I2S1_CLKSOURCE_PIN + * @arg @ref LL_RCC_I2S1_CLKSOURCE_PLL (*) + * @arg @ref LL_RCC_I2S1_CLKSOURCE_PLLSRC (*) + * @arg @ref LL_RCC_I2S2_CLKSOURCE_PLLI2S (*) + * @arg @ref LL_RCC_I2S2_CLKSOURCE_PIN (*) + * @arg @ref LL_RCC_I2S2_CLKSOURCE_PLL (*) + * @arg @ref LL_RCC_I2S2_CLKSOURCE_PLLSRC (*) + * + * (*) value not defined in all devices. + */ +__STATIC_INLINE uint32_t LL_RCC_GetI2SClockSource(uint32_t I2Sx) +{ +#if defined(RCC_CFGR_I2SSRC) + return (uint32_t)(READ_BIT(RCC->CFGR, I2Sx)); +#else + return (uint32_t)(READ_BIT(RCC->DCKCFGR, I2Sx) >> 16U | I2Sx); +#endif /* RCC_CFGR_I2SSRC */ +} + +#if defined(DFSDM1_Channel0) +/** + * @brief Get DFSDM Audio Clock Source + * @rmtoll DCKCFGR CKDFSDM1ASEL LL_RCC_GetDFSDMAudioClockSource\n + * DCKCFGR CKDFSDM2ASEL LL_RCC_GetDFSDMAudioClockSource + * @param DFSDMx This parameter can be one of the following values: + * @arg @ref LL_RCC_DFSDM1_AUDIO_CLKSOURCE + * @arg @ref LL_RCC_DFSDM2_AUDIO_CLKSOURCE (*) + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_DFSDM1_AUDIO_CLKSOURCE_I2S1 + * @arg @ref LL_RCC_DFSDM1_AUDIO_CLKSOURCE_I2S2 + * @arg @ref LL_RCC_DFSDM2_AUDIO_CLKSOURCE_I2S1 (*) + * @arg @ref LL_RCC_DFSDM2_AUDIO_CLKSOURCE_I2S2 (*) + * + * (*) value not defined in all devices. + */ +__STATIC_INLINE uint32_t LL_RCC_GetDFSDMAudioClockSource(uint32_t DFSDMx) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR, DFSDMx) << 16U | DFSDMx); +} + +/** + * @brief Get DFSDM Audio Clock Source + * @rmtoll DCKCFGR CKDFSDM1SEL LL_RCC_GetDFSDMClockSource + * @param DFSDMx This parameter can be one of the following values: + * @arg @ref LL_RCC_DFSDM1_CLKSOURCE + * @arg @ref LL_RCC_DFSDM2_CLKSOURCE (*) + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_DFSDM1_CLKSOURCE_PCLK2 + * @arg @ref LL_RCC_DFSDM1_CLKSOURCE_SYSCLK + * @arg @ref LL_RCC_DFSDM2_CLKSOURCE_PCLK2 (*) + * @arg @ref LL_RCC_DFSDM2_CLKSOURCE_SYSCLK (*) + * + * (*) value not defined in all devices. 
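+ * @note  Editorial usage sketch, not part of the original header: a minimal check of the
+ *        DFSDM1 kernel clock selection.
+ * @code
+ *   if (LL_RCC_GetDFSDMClockSource(LL_RCC_DFSDM1_CLKSOURCE) == LL_RCC_DFSDM1_CLKSOURCE_SYSCLK)
+ *   {
+ *     // DFSDM1 is clocked from the system clock rather than from PCLK2
+ *   }
+ * @endcode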
+ */ +__STATIC_INLINE uint32_t LL_RCC_GetDFSDMClockSource(uint32_t DFSDMx) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR, DFSDMx)); +} +#endif /* DFSDM1_Channel0 */ + +#if defined(SPDIFRX) +/** + * @brief Get SPDIFRX clock source + * @rmtoll DCKCFGR2 SPDIFRXSEL LL_RCC_GetSPDIFRXClockSource + * @param SPDIFRXx This parameter can be one of the following values: + * @arg @ref LL_RCC_SPDIFRX1_CLKSOURCE + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_SPDIFRX1_CLKSOURCE_PLL + * @arg @ref LL_RCC_SPDIFRX1_CLKSOURCE_PLLI2S + * + * (*) value not defined in all devices. + */ +__STATIC_INLINE uint32_t LL_RCC_GetSPDIFRXClockSource(uint32_t SPDIFRXx) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR2, SPDIFRXx)); +} +#endif /* SPDIFRX */ + +#if defined(DSI) +/** + * @brief Get DSI Clock Source + * @rmtoll DCKCFGR DSISEL LL_RCC_GetDSIClockSource + * @param DSIx This parameter can be one of the following values: + * @arg @ref LL_RCC_DSI_CLKSOURCE + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_DSI_CLKSOURCE_PHY + * @arg @ref LL_RCC_DSI_CLKSOURCE_PLL + */ +__STATIC_INLINE uint32_t LL_RCC_GetDSIClockSource(uint32_t DSIx) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR, DSIx)); +} +#endif /* DSI */ + +/** + * @} + */ + +/** @defgroup RCC_LL_EF_RTC RTC + * @{ + */ + +/** + * @brief Set RTC Clock Source + * @note Once the RTC clock source has been selected, it cannot be changed anymore unless + * the Backup domain is reset, or unless a failure is detected on LSE (LSECSSD is + * set). The BDRST bit can be used to reset them. + * @rmtoll BDCR RTCSEL LL_RCC_SetRTCClockSource + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_RTC_CLKSOURCE_NONE + * @arg @ref LL_RCC_RTC_CLKSOURCE_LSE + * @arg @ref LL_RCC_RTC_CLKSOURCE_LSI + * @arg @ref LL_RCC_RTC_CLKSOURCE_HSE + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetRTCClockSource(uint32_t Source) +{ + MODIFY_REG(RCC->BDCR, RCC_BDCR_RTCSEL, Source); +} + +/** + * @brief Get RTC Clock Source + * @rmtoll BDCR RTCSEL LL_RCC_GetRTCClockSource + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_RTC_CLKSOURCE_NONE + * @arg @ref LL_RCC_RTC_CLKSOURCE_LSE + * @arg @ref LL_RCC_RTC_CLKSOURCE_LSI + * @arg @ref LL_RCC_RTC_CLKSOURCE_HSE + */ +__STATIC_INLINE uint32_t LL_RCC_GetRTCClockSource(void) +{ + return (uint32_t)(READ_BIT(RCC->BDCR, RCC_BDCR_RTCSEL)); +} + +/** + * @brief Enable RTC + * @rmtoll BDCR RTCEN LL_RCC_EnableRTC + * @retval None + */ +__STATIC_INLINE void LL_RCC_EnableRTC(void) +{ + SET_BIT(RCC->BDCR, RCC_BDCR_RTCEN); +} + +/** + * @brief Disable RTC + * @rmtoll BDCR RTCEN LL_RCC_DisableRTC + * @retval None + */ +__STATIC_INLINE void LL_RCC_DisableRTC(void) +{ + CLEAR_BIT(RCC->BDCR, RCC_BDCR_RTCEN); +} + +/** + * @brief Check if RTC has been enabled or not + * @rmtoll BDCR RTCEN LL_RCC_IsEnabledRTC + * @retval State of bit (1 or 0). 
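+ * @note  Editorial usage sketch, not part of the original header: a typical first-boot sequence
+ *        built from the RTC functions in this group, assuming the LSE oscillator has already
+ *        been started elsewhere and is stable.
+ * @code
+ *   if (LL_RCC_IsEnabledRTC() == 0U)
+ *   {
+ *     LL_RCC_ForceBackupDomainReset();    // clear any previous RTCSEL selection (see note above)
+ *     LL_RCC_ReleaseBackupDomainReset();
+ *     LL_RCC_SetRTCClockSource(LL_RCC_RTC_CLKSOURCE_LSE);
+ *     LL_RCC_EnableRTC();
+ *   }
+ * @endcode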
+ */ +__STATIC_INLINE uint32_t LL_RCC_IsEnabledRTC(void) +{ + return (READ_BIT(RCC->BDCR, RCC_BDCR_RTCEN) == (RCC_BDCR_RTCEN)); +} + +/** + * @brief Force the Backup domain reset + * @rmtoll BDCR BDRST LL_RCC_ForceBackupDomainReset + * @retval None + */ +__STATIC_INLINE void LL_RCC_ForceBackupDomainReset(void) +{ + SET_BIT(RCC->BDCR, RCC_BDCR_BDRST); +} + +/** + * @brief Release the Backup domain reset + * @rmtoll BDCR BDRST LL_RCC_ReleaseBackupDomainReset + * @retval None + */ +__STATIC_INLINE void LL_RCC_ReleaseBackupDomainReset(void) +{ + CLEAR_BIT(RCC->BDCR, RCC_BDCR_BDRST); +} + +/** + * @brief Set HSE Prescalers for RTC Clock + * @rmtoll CFGR RTCPRE LL_RCC_SetRTC_HSEPrescaler + * @param Prescaler This parameter can be one of the following values: + * @arg @ref LL_RCC_RTC_NOCLOCK + * @arg @ref LL_RCC_RTC_HSE_DIV_2 + * @arg @ref LL_RCC_RTC_HSE_DIV_3 + * @arg @ref LL_RCC_RTC_HSE_DIV_4 + * @arg @ref LL_RCC_RTC_HSE_DIV_5 + * @arg @ref LL_RCC_RTC_HSE_DIV_6 + * @arg @ref LL_RCC_RTC_HSE_DIV_7 + * @arg @ref LL_RCC_RTC_HSE_DIV_8 + * @arg @ref LL_RCC_RTC_HSE_DIV_9 + * @arg @ref LL_RCC_RTC_HSE_DIV_10 + * @arg @ref LL_RCC_RTC_HSE_DIV_11 + * @arg @ref LL_RCC_RTC_HSE_DIV_12 + * @arg @ref LL_RCC_RTC_HSE_DIV_13 + * @arg @ref LL_RCC_RTC_HSE_DIV_14 + * @arg @ref LL_RCC_RTC_HSE_DIV_15 + * @arg @ref LL_RCC_RTC_HSE_DIV_16 + * @arg @ref LL_RCC_RTC_HSE_DIV_17 + * @arg @ref LL_RCC_RTC_HSE_DIV_18 + * @arg @ref LL_RCC_RTC_HSE_DIV_19 + * @arg @ref LL_RCC_RTC_HSE_DIV_20 + * @arg @ref LL_RCC_RTC_HSE_DIV_21 + * @arg @ref LL_RCC_RTC_HSE_DIV_22 + * @arg @ref LL_RCC_RTC_HSE_DIV_23 + * @arg @ref LL_RCC_RTC_HSE_DIV_24 + * @arg @ref LL_RCC_RTC_HSE_DIV_25 + * @arg @ref LL_RCC_RTC_HSE_DIV_26 + * @arg @ref LL_RCC_RTC_HSE_DIV_27 + * @arg @ref LL_RCC_RTC_HSE_DIV_28 + * @arg @ref LL_RCC_RTC_HSE_DIV_29 + * @arg @ref LL_RCC_RTC_HSE_DIV_30 + * @arg @ref LL_RCC_RTC_HSE_DIV_31 + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetRTC_HSEPrescaler(uint32_t Prescaler) +{ + MODIFY_REG(RCC->CFGR, RCC_CFGR_RTCPRE, Prescaler); +} + +/** + * @brief Get HSE Prescalers for RTC Clock + * @rmtoll CFGR RTCPRE LL_RCC_GetRTC_HSEPrescaler + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_RTC_NOCLOCK + * @arg @ref LL_RCC_RTC_HSE_DIV_2 + * @arg @ref LL_RCC_RTC_HSE_DIV_3 + * @arg @ref LL_RCC_RTC_HSE_DIV_4 + * @arg @ref LL_RCC_RTC_HSE_DIV_5 + * @arg @ref LL_RCC_RTC_HSE_DIV_6 + * @arg @ref LL_RCC_RTC_HSE_DIV_7 + * @arg @ref LL_RCC_RTC_HSE_DIV_8 + * @arg @ref LL_RCC_RTC_HSE_DIV_9 + * @arg @ref LL_RCC_RTC_HSE_DIV_10 + * @arg @ref LL_RCC_RTC_HSE_DIV_11 + * @arg @ref LL_RCC_RTC_HSE_DIV_12 + * @arg @ref LL_RCC_RTC_HSE_DIV_13 + * @arg @ref LL_RCC_RTC_HSE_DIV_14 + * @arg @ref LL_RCC_RTC_HSE_DIV_15 + * @arg @ref LL_RCC_RTC_HSE_DIV_16 + * @arg @ref LL_RCC_RTC_HSE_DIV_17 + * @arg @ref LL_RCC_RTC_HSE_DIV_18 + * @arg @ref LL_RCC_RTC_HSE_DIV_19 + * @arg @ref LL_RCC_RTC_HSE_DIV_20 + * @arg @ref LL_RCC_RTC_HSE_DIV_21 + * @arg @ref LL_RCC_RTC_HSE_DIV_22 + * @arg @ref LL_RCC_RTC_HSE_DIV_23 + * @arg @ref LL_RCC_RTC_HSE_DIV_24 + * @arg @ref LL_RCC_RTC_HSE_DIV_25 + * @arg @ref LL_RCC_RTC_HSE_DIV_26 + * @arg @ref LL_RCC_RTC_HSE_DIV_27 + * @arg @ref LL_RCC_RTC_HSE_DIV_28 + * @arg @ref LL_RCC_RTC_HSE_DIV_29 + * @arg @ref LL_RCC_RTC_HSE_DIV_30 + * @arg @ref LL_RCC_RTC_HSE_DIV_31 + */ +__STATIC_INLINE uint32_t LL_RCC_GetRTC_HSEPrescaler(void) +{ + return (uint32_t)(READ_BIT(RCC->CFGR, RCC_CFGR_RTCPRE)); +} + +/** + * @} + */ + +#if defined(RCC_DCKCFGR_TIMPRE) +/** @defgroup RCC_LL_EF_TIM_CLOCK_PRESCALER TIM + * @{ + */ + +/** + * @brief Set 
Timers Clock Prescalers + * @rmtoll DCKCFGR TIMPRE LL_RCC_SetTIMPrescaler + * @param Prescaler This parameter can be one of the following values: + * @arg @ref LL_RCC_TIM_PRESCALER_TWICE + * @arg @ref LL_RCC_TIM_PRESCALER_FOUR_TIMES + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetTIMPrescaler(uint32_t Prescaler) +{ + MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_TIMPRE, Prescaler); +} + +/** + * @brief Get Timers Clock Prescalers + * @rmtoll DCKCFGR TIMPRE LL_RCC_GetTIMPrescaler + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_TIM_PRESCALER_TWICE + * @arg @ref LL_RCC_TIM_PRESCALER_FOUR_TIMES + */ +__STATIC_INLINE uint32_t LL_RCC_GetTIMPrescaler(void) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_TIMPRE)); +} + +/** + * @} + */ +#endif /* RCC_DCKCFGR_TIMPRE */ + +/** @defgroup RCC_LL_EF_PLL PLL + * @{ + */ + +/** + * @brief Enable PLL + * @rmtoll CR PLLON LL_RCC_PLL_Enable + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLL_Enable(void) +{ + SET_BIT(RCC->CR, RCC_CR_PLLON); +} + +/** + * @brief Disable PLL + * @note Cannot be disabled if the PLL clock is used as the system clock + * @rmtoll CR PLLON LL_RCC_PLL_Disable + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLL_Disable(void) +{ + CLEAR_BIT(RCC->CR, RCC_CR_PLLON); +} + +/** + * @brief Check if PLL Ready + * @rmtoll CR PLLRDY LL_RCC_PLL_IsReady + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_PLL_IsReady(void) +{ + return (READ_BIT(RCC->CR, RCC_CR_PLLRDY) == (RCC_CR_PLLRDY)); +} + +/** + * @brief Configure PLL used for SYSCLK Domain + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI(*) are disabled + * @note PLLN/PLLP can be written only when PLL is disabled + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLL_ConfigDomain_SYS\n + * PLLCFGR PLLM LL_RCC_PLL_ConfigDomain_SYS\n + * PLLCFGR PLLN LL_RCC_PLL_ConfigDomain_SYS\n + * PLLCFGR PLLR LL_RCC_PLL_ConfigDomain_SYS\n + * PLLCFGR PLLP LL_RCC_PLL_ConfigDomain_SYS + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * 
@arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param PLLN Between 50/192(*) and 432 + * + * (*) value not defined in all devices. + * @param PLLP_R This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLP_DIV_2 + * @arg @ref LL_RCC_PLLP_DIV_4 + * @arg @ref LL_RCC_PLLP_DIV_6 + * @arg @ref LL_RCC_PLLP_DIV_8 + * @arg @ref LL_RCC_PLLR_DIV_2 (*) + * @arg @ref LL_RCC_PLLR_DIV_3 (*) + * @arg @ref LL_RCC_PLLR_DIV_4 (*) + * @arg @ref LL_RCC_PLLR_DIV_5 (*) + * @arg @ref LL_RCC_PLLR_DIV_6 (*) + * @arg @ref LL_RCC_PLLR_DIV_7 (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLL_ConfigDomain_SYS(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLP_R) +{ + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC | RCC_PLLCFGR_PLLM | RCC_PLLCFGR_PLLN, + Source | PLLM | PLLN << RCC_PLLCFGR_PLLN_Pos); + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLP, PLLP_R); +#if defined(RCC_PLLR_SYSCLK_SUPPORT) + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLR, PLLP_R); +#endif /* RCC_PLLR_SYSCLK_SUPPORT */ +} + +/** + * @brief Configure PLL used for 48Mhz domain clock + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI(*) are disabled + * @note PLLN/PLLQ can be written only when PLL is disabled + * @note This can be selected for USB, RNG, SDIO + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLL_ConfigDomain_48M\n + * PLLCFGR PLLM LL_RCC_PLL_ConfigDomain_48M\n + * PLLCFGR PLLN LL_RCC_PLL_ConfigDomain_48M\n + * PLLCFGR PLLQ LL_RCC_PLL_ConfigDomain_48M + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 
+ * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param PLLN Between 50/192(*) and 432 + * + * (*) value not defined in all devices. + * @param PLLQ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLQ_DIV_2 + * @arg @ref LL_RCC_PLLQ_DIV_3 + * @arg @ref LL_RCC_PLLQ_DIV_4 + * @arg @ref LL_RCC_PLLQ_DIV_5 + * @arg @ref LL_RCC_PLLQ_DIV_6 + * @arg @ref LL_RCC_PLLQ_DIV_7 + * @arg @ref LL_RCC_PLLQ_DIV_8 + * @arg @ref LL_RCC_PLLQ_DIV_9 + * @arg @ref LL_RCC_PLLQ_DIV_10 + * @arg @ref LL_RCC_PLLQ_DIV_11 + * @arg @ref LL_RCC_PLLQ_DIV_12 + * @arg @ref LL_RCC_PLLQ_DIV_13 + * @arg @ref LL_RCC_PLLQ_DIV_14 + * @arg @ref LL_RCC_PLLQ_DIV_15 + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLL_ConfigDomain_48M(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLQ) +{ + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC | RCC_PLLCFGR_PLLM | RCC_PLLCFGR_PLLN | RCC_PLLCFGR_PLLQ, + Source | PLLM | PLLN << RCC_PLLCFGR_PLLN_Pos | PLLQ); +} + +#if defined(DSI) +/** + * @brief Configure PLL used for DSI clock + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI are disabled + * @note PLLN/PLLR can be written only when PLL is disabled + * @note This can be selected for DSI + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLL_ConfigDomain_DSI\n + * PLLCFGR PLLM LL_RCC_PLL_ConfigDomain_DSI\n + * PLLCFGR PLLN LL_RCC_PLL_ConfigDomain_DSI\n + * PLLCFGR PLLR LL_RCC_PLL_ConfigDomain_DSI + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 
+ * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param PLLN Between 50 and 432 + * @param PLLR This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLR_DIV_2 + * @arg @ref LL_RCC_PLLR_DIV_3 + * @arg @ref LL_RCC_PLLR_DIV_4 + * @arg @ref LL_RCC_PLLR_DIV_5 + * @arg @ref LL_RCC_PLLR_DIV_6 + * @arg @ref LL_RCC_PLLR_DIV_7 + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLL_ConfigDomain_DSI(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLR) +{ + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC | RCC_PLLCFGR_PLLM | RCC_PLLCFGR_PLLN | RCC_PLLCFGR_PLLR, + Source | PLLM | PLLN << RCC_PLLCFGR_PLLN_Pos | PLLR); +} +#endif /* DSI */ + +#if defined(RCC_PLLR_I2S_CLKSOURCE_SUPPORT) +/** + * @brief Configure PLL used for I2S clock + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI are disabled + * @note PLLN/PLLR can be written only when PLL is disabled + * @note This can be selected for I2S + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLL_ConfigDomain_I2S\n + * PLLCFGR PLLM LL_RCC_PLL_ConfigDomain_I2S\n + * PLLCFGR PLLN LL_RCC_PLL_ConfigDomain_I2S\n + * PLLCFGR PLLR LL_RCC_PLL_ConfigDomain_I2S + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * 
@arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param PLLN Between 50 and 432 + * @param PLLR This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLR_DIV_2 + * @arg @ref LL_RCC_PLLR_DIV_3 + * @arg @ref LL_RCC_PLLR_DIV_4 + * @arg @ref LL_RCC_PLLR_DIV_5 + * @arg @ref LL_RCC_PLLR_DIV_6 + * @arg @ref LL_RCC_PLLR_DIV_7 + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLL_ConfigDomain_I2S(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLR) +{ + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC | RCC_PLLCFGR_PLLM | RCC_PLLCFGR_PLLN | RCC_PLLCFGR_PLLR, + Source | PLLM | PLLN << RCC_PLLCFGR_PLLN_Pos | PLLR); +} +#endif /* RCC_PLLR_I2S_CLKSOURCE_SUPPORT */ + +#if defined(SPDIFRX) +/** + * @brief Configure PLL used for SPDIFRX clock + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI are disabled + * @note PLLN/PLLR can be written only when PLL is disabled + * @note This can be selected for SPDIFRX + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLL_ConfigDomain_SPDIFRX\n + * PLLCFGR PLLM LL_RCC_PLL_ConfigDomain_SPDIFRX\n + * PLLCFGR PLLN LL_RCC_PLL_ConfigDomain_SPDIFRX\n + * PLLCFGR PLLR LL_RCC_PLL_ConfigDomain_SPDIFRX + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref 
LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param PLLN Between 50 and 432 + * @param PLLR This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLR_DIV_2 + * @arg @ref LL_RCC_PLLR_DIV_3 + * @arg @ref LL_RCC_PLLR_DIV_4 + * @arg @ref LL_RCC_PLLR_DIV_5 + * @arg @ref LL_RCC_PLLR_DIV_6 + * @arg @ref LL_RCC_PLLR_DIV_7 + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLL_ConfigDomain_SPDIFRX(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLR) +{ + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC | RCC_PLLCFGR_PLLM | RCC_PLLCFGR_PLLN | RCC_PLLCFGR_PLLR, + Source | PLLM | PLLN << RCC_PLLCFGR_PLLN_Pos | PLLR); +} +#endif /* SPDIFRX */ + +#if defined(RCC_PLLCFGR_PLLR) +#if defined(SAI1) +/** + * @brief Configure PLL used for SAI clock + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI are disabled + * @note PLLN/PLLR can be written only when PLL is disabled + * @note This can be selected for SAI + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLL_ConfigDomain_SAI\n + * PLLCFGR PLLM LL_RCC_PLL_ConfigDomain_SAI\n + * PLLCFGR PLLN LL_RCC_PLL_ConfigDomain_SAI\n + * PLLCFGR PLLR LL_RCC_PLL_ConfigDomain_SAI\n + * DCKCFGR PLLDIVR LL_RCC_PLL_ConfigDomain_SAI + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref 
LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param PLLN Between 50 and 432 + * @param PLLR This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLR_DIV_2 + * @arg @ref LL_RCC_PLLR_DIV_3 + * @arg @ref LL_RCC_PLLR_DIV_4 + * @arg @ref LL_RCC_PLLR_DIV_5 + * @arg @ref LL_RCC_PLLR_DIV_6 + * @arg @ref LL_RCC_PLLR_DIV_7 + * @param PLLDIVR This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLDIVR_DIV_1 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_2 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_3 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_4 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_5 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_6 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_7 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_8 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_9 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_10 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_11 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_12 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_13 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_14 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_15 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_16 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_17 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_18 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_19 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_20 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_21 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_22 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_23 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_24 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_25 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_26 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_27 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_28 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_29 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_30 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_31 (*) + * + * (*) value not defined in all devices. + * @retval None + */ +#if defined(RCC_DCKCFGR_PLLDIVR) +__STATIC_INLINE void LL_RCC_PLL_ConfigDomain_SAI(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLR, + uint32_t PLLDIVR) +#else +__STATIC_INLINE void LL_RCC_PLL_ConfigDomain_SAI(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLR) +#endif /* RCC_DCKCFGR_PLLDIVR */ +{ + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC | RCC_PLLCFGR_PLLM | RCC_PLLCFGR_PLLN | RCC_PLLCFGR_PLLR, + Source | PLLM | PLLN << RCC_PLLCFGR_PLLN_Pos | PLLR); +#if defined(RCC_DCKCFGR_PLLDIVR) + MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_PLLDIVR, PLLDIVR); +#endif /* RCC_DCKCFGR_PLLDIVR */ +} +#endif /* SAI1 */ +#endif /* RCC_PLLCFGR_PLLR */ + +/** + * @brief Configure PLL clock source + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLL_SetMainSource + * @param PLLSource This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLL_SetMainSource(uint32_t PLLSource) +{ + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC, PLLSource); +} + +/** + * @brief Get the oscillator used as PLL clock source. 
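+ * @note  Editorial usage sketch, not part of the original header: the PLL getters below can be
+ *        combined to recover the PLL input and VCO frequencies in use (HSE_VALUE and HSI_VALUE
+ *        are the usual project-level oscillator defines; on this family the PLLM field value
+ *        is numerically equal to the M divider).
+ * @code
+ *   uint32_t pllin = (LL_RCC_PLL_GetMainSource() == LL_RCC_PLLSOURCE_HSE) ? HSE_VALUE : HSI_VALUE;
+ *   uint32_t fvco  = (pllin / LL_RCC_PLL_GetDivider()) * LL_RCC_PLL_GetN();  // f_VCO = f_in / M * N
+ * @endcode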
+ * @rmtoll PLLCFGR PLLSRC LL_RCC_PLL_GetMainSource + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + */ +__STATIC_INLINE uint32_t LL_RCC_PLL_GetMainSource(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC)); +} + +/** + * @brief Get Main PLL multiplication factor for VCO + * @rmtoll PLLCFGR PLLN LL_RCC_PLL_GetN + * @retval Between 50/192(*) and 432 + * + * (*) value not defined in all devices. + */ +__STATIC_INLINE uint32_t LL_RCC_PLL_GetN(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos); +} + +/** + * @brief Get Main PLL division factor for PLLP + * @rmtoll PLLCFGR PLLP LL_RCC_PLL_GetP + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLP_DIV_2 + * @arg @ref LL_RCC_PLLP_DIV_4 + * @arg @ref LL_RCC_PLLP_DIV_6 + * @arg @ref LL_RCC_PLLP_DIV_8 + */ +__STATIC_INLINE uint32_t LL_RCC_PLL_GetP(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLP)); +} + +/** + * @brief Get Main PLL division factor for PLLQ + * @note used for PLL48MCLK selected for USB, RNG, SDIO (48 MHz clock) + * @rmtoll PLLCFGR PLLQ LL_RCC_PLL_GetQ + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLQ_DIV_2 + * @arg @ref LL_RCC_PLLQ_DIV_3 + * @arg @ref LL_RCC_PLLQ_DIV_4 + * @arg @ref LL_RCC_PLLQ_DIV_5 + * @arg @ref LL_RCC_PLLQ_DIV_6 + * @arg @ref LL_RCC_PLLQ_DIV_7 + * @arg @ref LL_RCC_PLLQ_DIV_8 + * @arg @ref LL_RCC_PLLQ_DIV_9 + * @arg @ref LL_RCC_PLLQ_DIV_10 + * @arg @ref LL_RCC_PLLQ_DIV_11 + * @arg @ref LL_RCC_PLLQ_DIV_12 + * @arg @ref LL_RCC_PLLQ_DIV_13 + * @arg @ref LL_RCC_PLLQ_DIV_14 + * @arg @ref LL_RCC_PLLQ_DIV_15 + */ +__STATIC_INLINE uint32_t LL_RCC_PLL_GetQ(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLQ)); +} + +#if defined(RCC_PLLCFGR_PLLR) +/** + * @brief Get Main PLL division factor for PLLR + * @note used for PLLCLK (system clock) + * @rmtoll PLLCFGR PLLR LL_RCC_PLL_GetR + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLR_DIV_2 + * @arg @ref LL_RCC_PLLR_DIV_3 + * @arg @ref LL_RCC_PLLR_DIV_4 + * @arg @ref LL_RCC_PLLR_DIV_5 + * @arg @ref LL_RCC_PLLR_DIV_6 + * @arg @ref LL_RCC_PLLR_DIV_7 + */ +__STATIC_INLINE uint32_t LL_RCC_PLL_GetR(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLR)); +} +#endif /* RCC_PLLCFGR_PLLR */ + +#if defined(RCC_DCKCFGR_PLLDIVR) +/** + * @brief Get Main PLL division factor for PLLDIVR + * @note used for PLLSAICLK (SAI1 and SAI2 clock) + * @rmtoll DCKCFGR PLLDIVR LL_RCC_PLL_GetDIVR + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLDIVR_DIV_1 + * @arg @ref LL_RCC_PLLDIVR_DIV_2 + * @arg @ref LL_RCC_PLLDIVR_DIV_3 + * @arg @ref LL_RCC_PLLDIVR_DIV_4 + * @arg @ref LL_RCC_PLLDIVR_DIV_5 + * @arg @ref LL_RCC_PLLDIVR_DIV_6 + * @arg @ref LL_RCC_PLLDIVR_DIV_7 + * @arg @ref LL_RCC_PLLDIVR_DIV_8 + * @arg @ref LL_RCC_PLLDIVR_DIV_9 + * @arg @ref LL_RCC_PLLDIVR_DIV_10 + * @arg @ref LL_RCC_PLLDIVR_DIV_11 + * @arg @ref LL_RCC_PLLDIVR_DIV_12 + * @arg @ref LL_RCC_PLLDIVR_DIV_13 + * @arg @ref LL_RCC_PLLDIVR_DIV_14 + * @arg @ref LL_RCC_PLLDIVR_DIV_15 + * @arg @ref LL_RCC_PLLDIVR_DIV_16 + * @arg @ref LL_RCC_PLLDIVR_DIV_17 + * @arg @ref LL_RCC_PLLDIVR_DIV_18 + * @arg @ref LL_RCC_PLLDIVR_DIV_19 + * @arg @ref LL_RCC_PLLDIVR_DIV_20 + * @arg @ref LL_RCC_PLLDIVR_DIV_21 + * @arg @ref LL_RCC_PLLDIVR_DIV_22 + * @arg @ref LL_RCC_PLLDIVR_DIV_23 + * @arg @ref LL_RCC_PLLDIVR_DIV_24 
+ * @arg @ref LL_RCC_PLLDIVR_DIV_25 + * @arg @ref LL_RCC_PLLDIVR_DIV_26 + * @arg @ref LL_RCC_PLLDIVR_DIV_27 + * @arg @ref LL_RCC_PLLDIVR_DIV_28 + * @arg @ref LL_RCC_PLLDIVR_DIV_29 + * @arg @ref LL_RCC_PLLDIVR_DIV_30 + * @arg @ref LL_RCC_PLLDIVR_DIV_31 + */ +__STATIC_INLINE uint32_t LL_RCC_PLL_GetDIVR(void) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_PLLDIVR)); +} +#endif /* RCC_DCKCFGR_PLLDIVR */ + +/** + * @brief Get Division factor for the main PLL and other PLL + * @rmtoll PLLCFGR PLLM LL_RCC_PLL_GetDivider + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + */ +__STATIC_INLINE uint32_t LL_RCC_PLL_GetDivider(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLM)); +} + +/** + * @brief Configure Spread Spectrum used for PLL + * @note These bits must be written before enabling PLL + * @rmtoll SSCGR MODPER LL_RCC_PLL_ConfigSpreadSpectrum\n + * SSCGR INCSTEP LL_RCC_PLL_ConfigSpreadSpectrum\n + * SSCGR SPREADSEL LL_RCC_PLL_ConfigSpreadSpectrum + * @param Mod Between Min_Data=0 and Max_Data=8191 + * @param Inc Between Min_Data=0 and Max_Data=32767 + * @param Sel This parameter can be one of the following values: + * @arg @ref LL_RCC_SPREAD_SELECT_CENTER + * @arg @ref LL_RCC_SPREAD_SELECT_DOWN + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLL_ConfigSpreadSpectrum(uint32_t Mod, uint32_t Inc, uint32_t Sel) +{ + MODIFY_REG(RCC->SSCGR, RCC_SSCGR_MODPER | RCC_SSCGR_INCSTEP | RCC_SSCGR_SPREADSEL, Mod | (Inc << RCC_SSCGR_INCSTEP_Pos) | Sel); +} + +/** + * 
@brief Get Spread Spectrum Modulation Period for PLL + * @rmtoll SSCGR MODPER LL_RCC_PLL_GetPeriodModulation + * @retval Between Min_Data=0 and Max_Data=8191 + */ +__STATIC_INLINE uint32_t LL_RCC_PLL_GetPeriodModulation(void) +{ + return (uint32_t)(READ_BIT(RCC->SSCGR, RCC_SSCGR_MODPER)); +} + +/** + * @brief Get Spread Spectrum Incrementation Step for PLL + * @note Must be written before enabling PLL + * @rmtoll SSCGR INCSTEP LL_RCC_PLL_GetStepIncrementation + * @retval Between Min_Data=0 and Max_Data=32767 + */ +__STATIC_INLINE uint32_t LL_RCC_PLL_GetStepIncrementation(void) +{ + return (uint32_t)(READ_BIT(RCC->SSCGR, RCC_SSCGR_INCSTEP) >> RCC_SSCGR_INCSTEP_Pos); +} + +/** + * @brief Get Spread Spectrum Selection for PLL + * @note Must be written before enabling PLL + * @rmtoll SSCGR SPREADSEL LL_RCC_PLL_GetSpreadSelection + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_SPREAD_SELECT_CENTER + * @arg @ref LL_RCC_SPREAD_SELECT_DOWN + */ +__STATIC_INLINE uint32_t LL_RCC_PLL_GetSpreadSelection(void) +{ + return (uint32_t)(READ_BIT(RCC->SSCGR, RCC_SSCGR_SPREADSEL)); +} + +/** + * @brief Enable Spread Spectrum for PLL. + * @rmtoll SSCGR SSCGEN LL_RCC_PLL_SpreadSpectrum_Enable + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLL_SpreadSpectrum_Enable(void) +{ + SET_BIT(RCC->SSCGR, RCC_SSCGR_SSCGEN); +} + +/** + * @brief Disable Spread Spectrum for PLL. + * @rmtoll SSCGR SSCGEN LL_RCC_PLL_SpreadSpectrum_Disable + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLL_SpreadSpectrum_Disable(void) +{ + CLEAR_BIT(RCC->SSCGR, RCC_SSCGR_SSCGEN); +} + +/** + * @} + */ + +#if defined(RCC_PLLI2S_SUPPORT) +/** @defgroup RCC_LL_EF_PLLI2S PLLI2S + * @{ + */ + +/** + * @brief Enable PLLI2S + * @rmtoll CR PLLI2SON LL_RCC_PLLI2S_Enable + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLLI2S_Enable(void) +{ + SET_BIT(RCC->CR, RCC_CR_PLLI2SON); +} + +/** + * @brief Disable PLLI2S + * @rmtoll CR PLLI2SON LL_RCC_PLLI2S_Disable + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLLI2S_Disable(void) +{ + CLEAR_BIT(RCC->CR, RCC_CR_PLLI2SON); +} + +/** + * @brief Check if PLLI2S Ready + * @rmtoll CR PLLI2SRDY LL_RCC_PLLI2S_IsReady + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_PLLI2S_IsReady(void) +{ + return (READ_BIT(RCC->CR, RCC_CR_PLLI2SRDY) == (RCC_CR_PLLI2SRDY)); +} + +#if (defined(RCC_DCKCFGR_PLLI2SDIVQ) || defined(RCC_DCKCFGR_PLLI2SDIVR)) +/** + * @brief Configure PLLI2S used for SAI domain clock + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI(*) are disabled + * @note PLLN/PLLQ/PLLR can be written only when PLLI2S is disabled + * @note This can be selected for SAI + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLLI2S_ConfigDomain_SAI\n + * PLLI2SCFGR PLLI2SSRC LL_RCC_PLLI2S_ConfigDomain_SAI\n + * PLLCFGR PLLM LL_RCC_PLLI2S_ConfigDomain_SAI\n + * PLLI2SCFGR PLLI2SM LL_RCC_PLLI2S_ConfigDomain_SAI\n + * PLLI2SCFGR PLLI2SN LL_RCC_PLLI2S_ConfigDomain_SAI\n + * PLLI2SCFGR PLLI2SQ LL_RCC_PLLI2S_ConfigDomain_SAI\n + * PLLI2SCFGR PLLI2SR LL_RCC_PLLI2S_ConfigDomain_SAI\n + * DCKCFGR PLLI2SDIVQ LL_RCC_PLLI2S_ConfigDomain_SAI\n + * DCKCFGR PLLI2SDIVR LL_RCC_PLLI2S_ConfigDomain_SAI + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @arg @ref LL_RCC_PLLI2SSOURCE_PIN (*) + * + * (*) value not defined in all devices. 
+ * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SM_DIV_2 + * @arg @ref LL_RCC_PLLI2SM_DIV_3 + * @arg @ref LL_RCC_PLLI2SM_DIV_4 + * @arg @ref LL_RCC_PLLI2SM_DIV_5 + * @arg @ref LL_RCC_PLLI2SM_DIV_6 + * @arg @ref LL_RCC_PLLI2SM_DIV_7 + * @arg @ref LL_RCC_PLLI2SM_DIV_8 + * @arg @ref LL_RCC_PLLI2SM_DIV_9 + * @arg @ref LL_RCC_PLLI2SM_DIV_10 + * @arg @ref LL_RCC_PLLI2SM_DIV_11 + * @arg @ref LL_RCC_PLLI2SM_DIV_12 + * @arg @ref LL_RCC_PLLI2SM_DIV_13 + * @arg @ref LL_RCC_PLLI2SM_DIV_14 + * @arg @ref LL_RCC_PLLI2SM_DIV_15 + * @arg @ref LL_RCC_PLLI2SM_DIV_16 + * @arg @ref LL_RCC_PLLI2SM_DIV_17 + * @arg @ref LL_RCC_PLLI2SM_DIV_18 + * @arg @ref LL_RCC_PLLI2SM_DIV_19 + * @arg @ref LL_RCC_PLLI2SM_DIV_20 + * @arg @ref LL_RCC_PLLI2SM_DIV_21 + * @arg @ref LL_RCC_PLLI2SM_DIV_22 + * @arg @ref LL_RCC_PLLI2SM_DIV_23 + * @arg @ref LL_RCC_PLLI2SM_DIV_24 + * @arg @ref LL_RCC_PLLI2SM_DIV_25 + * @arg @ref LL_RCC_PLLI2SM_DIV_26 + * @arg @ref LL_RCC_PLLI2SM_DIV_27 + * @arg @ref LL_RCC_PLLI2SM_DIV_28 + * @arg @ref LL_RCC_PLLI2SM_DIV_29 + * @arg @ref LL_RCC_PLLI2SM_DIV_30 + * @arg @ref LL_RCC_PLLI2SM_DIV_31 + * @arg @ref LL_RCC_PLLI2SM_DIV_32 + * @arg @ref LL_RCC_PLLI2SM_DIV_33 + * @arg @ref LL_RCC_PLLI2SM_DIV_34 + * @arg @ref LL_RCC_PLLI2SM_DIV_35 + * @arg @ref LL_RCC_PLLI2SM_DIV_36 + * @arg @ref LL_RCC_PLLI2SM_DIV_37 + * @arg @ref LL_RCC_PLLI2SM_DIV_38 + * @arg @ref LL_RCC_PLLI2SM_DIV_39 + * @arg @ref LL_RCC_PLLI2SM_DIV_40 + * @arg @ref LL_RCC_PLLI2SM_DIV_41 + * @arg @ref LL_RCC_PLLI2SM_DIV_42 + * @arg @ref LL_RCC_PLLI2SM_DIV_43 + * @arg @ref LL_RCC_PLLI2SM_DIV_44 + * @arg @ref LL_RCC_PLLI2SM_DIV_45 + * @arg @ref LL_RCC_PLLI2SM_DIV_46 + * @arg @ref LL_RCC_PLLI2SM_DIV_47 + * @arg @ref LL_RCC_PLLI2SM_DIV_48 + * @arg @ref LL_RCC_PLLI2SM_DIV_49 + * @arg @ref LL_RCC_PLLI2SM_DIV_50 + * @arg @ref LL_RCC_PLLI2SM_DIV_51 + * @arg @ref LL_RCC_PLLI2SM_DIV_52 + * @arg @ref LL_RCC_PLLI2SM_DIV_53 + * @arg @ref LL_RCC_PLLI2SM_DIV_54 + * @arg @ref LL_RCC_PLLI2SM_DIV_55 + * @arg @ref LL_RCC_PLLI2SM_DIV_56 + * @arg @ref LL_RCC_PLLI2SM_DIV_57 + * @arg @ref LL_RCC_PLLI2SM_DIV_58 + * @arg @ref LL_RCC_PLLI2SM_DIV_59 + * @arg @ref LL_RCC_PLLI2SM_DIV_60 + * @arg @ref LL_RCC_PLLI2SM_DIV_61 + * @arg @ref LL_RCC_PLLI2SM_DIV_62 + * @arg @ref LL_RCC_PLLI2SM_DIV_63 + * @param PLLN Between 50/192(*) and 432 + * + * (*) value not defined in all devices. + * @param PLLQ_R This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SQ_DIV_2 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_3 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_4 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_5 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_6 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_7 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_8 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_9 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_10 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_11 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_12 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_13 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_14 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_15 (*) + * @arg @ref LL_RCC_PLLI2SR_DIV_2 (*) + * @arg @ref LL_RCC_PLLI2SR_DIV_3 (*) + * @arg @ref LL_RCC_PLLI2SR_DIV_4 (*) + * @arg @ref LL_RCC_PLLI2SR_DIV_5 (*) + * @arg @ref LL_RCC_PLLI2SR_DIV_6 (*) + * @arg @ref LL_RCC_PLLI2SR_DIV_7 (*) + * + * (*) value not defined in all devices. 
+ * @param PLLDIVQ_R This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_1 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_2 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_3 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_4 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_5 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_6 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_7 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_8 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_9 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_10 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_11 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_12 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_13 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_14 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_15 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_16 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_17 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_18 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_19 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_20 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_21 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_22 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_23 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_24 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_25 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_26 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_27 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_28 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_29 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_30 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_31 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_32 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_1 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_2 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_3 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_4 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_5 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_6 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_7 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_8 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_9 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_10 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_11 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_12 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_13 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_14 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_15 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_16 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_17 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_18 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_19 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_20 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_21 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_22 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_23 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_24 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_25 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_26 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_27 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_28 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_29 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_30 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_31 (*) + * + * (*) value not defined in all devices. 
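+ * @note  Editorial usage sketch, not part of the original header, assuming an 8 MHz HSE and a
+ *        device exposing the PLLI2SQ/PLLI2SDIVQ path: 8 MHz / 4 * 192 / 4 / 2 gives a 48 MHz
+ *        SAI kernel clock.
+ * @code
+ *   LL_RCC_PLLI2S_ConfigDomain_SAI(LL_RCC_PLLSOURCE_HSE, LL_RCC_PLLI2SM_DIV_4, 192U,
+ *                                  LL_RCC_PLLI2SQ_DIV_4, LL_RCC_PLLI2SDIVQ_DIV_2);
+ *   LL_RCC_PLLI2S_Enable();
+ *   while (LL_RCC_PLLI2S_IsReady() != 1U)
+ *   {
+ *   }
+ * @endcode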
+ * @retval None + */ +__STATIC_INLINE void LL_RCC_PLLI2S_ConfigDomain_SAI(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLQ_R, + uint32_t PLLDIVQ_R) +{ + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&RCC->PLLCFGR) + (Source & 0x80U))); + MODIFY_REG(*pReg, RCC_PLLCFGR_PLLSRC, (Source & (~0x80U))); +#if defined(RCC_PLLI2SCFGR_PLLI2SM) + MODIFY_REG(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SM, PLLM); +#else + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLM, PLLM); +#endif /* RCC_PLLI2SCFGR_PLLI2SM */ + MODIFY_REG(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SN, PLLN << RCC_PLLI2SCFGR_PLLI2SN_Pos); +#if defined(RCC_DCKCFGR_PLLI2SDIVQ) + MODIFY_REG(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SQ, PLLQ_R); + MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_PLLI2SDIVQ, PLLDIVQ_R); +#else + MODIFY_REG(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SR, PLLQ_R); + MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_PLLI2SDIVR, PLLDIVQ_R); +#endif /* RCC_DCKCFGR_PLLI2SDIVQ */ +} +#endif /* RCC_DCKCFGR_PLLI2SDIVQ && RCC_DCKCFGR_PLLI2SDIVR */ + +#if defined(RCC_PLLI2SCFGR_PLLI2SQ) && !defined(RCC_DCKCFGR_PLLI2SDIVQ) +/** + * @brief Configure PLLI2S used for 48Mhz domain clock + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI(*) are disabled + * @note PLLN/PLLQ can be written only when PLLI2S is disabled + * @note This can be selected for RNG, USB, SDIO + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLLI2S_ConfigDomain_48M\n + * PLLI2SCFGR PLLI2SSRC LL_RCC_PLLI2S_ConfigDomain_48M\n + * PLLCFGR PLLM LL_RCC_PLLI2S_ConfigDomain_48M\n + * PLLI2SCFGR PLLI2SM LL_RCC_PLLI2S_ConfigDomain_48M\n + * PLLI2SCFGR PLLI2SN LL_RCC_PLLI2S_ConfigDomain_48M\n + * PLLI2SCFGR PLLI2SQ LL_RCC_PLLI2S_ConfigDomain_48M + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @arg @ref LL_RCC_PLLI2SSOURCE_PIN (*) + * + * (*) value not defined in all devices. 
+ * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SM_DIV_2 + * @arg @ref LL_RCC_PLLI2SM_DIV_3 + * @arg @ref LL_RCC_PLLI2SM_DIV_4 + * @arg @ref LL_RCC_PLLI2SM_DIV_5 + * @arg @ref LL_RCC_PLLI2SM_DIV_6 + * @arg @ref LL_RCC_PLLI2SM_DIV_7 + * @arg @ref LL_RCC_PLLI2SM_DIV_8 + * @arg @ref LL_RCC_PLLI2SM_DIV_9 + * @arg @ref LL_RCC_PLLI2SM_DIV_10 + * @arg @ref LL_RCC_PLLI2SM_DIV_11 + * @arg @ref LL_RCC_PLLI2SM_DIV_12 + * @arg @ref LL_RCC_PLLI2SM_DIV_13 + * @arg @ref LL_RCC_PLLI2SM_DIV_14 + * @arg @ref LL_RCC_PLLI2SM_DIV_15 + * @arg @ref LL_RCC_PLLI2SM_DIV_16 + * @arg @ref LL_RCC_PLLI2SM_DIV_17 + * @arg @ref LL_RCC_PLLI2SM_DIV_18 + * @arg @ref LL_RCC_PLLI2SM_DIV_19 + * @arg @ref LL_RCC_PLLI2SM_DIV_20 + * @arg @ref LL_RCC_PLLI2SM_DIV_21 + * @arg @ref LL_RCC_PLLI2SM_DIV_22 + * @arg @ref LL_RCC_PLLI2SM_DIV_23 + * @arg @ref LL_RCC_PLLI2SM_DIV_24 + * @arg @ref LL_RCC_PLLI2SM_DIV_25 + * @arg @ref LL_RCC_PLLI2SM_DIV_26 + * @arg @ref LL_RCC_PLLI2SM_DIV_27 + * @arg @ref LL_RCC_PLLI2SM_DIV_28 + * @arg @ref LL_RCC_PLLI2SM_DIV_29 + * @arg @ref LL_RCC_PLLI2SM_DIV_30 + * @arg @ref LL_RCC_PLLI2SM_DIV_31 + * @arg @ref LL_RCC_PLLI2SM_DIV_32 + * @arg @ref LL_RCC_PLLI2SM_DIV_33 + * @arg @ref LL_RCC_PLLI2SM_DIV_34 + * @arg @ref LL_RCC_PLLI2SM_DIV_35 + * @arg @ref LL_RCC_PLLI2SM_DIV_36 + * @arg @ref LL_RCC_PLLI2SM_DIV_37 + * @arg @ref LL_RCC_PLLI2SM_DIV_38 + * @arg @ref LL_RCC_PLLI2SM_DIV_39 + * @arg @ref LL_RCC_PLLI2SM_DIV_40 + * @arg @ref LL_RCC_PLLI2SM_DIV_41 + * @arg @ref LL_RCC_PLLI2SM_DIV_42 + * @arg @ref LL_RCC_PLLI2SM_DIV_43 + * @arg @ref LL_RCC_PLLI2SM_DIV_44 + * @arg @ref LL_RCC_PLLI2SM_DIV_45 + * @arg @ref LL_RCC_PLLI2SM_DIV_46 + * @arg @ref LL_RCC_PLLI2SM_DIV_47 + * @arg @ref LL_RCC_PLLI2SM_DIV_48 + * @arg @ref LL_RCC_PLLI2SM_DIV_49 + * @arg @ref LL_RCC_PLLI2SM_DIV_50 + * @arg @ref LL_RCC_PLLI2SM_DIV_51 + * @arg @ref LL_RCC_PLLI2SM_DIV_52 + * @arg @ref LL_RCC_PLLI2SM_DIV_53 + * @arg @ref LL_RCC_PLLI2SM_DIV_54 + * @arg @ref LL_RCC_PLLI2SM_DIV_55 + * @arg @ref LL_RCC_PLLI2SM_DIV_56 + * @arg @ref LL_RCC_PLLI2SM_DIV_57 + * @arg @ref LL_RCC_PLLI2SM_DIV_58 + * @arg @ref LL_RCC_PLLI2SM_DIV_59 + * @arg @ref LL_RCC_PLLI2SM_DIV_60 + * @arg @ref LL_RCC_PLLI2SM_DIV_61 + * @arg @ref LL_RCC_PLLI2SM_DIV_62 + * @arg @ref LL_RCC_PLLI2SM_DIV_63 + * @param PLLN Between 50 and 432 + * @param PLLQ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SQ_DIV_2 + * @arg @ref LL_RCC_PLLI2SQ_DIV_3 + * @arg @ref LL_RCC_PLLI2SQ_DIV_4 + * @arg @ref LL_RCC_PLLI2SQ_DIV_5 + * @arg @ref LL_RCC_PLLI2SQ_DIV_6 + * @arg @ref LL_RCC_PLLI2SQ_DIV_7 + * @arg @ref LL_RCC_PLLI2SQ_DIV_8 + * @arg @ref LL_RCC_PLLI2SQ_DIV_9 + * @arg @ref LL_RCC_PLLI2SQ_DIV_10 + * @arg @ref LL_RCC_PLLI2SQ_DIV_11 + * @arg @ref LL_RCC_PLLI2SQ_DIV_12 + * @arg @ref LL_RCC_PLLI2SQ_DIV_13 + * @arg @ref LL_RCC_PLLI2SQ_DIV_14 + * @arg @ref LL_RCC_PLLI2SQ_DIV_15 + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLLI2S_ConfigDomain_48M(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLQ) +{ + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&RCC->PLLCFGR) + (Source & 0x80U))); + MODIFY_REG(*pReg, RCC_PLLCFGR_PLLSRC, (Source & (~0x80U))); +#if defined(RCC_PLLI2SCFGR_PLLI2SM) + MODIFY_REG(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SM, PLLM); +#else + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLM, PLLM); +#endif /* RCC_PLLI2SCFGR_PLLI2SM */ + MODIFY_REG(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SN | RCC_PLLI2SCFGR_PLLI2SQ, PLLN << RCC_PLLI2SCFGR_PLLI2SN_Pos | PLLQ); +} +#endif /* RCC_PLLI2SCFGR_PLLI2SQ && 
!RCC_DCKCFGR_PLLI2SDIVQ */ + +#if defined(SPDIFRX) +/** + * @brief Configure PLLI2S used for SPDIFRX domain clock + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI(*) are disabled + * @note PLLN/PLLP can be written only when PLLI2S is disabled + * @note This can be selected for SPDIFRX + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLLI2S_ConfigDomain_SPDIFRX\n + * PLLCFGR PLLM LL_RCC_PLLI2S_ConfigDomain_SPDIFRX\n + * PLLI2SCFGR PLLI2SM LL_RCC_PLLI2S_ConfigDomain_SPDIFRX\n + * PLLI2SCFGR PLLI2SN LL_RCC_PLLI2S_ConfigDomain_SPDIFRX\n + * PLLI2SCFGR PLLI2SP LL_RCC_PLLI2S_ConfigDomain_SPDIFRX + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SM_DIV_2 + * @arg @ref LL_RCC_PLLI2SM_DIV_3 + * @arg @ref LL_RCC_PLLI2SM_DIV_4 + * @arg @ref LL_RCC_PLLI2SM_DIV_5 + * @arg @ref LL_RCC_PLLI2SM_DIV_6 + * @arg @ref LL_RCC_PLLI2SM_DIV_7 + * @arg @ref LL_RCC_PLLI2SM_DIV_8 + * @arg @ref LL_RCC_PLLI2SM_DIV_9 + * @arg @ref LL_RCC_PLLI2SM_DIV_10 + * @arg @ref LL_RCC_PLLI2SM_DIV_11 + * @arg @ref LL_RCC_PLLI2SM_DIV_12 + * @arg @ref LL_RCC_PLLI2SM_DIV_13 + * @arg @ref LL_RCC_PLLI2SM_DIV_14 + * @arg @ref LL_RCC_PLLI2SM_DIV_15 + * @arg @ref LL_RCC_PLLI2SM_DIV_16 + * @arg @ref LL_RCC_PLLI2SM_DIV_17 + * @arg @ref LL_RCC_PLLI2SM_DIV_18 + * @arg @ref LL_RCC_PLLI2SM_DIV_19 + * @arg @ref LL_RCC_PLLI2SM_DIV_20 + * @arg @ref LL_RCC_PLLI2SM_DIV_21 + * @arg @ref LL_RCC_PLLI2SM_DIV_22 + * @arg @ref LL_RCC_PLLI2SM_DIV_23 + * @arg @ref LL_RCC_PLLI2SM_DIV_24 + * @arg @ref LL_RCC_PLLI2SM_DIV_25 + * @arg @ref LL_RCC_PLLI2SM_DIV_26 + * @arg @ref LL_RCC_PLLI2SM_DIV_27 + * @arg @ref LL_RCC_PLLI2SM_DIV_28 + * @arg @ref LL_RCC_PLLI2SM_DIV_29 + * @arg @ref LL_RCC_PLLI2SM_DIV_30 + * @arg @ref LL_RCC_PLLI2SM_DIV_31 + * @arg @ref LL_RCC_PLLI2SM_DIV_32 + * @arg @ref LL_RCC_PLLI2SM_DIV_33 + * @arg @ref LL_RCC_PLLI2SM_DIV_34 + * @arg @ref LL_RCC_PLLI2SM_DIV_35 + * @arg @ref LL_RCC_PLLI2SM_DIV_36 + * @arg @ref LL_RCC_PLLI2SM_DIV_37 + * @arg @ref LL_RCC_PLLI2SM_DIV_38 + * @arg @ref LL_RCC_PLLI2SM_DIV_39 + * @arg @ref LL_RCC_PLLI2SM_DIV_40 + * @arg @ref LL_RCC_PLLI2SM_DIV_41 + * @arg @ref LL_RCC_PLLI2SM_DIV_42 + * @arg @ref LL_RCC_PLLI2SM_DIV_43 + * @arg @ref LL_RCC_PLLI2SM_DIV_44 + * @arg @ref LL_RCC_PLLI2SM_DIV_45 + * @arg @ref LL_RCC_PLLI2SM_DIV_46 + * @arg @ref LL_RCC_PLLI2SM_DIV_47 + * @arg @ref LL_RCC_PLLI2SM_DIV_48 + * @arg @ref LL_RCC_PLLI2SM_DIV_49 + * @arg @ref LL_RCC_PLLI2SM_DIV_50 + * @arg @ref LL_RCC_PLLI2SM_DIV_51 + * @arg @ref LL_RCC_PLLI2SM_DIV_52 + * @arg @ref LL_RCC_PLLI2SM_DIV_53 + * @arg @ref LL_RCC_PLLI2SM_DIV_54 + * @arg @ref LL_RCC_PLLI2SM_DIV_55 + * @arg @ref LL_RCC_PLLI2SM_DIV_56 + * @arg @ref LL_RCC_PLLI2SM_DIV_57 + * @arg @ref LL_RCC_PLLI2SM_DIV_58 + * @arg @ref LL_RCC_PLLI2SM_DIV_59 + * @arg @ref LL_RCC_PLLI2SM_DIV_60 + * @arg @ref LL_RCC_PLLI2SM_DIV_61 + * @arg @ref LL_RCC_PLLI2SM_DIV_62 + * @arg @ref LL_RCC_PLLI2SM_DIV_63 + * @param PLLN Between 50 and 432 + * @param PLLP This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SP_DIV_2 + * @arg @ref LL_RCC_PLLI2SP_DIV_4 + * @arg @ref LL_RCC_PLLI2SP_DIV_6 + * @arg @ref LL_RCC_PLLI2SP_DIV_8 + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLLI2S_ConfigDomain_SPDIFRX(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLP) +{ + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC, Source); +#if defined(RCC_PLLI2SCFGR_PLLI2SM) + 
MODIFY_REG(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SM, PLLM); +#else + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLM, PLLM); +#endif /* RCC_PLLI2SCFGR_PLLI2SM */ + MODIFY_REG(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SN | RCC_PLLI2SCFGR_PLLI2SP, PLLN << RCC_PLLI2SCFGR_PLLI2SN_Pos | PLLP); +} +#endif /* SPDIFRX */ + +/** + * @brief Configure PLLI2S used for I2S1 domain clock + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI(*) are disabled + * @note PLLN/PLLR can be written only when PLLI2S is disabled + * @note This can be selected for I2S + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLLI2S_ConfigDomain_I2S\n + * PLLCFGR PLLM LL_RCC_PLLI2S_ConfigDomain_I2S\n + * PLLI2SCFGR PLLI2SSRC LL_RCC_PLLI2S_ConfigDomain_I2S\n + * PLLI2SCFGR PLLI2SM LL_RCC_PLLI2S_ConfigDomain_I2S\n + * PLLI2SCFGR PLLI2SN LL_RCC_PLLI2S_ConfigDomain_I2S\n + * PLLI2SCFGR PLLI2SR LL_RCC_PLLI2S_ConfigDomain_I2S + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @arg @ref LL_RCC_PLLI2SSOURCE_PIN (*) + * + * (*) value not defined in all devices. + * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SM_DIV_2 + * @arg @ref LL_RCC_PLLI2SM_DIV_3 + * @arg @ref LL_RCC_PLLI2SM_DIV_4 + * @arg @ref LL_RCC_PLLI2SM_DIV_5 + * @arg @ref LL_RCC_PLLI2SM_DIV_6 + * @arg @ref LL_RCC_PLLI2SM_DIV_7 + * @arg @ref LL_RCC_PLLI2SM_DIV_8 + * @arg @ref LL_RCC_PLLI2SM_DIV_9 + * @arg @ref LL_RCC_PLLI2SM_DIV_10 + * @arg @ref LL_RCC_PLLI2SM_DIV_11 + * @arg @ref LL_RCC_PLLI2SM_DIV_12 + * @arg @ref LL_RCC_PLLI2SM_DIV_13 + * @arg @ref LL_RCC_PLLI2SM_DIV_14 + * @arg @ref LL_RCC_PLLI2SM_DIV_15 + * @arg @ref LL_RCC_PLLI2SM_DIV_16 + * @arg @ref LL_RCC_PLLI2SM_DIV_17 + * @arg @ref LL_RCC_PLLI2SM_DIV_18 + * @arg @ref LL_RCC_PLLI2SM_DIV_19 + * @arg @ref LL_RCC_PLLI2SM_DIV_20 + * @arg @ref LL_RCC_PLLI2SM_DIV_21 + * @arg @ref LL_RCC_PLLI2SM_DIV_22 + * @arg @ref LL_RCC_PLLI2SM_DIV_23 + * @arg @ref LL_RCC_PLLI2SM_DIV_24 + * @arg @ref LL_RCC_PLLI2SM_DIV_25 + * @arg @ref LL_RCC_PLLI2SM_DIV_26 + * @arg @ref LL_RCC_PLLI2SM_DIV_27 + * @arg @ref LL_RCC_PLLI2SM_DIV_28 + * @arg @ref LL_RCC_PLLI2SM_DIV_29 + * @arg @ref LL_RCC_PLLI2SM_DIV_30 + * @arg @ref LL_RCC_PLLI2SM_DIV_31 + * @arg @ref LL_RCC_PLLI2SM_DIV_32 + * @arg @ref LL_RCC_PLLI2SM_DIV_33 + * @arg @ref LL_RCC_PLLI2SM_DIV_34 + * @arg @ref LL_RCC_PLLI2SM_DIV_35 + * @arg @ref LL_RCC_PLLI2SM_DIV_36 + * @arg @ref LL_RCC_PLLI2SM_DIV_37 + * @arg @ref LL_RCC_PLLI2SM_DIV_38 + * @arg @ref LL_RCC_PLLI2SM_DIV_39 + * @arg @ref LL_RCC_PLLI2SM_DIV_40 + * @arg @ref LL_RCC_PLLI2SM_DIV_41 + * @arg @ref LL_RCC_PLLI2SM_DIV_42 + * @arg @ref LL_RCC_PLLI2SM_DIV_43 + * @arg @ref LL_RCC_PLLI2SM_DIV_44 + * @arg @ref LL_RCC_PLLI2SM_DIV_45 + * @arg @ref LL_RCC_PLLI2SM_DIV_46 + * @arg @ref LL_RCC_PLLI2SM_DIV_47 + * @arg @ref LL_RCC_PLLI2SM_DIV_48 + * @arg @ref LL_RCC_PLLI2SM_DIV_49 + * @arg @ref LL_RCC_PLLI2SM_DIV_50 + * @arg @ref LL_RCC_PLLI2SM_DIV_51 + * @arg @ref LL_RCC_PLLI2SM_DIV_52 + * @arg @ref LL_RCC_PLLI2SM_DIV_53 + * @arg @ref LL_RCC_PLLI2SM_DIV_54 + * @arg @ref LL_RCC_PLLI2SM_DIV_55 + * @arg @ref LL_RCC_PLLI2SM_DIV_56 + * @arg @ref LL_RCC_PLLI2SM_DIV_57 + * @arg @ref LL_RCC_PLLI2SM_DIV_58 + * @arg @ref LL_RCC_PLLI2SM_DIV_59 + * @arg @ref LL_RCC_PLLI2SM_DIV_60 + * @arg @ref LL_RCC_PLLI2SM_DIV_61 + * @arg @ref LL_RCC_PLLI2SM_DIV_62 + * @arg @ref LL_RCC_PLLI2SM_DIV_63 + * @param PLLN Between 50/192(*) and 432 + * + * (*) value not defined in all devices. 
+ * @param PLLR This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SR_DIV_2 + * @arg @ref LL_RCC_PLLI2SR_DIV_3 + * @arg @ref LL_RCC_PLLI2SR_DIV_4 + * @arg @ref LL_RCC_PLLI2SR_DIV_5 + * @arg @ref LL_RCC_PLLI2SR_DIV_6 + * @arg @ref LL_RCC_PLLI2SR_DIV_7 + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLLI2S_ConfigDomain_I2S(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLR) +{ + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&RCC->PLLCFGR) + (Source & 0x80U))); + MODIFY_REG(*pReg, RCC_PLLCFGR_PLLSRC, (Source & (~0x80U))); +#if defined(RCC_PLLI2SCFGR_PLLI2SM) + MODIFY_REG(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SM, PLLM); +#else + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLM, PLLM); +#endif /* RCC_PLLI2SCFGR_PLLI2SM */ + MODIFY_REG(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SN | RCC_PLLI2SCFGR_PLLI2SR, PLLN << RCC_PLLI2SCFGR_PLLI2SN_Pos | PLLR); +} + +/** + * @brief Get I2SPLL multiplication factor for VCO + * @rmtoll PLLI2SCFGR PLLI2SN LL_RCC_PLLI2S_GetN + * @retval Between 50/192(*) and 432 + * + * (*) value not defined in all devices. + */ +__STATIC_INLINE uint32_t LL_RCC_PLLI2S_GetN(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SN) >> RCC_PLLI2SCFGR_PLLI2SN_Pos); +} + +#if defined(RCC_PLLI2SCFGR_PLLI2SQ) +/** + * @brief Get I2SPLL division factor for PLLI2SQ + * @rmtoll PLLI2SCFGR PLLI2SQ LL_RCC_PLLI2S_GetQ + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLI2SQ_DIV_2 + * @arg @ref LL_RCC_PLLI2SQ_DIV_3 + * @arg @ref LL_RCC_PLLI2SQ_DIV_4 + * @arg @ref LL_RCC_PLLI2SQ_DIV_5 + * @arg @ref LL_RCC_PLLI2SQ_DIV_6 + * @arg @ref LL_RCC_PLLI2SQ_DIV_7 + * @arg @ref LL_RCC_PLLI2SQ_DIV_8 + * @arg @ref LL_RCC_PLLI2SQ_DIV_9 + * @arg @ref LL_RCC_PLLI2SQ_DIV_10 + * @arg @ref LL_RCC_PLLI2SQ_DIV_11 + * @arg @ref LL_RCC_PLLI2SQ_DIV_12 + * @arg @ref LL_RCC_PLLI2SQ_DIV_13 + * @arg @ref LL_RCC_PLLI2SQ_DIV_14 + * @arg @ref LL_RCC_PLLI2SQ_DIV_15 + */ +__STATIC_INLINE uint32_t LL_RCC_PLLI2S_GetQ(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SQ)); +} +#endif /* RCC_PLLI2SCFGR_PLLI2SQ */ + +/** + * @brief Get I2SPLL division factor for PLLI2SR + * @note used for PLLI2SCLK (I2S clock) + * @rmtoll PLLI2SCFGR PLLI2SR LL_RCC_PLLI2S_GetR + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLI2SR_DIV_2 + * @arg @ref LL_RCC_PLLI2SR_DIV_3 + * @arg @ref LL_RCC_PLLI2SR_DIV_4 + * @arg @ref LL_RCC_PLLI2SR_DIV_5 + * @arg @ref LL_RCC_PLLI2SR_DIV_6 + * @arg @ref LL_RCC_PLLI2SR_DIV_7 + */ +__STATIC_INLINE uint32_t LL_RCC_PLLI2S_GetR(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SR)); +} + +#if defined(RCC_PLLI2SCFGR_PLLI2SP) +/** + * @brief Get I2SPLL division factor for PLLI2SP + * @note used for PLLSPDIFRXCLK (SPDIFRX clock) + * @rmtoll PLLI2SCFGR PLLI2SP LL_RCC_PLLI2S_GetP + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLI2SP_DIV_2 + * @arg @ref LL_RCC_PLLI2SP_DIV_4 + * @arg @ref LL_RCC_PLLI2SP_DIV_6 + * @arg @ref LL_RCC_PLLI2SP_DIV_8 + */ +__STATIC_INLINE uint32_t LL_RCC_PLLI2S_GetP(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SP)); +} +#endif /* RCC_PLLI2SCFGR_PLLI2SP */ + +#if defined(RCC_DCKCFGR_PLLI2SDIVQ) +/** + * @brief Get I2SPLL division factor for PLLI2SDIVQ + * @note used PLLSAICLK selected (SAI clock) + * @rmtoll DCKCFGR PLLI2SDIVQ LL_RCC_PLLI2S_GetDIVQ + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_1 + * 
@arg @ref LL_RCC_PLLI2SDIVQ_DIV_2 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_3 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_4 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_5 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_6 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_7 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_8 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_9 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_10 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_11 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_12 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_13 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_14 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_15 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_16 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_17 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_18 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_19 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_20 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_21 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_22 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_23 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_24 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_25 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_26 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_27 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_28 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_29 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_30 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_31 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_32 + */ +__STATIC_INLINE uint32_t LL_RCC_PLLI2S_GetDIVQ(void) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_PLLI2SDIVQ)); +} +#endif /* RCC_DCKCFGR_PLLI2SDIVQ */ + +#if defined(RCC_DCKCFGR_PLLI2SDIVR) +/** + * @brief Get I2SPLL division factor for PLLI2SDIVR + * @note used PLLSAICLK selected (SAI clock) + * @rmtoll DCKCFGR PLLI2SDIVR LL_RCC_PLLI2S_GetDIVR + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_1 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_2 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_3 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_4 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_5 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_6 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_7 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_8 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_9 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_10 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_11 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_12 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_13 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_14 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_15 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_16 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_17 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_18 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_19 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_20 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_21 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_22 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_23 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_24 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_25 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_26 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_27 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_28 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_29 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_30 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_31 + */ +__STATIC_INLINE uint32_t LL_RCC_PLLI2S_GetDIVR(void) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_PLLI2SDIVR)); +} +#endif /* RCC_DCKCFGR_PLLI2SDIVR */ + +/** + * @brief Get division factor for PLLI2S input clock + * @rmtoll PLLCFGR PLLM LL_RCC_PLLI2S_GetDivider\n + * PLLI2SCFGR PLLI2SM LL_RCC_PLLI2S_GetDivider + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLI2SM_DIV_2 + * @arg @ref LL_RCC_PLLI2SM_DIV_3 + * @arg @ref LL_RCC_PLLI2SM_DIV_4 + * @arg @ref LL_RCC_PLLI2SM_DIV_5 + * @arg @ref LL_RCC_PLLI2SM_DIV_6 + * @arg @ref LL_RCC_PLLI2SM_DIV_7 + * @arg @ref LL_RCC_PLLI2SM_DIV_8 + * @arg @ref LL_RCC_PLLI2SM_DIV_9 + * @arg @ref LL_RCC_PLLI2SM_DIV_10 + * @arg @ref 
LL_RCC_PLLI2SM_DIV_11 + * @arg @ref LL_RCC_PLLI2SM_DIV_12 + * @arg @ref LL_RCC_PLLI2SM_DIV_13 + * @arg @ref LL_RCC_PLLI2SM_DIV_14 + * @arg @ref LL_RCC_PLLI2SM_DIV_15 + * @arg @ref LL_RCC_PLLI2SM_DIV_16 + * @arg @ref LL_RCC_PLLI2SM_DIV_17 + * @arg @ref LL_RCC_PLLI2SM_DIV_18 + * @arg @ref LL_RCC_PLLI2SM_DIV_19 + * @arg @ref LL_RCC_PLLI2SM_DIV_20 + * @arg @ref LL_RCC_PLLI2SM_DIV_21 + * @arg @ref LL_RCC_PLLI2SM_DIV_22 + * @arg @ref LL_RCC_PLLI2SM_DIV_23 + * @arg @ref LL_RCC_PLLI2SM_DIV_24 + * @arg @ref LL_RCC_PLLI2SM_DIV_25 + * @arg @ref LL_RCC_PLLI2SM_DIV_26 + * @arg @ref LL_RCC_PLLI2SM_DIV_27 + * @arg @ref LL_RCC_PLLI2SM_DIV_28 + * @arg @ref LL_RCC_PLLI2SM_DIV_29 + * @arg @ref LL_RCC_PLLI2SM_DIV_30 + * @arg @ref LL_RCC_PLLI2SM_DIV_31 + * @arg @ref LL_RCC_PLLI2SM_DIV_32 + * @arg @ref LL_RCC_PLLI2SM_DIV_33 + * @arg @ref LL_RCC_PLLI2SM_DIV_34 + * @arg @ref LL_RCC_PLLI2SM_DIV_35 + * @arg @ref LL_RCC_PLLI2SM_DIV_36 + * @arg @ref LL_RCC_PLLI2SM_DIV_37 + * @arg @ref LL_RCC_PLLI2SM_DIV_38 + * @arg @ref LL_RCC_PLLI2SM_DIV_39 + * @arg @ref LL_RCC_PLLI2SM_DIV_40 + * @arg @ref LL_RCC_PLLI2SM_DIV_41 + * @arg @ref LL_RCC_PLLI2SM_DIV_42 + * @arg @ref LL_RCC_PLLI2SM_DIV_43 + * @arg @ref LL_RCC_PLLI2SM_DIV_44 + * @arg @ref LL_RCC_PLLI2SM_DIV_45 + * @arg @ref LL_RCC_PLLI2SM_DIV_46 + * @arg @ref LL_RCC_PLLI2SM_DIV_47 + * @arg @ref LL_RCC_PLLI2SM_DIV_48 + * @arg @ref LL_RCC_PLLI2SM_DIV_49 + * @arg @ref LL_RCC_PLLI2SM_DIV_50 + * @arg @ref LL_RCC_PLLI2SM_DIV_51 + * @arg @ref LL_RCC_PLLI2SM_DIV_52 + * @arg @ref LL_RCC_PLLI2SM_DIV_53 + * @arg @ref LL_RCC_PLLI2SM_DIV_54 + * @arg @ref LL_RCC_PLLI2SM_DIV_55 + * @arg @ref LL_RCC_PLLI2SM_DIV_56 + * @arg @ref LL_RCC_PLLI2SM_DIV_57 + * @arg @ref LL_RCC_PLLI2SM_DIV_58 + * @arg @ref LL_RCC_PLLI2SM_DIV_59 + * @arg @ref LL_RCC_PLLI2SM_DIV_60 + * @arg @ref LL_RCC_PLLI2SM_DIV_61 + * @arg @ref LL_RCC_PLLI2SM_DIV_62 + * @arg @ref LL_RCC_PLLI2SM_DIV_63 + */ +__STATIC_INLINE uint32_t LL_RCC_PLLI2S_GetDivider(void) +{ +#if defined(RCC_PLLI2SCFGR_PLLI2SM) + return (uint32_t)(READ_BIT(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SM)); +#else + return (uint32_t)(READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLM)); +#endif /* RCC_PLLI2SCFGR_PLLI2SM */ +} + +/** + * @brief Get the oscillator used as PLL clock source. + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLLI2S_GetMainSource\n + * PLLI2SCFGR PLLI2SSRC LL_RCC_PLLI2S_GetMainSource + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @arg @ref LL_RCC_PLLI2SSOURCE_PIN (*) + * + * (*) value not defined in all devices. 
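+ * @note  Editorial addition, not part of the original ST header: a minimal sketch of
+ *        how the PLLI2S configuration and status helpers in this group are typically
+ *        combined to feed the I2S peripherals. The HSE source and the /8, x192, /2
+ *        factors are arbitrary values taken from the documented ranges, not a
+ *        recommendation; the enable/ready helpers are defined earlier in this header.
+ * @code
+ *        LL_RCC_PLLI2S_Disable();                        // PLLI2S must be off while reconfiguring
+ *        LL_RCC_PLLI2S_ConfigDomain_I2S(LL_RCC_PLLSOURCE_HSE,
+ *                                       LL_RCC_PLLI2SM_DIV_8,
+ *                                       192U,
+ *                                       LL_RCC_PLLI2SR_DIV_2);
+ *        LL_RCC_PLLI2S_Enable();
+ *        while (LL_RCC_PLLI2S_IsReady() != 1U)           // wait for the PLLI2S to lock
+ *        {
+ *        }
+ * @endcode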
+ */ +__STATIC_INLINE uint32_t LL_RCC_PLLI2S_GetMainSource(void) +{ +#if defined(RCC_PLLI2SCFGR_PLLI2SSRC) + uint32_t pllsrc = READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC); + uint32_t plli2sssrc0 = READ_BIT(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SSRC); + uint32_t plli2sssrc1 = READ_BIT(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SSRC) >> 15U; + return (uint32_t)(pllsrc | plli2sssrc0 | plli2sssrc1); +#else + return (uint32_t)(READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC)); +#endif /* RCC_PLLI2SCFGR_PLLI2SSRC */ +} + +/** + * @} + */ +#endif /* RCC_PLLI2S_SUPPORT */ + +#if defined(RCC_PLLSAI_SUPPORT) +/** @defgroup RCC_LL_EF_PLLSAI PLLSAI + * @{ + */ + +/** + * @brief Enable PLLSAI + * @rmtoll CR PLLSAION LL_RCC_PLLSAI_Enable + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLLSAI_Enable(void) +{ + SET_BIT(RCC->CR, RCC_CR_PLLSAION); +} + +/** + * @brief Disable PLLSAI + * @rmtoll CR PLLSAION LL_RCC_PLLSAI_Disable + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLLSAI_Disable(void) +{ + CLEAR_BIT(RCC->CR, RCC_CR_PLLSAION); +} + +/** + * @brief Check if PLLSAI Ready + * @rmtoll CR PLLSAIRDY LL_RCC_PLLSAI_IsReady + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_PLLSAI_IsReady(void) +{ + return (READ_BIT(RCC->CR, RCC_CR_PLLSAIRDY) == (RCC_CR_PLLSAIRDY)); +} + +/** + * @brief Configure PLLSAI used for SAI domain clock + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI(*) are disabled + * @note PLLN/PLLQ can be written only when PLLSAI is disabled + * @note This can be selected for SAI + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLLSAI_ConfigDomain_SAI\n + * PLLCFGR PLLM LL_RCC_PLLSAI_ConfigDomain_SAI\n + * PLLSAICFGR PLLSAIM LL_RCC_PLLSAI_ConfigDomain_SAI\n + * PLLSAICFGR PLLSAIN LL_RCC_PLLSAI_ConfigDomain_SAI\n + * PLLSAICFGR PLLSAIQ LL_RCC_PLLSAI_ConfigDomain_SAI\n + * DCKCFGR PLLSAIDIVQ LL_RCC_PLLSAI_ConfigDomain_SAI + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIM_DIV_2 + * @arg @ref LL_RCC_PLLSAIM_DIV_3 + * @arg @ref LL_RCC_PLLSAIM_DIV_4 + * @arg @ref LL_RCC_PLLSAIM_DIV_5 + * @arg @ref LL_RCC_PLLSAIM_DIV_6 + * @arg @ref LL_RCC_PLLSAIM_DIV_7 + * @arg @ref LL_RCC_PLLSAIM_DIV_8 + * @arg @ref LL_RCC_PLLSAIM_DIV_9 + * @arg @ref LL_RCC_PLLSAIM_DIV_10 + * @arg @ref LL_RCC_PLLSAIM_DIV_11 + * @arg @ref LL_RCC_PLLSAIM_DIV_12 + * @arg @ref LL_RCC_PLLSAIM_DIV_13 + * @arg @ref LL_RCC_PLLSAIM_DIV_14 + * @arg @ref LL_RCC_PLLSAIM_DIV_15 + * @arg @ref LL_RCC_PLLSAIM_DIV_16 + * @arg @ref LL_RCC_PLLSAIM_DIV_17 + * @arg @ref LL_RCC_PLLSAIM_DIV_18 + * @arg @ref LL_RCC_PLLSAIM_DIV_19 + * @arg @ref LL_RCC_PLLSAIM_DIV_20 + * @arg @ref LL_RCC_PLLSAIM_DIV_21 + * @arg @ref LL_RCC_PLLSAIM_DIV_22 + * @arg @ref LL_RCC_PLLSAIM_DIV_23 + * @arg @ref LL_RCC_PLLSAIM_DIV_24 + * @arg @ref LL_RCC_PLLSAIM_DIV_25 + * @arg @ref LL_RCC_PLLSAIM_DIV_26 + * @arg @ref LL_RCC_PLLSAIM_DIV_27 + * @arg @ref LL_RCC_PLLSAIM_DIV_28 + * @arg @ref LL_RCC_PLLSAIM_DIV_29 + * @arg @ref LL_RCC_PLLSAIM_DIV_30 + * @arg @ref LL_RCC_PLLSAIM_DIV_31 + * @arg @ref LL_RCC_PLLSAIM_DIV_32 + * @arg @ref LL_RCC_PLLSAIM_DIV_33 + * @arg @ref LL_RCC_PLLSAIM_DIV_34 + * @arg @ref LL_RCC_PLLSAIM_DIV_35 + * @arg @ref LL_RCC_PLLSAIM_DIV_36 + * @arg @ref LL_RCC_PLLSAIM_DIV_37 + * @arg @ref LL_RCC_PLLSAIM_DIV_38 + * @arg @ref LL_RCC_PLLSAIM_DIV_39 + * @arg @ref LL_RCC_PLLSAIM_DIV_40 + * @arg @ref LL_RCC_PLLSAIM_DIV_41 + * @arg @ref 
LL_RCC_PLLSAIM_DIV_42 + * @arg @ref LL_RCC_PLLSAIM_DIV_43 + * @arg @ref LL_RCC_PLLSAIM_DIV_44 + * @arg @ref LL_RCC_PLLSAIM_DIV_45 + * @arg @ref LL_RCC_PLLSAIM_DIV_46 + * @arg @ref LL_RCC_PLLSAIM_DIV_47 + * @arg @ref LL_RCC_PLLSAIM_DIV_48 + * @arg @ref LL_RCC_PLLSAIM_DIV_49 + * @arg @ref LL_RCC_PLLSAIM_DIV_50 + * @arg @ref LL_RCC_PLLSAIM_DIV_51 + * @arg @ref LL_RCC_PLLSAIM_DIV_52 + * @arg @ref LL_RCC_PLLSAIM_DIV_53 + * @arg @ref LL_RCC_PLLSAIM_DIV_54 + * @arg @ref LL_RCC_PLLSAIM_DIV_55 + * @arg @ref LL_RCC_PLLSAIM_DIV_56 + * @arg @ref LL_RCC_PLLSAIM_DIV_57 + * @arg @ref LL_RCC_PLLSAIM_DIV_58 + * @arg @ref LL_RCC_PLLSAIM_DIV_59 + * @arg @ref LL_RCC_PLLSAIM_DIV_60 + * @arg @ref LL_RCC_PLLSAIM_DIV_61 + * @arg @ref LL_RCC_PLLSAIM_DIV_62 + * @arg @ref LL_RCC_PLLSAIM_DIV_63 + * @param PLLN Between 49/50(*) and 432 + * + * (*) value not defined in all devices. + * @param PLLQ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIQ_DIV_2 + * @arg @ref LL_RCC_PLLSAIQ_DIV_3 + * @arg @ref LL_RCC_PLLSAIQ_DIV_4 + * @arg @ref LL_RCC_PLLSAIQ_DIV_5 + * @arg @ref LL_RCC_PLLSAIQ_DIV_6 + * @arg @ref LL_RCC_PLLSAIQ_DIV_7 + * @arg @ref LL_RCC_PLLSAIQ_DIV_8 + * @arg @ref LL_RCC_PLLSAIQ_DIV_9 + * @arg @ref LL_RCC_PLLSAIQ_DIV_10 + * @arg @ref LL_RCC_PLLSAIQ_DIV_11 + * @arg @ref LL_RCC_PLLSAIQ_DIV_12 + * @arg @ref LL_RCC_PLLSAIQ_DIV_13 + * @arg @ref LL_RCC_PLLSAIQ_DIV_14 + * @arg @ref LL_RCC_PLLSAIQ_DIV_15 + * @param PLLDIVQ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_1 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_2 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_3 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_4 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_5 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_6 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_7 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_8 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_9 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_10 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_11 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_12 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_13 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_14 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_15 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_16 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_17 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_18 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_19 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_20 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_21 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_22 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_23 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_24 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_25 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_26 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_27 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_28 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_29 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_30 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_31 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_32 + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLLSAI_ConfigDomain_SAI(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLQ, + uint32_t PLLDIVQ) +{ + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC, Source); +#if defined(RCC_PLLSAICFGR_PLLSAIM) + MODIFY_REG(RCC->PLLSAICFGR, RCC_PLLSAICFGR_PLLSAIM, PLLM); +#else + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLM, PLLM); +#endif /* RCC_PLLSAICFGR_PLLSAIM */ + MODIFY_REG(RCC->PLLSAICFGR, RCC_PLLSAICFGR_PLLSAIN | RCC_PLLSAICFGR_PLLSAIQ, PLLN << RCC_PLLSAICFGR_PLLSAIN_Pos | PLLQ); + MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_PLLSAIDIVQ, PLLDIVQ); +} + +#if defined(RCC_PLLSAICFGR_PLLSAIP) +/** + * @brief Configure PLLSAI used for 48Mhz domain clock + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI(*) are disabled + * @note 
PLLN/PLLP can be written only when PLLSAI is disabled + * @note This can be selected for USB, RNG, SDIO + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLLSAI_ConfigDomain_48M\n + * PLLCFGR PLLM LL_RCC_PLLSAI_ConfigDomain_48M\n + * PLLSAICFGR PLLSAIM LL_RCC_PLLSAI_ConfigDomain_48M\n + * PLLSAICFGR PLLSAIN LL_RCC_PLLSAI_ConfigDomain_48M\n + * PLLSAICFGR PLLSAIP LL_RCC_PLLSAI_ConfigDomain_48M + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIM_DIV_2 + * @arg @ref LL_RCC_PLLSAIM_DIV_3 + * @arg @ref LL_RCC_PLLSAIM_DIV_4 + * @arg @ref LL_RCC_PLLSAIM_DIV_5 + * @arg @ref LL_RCC_PLLSAIM_DIV_6 + * @arg @ref LL_RCC_PLLSAIM_DIV_7 + * @arg @ref LL_RCC_PLLSAIM_DIV_8 + * @arg @ref LL_RCC_PLLSAIM_DIV_9 + * @arg @ref LL_RCC_PLLSAIM_DIV_10 + * @arg @ref LL_RCC_PLLSAIM_DIV_11 + * @arg @ref LL_RCC_PLLSAIM_DIV_12 + * @arg @ref LL_RCC_PLLSAIM_DIV_13 + * @arg @ref LL_RCC_PLLSAIM_DIV_14 + * @arg @ref LL_RCC_PLLSAIM_DIV_15 + * @arg @ref LL_RCC_PLLSAIM_DIV_16 + * @arg @ref LL_RCC_PLLSAIM_DIV_17 + * @arg @ref LL_RCC_PLLSAIM_DIV_18 + * @arg @ref LL_RCC_PLLSAIM_DIV_19 + * @arg @ref LL_RCC_PLLSAIM_DIV_20 + * @arg @ref LL_RCC_PLLSAIM_DIV_21 + * @arg @ref LL_RCC_PLLSAIM_DIV_22 + * @arg @ref LL_RCC_PLLSAIM_DIV_23 + * @arg @ref LL_RCC_PLLSAIM_DIV_24 + * @arg @ref LL_RCC_PLLSAIM_DIV_25 + * @arg @ref LL_RCC_PLLSAIM_DIV_26 + * @arg @ref LL_RCC_PLLSAIM_DIV_27 + * @arg @ref LL_RCC_PLLSAIM_DIV_28 + * @arg @ref LL_RCC_PLLSAIM_DIV_29 + * @arg @ref LL_RCC_PLLSAIM_DIV_30 + * @arg @ref LL_RCC_PLLSAIM_DIV_31 + * @arg @ref LL_RCC_PLLSAIM_DIV_32 + * @arg @ref LL_RCC_PLLSAIM_DIV_33 + * @arg @ref LL_RCC_PLLSAIM_DIV_34 + * @arg @ref LL_RCC_PLLSAIM_DIV_35 + * @arg @ref LL_RCC_PLLSAIM_DIV_36 + * @arg @ref LL_RCC_PLLSAIM_DIV_37 + * @arg @ref LL_RCC_PLLSAIM_DIV_38 + * @arg @ref LL_RCC_PLLSAIM_DIV_39 + * @arg @ref LL_RCC_PLLSAIM_DIV_40 + * @arg @ref LL_RCC_PLLSAIM_DIV_41 + * @arg @ref LL_RCC_PLLSAIM_DIV_42 + * @arg @ref LL_RCC_PLLSAIM_DIV_43 + * @arg @ref LL_RCC_PLLSAIM_DIV_44 + * @arg @ref LL_RCC_PLLSAIM_DIV_45 + * @arg @ref LL_RCC_PLLSAIM_DIV_46 + * @arg @ref LL_RCC_PLLSAIM_DIV_47 + * @arg @ref LL_RCC_PLLSAIM_DIV_48 + * @arg @ref LL_RCC_PLLSAIM_DIV_49 + * @arg @ref LL_RCC_PLLSAIM_DIV_50 + * @arg @ref LL_RCC_PLLSAIM_DIV_51 + * @arg @ref LL_RCC_PLLSAIM_DIV_52 + * @arg @ref LL_RCC_PLLSAIM_DIV_53 + * @arg @ref LL_RCC_PLLSAIM_DIV_54 + * @arg @ref LL_RCC_PLLSAIM_DIV_55 + * @arg @ref LL_RCC_PLLSAIM_DIV_56 + * @arg @ref LL_RCC_PLLSAIM_DIV_57 + * @arg @ref LL_RCC_PLLSAIM_DIV_58 + * @arg @ref LL_RCC_PLLSAIM_DIV_59 + * @arg @ref LL_RCC_PLLSAIM_DIV_60 + * @arg @ref LL_RCC_PLLSAIM_DIV_61 + * @arg @ref LL_RCC_PLLSAIM_DIV_62 + * @arg @ref LL_RCC_PLLSAIM_DIV_63 + * @param PLLN Between 50 and 432 + * @param PLLP This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIP_DIV_2 + * @arg @ref LL_RCC_PLLSAIP_DIV_4 + * @arg @ref LL_RCC_PLLSAIP_DIV_6 + * @arg @ref LL_RCC_PLLSAIP_DIV_8 + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLLSAI_ConfigDomain_48M(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLP) +{ + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC, Source); +#if defined(RCC_PLLSAICFGR_PLLSAIM) + MODIFY_REG(RCC->PLLSAICFGR, RCC_PLLSAICFGR_PLLSAIM, PLLM); +#else + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLM, PLLM); +#endif /* RCC_PLLSAICFGR_PLLSAIM */ + MODIFY_REG(RCC->PLLSAICFGR, RCC_PLLSAICFGR_PLLSAIN | RCC_PLLSAICFGR_PLLSAIP, PLLN << 
RCC_PLLSAICFGR_PLLSAIN_Pos | PLLP); +} +#endif /* RCC_PLLSAICFGR_PLLSAIP */ + +#if defined(LTDC) +/** + * @brief Configure PLLSAI used for LTDC domain clock + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI(*) are disabled + * @note PLLN/PLLR can be written only when PLLSAI is disabled + * @note This can be selected for LTDC + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLLSAI_ConfigDomain_LTDC\n + * PLLCFGR PLLM LL_RCC_PLLSAI_ConfigDomain_LTDC\n + * PLLSAICFGR PLLSAIN LL_RCC_PLLSAI_ConfigDomain_LTDC\n + * PLLSAICFGR PLLSAIR LL_RCC_PLLSAI_ConfigDomain_LTDC\n + * DCKCFGR PLLSAIDIVR LL_RCC_PLLSAI_ConfigDomain_LTDC + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIM_DIV_2 + * @arg @ref LL_RCC_PLLSAIM_DIV_3 + * @arg @ref LL_RCC_PLLSAIM_DIV_4 + * @arg @ref LL_RCC_PLLSAIM_DIV_5 + * @arg @ref LL_RCC_PLLSAIM_DIV_6 + * @arg @ref LL_RCC_PLLSAIM_DIV_7 + * @arg @ref LL_RCC_PLLSAIM_DIV_8 + * @arg @ref LL_RCC_PLLSAIM_DIV_9 + * @arg @ref LL_RCC_PLLSAIM_DIV_10 + * @arg @ref LL_RCC_PLLSAIM_DIV_11 + * @arg @ref LL_RCC_PLLSAIM_DIV_12 + * @arg @ref LL_RCC_PLLSAIM_DIV_13 + * @arg @ref LL_RCC_PLLSAIM_DIV_14 + * @arg @ref LL_RCC_PLLSAIM_DIV_15 + * @arg @ref LL_RCC_PLLSAIM_DIV_16 + * @arg @ref LL_RCC_PLLSAIM_DIV_17 + * @arg @ref LL_RCC_PLLSAIM_DIV_18 + * @arg @ref LL_RCC_PLLSAIM_DIV_19 + * @arg @ref LL_RCC_PLLSAIM_DIV_20 + * @arg @ref LL_RCC_PLLSAIM_DIV_21 + * @arg @ref LL_RCC_PLLSAIM_DIV_22 + * @arg @ref LL_RCC_PLLSAIM_DIV_23 + * @arg @ref LL_RCC_PLLSAIM_DIV_24 + * @arg @ref LL_RCC_PLLSAIM_DIV_25 + * @arg @ref LL_RCC_PLLSAIM_DIV_26 + * @arg @ref LL_RCC_PLLSAIM_DIV_27 + * @arg @ref LL_RCC_PLLSAIM_DIV_28 + * @arg @ref LL_RCC_PLLSAIM_DIV_29 + * @arg @ref LL_RCC_PLLSAIM_DIV_30 + * @arg @ref LL_RCC_PLLSAIM_DIV_31 + * @arg @ref LL_RCC_PLLSAIM_DIV_32 + * @arg @ref LL_RCC_PLLSAIM_DIV_33 + * @arg @ref LL_RCC_PLLSAIM_DIV_34 + * @arg @ref LL_RCC_PLLSAIM_DIV_35 + * @arg @ref LL_RCC_PLLSAIM_DIV_36 + * @arg @ref LL_RCC_PLLSAIM_DIV_37 + * @arg @ref LL_RCC_PLLSAIM_DIV_38 + * @arg @ref LL_RCC_PLLSAIM_DIV_39 + * @arg @ref LL_RCC_PLLSAIM_DIV_40 + * @arg @ref LL_RCC_PLLSAIM_DIV_41 + * @arg @ref LL_RCC_PLLSAIM_DIV_42 + * @arg @ref LL_RCC_PLLSAIM_DIV_43 + * @arg @ref LL_RCC_PLLSAIM_DIV_44 + * @arg @ref LL_RCC_PLLSAIM_DIV_45 + * @arg @ref LL_RCC_PLLSAIM_DIV_46 + * @arg @ref LL_RCC_PLLSAIM_DIV_47 + * @arg @ref LL_RCC_PLLSAIM_DIV_48 + * @arg @ref LL_RCC_PLLSAIM_DIV_49 + * @arg @ref LL_RCC_PLLSAIM_DIV_50 + * @arg @ref LL_RCC_PLLSAIM_DIV_51 + * @arg @ref LL_RCC_PLLSAIM_DIV_52 + * @arg @ref LL_RCC_PLLSAIM_DIV_53 + * @arg @ref LL_RCC_PLLSAIM_DIV_54 + * @arg @ref LL_RCC_PLLSAIM_DIV_55 + * @arg @ref LL_RCC_PLLSAIM_DIV_56 + * @arg @ref LL_RCC_PLLSAIM_DIV_57 + * @arg @ref LL_RCC_PLLSAIM_DIV_58 + * @arg @ref LL_RCC_PLLSAIM_DIV_59 + * @arg @ref LL_RCC_PLLSAIM_DIV_60 + * @arg @ref LL_RCC_PLLSAIM_DIV_61 + * @arg @ref LL_RCC_PLLSAIM_DIV_62 + * @arg @ref LL_RCC_PLLSAIM_DIV_63 + * @param PLLN Between 49/50(*) and 432 + * + * (*) value not defined in all devices. 
+ * @param PLLR This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIR_DIV_2 + * @arg @ref LL_RCC_PLLSAIR_DIV_3 + * @arg @ref LL_RCC_PLLSAIR_DIV_4 + * @arg @ref LL_RCC_PLLSAIR_DIV_5 + * @arg @ref LL_RCC_PLLSAIR_DIV_6 + * @arg @ref LL_RCC_PLLSAIR_DIV_7 + * @param PLLDIVR This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIDIVR_DIV_2 + * @arg @ref LL_RCC_PLLSAIDIVR_DIV_4 + * @arg @ref LL_RCC_PLLSAIDIVR_DIV_8 + * @arg @ref LL_RCC_PLLSAIDIVR_DIV_16 + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLLSAI_ConfigDomain_LTDC(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLR, + uint32_t PLLDIVR) +{ + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC | RCC_PLLCFGR_PLLM, Source | PLLM); + MODIFY_REG(RCC->PLLSAICFGR, RCC_PLLSAICFGR_PLLSAIN | RCC_PLLSAICFGR_PLLSAIR, PLLN << RCC_PLLSAICFGR_PLLSAIN_Pos | PLLR); + MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_PLLSAIDIVR, PLLDIVR); +} +#endif /* LTDC */ + +/** + * @brief Get division factor for PLLSAI input clock + * @rmtoll PLLCFGR PLLM LL_RCC_PLLSAI_GetDivider\n + * PLLSAICFGR PLLSAIM LL_RCC_PLLSAI_GetDivider + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLSAIM_DIV_2 + * @arg @ref LL_RCC_PLLSAIM_DIV_3 + * @arg @ref LL_RCC_PLLSAIM_DIV_4 + * @arg @ref LL_RCC_PLLSAIM_DIV_5 + * @arg @ref LL_RCC_PLLSAIM_DIV_6 + * @arg @ref LL_RCC_PLLSAIM_DIV_7 + * @arg @ref LL_RCC_PLLSAIM_DIV_8 + * @arg @ref LL_RCC_PLLSAIM_DIV_9 + * @arg @ref LL_RCC_PLLSAIM_DIV_10 + * @arg @ref LL_RCC_PLLSAIM_DIV_11 + * @arg @ref LL_RCC_PLLSAIM_DIV_12 + * @arg @ref LL_RCC_PLLSAIM_DIV_13 + * @arg @ref LL_RCC_PLLSAIM_DIV_14 + * @arg @ref LL_RCC_PLLSAIM_DIV_15 + * @arg @ref LL_RCC_PLLSAIM_DIV_16 + * @arg @ref LL_RCC_PLLSAIM_DIV_17 + * @arg @ref LL_RCC_PLLSAIM_DIV_18 + * @arg @ref LL_RCC_PLLSAIM_DIV_19 + * @arg @ref LL_RCC_PLLSAIM_DIV_20 + * @arg @ref LL_RCC_PLLSAIM_DIV_21 + * @arg @ref LL_RCC_PLLSAIM_DIV_22 + * @arg @ref LL_RCC_PLLSAIM_DIV_23 + * @arg @ref LL_RCC_PLLSAIM_DIV_24 + * @arg @ref LL_RCC_PLLSAIM_DIV_25 + * @arg @ref LL_RCC_PLLSAIM_DIV_26 + * @arg @ref LL_RCC_PLLSAIM_DIV_27 + * @arg @ref LL_RCC_PLLSAIM_DIV_28 + * @arg @ref LL_RCC_PLLSAIM_DIV_29 + * @arg @ref LL_RCC_PLLSAIM_DIV_30 + * @arg @ref LL_RCC_PLLSAIM_DIV_31 + * @arg @ref LL_RCC_PLLSAIM_DIV_32 + * @arg @ref LL_RCC_PLLSAIM_DIV_33 + * @arg @ref LL_RCC_PLLSAIM_DIV_34 + * @arg @ref LL_RCC_PLLSAIM_DIV_35 + * @arg @ref LL_RCC_PLLSAIM_DIV_36 + * @arg @ref LL_RCC_PLLSAIM_DIV_37 + * @arg @ref LL_RCC_PLLSAIM_DIV_38 + * @arg @ref LL_RCC_PLLSAIM_DIV_39 + * @arg @ref LL_RCC_PLLSAIM_DIV_40 + * @arg @ref LL_RCC_PLLSAIM_DIV_41 + * @arg @ref LL_RCC_PLLSAIM_DIV_42 + * @arg @ref LL_RCC_PLLSAIM_DIV_43 + * @arg @ref LL_RCC_PLLSAIM_DIV_44 + * @arg @ref LL_RCC_PLLSAIM_DIV_45 + * @arg @ref LL_RCC_PLLSAIM_DIV_46 + * @arg @ref LL_RCC_PLLSAIM_DIV_47 + * @arg @ref LL_RCC_PLLSAIM_DIV_48 + * @arg @ref LL_RCC_PLLSAIM_DIV_49 + * @arg @ref LL_RCC_PLLSAIM_DIV_50 + * @arg @ref LL_RCC_PLLSAIM_DIV_51 + * @arg @ref LL_RCC_PLLSAIM_DIV_52 + * @arg @ref LL_RCC_PLLSAIM_DIV_53 + * @arg @ref LL_RCC_PLLSAIM_DIV_54 + * @arg @ref LL_RCC_PLLSAIM_DIV_55 + * @arg @ref LL_RCC_PLLSAIM_DIV_56 + * @arg @ref LL_RCC_PLLSAIM_DIV_57 + * @arg @ref LL_RCC_PLLSAIM_DIV_58 + * @arg @ref LL_RCC_PLLSAIM_DIV_59 + * @arg @ref LL_RCC_PLLSAIM_DIV_60 + * @arg @ref LL_RCC_PLLSAIM_DIV_61 + * @arg @ref LL_RCC_PLLSAIM_DIV_62 + * @arg @ref LL_RCC_PLLSAIM_DIV_63 + */ +__STATIC_INLINE uint32_t LL_RCC_PLLSAI_GetDivider(void) +{ +#if defined(RCC_PLLSAICFGR_PLLSAIM) + return (uint32_t)(READ_BIT(RCC->PLLSAICFGR, 
RCC_PLLSAICFGR_PLLSAIM)); +#else + return (uint32_t)(READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLM)); +#endif /* RCC_PLLSAICFGR_PLLSAIM */ +} + +/** + * @brief Get SAIPLL multiplication factor for VCO + * @rmtoll PLLSAICFGR PLLSAIN LL_RCC_PLLSAI_GetN + * @retval Between 49/50(*) and 432 + * + * (*) value not defined in all devices. + */ +__STATIC_INLINE uint32_t LL_RCC_PLLSAI_GetN(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLSAICFGR, RCC_PLLSAICFGR_PLLSAIN) >> RCC_PLLSAICFGR_PLLSAIN_Pos); +} + +/** + * @brief Get SAIPLL division factor for PLLSAIQ + * @rmtoll PLLSAICFGR PLLSAIQ LL_RCC_PLLSAI_GetQ + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLSAIQ_DIV_2 + * @arg @ref LL_RCC_PLLSAIQ_DIV_3 + * @arg @ref LL_RCC_PLLSAIQ_DIV_4 + * @arg @ref LL_RCC_PLLSAIQ_DIV_5 + * @arg @ref LL_RCC_PLLSAIQ_DIV_6 + * @arg @ref LL_RCC_PLLSAIQ_DIV_7 + * @arg @ref LL_RCC_PLLSAIQ_DIV_8 + * @arg @ref LL_RCC_PLLSAIQ_DIV_9 + * @arg @ref LL_RCC_PLLSAIQ_DIV_10 + * @arg @ref LL_RCC_PLLSAIQ_DIV_11 + * @arg @ref LL_RCC_PLLSAIQ_DIV_12 + * @arg @ref LL_RCC_PLLSAIQ_DIV_13 + * @arg @ref LL_RCC_PLLSAIQ_DIV_14 + * @arg @ref LL_RCC_PLLSAIQ_DIV_15 + */ +__STATIC_INLINE uint32_t LL_RCC_PLLSAI_GetQ(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLSAICFGR, RCC_PLLSAICFGR_PLLSAIQ)); +} + +#if defined(RCC_PLLSAICFGR_PLLSAIR) +/** + * @brief Get SAIPLL division factor for PLLSAIR + * @note used for PLLSAICLK (SAI clock) + * @rmtoll PLLSAICFGR PLLSAIR LL_RCC_PLLSAI_GetR + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLSAIR_DIV_2 + * @arg @ref LL_RCC_PLLSAIR_DIV_3 + * @arg @ref LL_RCC_PLLSAIR_DIV_4 + * @arg @ref LL_RCC_PLLSAIR_DIV_5 + * @arg @ref LL_RCC_PLLSAIR_DIV_6 + * @arg @ref LL_RCC_PLLSAIR_DIV_7 + */ +__STATIC_INLINE uint32_t LL_RCC_PLLSAI_GetR(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLSAICFGR, RCC_PLLSAICFGR_PLLSAIR)); +} +#endif /* RCC_PLLSAICFGR_PLLSAIR */ + +#if defined(RCC_PLLSAICFGR_PLLSAIP) +/** + * @brief Get SAIPLL division factor for PLLSAIP + * @note used for PLL48MCLK (48M domain clock) + * @rmtoll PLLSAICFGR PLLSAIP LL_RCC_PLLSAI_GetP + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLSAIP_DIV_2 + * @arg @ref LL_RCC_PLLSAIP_DIV_4 + * @arg @ref LL_RCC_PLLSAIP_DIV_6 + * @arg @ref LL_RCC_PLLSAIP_DIV_8 + */ +__STATIC_INLINE uint32_t LL_RCC_PLLSAI_GetP(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLSAICFGR, RCC_PLLSAICFGR_PLLSAIP)); +} +#endif /* RCC_PLLSAICFGR_PLLSAIP */ + +/** + * @brief Get SAIPLL division factor for PLLSAIDIVQ + * @note used PLLSAICLK selected (SAI clock) + * @rmtoll DCKCFGR PLLSAIDIVQ LL_RCC_PLLSAI_GetDIVQ + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_1 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_2 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_3 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_4 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_5 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_6 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_7 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_8 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_9 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_10 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_11 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_12 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_13 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_14 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_15 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_16 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_17 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_18 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_19 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_20 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_21 + * @arg @ref 
LL_RCC_PLLSAIDIVQ_DIV_22 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_23 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_24 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_25 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_26 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_27 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_28 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_29 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_30 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_31 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_32 + */ +__STATIC_INLINE uint32_t LL_RCC_PLLSAI_GetDIVQ(void) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_PLLSAIDIVQ)); +} + +#if defined(RCC_DCKCFGR_PLLSAIDIVR) +/** + * @brief Get SAIPLL division factor for PLLSAIDIVR + * @note used for LTDC domain clock + * @rmtoll DCKCFGR PLLSAIDIVR LL_RCC_PLLSAI_GetDIVR + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLSAIDIVR_DIV_2 + * @arg @ref LL_RCC_PLLSAIDIVR_DIV_4 + * @arg @ref LL_RCC_PLLSAIDIVR_DIV_8 + * @arg @ref LL_RCC_PLLSAIDIVR_DIV_16 + */ +__STATIC_INLINE uint32_t LL_RCC_PLLSAI_GetDIVR(void) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_PLLSAIDIVR)); +} +#endif /* RCC_DCKCFGR_PLLSAIDIVR */ + +/** + * @} + */ +#endif /* RCC_PLLSAI_SUPPORT */ + +/** @defgroup RCC_LL_EF_FLAG_Management FLAG Management + * @{ + */ + +/** + * @brief Clear LSI ready interrupt flag + * @rmtoll CIR LSIRDYC LL_RCC_ClearFlag_LSIRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_ClearFlag_LSIRDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_LSIRDYC); +} + +/** + * @brief Clear LSE ready interrupt flag + * @rmtoll CIR LSERDYC LL_RCC_ClearFlag_LSERDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_ClearFlag_LSERDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_LSERDYC); +} + +/** + * @brief Clear HSI ready interrupt flag + * @rmtoll CIR HSIRDYC LL_RCC_ClearFlag_HSIRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_ClearFlag_HSIRDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_HSIRDYC); +} + +/** + * @brief Clear HSE ready interrupt flag + * @rmtoll CIR HSERDYC LL_RCC_ClearFlag_HSERDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_ClearFlag_HSERDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_HSERDYC); +} + +/** + * @brief Clear PLL ready interrupt flag + * @rmtoll CIR PLLRDYC LL_RCC_ClearFlag_PLLRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_ClearFlag_PLLRDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_PLLRDYC); +} + +#if defined(RCC_PLLI2S_SUPPORT) +/** + * @brief Clear PLLI2S ready interrupt flag + * @rmtoll CIR PLLI2SRDYC LL_RCC_ClearFlag_PLLI2SRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_ClearFlag_PLLI2SRDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_PLLI2SRDYC); +} + +#endif /* RCC_PLLI2S_SUPPORT */ + +#if defined(RCC_PLLSAI_SUPPORT) +/** + * @brief Clear PLLSAI ready interrupt flag + * @rmtoll CIR PLLSAIRDYC LL_RCC_ClearFlag_PLLSAIRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_ClearFlag_PLLSAIRDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_PLLSAIRDYC); +} + +#endif /* RCC_PLLSAI_SUPPORT */ + +/** + * @brief Clear Clock security system interrupt flag + * @rmtoll CIR CSSC LL_RCC_ClearFlag_HSECSS + * @retval None + */ +__STATIC_INLINE void LL_RCC_ClearFlag_HSECSS(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_CSSC); +} + +/** + * @brief Check if LSI ready interrupt occurred or not + * @rmtoll CIR LSIRDYF LL_RCC_IsActiveFlag_LSIRDY + * @retval State of bit (1 or 0). 
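+ * @note  Editorial addition, not part of the original ST header: the flag-clear and
+ *        flag-check helpers of this group are normally used in pairs. A common case is
+ *        the Clock Security System event, which is serviced from the NMI; the handler
+ *        name below follows the CMSIS startup file convention and is an assumption.
+ * @code
+ *        void NMI_Handler(void)
+ *        {
+ *          if (LL_RCC_IsActiveFlag_HSECSS() != 0U)       // HSE failure detected by the CSS
+ *          {
+ *            LL_RCC_ClearFlag_HSECSS();                  // acknowledge the event
+ *            // the system is now running on HSI: restart HSE/PLL from here if needed
+ *          }
+ *        }
+ * @endcode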
+ */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_LSIRDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_LSIRDYF) == (RCC_CIR_LSIRDYF)); +} + +/** + * @brief Check if LSE ready interrupt occurred or not + * @rmtoll CIR LSERDYF LL_RCC_IsActiveFlag_LSERDY + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_LSERDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_LSERDYF) == (RCC_CIR_LSERDYF)); +} + +/** + * @brief Check if HSI ready interrupt occurred or not + * @rmtoll CIR HSIRDYF LL_RCC_IsActiveFlag_HSIRDY + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_HSIRDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_HSIRDYF) == (RCC_CIR_HSIRDYF)); +} + +/** + * @brief Check if HSE ready interrupt occurred or not + * @rmtoll CIR HSERDYF LL_RCC_IsActiveFlag_HSERDY + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_HSERDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_HSERDYF) == (RCC_CIR_HSERDYF)); +} + +/** + * @brief Check if PLL ready interrupt occurred or not + * @rmtoll CIR PLLRDYF LL_RCC_IsActiveFlag_PLLRDY + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_PLLRDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_PLLRDYF) == (RCC_CIR_PLLRDYF)); +} + +#if defined(RCC_PLLI2S_SUPPORT) +/** + * @brief Check if PLLI2S ready interrupt occurred or not + * @rmtoll CIR PLLI2SRDYF LL_RCC_IsActiveFlag_PLLI2SRDY + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_PLLI2SRDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_PLLI2SRDYF) == (RCC_CIR_PLLI2SRDYF)); +} +#endif /* RCC_PLLI2S_SUPPORT */ + +#if defined(RCC_PLLSAI_SUPPORT) +/** + * @brief Check if PLLSAI ready interrupt occurred or not + * @rmtoll CIR PLLSAIRDYF LL_RCC_IsActiveFlag_PLLSAIRDY + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_PLLSAIRDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_PLLSAIRDYF) == (RCC_CIR_PLLSAIRDYF)); +} +#endif /* RCC_PLLSAI_SUPPORT */ + +/** + * @brief Check if Clock security system interrupt occurred or not + * @rmtoll CIR CSSF LL_RCC_IsActiveFlag_HSECSS + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_HSECSS(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_CSSF) == (RCC_CIR_CSSF)); +} + +/** + * @brief Check if RCC flag Independent Watchdog reset is set or not. + * @rmtoll CSR IWDGRSTF LL_RCC_IsActiveFlag_IWDGRST + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_IWDGRST(void) +{ + return (READ_BIT(RCC->CSR, RCC_CSR_IWDGRSTF) == (RCC_CSR_IWDGRSTF)); +} + +/** + * @brief Check if RCC flag Low Power reset is set or not. + * @rmtoll CSR LPWRRSTF LL_RCC_IsActiveFlag_LPWRRST + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_LPWRRST(void) +{ + return (READ_BIT(RCC->CSR, RCC_CSR_LPWRRSTF) == (RCC_CSR_LPWRRSTF)); +} + +/** + * @brief Check if RCC flag Pin reset is set or not. + * @rmtoll CSR PINRSTF LL_RCC_IsActiveFlag_PINRST + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_PINRST(void) +{ + return (READ_BIT(RCC->CSR, RCC_CSR_PINRSTF) == (RCC_CSR_PINRSTF)); +} + +/** + * @brief Check if RCC flag POR/PDR reset is set or not. + * @rmtoll CSR PORRSTF LL_RCC_IsActiveFlag_PORRST + * @retval State of bit (1 or 0). 
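+ * @note  Editorial addition, not part of the original ST header: the reset-cause flags
+ *        in this group are typically read once at startup and then cleared together
+ *        with LL_RCC_ClearResetFlags(), since they remain set until explicitly cleared.
+ * @code
+ *        uint32_t reset_by_iwdg = LL_RCC_IsActiveFlag_IWDGRST();   // independent watchdog reset
+ *        uint32_t reset_by_sw   = LL_RCC_IsActiveFlag_SFTRST();    // software reset
+ *        uint32_t reset_by_por  = LL_RCC_IsActiveFlag_PORRST();    // power-on / power-down reset
+ *        LL_RCC_ClearResetFlags();                                 // clear them for the next boot
+ * @endcode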
+ */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_PORRST(void) +{ + return (READ_BIT(RCC->CSR, RCC_CSR_PORRSTF) == (RCC_CSR_PORRSTF)); +} + +/** + * @brief Check if RCC flag Software reset is set or not. + * @rmtoll CSR SFTRSTF LL_RCC_IsActiveFlag_SFTRST + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_SFTRST(void) +{ + return (READ_BIT(RCC->CSR, RCC_CSR_SFTRSTF) == (RCC_CSR_SFTRSTF)); +} + +/** + * @brief Check if RCC flag Window Watchdog reset is set or not. + * @rmtoll CSR WWDGRSTF LL_RCC_IsActiveFlag_WWDGRST + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_WWDGRST(void) +{ + return (READ_BIT(RCC->CSR, RCC_CSR_WWDGRSTF) == (RCC_CSR_WWDGRSTF)); +} + +#if defined(RCC_CSR_BORRSTF) +/** + * @brief Check if RCC flag BOR reset is set or not. + * @rmtoll CSR BORRSTF LL_RCC_IsActiveFlag_BORRST + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_BORRST(void) +{ + return (READ_BIT(RCC->CSR, RCC_CSR_BORRSTF) == (RCC_CSR_BORRSTF)); +} +#endif /* RCC_CSR_BORRSTF */ + +/** + * @brief Set RMVF bit to clear the reset flags. + * @rmtoll CSR RMVF LL_RCC_ClearResetFlags + * @retval None + */ +__STATIC_INLINE void LL_RCC_ClearResetFlags(void) +{ + SET_BIT(RCC->CSR, RCC_CSR_RMVF); +} + +/** + * @} + */ + +/** @defgroup RCC_LL_EF_IT_Management IT Management + * @{ + */ + +/** + * @brief Enable LSI ready interrupt + * @rmtoll CIR LSIRDYIE LL_RCC_EnableIT_LSIRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_EnableIT_LSIRDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_LSIRDYIE); +} + +/** + * @brief Enable LSE ready interrupt + * @rmtoll CIR LSERDYIE LL_RCC_EnableIT_LSERDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_EnableIT_LSERDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_LSERDYIE); +} + +/** + * @brief Enable HSI ready interrupt + * @rmtoll CIR HSIRDYIE LL_RCC_EnableIT_HSIRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_EnableIT_HSIRDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_HSIRDYIE); +} + +/** + * @brief Enable HSE ready interrupt + * @rmtoll CIR HSERDYIE LL_RCC_EnableIT_HSERDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_EnableIT_HSERDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_HSERDYIE); +} + +/** + * @brief Enable PLL ready interrupt + * @rmtoll CIR PLLRDYIE LL_RCC_EnableIT_PLLRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_EnableIT_PLLRDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_PLLRDYIE); +} + +#if defined(RCC_PLLI2S_SUPPORT) +/** + * @brief Enable PLLI2S ready interrupt + * @rmtoll CIR PLLI2SRDYIE LL_RCC_EnableIT_PLLI2SRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_EnableIT_PLLI2SRDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_PLLI2SRDYIE); +} +#endif /* RCC_PLLI2S_SUPPORT */ + +#if defined(RCC_PLLSAI_SUPPORT) +/** + * @brief Enable PLLSAI ready interrupt + * @rmtoll CIR PLLSAIRDYIE LL_RCC_EnableIT_PLLSAIRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_EnableIT_PLLSAIRDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_PLLSAIRDYIE); +} +#endif /* RCC_PLLSAI_SUPPORT */ + +/** + * @brief Disable LSI ready interrupt + * @rmtoll CIR LSIRDYIE LL_RCC_DisableIT_LSIRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_DisableIT_LSIRDY(void) +{ + CLEAR_BIT(RCC->CIR, RCC_CIR_LSIRDYIE); +} + +/** + * @brief Disable LSE ready interrupt + * @rmtoll CIR LSERDYIE LL_RCC_DisableIT_LSERDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_DisableIT_LSERDY(void) +{ + CLEAR_BIT(RCC->CIR, RCC_CIR_LSERDYIE); +} + +/** + * @brief Disable HSI ready interrupt + * @rmtoll CIR HSIRDYIE LL_RCC_DisableIT_HSIRDY + * 
@retval None + */ +__STATIC_INLINE void LL_RCC_DisableIT_HSIRDY(void) +{ + CLEAR_BIT(RCC->CIR, RCC_CIR_HSIRDYIE); +} + +/** + * @brief Disable HSE ready interrupt + * @rmtoll CIR HSERDYIE LL_RCC_DisableIT_HSERDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_DisableIT_HSERDY(void) +{ + CLEAR_BIT(RCC->CIR, RCC_CIR_HSERDYIE); +} + +/** + * @brief Disable PLL ready interrupt + * @rmtoll CIR PLLRDYIE LL_RCC_DisableIT_PLLRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_DisableIT_PLLRDY(void) +{ + CLEAR_BIT(RCC->CIR, RCC_CIR_PLLRDYIE); +} + +#if defined(RCC_PLLI2S_SUPPORT) +/** + * @brief Disable PLLI2S ready interrupt + * @rmtoll CIR PLLI2SRDYIE LL_RCC_DisableIT_PLLI2SRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_DisableIT_PLLI2SRDY(void) +{ + CLEAR_BIT(RCC->CIR, RCC_CIR_PLLI2SRDYIE); +} + +#endif /* RCC_PLLI2S_SUPPORT */ + +#if defined(RCC_PLLSAI_SUPPORT) +/** + * @brief Disable PLLSAI ready interrupt + * @rmtoll CIR PLLSAIRDYIE LL_RCC_DisableIT_PLLSAIRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_DisableIT_PLLSAIRDY(void) +{ + CLEAR_BIT(RCC->CIR, RCC_CIR_PLLSAIRDYIE); +} +#endif /* RCC_PLLSAI_SUPPORT */ + +/** + * @brief Checks if LSI ready interrupt source is enabled or disabled. + * @rmtoll CIR LSIRDYIE LL_RCC_IsEnabledIT_LSIRDY + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsEnabledIT_LSIRDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_LSIRDYIE) == (RCC_CIR_LSIRDYIE)); +} + +/** + * @brief Checks if LSE ready interrupt source is enabled or disabled. + * @rmtoll CIR LSERDYIE LL_RCC_IsEnabledIT_LSERDY + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsEnabledIT_LSERDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_LSERDYIE) == (RCC_CIR_LSERDYIE)); +} + +/** + * @brief Checks if HSI ready interrupt source is enabled or disabled. + * @rmtoll CIR HSIRDYIE LL_RCC_IsEnabledIT_HSIRDY + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsEnabledIT_HSIRDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_HSIRDYIE) == (RCC_CIR_HSIRDYIE)); +} + +/** + * @brief Checks if HSE ready interrupt source is enabled or disabled. + * @rmtoll CIR HSERDYIE LL_RCC_IsEnabledIT_HSERDY + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsEnabledIT_HSERDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_HSERDYIE) == (RCC_CIR_HSERDYIE)); +} + +/** + * @brief Checks if PLL ready interrupt source is enabled or disabled. + * @rmtoll CIR PLLRDYIE LL_RCC_IsEnabledIT_PLLRDY + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsEnabledIT_PLLRDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_PLLRDYIE) == (RCC_CIR_PLLRDYIE)); +} + +#if defined(RCC_PLLI2S_SUPPORT) +/** + * @brief Checks if PLLI2S ready interrupt source is enabled or disabled. + * @rmtoll CIR PLLI2SRDYIE LL_RCC_IsEnabledIT_PLLI2SRDY + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsEnabledIT_PLLI2SRDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_PLLI2SRDYIE) == (RCC_CIR_PLLI2SRDYIE)); +} + +#endif /* RCC_PLLI2S_SUPPORT */ + +#if defined(RCC_PLLSAI_SUPPORT) +/** + * @brief Checks if PLLSAI ready interrupt source is enabled or disabled. + * @rmtoll CIR PLLSAIRDYIE LL_RCC_IsEnabledIT_PLLSAIRDY + * @retval State of bit (1 or 0). 
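+ * @note  Editorial addition, not part of the original ST header: a sketch of the
+ *        interrupt-driven pattern for the ready interrupts in this group, shown here
+ *        for the LSE oscillator. LL_RCC_LSE_Enable() is defined earlier in this header;
+ *        RCC_IRQn and the RCC_IRQHandler name come from the CMSIS device/startup files.
+ * @code
+ *        void lse_start_it(void)                  // hypothetical application helper
+ *        {
+ *          NVIC_EnableIRQ(RCC_IRQn);
+ *          LL_RCC_EnableIT_LSERDY();              // interrupt once the LSE is stable
+ *          LL_RCC_LSE_Enable();
+ *        }
+ *
+ *        void RCC_IRQHandler(void)
+ *        {
+ *          if (LL_RCC_IsActiveFlag_LSERDY() != 0U)
+ *          {
+ *            LL_RCC_ClearFlag_LSERDY();           // acknowledge the ready event
+ *          }
+ *        }
+ * @endcode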
+ */ +__STATIC_INLINE uint32_t LL_RCC_IsEnabledIT_PLLSAIRDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_PLLSAIRDYIE) == (RCC_CIR_PLLSAIRDYIE)); +} +#endif /* RCC_PLLSAI_SUPPORT */ + +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup RCC_LL_EF_Init De-initialization function + * @{ + */ +ErrorStatus LL_RCC_DeInit(void); +/** + * @} + */ + +/** @defgroup RCC_LL_EF_Get_Freq Get system and peripherals clocks frequency functions + * @{ + */ +void LL_RCC_GetSystemClocksFreq(LL_RCC_ClocksTypeDef *RCC_Clocks); +#if defined(FMPI2C1) +uint32_t LL_RCC_GetFMPI2CClockFreq(uint32_t FMPI2CxSource); +#endif /* FMPI2C1 */ +#if defined(LPTIM1) +uint32_t LL_RCC_GetLPTIMClockFreq(uint32_t LPTIMxSource); +#endif /* LPTIM1 */ +#if defined(SAI1) +uint32_t LL_RCC_GetSAIClockFreq(uint32_t SAIxSource); +#endif /* SAI1 */ +#if defined(SDIO) +uint32_t LL_RCC_GetSDIOClockFreq(uint32_t SDIOxSource); +#endif /* SDIO */ +#if defined(RNG) +uint32_t LL_RCC_GetRNGClockFreq(uint32_t RNGxSource); +#endif /* RNG */ +#if defined(USB_OTG_FS) || defined(USB_OTG_HS) +uint32_t LL_RCC_GetUSBClockFreq(uint32_t USBxSource); +#endif /* USB_OTG_FS || USB_OTG_HS */ +#if defined(DFSDM1_Channel0) +uint32_t LL_RCC_GetDFSDMClockFreq(uint32_t DFSDMxSource); +uint32_t LL_RCC_GetDFSDMAudioClockFreq(uint32_t DFSDMxSource); +#endif /* DFSDM1_Channel0 */ +uint32_t LL_RCC_GetI2SClockFreq(uint32_t I2SxSource); +#if defined(CEC) +uint32_t LL_RCC_GetCECClockFreq(uint32_t CECxSource); +#endif /* CEC */ +#if defined(LTDC) +uint32_t LL_RCC_GetLTDCClockFreq(uint32_t LTDCxSource); +#endif /* LTDC */ +#if defined(SPDIFRX) +uint32_t LL_RCC_GetSPDIFRXClockFreq(uint32_t SPDIFRXxSource); +#endif /* SPDIFRX */ +#if defined(DSI) +uint32_t LL_RCC_GetDSIClockFreq(uint32_t DSIxSource); +#endif /* DSI */ +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* defined(RCC) */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_LL_RCC_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_spi.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_spi.h new file mode 100644 index 0000000..371383e --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_spi.h @@ -0,0 +1,2027 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_spi.h + * @author MCD Application Team + * @brief Header file of SPI LL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F4xx_LL_SPI_H +#define STM32F4xx_LL_SPI_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +#if defined (SPI1) || defined (SPI2) || defined (SPI3) || defined (SPI4) || defined (SPI5) || defined(SPI6) + +/** @defgroup SPI_LL SPI + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ + +/* Exported types ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup SPI_LL_ES_INIT SPI Exported Init structure + * @{ + */ + +/** + * @brief SPI Init structures definition + */ +typedef struct +{ + uint32_t TransferDirection; /*!< Specifies the SPI unidirectional or bidirectional data mode. + This parameter can be a value of @ref SPI_LL_EC_TRANSFER_MODE. + + This feature can be modified afterwards using unitary function @ref LL_SPI_SetTransferDirection().*/ + + uint32_t Mode; /*!< Specifies the SPI mode (Master/Slave). + This parameter can be a value of @ref SPI_LL_EC_MODE. + + This feature can be modified afterwards using unitary function @ref LL_SPI_SetMode().*/ + + uint32_t DataWidth; /*!< Specifies the SPI data width. + This parameter can be a value of @ref SPI_LL_EC_DATAWIDTH. + + This feature can be modified afterwards using unitary function @ref LL_SPI_SetDataWidth().*/ + + uint32_t ClockPolarity; /*!< Specifies the serial clock steady state. + This parameter can be a value of @ref SPI_LL_EC_POLARITY. + + This feature can be modified afterwards using unitary function @ref LL_SPI_SetClockPolarity().*/ + + uint32_t ClockPhase; /*!< Specifies the clock active edge for the bit capture. + This parameter can be a value of @ref SPI_LL_EC_PHASE. + + This feature can be modified afterwards using unitary function @ref LL_SPI_SetClockPhase().*/ + + uint32_t NSS; /*!< Specifies whether the NSS signal is managed by hardware (NSS pin) or by software using the SSI bit. + This parameter can be a value of @ref SPI_LL_EC_NSS_MODE. + + This feature can be modified afterwards using unitary function @ref LL_SPI_SetNSSMode().*/ + + uint32_t BaudRate; /*!< Specifies the BaudRate prescaler value which will be used to configure the transmit and receive SCK clock. + This parameter can be a value of @ref SPI_LL_EC_BAUDRATEPRESCALER. + @note The communication clock is derived from the master clock. The slave clock does not need to be set. + + This feature can be modified afterwards using unitary function @ref LL_SPI_SetBaudRatePrescaler().*/ + + uint32_t BitOrder; /*!< Specifies whether data transfers start from MSB or LSB bit. + This parameter can be a value of @ref SPI_LL_EC_BIT_ORDER. + + This feature can be modified afterwards using unitary function @ref LL_SPI_SetTransferBitOrder().*/ + + uint32_t CRCCalculation; /*!< Specifies if the CRC calculation is enabled or not. + This parameter can be a value of @ref SPI_LL_EC_CRC_CALCULATION. 
+ + This feature can be modified afterwards using unitary functions @ref LL_SPI_EnableCRC() and @ref LL_SPI_DisableCRC().*/ + + uint32_t CRCPoly; /*!< Specifies the polynomial used for the CRC calculation. + This parameter must be a number between Min_Data = 0x00 and Max_Data = 0xFFFF. + + This feature can be modified afterwards using unitary function @ref LL_SPI_SetCRCPolynomial().*/ + +} LL_SPI_InitTypeDef; + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup SPI_LL_Exported_Constants SPI Exported Constants + * @{ + */ + +/** @defgroup SPI_LL_EC_GET_FLAG Get Flags Defines + * @brief Flags defines which can be used with LL_SPI_ReadReg function + * @{ + */ +#define LL_SPI_SR_RXNE SPI_SR_RXNE /*!< Rx buffer not empty flag */ +#define LL_SPI_SR_TXE SPI_SR_TXE /*!< Tx buffer empty flag */ +#define LL_SPI_SR_BSY SPI_SR_BSY /*!< Busy flag */ +#define LL_SPI_SR_CRCERR SPI_SR_CRCERR /*!< CRC error flag */ +#define LL_SPI_SR_MODF SPI_SR_MODF /*!< Mode fault flag */ +#define LL_SPI_SR_OVR SPI_SR_OVR /*!< Overrun flag */ +#define LL_SPI_SR_FRE SPI_SR_FRE /*!< TI mode frame format error flag */ +/** + * @} + */ + +/** @defgroup SPI_LL_EC_IT IT Defines + * @brief IT defines which can be used with LL_SPI_ReadReg and LL_SPI_WriteReg functions + * @{ + */ +#define LL_SPI_CR2_RXNEIE SPI_CR2_RXNEIE /*!< Rx buffer not empty interrupt enable */ +#define LL_SPI_CR2_TXEIE SPI_CR2_TXEIE /*!< Tx buffer empty interrupt enable */ +#define LL_SPI_CR2_ERRIE SPI_CR2_ERRIE /*!< Error interrupt enable */ +/** + * @} + */ + +/** @defgroup SPI_LL_EC_MODE Operation Mode + * @{ + */ +#define LL_SPI_MODE_MASTER (SPI_CR1_MSTR | SPI_CR1_SSI) /*!< Master configuration */ +#define LL_SPI_MODE_SLAVE 0x00000000U /*!< Slave configuration */ +/** + * @} + */ + +/** @defgroup SPI_LL_EC_PROTOCOL Serial Protocol + * @{ + */ +#define LL_SPI_PROTOCOL_MOTOROLA 0x00000000U /*!< Motorola mode. 
Used as default value */ +#define LL_SPI_PROTOCOL_TI (SPI_CR2_FRF) /*!< TI mode */ +/** + * @} + */ + +/** @defgroup SPI_LL_EC_PHASE Clock Phase + * @{ + */ +#define LL_SPI_PHASE_1EDGE 0x00000000U /*!< First clock transition is the first data capture edge */ +#define LL_SPI_PHASE_2EDGE (SPI_CR1_CPHA) /*!< Second clock transition is the first data capture edge */ +/** + * @} + */ + +/** @defgroup SPI_LL_EC_POLARITY Clock Polarity + * @{ + */ +#define LL_SPI_POLARITY_LOW 0x00000000U /*!< Clock to 0 when idle */ +#define LL_SPI_POLARITY_HIGH (SPI_CR1_CPOL) /*!< Clock to 1 when idle */ +/** + * @} + */ + +/** @defgroup SPI_LL_EC_BAUDRATEPRESCALER Baud Rate Prescaler + * @{ + */ +#define LL_SPI_BAUDRATEPRESCALER_DIV2 0x00000000U /*!< BaudRate control equal to fPCLK/2 */ +#define LL_SPI_BAUDRATEPRESCALER_DIV4 (SPI_CR1_BR_0) /*!< BaudRate control equal to fPCLK/4 */ +#define LL_SPI_BAUDRATEPRESCALER_DIV8 (SPI_CR1_BR_1) /*!< BaudRate control equal to fPCLK/8 */ +#define LL_SPI_BAUDRATEPRESCALER_DIV16 (SPI_CR1_BR_1 | SPI_CR1_BR_0) /*!< BaudRate control equal to fPCLK/16 */ +#define LL_SPI_BAUDRATEPRESCALER_DIV32 (SPI_CR1_BR_2) /*!< BaudRate control equal to fPCLK/32 */ +#define LL_SPI_BAUDRATEPRESCALER_DIV64 (SPI_CR1_BR_2 | SPI_CR1_BR_0) /*!< BaudRate control equal to fPCLK/64 */ +#define LL_SPI_BAUDRATEPRESCALER_DIV128 (SPI_CR1_BR_2 | SPI_CR1_BR_1) /*!< BaudRate control equal to fPCLK/128 */ +#define LL_SPI_BAUDRATEPRESCALER_DIV256 (SPI_CR1_BR_2 | SPI_CR1_BR_1 | SPI_CR1_BR_0) /*!< BaudRate control equal to fPCLK/256 */ +/** + * @} + */ + +/** @defgroup SPI_LL_EC_BIT_ORDER Transmission Bit Order + * @{ + */ +#define LL_SPI_LSB_FIRST (SPI_CR1_LSBFIRST) /*!< Data is transmitted/received with the LSB first */ +#define LL_SPI_MSB_FIRST 0x00000000U /*!< Data is transmitted/received with the MSB first */ +/** + * @} + */ + +/** @defgroup SPI_LL_EC_TRANSFER_MODE Transfer Mode + * @{ + */ +#define LL_SPI_FULL_DUPLEX 0x00000000U /*!< Full-Duplex mode. Rx and Tx transfer on 2 lines */ +#define LL_SPI_SIMPLEX_RX (SPI_CR1_RXONLY) /*!< Simplex Rx mode. Rx transfer only on 1 line */ +#define LL_SPI_HALF_DUPLEX_RX (SPI_CR1_BIDIMODE) /*!< Half-Duplex Rx mode. Rx transfer on 1 line */ +#define LL_SPI_HALF_DUPLEX_TX (SPI_CR1_BIDIMODE | SPI_CR1_BIDIOE) /*!< Half-Duplex Tx mode. Tx transfer on 1 line */ +/** + * @} + */ + +/** @defgroup SPI_LL_EC_NSS_MODE Slave Select Pin Mode + * @{ + */ +#define LL_SPI_NSS_SOFT (SPI_CR1_SSM) /*!< NSS managed internally. NSS pin not used and free */ +#define LL_SPI_NSS_HARD_INPUT 0x00000000U /*!< NSS pin used in Input. Only used in Master mode */ +#define LL_SPI_NSS_HARD_OUTPUT (((uint32_t)SPI_CR2_SSOE << 16U)) /*!< NSS pin used in Output. 
Only used in Slave mode as chip select */ +/** + * @} + */ + +/** @defgroup SPI_LL_EC_DATAWIDTH Datawidth + * @{ + */ +#define LL_SPI_DATAWIDTH_8BIT 0x00000000U /*!< Data length for SPI transfer: 8 bits */ +#define LL_SPI_DATAWIDTH_16BIT (SPI_CR1_DFF) /*!< Data length for SPI transfer: 16 bits */ +/** + * @} + */ +#if defined(USE_FULL_LL_DRIVER) + +/** @defgroup SPI_LL_EC_CRC_CALCULATION CRC Calculation + * @{ + */ +#define LL_SPI_CRCCALCULATION_DISABLE 0x00000000U /*!< CRC calculation disabled */ +#define LL_SPI_CRCCALCULATION_ENABLE (SPI_CR1_CRCEN) /*!< CRC calculation enabled */ +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup SPI_LL_Exported_Macros SPI Exported Macros + * @{ + */ + +/** @defgroup SPI_LL_EM_WRITE_READ Common Write and read registers Macros + * @{ + */ + +/** + * @brief Write a value in SPI register + * @param __INSTANCE__ SPI Instance + * @param __REG__ Register to be written + * @param __VALUE__ Value to be written in the register + * @retval None + */ +#define LL_SPI_WriteReg(__INSTANCE__, __REG__, __VALUE__) WRITE_REG(__INSTANCE__->__REG__, (__VALUE__)) + +/** + * @brief Read a value in SPI register + * @param __INSTANCE__ SPI Instance + * @param __REG__ Register to be read + * @retval Register value + */ +#define LL_SPI_ReadReg(__INSTANCE__, __REG__) READ_REG(__INSTANCE__->__REG__) +/** + * @} + */ + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup SPI_LL_Exported_Functions SPI Exported Functions + * @{ + */ + +/** @defgroup SPI_LL_EF_Configuration Configuration + * @{ + */ + +/** + * @brief Enable SPI peripheral + * @rmtoll CR1 SPE LL_SPI_Enable + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_Enable(SPI_TypeDef *SPIx) +{ + SET_BIT(SPIx->CR1, SPI_CR1_SPE); +} + +/** + * @brief Disable SPI peripheral + * @note When disabling the SPI, follow the procedure described in the Reference Manual. + * @rmtoll CR1 SPE LL_SPI_Disable + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_Disable(SPI_TypeDef *SPIx) +{ + CLEAR_BIT(SPIx->CR1, SPI_CR1_SPE); +} + +/** + * @brief Check if SPI peripheral is enabled + * @rmtoll CR1 SPE LL_SPI_IsEnabled + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsEnabled(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->CR1, SPI_CR1_SPE) == (SPI_CR1_SPE)) ? 1UL : 0UL); +} + +/** + * @brief Set SPI operation mode to Master or Slave + * @note This bit should not be changed when communication is ongoing. 
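+ * @note  Usage sketch (illustrative only, assuming an SPI1 instance whose peripheral clock
+ *        and GPIO pins have already been configured elsewhere): the mode is changed while
+ *        the peripheral is disabled, then SPI is re-enabled.
+ * @code
+ *        LL_SPI_Disable(SPI1);                       // change MSTR only while SPE = 0
+ *        LL_SPI_SetMode(SPI1, LL_SPI_MODE_MASTER);   // also sets SSI, see LL_SPI_MODE_MASTER
+ *        LL_SPI_Enable(SPI1);
+ * @endcode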
+ * @rmtoll CR1 MSTR LL_SPI_SetMode\n + * CR1 SSI LL_SPI_SetMode + * @param SPIx SPI Instance + * @param Mode This parameter can be one of the following values: + * @arg @ref LL_SPI_MODE_MASTER + * @arg @ref LL_SPI_MODE_SLAVE + * @retval None + */ +__STATIC_INLINE void LL_SPI_SetMode(SPI_TypeDef *SPIx, uint32_t Mode) +{ + MODIFY_REG(SPIx->CR1, SPI_CR1_MSTR | SPI_CR1_SSI, Mode); +} + +/** + * @brief Get SPI operation mode (Master or Slave) + * @rmtoll CR1 MSTR LL_SPI_GetMode\n + * CR1 SSI LL_SPI_GetMode + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_SPI_MODE_MASTER + * @arg @ref LL_SPI_MODE_SLAVE + */ +__STATIC_INLINE uint32_t LL_SPI_GetMode(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->CR1, SPI_CR1_MSTR | SPI_CR1_SSI)); +} + +/** + * @brief Set serial protocol used + * @note This bit should be written only when SPI is disabled (SPE = 0) for correct operation. + * @rmtoll CR2 FRF LL_SPI_SetStandard + * @param SPIx SPI Instance + * @param Standard This parameter can be one of the following values: + * @arg @ref LL_SPI_PROTOCOL_MOTOROLA + * @arg @ref LL_SPI_PROTOCOL_TI + * @retval None + */ +__STATIC_INLINE void LL_SPI_SetStandard(SPI_TypeDef *SPIx, uint32_t Standard) +{ + MODIFY_REG(SPIx->CR2, SPI_CR2_FRF, Standard); +} + +/** + * @brief Get serial protocol used + * @rmtoll CR2 FRF LL_SPI_GetStandard + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_SPI_PROTOCOL_MOTOROLA + * @arg @ref LL_SPI_PROTOCOL_TI + */ +__STATIC_INLINE uint32_t LL_SPI_GetStandard(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->CR2, SPI_CR2_FRF)); +} + +/** + * @brief Set clock phase + * @note This bit should not be changed when communication is ongoing. + * This bit is not used in SPI TI mode. + * @rmtoll CR1 CPHA LL_SPI_SetClockPhase + * @param SPIx SPI Instance + * @param ClockPhase This parameter can be one of the following values: + * @arg @ref LL_SPI_PHASE_1EDGE + * @arg @ref LL_SPI_PHASE_2EDGE + * @retval None + */ +__STATIC_INLINE void LL_SPI_SetClockPhase(SPI_TypeDef *SPIx, uint32_t ClockPhase) +{ + MODIFY_REG(SPIx->CR1, SPI_CR1_CPHA, ClockPhase); +} + +/** + * @brief Get clock phase + * @rmtoll CR1 CPHA LL_SPI_GetClockPhase + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_SPI_PHASE_1EDGE + * @arg @ref LL_SPI_PHASE_2EDGE + */ +__STATIC_INLINE uint32_t LL_SPI_GetClockPhase(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->CR1, SPI_CR1_CPHA)); +} + +/** + * @brief Set clock polarity + * @note This bit should not be changed when communication is ongoing. + * This bit is not used in SPI TI mode. 
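+ * @note  Usage sketch (illustrative only, assuming SPI1 and that the peripheral is kept
+ *        disabled while it is reconfigured): select the timing commonly called SPI mode 3.
+ * @code
+ *        LL_SPI_SetClockPolarity(SPI1, LL_SPI_POLARITY_HIGH);   // CPOL = 1, clock idles high
+ *        LL_SPI_SetClockPhase(SPI1, LL_SPI_PHASE_2EDGE);        // CPHA = 1, capture on second edge
+ * @endcode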
+ * @rmtoll CR1 CPOL LL_SPI_SetClockPolarity + * @param SPIx SPI Instance + * @param ClockPolarity This parameter can be one of the following values: + * @arg @ref LL_SPI_POLARITY_LOW + * @arg @ref LL_SPI_POLARITY_HIGH + * @retval None + */ +__STATIC_INLINE void LL_SPI_SetClockPolarity(SPI_TypeDef *SPIx, uint32_t ClockPolarity) +{ + MODIFY_REG(SPIx->CR1, SPI_CR1_CPOL, ClockPolarity); +} + +/** + * @brief Get clock polarity + * @rmtoll CR1 CPOL LL_SPI_GetClockPolarity + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_SPI_POLARITY_LOW + * @arg @ref LL_SPI_POLARITY_HIGH + */ +__STATIC_INLINE uint32_t LL_SPI_GetClockPolarity(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->CR1, SPI_CR1_CPOL)); +} + +/** + * @brief Set baud rate prescaler + * @note These bits should not be changed when communication is ongoing. SPI BaudRate = fPCLK/Prescaler. + * @rmtoll CR1 BR LL_SPI_SetBaudRatePrescaler + * @param SPIx SPI Instance + * @param BaudRate This parameter can be one of the following values: + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV2 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV4 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV8 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV16 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV32 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV64 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV128 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV256 + * @retval None + */ +__STATIC_INLINE void LL_SPI_SetBaudRatePrescaler(SPI_TypeDef *SPIx, uint32_t BaudRate) +{ + MODIFY_REG(SPIx->CR1, SPI_CR1_BR, BaudRate); +} + +/** + * @brief Get baud rate prescaler + * @rmtoll CR1 BR LL_SPI_GetBaudRatePrescaler + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV2 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV4 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV8 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV16 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV32 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV64 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV128 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV256 + */ +__STATIC_INLINE uint32_t LL_SPI_GetBaudRatePrescaler(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->CR1, SPI_CR1_BR)); +} + +/** + * @brief Set transfer bit order + * @note This bit should not be changed when communication is ongoing. This bit is not used in SPI TI mode. + * @rmtoll CR1 LSBFIRST LL_SPI_SetTransferBitOrder + * @param SPIx SPI Instance + * @param BitOrder This parameter can be one of the following values: + * @arg @ref LL_SPI_LSB_FIRST + * @arg @ref LL_SPI_MSB_FIRST + * @retval None + */ +__STATIC_INLINE void LL_SPI_SetTransferBitOrder(SPI_TypeDef *SPIx, uint32_t BitOrder) +{ + MODIFY_REG(SPIx->CR1, SPI_CR1_LSBFIRST, BitOrder); +} + +/** + * @brief Get transfer bit order + * @rmtoll CR1 LSBFIRST LL_SPI_GetTransferBitOrder + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_SPI_LSB_FIRST + * @arg @ref LL_SPI_MSB_FIRST + */ +__STATIC_INLINE uint32_t LL_SPI_GetTransferBitOrder(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->CR1, SPI_CR1_LSBFIRST)); +} + +/** + * @brief Set transfer direction mode + * @note For Half-Duplex mode, Rx Direction is set by default. + * In master mode, the MOSI pin is used and in slave mode, the MISO pin is used for Half-Duplex. 
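+ * @note  Usage sketch (illustrative only, assuming SPI1 is used as a half-duplex master on a
+ *        single data line): switch the shared line to transmit, then back to receive.
+ * @code
+ *        LL_SPI_SetTransferDirection(SPI1, LL_SPI_HALF_DUPLEX_TX);   // BIDIMODE = 1, BIDIOE = 1
+ *        // ... send data with LL_SPI_TransmitData8() ...
+ *        LL_SPI_SetTransferDirection(SPI1, LL_SPI_HALF_DUPLEX_RX);   // BIDIOE = 0, line released for reception
+ * @endcode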
+ * @rmtoll CR1 RXONLY LL_SPI_SetTransferDirection\n + * CR1 BIDIMODE LL_SPI_SetTransferDirection\n + * CR1 BIDIOE LL_SPI_SetTransferDirection + * @param SPIx SPI Instance + * @param TransferDirection This parameter can be one of the following values: + * @arg @ref LL_SPI_FULL_DUPLEX + * @arg @ref LL_SPI_SIMPLEX_RX + * @arg @ref LL_SPI_HALF_DUPLEX_RX + * @arg @ref LL_SPI_HALF_DUPLEX_TX + * @retval None + */ +__STATIC_INLINE void LL_SPI_SetTransferDirection(SPI_TypeDef *SPIx, uint32_t TransferDirection) +{ + MODIFY_REG(SPIx->CR1, SPI_CR1_RXONLY | SPI_CR1_BIDIMODE | SPI_CR1_BIDIOE, TransferDirection); +} + +/** + * @brief Get transfer direction mode + * @rmtoll CR1 RXONLY LL_SPI_GetTransferDirection\n + * CR1 BIDIMODE LL_SPI_GetTransferDirection\n + * CR1 BIDIOE LL_SPI_GetTransferDirection + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_SPI_FULL_DUPLEX + * @arg @ref LL_SPI_SIMPLEX_RX + * @arg @ref LL_SPI_HALF_DUPLEX_RX + * @arg @ref LL_SPI_HALF_DUPLEX_TX + */ +__STATIC_INLINE uint32_t LL_SPI_GetTransferDirection(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->CR1, SPI_CR1_RXONLY | SPI_CR1_BIDIMODE | SPI_CR1_BIDIOE)); +} + +/** + * @brief Set frame data width + * @rmtoll CR1 DFF LL_SPI_SetDataWidth + * @param SPIx SPI Instance + * @param DataWidth This parameter can be one of the following values: + * @arg @ref LL_SPI_DATAWIDTH_8BIT + * @arg @ref LL_SPI_DATAWIDTH_16BIT + * @retval None + */ +__STATIC_INLINE void LL_SPI_SetDataWidth(SPI_TypeDef *SPIx, uint32_t DataWidth) +{ + MODIFY_REG(SPIx->CR1, SPI_CR1_DFF, DataWidth); +} + +/** + * @brief Get frame data width + * @rmtoll CR1 DFF LL_SPI_GetDataWidth + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_SPI_DATAWIDTH_8BIT + * @arg @ref LL_SPI_DATAWIDTH_16BIT + */ +__STATIC_INLINE uint32_t LL_SPI_GetDataWidth(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->CR1, SPI_CR1_DFF)); +} + +/** + * @} + */ + +/** @defgroup SPI_LL_EF_CRC_Management CRC Management + * @{ + */ + +/** + * @brief Enable CRC + * @note This bit should be written only when SPI is disabled (SPE = 0) for correct operation. + * @rmtoll CR1 CRCEN LL_SPI_EnableCRC + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_EnableCRC(SPI_TypeDef *SPIx) +{ + SET_BIT(SPIx->CR1, SPI_CR1_CRCEN); +} + +/** + * @brief Disable CRC + * @note This bit should be written only when SPI is disabled (SPE = 0) for correct operation. + * @rmtoll CR1 CRCEN LL_SPI_DisableCRC + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_DisableCRC(SPI_TypeDef *SPIx) +{ + CLEAR_BIT(SPIx->CR1, SPI_CR1_CRCEN); +} + +/** + * @brief Check if CRC is enabled + * @note This bit should be written only when SPI is disabled (SPE = 0) for correct operation. + * @rmtoll CR1 CRCEN LL_SPI_IsEnabledCRC + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsEnabledCRC(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->CR1, SPI_CR1_CRCEN) == (SPI_CR1_CRCEN)) ? 1UL : 0UL); +} + +/** + * @brief Set CRCNext to transfer CRC on the line + * @note This bit has to be written as soon as the last data is written in the SPIx_DR register. 
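+ * @note  Usage sketch (illustrative only, assuming SPI1 with CRC already enabled via
+ *        @ref LL_SPI_EnableCRC() and a polynomial set with @ref LL_SPI_SetCRCPolynomial();
+ *        last_byte is a placeholder variable):
+ * @code
+ *        LL_SPI_TransmitData8(SPI1, last_byte);   // write the last data item
+ *        LL_SPI_SetCRCNext(SPI1);                 // then request the CRC phase
+ * @endcode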
+ * @rmtoll CR1 CRCNEXT LL_SPI_SetCRCNext + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_SetCRCNext(SPI_TypeDef *SPIx) +{ + SET_BIT(SPIx->CR1, SPI_CR1_CRCNEXT); +} + +/** + * @brief Set polynomial for CRC calculation + * @rmtoll CRCPR CRCPOLY LL_SPI_SetCRCPolynomial + * @param SPIx SPI Instance + * @param CRCPoly This parameter must be a number between Min_Data = 0x00 and Max_Data = 0xFFFF + * @retval None + */ +__STATIC_INLINE void LL_SPI_SetCRCPolynomial(SPI_TypeDef *SPIx, uint32_t CRCPoly) +{ + WRITE_REG(SPIx->CRCPR, (uint16_t)CRCPoly); +} + +/** + * @brief Get polynomial for CRC calculation + * @rmtoll CRCPR CRCPOLY LL_SPI_GetCRCPolynomial + * @param SPIx SPI Instance + * @retval Returned value is a number between Min_Data = 0x00 and Max_Data = 0xFFFF + */ +__STATIC_INLINE uint32_t LL_SPI_GetCRCPolynomial(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_REG(SPIx->CRCPR)); +} + +/** + * @brief Get Rx CRC + * @rmtoll RXCRCR RXCRC LL_SPI_GetRxCRC + * @param SPIx SPI Instance + * @retval Returned value is a number between Min_Data = 0x00 and Max_Data = 0xFFFF + */ +__STATIC_INLINE uint32_t LL_SPI_GetRxCRC(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_REG(SPIx->RXCRCR)); +} + +/** + * @brief Get Tx CRC + * @rmtoll TXCRCR TXCRC LL_SPI_GetTxCRC + * @param SPIx SPI Instance + * @retval Returned value is a number between Min_Data = 0x00 and Max_Data = 0xFFFF + */ +__STATIC_INLINE uint32_t LL_SPI_GetTxCRC(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_REG(SPIx->TXCRCR)); +} + +/** + * @} + */ + +/** @defgroup SPI_LL_EF_NSS_Management Slave Select Pin Management + * @{ + */ + +/** + * @brief Set NSS mode + * @note LL_SPI_NSS_SOFT Mode is not used in SPI TI mode. + * @rmtoll CR1 SSM LL_SPI_SetNSSMode\n + * @rmtoll CR2 SSOE LL_SPI_SetNSSMode + * @param SPIx SPI Instance + * @param NSS This parameter can be one of the following values: + * @arg @ref LL_SPI_NSS_SOFT + * @arg @ref LL_SPI_NSS_HARD_INPUT + * @arg @ref LL_SPI_NSS_HARD_OUTPUT + * @retval None + */ +__STATIC_INLINE void LL_SPI_SetNSSMode(SPI_TypeDef *SPIx, uint32_t NSS) +{ + MODIFY_REG(SPIx->CR1, SPI_CR1_SSM, NSS); + MODIFY_REG(SPIx->CR2, SPI_CR2_SSOE, ((uint32_t)(NSS >> 16U))); +} + +/** + * @brief Get NSS mode + * @rmtoll CR1 SSM LL_SPI_GetNSSMode\n + * @rmtoll CR2 SSOE LL_SPI_GetNSSMode + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_SPI_NSS_SOFT + * @arg @ref LL_SPI_NSS_HARD_INPUT + * @arg @ref LL_SPI_NSS_HARD_OUTPUT + */ +__STATIC_INLINE uint32_t LL_SPI_GetNSSMode(const SPI_TypeDef *SPIx) +{ + uint32_t Ssm = (READ_BIT(SPIx->CR1, SPI_CR1_SSM)); + uint32_t Ssoe = (READ_BIT(SPIx->CR2, SPI_CR2_SSOE) << 16U); + return (Ssm | Ssoe); +} + +/** + * @} + */ + +/** @defgroup SPI_LL_EF_FLAG_Management FLAG Management + * @{ + */ + +/** + * @brief Check if Rx buffer is not empty + * @rmtoll SR RXNE LL_SPI_IsActiveFlag_RXNE + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsActiveFlag_RXNE(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->SR, SPI_SR_RXNE) == (SPI_SR_RXNE)) ? 1UL : 0UL); +} + +/** + * @brief Check if Tx buffer is empty + * @rmtoll SR TXE LL_SPI_IsActiveFlag_TXE + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsActiveFlag_TXE(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->SR, SPI_SR_TXE) == (SPI_SR_TXE)) ? 
1UL : 0UL); +} + +/** + * @brief Get CRC error flag + * @rmtoll SR CRCERR LL_SPI_IsActiveFlag_CRCERR + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsActiveFlag_CRCERR(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->SR, SPI_SR_CRCERR) == (SPI_SR_CRCERR)) ? 1UL : 0UL); +} + +/** + * @brief Get mode fault error flag + * @rmtoll SR MODF LL_SPI_IsActiveFlag_MODF + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsActiveFlag_MODF(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->SR, SPI_SR_MODF) == (SPI_SR_MODF)) ? 1UL : 0UL); +} + +/** + * @brief Get overrun error flag + * @rmtoll SR OVR LL_SPI_IsActiveFlag_OVR + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsActiveFlag_OVR(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->SR, SPI_SR_OVR) == (SPI_SR_OVR)) ? 1UL : 0UL); +} + +/** + * @brief Get busy flag + * @note The BSY flag is cleared under any one of the following conditions: + * -When the SPI is correctly disabled + * -When a fault is detected in Master mode (MODF bit set to 1) + * -In Master mode, when it finishes a data transmission and no new data is ready to be + * sent + * -In Slave mode, when the BSY flag is set to '0' for at least one SPI clock cycle between + * each data transfer. + * @rmtoll SR BSY LL_SPI_IsActiveFlag_BSY + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsActiveFlag_BSY(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->SR, SPI_SR_BSY) == (SPI_SR_BSY)) ? 1UL : 0UL); +} + +/** + * @brief Get frame format error flag + * @rmtoll SR FRE LL_SPI_IsActiveFlag_FRE + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsActiveFlag_FRE(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->SR, SPI_SR_FRE) == (SPI_SR_FRE)) ? 
1UL : 0UL); +} + +/** + * @brief Clear CRC error flag + * @rmtoll SR CRCERR LL_SPI_ClearFlag_CRCERR + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_ClearFlag_CRCERR(SPI_TypeDef *SPIx) +{ + CLEAR_BIT(SPIx->SR, SPI_SR_CRCERR); +} + +/** + * @brief Clear mode fault error flag + * @note Clearing this flag is done by a read access to the SPIx_SR + * register followed by a write access to the SPIx_CR1 register + * @rmtoll SR MODF LL_SPI_ClearFlag_MODF + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_ClearFlag_MODF(SPI_TypeDef *SPIx) +{ + __IO uint32_t tmpreg_sr; + tmpreg_sr = SPIx->SR; + (void) tmpreg_sr; + CLEAR_BIT(SPIx->CR1, SPI_CR1_SPE); +} + +/** + * @brief Clear overrun error flag + * @note Clearing this flag is done by a read access to the SPIx_DR + * register followed by a read access to the SPIx_SR register + * @rmtoll SR OVR LL_SPI_ClearFlag_OVR + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_ClearFlag_OVR(SPI_TypeDef *SPIx) +{ + __IO uint32_t tmpreg; + tmpreg = SPIx->DR; + (void) tmpreg; + tmpreg = SPIx->SR; + (void) tmpreg; +} + +/** + * @brief Clear frame format error flag + * @note Clearing this flag is done by reading SPIx_SR register + * @rmtoll SR FRE LL_SPI_ClearFlag_FRE + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_ClearFlag_FRE(SPI_TypeDef *SPIx) +{ + __IO uint32_t tmpreg; + tmpreg = SPIx->SR; + (void) tmpreg; +} + +/** + * @} + */ + +/** @defgroup SPI_LL_EF_IT_Management Interrupt Management + * @{ + */ + +/** + * @brief Enable error interrupt + * @note This bit controls the generation of an interrupt when an error condition occurs (CRCERR, OVR, MODF in SPI mode, FRE at TI mode). + * @rmtoll CR2 ERRIE LL_SPI_EnableIT_ERR + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_EnableIT_ERR(SPI_TypeDef *SPIx) +{ + SET_BIT(SPIx->CR2, SPI_CR2_ERRIE); +} + +/** + * @brief Enable Rx buffer not empty interrupt + * @rmtoll CR2 RXNEIE LL_SPI_EnableIT_RXNE + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_EnableIT_RXNE(SPI_TypeDef *SPIx) +{ + SET_BIT(SPIx->CR2, SPI_CR2_RXNEIE); +} + +/** + * @brief Enable Tx buffer empty interrupt + * @rmtoll CR2 TXEIE LL_SPI_EnableIT_TXE + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_EnableIT_TXE(SPI_TypeDef *SPIx) +{ + SET_BIT(SPIx->CR2, SPI_CR2_TXEIE); +} + +/** + * @brief Disable error interrupt + * @note This bit controls the generation of an interrupt when an error condition occurs (CRCERR, OVR, MODF in SPI mode, FRE at TI mode). + * @rmtoll CR2 ERRIE LL_SPI_DisableIT_ERR + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_DisableIT_ERR(SPI_TypeDef *SPIx) +{ + CLEAR_BIT(SPIx->CR2, SPI_CR2_ERRIE); +} + +/** + * @brief Disable Rx buffer not empty interrupt + * @rmtoll CR2 RXNEIE LL_SPI_DisableIT_RXNE + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_DisableIT_RXNE(SPI_TypeDef *SPIx) +{ + CLEAR_BIT(SPIx->CR2, SPI_CR2_RXNEIE); +} + +/** + * @brief Disable Tx buffer empty interrupt + * @rmtoll CR2 TXEIE LL_SPI_DisableIT_TXE + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_DisableIT_TXE(SPI_TypeDef *SPIx) +{ + CLEAR_BIT(SPIx->CR2, SPI_CR2_TXEIE); +} + +/** + * @brief Check if error interrupt is enabled + * @rmtoll CR2 ERRIE LL_SPI_IsEnabledIT_ERR + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). 
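+ * @note  Usage sketch (illustrative only, assuming SPI1 and code running in the SPI interrupt
+ *        handler): check that the error interrupt is enabled and which flag raised it.
+ * @code
+ *        if (LL_SPI_IsEnabledIT_ERR(SPI1) && LL_SPI_IsActiveFlag_OVR(SPI1))
+ *        {
+ *          LL_SPI_ClearFlag_OVR(SPI1);   // DR read followed by SR read, as described above
+ *        }
+ * @endcode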
+ */ +__STATIC_INLINE uint32_t LL_SPI_IsEnabledIT_ERR(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->CR2, SPI_CR2_ERRIE) == (SPI_CR2_ERRIE)) ? 1UL : 0UL); +} + +/** + * @brief Check if Rx buffer not empty interrupt is enabled + * @rmtoll CR2 RXNEIE LL_SPI_IsEnabledIT_RXNE + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsEnabledIT_RXNE(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->CR2, SPI_CR2_RXNEIE) == (SPI_CR2_RXNEIE)) ? 1UL : 0UL); +} + +/** + * @brief Check if Tx buffer empty interrupt + * @rmtoll CR2 TXEIE LL_SPI_IsEnabledIT_TXE + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsEnabledIT_TXE(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->CR2, SPI_CR2_TXEIE) == (SPI_CR2_TXEIE)) ? 1UL : 0UL); +} + +/** + * @} + */ + +/** @defgroup SPI_LL_EF_DMA_Management DMA Management + * @{ + */ + +/** + * @brief Enable DMA Rx + * @rmtoll CR2 RXDMAEN LL_SPI_EnableDMAReq_RX + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_EnableDMAReq_RX(SPI_TypeDef *SPIx) +{ + SET_BIT(SPIx->CR2, SPI_CR2_RXDMAEN); +} + +/** + * @brief Disable DMA Rx + * @rmtoll CR2 RXDMAEN LL_SPI_DisableDMAReq_RX + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_DisableDMAReq_RX(SPI_TypeDef *SPIx) +{ + CLEAR_BIT(SPIx->CR2, SPI_CR2_RXDMAEN); +} + +/** + * @brief Check if DMA Rx is enabled + * @rmtoll CR2 RXDMAEN LL_SPI_IsEnabledDMAReq_RX + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsEnabledDMAReq_RX(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->CR2, SPI_CR2_RXDMAEN) == (SPI_CR2_RXDMAEN)) ? 1UL : 0UL); +} + +/** + * @brief Enable DMA Tx + * @rmtoll CR2 TXDMAEN LL_SPI_EnableDMAReq_TX + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_EnableDMAReq_TX(SPI_TypeDef *SPIx) +{ + SET_BIT(SPIx->CR2, SPI_CR2_TXDMAEN); +} + +/** + * @brief Disable DMA Tx + * @rmtoll CR2 TXDMAEN LL_SPI_DisableDMAReq_TX + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_DisableDMAReq_TX(SPI_TypeDef *SPIx) +{ + CLEAR_BIT(SPIx->CR2, SPI_CR2_TXDMAEN); +} + +/** + * @brief Check if DMA Tx is enabled + * @rmtoll CR2 TXDMAEN LL_SPI_IsEnabledDMAReq_TX + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsEnabledDMAReq_TX(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->CR2, SPI_CR2_TXDMAEN) == (SPI_CR2_TXDMAEN)) ? 
1UL : 0UL); +} + +/** + * @brief Get the data register address used for DMA transfer + * @rmtoll DR DR LL_SPI_DMA_GetRegAddr + * @param SPIx SPI Instance + * @retval Address of data register + */ +__STATIC_INLINE uint32_t LL_SPI_DMA_GetRegAddr(const SPI_TypeDef *SPIx) +{ + return (uint32_t) &(SPIx->DR); +} + +/** + * @} + */ + +/** @defgroup SPI_LL_EF_DATA_Management DATA Management + * @{ + */ + +/** + * @brief Read 8-Bits in the data register + * @rmtoll DR DR LL_SPI_ReceiveData8 + * @param SPIx SPI Instance + * @retval RxData Value between Min_Data=0x00 and Max_Data=0xFF + */ +__STATIC_INLINE uint8_t LL_SPI_ReceiveData8(SPI_TypeDef *SPIx) +{ + return (*((__IO uint8_t *)&SPIx->DR)); +} + +/** + * @brief Read 16-Bits in the data register + * @rmtoll DR DR LL_SPI_ReceiveData16 + * @param SPIx SPI Instance + * @retval RxData Value between Min_Data=0x00 and Max_Data=0xFFFF + */ +__STATIC_INLINE uint16_t LL_SPI_ReceiveData16(SPI_TypeDef *SPIx) +{ + return (uint16_t)(READ_REG(SPIx->DR)); +} + +/** + * @brief Write 8-Bits in the data register + * @rmtoll DR DR LL_SPI_TransmitData8 + * @param SPIx SPI Instance + * @param TxData Value between Min_Data=0x00 and Max_Data=0xFF + * @retval None + */ +__STATIC_INLINE void LL_SPI_TransmitData8(SPI_TypeDef *SPIx, uint8_t TxData) +{ +#if defined (__GNUC__) + __IO uint8_t *spidr = ((__IO uint8_t *)&SPIx->DR); + *spidr = TxData; +#else + *((__IO uint8_t *)&SPIx->DR) = TxData; +#endif /* __GNUC__ */ +} + +/** + * @brief Write 16-Bits in the data register + * @rmtoll DR DR LL_SPI_TransmitData16 + * @param SPIx SPI Instance + * @param TxData Value between Min_Data=0x00 and Max_Data=0xFFFF + * @retval None + */ +__STATIC_INLINE void LL_SPI_TransmitData16(SPI_TypeDef *SPIx, uint16_t TxData) +{ +#if defined (__GNUC__) + __IO uint16_t *spidr = ((__IO uint16_t *)&SPIx->DR); + *spidr = TxData; +#else + SPIx->DR = TxData; +#endif /* __GNUC__ */ +} + +/** + * @} + */ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup SPI_LL_EF_Init Initialization and de-initialization functions + * @{ + */ + +ErrorStatus LL_SPI_DeInit(const SPI_TypeDef *SPIx); +ErrorStatus LL_SPI_Init(SPI_TypeDef *SPIx, LL_SPI_InitTypeDef *SPI_InitStruct); +void LL_SPI_StructInit(LL_SPI_InitTypeDef *SPI_InitStruct); + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ +/** + * @} + */ + +/** + * @} + */ + +/** @defgroup I2S_LL I2S + * @{ + */ + +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ + +/* Exported types ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup I2S_LL_ES_INIT I2S Exported Init structure + * @{ + */ + +/** + * @brief I2S Init structure definition + */ + +typedef struct +{ + uint32_t Mode; /*!< Specifies the I2S operating mode. + This parameter can be a value of @ref I2S_LL_EC_MODE + + This feature can be modified afterwards using unitary function @ref LL_I2S_SetTransferMode().*/ + + uint32_t Standard; /*!< Specifies the standard used for the I2S communication. + This parameter can be a value of @ref I2S_LL_EC_STANDARD + + This feature can be modified afterwards using unitary function @ref LL_I2S_SetStandard().*/ + + + uint32_t DataFormat; /*!< Specifies the data format for the I2S communication. 
+ This parameter can be a value of @ref I2S_LL_EC_DATA_FORMAT
+
+ This feature can be modified afterwards using unitary function @ref LL_I2S_SetDataFormat().*/
+
+
+ uint32_t MCLKOutput; /*!< Specifies whether the I2S MCLK output is enabled or not.
+ This parameter can be a value of @ref I2S_LL_EC_MCLK_OUTPUT
+
+ This feature can be modified afterwards using unitary functions @ref LL_I2S_EnableMasterClock() or @ref LL_I2S_DisableMasterClock().*/
+
+
+ uint32_t AudioFreq; /*!< Specifies the frequency selected for the I2S communication.
+ This parameter can be a value of @ref I2S_LL_EC_AUDIO_FREQ
+
+ Audio Frequency can be modified afterwards using Reference manual formulas to calculate Prescaler Linear, Parity
+ and unitary functions @ref LL_I2S_SetPrescalerLinear() and @ref LL_I2S_SetPrescalerParity() to set it.*/
+
+
+ uint32_t ClockPolarity; /*!< Specifies the idle state of the I2S clock.
+ This parameter can be a value of @ref I2S_LL_EC_POLARITY
+
+ This feature can be modified afterwards using unitary function @ref LL_I2S_SetClockPolarity().*/
+
+} LL_I2S_InitTypeDef;
+
+/**
+ * @}
+ */
+#endif /*USE_FULL_LL_DRIVER*/
+
+/* Exported constants --------------------------------------------------------*/
+/** @defgroup I2S_LL_Exported_Constants I2S Exported Constants
+ * @{
+ */
+
+/** @defgroup I2S_LL_EC_GET_FLAG Get Flags Defines
+ * @brief Flags defines which can be used with LL_I2S_ReadReg function
+ * @{
+ */
+#define LL_I2S_SR_RXNE LL_SPI_SR_RXNE /*!< Rx buffer not empty flag */
+#define LL_I2S_SR_TXE LL_SPI_SR_TXE /*!< Tx buffer empty flag */
+#define LL_I2S_SR_BSY LL_SPI_SR_BSY /*!< Busy flag */
+#define LL_I2S_SR_UDR SPI_SR_UDR /*!< Underrun flag */
+#define LL_I2S_SR_OVR LL_SPI_SR_OVR /*!< Overrun flag */
+#define LL_I2S_SR_FRE LL_SPI_SR_FRE /*!< TI mode frame format error flag */
+/**
+ * @}
+ */
+
+/** @defgroup I2S_LL_EC_IT IT Defines
+ * @brief IT defines which can be used with LL_I2S_ReadReg and LL_I2S_WriteReg functions
+ * @{
+ */
+#define LL_I2S_CR2_RXNEIE LL_SPI_CR2_RXNEIE /*!< Rx buffer not empty interrupt enable */
+#define LL_I2S_CR2_TXEIE LL_SPI_CR2_TXEIE /*!< Tx buffer empty interrupt enable */
+#define LL_I2S_CR2_ERRIE LL_SPI_CR2_ERRIE /*!< Error interrupt enable */
+/**
+ * @}
+ */
+
+/** @defgroup I2S_LL_EC_DATA_FORMAT Data format
+ * @{
+ */
+#define LL_I2S_DATAFORMAT_16B 0x00000000U /*!< Data length 16 bits, Channel length 16bit */
+#define LL_I2S_DATAFORMAT_16B_EXTENDED (SPI_I2SCFGR_CHLEN) /*!< Data length 16 bits, Channel length 32bit */
+#define LL_I2S_DATAFORMAT_24B (SPI_I2SCFGR_CHLEN | SPI_I2SCFGR_DATLEN_0) /*!< Data length 24 bits, Channel length 32bit */
+#define LL_I2S_DATAFORMAT_32B (SPI_I2SCFGR_CHLEN | SPI_I2SCFGR_DATLEN_1) /*!< Data length 32 bits, Channel length 32bit */
+/**
+ * @}
+ */
+
+/** @defgroup I2S_LL_EC_POLARITY Clock Polarity
+ * @{
+ */
+#define LL_I2S_POLARITY_LOW 0x00000000U /*!< Clock steady state is low level */
+#define LL_I2S_POLARITY_HIGH (SPI_I2SCFGR_CKPOL) /*!< Clock steady state is high level */
+/**
+ * @}
+ */
+
+/** @defgroup I2S_LL_EC_STANDARD I2S Standard
+ * @{
+ */
+#define LL_I2S_STANDARD_PHILIPS 0x00000000U /*!< I2S standard Philips */
+#define LL_I2S_STANDARD_MSB (SPI_I2SCFGR_I2SSTD_0) /*!< MSB justified standard (left justified) */
+#define LL_I2S_STANDARD_LSB (SPI_I2SCFGR_I2SSTD_1) /*!< LSB justified standard (right justified) */
+#define LL_I2S_STANDARD_PCM_SHORT (SPI_I2SCFGR_I2SSTD_0 | SPI_I2SCFGR_I2SSTD_1) /*!< PCM standard, short frame synchronization */
+#define LL_I2S_STANDARD_PCM_LONG (SPI_I2SCFGR_I2SSTD_0 | SPI_I2SCFGR_I2SSTD_1 | SPI_I2SCFGR_PCMSYNC) /*!< PCM standard, long frame synchronization */
+/**
+ * @}
+ */
+
+/** @defgroup I2S_LL_EC_MODE Operation Mode
+ * @{
+ */
+#define LL_I2S_MODE_SLAVE_TX 0x00000000U /*!< Slave Tx configuration */
+#define LL_I2S_MODE_SLAVE_RX (SPI_I2SCFGR_I2SCFG_0) /*!< Slave Rx configuration */
+#define LL_I2S_MODE_MASTER_TX (SPI_I2SCFGR_I2SCFG_1) /*!< Master Tx configuration */
+#define LL_I2S_MODE_MASTER_RX (SPI_I2SCFGR_I2SCFG_0 | SPI_I2SCFGR_I2SCFG_1) /*!< Master Rx configuration */
+/**
+ * @}
+ */
+
+/** @defgroup I2S_LL_EC_PRESCALER_FACTOR Prescaler Factor
+ * @{
+ */
+#define LL_I2S_PRESCALER_PARITY_EVEN 0x00000000U /*!< Even factor: Real divider value is = I2SDIV * 2 */
+#define LL_I2S_PRESCALER_PARITY_ODD (SPI_I2SPR_ODD >> 8U) /*!< Odd factor: Real divider value is = (I2SDIV * 2)+1 */
+/**
+ * @}
+ */
+
+#if defined(USE_FULL_LL_DRIVER)
+
+/** @defgroup I2S_LL_EC_MCLK_OUTPUT MCLK Output
+ * @{
+ */
+#define LL_I2S_MCLK_OUTPUT_DISABLE 0x00000000U /*!< Master clock output is disabled */
+#define LL_I2S_MCLK_OUTPUT_ENABLE (SPI_I2SPR_MCKOE) /*!< Master clock output is enabled */
+/**
+ * @}
+ */
+
+/** @defgroup I2S_LL_EC_AUDIO_FREQ Audio Frequency
+ * @{
+ */
+
+#define LL_I2S_AUDIOFREQ_192K 192000U /*!< Audio Frequency configuration 192000 Hz */
+#define LL_I2S_AUDIOFREQ_96K 96000U /*!< Audio Frequency configuration 96000 Hz */
+#define LL_I2S_AUDIOFREQ_48K 48000U /*!< Audio Frequency configuration 48000 Hz */
+#define LL_I2S_AUDIOFREQ_44K 44100U /*!< Audio Frequency configuration 44100 Hz */
+#define LL_I2S_AUDIOFREQ_32K 32000U /*!< Audio Frequency configuration 32000 Hz */
+#define LL_I2S_AUDIOFREQ_22K 22050U /*!< Audio Frequency configuration 22050 Hz */
+#define LL_I2S_AUDIOFREQ_16K 16000U /*!< Audio Frequency configuration 16000 Hz */
+#define LL_I2S_AUDIOFREQ_11K 11025U /*!< Audio Frequency configuration 11025 Hz */
+#define LL_I2S_AUDIOFREQ_8K 8000U /*!< Audio Frequency configuration 8000 Hz */
+#define LL_I2S_AUDIOFREQ_DEFAULT 2U /*!< Audio Freq not specified.
Register I2SDIV = 2 */ +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup I2S_LL_Exported_Macros I2S Exported Macros + * @{ + */ + +/** @defgroup I2S_LL_EM_WRITE_READ Common Write and read registers Macros + * @{ + */ + +/** + * @brief Write a value in I2S register + * @param __INSTANCE__ I2S Instance + * @param __REG__ Register to be written + * @param __VALUE__ Value to be written in the register + * @retval None + */ +#define LL_I2S_WriteReg(__INSTANCE__, __REG__, __VALUE__) WRITE_REG(__INSTANCE__->__REG__, (__VALUE__)) + +/** + * @brief Read a value in I2S register + * @param __INSTANCE__ I2S Instance + * @param __REG__ Register to be read + * @retval Register value + */ +#define LL_I2S_ReadReg(__INSTANCE__, __REG__) READ_REG(__INSTANCE__->__REG__) +/** + * @} + */ + +/** + * @} + */ + + +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup I2S_LL_Exported_Functions I2S Exported Functions + * @{ + */ + +/** @defgroup I2S_LL_EF_Configuration Configuration + * @{ + */ + +/** + * @brief Select I2S mode and Enable I2S peripheral + * @rmtoll I2SCFGR I2SMOD LL_I2S_Enable\n + * I2SCFGR I2SE LL_I2S_Enable + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_Enable(SPI_TypeDef *SPIx) +{ + SET_BIT(SPIx->I2SCFGR, SPI_I2SCFGR_I2SMOD | SPI_I2SCFGR_I2SE); +} + +/** + * @brief Disable I2S peripheral + * @rmtoll I2SCFGR I2SE LL_I2S_Disable + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_Disable(SPI_TypeDef *SPIx) +{ + CLEAR_BIT(SPIx->I2SCFGR, SPI_I2SCFGR_I2SMOD | SPI_I2SCFGR_I2SE); +} + +/** + * @brief Check if I2S peripheral is enabled + * @rmtoll I2SCFGR I2SE LL_I2S_IsEnabled + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsEnabled(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->I2SCFGR, SPI_I2SCFGR_I2SE) == (SPI_I2SCFGR_I2SE)) ? 
1UL : 0UL); +} + +/** + * @brief Set I2S data frame length + * @rmtoll I2SCFGR DATLEN LL_I2S_SetDataFormat\n + * I2SCFGR CHLEN LL_I2S_SetDataFormat + * @param SPIx SPI Instance + * @param DataFormat This parameter can be one of the following values: + * @arg @ref LL_I2S_DATAFORMAT_16B + * @arg @ref LL_I2S_DATAFORMAT_16B_EXTENDED + * @arg @ref LL_I2S_DATAFORMAT_24B + * @arg @ref LL_I2S_DATAFORMAT_32B + * @retval None + */ +__STATIC_INLINE void LL_I2S_SetDataFormat(SPI_TypeDef *SPIx, uint32_t DataFormat) +{ + MODIFY_REG(SPIx->I2SCFGR, SPI_I2SCFGR_DATLEN | SPI_I2SCFGR_CHLEN, DataFormat); +} + +/** + * @brief Get I2S data frame length + * @rmtoll I2SCFGR DATLEN LL_I2S_GetDataFormat\n + * I2SCFGR CHLEN LL_I2S_GetDataFormat + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_I2S_DATAFORMAT_16B + * @arg @ref LL_I2S_DATAFORMAT_16B_EXTENDED + * @arg @ref LL_I2S_DATAFORMAT_24B + * @arg @ref LL_I2S_DATAFORMAT_32B + */ +__STATIC_INLINE uint32_t LL_I2S_GetDataFormat(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->I2SCFGR, SPI_I2SCFGR_DATLEN | SPI_I2SCFGR_CHLEN)); +} + +/** + * @brief Set I2S clock polarity + * @rmtoll I2SCFGR CKPOL LL_I2S_SetClockPolarity + * @param SPIx SPI Instance + * @param ClockPolarity This parameter can be one of the following values: + * @arg @ref LL_I2S_POLARITY_LOW + * @arg @ref LL_I2S_POLARITY_HIGH + * @retval None + */ +__STATIC_INLINE void LL_I2S_SetClockPolarity(SPI_TypeDef *SPIx, uint32_t ClockPolarity) +{ + SET_BIT(SPIx->I2SCFGR, ClockPolarity); +} + +/** + * @brief Get I2S clock polarity + * @rmtoll I2SCFGR CKPOL LL_I2S_GetClockPolarity + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_I2S_POLARITY_LOW + * @arg @ref LL_I2S_POLARITY_HIGH + */ +__STATIC_INLINE uint32_t LL_I2S_GetClockPolarity(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->I2SCFGR, SPI_I2SCFGR_CKPOL)); +} + +/** + * @brief Set I2S standard protocol + * @rmtoll I2SCFGR I2SSTD LL_I2S_SetStandard\n + * I2SCFGR PCMSYNC LL_I2S_SetStandard + * @param SPIx SPI Instance + * @param Standard This parameter can be one of the following values: + * @arg @ref LL_I2S_STANDARD_PHILIPS + * @arg @ref LL_I2S_STANDARD_MSB + * @arg @ref LL_I2S_STANDARD_LSB + * @arg @ref LL_I2S_STANDARD_PCM_SHORT + * @arg @ref LL_I2S_STANDARD_PCM_LONG + * @retval None + */ +__STATIC_INLINE void LL_I2S_SetStandard(SPI_TypeDef *SPIx, uint32_t Standard) +{ + MODIFY_REG(SPIx->I2SCFGR, SPI_I2SCFGR_I2SSTD | SPI_I2SCFGR_PCMSYNC, Standard); +} + +/** + * @brief Get I2S standard protocol + * @rmtoll I2SCFGR I2SSTD LL_I2S_GetStandard\n + * I2SCFGR PCMSYNC LL_I2S_GetStandard + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_I2S_STANDARD_PHILIPS + * @arg @ref LL_I2S_STANDARD_MSB + * @arg @ref LL_I2S_STANDARD_LSB + * @arg @ref LL_I2S_STANDARD_PCM_SHORT + * @arg @ref LL_I2S_STANDARD_PCM_LONG + */ +__STATIC_INLINE uint32_t LL_I2S_GetStandard(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->I2SCFGR, SPI_I2SCFGR_I2SSTD | SPI_I2SCFGR_PCMSYNC)); +} + +/** + * @brief Set I2S transfer mode + * @rmtoll I2SCFGR I2SCFG LL_I2S_SetTransferMode + * @param SPIx SPI Instance + * @param Mode This parameter can be one of the following values: + * @arg @ref LL_I2S_MODE_SLAVE_TX + * @arg @ref LL_I2S_MODE_SLAVE_RX + * @arg @ref LL_I2S_MODE_MASTER_TX + * @arg @ref LL_I2S_MODE_MASTER_RX + * @retval None + */ +__STATIC_INLINE void 
LL_I2S_SetTransferMode(SPI_TypeDef *SPIx, uint32_t Mode) +{ + MODIFY_REG(SPIx->I2SCFGR, SPI_I2SCFGR_I2SCFG, Mode); +} + +/** + * @brief Get I2S transfer mode + * @rmtoll I2SCFGR I2SCFG LL_I2S_GetTransferMode + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_I2S_MODE_SLAVE_TX + * @arg @ref LL_I2S_MODE_SLAVE_RX + * @arg @ref LL_I2S_MODE_MASTER_TX + * @arg @ref LL_I2S_MODE_MASTER_RX + */ +__STATIC_INLINE uint32_t LL_I2S_GetTransferMode(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->I2SCFGR, SPI_I2SCFGR_I2SCFG)); +} + +/** + * @brief Set I2S linear prescaler + * @rmtoll I2SPR I2SDIV LL_I2S_SetPrescalerLinear + * @param SPIx SPI Instance + * @param PrescalerLinear Value between Min_Data=0x02 and Max_Data=0xFF + * @retval None + */ +__STATIC_INLINE void LL_I2S_SetPrescalerLinear(SPI_TypeDef *SPIx, uint8_t PrescalerLinear) +{ + MODIFY_REG(SPIx->I2SPR, SPI_I2SPR_I2SDIV, PrescalerLinear); +} + +/** + * @brief Get I2S linear prescaler + * @rmtoll I2SPR I2SDIV LL_I2S_GetPrescalerLinear + * @param SPIx SPI Instance + * @retval PrescalerLinear Value between Min_Data=0x02 and Max_Data=0xFF + */ +__STATIC_INLINE uint32_t LL_I2S_GetPrescalerLinear(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->I2SPR, SPI_I2SPR_I2SDIV)); +} + +/** + * @brief Set I2S parity prescaler + * @rmtoll I2SPR ODD LL_I2S_SetPrescalerParity + * @param SPIx SPI Instance + * @param PrescalerParity This parameter can be one of the following values: + * @arg @ref LL_I2S_PRESCALER_PARITY_EVEN + * @arg @ref LL_I2S_PRESCALER_PARITY_ODD + * @retval None + */ +__STATIC_INLINE void LL_I2S_SetPrescalerParity(SPI_TypeDef *SPIx, uint32_t PrescalerParity) +{ + MODIFY_REG(SPIx->I2SPR, SPI_I2SPR_ODD, PrescalerParity << 8U); +} + +/** + * @brief Get I2S parity prescaler + * @rmtoll I2SPR ODD LL_I2S_GetPrescalerParity + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_I2S_PRESCALER_PARITY_EVEN + * @arg @ref LL_I2S_PRESCALER_PARITY_ODD + */ +__STATIC_INLINE uint32_t LL_I2S_GetPrescalerParity(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->I2SPR, SPI_I2SPR_ODD) >> 8U); +} + +/** + * @brief Enable the master clock output (Pin MCK) + * @rmtoll I2SPR MCKOE LL_I2S_EnableMasterClock + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_EnableMasterClock(SPI_TypeDef *SPIx) +{ + SET_BIT(SPIx->I2SPR, SPI_I2SPR_MCKOE); +} + +/** + * @brief Disable the master clock output (Pin MCK) + * @rmtoll I2SPR MCKOE LL_I2S_DisableMasterClock + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_DisableMasterClock(SPI_TypeDef *SPIx) +{ + CLEAR_BIT(SPIx->I2SPR, SPI_I2SPR_MCKOE); +} + +/** + * @brief Check if the master clock output (Pin MCK) is enabled + * @rmtoll I2SPR MCKOE LL_I2S_IsEnabledMasterClock + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsEnabledMasterClock(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->I2SPR, SPI_I2SPR_MCKOE) == (SPI_I2SPR_MCKOE)) ? 
1UL : 0UL); +} + +#if defined(SPI_I2SCFGR_ASTRTEN) +/** + * @brief Enable asynchronous start + * @rmtoll I2SCFGR ASTRTEN LL_I2S_EnableAsyncStart + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_EnableAsyncStart(SPI_TypeDef *SPIx) +{ + SET_BIT(SPIx->I2SCFGR, SPI_I2SCFGR_ASTRTEN); +} + +/** + * @brief Disable asynchronous start + * @rmtoll I2SCFGR ASTRTEN LL_I2S_DisableAsyncStart + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_DisableAsyncStart(SPI_TypeDef *SPIx) +{ + CLEAR_BIT(SPIx->I2SCFGR, SPI_I2SCFGR_ASTRTEN); +} + +/** + * @brief Check if asynchronous start is enabled + * @rmtoll I2SCFGR ASTRTEN LL_I2S_IsEnabledAsyncStart + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsEnabledAsyncStart(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->I2SCFGR, SPI_I2SCFGR_ASTRTEN) == (SPI_I2SCFGR_ASTRTEN)) ? 1UL : 0UL); +} +#endif /* SPI_I2SCFGR_ASTRTEN */ + +/** + * @} + */ + +/** @defgroup I2S_LL_EF_FLAG FLAG Management + * @{ + */ + +/** + * @brief Check if Rx buffer is not empty + * @rmtoll SR RXNE LL_I2S_IsActiveFlag_RXNE + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsActiveFlag_RXNE(const SPI_TypeDef *SPIx) +{ + return LL_SPI_IsActiveFlag_RXNE(SPIx); +} + +/** + * @brief Check if Tx buffer is empty + * @rmtoll SR TXE LL_I2S_IsActiveFlag_TXE + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsActiveFlag_TXE(const SPI_TypeDef *SPIx) +{ + return LL_SPI_IsActiveFlag_TXE(SPIx); +} + +/** + * @brief Get busy flag + * @rmtoll SR BSY LL_I2S_IsActiveFlag_BSY + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsActiveFlag_BSY(const SPI_TypeDef *SPIx) +{ + return LL_SPI_IsActiveFlag_BSY(SPIx); +} + +/** + * @brief Get overrun error flag + * @rmtoll SR OVR LL_I2S_IsActiveFlag_OVR + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsActiveFlag_OVR(const SPI_TypeDef *SPIx) +{ + return LL_SPI_IsActiveFlag_OVR(SPIx); +} + +/** + * @brief Get underrun error flag + * @rmtoll SR UDR LL_I2S_IsActiveFlag_UDR + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsActiveFlag_UDR(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->SR, SPI_SR_UDR) == (SPI_SR_UDR)) ? 1UL : 0UL); +} + +/** + * @brief Get frame format error flag + * @rmtoll SR FRE LL_I2S_IsActiveFlag_FRE + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsActiveFlag_FRE(const SPI_TypeDef *SPIx) +{ + return LL_SPI_IsActiveFlag_FRE(SPIx); +} + +/** + * @brief Get channel side flag. + * @note 0: Channel Left has to be transmitted or has been received\n + * 1: Channel Right has to be transmitted or has been received\n + * It has no significance in PCM mode. + * @rmtoll SR CHSIDE LL_I2S_IsActiveFlag_CHSIDE + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsActiveFlag_CHSIDE(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->SR, SPI_SR_CHSIDE) == (SPI_SR_CHSIDE)) ? 
1UL : 0UL); +} + +/** + * @brief Clear overrun error flag + * @rmtoll SR OVR LL_I2S_ClearFlag_OVR + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_ClearFlag_OVR(SPI_TypeDef *SPIx) +{ + LL_SPI_ClearFlag_OVR(SPIx); +} + +/** + * @brief Clear underrun error flag + * @rmtoll SR UDR LL_I2S_ClearFlag_UDR + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_ClearFlag_UDR(SPI_TypeDef *SPIx) +{ + __IO uint32_t tmpreg; + tmpreg = SPIx->SR; + (void)tmpreg; +} + +/** + * @brief Clear frame format error flag + * @rmtoll SR FRE LL_I2S_ClearFlag_FRE + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_ClearFlag_FRE(SPI_TypeDef *SPIx) +{ + LL_SPI_ClearFlag_FRE(SPIx); +} + +/** + * @} + */ + +/** @defgroup I2S_LL_EF_IT Interrupt Management + * @{ + */ + +/** + * @brief Enable error IT + * @note This bit controls the generation of an interrupt when an error condition occurs (OVR, UDR and FRE in I2S mode). + * @rmtoll CR2 ERRIE LL_I2S_EnableIT_ERR + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_EnableIT_ERR(SPI_TypeDef *SPIx) +{ + LL_SPI_EnableIT_ERR(SPIx); +} + +/** + * @brief Enable Rx buffer not empty IT + * @rmtoll CR2 RXNEIE LL_I2S_EnableIT_RXNE + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_EnableIT_RXNE(SPI_TypeDef *SPIx) +{ + LL_SPI_EnableIT_RXNE(SPIx); +} + +/** + * @brief Enable Tx buffer empty IT + * @rmtoll CR2 TXEIE LL_I2S_EnableIT_TXE + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_EnableIT_TXE(SPI_TypeDef *SPIx) +{ + LL_SPI_EnableIT_TXE(SPIx); +} + +/** + * @brief Disable error IT + * @note This bit controls the generation of an interrupt when an error condition occurs (OVR, UDR and FRE in I2S mode). + * @rmtoll CR2 ERRIE LL_I2S_DisableIT_ERR + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_DisableIT_ERR(SPI_TypeDef *SPIx) +{ + LL_SPI_DisableIT_ERR(SPIx); +} + +/** + * @brief Disable Rx buffer not empty IT + * @rmtoll CR2 RXNEIE LL_I2S_DisableIT_RXNE + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_DisableIT_RXNE(SPI_TypeDef *SPIx) +{ + LL_SPI_DisableIT_RXNE(SPIx); +} + +/** + * @brief Disable Tx buffer empty IT + * @rmtoll CR2 TXEIE LL_I2S_DisableIT_TXE + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_DisableIT_TXE(SPI_TypeDef *SPIx) +{ + LL_SPI_DisableIT_TXE(SPIx); +} + +/** + * @brief Check if ERR IT is enabled + * @rmtoll CR2 ERRIE LL_I2S_IsEnabledIT_ERR + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsEnabledIT_ERR(const SPI_TypeDef *SPIx) +{ + return LL_SPI_IsEnabledIT_ERR(SPIx); +} + +/** + * @brief Check if RXNE IT is enabled + * @rmtoll CR2 RXNEIE LL_I2S_IsEnabledIT_RXNE + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsEnabledIT_RXNE(const SPI_TypeDef *SPIx) +{ + return LL_SPI_IsEnabledIT_RXNE(SPIx); +} + +/** + * @brief Check if TXE IT is enabled + * @rmtoll CR2 TXEIE LL_I2S_IsEnabledIT_TXE + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). 
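+ * @note  Usage sketch for a simple polled transmit loop, as an alternative to the TXE
+ *        interrupt (illustrative only, assuming SPI2 is already configured and enabled as an
+ *        I2S master transmitter; samples[] and count are placeholder names):
+ * @code
+ *        for (uint32_t i = 0U; i < count; i++)
+ *        {
+ *          while (LL_I2S_IsActiveFlag_TXE(SPI2) == 0U) { }   // wait for Tx buffer empty
+ *          LL_I2S_TransmitData16(SPI2, samples[i]);
+ *        }
+ * @endcode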
+ */ +__STATIC_INLINE uint32_t LL_I2S_IsEnabledIT_TXE(const SPI_TypeDef *SPIx) +{ + return LL_SPI_IsEnabledIT_TXE(SPIx); +} + +/** + * @} + */ + +/** @defgroup I2S_LL_EF_DMA DMA Management + * @{ + */ + +/** + * @brief Enable DMA Rx + * @rmtoll CR2 RXDMAEN LL_I2S_EnableDMAReq_RX + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_EnableDMAReq_RX(SPI_TypeDef *SPIx) +{ + LL_SPI_EnableDMAReq_RX(SPIx); +} + +/** + * @brief Disable DMA Rx + * @rmtoll CR2 RXDMAEN LL_I2S_DisableDMAReq_RX + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_DisableDMAReq_RX(SPI_TypeDef *SPIx) +{ + LL_SPI_DisableDMAReq_RX(SPIx); +} + +/** + * @brief Check if DMA Rx is enabled + * @rmtoll CR2 RXDMAEN LL_I2S_IsEnabledDMAReq_RX + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsEnabledDMAReq_RX(const SPI_TypeDef *SPIx) +{ + return LL_SPI_IsEnabledDMAReq_RX(SPIx); +} + +/** + * @brief Enable DMA Tx + * @rmtoll CR2 TXDMAEN LL_I2S_EnableDMAReq_TX + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_EnableDMAReq_TX(SPI_TypeDef *SPIx) +{ + LL_SPI_EnableDMAReq_TX(SPIx); +} + +/** + * @brief Disable DMA Tx + * @rmtoll CR2 TXDMAEN LL_I2S_DisableDMAReq_TX + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_DisableDMAReq_TX(SPI_TypeDef *SPIx) +{ + LL_SPI_DisableDMAReq_TX(SPIx); +} + +/** + * @brief Check if DMA Tx is enabled + * @rmtoll CR2 TXDMAEN LL_I2S_IsEnabledDMAReq_TX + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsEnabledDMAReq_TX(const SPI_TypeDef *SPIx) +{ + return LL_SPI_IsEnabledDMAReq_TX(SPIx); +} + +/** + * @} + */ + +/** @defgroup I2S_LL_EF_DATA DATA Management + * @{ + */ + +/** + * @brief Read 16-Bits in data register + * @rmtoll DR DR LL_I2S_ReceiveData16 + * @param SPIx SPI Instance + * @retval RxData Value between Min_Data=0x0000 and Max_Data=0xFFFF + */ +__STATIC_INLINE uint16_t LL_I2S_ReceiveData16(SPI_TypeDef *SPIx) +{ + return LL_SPI_ReceiveData16(SPIx); +} + +/** + * @brief Write 16-Bits in data register + * @rmtoll DR DR LL_I2S_TransmitData16 + * @param SPIx SPI Instance + * @param TxData Value between Min_Data=0x0000 and Max_Data=0xFFFF + * @retval None + */ +__STATIC_INLINE void LL_I2S_TransmitData16(SPI_TypeDef *SPIx, uint16_t TxData) +{ + LL_SPI_TransmitData16(SPIx, TxData); +} + +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup I2S_LL_EF_Init Initialization and de-initialization functions + * @{ + */ + +ErrorStatus LL_I2S_DeInit(const SPI_TypeDef *SPIx); +ErrorStatus LL_I2S_Init(SPI_TypeDef *SPIx, LL_I2S_InitTypeDef *I2S_InitStruct); +void LL_I2S_StructInit(LL_I2S_InitTypeDef *I2S_InitStruct); +void LL_I2S_ConfigPrescaler(SPI_TypeDef *SPIx, uint32_t PrescalerLinear, uint32_t PrescalerParity); +#if defined (SPI_I2S_FULLDUPLEX_SUPPORT) +ErrorStatus LL_I2S_InitFullDuplex(SPI_TypeDef *I2Sxext, LL_I2S_InitTypeDef *I2S_InitStruct); +#endif /* SPI_I2S_FULLDUPLEX_SUPPORT */ + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* defined (SPI1) || defined (SPI2) || defined (SPI3) || defined (SPI4) || defined (SPI5) || defined(SPI6) */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F4xx_LL_SPI_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_system.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_system.h new file mode 100644 index 0000000..84ea5c4 --- /dev/null +++ 
b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_system.h @@ -0,0 +1,1711 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_system.h + * @author MCD Application Team + * @brief Header file of SYSTEM LL module. + * + ****************************************************************************** + * @attention + * + *Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + The LL SYSTEM driver contains a set of generic APIs that can be + used by user: + (+) Some of the FLASH features need to be handled in the SYSTEM file. + (+) Access to DBGCMU registers + (+) Access to SYSCFG registers + + @endverbatim + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_LL_SYSTEM_H +#define __STM32F4xx_LL_SYSTEM_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +#if defined (FLASH) || defined (SYSCFG) || defined (DBGMCU) + +/** @defgroup SYSTEM_LL SYSTEM + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup SYSTEM_LL_Private_Constants SYSTEM Private Constants + * @{ + */ + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ + +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ +/** @defgroup SYSTEM_LL_Exported_Constants SYSTEM Exported Constants + * @{ + */ + +/** @defgroup SYSTEM_LL_EC_REMAP SYSCFG REMAP +* @{ +*/ +#define LL_SYSCFG_REMAP_FLASH (uint32_t)0x00000000 /*!< Main Flash memory mapped at 0x00000000 */ +#define LL_SYSCFG_REMAP_SYSTEMFLASH SYSCFG_MEMRMP_MEM_MODE_0 /*!< System Flash memory mapped at 0x00000000 */ +#if defined(FSMC_Bank1) +#define LL_SYSCFG_REMAP_FSMC SYSCFG_MEMRMP_MEM_MODE_1 /*!< FSMC(NOR/PSRAM 1 and 2) mapped at 0x00000000 */ +#endif /* FSMC_Bank1 */ +#if defined(FMC_Bank1) +#define LL_SYSCFG_REMAP_FMC SYSCFG_MEMRMP_MEM_MODE_1 /*!< FMC(NOR/PSRAM 1 and 2) mapped at 0x00000000 */ +#define LL_SYSCFG_REMAP_SDRAM SYSCFG_MEMRMP_MEM_MODE_2 /*!< FMC/SDRAM mapped at 0x00000000 */ +#endif /* FMC_Bank1 */ +#define LL_SYSCFG_REMAP_SRAM (SYSCFG_MEMRMP_MEM_MODE_1 | SYSCFG_MEMRMP_MEM_MODE_0) /*!< SRAM1 mapped at 0x00000000 */ + +/** + * @} + */ + +#if defined(SYSCFG_PMC_MII_RMII_SEL) + /** @defgroup SYSTEM_LL_EC_PMC SYSCFG PMC +* @{ +*/ +#define LL_SYSCFG_PMC_ETHMII (uint32_t)0x00000000 /*!< ETH Media MII interface */ +#define LL_SYSCFG_PMC_ETHRMII (uint32_t)SYSCFG_PMC_MII_RMII_SEL /*!< ETH Media RMII interface */ + +/** + * @} + */ +#endif /* 
SYSCFG_PMC_MII_RMII_SEL */ + + + +#if defined(SYSCFG_MEMRMP_UFB_MODE) +/** @defgroup SYSTEM_LL_EC_BANKMODE SYSCFG BANK MODE + * @{ + */ +#define LL_SYSCFG_BANKMODE_BANK1 (uint32_t)0x00000000 /*!< Flash Bank 1 base address mapped at 0x0800 0000 (AXI) and 0x0020 0000 (TCM) + and Flash Bank 2 base address mapped at 0x0810 0000 (AXI) and 0x0030 0000 (TCM)*/ +#define LL_SYSCFG_BANKMODE_BANK2 SYSCFG_MEMRMP_UFB_MODE /*!< Flash Bank 2 base address mapped at 0x0800 0000 (AXI) and 0x0020 0000(TCM) + and Flash Bank 1 base address mapped at 0x0810 0000 (AXI) and 0x0030 0000(TCM) */ +/** + * @} + */ +#endif /* SYSCFG_MEMRMP_UFB_MODE */ +/** @defgroup SYSTEM_LL_EC_I2C_FASTMODEPLUS SYSCFG I2C FASTMODEPLUS + * @{ + */ +#if defined(SYSCFG_CFGR_FMPI2C1_SCL) +#define LL_SYSCFG_I2C_FASTMODEPLUS_SCL SYSCFG_CFGR_FMPI2C1_SCL /*!< Enable Fast Mode Plus on FMPI2C_SCL pin */ +#define LL_SYSCFG_I2C_FASTMODEPLUS_SDA SYSCFG_CFGR_FMPI2C1_SDA /*!< Enable Fast Mode Plus on FMPI2C_SDA pin*/ +#endif /* SYSCFG_CFGR_FMPI2C1_SCL */ +/** + * @} + */ + +/** @defgroup SYSTEM_LL_EC_EXTI_PORT SYSCFG EXTI PORT + * @{ + */ +#define LL_SYSCFG_EXTI_PORTA (uint32_t)0 /*!< EXTI PORT A */ +#define LL_SYSCFG_EXTI_PORTB (uint32_t)1 /*!< EXTI PORT B */ +#define LL_SYSCFG_EXTI_PORTC (uint32_t)2 /*!< EXTI PORT C */ +#define LL_SYSCFG_EXTI_PORTD (uint32_t)3 /*!< EXTI PORT D */ +#define LL_SYSCFG_EXTI_PORTE (uint32_t)4 /*!< EXTI PORT E */ +#if defined(GPIOF) +#define LL_SYSCFG_EXTI_PORTF (uint32_t)5 /*!< EXTI PORT F */ +#endif /* GPIOF */ +#if defined(GPIOG) +#define LL_SYSCFG_EXTI_PORTG (uint32_t)6 /*!< EXTI PORT G */ +#endif /* GPIOG */ +#define LL_SYSCFG_EXTI_PORTH (uint32_t)7 /*!< EXTI PORT H */ +#if defined(GPIOI) +#define LL_SYSCFG_EXTI_PORTI (uint32_t)8 /*!< EXTI PORT I */ +#endif /* GPIOI */ +#if defined(GPIOJ) +#define LL_SYSCFG_EXTI_PORTJ (uint32_t)9 /*!< EXTI PORT J */ +#endif /* GPIOJ */ +#if defined(GPIOK) +#define LL_SYSCFG_EXTI_PORTK (uint32_t)10 /*!< EXTI PORT k */ +#endif /* GPIOK */ +/** + * @} + */ + +/** @defgroup SYSTEM_LL_EC_EXTI_LINE SYSCFG EXTI LINE + * @{ + */ +#define LL_SYSCFG_EXTI_LINE0 (uint32_t)(0x000FU << 16 | 0) /*!< EXTI_POSITION_0 | EXTICR[0] */ +#define LL_SYSCFG_EXTI_LINE1 (uint32_t)(0x00F0U << 16 | 0) /*!< EXTI_POSITION_4 | EXTICR[0] */ +#define LL_SYSCFG_EXTI_LINE2 (uint32_t)(0x0F00U << 16 | 0) /*!< EXTI_POSITION_8 | EXTICR[0] */ +#define LL_SYSCFG_EXTI_LINE3 (uint32_t)(0xF000U << 16 | 0) /*!< EXTI_POSITION_12 | EXTICR[0] */ +#define LL_SYSCFG_EXTI_LINE4 (uint32_t)(0x000FU << 16 | 1) /*!< EXTI_POSITION_0 | EXTICR[1] */ +#define LL_SYSCFG_EXTI_LINE5 (uint32_t)(0x00F0U << 16 | 1) /*!< EXTI_POSITION_4 | EXTICR[1] */ +#define LL_SYSCFG_EXTI_LINE6 (uint32_t)(0x0F00U << 16 | 1) /*!< EXTI_POSITION_8 | EXTICR[1] */ +#define LL_SYSCFG_EXTI_LINE7 (uint32_t)(0xF000U << 16 | 1) /*!< EXTI_POSITION_12 | EXTICR[1] */ +#define LL_SYSCFG_EXTI_LINE8 (uint32_t)(0x000FU << 16 | 2) /*!< EXTI_POSITION_0 | EXTICR[2] */ +#define LL_SYSCFG_EXTI_LINE9 (uint32_t)(0x00F0U << 16 | 2) /*!< EXTI_POSITION_4 | EXTICR[2] */ +#define LL_SYSCFG_EXTI_LINE10 (uint32_t)(0x0F00U << 16 | 2) /*!< EXTI_POSITION_8 | EXTICR[2] */ +#define LL_SYSCFG_EXTI_LINE11 (uint32_t)(0xF000U << 16 | 2) /*!< EXTI_POSITION_12 | EXTICR[2] */ +#define LL_SYSCFG_EXTI_LINE12 (uint32_t)(0x000FU << 16 | 3) /*!< EXTI_POSITION_0 | EXTICR[3] */ +#define LL_SYSCFG_EXTI_LINE13 (uint32_t)(0x00F0U << 16 | 3) /*!< EXTI_POSITION_4 | EXTICR[3] */ +#define LL_SYSCFG_EXTI_LINE14 (uint32_t)(0x0F00U << 16 | 3) /*!< EXTI_POSITION_8 | EXTICR[3] */ +#define LL_SYSCFG_EXTI_LINE15 
(uint32_t)(0xF000U << 16 | 3) /*!< EXTI_POSITION_12 | EXTICR[3] */ +/** + * @} + */ + +/** @defgroup SYSTEM_LL_EC_TIMBREAK SYSCFG TIMER BREAK + * @{ + */ +#if defined(SYSCFG_CFGR2_LOCKUP_LOCK) +#define LL_SYSCFG_TIMBREAK_LOCKUP SYSCFG_CFGR2_LOCKUP_LOCK /*!< Enables and locks the LOCKUP output of CortexM4 + with Break Input of TIM1/8 */ +#define LL_SYSCFG_TIMBREAK_PVD SYSCFG_CFGR2_PVD_LOCK /*!< Enables and locks the PVD connection with TIM1/8 Break Input + and also the PVDE and PLS bits of the Power Control Interface */ +#endif /* SYSCFG_CFGR2_CLL */ +/** + * @} + */ + +#if defined(SYSCFG_MCHDLYCR_BSCKSEL) +/** @defgroup SYSTEM_LL_DFSDM_BitStream_ClockSource SYSCFG MCHDLY BCKKSEL + * @{ + */ +#define LL_SYSCFG_BITSTREAM_CLOCK_TIM2OC1 (uint32_t)0x00000000 +#define LL_SYSCFG_BITSTREAM_CLOCK_DFSDM2 SYSCFG_MCHDLYCR_BSCKSEL +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM_MCHDLYEN SYSCFG MCHDLY MCHDLYEN + * @{ + */ +#define LL_SYSCFG_DFSDM1_MCHDLYEN SYSCFG_MCHDLYCR_MCHDLY1EN +#define LL_SYSCFG_DFSDM2_MCHDLYEN SYSCFG_MCHDLYCR_MCHDLY2EN +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM_DataIn0_Source SYSCFG MCHDLY DFSDMD0SEL + * @{ + */ +#define LL_SYSCFG_DFSDM1_DataIn0 SYSCFG_MCHDLYCR_DFSDM1D0SEL +#define LL_SYSCFG_DFSDM2_DataIn0 SYSCFG_MCHDLYCR_DFSDM2D0SEL + +#define LL_SYSCFG_DFSDM1_DataIn0_PAD (uint32_t)((SYSCFG_MCHDLYCR_DFSDM1D0SEL << 16) | 0x00000000) +#define LL_SYSCFG_DFSDM1_DataIn0_DM (uint32_t)((SYSCFG_MCHDLYCR_DFSDM1D0SEL << 16) | SYSCFG_MCHDLYCR_DFSDM1D0SEL) +#define LL_SYSCFG_DFSDM2_DataIn0_PAD (uint32_t)((SYSCFG_MCHDLYCR_DFSDM2D0SEL << 16) | 0x00000000) +#define LL_SYSCFG_DFSDM2_DataIn0_DM (uint32_t)((SYSCFG_MCHDLYCR_DFSDM2D0SEL << 16) | SYSCFG_MCHDLYCR_DFSDM2D0SEL) +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM_DataIn2_Source SYSCFG MCHDLY DFSDMD2SEL + * @{ + */ +#define LL_SYSCFG_DFSDM1_DataIn2 SYSCFG_MCHDLYCR_DFSDM1D2SEL +#define LL_SYSCFG_DFSDM2_DataIn2 SYSCFG_MCHDLYCR_DFSDM2D2SEL + +#define LL_SYSCFG_DFSDM1_DataIn2_PAD (uint32_t)((SYSCFG_MCHDLYCR_DFSDM1D2SEL << 16) | 0x00000000) +#define LL_SYSCFG_DFSDM1_DataIn2_DM (uint32_t)((SYSCFG_MCHDLYCR_DFSDM1D2SEL << 16) | SYSCFG_MCHDLYCR_DFSDM1D2SEL) +#define LL_SYSCFG_DFSDM2_DataIn2_PAD (uint32_t)((SYSCFG_MCHDLYCR_DFSDM2D2SEL << 16) | 0x00000000) +#define LL_SYSCFG_DFSDM2_DataIn2_DM (uint32_t)((SYSCFG_MCHDLYCR_DFSDM2D2SEL << 16) | SYSCFG_MCHDLYCR_DFSDM2D2SEL) +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM1_TIM4OC2_BitstreamDistribution SYSCFG MCHDLY DFSDM1CK02SEL + * @{ + */ +#define LL_SYSCFG_DFSDM1_TIM4OC2_CLKIN0 (uint32_t)0x00000000 +#define LL_SYSCFG_DFSDM1_TIM4OC2_CLKIN2 SYSCFG_MCHDLYCR_DFSDM1CK02SEL +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM1_TIM4OC1_BitstreamDistribution SYSCFG MCHDLY DFSDM1CK13SEL + * @{ + */ +#define LL_SYSCFG_DFSDM1_TIM4OC1_CLKIN1 (uint32_t)0x00000000 +#define LL_SYSCFG_DFSDM1_TIM4OC1_CLKIN3 SYSCFG_MCHDLYCR_DFSDM1CK13SEL +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM1_CLKIN_SourceSelection SYSCFG MCHDLY DFSDMCFG + * @{ + */ +#define LL_SYSCFG_DFSDM1_CKIN_PAD (uint32_t)0x00000000 +#define LL_SYSCFG_DFSDM1_CKIN_DM SYSCFG_MCHDLYCR_DFSDM1CFG +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM1_CLKOUT_SourceSelection SYSCFG MCHDLY DFSDM1CKOSEL + * @{ + */ +#define LL_SYSCFG_DFSDM1_CKOUT (uint32_t)0x00000000 +#define LL_SYSCFG_DFSDM1_CKOUT_M27 SYSCFG_MCHDLYCR_DFSDM1CKOSEL +/** + * @} + */ + +/** @defgroup SYSTEM_LL_DFSDM2_DataIn4_SourceSelection SYSCFG MCHDLY DFSDM2D4SEL + * @{ + */ +#define LL_SYSCFG_DFSDM2_DataIn4_PAD (uint32_t)0x00000000 +#define LL_SYSCFG_DFSDM2_DataIn4_DM SYSCFG_MCHDLYCR_DFSDM2D4SEL +/** + * 
@} + */ +/** @defgroup SYSTEM_LL_DFSDM2_DataIn6_SourceSelection SYSCFG MCHDLY DFSDM2D6SEL + * @{ + */ +#define LL_SYSCFG_DFSDM2_DataIn6_PAD (uint32_t)0x00000000 +#define LL_SYSCFG_DFSDM2_DataIn6_DM SYSCFG_MCHDLYCR_DFSDM2D6SEL +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM2_TIM3OC4_BitstreamDistribution SYSCFG MCHDLY DFSDM2CK04SEL + * @{ + */ +#define LL_SYSCFG_DFSDM2_TIM3OC4_CLKIN0 (uint32_t)0x00000000 +#define LL_SYSCFG_DFSDM2_TIM3OC4_CLKIN4 SYSCFG_MCHDLYCR_DFSDM2CK04SEL +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM2_TIM3OC3_BitstreamDistribution SYSCFG MCHDLY DFSDM2CK15SEL + * @{ + */ +#define LL_SYSCFG_DFSDM2_TIM3OC3_CLKIN1 (uint32_t)0x00000000 +#define LL_SYSCFG_DFSDM2_TIM3OC3_CLKIN5 SYSCFG_MCHDLYCR_DFSDM2CK15SEL +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM2_TIM3OC2_BitstreamDistribution SYSCFG MCHDLY DFSDM2CK26SEL + * @{ + */ +#define LL_SYSCFG_DFSDM2_TIM3OC2_CLKIN2 (uint32_t)0x00000000 +#define LL_SYSCFG_DFSDM2_TIM3OC2_CLKIN6 SYSCFG_MCHDLYCR_DFSDM2CK26SEL +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM2_TIM3OC1_BitstreamDistribution SYSCFG MCHDLY DFSDM2CK37SEL + * @{ + */ +#define LL_SYSCFG_DFSDM2_TIM3OC1_CLKIN3 (uint32_t)0x00000000 +#define LL_SYSCFG_DFSDM2_TIM3OC1_CLKIN7 SYSCFG_MCHDLYCR_DFSDM2CK37SEL +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM2_CLKIN_SourceSelection SYSCFG MCHDLY DFSDM2CFG + * @{ + */ +#define LL_SYSCFG_DFSDM2_CKIN_PAD (uint32_t)0x00000000 +#define LL_SYSCFG_DFSDM2_CKIN_DM SYSCFG_MCHDLYCR_DFSDM2CFG +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM2_CLKOUT_SourceSelection SYSCFG MCHDLY DFSDM2CKOSEL + * @{ + */ +#define LL_SYSCFG_DFSDM2_CKOUT (uint32_t)0x00000000 +#define LL_SYSCFG_DFSDM2_CKOUT_M27 SYSCFG_MCHDLYCR_DFSDM2CKOSEL +/** + * @} + */ +#endif /* SYSCFG_MCHDLYCR_BSCKSEL */ + +/** @defgroup SYSTEM_LL_EC_TRACE DBGMCU TRACE Pin Assignment + * @{ + */ +#define LL_DBGMCU_TRACE_NONE 0x00000000U /*!< TRACE pins not assigned (default state) */ +#define LL_DBGMCU_TRACE_ASYNCH DBGMCU_CR_TRACE_IOEN /*!< TRACE pin assignment for Asynchronous Mode */ +#define LL_DBGMCU_TRACE_SYNCH_SIZE1 (DBGMCU_CR_TRACE_IOEN | DBGMCU_CR_TRACE_MODE_0) /*!< TRACE pin assignment for Synchronous Mode with a TRACEDATA size of 1 */ +#define LL_DBGMCU_TRACE_SYNCH_SIZE2 (DBGMCU_CR_TRACE_IOEN | DBGMCU_CR_TRACE_MODE_1) /*!< TRACE pin assignment for Synchronous Mode with a TRACEDATA size of 2 */ +#define LL_DBGMCU_TRACE_SYNCH_SIZE4 (DBGMCU_CR_TRACE_IOEN | DBGMCU_CR_TRACE_MODE) /*!< TRACE pin assignment for Synchronous Mode with a TRACEDATA size of 4 */ +/** + * @} + */ + +/** @defgroup SYSTEM_LL_EC_APB1_GRP1_STOP_IP DBGMCU APB1 GRP1 STOP IP + * @{ + */ +#if defined(DBGMCU_APB1_FZ_DBG_TIM2_STOP) +#define LL_DBGMCU_APB1_GRP1_TIM2_STOP DBGMCU_APB1_FZ_DBG_TIM2_STOP /*!< TIM2 counter stopped when core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_TIM2_STOP */ +#if defined(DBGMCU_APB1_FZ_DBG_TIM3_STOP) +#define LL_DBGMCU_APB1_GRP1_TIM3_STOP DBGMCU_APB1_FZ_DBG_TIM3_STOP /*!< TIM3 counter stopped when core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_TIM3_STOP */ +#if defined(DBGMCU_APB1_FZ_DBG_TIM4_STOP) +#define LL_DBGMCU_APB1_GRP1_TIM4_STOP DBGMCU_APB1_FZ_DBG_TIM4_STOP /*!< TIM4 counter stopped when core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_TIM4_STOP */ +#define LL_DBGMCU_APB1_GRP1_TIM5_STOP DBGMCU_APB1_FZ_DBG_TIM5_STOP /*!< TIM5 counter stopped when core is halted */ +#if defined(DBGMCU_APB1_FZ_DBG_TIM6_STOP) +#define LL_DBGMCU_APB1_GRP1_TIM6_STOP DBGMCU_APB1_FZ_DBG_TIM6_STOP /*!< TIM6 counter stopped when core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_TIM6_STOP */ +#if 
defined(DBGMCU_APB1_FZ_DBG_TIM7_STOP) +#define LL_DBGMCU_APB1_GRP1_TIM7_STOP DBGMCU_APB1_FZ_DBG_TIM7_STOP /*!< TIM7 counter stopped when core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_TIM7_STOP */ +#if defined(DBGMCU_APB1_FZ_DBG_TIM12_STOP) +#define LL_DBGMCU_APB1_GRP1_TIM12_STOP DBGMCU_APB1_FZ_DBG_TIM12_STOP /*!< TIM12 counter stopped when core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_TIM12_STOP */ +#if defined(DBGMCU_APB1_FZ_DBG_TIM13_STOP) +#define LL_DBGMCU_APB1_GRP1_TIM13_STOP DBGMCU_APB1_FZ_DBG_TIM13_STOP /*!< TIM13 counter stopped when core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_TIM13_STOP */ +#if defined(DBGMCU_APB1_FZ_DBG_TIM14_STOP) +#define LL_DBGMCU_APB1_GRP1_TIM14_STOP DBGMCU_APB1_FZ_DBG_TIM14_STOP /*!< TIM14 counter stopped when core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_TIM14_STOP */ +#if defined(DBGMCU_APB1_FZ_DBG_LPTIM_STOP) +#define LL_DBGMCU_APB1_GRP1_LPTIM_STOP DBGMCU_APB1_FZ_DBG_LPTIM_STOP /*!< LPTIM counter stopped when core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_LPTIM_STOP */ +#define LL_DBGMCU_APB1_GRP1_RTC_STOP DBGMCU_APB1_FZ_DBG_RTC_STOP /*!< RTC counter stopped when core is halted */ +#define LL_DBGMCU_APB1_GRP1_WWDG_STOP DBGMCU_APB1_FZ_DBG_WWDG_STOP /*!< Debug Window Watchdog stopped when Core is halted */ +#define LL_DBGMCU_APB1_GRP1_IWDG_STOP DBGMCU_APB1_FZ_DBG_IWDG_STOP /*!< Debug Independent Watchdog stopped when Core is halted */ +#define LL_DBGMCU_APB1_GRP1_I2C1_STOP DBGMCU_APB1_FZ_DBG_I2C1_SMBUS_TIMEOUT /*!< I2C1 SMBUS timeout mode stopped when Core is halted */ +#define LL_DBGMCU_APB1_GRP1_I2C2_STOP DBGMCU_APB1_FZ_DBG_I2C2_SMBUS_TIMEOUT /*!< I2C2 SMBUS timeout mode stopped when Core is halted */ +#if defined(DBGMCU_APB1_FZ_DBG_I2C3_SMBUS_TIMEOUT) +#define LL_DBGMCU_APB1_GRP1_I2C3_STOP DBGMCU_APB1_FZ_DBG_I2C3_SMBUS_TIMEOUT /*!< I2C3 SMBUS timeout mode stopped when Core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_I2C3_SMBUS_TIMEOUT */ +#if defined(DBGMCU_APB1_FZ_DBG_I2C4_SMBUS_TIMEOUT) +#define LL_DBGMCU_APB1_GRP1_I2C4_STOP DBGMCU_APB1_FZ_DBG_I2C4_SMBUS_TIMEOUT /*!< I2C4 SMBUS timeout mode stopped when Core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_I2C4_SMBUS_TIMEOUT */ +#if defined(DBGMCU_APB1_FZ_DBG_CAN1_STOP) +#define LL_DBGMCU_APB1_GRP1_CAN1_STOP DBGMCU_APB1_FZ_DBG_CAN1_STOP /*!< CAN1 debug stopped when Core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_CAN1_STOP */ +#if defined(DBGMCU_APB1_FZ_DBG_CAN2_STOP) +#define LL_DBGMCU_APB1_GRP1_CAN2_STOP DBGMCU_APB1_FZ_DBG_CAN2_STOP /*!< CAN2 debug stopped when Core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_CAN2_STOP */ +#if defined(DBGMCU_APB1_FZ_DBG_CAN3_STOP) +#define LL_DBGMCU_APB1_GRP1_CAN3_STOP DBGMCU_APB1_FZ_DBG_CAN3_STOP /*!< CAN3 debug stopped when Core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_CAN3_STOP */ +/** + * @} + */ + +/** @defgroup SYSTEM_LL_EC_APB2_GRP1_STOP_IP DBGMCU APB2 GRP1 STOP IP + * @{ + */ +#define LL_DBGMCU_APB2_GRP1_TIM1_STOP DBGMCU_APB2_FZ_DBG_TIM1_STOP /*!< TIM1 counter stopped when core is halted */ +#if defined(DBGMCU_APB2_FZ_DBG_TIM8_STOP) +#define LL_DBGMCU_APB2_GRP1_TIM8_STOP DBGMCU_APB2_FZ_DBG_TIM8_STOP /*!< TIM8 counter stopped when core is halted */ +#endif /* DBGMCU_APB2_FZ_DBG_TIM8_STOP */ +#define LL_DBGMCU_APB2_GRP1_TIM9_STOP DBGMCU_APB2_FZ_DBG_TIM9_STOP /*!< TIM9 counter stopped when core is halted */ +#if defined(DBGMCU_APB2_FZ_DBG_TIM10_STOP) +#define LL_DBGMCU_APB2_GRP1_TIM10_STOP DBGMCU_APB2_FZ_DBG_TIM10_STOP /*!< TIM10 counter stopped when core is halted */ +#endif /* DBGMCU_APB2_FZ_DBG_TIM10_STOP */ +#define LL_DBGMCU_APB2_GRP1_TIM11_STOP 
DBGMCU_APB2_FZ_DBG_TIM11_STOP /*!< TIM11 counter stopped when core is halted */ +/** + * @} + */ + +/** @defgroup SYSTEM_LL_EC_LATENCY FLASH LATENCY + * @{ + */ +#define LL_FLASH_LATENCY_0 FLASH_ACR_LATENCY_0WS /*!< FLASH Zero wait state */ +#define LL_FLASH_LATENCY_1 FLASH_ACR_LATENCY_1WS /*!< FLASH One wait state */ +#define LL_FLASH_LATENCY_2 FLASH_ACR_LATENCY_2WS /*!< FLASH Two wait states */ +#define LL_FLASH_LATENCY_3 FLASH_ACR_LATENCY_3WS /*!< FLASH Three wait states */ +#define LL_FLASH_LATENCY_4 FLASH_ACR_LATENCY_4WS /*!< FLASH Four wait states */ +#define LL_FLASH_LATENCY_5 FLASH_ACR_LATENCY_5WS /*!< FLASH five wait state */ +#define LL_FLASH_LATENCY_6 FLASH_ACR_LATENCY_6WS /*!< FLASH six wait state */ +#define LL_FLASH_LATENCY_7 FLASH_ACR_LATENCY_7WS /*!< FLASH seven wait states */ +#define LL_FLASH_LATENCY_8 FLASH_ACR_LATENCY_8WS /*!< FLASH eight wait states */ +#define LL_FLASH_LATENCY_9 FLASH_ACR_LATENCY_9WS /*!< FLASH nine wait states */ +#define LL_FLASH_LATENCY_10 FLASH_ACR_LATENCY_10WS /*!< FLASH ten wait states */ +#define LL_FLASH_LATENCY_11 FLASH_ACR_LATENCY_11WS /*!< FLASH eleven wait states */ +#define LL_FLASH_LATENCY_12 FLASH_ACR_LATENCY_12WS /*!< FLASH twelve wait states */ +#define LL_FLASH_LATENCY_13 FLASH_ACR_LATENCY_13WS /*!< FLASH thirteen wait states */ +#define LL_FLASH_LATENCY_14 FLASH_ACR_LATENCY_14WS /*!< FLASH fourteen wait states */ +#define LL_FLASH_LATENCY_15 FLASH_ACR_LATENCY_15WS /*!< FLASH fifteen wait states */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup SYSTEM_LL_Exported_Functions SYSTEM Exported Functions + * @{ + */ + +/** @defgroup SYSTEM_LL_EF_SYSCFG SYSCFG + * @{ + */ +/** + * @brief Set memory mapping at address 0x00000000 + * @rmtoll SYSCFG_MEMRMP MEM_MODE LL_SYSCFG_SetRemapMemory + * @param Memory This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_REMAP_FLASH + * @arg @ref LL_SYSCFG_REMAP_SYSTEMFLASH + * @arg @ref LL_SYSCFG_REMAP_SRAM + * @arg @ref LL_SYSCFG_REMAP_FSMC (*) + * @arg @ref LL_SYSCFG_REMAP_FMC (*) + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_SetRemapMemory(uint32_t Memory) +{ + MODIFY_REG(SYSCFG->MEMRMP, SYSCFG_MEMRMP_MEM_MODE, Memory); +} + +/** + * @brief Get memory mapping at address 0x00000000 + * @rmtoll SYSCFG_MEMRMP MEM_MODE LL_SYSCFG_GetRemapMemory + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_REMAP_FLASH + * @arg @ref LL_SYSCFG_REMAP_SYSTEMFLASH + * @arg @ref LL_SYSCFG_REMAP_SRAM + * @arg @ref LL_SYSCFG_REMAP_FSMC (*) + * @arg @ref LL_SYSCFG_REMAP_FMC (*) + */ +__STATIC_INLINE uint32_t LL_SYSCFG_GetRemapMemory(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->MEMRMP, SYSCFG_MEMRMP_MEM_MODE)); +} + +#if defined(SYSCFG_MEMRMP_SWP_FMC) +/** + * @brief Enables the FMC Memory Mapping Swapping + * @rmtoll SYSCFG_MEMRMP SWP_FMC LL_SYSCFG_EnableFMCMemorySwapping + * @note SDRAM is accessible at 0x60000000 and NOR/RAM + * is accessible at 0xC0000000 + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_EnableFMCMemorySwapping(void) +{ + SET_BIT(SYSCFG->MEMRMP, SYSCFG_MEMRMP_SWP_FMC_0); +} + +/** + * @brief Disables the FMC Memory Mapping Swapping + * @rmtoll SYSCFG_MEMRMP SWP_FMC LL_SYSCFG_DisableFMCMemorySwapping + * @note SDRAM is accessible at 0xC0000000 (default mapping) + * and NOR/RAM is accessible at 0x60000000 (default mapping) + * @retval None + */ +__STATIC_INLINE void 
LL_SYSCFG_DisableFMCMemorySwapping(void) +{ + CLEAR_BIT(SYSCFG->MEMRMP, SYSCFG_MEMRMP_SWP_FMC); +} + +#endif /* SYSCFG_MEMRMP_SWP_FMC */ +/** + * @brief Enables the Compensation cell Power Down + * @rmtoll SYSCFG_CMPCR CMP_PD LL_SYSCFG_EnableCompensationCell + * @note The I/O compensation cell can be used only when the device supply + * voltage ranges from 2.4 to 3.6 V + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_EnableCompensationCell(void) +{ + SET_BIT(SYSCFG->CMPCR, SYSCFG_CMPCR_CMP_PD); +} + +/** + * @brief Disables the Compensation cell Power Down + * @rmtoll SYSCFG_CMPCR CMP_PD LL_SYSCFG_DisableCompensationCell + * @note The I/O compensation cell can be used only when the device supply + * voltage ranges from 2.4 to 3.6 V + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DisableCompensationCell(void) +{ + CLEAR_BIT(SYSCFG->CMPCR, SYSCFG_CMPCR_CMP_PD); +} + +/** + * @brief Get Compensation Cell ready Flag + * @rmtoll SYSCFG_CMPCR READY LL_SYSCFG_IsActiveFlag_CMPCR + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SYSCFG_IsActiveFlag_CMPCR(void) +{ + return (READ_BIT(SYSCFG->CMPCR, SYSCFG_CMPCR_READY) == (SYSCFG_CMPCR_READY)); +} + +#if defined(SYSCFG_PMC_MII_RMII_SEL) +/** + * @brief Select Ethernet PHY interface + * @rmtoll SYSCFG_PMC MII_RMII_SEL LL_SYSCFG_SetPHYInterface + * @param Interface This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_PMC_ETHMII + * @arg @ref LL_SYSCFG_PMC_ETHRMII + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_SetPHYInterface(uint32_t Interface) +{ + MODIFY_REG(SYSCFG->PMC, SYSCFG_PMC_MII_RMII_SEL, Interface); +} + +/** + * @brief Get Ethernet PHY interface + * @rmtoll SYSCFG_PMC MII_RMII_SEL LL_SYSCFG_GetPHYInterface + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_PMC_ETHMII + * @arg @ref LL_SYSCFG_PMC_ETHRMII + * @retval None + */ +__STATIC_INLINE uint32_t LL_SYSCFG_GetPHYInterface(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->PMC, SYSCFG_PMC_MII_RMII_SEL)); +} +#endif /* SYSCFG_PMC_MII_RMII_SEL */ + + + +#if defined(SYSCFG_MEMRMP_UFB_MODE) +/** + * @brief Select Flash bank mode (Bank flashed at 0x08000000) + * @rmtoll SYSCFG_MEMRMP UFB_MODE LL_SYSCFG_SetFlashBankMode + * @param Bank This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_BANKMODE_BANK1 + * @arg @ref LL_SYSCFG_BANKMODE_BANK2 + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_SetFlashBankMode(uint32_t Bank) +{ + MODIFY_REG(SYSCFG->MEMRMP, SYSCFG_MEMRMP_UFB_MODE, Bank); +} + +/** + * @brief Get Flash bank mode (Bank flashed at 0x08000000) + * @rmtoll SYSCFG_MEMRMP UFB_MODE LL_SYSCFG_GetFlashBankMode + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_BANKMODE_BANK1 + * @arg @ref LL_SYSCFG_BANKMODE_BANK2 + */ +__STATIC_INLINE uint32_t LL_SYSCFG_GetFlashBankMode(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->MEMRMP, SYSCFG_MEMRMP_UFB_MODE)); +} +#endif /* SYSCFG_MEMRMP_UFB_MODE */ + +#if defined(SYSCFG_CFGR_FMPI2C1_SCL) +/** + * @brief Enable the I2C fast mode plus driving capability. 
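  /* Usage sketch (illustrative, not part of the generated header): select the RMII
     PHY interface with the LL_SYSCFG_SetPHYInterface() helper defined above, before
     the Ethernet MAC is configured. Assumes the SYSCFG clock is already enabled and
     that the device actually provides SYSCFG_PMC_MII_RMII_SEL. */
  LL_SYSCFG_SetPHYInterface(LL_SYSCFG_PMC_ETHRMII);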
+ * @rmtoll SYSCFG_CFGR FMPI2C1_SCL LL_SYSCFG_EnableFastModePlus\n + * SYSCFG_CFGR FMPI2C1_SDA LL_SYSCFG_EnableFastModePlus + * @param ConfigFastModePlus This parameter can be a combination of the following values: + * @arg @ref LL_SYSCFG_I2C_FASTMODEPLUS_SCL + * @arg @ref LL_SYSCFG_I2C_FASTMODEPLUS_SDA + * (*) value not defined in all devices + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_EnableFastModePlus(uint32_t ConfigFastModePlus) +{ + SET_BIT(SYSCFG->CFGR, ConfigFastModePlus); +} + +/** + * @brief Disable the I2C fast mode plus driving capability. + * @rmtoll SYSCFG_CFGR FMPI2C1_SCL LL_SYSCFG_DisableFastModePlus\n + * SYSCFG_CFGR FMPI2C1_SDA LL_SYSCFG_DisableFastModePlus\n + * @param ConfigFastModePlus This parameter can be a combination of the following values: + * @arg @ref LL_SYSCFG_I2C_FASTMODEPLUS_SCL + * @arg @ref LL_SYSCFG_I2C_FASTMODEPLUS_SDA + * (*) value not defined in all devices + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DisableFastModePlus(uint32_t ConfigFastModePlus) +{ + CLEAR_BIT(SYSCFG->CFGR, ConfigFastModePlus); +} +#endif /* SYSCFG_CFGR_FMPI2C1_SCL */ + +/** + * @brief Configure source input for the EXTI external interrupt. + * @rmtoll SYSCFG_EXTICR1 EXTIx LL_SYSCFG_SetEXTISource\n + * SYSCFG_EXTICR2 EXTIx LL_SYSCFG_SetEXTISource\n + * SYSCFG_EXTICR3 EXTIx LL_SYSCFG_SetEXTISource\n + * SYSCFG_EXTICR4 EXTIx LL_SYSCFG_SetEXTISource + * @param Port This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_EXTI_PORTA + * @arg @ref LL_SYSCFG_EXTI_PORTB + * @arg @ref LL_SYSCFG_EXTI_PORTC + * @arg @ref LL_SYSCFG_EXTI_PORTD + * @arg @ref LL_SYSCFG_EXTI_PORTE + * @arg @ref LL_SYSCFG_EXTI_PORTF (*) + * @arg @ref LL_SYSCFG_EXTI_PORTG (*) + * @arg @ref LL_SYSCFG_EXTI_PORTH + * + * (*) value not defined in all devices + * @param Line This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_EXTI_LINE0 + * @arg @ref LL_SYSCFG_EXTI_LINE1 + * @arg @ref LL_SYSCFG_EXTI_LINE2 + * @arg @ref LL_SYSCFG_EXTI_LINE3 + * @arg @ref LL_SYSCFG_EXTI_LINE4 + * @arg @ref LL_SYSCFG_EXTI_LINE5 + * @arg @ref LL_SYSCFG_EXTI_LINE6 + * @arg @ref LL_SYSCFG_EXTI_LINE7 + * @arg @ref LL_SYSCFG_EXTI_LINE8 + * @arg @ref LL_SYSCFG_EXTI_LINE9 + * @arg @ref LL_SYSCFG_EXTI_LINE10 + * @arg @ref LL_SYSCFG_EXTI_LINE11 + * @arg @ref LL_SYSCFG_EXTI_LINE12 + * @arg @ref LL_SYSCFG_EXTI_LINE13 + * @arg @ref LL_SYSCFG_EXTI_LINE14 + * @arg @ref LL_SYSCFG_EXTI_LINE15 + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_SetEXTISource(uint32_t Port, uint32_t Line) +{ + MODIFY_REG(SYSCFG->EXTICR[Line & 0xFF], (Line >> 16), Port << POSITION_VAL((Line >> 16))); +} + +/** + * @brief Get the configured defined for specific EXTI Line + * @rmtoll SYSCFG_EXTICR1 EXTIx LL_SYSCFG_GetEXTISource\n + * SYSCFG_EXTICR2 EXTIx LL_SYSCFG_GetEXTISource\n + * SYSCFG_EXTICR3 EXTIx LL_SYSCFG_GetEXTISource\n + * SYSCFG_EXTICR4 EXTIx LL_SYSCFG_GetEXTISource + * @param Line This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_EXTI_LINE0 + * @arg @ref LL_SYSCFG_EXTI_LINE1 + * @arg @ref LL_SYSCFG_EXTI_LINE2 + * @arg @ref LL_SYSCFG_EXTI_LINE3 + * @arg @ref LL_SYSCFG_EXTI_LINE4 + * @arg @ref LL_SYSCFG_EXTI_LINE5 + * @arg @ref LL_SYSCFG_EXTI_LINE6 + * @arg @ref LL_SYSCFG_EXTI_LINE7 + * @arg @ref LL_SYSCFG_EXTI_LINE8 + * @arg @ref LL_SYSCFG_EXTI_LINE9 + * @arg @ref LL_SYSCFG_EXTI_LINE10 + * @arg @ref LL_SYSCFG_EXTI_LINE11 + * @arg @ref LL_SYSCFG_EXTI_LINE12 + * @arg @ref LL_SYSCFG_EXTI_LINE13 + * @arg @ref LL_SYSCFG_EXTI_LINE14 + * @arg @ref LL_SYSCFG_EXTI_LINE15 + * @retval 
Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_EXTI_PORTA + * @arg @ref LL_SYSCFG_EXTI_PORTB + * @arg @ref LL_SYSCFG_EXTI_PORTC + * @arg @ref LL_SYSCFG_EXTI_PORTD + * @arg @ref LL_SYSCFG_EXTI_PORTE + * @arg @ref LL_SYSCFG_EXTI_PORTF (*) + * @arg @ref LL_SYSCFG_EXTI_PORTG (*) + * @arg @ref LL_SYSCFG_EXTI_PORTH + * (*) value not defined in all devices + */ +__STATIC_INLINE uint32_t LL_SYSCFG_GetEXTISource(uint32_t Line) +{ + return (uint32_t)(READ_BIT(SYSCFG->EXTICR[Line & 0xFF], (Line >> 16)) >> POSITION_VAL(Line >> 16)); +} + +#if defined(SYSCFG_CFGR2_LOCKUP_LOCK) +/** + * @brief Set connections to TIM1/8 break inputs + * @rmtoll SYSCFG_CFGR2 LockUp Lock LL_SYSCFG_SetTIMBreakInputs \n + * SYSCFG_CFGR2 PVD Lock LL_SYSCFG_SetTIMBreakInputs + * @param Break This parameter can be a combination of the following values: + * @arg @ref LL_SYSCFG_TIMBREAK_LOCKUP + * @arg @ref LL_SYSCFG_TIMBREAK_PVD + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_SetTIMBreakInputs(uint32_t Break) +{ + MODIFY_REG(SYSCFG->CFGR2, SYSCFG_CFGR2_LOCKUP_LOCK | SYSCFG_CFGR2_PVD_LOCK, Break); +} + +/** + * @brief Get connections to TIM1/8 Break inputs + * @rmtoll SYSCFG_CFGR2 LockUp Lock LL_SYSCFG_SetTIMBreakInputs \n + * SYSCFG_CFGR2 PVD Lock LL_SYSCFG_SetTIMBreakInputs + * @retval Returned value can be can be a combination of the following values: + * @arg @ref LL_SYSCFG_TIMBREAK_LOCKUP + * @arg @ref LL_SYSCFG_TIMBREAK_PVD + */ +__STATIC_INLINE uint32_t LL_SYSCFG_GetTIMBreakInputs(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->CFGR2, SYSCFG_CFGR2_LOCKUP_LOCK | SYSCFG_CFGR2_PVD_LOCK)); +} +#endif /* SYSCFG_CFGR2_LOCKUP_LOCK */ +#if defined(SYSCFG_MCHDLYCR_BSCKSEL) +/** + * @brief Select the DFSDM2 or TIM2_OC1 as clock source for the bitstream clock. + * @rmtoll SYSCFG_MCHDLYCR BSCKSEL LL_SYSCFG_DFSDM_SetBitstreamClockSourceSelection + * @param ClockSource This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_BITSTREAM_CLOCK_DFSDM2 + * @arg @ref LL_SYSCFG_BITSTREAM_CLOCK_TIM2OC1 + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM_SetBitstreamClockSourceSelection(uint32_t ClockSource) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_BSCKSEL, ClockSource); +} +/** + * @brief Get the DFSDM2 or TIM2_OC1 as clock source for the bitstream clock. 
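  /* Usage sketch (illustrative, not part of the generated header): route port C,
     pin 13 to EXTI line 13 with LL_SYSCFG_SetEXTISource() before configuring the
     EXTI line itself. Assumes the SYSCFG clock is already enabled on APB2. */
  LL_SYSCFG_SetEXTISource(LL_SYSCFG_EXTI_PORTC, LL_SYSCFG_EXTI_LINE13);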
+ * @rmtoll SYSCFG_MCHDLYCR BSCKSEL LL_SYSCFG_DFSDM_GetBitstreamClockSourceSelection + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_BITSTREAM_CLOCK_DFSDM2 + * @arg @ref LL_SYSCFG_BITSTREAM_CLOCK_TIM2OC1 + * @retval None + */ +__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM_GetBitstreamClockSourceSelection(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_BSCKSEL)); +} +/** + * @brief Enables the DFSDM1 or DFSDM2 Delay clock + * @rmtoll SYSCFG_MCHDLYCR MCHDLYEN LL_SYSCFG_DFSDM_EnableDelayClock + * @param MCHDLY This parameter can be one of the following values + * @arg @ref LL_SYSCFG_DFSDM1_MCHDLYEN + * @arg @ref LL_SYSCFG_DFSDM2_MCHDLYEN + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM_EnableDelayClock(uint32_t MCHDLY) +{ + SET_BIT(SYSCFG->MCHDLYCR, MCHDLY); +} + +/** + * @brief Disables the DFSDM1 or the DFSDM2 Delay clock + * @rmtoll SYSCFG_MCHDLYCR MCHDLY1EN LL_SYSCFG_DFSDM1_DisableDelayClock + * @param MCHDLY This parameter can be one of the following values + * @arg @ref LL_SYSCFG_DFSDM1_MCHDLYEN + * @arg @ref LL_SYSCFG_DFSDM2_MCHDLYEN + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM_DisableDelayClock(uint32_t MCHDLY) +{ + CLEAR_BIT(SYSCFG->MCHDLYCR, MCHDLY); +} + +/** + * @brief Select the source for DFSDM1 or DFSDM2 DatIn0 + * @rmtoll SYSCFG_MCHDLYCR DFSDMD0SEL LL_SYSCFG_DFSDM_SetDataIn0Source + * @param Source This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM1_DataIn0_PAD + * @arg @ref LL_SYSCFG_DFSDM1_DataIn0_DM + * @arg @ref LL_SYSCFG_DFSDM2_DataIn0_PAD + * @arg @ref LL_SYSCFG_DFSDM2_DataIn0_DM + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM_SetDataIn0Source(uint32_t Source) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, (Source >> 16), (Source & 0x0000FFFF)); +} +/** + * @brief Get the source for DFSDM1 or DFSDM2 DatIn0. + * @rmtoll SYSCFG_MCHDLYCR DFSDMD0SEL LL_SYSCFG_DFSDM_GetDataIn0Source + * @param Source This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM1_DataIn0 + * @arg @ref LL_SYSCFG_DFSDM2_DataIn0 + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM1_DataIn0_PAD + * @arg @ref LL_SYSCFG_DFSDM1_DataIn0_DM + * @arg @ref LL_SYSCFG_DFSDM2_DataIn0_PAD + * @arg @ref LL_SYSCFG_DFSDM2_DataIn0_DM + * @retval None + */ +__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM_GetDataIn0Source(uint32_t Source) +{ + return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, Source)); +} +/** + * @brief Select the source for DFSDM1 or DFSDM2 DatIn2 + * @rmtoll SYSCFG_MCHDLYCR DFSDMD2SEL LL_SYSCFG_DFSDM_SetDataIn2Source + * @param Source This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM1_DataIn2_PAD + * @arg @ref LL_SYSCFG_DFSDM1_DataIn2_DM + * @arg @ref LL_SYSCFG_DFSDM2_DataIn2_PAD + * @arg @ref LL_SYSCFG_DFSDM2_DataIn2_DM + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM_SetDataIn2Source(uint32_t Source) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, (Source >> 16), (Source & 0x0000FFFF)); +} +/** + * @brief Get the source for DFSDM1 or DFSDM2 DatIn2. 
+ * @rmtoll SYSCFG_MCHDLYCR DFSDMD2SEL LL_SYSCFG_DFSDM_GetDataIn2Source + * @param Source This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM1_DataIn2 + * @arg @ref LL_SYSCFG_DFSDM2_DataIn2 + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM1_DataIn2_PAD + * @arg @ref LL_SYSCFG_DFSDM1_DataIn2_DM + * @arg @ref LL_SYSCFG_DFSDM2_DataIn2_PAD + * @arg @ref LL_SYSCFG_DFSDM2_DataIn2_DM + * @retval None + */ +__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM_GetDataIn2Source(uint32_t Source) +{ + return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, Source)); +} + +/** + * @brief Select the distribution of the bitsream lock gated by TIM4 OC2 + * @rmtoll SYSCFG_MCHDLYCR DFSDM1CK02SEL LL_SYSCFG_DFSDM1_SetTIM4OC2BitStreamDistribution + * @param Source This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM1_TIM4OC2_CLKIN0 + * @arg @ref LL_SYSCFG_DFSDM1_TIM4OC2_CLKIN2 + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM1_SetTIM4OC2BitStreamDistribution(uint32_t Source) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM1CK02SEL, Source); +} +/** + * @brief Get the distribution of the bitsream lock gated by TIM4 OC2 + * @rmtoll SYSCFG_MCHDLYCR DFSDM1D2SEL LL_SYSCFG_DFSDM1_GetTIM4OC2BitStreamDistribution + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM1_TIM4OC2_CLKIN0 + * @arg @ref LL_SYSCFG_DFSDM1_TIM4OC2_CLKIN2 + * @retval None + */ +__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM1_GetTIM4OC2BitStreamDistribution(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM1CK02SEL)); +} + +/** + * @brief Select the distribution of the bitsream lock gated by TIM4 OC1 + * @rmtoll SYSCFG_MCHDLYCR DFSDM1CK13SEL LL_SYSCFG_DFSDM1_SetTIM4OC1BitStreamDistribution + * @param Source This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM1_TIM4OC1_CLKIN1 + * @arg @ref LL_SYSCFG_DFSDM1_TIM4OC1_CLKIN3 + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM1_SetTIM4OC1BitStreamDistribution(uint32_t Source) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM1CK13SEL, Source); +} +/** + * @brief Get the distribution of the bitsream lock gated by TIM4 OC1 + * @rmtoll SYSCFG_MCHDLYCR DFSDM1D2SEL LL_SYSCFG_DFSDM1_GetTIM4OC1BitStreamDistribution + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM1_TIM4OC1_CLKIN1 + * @arg @ref LL_SYSCFG_DFSDM1_TIM4OC1_CLKIN3 + * @retval None + */ +__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM1_GetTIM4OC1BitStreamDistribution(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM1CK13SEL)); +} + +/** + * @brief Select the DFSDM1 Clock In + * @rmtoll SYSCFG_MCHDLYCR DFSDM1CFG LL_SYSCFG_DFSDM1_SetClockInSourceSelection + * @param ClockSource This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM1_CKIN_PAD + * @arg @ref LL_SYSCFG_DFSDM1_CKIN_DM + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM1_SetClockInSourceSelection(uint32_t ClockSource) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM1CFG, ClockSource); +} +/** + * @brief GET the DFSDM1 Clock In + * @rmtoll SYSCFG_MCHDLYCR DFSDM1CFG LL_SYSCFG_DFSDM1_GetClockInSourceSelection + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM1_CKIN_PAD + * @arg @ref LL_SYSCFG_DFSDM1_CKIN_DM + * @retval None + */ +__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM1_GetClockInSourceSelection(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, 
SYSCFG_MCHDLYCR_DFSDM1CFG)); +} + +/** + * @brief Select the DFSDM1 Clock Out + * @rmtoll SYSCFG_MCHDLYCR DFSDM1CKOSEL LL_SYSCFG_DFSDM1_SetClockOutSourceSelection + * @param ClockSource This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM1_CKOUT + * @arg @ref LL_SYSCFG_DFSDM1_CKOUT_M27 + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM1_SetClockOutSourceSelection(uint32_t ClockSource) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM1CKOSEL, ClockSource); +} +/** + * @brief GET the DFSDM1 Clock Out + * @rmtoll SYSCFG_MCHDLYCR DFSDM1CKOSEL LL_SYSCFG_DFSDM1_GetClockOutSourceSelection + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM1_CKOUT + * @arg @ref LL_SYSCFG_DFSDM1_CKOUT_M27 + * @retval None + */ +__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM1_GetClockOutSourceSelection(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM1CKOSEL)); +} + +/** + * @brief Enables the DFSDM2 Delay clock + * @rmtoll SYSCFG_MCHDLYCR MCHDLY2EN LL_SYSCFG_DFSDM2_EnableDelayClock + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM2_EnableDelayClock(void) +{ + SET_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_MCHDLY2EN); +} + +/** + * @brief Disables the DFSDM2 Delay clock + * @rmtoll SYSCFG_MCHDLYCR MCHDLY2EN LL_SYSCFG_DFSDM2_DisableDelayClock + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM2_DisableDelayClock(void) +{ + CLEAR_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_MCHDLY2EN); +} +/** + * @brief Select the source for DFSDM2 DatIn0 + * @rmtoll SYSCFG_MCHDLYCR DFSDM2D0SEL LL_SYSCFG_DFSDM2_SetDataIn0Source + * @param Source This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_DataIn0_PAD + * @arg @ref LL_SYSCFG_DFSDM2_DataIn0_DM + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM2_SetDataIn0Source(uint32_t Source) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2D0SEL, Source); +} +/** + * @brief Get the source for DFSDM2 DatIn0. + * @rmtoll SYSCFG_MCHDLYCR DFSDM2D0SEL LL_SYSCFG_DFSDM2_GetDataIn0Source + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_DataIn0_PAD + * @arg @ref LL_SYSCFG_DFSDM2_DataIn0_DM + * @retval None + */ +__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM2_GetDataIn0Source(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2D0SEL)); +} + +/** + * @brief Select the source for DFSDM2 DatIn2 + * @rmtoll SYSCFG_MCHDLYCR DFSDM2D2SEL LL_SYSCFG_DFSDM2_SetDataIn2Source + * @param Source This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_DataIn2_PAD + * @arg @ref LL_SYSCFG_DFSDM2_DataIn2_DM + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM2_SetDataIn2Source(uint32_t Source) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2D2SEL, Source); +} +/** + * @brief Get the source for DFSDM2 DatIn2. 
+ * @rmtoll SYSCFG_MCHDLYCR DFSDM2D2SEL LL_SYSCFG_DFSDM2_GetDataIn2Source + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_DataIn2_PAD + * @arg @ref LL_SYSCFG_DFSDM2_DataIn2_DM + * @retval None + */ +__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM2_GetDataIn2Source(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2D2SEL)); +} + +/** + * @brief Select the source for DFSDM2 DatIn4 + * @rmtoll SYSCFG_MCHDLYCR DFSDM2D4SEL LL_SYSCFG_DFSDM2_SetDataIn4Source + * @param Source This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_DataIn4_PAD + * @arg @ref LL_SYSCFG_DFSDM2_DataIn4_DM + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM2_SetDataIn4Source(uint32_t Source) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2D4SEL, Source); +} +/** + * @brief Get the source for DFSDM2 DatIn4. + * @rmtoll SYSCFG_MCHDLYCR DFSDM2D4SEL LL_SYSCFG_DFSDM2_GetDataIn4Source + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_DataIn4_PAD + * @arg @ref LL_SYSCFG_DFSDM2_DataIn4_DM + * @retval None + */ +__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM2_GetDataIn4Source(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2D4SEL)); +} + +/** + * @brief Select the source for DFSDM2 DatIn6 + * @rmtoll SYSCFG_MCHDLYCR DFSDM2D6SEL LL_SYSCFG_DFSDM2_SetDataIn6Source + * @param Source This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_DataIn6_PAD + * @arg @ref LL_SYSCFG_DFSDM2_DataIn6_DM + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM2_SetDataIn6Source(uint32_t Source) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2D6SEL, Source); +} +/** + * @brief Get the source for DFSDM2 DatIn6. 
+ * @rmtoll SYSCFG_MCHDLYCR DFSDM2D6SEL LL_SYSCFG_DFSDM2_GetDataIn6Source + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_DataIn6_PAD + * @arg @ref LL_SYSCFG_DFSDM2_DataIn6_DM + * @retval None + */ +__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM2_GetDataIn6Source(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2D6SEL)); +} + +/** + * @brief Select the distribution of the bitsream lock gated by TIM3 OC4 + * @rmtoll SYSCFG_MCHDLYCR DFSDM2CK04SEL LL_SYSCFG_DFSDM2_SetTIM3OC4BitStreamDistribution + * @param Source This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC4_CLKIN0 + * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC4_CLKIN4 + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM2_SetTIM3OC4BitStreamDistribution(uint32_t Source) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2CK04SEL, Source); +} +/** + * @brief Get the distribution of the bitsream lock gated by TIM3 OC4 + * @rmtoll SYSCFG_MCHDLYCR DFSDM2CK04SEL LL_SYSCFG_DFSDM2_GetTIM3OC4BitStreamDistribution + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC4_CLKIN0 + * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC4_CLKIN4 + * @retval None + */ +__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM2_GetTIM3OC4BitStreamDistribution(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2CK04SEL)); +} + +/** + * @brief Select the distribution of the bitsream lock gated by TIM3 OC3 + * @rmtoll SYSCFG_MCHDLYCR DFSDM2CK15SEL LL_SYSCFG_DFSDM2_SetTIM3OC3BitStreamDistribution + * @param Source This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC3_CLKIN1 + * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC3_CLKIN5 + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM2_SetTIM3OC3BitStreamDistribution(uint32_t Source) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2CK15SEL, Source); +} +/** + * @brief Get the distribution of the bitsream lock gated by TIM3 OC4 + * @rmtoll SYSCFG_MCHDLYCR DFSDM2CK04SEL LL_SYSCFG_DFSDM2_GetTIM3OC3BitStreamDistribution + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC3_CLKIN1 + * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC3_CLKIN5 + * @retval None + */ +__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM2_GetTIM3OC3BitStreamDistribution(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2CK15SEL)); +} + +/** + * @brief Select the distribution of the bitsream lock gated by TIM3 OC2 + * @rmtoll SYSCFG_MCHDLYCR DFSDM2CK26SEL LL_SYSCFG_DFSDM2_SetTIM3OC2BitStreamDistribution + * @param Source This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC2_CLKIN2 + * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC2_CLKIN6 + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM2_SetTIM3OC2BitStreamDistribution(uint32_t Source) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2CK26SEL, Source); +} +/** + * @brief Get the distribution of the bitsream lock gated by TIM3 OC2 + * @rmtoll SYSCFG_MCHDLYCR DFSDM2CK04SEL LL_SYSCFG_DFSDM2_GetTIM3OC2BitStreamDistribution + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC2_CLKIN2 + * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC2_CLKIN6 + * @retval None + */ +__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM2_GetTIM3OC2BitStreamDistribution(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2CK26SEL)); +} + +/** + * @brief Select the distribution of the bitsream lock 
gated by TIM3 OC1 + * @rmtoll SYSCFG_MCHDLYCR DFSDM2CK37SEL LL_SYSCFG_DFSDM2_SetTIM3OC1BitStreamDistribution + * @param Source This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC1_CLKIN3 + * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC1_CLKIN7 + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM2_SetTIM3OC1BitStreamDistribution(uint32_t Source) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2CK37SEL, Source); +} +/** + * @brief Get the distribution of the bitsream lock gated by TIM3 OC1 + * @rmtoll SYSCFG_MCHDLYCR DFSDM2CK37SEL LL_SYSCFG_DFSDM2_GetTIM3OC1BitStreamDistribution + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC1_CLKIN3 + * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC1_CLKIN7 + * @retval None + */ +__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM2_GetTIM3OC1BitStreamDistribution(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2CK37SEL)); +} + +/** + * @brief Select the DFSDM2 Clock In + * @rmtoll SYSCFG_MCHDLYCR DFSDM2CFG LL_SYSCFG_DFSDM2_SetClockInSourceSelection + * @param ClockSource This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_CKIN_PAD + * @arg @ref LL_SYSCFG_DFSDM2_CKIN_DM + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM2_SetClockInSourceSelection(uint32_t ClockSource) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2CFG, ClockSource); +} +/** + * @brief GET the DFSDM2 Clock In + * @rmtoll SYSCFG_MCHDLYCR DFSDM2CFG LL_SYSCFG_DFSDM2_GetClockInSourceSelection + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_CKIN_PAD + * @arg @ref LL_SYSCFG_DFSDM2_CKIN_DM + * @retval None + */ +__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM2_GetClockInSourceSelection(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2CFG)); +} + +/** + * @brief Select the DFSDM2 Clock Out + * @rmtoll SYSCFG_MCHDLYCR DFSDM2CKOSEL LL_SYSCFG_DFSDM2_SetClockOutSourceSelection + * @param ClockSource This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_CKOUT + * @arg @ref LL_SYSCFG_DFSDM2_CKOUT_M27 + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM2_SetClockOutSourceSelection(uint32_t ClockSource) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2CKOSEL, ClockSource); +} +/** + * @brief GET the DFSDM2 Clock Out + * @rmtoll SYSCFG_MCHDLYCR DFSDM2CKOSEL LL_SYSCFG_DFSDM2_GetClockOutSourceSelection + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_CKOUT + * @arg @ref LL_SYSCFG_DFSDM2_CKOUT_M27 + * @retval None + */ +__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM2_GetClockOutSourceSelection(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2CKOSEL)); +} + +#endif /* SYSCFG_MCHDLYCR_BSCKSEL */ +/** + * @} + */ + + +/** @defgroup SYSTEM_LL_EF_DBGMCU DBGMCU + * @{ + */ + +/** + * @brief Return the device identifier + * @note For STM32F405/407xx and STM32F415/417xx devices, the device ID is 0x413 + * @note For STM32F42xxx and STM32F43xxx devices, the device ID is 0x419 + * @note For STM32F401xx devices, the device ID is 0x423 + * @note For STM32F401xx devices, the device ID is 0x433 + * @note For STM32F411xx devices, the device ID is 0x431 + * @note For STM32F410xx devices, the device ID is 0x458 + * @note For STM32F412xx devices, the device ID is 0x441 + * @note For STM32F413xx and STM32423xx devices, the device ID is 0x463 + * @note For STM32F446xx devices, the device ID is 0x421 + * @note For 
STM32F469xx and STM32F479xx devices, the device ID is 0x434 + * @rmtoll DBGMCU_IDCODE DEV_ID LL_DBGMCU_GetDeviceID + * @retval Values between Min_Data=0x00 and Max_Data=0xFFF + */ +__STATIC_INLINE uint32_t LL_DBGMCU_GetDeviceID(void) +{ + return (uint32_t)(READ_BIT(DBGMCU->IDCODE, DBGMCU_IDCODE_DEV_ID)); +} + +/** + * @brief Return the device revision identifier + * @note This field indicates the revision of the device. + For example, it is read as RevA -> 0x1000, Cat 2 revZ -> 0x1001, rev1 -> 0x1003, rev2 ->0x1007, revY -> 0x100F for STM32F405/407xx and STM32F415/417xx devices + For example, it is read as RevA -> 0x1000, Cat 2 revY -> 0x1003, rev1 -> 0x1007, rev3 ->0x2001 for STM32F42xxx and STM32F43xxx devices + For example, it is read as RevZ -> 0x1000, Cat 2 revA -> 0x1001 for STM32F401xB/C devices + For example, it is read as RevA -> 0x1000, Cat 2 revZ -> 0x1001 for STM32F401xD/E devices + For example, it is read as RevA -> 0x1000 for STM32F411xx,STM32F413/423xx,STM32F469/423xx, STM32F446xx and STM32F410xx devices + For example, it is read as RevZ -> 0x1001, Cat 2 revB -> 0x2000, revC -> 0x3000 for STM32F412xx devices + * @rmtoll DBGMCU_IDCODE REV_ID LL_DBGMCU_GetRevisionID + * @retval Values between Min_Data=0x00 and Max_Data=0xFFFF + */ +__STATIC_INLINE uint32_t LL_DBGMCU_GetRevisionID(void) +{ + return (uint32_t)(READ_BIT(DBGMCU->IDCODE, DBGMCU_IDCODE_REV_ID) >> DBGMCU_IDCODE_REV_ID_Pos); +} + +/** + * @brief Enable the Debug Module during SLEEP mode + * @rmtoll DBGMCU_CR DBG_SLEEP LL_DBGMCU_EnableDBGSleepMode + * @retval None + */ +__STATIC_INLINE void LL_DBGMCU_EnableDBGSleepMode(void) +{ + SET_BIT(DBGMCU->CR, DBGMCU_CR_DBG_SLEEP); +} + +/** + * @brief Disable the Debug Module during SLEEP mode + * @rmtoll DBGMCU_CR DBG_SLEEP LL_DBGMCU_DisableDBGSleepMode + * @retval None + */ +__STATIC_INLINE void LL_DBGMCU_DisableDBGSleepMode(void) +{ + CLEAR_BIT(DBGMCU->CR, DBGMCU_CR_DBG_SLEEP); +} + +/** + * @brief Enable the Debug Module during STOP mode + * @rmtoll DBGMCU_CR DBG_STOP LL_DBGMCU_EnableDBGStopMode + * @retval None + */ +__STATIC_INLINE void LL_DBGMCU_EnableDBGStopMode(void) +{ + SET_BIT(DBGMCU->CR, DBGMCU_CR_DBG_STOP); +} + +/** + * @brief Disable the Debug Module during STOP mode + * @rmtoll DBGMCU_CR DBG_STOP LL_DBGMCU_DisableDBGStopMode + * @retval None + */ +__STATIC_INLINE void LL_DBGMCU_DisableDBGStopMode(void) +{ + CLEAR_BIT(DBGMCU->CR, DBGMCU_CR_DBG_STOP); +} + +/** + * @brief Enable the Debug Module during STANDBY mode + * @rmtoll DBGMCU_CR DBG_STANDBY LL_DBGMCU_EnableDBGStandbyMode + * @retval None + */ +__STATIC_INLINE void LL_DBGMCU_EnableDBGStandbyMode(void) +{ + SET_BIT(DBGMCU->CR, DBGMCU_CR_DBG_STANDBY); +} + +/** + * @brief Disable the Debug Module during STANDBY mode + * @rmtoll DBGMCU_CR DBG_STANDBY LL_DBGMCU_DisableDBGStandbyMode + * @retval None + */ +__STATIC_INLINE void LL_DBGMCU_DisableDBGStandbyMode(void) +{ + CLEAR_BIT(DBGMCU->CR, DBGMCU_CR_DBG_STANDBY); +} + +/** + * @brief Set Trace pin assignment control + * @rmtoll DBGMCU_CR TRACE_IOEN LL_DBGMCU_SetTracePinAssignment\n + * DBGMCU_CR TRACE_MODE LL_DBGMCU_SetTracePinAssignment + * @param PinAssignment This parameter can be one of the following values: + * @arg @ref LL_DBGMCU_TRACE_NONE + * @arg @ref LL_DBGMCU_TRACE_ASYNCH + * @arg @ref LL_DBGMCU_TRACE_SYNCH_SIZE1 + * @arg @ref LL_DBGMCU_TRACE_SYNCH_SIZE2 + * @arg @ref LL_DBGMCU_TRACE_SYNCH_SIZE4 + * @retval None + */ +__STATIC_INLINE void LL_DBGMCU_SetTracePinAssignment(uint32_t PinAssignment) +{ + MODIFY_REG(DBGMCU->CR, DBGMCU_CR_TRACE_IOEN | 
DBGMCU_CR_TRACE_MODE, PinAssignment); +} + +/** + * @brief Get Trace pin assignment control + * @rmtoll DBGMCU_CR TRACE_IOEN LL_DBGMCU_GetTracePinAssignment\n + * DBGMCU_CR TRACE_MODE LL_DBGMCU_GetTracePinAssignment + * @retval Returned value can be one of the following values: + * @arg @ref LL_DBGMCU_TRACE_NONE + * @arg @ref LL_DBGMCU_TRACE_ASYNCH + * @arg @ref LL_DBGMCU_TRACE_SYNCH_SIZE1 + * @arg @ref LL_DBGMCU_TRACE_SYNCH_SIZE2 + * @arg @ref LL_DBGMCU_TRACE_SYNCH_SIZE4 + */ +__STATIC_INLINE uint32_t LL_DBGMCU_GetTracePinAssignment(void) +{ + return (uint32_t)(READ_BIT(DBGMCU->CR, DBGMCU_CR_TRACE_IOEN | DBGMCU_CR_TRACE_MODE)); +} + +/** + * @brief Freeze APB1 peripherals (group1 peripherals) + * @rmtoll DBGMCU_APB1_FZ DBG_TIM2_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM3_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM4_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM5_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM6_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM7_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM12_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM13_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM14_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_LPTIM_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_RTC_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_WWDG_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_IWDG_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_I2C1_SMBUS_TIMEOUT LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_I2C2_SMBUS_TIMEOUT LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_I2C3_SMBUS_TIMEOUT LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_I2C4_SMBUS_TIMEOUT LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_CAN1_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_CAN2_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_CAN3_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM2_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM3_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM4_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM5_STOP + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM6_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM7_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM12_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM13_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM14_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_LPTIM_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_RTC_STOP + * @arg @ref LL_DBGMCU_APB1_GRP1_WWDG_STOP + * @arg @ref LL_DBGMCU_APB1_GRP1_IWDG_STOP + * @arg @ref LL_DBGMCU_APB1_GRP1_I2C1_STOP + * @arg @ref LL_DBGMCU_APB1_GRP1_I2C2_STOP + * @arg @ref LL_DBGMCU_APB1_GRP1_I2C3_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_I2C4_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_CAN1_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_CAN2_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_CAN3_STOP (*) + * + * (*) value not defined in all devices. 
+ * @retval None + */ +__STATIC_INLINE void LL_DBGMCU_APB1_GRP1_FreezePeriph(uint32_t Periphs) +{ + SET_BIT(DBGMCU->APB1FZ, Periphs); +} + +/** + * @brief Unfreeze APB1 peripherals (group1 peripherals) + * @rmtoll DBGMCU_APB1_FZ DBG_TIM2_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM3_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM4_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM5_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM6_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM7_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM12_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM13_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM14_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_LPTIM_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_RTC_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_WWDG_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_IWDG_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_I2C1_SMBUS_TIMEOUT LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_I2C2_SMBUS_TIMEOUT LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_I2C3_SMBUS_TIMEOUT LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_I2C4_SMBUS_TIMEOUT LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_CAN1_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_CAN2_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_CAN3_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM2_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM3_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM4_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM5_STOP + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM6_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM7_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM12_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM13_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM14_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_LPTIM_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_RTC_STOP + * @arg @ref LL_DBGMCU_APB1_GRP1_WWDG_STOP + * @arg @ref LL_DBGMCU_APB1_GRP1_IWDG_STOP + * @arg @ref LL_DBGMCU_APB1_GRP1_I2C1_STOP + * @arg @ref LL_DBGMCU_APB1_GRP1_I2C2_STOP + * @arg @ref LL_DBGMCU_APB1_GRP1_I2C3_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_I2C4_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_CAN1_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_CAN2_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_CAN3_STOP (*) + * + * (*) value not defined in all devices. 
+ * @retval None + */ +__STATIC_INLINE void LL_DBGMCU_APB1_GRP1_UnFreezePeriph(uint32_t Periphs) +{ + CLEAR_BIT(DBGMCU->APB1FZ, Periphs); +} + +/** + * @brief Freeze APB2 peripherals + * @rmtoll DBGMCU_APB2_FZ DBG_TIM1_STOP LL_DBGMCU_APB2_GRP1_FreezePeriph\n + * DBGMCU_APB2_FZ DBG_TIM8_STOP LL_DBGMCU_APB2_GRP1_FreezePeriph\n + * DBGMCU_APB2_FZ DBG_TIM9_STOP LL_DBGMCU_APB2_GRP1_FreezePeriph\n + * DBGMCU_APB2_FZ DBG_TIM10_STOP LL_DBGMCU_APB2_GRP1_FreezePeriph\n + * DBGMCU_APB2_FZ DBG_TIM11_STOP LL_DBGMCU_APB2_GRP1_FreezePeriph + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_DBGMCU_APB2_GRP1_TIM1_STOP + * @arg @ref LL_DBGMCU_APB2_GRP1_TIM8_STOP (*) + * @arg @ref LL_DBGMCU_APB2_GRP1_TIM9_STOP (*) + * @arg @ref LL_DBGMCU_APB2_GRP1_TIM10_STOP (*) + * @arg @ref LL_DBGMCU_APB2_GRP1_TIM11_STOP (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_DBGMCU_APB2_GRP1_FreezePeriph(uint32_t Periphs) +{ + SET_BIT(DBGMCU->APB2FZ, Periphs); +} + +/** + * @brief Unfreeze APB2 peripherals + * @rmtoll DBGMCU_APB2_FZ DBG_TIM1_STOP LL_DBGMCU_APB2_GRP1_UnFreezePeriph\n + * DBGMCU_APB2_FZ DBG_TIM8_STOP LL_DBGMCU_APB2_GRP1_UnFreezePeriph\n + * DBGMCU_APB2_FZ DBG_TIM9_STOP LL_DBGMCU_APB2_GRP1_UnFreezePeriph\n + * DBGMCU_APB2_FZ DBG_TIM10_STOP LL_DBGMCU_APB2_GRP1_UnFreezePeriph\n + * DBGMCU_APB2_FZ DBG_TIM11_STOP LL_DBGMCU_APB2_GRP1_UnFreezePeriph + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_DBGMCU_APB2_GRP1_TIM1_STOP + * @arg @ref LL_DBGMCU_APB2_GRP1_TIM8_STOP (*) + * @arg @ref LL_DBGMCU_APB2_GRP1_TIM9_STOP (*) + * @arg @ref LL_DBGMCU_APB2_GRP1_TIM10_STOP (*) + * @arg @ref LL_DBGMCU_APB2_GRP1_TIM11_STOP (*) + * + * (*) value not defined in all devices. 
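  /* Usage sketch (illustrative, not part of the generated header): keep the
     watchdogs and selected timers frozen while the core is halted on a breakpoint,
     and keep the debug connection alive in STOP mode. All constants used here are
     defined unconditionally above. */
  LL_DBGMCU_EnableDBGStopMode();
  LL_DBGMCU_APB1_GRP1_FreezePeriph(LL_DBGMCU_APB1_GRP1_IWDG_STOP
                                   | LL_DBGMCU_APB1_GRP1_WWDG_STOP
                                   | LL_DBGMCU_APB1_GRP1_TIM5_STOP);
  LL_DBGMCU_APB2_GRP1_FreezePeriph(LL_DBGMCU_APB2_GRP1_TIM1_STOP);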
+ * @retval None + */ +__STATIC_INLINE void LL_DBGMCU_APB2_GRP1_UnFreezePeriph(uint32_t Periphs) +{ + CLEAR_BIT(DBGMCU->APB2FZ, Periphs); +} +/** + * @} + */ + +/** @defgroup SYSTEM_LL_EF_FLASH FLASH + * @{ + */ + +/** + * @brief Set FLASH Latency + * @rmtoll FLASH_ACR LATENCY LL_FLASH_SetLatency + * @param Latency This parameter can be one of the following values: + * @arg @ref LL_FLASH_LATENCY_0 + * @arg @ref LL_FLASH_LATENCY_1 + * @arg @ref LL_FLASH_LATENCY_2 + * @arg @ref LL_FLASH_LATENCY_3 + * @arg @ref LL_FLASH_LATENCY_4 + * @arg @ref LL_FLASH_LATENCY_5 + * @arg @ref LL_FLASH_LATENCY_6 + * @arg @ref LL_FLASH_LATENCY_7 + * @arg @ref LL_FLASH_LATENCY_8 + * @arg @ref LL_FLASH_LATENCY_9 + * @arg @ref LL_FLASH_LATENCY_10 + * @arg @ref LL_FLASH_LATENCY_11 + * @arg @ref LL_FLASH_LATENCY_12 + * @arg @ref LL_FLASH_LATENCY_13 + * @arg @ref LL_FLASH_LATENCY_14 + * @arg @ref LL_FLASH_LATENCY_15 + * @retval None + */ +__STATIC_INLINE void LL_FLASH_SetLatency(uint32_t Latency) +{ + MODIFY_REG(FLASH->ACR, FLASH_ACR_LATENCY, Latency); +} + +/** + * @brief Get FLASH Latency + * @rmtoll FLASH_ACR LATENCY LL_FLASH_GetLatency + * @retval Returned value can be one of the following values: + * @arg @ref LL_FLASH_LATENCY_0 + * @arg @ref LL_FLASH_LATENCY_1 + * @arg @ref LL_FLASH_LATENCY_2 + * @arg @ref LL_FLASH_LATENCY_3 + * @arg @ref LL_FLASH_LATENCY_4 + * @arg @ref LL_FLASH_LATENCY_5 + * @arg @ref LL_FLASH_LATENCY_6 + * @arg @ref LL_FLASH_LATENCY_7 + * @arg @ref LL_FLASH_LATENCY_8 + * @arg @ref LL_FLASH_LATENCY_9 + * @arg @ref LL_FLASH_LATENCY_10 + * @arg @ref LL_FLASH_LATENCY_11 + * @arg @ref LL_FLASH_LATENCY_12 + * @arg @ref LL_FLASH_LATENCY_13 + * @arg @ref LL_FLASH_LATENCY_14 + * @arg @ref LL_FLASH_LATENCY_15 + */ +__STATIC_INLINE uint32_t LL_FLASH_GetLatency(void) +{ + return (uint32_t)(READ_BIT(FLASH->ACR, FLASH_ACR_LATENCY)); +} + +/** + * @brief Enable Prefetch + * @rmtoll FLASH_ACR PRFTEN LL_FLASH_EnablePrefetch + * @retval None + */ +__STATIC_INLINE void LL_FLASH_EnablePrefetch(void) +{ + SET_BIT(FLASH->ACR, FLASH_ACR_PRFTEN); +} + +/** + * @brief Disable Prefetch + * @rmtoll FLASH_ACR PRFTEN LL_FLASH_DisablePrefetch + * @retval None + */ +__STATIC_INLINE void LL_FLASH_DisablePrefetch(void) +{ + CLEAR_BIT(FLASH->ACR, FLASH_ACR_PRFTEN); +} + +/** + * @brief Check if Prefetch buffer is enabled + * @rmtoll FLASH_ACR PRFTEN LL_FLASH_IsPrefetchEnabled + * @retval State of bit (1 or 0). 
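  /* Usage sketch (illustrative, not part of the generated header): program the
     FLASH wait states and enable the accelerator before raising SYSCLK. Five wait
     states assumes roughly 168 MHz at a 2.7-3.6 V supply; check the reference
     manual for the voltage/frequency pairing that applies to the target device. */
  LL_FLASH_SetLatency(LL_FLASH_LATENCY_5);
  while (LL_FLASH_GetLatency() != LL_FLASH_LATENCY_5)
  {
    /* Wait until the new latency setting is taken into account. */
  }
  LL_FLASH_EnablePrefetch();
  LL_FLASH_EnableInstCache();
  LL_FLASH_EnableDataCache();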
+ */ +__STATIC_INLINE uint32_t LL_FLASH_IsPrefetchEnabled(void) +{ + return (READ_BIT(FLASH->ACR, FLASH_ACR_PRFTEN) == (FLASH_ACR_PRFTEN)); +} + +/** + * @brief Enable Instruction cache + * @rmtoll FLASH_ACR ICEN LL_FLASH_EnableInstCache + * @retval None + */ +__STATIC_INLINE void LL_FLASH_EnableInstCache(void) +{ + SET_BIT(FLASH->ACR, FLASH_ACR_ICEN); +} + +/** + * @brief Disable Instruction cache + * @rmtoll FLASH_ACR ICEN LL_FLASH_DisableInstCache + * @retval None + */ +__STATIC_INLINE void LL_FLASH_DisableInstCache(void) +{ + CLEAR_BIT(FLASH->ACR, FLASH_ACR_ICEN); +} + +/** + * @brief Enable Data cache + * @rmtoll FLASH_ACR DCEN LL_FLASH_EnableDataCache + * @retval None + */ +__STATIC_INLINE void LL_FLASH_EnableDataCache(void) +{ + SET_BIT(FLASH->ACR, FLASH_ACR_DCEN); +} + +/** + * @brief Disable Data cache + * @rmtoll FLASH_ACR DCEN LL_FLASH_DisableDataCache + * @retval None + */ +__STATIC_INLINE void LL_FLASH_DisableDataCache(void) +{ + CLEAR_BIT(FLASH->ACR, FLASH_ACR_DCEN); +} + +/** + * @brief Enable Instruction cache reset + * @note bit can be written only when the instruction cache is disabled + * @rmtoll FLASH_ACR ICRST LL_FLASH_EnableInstCacheReset + * @retval None + */ +__STATIC_INLINE void LL_FLASH_EnableInstCacheReset(void) +{ + SET_BIT(FLASH->ACR, FLASH_ACR_ICRST); +} + +/** + * @brief Disable Instruction cache reset + * @rmtoll FLASH_ACR ICRST LL_FLASH_DisableInstCacheReset + * @retval None + */ +__STATIC_INLINE void LL_FLASH_DisableInstCacheReset(void) +{ + CLEAR_BIT(FLASH->ACR, FLASH_ACR_ICRST); +} + +/** + * @brief Enable Data cache reset + * @note bit can be written only when the data cache is disabled + * @rmtoll FLASH_ACR DCRST LL_FLASH_EnableDataCacheReset + * @retval None + */ +__STATIC_INLINE void LL_FLASH_EnableDataCacheReset(void) +{ + SET_BIT(FLASH->ACR, FLASH_ACR_DCRST); +} + +/** + * @brief Disable Data cache reset + * @rmtoll FLASH_ACR DCRST LL_FLASH_DisableDataCacheReset + * @retval None + */ +__STATIC_INLINE void LL_FLASH_DisableDataCacheReset(void) +{ + CLEAR_BIT(FLASH->ACR, FLASH_ACR_DCRST); +} + + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* defined (FLASH) || defined (SYSCFG) || defined (DBGMCU) */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_LL_SYSTEM_H */ + + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_tim.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_tim.h new file mode 100644 index 0000000..a11f561 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_tim.h @@ -0,0 +1,4096 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_tim.h + * @author MCD Application Team + * @brief Header file of TIM LL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_LL_TIM_H +#define __STM32F4xx_LL_TIM_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +#if defined (TIM1) || defined (TIM2) || defined (TIM3) || defined (TIM4) || defined (TIM5) || defined (TIM6) || defined (TIM7) || defined (TIM8) || defined (TIM9) || defined (TIM10) || defined (TIM11) || defined (TIM12) || defined (TIM13) || defined (TIM14) + +/** @defgroup TIM_LL TIM + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/** @defgroup TIM_LL_Private_Variables TIM Private Variables + * @{ + */ +static const uint8_t OFFSET_TAB_CCMRx[] = +{ + 0x00U, /* 0: TIMx_CH1 */ + 0x00U, /* 1: TIMx_CH1N */ + 0x00U, /* 2: TIMx_CH2 */ + 0x00U, /* 3: TIMx_CH2N */ + 0x04U, /* 4: TIMx_CH3 */ + 0x04U, /* 5: TIMx_CH3N */ + 0x04U /* 6: TIMx_CH4 */ +}; + +static const uint8_t SHIFT_TAB_OCxx[] = +{ + 0U, /* 0: OC1M, OC1FE, OC1PE */ + 0U, /* 1: - NA */ + 8U, /* 2: OC2M, OC2FE, OC2PE */ + 0U, /* 3: - NA */ + 0U, /* 4: OC3M, OC3FE, OC3PE */ + 0U, /* 5: - NA */ + 8U /* 6: OC4M, OC4FE, OC4PE */ +}; + +static const uint8_t SHIFT_TAB_ICxx[] = +{ + 0U, /* 0: CC1S, IC1PSC, IC1F */ + 0U, /* 1: - NA */ + 8U, /* 2: CC2S, IC2PSC, IC2F */ + 0U, /* 3: - NA */ + 0U, /* 4: CC3S, IC3PSC, IC3F */ + 0U, /* 5: - NA */ + 8U /* 6: CC4S, IC4PSC, IC4F */ +}; + +static const uint8_t SHIFT_TAB_CCxP[] = +{ + 0U, /* 0: CC1P */ + 2U, /* 1: CC1NP */ + 4U, /* 2: CC2P */ + 6U, /* 3: CC2NP */ + 8U, /* 4: CC3P */ + 10U, /* 5: CC3NP */ + 12U /* 6: CC4P */ +}; + +static const uint8_t SHIFT_TAB_OISx[] = +{ + 0U, /* 0: OIS1 */ + 1U, /* 1: OIS1N */ + 2U, /* 2: OIS2 */ + 3U, /* 3: OIS2N */ + 4U, /* 4: OIS3 */ + 5U, /* 5: OIS3N */ + 6U /* 6: OIS4 */ +}; +/** + * @} + */ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup TIM_LL_Private_Constants TIM Private Constants + * @{ + */ + + +/* Remap mask definitions */ +#define TIMx_OR_RMP_SHIFT 16U +#define TIMx_OR_RMP_MASK 0x0000FFFFU +#define TIM2_OR_RMP_MASK (TIM_OR_ITR1_RMP << TIMx_OR_RMP_SHIFT) +#define TIM5_OR_RMP_MASK (TIM_OR_TI4_RMP << TIMx_OR_RMP_SHIFT) +#define TIM11_OR_RMP_MASK (TIM_OR_TI1_RMP << TIMx_OR_RMP_SHIFT) + +/* Mask used to set the TDG[x:0] of the DTG bits of the TIMx_BDTR register */ +#define DT_DELAY_1 ((uint8_t)0x7F) +#define DT_DELAY_2 ((uint8_t)0x3F) +#define DT_DELAY_3 ((uint8_t)0x1F) +#define DT_DELAY_4 ((uint8_t)0x1F) + +/* Mask used to set the DTG[7:5] bits of the DTG bits of the TIMx_BDTR register */ +#define DT_RANGE_1 ((uint8_t)0x00) +#define DT_RANGE_2 ((uint8_t)0x80) +#define DT_RANGE_3 ((uint8_t)0xC0) +#define DT_RANGE_4 ((uint8_t)0xE0) + + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup TIM_LL_Private_Macros TIM Private Macros + * @{ + */ +/** @brief Convert channel id into channel index. 
+ * @param __CHANNEL__ This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH1N + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH2N + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH3N + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval none + */ +#define TIM_GET_CHANNEL_INDEX( __CHANNEL__) \ + (((__CHANNEL__) == LL_TIM_CHANNEL_CH1) ? 0U :\ + ((__CHANNEL__) == LL_TIM_CHANNEL_CH1N) ? 1U :\ + ((__CHANNEL__) == LL_TIM_CHANNEL_CH2) ? 2U :\ + ((__CHANNEL__) == LL_TIM_CHANNEL_CH2N) ? 3U :\ + ((__CHANNEL__) == LL_TIM_CHANNEL_CH3) ? 4U :\ + ((__CHANNEL__) == LL_TIM_CHANNEL_CH3N) ? 5U : 6U) + +/** @brief Calculate the deadtime sampling period(in ps). + * @param __TIMCLK__ timer input clock frequency (in Hz). + * @param __CKD__ This parameter can be one of the following values: + * @arg @ref LL_TIM_CLOCKDIVISION_DIV1 + * @arg @ref LL_TIM_CLOCKDIVISION_DIV2 + * @arg @ref LL_TIM_CLOCKDIVISION_DIV4 + * @retval none + */ +#define TIM_CALC_DTS(__TIMCLK__, __CKD__) \ + (((__CKD__) == LL_TIM_CLOCKDIVISION_DIV1) ? ((uint64_t)1000000000000U/(__TIMCLK__)) : \ + ((__CKD__) == LL_TIM_CLOCKDIVISION_DIV2) ? ((uint64_t)1000000000000U/((__TIMCLK__) >> 1U)) : \ + ((uint64_t)1000000000000U/((__TIMCLK__) >> 2U))) +/** + * @} + */ + + +/* Exported types ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup TIM_LL_ES_INIT TIM Exported Init structure + * @{ + */ + +/** + * @brief TIM Time Base configuration structure definition. + */ +typedef struct +{ + uint16_t Prescaler; /*!< Specifies the prescaler value used to divide the TIM clock. + This parameter can be a number between Min_Data=0x0000 and Max_Data=0xFFFF. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_SetPrescaler().*/ + + uint32_t CounterMode; /*!< Specifies the counter mode. + This parameter can be a value of @ref TIM_LL_EC_COUNTERMODE. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_SetCounterMode().*/ + + uint32_t Autoreload; /*!< Specifies the auto reload value to be loaded into the active + Auto-Reload Register at the next update event. + This parameter must be a number between Min_Data=0x0000 and Max_Data=0xFFFF. + Some timer instances may support 32 bits counters. In that case this parameter must + be a number between 0x0000 and 0xFFFFFFFF. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_SetAutoReload().*/ + + uint32_t ClockDivision; /*!< Specifies the clock division. + This parameter can be a value of @ref TIM_LL_EC_CLOCKDIVISION. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_SetClockDivision().*/ + + uint32_t RepetitionCounter; /*!< Specifies the repetition counter value. Each time the RCR downcounter + reaches zero, an update event is generated and counting restarts + from the RCR value (N). + This means in PWM mode that (N+1) corresponds to: + - the number of PWM periods in edge-aligned mode + - the number of half PWM period in center-aligned mode + GP timers: this parameter must be a number between Min_Data = 0x00 and + Max_Data = 0xFF. + Advanced timers: this parameter must be a number between Min_Data = 0x0000 and + Max_Data = 0xFFFF. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_SetRepetitionCounter().*/ +} LL_TIM_InitTypeDef; + +/** + * @brief TIM Output Compare configuration structure definition. 
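+ * @note  Minimal illustrative sketch, assuming USE_FULL_LL_DRIVER is defined so that
+ *        LL_TIM_OC_StructInit() and LL_TIM_OC_Init(), declared at the end of this file,
+ *        are available:
+ * @code
+ *        LL_TIM_OC_InitTypeDef oc_init;
+ *
+ *        LL_TIM_OC_StructInit(&oc_init);
+ *        oc_init.OCMode       = LL_TIM_OCMODE_PWM1;
+ *        oc_init.OCState      = LL_TIM_OCSTATE_ENABLE;
+ *        oc_init.CompareValue = 500U;
+ *        oc_init.OCPolarity   = LL_TIM_OCPOLARITY_HIGH;
+ *        (void)LL_TIM_OC_Init(TIM1, LL_TIM_CHANNEL_CH1, &oc_init);
+ * @endcode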
+ */ +typedef struct +{ + uint32_t OCMode; /*!< Specifies the output mode. + This parameter can be a value of @ref TIM_LL_EC_OCMODE. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_OC_SetMode().*/ + + uint32_t OCState; /*!< Specifies the TIM Output Compare state. + This parameter can be a value of @ref TIM_LL_EC_OCSTATE. + + This feature can be modified afterwards using unitary functions + @ref LL_TIM_CC_EnableChannel() or @ref LL_TIM_CC_DisableChannel().*/ + + uint32_t OCNState; /*!< Specifies the TIM complementary Output Compare state. + This parameter can be a value of @ref TIM_LL_EC_OCSTATE. + + This feature can be modified afterwards using unitary functions + @ref LL_TIM_CC_EnableChannel() or @ref LL_TIM_CC_DisableChannel().*/ + + uint32_t CompareValue; /*!< Specifies the Compare value to be loaded into the Capture Compare Register. + This parameter can be a number between Min_Data=0x0000 and Max_Data=0xFFFF. + + This feature can be modified afterwards using unitary function + LL_TIM_OC_SetCompareCHx (x=1..6).*/ + + uint32_t OCPolarity; /*!< Specifies the output polarity. + This parameter can be a value of @ref TIM_LL_EC_OCPOLARITY. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_OC_SetPolarity().*/ + + uint32_t OCNPolarity; /*!< Specifies the complementary output polarity. + This parameter can be a value of @ref TIM_LL_EC_OCPOLARITY. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_OC_SetPolarity().*/ + + + uint32_t OCIdleState; /*!< Specifies the TIM Output Compare pin state during Idle state. + This parameter can be a value of @ref TIM_LL_EC_OCIDLESTATE. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_OC_SetIdleState().*/ + + uint32_t OCNIdleState; /*!< Specifies the TIM Output Compare pin state during Idle state. + This parameter can be a value of @ref TIM_LL_EC_OCIDLESTATE. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_OC_SetIdleState().*/ +} LL_TIM_OC_InitTypeDef; + +/** + * @brief TIM Input Capture configuration structure definition. + */ + +typedef struct +{ + + uint32_t ICPolarity; /*!< Specifies the active edge of the input signal. + This parameter can be a value of @ref TIM_LL_EC_IC_POLARITY. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetPolarity().*/ + + uint32_t ICActiveInput; /*!< Specifies the input. + This parameter can be a value of @ref TIM_LL_EC_ACTIVEINPUT. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetActiveInput().*/ + + uint32_t ICPrescaler; /*!< Specifies the Input Capture Prescaler. + This parameter can be a value of @ref TIM_LL_EC_ICPSC. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetPrescaler().*/ + + uint32_t ICFilter; /*!< Specifies the input capture filter. + This parameter can be a value of @ref TIM_LL_EC_IC_FILTER. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetFilter().*/ +} LL_TIM_IC_InitTypeDef; + + +/** + * @brief TIM Encoder interface configuration structure definition. + */ +typedef struct +{ + uint32_t EncoderMode; /*!< Specifies the encoder resolution (x2 or x4). + This parameter can be a value of @ref TIM_LL_EC_ENCODERMODE. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_SetEncoderMode().*/ + + uint32_t IC1Polarity; /*!< Specifies the active edge of TI1 input. 
+ This parameter can be a value of @ref TIM_LL_EC_IC_POLARITY. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetPolarity().*/ + + uint32_t IC1ActiveInput; /*!< Specifies the TI1 input source + This parameter can be a value of @ref TIM_LL_EC_ACTIVEINPUT. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetActiveInput().*/ + + uint32_t IC1Prescaler; /*!< Specifies the TI1 input prescaler value. + This parameter can be a value of @ref TIM_LL_EC_ICPSC. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetPrescaler().*/ + + uint32_t IC1Filter; /*!< Specifies the TI1 input filter. + This parameter can be a value of @ref TIM_LL_EC_IC_FILTER. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetFilter().*/ + + uint32_t IC2Polarity; /*!< Specifies the active edge of TI2 input. + This parameter can be a value of @ref TIM_LL_EC_IC_POLARITY. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetPolarity().*/ + + uint32_t IC2ActiveInput; /*!< Specifies the TI2 input source + This parameter can be a value of @ref TIM_LL_EC_ACTIVEINPUT. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetActiveInput().*/ + + uint32_t IC2Prescaler; /*!< Specifies the TI2 input prescaler value. + This parameter can be a value of @ref TIM_LL_EC_ICPSC. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetPrescaler().*/ + + uint32_t IC2Filter; /*!< Specifies the TI2 input filter. + This parameter can be a value of @ref TIM_LL_EC_IC_FILTER. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetFilter().*/ + +} LL_TIM_ENCODER_InitTypeDef; + +/** + * @brief TIM Hall sensor interface configuration structure definition. + */ +typedef struct +{ + + uint32_t IC1Polarity; /*!< Specifies the active edge of TI1 input. + This parameter can be a value of @ref TIM_LL_EC_IC_POLARITY. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetPolarity().*/ + + uint32_t IC1Prescaler; /*!< Specifies the TI1 input prescaler value. + Prescaler must be set to get a maximum counter period longer than the + time interval between 2 consecutive changes on the Hall inputs. + This parameter can be a value of @ref TIM_LL_EC_ICPSC. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetPrescaler().*/ + + uint32_t IC1Filter; /*!< Specifies the TI1 input filter. + This parameter can be a value of + @ref TIM_LL_EC_IC_FILTER. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetFilter().*/ + + uint32_t CommutationDelay; /*!< Specifies the compare value to be loaded into the Capture Compare Register. + A positive pulse (TRGO event) is generated with a programmable delay every time + a change occurs on the Hall inputs. + This parameter can be a number between Min_Data = 0x0000 and Max_Data = 0xFFFF. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_OC_SetCompareCH2().*/ +} LL_TIM_HALLSENSOR_InitTypeDef; + +/** + * @brief BDTR (Break and Dead Time) structure definition + */ +typedef struct +{ + uint32_t OSSRState; /*!< Specifies the Off-State selection used in Run mode. 
+ This parameter can be a value of @ref TIM_LL_EC_OSSR + + This feature can be modified afterwards using unitary function + @ref LL_TIM_SetOffStates() + + @note This bit-field cannot be modified as long as LOCK level 2 has been + programmed. */ + + uint32_t OSSIState; /*!< Specifies the Off-State used in Idle state. + This parameter can be a value of @ref TIM_LL_EC_OSSI + + This feature can be modified afterwards using unitary function + @ref LL_TIM_SetOffStates() + + @note This bit-field cannot be modified as long as LOCK level 2 has been + programmed. */ + + uint32_t LockLevel; /*!< Specifies the LOCK level parameters. + This parameter can be a value of @ref TIM_LL_EC_LOCKLEVEL + + @note The LOCK bits can be written only once after the reset. Once the TIMx_BDTR + register has been written, their content is frozen until the next reset.*/ + + uint8_t DeadTime; /*!< Specifies the delay time between the switching-off and the + switching-on of the outputs. + This parameter can be a number between Min_Data = 0x00 and Max_Data = 0xFF. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_OC_SetDeadTime() + + @note This bit-field can not be modified as long as LOCK level 1, 2 or 3 has been + programmed. */ + + uint16_t BreakState; /*!< Specifies whether the TIM Break input is enabled or not. + This parameter can be a value of @ref TIM_LL_EC_BREAK_ENABLE + + This feature can be modified afterwards using unitary functions + @ref LL_TIM_EnableBRK() or @ref LL_TIM_DisableBRK() + + @note This bit-field can not be modified as long as LOCK level 1 has been + programmed. */ + + uint32_t BreakPolarity; /*!< Specifies the TIM Break Input pin polarity. + This parameter can be a value of @ref TIM_LL_EC_BREAK_POLARITY + + This feature can be modified afterwards using unitary function + @ref LL_TIM_ConfigBRK() + + @note This bit-field can not be modified as long as LOCK level 1 has been + programmed. */ + + uint32_t AutomaticOutput; /*!< Specifies whether the TIM Automatic Output feature is enabled or not. + This parameter can be a value of @ref TIM_LL_EC_AUTOMATICOUTPUT_ENABLE + + This feature can be modified afterwards using unitary functions + @ref LL_TIM_EnableAutomaticOutput() or @ref LL_TIM_DisableAutomaticOutput() + + @note This bit-field can not be modified as long as LOCK level 1 has been + programmed. */ +} LL_TIM_BDTR_InitTypeDef; + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup TIM_LL_Exported_Constants TIM Exported Constants + * @{ + */ + +/** @defgroup TIM_LL_EC_GET_FLAG Get Flags Defines + * @brief Flags defines which can be used with LL_TIM_ReadReg function. 
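+ * @note  A hedged usage sketch; LL_TIM_IsActiveFlag_UPDATE() and LL_TIM_ClearFlag_UPDATE()
+ *        are the flag helpers assumed to be defined further down in this header:
+ * @code
+ *        if (LL_TIM_IsActiveFlag_UPDATE(TIM2))
+ *        {
+ *          LL_TIM_ClearFlag_UPDATE(TIM2);
+ *          // handle the update event here
+ *        }
+ * @endcode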
+ * @{ + */ +#define LL_TIM_SR_UIF TIM_SR_UIF /*!< Update interrupt flag */ +#define LL_TIM_SR_CC1IF TIM_SR_CC1IF /*!< Capture/compare 1 interrupt flag */ +#define LL_TIM_SR_CC2IF TIM_SR_CC2IF /*!< Capture/compare 2 interrupt flag */ +#define LL_TIM_SR_CC3IF TIM_SR_CC3IF /*!< Capture/compare 3 interrupt flag */ +#define LL_TIM_SR_CC4IF TIM_SR_CC4IF /*!< Capture/compare 4 interrupt flag */ +#define LL_TIM_SR_COMIF TIM_SR_COMIF /*!< COM interrupt flag */ +#define LL_TIM_SR_TIF TIM_SR_TIF /*!< Trigger interrupt flag */ +#define LL_TIM_SR_BIF TIM_SR_BIF /*!< Break interrupt flag */ +#define LL_TIM_SR_CC1OF TIM_SR_CC1OF /*!< Capture/Compare 1 overcapture flag */ +#define LL_TIM_SR_CC2OF TIM_SR_CC2OF /*!< Capture/Compare 2 overcapture flag */ +#define LL_TIM_SR_CC3OF TIM_SR_CC3OF /*!< Capture/Compare 3 overcapture flag */ +#define LL_TIM_SR_CC4OF TIM_SR_CC4OF /*!< Capture/Compare 4 overcapture flag */ +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup TIM_LL_EC_BREAK_ENABLE Break Enable + * @{ + */ +#define LL_TIM_BREAK_DISABLE 0x00000000U /*!< Break function disabled */ +#define LL_TIM_BREAK_ENABLE TIM_BDTR_BKE /*!< Break function enabled */ +/** + * @} + */ + +/** @defgroup TIM_LL_EC_AUTOMATICOUTPUT_ENABLE Automatic output enable + * @{ + */ +#define LL_TIM_AUTOMATICOUTPUT_DISABLE 0x00000000U /*!< MOE can be set only by software */ +#define LL_TIM_AUTOMATICOUTPUT_ENABLE TIM_BDTR_AOE /*!< MOE can be set by software or automatically at the next update event */ +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** @defgroup TIM_LL_EC_IT IT Defines + * @brief IT defines which can be used with LL_TIM_ReadReg and LL_TIM_WriteReg functions. + * @{ + */ +#define LL_TIM_DIER_UIE TIM_DIER_UIE /*!< Update interrupt enable */ +#define LL_TIM_DIER_CC1IE TIM_DIER_CC1IE /*!< Capture/compare 1 interrupt enable */ +#define LL_TIM_DIER_CC2IE TIM_DIER_CC2IE /*!< Capture/compare 2 interrupt enable */ +#define LL_TIM_DIER_CC3IE TIM_DIER_CC3IE /*!< Capture/compare 3 interrupt enable */ +#define LL_TIM_DIER_CC4IE TIM_DIER_CC4IE /*!< Capture/compare 4 interrupt enable */ +#define LL_TIM_DIER_COMIE TIM_DIER_COMIE /*!< COM interrupt enable */ +#define LL_TIM_DIER_TIE TIM_DIER_TIE /*!< Trigger interrupt enable */ +#define LL_TIM_DIER_BIE TIM_DIER_BIE /*!< Break interrupt enable */ +/** + * @} + */ + +/** @defgroup TIM_LL_EC_UPDATESOURCE Update Source + * @{ + */ +#define LL_TIM_UPDATESOURCE_REGULAR 0x00000000U /*!< Counter overflow/underflow, Setting the UG bit or Update generation through the slave mode controller generates an update request */ +#define LL_TIM_UPDATESOURCE_COUNTER TIM_CR1_URS /*!< Only counter overflow/underflow generates an update request */ +/** + * @} + */ + +/** @defgroup TIM_LL_EC_ONEPULSEMODE One Pulse Mode + * @{ + */ +#define LL_TIM_ONEPULSEMODE_SINGLE TIM_CR1_OPM /*!< Counter stops counting at the next update event */ +#define LL_TIM_ONEPULSEMODE_REPETITIVE 0x00000000U /*!< Counter is not stopped at update event */ +/** + * @} + */ + +/** @defgroup TIM_LL_EC_COUNTERMODE Counter Mode + * @{ + */ +#define LL_TIM_COUNTERMODE_UP 0x00000000U /*!< Counter used as upcounter */ +#define LL_TIM_COUNTERMODE_DOWN TIM_CR1_DIR /*!< Counter used as downcounter */ +#define LL_TIM_COUNTERMODE_CENTER_DOWN TIM_CR1_CMS_0 /*!< The counter counts up and down alternatively. Output compare interrupt flags of output channels are set only when the counter is counting down. */ +#define LL_TIM_COUNTERMODE_CENTER_UP TIM_CR1_CMS_1 /*!< The counter counts up and down alternatively. 
Output compare interrupt flags of output channels are set only when the counter is counting up */ +#define LL_TIM_COUNTERMODE_CENTER_UP_DOWN TIM_CR1_CMS /*!< The counter counts up and down alternatively. Output compare interrupt flags of output channels are set only when the counter is counting up or down. */ +/** + * @} + */ + +/** @defgroup TIM_LL_EC_CLOCKDIVISION Clock Division + * @{ + */ +#define LL_TIM_CLOCKDIVISION_DIV1 0x00000000U /*!< tDTS=tCK_INT */ +#define LL_TIM_CLOCKDIVISION_DIV2 TIM_CR1_CKD_0 /*!< tDTS=2*tCK_INT */ +#define LL_TIM_CLOCKDIVISION_DIV4 TIM_CR1_CKD_1 /*!< tDTS=4*tCK_INT */ +/** + * @} + */ + +/** @defgroup TIM_LL_EC_COUNTERDIRECTION Counter Direction + * @{ + */ +#define LL_TIM_COUNTERDIRECTION_UP 0x00000000U /*!< Timer counter counts up */ +#define LL_TIM_COUNTERDIRECTION_DOWN TIM_CR1_DIR /*!< Timer counter counts down */ +/** + * @} + */ + +/** @defgroup TIM_LL_EC_CCUPDATESOURCE Capture Compare Update Source + * @{ + */ +#define LL_TIM_CCUPDATESOURCE_COMG_ONLY 0x00000000U /*!< Capture/compare control bits are updated by setting the COMG bit only */ +#define LL_TIM_CCUPDATESOURCE_COMG_AND_TRGI TIM_CR2_CCUS /*!< Capture/compare control bits are updated by setting the COMG bit or when a rising edge occurs on trigger input (TRGI) */ +/** + * @} + */ + +/** @defgroup TIM_LL_EC_CCDMAREQUEST Capture Compare DMA Request + * @{ + */ +#define LL_TIM_CCDMAREQUEST_CC 0x00000000U /*!< CCx DMA request sent when CCx event occurs */ +#define LL_TIM_CCDMAREQUEST_UPDATE TIM_CR2_CCDS /*!< CCx DMA requests sent when update event occurs */ +/** + * @} + */ + +/** @defgroup TIM_LL_EC_LOCKLEVEL Lock Level + * @{ + */ +#define LL_TIM_LOCKLEVEL_OFF 0x00000000U /*!< LOCK OFF - No bit is write protected */ +#define LL_TIM_LOCKLEVEL_1 TIM_BDTR_LOCK_0 /*!< LOCK Level 1 */ +#define LL_TIM_LOCKLEVEL_2 TIM_BDTR_LOCK_1 /*!< LOCK Level 2 */ +#define LL_TIM_LOCKLEVEL_3 TIM_BDTR_LOCK /*!< LOCK Level 3 */ +/** + * @} + */ + +/** @defgroup TIM_LL_EC_CHANNEL Channel + * @{ + */ +#define LL_TIM_CHANNEL_CH1 TIM_CCER_CC1E /*!< Timer input/output channel 1 */ +#define LL_TIM_CHANNEL_CH1N TIM_CCER_CC1NE /*!< Timer complementary output channel 1 */ +#define LL_TIM_CHANNEL_CH2 TIM_CCER_CC2E /*!< Timer input/output channel 2 */ +#define LL_TIM_CHANNEL_CH2N TIM_CCER_CC2NE /*!< Timer complementary output channel 2 */ +#define LL_TIM_CHANNEL_CH3 TIM_CCER_CC3E /*!< Timer input/output channel 3 */ +#define LL_TIM_CHANNEL_CH3N TIM_CCER_CC3NE /*!< Timer complementary output channel 3 */ +#define LL_TIM_CHANNEL_CH4 TIM_CCER_CC4E /*!< Timer input/output channel 4 */ +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup TIM_LL_EC_OCSTATE Output Configuration State + * @{ + */ +#define LL_TIM_OCSTATE_DISABLE 0x00000000U /*!< OCx is not active */ +#define LL_TIM_OCSTATE_ENABLE TIM_CCER_CC1E /*!< OCx signal is output on the corresponding output pin */ +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** @defgroup TIM_LL_EC_OCMODE Output Configuration Mode + * @{ + */ +#define LL_TIM_OCMODE_FROZEN 0x00000000U /*!TIMx_CCRy else active.*/ +#define LL_TIM_OCMODE_PWM2 (TIM_CCMR1_OC1M_2 | TIM_CCMR1_OC1M_1 | TIM_CCMR1_OC1M_0) /*!TIMx_CCRy else inactive*/ +/** + * @} + */ + +/** @defgroup TIM_LL_EC_OCPOLARITY Output Configuration Polarity + * @{ + */ +#define LL_TIM_OCPOLARITY_HIGH 0x00000000U /*!< OCxactive high*/ +#define LL_TIM_OCPOLARITY_LOW TIM_CCER_CC1P /*!< OCxactive low*/ +/** + * @} + */ + +/** @defgroup TIM_LL_EC_OCIDLESTATE Output Configuration Idle State + * @{ + */ +#define LL_TIM_OCIDLESTATE_LOW 
0x00000000U /*!__REG__, (__VALUE__)) + +/** + * @brief Read a value in TIM register. + * @param __INSTANCE__ TIM Instance + * @param __REG__ Register to be read + * @retval Register value + */ +#define LL_TIM_ReadReg(__INSTANCE__, __REG__) READ_REG((__INSTANCE__)->__REG__) +/** + * @} + */ + +/** + * @brief HELPER macro calculating DTG[0:7] in the TIMx_BDTR register to achieve the requested dead time duration. + * @note ex: @ref __LL_TIM_CALC_DEADTIME (80000000, @ref LL_TIM_GetClockDivision (), 120); + * @param __TIMCLK__ timer input clock frequency (in Hz) + * @param __CKD__ This parameter can be one of the following values: + * @arg @ref LL_TIM_CLOCKDIVISION_DIV1 + * @arg @ref LL_TIM_CLOCKDIVISION_DIV2 + * @arg @ref LL_TIM_CLOCKDIVISION_DIV4 + * @param __DT__ deadtime duration (in ns) + * @retval DTG[0:7] + */ +#define __LL_TIM_CALC_DEADTIME(__TIMCLK__, __CKD__, __DT__) \ + ( (((uint64_t)((__DT__)*1000U)) < ((DT_DELAY_1+1U) * TIM_CALC_DTS((__TIMCLK__), (__CKD__)))) ? \ + (uint8_t)(((uint64_t)((__DT__)*1000U) / TIM_CALC_DTS((__TIMCLK__), (__CKD__))) & DT_DELAY_1) : \ + (((uint64_t)((__DT__)*1000U)) < ((64U + (DT_DELAY_2+1U)) * 2U * TIM_CALC_DTS((__TIMCLK__), (__CKD__)))) ? \ + (uint8_t)(DT_RANGE_2 | ((uint8_t)((uint8_t)((((uint64_t)((__DT__)*1000U))/ TIM_CALC_DTS((__TIMCLK__), \ + (__CKD__))) >> 1U) - (uint8_t) 64) & DT_DELAY_2)) :\ + (((uint64_t)((__DT__)*1000U)) < ((32U + (DT_DELAY_3+1U)) * 8U * TIM_CALC_DTS((__TIMCLK__), (__CKD__)))) ? \ + (uint8_t)(DT_RANGE_3 | ((uint8_t)((uint8_t)(((((uint64_t)(__DT__)*1000U))/ TIM_CALC_DTS((__TIMCLK__), \ + (__CKD__))) >> 3U) - (uint8_t) 32) & DT_DELAY_3)) :\ + (((uint64_t)((__DT__)*1000U)) < ((32U + (DT_DELAY_4+1U)) * 16U * TIM_CALC_DTS((__TIMCLK__), (__CKD__)))) ? \ + (uint8_t)(DT_RANGE_4 | ((uint8_t)((uint8_t)(((((uint64_t)(__DT__)*1000U))/ TIM_CALC_DTS((__TIMCLK__), \ + (__CKD__))) >> 4U) - (uint8_t) 32) & DT_DELAY_4)) :\ + 0U) + +/** + * @brief HELPER macro calculating the prescaler value to achieve the required counter clock frequency. + * @note ex: @ref __LL_TIM_CALC_PSC (80000000, 1000000); + * @param __TIMCLK__ timer input clock frequency (in Hz) + * @param __CNTCLK__ counter clock frequency (in Hz) + * @retval Prescaler value (between Min_Data=0 and Max_Data=65535) + */ +#define __LL_TIM_CALC_PSC(__TIMCLK__, __CNTCLK__) \ + (((__TIMCLK__) >= (__CNTCLK__)) ? (uint32_t)((((__TIMCLK__) + (__CNTCLK__)/2U)/(__CNTCLK__)) - 1U) : 0U) + +/** + * @brief HELPER macro calculating the auto-reload value to achieve the required output signal frequency. + * @note ex: @ref __LL_TIM_CALC_ARR (1000000, @ref LL_TIM_GetPrescaler (), 10000); + * @param __TIMCLK__ timer input clock frequency (in Hz) + * @param __PSC__ prescaler + * @param __FREQ__ output signal frequency (in Hz) + * @retval Auto-reload value (between Min_Data=0 and Max_Data=65535) + */ +#define __LL_TIM_CALC_ARR(__TIMCLK__, __PSC__, __FREQ__) \ + ((((__TIMCLK__)/((__PSC__) + 1U)) >= (__FREQ__)) ? (((__TIMCLK__)/((__FREQ__) * ((__PSC__) + 1U))) - 1U) : 0U) + +/** + * @brief HELPER macro calculating the compare value required to achieve the required timer output compare + * active/inactive delay. 
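+ * @note  Illustrative sketch for the delay/pulse helpers defined below, assuming TIM4 runs
+ *        in one pulse mode from an 84 MHz timer clock with a prescaler of 83 (1 MHz counter
+ *        clock); LL_TIM_OC_SetCompareCH1() is assumed to be available further down in this header:
+ * @code
+ *        // 10 us delay before the pulse, 20 us pulse width
+ *        LL_TIM_OC_SetCompareCH1(TIM4, __LL_TIM_CALC_DELAY(84000000U, 83U, 10U));
+ *        LL_TIM_SetAutoReload(TIM4, __LL_TIM_CALC_PULSE(84000000U, 83U, 10U, 20U));
+ * @endcode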
+ * @note ex: @ref __LL_TIM_CALC_DELAY (1000000, @ref LL_TIM_GetPrescaler (), 10); + * @param __TIMCLK__ timer input clock frequency (in Hz) + * @param __PSC__ prescaler + * @param __DELAY__ timer output compare active/inactive delay (in us) + * @retval Compare value (between Min_Data=0 and Max_Data=65535) + */ +#define __LL_TIM_CALC_DELAY(__TIMCLK__, __PSC__, __DELAY__) \ + ((uint32_t)(((uint64_t)(__TIMCLK__) * (uint64_t)(__DELAY__)) \ + / ((uint64_t)1000000U * (uint64_t)((__PSC__) + 1U)))) + +/** + * @brief HELPER macro calculating the auto-reload value to achieve the required pulse duration + * (when the timer operates in one pulse mode). + * @note ex: @ref __LL_TIM_CALC_PULSE (1000000, @ref LL_TIM_GetPrescaler (), 10, 20); + * @param __TIMCLK__ timer input clock frequency (in Hz) + * @param __PSC__ prescaler + * @param __DELAY__ timer output compare active/inactive delay (in us) + * @param __PULSE__ pulse duration (in us) + * @retval Auto-reload value (between Min_Data=0 and Max_Data=65535) + */ +#define __LL_TIM_CALC_PULSE(__TIMCLK__, __PSC__, __DELAY__, __PULSE__) \ + ((uint32_t)(__LL_TIM_CALC_DELAY((__TIMCLK__), (__PSC__), (__PULSE__)) \ + + __LL_TIM_CALC_DELAY((__TIMCLK__), (__PSC__), (__DELAY__)))) + +/** + * @brief HELPER macro retrieving the ratio of the input capture prescaler + * @note ex: @ref __LL_TIM_GET_ICPSC_RATIO (@ref LL_TIM_IC_GetPrescaler ()); + * @param __ICPSC__ This parameter can be one of the following values: + * @arg @ref LL_TIM_ICPSC_DIV1 + * @arg @ref LL_TIM_ICPSC_DIV2 + * @arg @ref LL_TIM_ICPSC_DIV4 + * @arg @ref LL_TIM_ICPSC_DIV8 + * @retval Input capture prescaler ratio (1, 2, 4 or 8) + */ +#define __LL_TIM_GET_ICPSC_RATIO(__ICPSC__) \ + ((uint32_t)(0x01U << (((__ICPSC__) >> 16U) >> TIM_CCMR1_IC1PSC_Pos))) + + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup TIM_LL_Exported_Functions TIM Exported Functions + * @{ + */ + +/** @defgroup TIM_LL_EF_Time_Base Time Base configuration + * @{ + */ +/** + * @brief Enable timer counter. + * @rmtoll CR1 CEN LL_TIM_EnableCounter + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableCounter(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->CR1, TIM_CR1_CEN); +} + +/** + * @brief Disable timer counter. + * @rmtoll CR1 CEN LL_TIM_DisableCounter + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableCounter(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->CR1, TIM_CR1_CEN); +} + +/** + * @brief Indicates whether the timer counter is enabled. + * @rmtoll CR1 CEN LL_TIM_IsEnabledCounter + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledCounter(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->CR1, TIM_CR1_CEN) == (TIM_CR1_CEN)) ? 1UL : 0UL); +} + +/** + * @brief Enable update event generation. + * @rmtoll CR1 UDIS LL_TIM_EnableUpdateEvent + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableUpdateEvent(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->CR1, TIM_CR1_UDIS); +} + +/** + * @brief Disable update event generation. + * @rmtoll CR1 UDIS LL_TIM_DisableUpdateEvent + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableUpdateEvent(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->CR1, TIM_CR1_UDIS); +} + +/** + * @brief Indicates whether update event generation is enabled. + * @rmtoll CR1 UDIS LL_TIM_IsEnabledUpdateEvent + * @param TIMx Timer instance + * @retval Inverted state of bit (0 or 1). 
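+ * @note  Compact time-base bring-up sketch for the functions in this group, assuming an
+ *        84 MHz timer kernel clock (all values are illustrative only):
+ * @code
+ *        LL_TIM_SetPrescaler(TIM2, __LL_TIM_CALC_PSC(84000000U, 1000000U));                          // 1 MHz counter clock
+ *        LL_TIM_SetAutoReload(TIM2, __LL_TIM_CALC_ARR(84000000U, LL_TIM_GetPrescaler(TIM2), 1000U)); // 1 kHz update rate
+ *        LL_TIM_EnableARRPreload(TIM2);
+ *        LL_TIM_EnableCounter(TIM2);
+ * @endcode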
+ */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledUpdateEvent(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->CR1, TIM_CR1_UDIS) == (uint32_t)RESET) ? 1UL : 0UL); +} + +/** + * @brief Set update event source + * @note Update event source set to LL_TIM_UPDATESOURCE_REGULAR: any of the following events + * generate an update interrupt or DMA request if enabled: + * - Counter overflow/underflow + * - Setting the UG bit + * - Update generation through the slave mode controller + * @note Update event source set to LL_TIM_UPDATESOURCE_COUNTER: only counter + * overflow/underflow generates an update interrupt or DMA request if enabled. + * @rmtoll CR1 URS LL_TIM_SetUpdateSource + * @param TIMx Timer instance + * @param UpdateSource This parameter can be one of the following values: + * @arg @ref LL_TIM_UPDATESOURCE_REGULAR + * @arg @ref LL_TIM_UPDATESOURCE_COUNTER + * @retval None + */ +__STATIC_INLINE void LL_TIM_SetUpdateSource(TIM_TypeDef *TIMx, uint32_t UpdateSource) +{ + MODIFY_REG(TIMx->CR1, TIM_CR1_URS, UpdateSource); +} + +/** + * @brief Get actual event update source + * @rmtoll CR1 URS LL_TIM_GetUpdateSource + * @param TIMx Timer instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_UPDATESOURCE_REGULAR + * @arg @ref LL_TIM_UPDATESOURCE_COUNTER + */ +__STATIC_INLINE uint32_t LL_TIM_GetUpdateSource(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_BIT(TIMx->CR1, TIM_CR1_URS)); +} + +/** + * @brief Set one pulse mode (one shot v.s. repetitive). + * @rmtoll CR1 OPM LL_TIM_SetOnePulseMode + * @param TIMx Timer instance + * @param OnePulseMode This parameter can be one of the following values: + * @arg @ref LL_TIM_ONEPULSEMODE_SINGLE + * @arg @ref LL_TIM_ONEPULSEMODE_REPETITIVE + * @retval None + */ +__STATIC_INLINE void LL_TIM_SetOnePulseMode(TIM_TypeDef *TIMx, uint32_t OnePulseMode) +{ + MODIFY_REG(TIMx->CR1, TIM_CR1_OPM, OnePulseMode); +} + +/** + * @brief Get actual one pulse mode. + * @rmtoll CR1 OPM LL_TIM_GetOnePulseMode + * @param TIMx Timer instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_ONEPULSEMODE_SINGLE + * @arg @ref LL_TIM_ONEPULSEMODE_REPETITIVE + */ +__STATIC_INLINE uint32_t LL_TIM_GetOnePulseMode(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_BIT(TIMx->CR1, TIM_CR1_OPM)); +} + +/** + * @brief Set the timer counter counting mode. + * @note Macro IS_TIM_COUNTER_MODE_SELECT_INSTANCE(TIMx) can be used to + * check whether or not the counter mode selection feature is supported + * by a timer instance. + * @note Switching from Center Aligned counter mode to Edge counter mode (or reverse) + * requires a timer reset to avoid unexpected direction + * due to DIR bit readonly in center aligned mode. + * @rmtoll CR1 DIR LL_TIM_SetCounterMode\n + * CR1 CMS LL_TIM_SetCounterMode + * @param TIMx Timer instance + * @param CounterMode This parameter can be one of the following values: + * @arg @ref LL_TIM_COUNTERMODE_UP + * @arg @ref LL_TIM_COUNTERMODE_DOWN + * @arg @ref LL_TIM_COUNTERMODE_CENTER_UP + * @arg @ref LL_TIM_COUNTERMODE_CENTER_DOWN + * @arg @ref LL_TIM_COUNTERMODE_CENTER_UP_DOWN + * @retval None + */ +__STATIC_INLINE void LL_TIM_SetCounterMode(TIM_TypeDef *TIMx, uint32_t CounterMode) +{ + MODIFY_REG(TIMx->CR1, (TIM_CR1_DIR | TIM_CR1_CMS), CounterMode); +} + +/** + * @brief Get actual counter mode. + * @note Macro IS_TIM_COUNTER_MODE_SELECT_INSTANCE(TIMx) can be used to + * check whether or not the counter mode selection feature is supported + * by a timer instance. 
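+ * @note  Illustrative sketch for an instance that supports counter mode selection:
+ * @code
+ *        LL_TIM_SetCounterMode(TIM1, LL_TIM_COUNTERMODE_CENTER_UP);
+ *        // LL_TIM_GetCounterMode(TIM1) now returns LL_TIM_COUNTERMODE_CENTER_UP
+ * @endcode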
+ * @rmtoll CR1 DIR LL_TIM_GetCounterMode\n + * CR1 CMS LL_TIM_GetCounterMode + * @param TIMx Timer instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_COUNTERMODE_UP + * @arg @ref LL_TIM_COUNTERMODE_DOWN + * @arg @ref LL_TIM_COUNTERMODE_CENTER_UP + * @arg @ref LL_TIM_COUNTERMODE_CENTER_DOWN + * @arg @ref LL_TIM_COUNTERMODE_CENTER_UP_DOWN + */ +__STATIC_INLINE uint32_t LL_TIM_GetCounterMode(const TIM_TypeDef *TIMx) +{ + uint32_t counter_mode; + + counter_mode = (uint32_t)(READ_BIT(TIMx->CR1, TIM_CR1_CMS)); + + if (counter_mode == 0U) + { + counter_mode = (uint32_t)(READ_BIT(TIMx->CR1, TIM_CR1_DIR)); + } + + return counter_mode; +} + +/** + * @brief Enable auto-reload (ARR) preload. + * @rmtoll CR1 ARPE LL_TIM_EnableARRPreload + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableARRPreload(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->CR1, TIM_CR1_ARPE); +} + +/** + * @brief Disable auto-reload (ARR) preload. + * @rmtoll CR1 ARPE LL_TIM_DisableARRPreload + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableARRPreload(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->CR1, TIM_CR1_ARPE); +} + +/** + * @brief Indicates whether auto-reload (ARR) preload is enabled. + * @rmtoll CR1 ARPE LL_TIM_IsEnabledARRPreload + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledARRPreload(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->CR1, TIM_CR1_ARPE) == (TIM_CR1_ARPE)) ? 1UL : 0UL); +} + +/** + * @brief Set the division ratio between the timer clock and the sampling clock used by the dead-time generators + * (when supported) and the digital filters. + * @note Macro IS_TIM_CLOCK_DIVISION_INSTANCE(TIMx) can be used to check + * whether or not the clock division feature is supported by the timer + * instance. + * @rmtoll CR1 CKD LL_TIM_SetClockDivision + * @param TIMx Timer instance + * @param ClockDivision This parameter can be one of the following values: + * @arg @ref LL_TIM_CLOCKDIVISION_DIV1 + * @arg @ref LL_TIM_CLOCKDIVISION_DIV2 + * @arg @ref LL_TIM_CLOCKDIVISION_DIV4 + * @retval None + */ +__STATIC_INLINE void LL_TIM_SetClockDivision(TIM_TypeDef *TIMx, uint32_t ClockDivision) +{ + MODIFY_REG(TIMx->CR1, TIM_CR1_CKD, ClockDivision); +} + +/** + * @brief Get the actual division ratio between the timer clock and the sampling clock used by the dead-time + * generators (when supported) and the digital filters. + * @note Macro IS_TIM_CLOCK_DIVISION_INSTANCE(TIMx) can be used to check + * whether or not the clock division feature is supported by the timer + * instance. + * @rmtoll CR1 CKD LL_TIM_GetClockDivision + * @param TIMx Timer instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_CLOCKDIVISION_DIV1 + * @arg @ref LL_TIM_CLOCKDIVISION_DIV2 + * @arg @ref LL_TIM_CLOCKDIVISION_DIV4 + */ +__STATIC_INLINE uint32_t LL_TIM_GetClockDivision(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_BIT(TIMx->CR1, TIM_CR1_CKD)); +} + +/** + * @brief Set the counter value. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. 
+ * @rmtoll CNT CNT LL_TIM_SetCounter + * @param TIMx Timer instance + * @param Counter Counter value (between Min_Data=0 and Max_Data=0xFFFF or 0xFFFFFFFF) + * @retval None + */ +__STATIC_INLINE void LL_TIM_SetCounter(TIM_TypeDef *TIMx, uint32_t Counter) +{ + WRITE_REG(TIMx->CNT, Counter); +} + +/** + * @brief Get the counter value. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @rmtoll CNT CNT LL_TIM_GetCounter + * @param TIMx Timer instance + * @retval Counter value (between Min_Data=0 and Max_Data=0xFFFF or 0xFFFFFFFF) + */ +__STATIC_INLINE uint32_t LL_TIM_GetCounter(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_REG(TIMx->CNT)); +} + +/** + * @brief Get the current direction of the counter + * @rmtoll CR1 DIR LL_TIM_GetDirection + * @param TIMx Timer instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_COUNTERDIRECTION_UP + * @arg @ref LL_TIM_COUNTERDIRECTION_DOWN + */ +__STATIC_INLINE uint32_t LL_TIM_GetDirection(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_BIT(TIMx->CR1, TIM_CR1_DIR)); +} + +/** + * @brief Set the prescaler value. + * @note The counter clock frequency CK_CNT is equal to fCK_PSC / (PSC[15:0] + 1). + * @note The prescaler can be changed on the fly as this control register is buffered. The new + * prescaler ratio is taken into account at the next update event. + * @note Helper macro @ref __LL_TIM_CALC_PSC can be used to calculate the Prescaler parameter + * @rmtoll PSC PSC LL_TIM_SetPrescaler + * @param TIMx Timer instance + * @param Prescaler between Min_Data=0 and Max_Data=65535 + * @retval None + */ +__STATIC_INLINE void LL_TIM_SetPrescaler(TIM_TypeDef *TIMx, uint32_t Prescaler) +{ + WRITE_REG(TIMx->PSC, Prescaler); +} + +/** + * @brief Get the prescaler value. + * @rmtoll PSC PSC LL_TIM_GetPrescaler + * @param TIMx Timer instance + * @retval Prescaler value between Min_Data=0 and Max_Data=65535 + */ +__STATIC_INLINE uint32_t LL_TIM_GetPrescaler(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_REG(TIMx->PSC)); +} + +/** + * @brief Set the auto-reload value. + * @note The counter is blocked while the auto-reload value is null. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @note Helper macro @ref __LL_TIM_CALC_ARR can be used to calculate the AutoReload parameter + * @rmtoll ARR ARR LL_TIM_SetAutoReload + * @param TIMx Timer instance + * @param AutoReload between Min_Data=0 and Max_Data=65535 + * @retval None + */ +__STATIC_INLINE void LL_TIM_SetAutoReload(TIM_TypeDef *TIMx, uint32_t AutoReload) +{ + WRITE_REG(TIMx->ARR, AutoReload); +} + +/** + * @brief Get the auto-reload value. + * @rmtoll ARR ARR LL_TIM_GetAutoReload + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @param TIMx Timer instance + * @retval Auto-reload value + */ +__STATIC_INLINE uint32_t LL_TIM_GetAutoReload(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_REG(TIMx->ARR)); +} + +/** + * @brief Set the repetition counter value. + * @note Macro IS_TIM_REPETITION_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a repetition counter. + * @rmtoll RCR REP LL_TIM_SetRepetitionCounter + * @param TIMx Timer instance + * @param RepetitionCounter between Min_Data=0 and Max_Data=255 or 65535 for advanced timer. 
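+ * @note  Illustrative example for an advanced timer: with the value below an update event
+ *        is generated every 10 PWM periods in edge-aligned mode.
+ * @code
+ *        LL_TIM_SetRepetitionCounter(TIM1, 9U);
+ * @endcode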
+ * @retval None + */ +__STATIC_INLINE void LL_TIM_SetRepetitionCounter(TIM_TypeDef *TIMx, uint32_t RepetitionCounter) +{ + WRITE_REG(TIMx->RCR, RepetitionCounter); +} + +/** + * @brief Get the repetition counter value. + * @note Macro IS_TIM_REPETITION_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a repetition counter. + * @rmtoll RCR REP LL_TIM_GetRepetitionCounter + * @param TIMx Timer instance + * @retval Repetition counter value + */ +__STATIC_INLINE uint32_t LL_TIM_GetRepetitionCounter(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_REG(TIMx->RCR)); +} + +/** + * @} + */ + +/** @defgroup TIM_LL_EF_Capture_Compare Capture Compare configuration + * @{ + */ +/** + * @brief Enable the capture/compare control bits (CCxE, CCxNE and OCxM) preload. + * @note CCxE, CCxNE and OCxM bits are preloaded, after having been written, + * they are updated only when a commutation event (COM) occurs. + * @note Only on channels that have a complementary output. + * @note Macro IS_TIM_COMMUTATION_EVENT_INSTANCE(TIMx) can be used to check + * whether or not a timer instance is able to generate a commutation event. + * @rmtoll CR2 CCPC LL_TIM_CC_EnablePreload + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_CC_EnablePreload(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->CR2, TIM_CR2_CCPC); +} + +/** + * @brief Disable the capture/compare control bits (CCxE, CCxNE and OCxM) preload. + * @note Macro IS_TIM_COMMUTATION_EVENT_INSTANCE(TIMx) can be used to check + * whether or not a timer instance is able to generate a commutation event. + * @rmtoll CR2 CCPC LL_TIM_CC_DisablePreload + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_CC_DisablePreload(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->CR2, TIM_CR2_CCPC); +} + +/** + * @brief Indicates whether the capture/compare control bits (CCxE, CCxNE and OCxM) preload is enabled. + * @rmtoll CR2 CCPC LL_TIM_CC_IsEnabledPreload + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_CC_IsEnabledPreload(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->CR2, TIM_CR2_CCPC) == (TIM_CR2_CCPC)) ? 1UL : 0UL); +} + +/** + * @brief Set the updated source of the capture/compare control bits (CCxE, CCxNE and OCxM). + * @note Macro IS_TIM_COMMUTATION_EVENT_INSTANCE(TIMx) can be used to check + * whether or not a timer instance is able to generate a commutation event. + * @rmtoll CR2 CCUS LL_TIM_CC_SetUpdate + * @param TIMx Timer instance + * @param CCUpdateSource This parameter can be one of the following values: + * @arg @ref LL_TIM_CCUPDATESOURCE_COMG_ONLY + * @arg @ref LL_TIM_CCUPDATESOURCE_COMG_AND_TRGI + * @retval None + */ +__STATIC_INLINE void LL_TIM_CC_SetUpdate(TIM_TypeDef *TIMx, uint32_t CCUpdateSource) +{ + MODIFY_REG(TIMx->CR2, TIM_CR2_CCUS, CCUpdateSource); +} + +/** + * @brief Set the trigger of the capture/compare DMA request. + * @rmtoll CR2 CCDS LL_TIM_CC_SetDMAReqTrigger + * @param TIMx Timer instance + * @param DMAReqTrigger This parameter can be one of the following values: + * @arg @ref LL_TIM_CCDMAREQUEST_CC + * @arg @ref LL_TIM_CCDMAREQUEST_UPDATE + * @retval None + */ +__STATIC_INLINE void LL_TIM_CC_SetDMAReqTrigger(TIM_TypeDef *TIMx, uint32_t DMAReqTrigger) +{ + MODIFY_REG(TIMx->CR2, TIM_CR2_CCDS, DMAReqTrigger); +} + +/** + * @brief Get actual trigger of the capture/compare DMA request. 
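+ * @note  Illustrative sketch: route the capture/compare DMA requests to the update event.
+ * @code
+ *        LL_TIM_CC_SetDMAReqTrigger(TIM1, LL_TIM_CCDMAREQUEST_UPDATE);
+ * @endcode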
+ * @rmtoll CR2 CCDS LL_TIM_CC_GetDMAReqTrigger + * @param TIMx Timer instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_CCDMAREQUEST_CC + * @arg @ref LL_TIM_CCDMAREQUEST_UPDATE + */ +__STATIC_INLINE uint32_t LL_TIM_CC_GetDMAReqTrigger(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_BIT(TIMx->CR2, TIM_CR2_CCDS)); +} + +/** + * @brief Set the lock level to freeze the + * configuration of several capture/compare parameters. + * @note Macro IS_TIM_BREAK_INSTANCE(TIMx) can be used to check whether or not + * the lock mechanism is supported by a timer instance. + * @rmtoll BDTR LOCK LL_TIM_CC_SetLockLevel + * @param TIMx Timer instance + * @param LockLevel This parameter can be one of the following values: + * @arg @ref LL_TIM_LOCKLEVEL_OFF + * @arg @ref LL_TIM_LOCKLEVEL_1 + * @arg @ref LL_TIM_LOCKLEVEL_2 + * @arg @ref LL_TIM_LOCKLEVEL_3 + * @retval None + */ +__STATIC_INLINE void LL_TIM_CC_SetLockLevel(TIM_TypeDef *TIMx, uint32_t LockLevel) +{ + MODIFY_REG(TIMx->BDTR, TIM_BDTR_LOCK, LockLevel); +} + +/** + * @brief Enable capture/compare channels. + * @rmtoll CCER CC1E LL_TIM_CC_EnableChannel\n + * CCER CC1NE LL_TIM_CC_EnableChannel\n + * CCER CC2E LL_TIM_CC_EnableChannel\n + * CCER CC2NE LL_TIM_CC_EnableChannel\n + * CCER CC3E LL_TIM_CC_EnableChannel\n + * CCER CC3NE LL_TIM_CC_EnableChannel\n + * CCER CC4E LL_TIM_CC_EnableChannel + * @param TIMx Timer instance + * @param Channels This parameter can be a combination of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH1N + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH2N + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH3N + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval None + */ +__STATIC_INLINE void LL_TIM_CC_EnableChannel(TIM_TypeDef *TIMx, uint32_t Channels) +{ + SET_BIT(TIMx->CCER, Channels); +} + +/** + * @brief Disable capture/compare channels. + * @rmtoll CCER CC1E LL_TIM_CC_DisableChannel\n + * CCER CC1NE LL_TIM_CC_DisableChannel\n + * CCER CC2E LL_TIM_CC_DisableChannel\n + * CCER CC2NE LL_TIM_CC_DisableChannel\n + * CCER CC3E LL_TIM_CC_DisableChannel\n + * CCER CC3NE LL_TIM_CC_DisableChannel\n + * CCER CC4E LL_TIM_CC_DisableChannel + * @param TIMx Timer instance + * @param Channels This parameter can be a combination of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH1N + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH2N + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH3N + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval None + */ +__STATIC_INLINE void LL_TIM_CC_DisableChannel(TIM_TypeDef *TIMx, uint32_t Channels) +{ + CLEAR_BIT(TIMx->CCER, Channels); +} + +/** + * @brief Indicate whether channel(s) is(are) enabled. + * @rmtoll CCER CC1E LL_TIM_CC_IsEnabledChannel\n + * CCER CC1NE LL_TIM_CC_IsEnabledChannel\n + * CCER CC2E LL_TIM_CC_IsEnabledChannel\n + * CCER CC2NE LL_TIM_CC_IsEnabledChannel\n + * CCER CC3E LL_TIM_CC_IsEnabledChannel\n + * CCER CC3NE LL_TIM_CC_IsEnabledChannel\n + * CCER CC4E LL_TIM_CC_IsEnabledChannel + * @param TIMx Timer instance + * @param Channels This parameter can be a combination of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH1N + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH2N + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH3N + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval State of bit (1 or 0). 
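+ * @note  Illustrative sketch: enable channel 1 together with its complementary output and
+ *        verify the result.
+ * @code
+ *        LL_TIM_CC_EnableChannel(TIM1, LL_TIM_CHANNEL_CH1 | LL_TIM_CHANNEL_CH1N);
+ *        if (LL_TIM_CC_IsEnabledChannel(TIM1, LL_TIM_CHANNEL_CH1 | LL_TIM_CHANNEL_CH1N))
+ *        {
+ *          // both outputs are now enabled
+ *        }
+ * @endcode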
+ */ +__STATIC_INLINE uint32_t LL_TIM_CC_IsEnabledChannel(const TIM_TypeDef *TIMx, uint32_t Channels) +{ + return ((READ_BIT(TIMx->CCER, Channels) == (Channels)) ? 1UL : 0UL); +} + +/** + * @} + */ + +/** @defgroup TIM_LL_EF_Output_Channel Output channel configuration + * @{ + */ +/** + * @brief Configure an output channel. + * @rmtoll CCMR1 CC1S LL_TIM_OC_ConfigOutput\n + * CCMR1 CC2S LL_TIM_OC_ConfigOutput\n + * CCMR2 CC3S LL_TIM_OC_ConfigOutput\n + * CCMR2 CC4S LL_TIM_OC_ConfigOutput\n + * CCER CC1P LL_TIM_OC_ConfigOutput\n + * CCER CC2P LL_TIM_OC_ConfigOutput\n + * CCER CC3P LL_TIM_OC_ConfigOutput\n + * CCER CC4P LL_TIM_OC_ConfigOutput\n + * CR2 OIS1 LL_TIM_OC_ConfigOutput\n + * CR2 OIS2 LL_TIM_OC_ConfigOutput\n + * CR2 OIS3 LL_TIM_OC_ConfigOutput\n + * CR2 OIS4 LL_TIM_OC_ConfigOutput + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @param Configuration This parameter must be a combination of all the following values: + * @arg @ref LL_TIM_OCPOLARITY_HIGH or @ref LL_TIM_OCPOLARITY_LOW + * @arg @ref LL_TIM_OCIDLESTATE_LOW or @ref LL_TIM_OCIDLESTATE_HIGH + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_ConfigOutput(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t Configuration) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + CLEAR_BIT(*pReg, (TIM_CCMR1_CC1S << SHIFT_TAB_OCxx[iChannel])); + MODIFY_REG(TIMx->CCER, (TIM_CCER_CC1P << SHIFT_TAB_CCxP[iChannel]), + (Configuration & TIM_CCER_CC1P) << SHIFT_TAB_CCxP[iChannel]); + MODIFY_REG(TIMx->CR2, (TIM_CR2_OIS1 << SHIFT_TAB_OISx[iChannel]), + (Configuration & TIM_CR2_OIS1) << SHIFT_TAB_OISx[iChannel]); +} + +/** + * @brief Define the behavior of the output reference signal OCxREF from which + * OCx and OCxN (when relevant) are derived. + * @rmtoll CCMR1 OC1M LL_TIM_OC_SetMode\n + * CCMR1 OC2M LL_TIM_OC_SetMode\n + * CCMR2 OC3M LL_TIM_OC_SetMode\n + * CCMR2 OC4M LL_TIM_OC_SetMode + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @param Mode This parameter can be one of the following values: + * @arg @ref LL_TIM_OCMODE_FROZEN + * @arg @ref LL_TIM_OCMODE_ACTIVE + * @arg @ref LL_TIM_OCMODE_INACTIVE + * @arg @ref LL_TIM_OCMODE_TOGGLE + * @arg @ref LL_TIM_OCMODE_FORCED_INACTIVE + * @arg @ref LL_TIM_OCMODE_FORCED_ACTIVE + * @arg @ref LL_TIM_OCMODE_PWM1 + * @arg @ref LL_TIM_OCMODE_PWM2 + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_SetMode(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t Mode) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + MODIFY_REG(*pReg, ((TIM_CCMR1_OC1M | TIM_CCMR1_CC1S) << SHIFT_TAB_OCxx[iChannel]), Mode << SHIFT_TAB_OCxx[iChannel]); +} + +/** + * @brief Get the output compare mode of an output channel. 
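+ * @note  Illustrative sketch: select PWM mode 1 on channel 1 and read the mode back.
+ * @code
+ *        LL_TIM_OC_SetMode(TIM3, LL_TIM_CHANNEL_CH1, LL_TIM_OCMODE_PWM1);
+ *        // LL_TIM_OC_GetMode(TIM3, LL_TIM_CHANNEL_CH1) now returns LL_TIM_OCMODE_PWM1
+ * @endcode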
+ * @rmtoll CCMR1 OC1M LL_TIM_OC_GetMode\n + * CCMR1 OC2M LL_TIM_OC_GetMode\n + * CCMR2 OC3M LL_TIM_OC_GetMode\n + * CCMR2 OC4M LL_TIM_OC_GetMode + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_OCMODE_FROZEN + * @arg @ref LL_TIM_OCMODE_ACTIVE + * @arg @ref LL_TIM_OCMODE_INACTIVE + * @arg @ref LL_TIM_OCMODE_TOGGLE + * @arg @ref LL_TIM_OCMODE_FORCED_INACTIVE + * @arg @ref LL_TIM_OCMODE_FORCED_ACTIVE + * @arg @ref LL_TIM_OCMODE_PWM1 + * @arg @ref LL_TIM_OCMODE_PWM2 + */ +__STATIC_INLINE uint32_t LL_TIM_OC_GetMode(const TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + const __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + return (READ_BIT(*pReg, ((TIM_CCMR1_OC1M | TIM_CCMR1_CC1S) << SHIFT_TAB_OCxx[iChannel])) >> SHIFT_TAB_OCxx[iChannel]); +} + +/** + * @brief Set the polarity of an output channel. + * @rmtoll CCER CC1P LL_TIM_OC_SetPolarity\n + * CCER CC1NP LL_TIM_OC_SetPolarity\n + * CCER CC2P LL_TIM_OC_SetPolarity\n + * CCER CC2NP LL_TIM_OC_SetPolarity\n + * CCER CC3P LL_TIM_OC_SetPolarity\n + * CCER CC3NP LL_TIM_OC_SetPolarity\n + * CCER CC4P LL_TIM_OC_SetPolarity + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH1N + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH2N + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH3N + * @arg @ref LL_TIM_CHANNEL_CH4 + * @param Polarity This parameter can be one of the following values: + * @arg @ref LL_TIM_OCPOLARITY_HIGH + * @arg @ref LL_TIM_OCPOLARITY_LOW + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_SetPolarity(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t Polarity) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + MODIFY_REG(TIMx->CCER, (TIM_CCER_CC1P << SHIFT_TAB_CCxP[iChannel]), Polarity << SHIFT_TAB_CCxP[iChannel]); +} + +/** + * @brief Get the polarity of an output channel. + * @rmtoll CCER CC1P LL_TIM_OC_GetPolarity\n + * CCER CC1NP LL_TIM_OC_GetPolarity\n + * CCER CC2P LL_TIM_OC_GetPolarity\n + * CCER CC2NP LL_TIM_OC_GetPolarity\n + * CCER CC3P LL_TIM_OC_GetPolarity\n + * CCER CC3NP LL_TIM_OC_GetPolarity\n + * CCER CC4P LL_TIM_OC_GetPolarity + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH1N + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH2N + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH3N + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_OCPOLARITY_HIGH + * @arg @ref LL_TIM_OCPOLARITY_LOW + */ +__STATIC_INLINE uint32_t LL_TIM_OC_GetPolarity(const TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + return (READ_BIT(TIMx->CCER, (TIM_CCER_CC1P << SHIFT_TAB_CCxP[iChannel])) >> SHIFT_TAB_CCxP[iChannel]); +} + +/** + * @brief Set the IDLE state of an output channel + * @note This function is significant only for the timer instances + * supporting the break feature. 
Macro IS_TIM_BREAK_INSTANCE(TIMx) + * can be used to check whether or not a timer instance provides + * a break input. + * @rmtoll CR2 OIS1 LL_TIM_OC_SetIdleState\n + * CR2 OIS1N LL_TIM_OC_SetIdleState\n + * CR2 OIS2 LL_TIM_OC_SetIdleState\n + * CR2 OIS2N LL_TIM_OC_SetIdleState\n + * CR2 OIS3 LL_TIM_OC_SetIdleState\n + * CR2 OIS3N LL_TIM_OC_SetIdleState\n + * CR2 OIS4 LL_TIM_OC_SetIdleState + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH1N + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH2N + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH3N + * @arg @ref LL_TIM_CHANNEL_CH4 + * @param IdleState This parameter can be one of the following values: + * @arg @ref LL_TIM_OCIDLESTATE_LOW + * @arg @ref LL_TIM_OCIDLESTATE_HIGH + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_SetIdleState(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t IdleState) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + MODIFY_REG(TIMx->CR2, (TIM_CR2_OIS1 << SHIFT_TAB_OISx[iChannel]), IdleState << SHIFT_TAB_OISx[iChannel]); +} + +/** + * @brief Get the IDLE state of an output channel + * @rmtoll CR2 OIS1 LL_TIM_OC_GetIdleState\n + * CR2 OIS1N LL_TIM_OC_GetIdleState\n + * CR2 OIS2 LL_TIM_OC_GetIdleState\n + * CR2 OIS2N LL_TIM_OC_GetIdleState\n + * CR2 OIS3 LL_TIM_OC_GetIdleState\n + * CR2 OIS3N LL_TIM_OC_GetIdleState\n + * CR2 OIS4 LL_TIM_OC_GetIdleState + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH1N + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH2N + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH3N + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_OCIDLESTATE_LOW + * @arg @ref LL_TIM_OCIDLESTATE_HIGH + */ +__STATIC_INLINE uint32_t LL_TIM_OC_GetIdleState(const TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + return (READ_BIT(TIMx->CR2, (TIM_CR2_OIS1 << SHIFT_TAB_OISx[iChannel])) >> SHIFT_TAB_OISx[iChannel]); +} + +/** + * @brief Enable fast mode for the output channel. + * @note Acts only if the channel is configured in PWM1 or PWM2 mode. + * @rmtoll CCMR1 OC1FE LL_TIM_OC_EnableFast\n + * CCMR1 OC2FE LL_TIM_OC_EnableFast\n + * CCMR2 OC3FE LL_TIM_OC_EnableFast\n + * CCMR2 OC4FE LL_TIM_OC_EnableFast + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_EnableFast(TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + SET_BIT(*pReg, (TIM_CCMR1_OC1FE << SHIFT_TAB_OCxx[iChannel])); + +} + +/** + * @brief Disable fast mode for the output channel. 
+ * @rmtoll CCMR1 OC1FE LL_TIM_OC_DisableFast\n + * CCMR1 OC2FE LL_TIM_OC_DisableFast\n + * CCMR2 OC3FE LL_TIM_OC_DisableFast\n + * CCMR2 OC4FE LL_TIM_OC_DisableFast + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_DisableFast(TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + CLEAR_BIT(*pReg, (TIM_CCMR1_OC1FE << SHIFT_TAB_OCxx[iChannel])); + +} + +/** + * @brief Indicates whether fast mode is enabled for the output channel. + * @rmtoll CCMR1 OC1FE LL_TIM_OC_IsEnabledFast\n + * CCMR1 OC2FE LL_TIM_OC_IsEnabledFast\n + * CCMR2 OC3FE LL_TIM_OC_IsEnabledFast\n + * CCMR2 OC4FE LL_TIM_OC_IsEnabledFast\n + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_OC_IsEnabledFast(const TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + const __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + uint32_t bitfield = TIM_CCMR1_OC1FE << SHIFT_TAB_OCxx[iChannel]; + return ((READ_BIT(*pReg, bitfield) == bitfield) ? 1UL : 0UL); +} + +/** + * @brief Enable compare register (TIMx_CCRx) preload for the output channel. + * @rmtoll CCMR1 OC1PE LL_TIM_OC_EnablePreload\n + * CCMR1 OC2PE LL_TIM_OC_EnablePreload\n + * CCMR2 OC3PE LL_TIM_OC_EnablePreload\n + * CCMR2 OC4PE LL_TIM_OC_EnablePreload + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_EnablePreload(TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + SET_BIT(*pReg, (TIM_CCMR1_OC1PE << SHIFT_TAB_OCxx[iChannel])); +} + +/** + * @brief Disable compare register (TIMx_CCRx) preload for the output channel. + * @rmtoll CCMR1 OC1PE LL_TIM_OC_DisablePreload\n + * CCMR1 OC2PE LL_TIM_OC_DisablePreload\n + * CCMR2 OC3PE LL_TIM_OC_DisablePreload\n + * CCMR2 OC4PE LL_TIM_OC_DisablePreload + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_DisablePreload(TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + CLEAR_BIT(*pReg, (TIM_CCMR1_OC1PE << SHIFT_TAB_OCxx[iChannel])); +} + +/** + * @brief Indicates whether compare register (TIMx_CCRx) preload is enabled for the output channel. 
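A minimal usage sketch of the fast-enable and preload controls above, assuming a general-purpose timer instance such as TIM3 is available and already counting, and that LL_TIM_OC_SetMode (defined earlier in this header) selects a PWM mode:

#include "stm32f4xx_ll_tim.h"

static void oc_preload_sketch(void)
{
  /* PWM1 mode on channel 1; CCR1 preload so duty updates take effect at the next update event */
  LL_TIM_OC_SetMode(TIM3, LL_TIM_CHANNEL_CH1, LL_TIM_OCMODE_PWM1);
  LL_TIM_OC_EnablePreload(TIM3, LL_TIM_CHANNEL_CH1);
  LL_TIM_OC_EnableFast(TIM3, LL_TIM_CHANNEL_CH1); /* only has an effect in PWM1/PWM2 mode */
}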
+ * @rmtoll CCMR1 OC1PE LL_TIM_OC_IsEnabledPreload\n + * CCMR1 OC2PE LL_TIM_OC_IsEnabledPreload\n + * CCMR2 OC3PE LL_TIM_OC_IsEnabledPreload\n + * CCMR2 OC4PE LL_TIM_OC_IsEnabledPreload\n + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_OC_IsEnabledPreload(const TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + const __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + uint32_t bitfield = TIM_CCMR1_OC1PE << SHIFT_TAB_OCxx[iChannel]; + return ((READ_BIT(*pReg, bitfield) == bitfield) ? 1UL : 0UL); +} + +/** + * @brief Enable clearing the output channel on an external event. + * @note This function can only be used in Output compare and PWM modes. It does not work in Forced mode. + * @note Macro IS_TIM_OCXREF_CLEAR_INSTANCE(TIMx) can be used to check whether + * or not a timer instance can clear the OCxREF signal on an external event. + * @rmtoll CCMR1 OC1CE LL_TIM_OC_EnableClear\n + * CCMR1 OC2CE LL_TIM_OC_EnableClear\n + * CCMR2 OC3CE LL_TIM_OC_EnableClear\n + * CCMR2 OC4CE LL_TIM_OC_EnableClear + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_EnableClear(TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + SET_BIT(*pReg, (TIM_CCMR1_OC1CE << SHIFT_TAB_OCxx[iChannel])); +} + +/** + * @brief Disable clearing the output channel on an external event. + * @note Macro IS_TIM_OCXREF_CLEAR_INSTANCE(TIMx) can be used to check whether + * or not a timer instance can clear the OCxREF signal on an external event. + * @rmtoll CCMR1 OC1CE LL_TIM_OC_DisableClear\n + * CCMR1 OC2CE LL_TIM_OC_DisableClear\n + * CCMR2 OC3CE LL_TIM_OC_DisableClear\n + * CCMR2 OC4CE LL_TIM_OC_DisableClear + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_DisableClear(TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + CLEAR_BIT(*pReg, (TIM_CCMR1_OC1CE << SHIFT_TAB_OCxx[iChannel])); +} + +/** + * @brief Indicates clearing the output channel on an external event is enabled for the output channel. + * @note This function enables clearing the output channel on an external event. + * @note This function can only be used in Output compare and PWM modes. It does not work in Forced mode. + * @note Macro IS_TIM_OCXREF_CLEAR_INSTANCE(TIMx) can be used to check whether + * or not a timer instance can clear the OCxREF signal on an external event. 
+ * @rmtoll CCMR1 OC1CE LL_TIM_OC_IsEnabledClear\n + * CCMR1 OC2CE LL_TIM_OC_IsEnabledClear\n + * CCMR2 OC3CE LL_TIM_OC_IsEnabledClear\n + * CCMR2 OC4CE LL_TIM_OC_IsEnabledClear\n + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_OC_IsEnabledClear(const TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + const __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + uint32_t bitfield = TIM_CCMR1_OC1CE << SHIFT_TAB_OCxx[iChannel]; + return ((READ_BIT(*pReg, bitfield) == bitfield) ? 1UL : 0UL); +} + +/** + * @brief Set the dead-time delay (delay inserted between the rising edge of the OCxREF signal and the rising edge of + * the Ocx and OCxN signals). + * @note Macro IS_TIM_BREAK_INSTANCE(TIMx) can be used to check whether or not + * dead-time insertion feature is supported by a timer instance. + * @note Helper macro @ref __LL_TIM_CALC_DEADTIME can be used to calculate the DeadTime parameter + * @rmtoll BDTR DTG LL_TIM_OC_SetDeadTime + * @param TIMx Timer instance + * @param DeadTime between Min_Data=0 and Max_Data=255 + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_SetDeadTime(TIM_TypeDef *TIMx, uint32_t DeadTime) +{ + MODIFY_REG(TIMx->BDTR, TIM_BDTR_DTG, DeadTime); +} + +/** + * @brief Set compare value for output channel 1 (TIMx_CCR1). + * @note In 32-bit timer implementations compare value can be between 0x00000000 and 0xFFFFFFFF. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @note Macro IS_TIM_CC1_INSTANCE(TIMx) can be used to check whether or not + * output channel 1 is supported by a timer instance. + * @rmtoll CCR1 CCR1 LL_TIM_OC_SetCompareCH1 + * @param TIMx Timer instance + * @param CompareValue between Min_Data=0 and Max_Data=65535 + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_SetCompareCH1(TIM_TypeDef *TIMx, uint32_t CompareValue) +{ + WRITE_REG(TIMx->CCR1, CompareValue); +} + +/** + * @brief Set compare value for output channel 2 (TIMx_CCR2). + * @note In 32-bit timer implementations compare value can be between 0x00000000 and 0xFFFFFFFF. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @note Macro IS_TIM_CC2_INSTANCE(TIMx) can be used to check whether or not + * output channel 2 is supported by a timer instance. + * @rmtoll CCR2 CCR2 LL_TIM_OC_SetCompareCH2 + * @param TIMx Timer instance + * @param CompareValue between Min_Data=0 and Max_Data=65535 + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_SetCompareCH2(TIM_TypeDef *TIMx, uint32_t CompareValue) +{ + WRITE_REG(TIMx->CCR2, CompareValue); +} + +/** + * @brief Set compare value for output channel 3 (TIMx_CCR3). + * @note In 32-bit timer implementations compare value can be between 0x00000000 and 0xFFFFFFFF. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @note Macro IS_TIM_CC3_INSTANCE(TIMx) can be used to check whether or not + * output channel is supported by a timer instance. 
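A hedged sketch of the dead-time and compare-value setters above, assuming an advanced-control timer such as TIM1 (break/dead-time capable) and a hand-picked raw DTG value; the __LL_TIM_CALC_DEADTIME helper mentioned above can be used instead of the constant:

#include "stm32f4xx_ll_tim.h"

static void deadtime_sketch(void)
{
  LL_TIM_OC_SetDeadTime(TIM1, 100U);   /* raw DTG encoding, 0..255 */
  LL_TIM_OC_SetCompareCH1(TIM1, 500U); /* duty cycle expressed in counter ticks */
}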
+ * @rmtoll CCR3 CCR3 LL_TIM_OC_SetCompareCH3 + * @param TIMx Timer instance + * @param CompareValue between Min_Data=0 and Max_Data=65535 + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_SetCompareCH3(TIM_TypeDef *TIMx, uint32_t CompareValue) +{ + WRITE_REG(TIMx->CCR3, CompareValue); +} + +/** + * @brief Set compare value for output channel 4 (TIMx_CCR4). + * @note In 32-bit timer implementations compare value can be between 0x00000000 and 0xFFFFFFFF. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @note Macro IS_TIM_CC4_INSTANCE(TIMx) can be used to check whether or not + * output channel 4 is supported by a timer instance. + * @rmtoll CCR4 CCR4 LL_TIM_OC_SetCompareCH4 + * @param TIMx Timer instance + * @param CompareValue between Min_Data=0 and Max_Data=65535 + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_SetCompareCH4(TIM_TypeDef *TIMx, uint32_t CompareValue) +{ + WRITE_REG(TIMx->CCR4, CompareValue); +} + +/** + * @brief Get compare value (TIMx_CCR1) set for output channel 1. + * @note In 32-bit timer implementations returned compare value can be between 0x00000000 and 0xFFFFFFFF. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @note Macro IS_TIM_CC1_INSTANCE(TIMx) can be used to check whether or not + * output channel 1 is supported by a timer instance. + * @rmtoll CCR1 CCR1 LL_TIM_OC_GetCompareCH1 + * @param TIMx Timer instance + * @retval CompareValue (between Min_Data=0 and Max_Data=65535) + */ +__STATIC_INLINE uint32_t LL_TIM_OC_GetCompareCH1(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_REG(TIMx->CCR1)); +} + +/** + * @brief Get compare value (TIMx_CCR2) set for output channel 2. + * @note In 32-bit timer implementations returned compare value can be between 0x00000000 and 0xFFFFFFFF. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @note Macro IS_TIM_CC2_INSTANCE(TIMx) can be used to check whether or not + * output channel 2 is supported by a timer instance. + * @rmtoll CCR2 CCR2 LL_TIM_OC_GetCompareCH2 + * @param TIMx Timer instance + * @retval CompareValue (between Min_Data=0 and Max_Data=65535) + */ +__STATIC_INLINE uint32_t LL_TIM_OC_GetCompareCH2(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_REG(TIMx->CCR2)); +} + +/** + * @brief Get compare value (TIMx_CCR3) set for output channel 3. + * @note In 32-bit timer implementations returned compare value can be between 0x00000000 and 0xFFFFFFFF. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @note Macro IS_TIM_CC3_INSTANCE(TIMx) can be used to check whether or not + * output channel 3 is supported by a timer instance. + * @rmtoll CCR3 CCR3 LL_TIM_OC_GetCompareCH3 + * @param TIMx Timer instance + * @retval CompareValue (between Min_Data=0 and Max_Data=65535) + */ +__STATIC_INLINE uint32_t LL_TIM_OC_GetCompareCH3(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_REG(TIMx->CCR3)); +} + +/** + * @brief Get compare value (TIMx_CCR4) set for output channel 4. + * @note In 32-bit timer implementations returned compare value can be between 0x00000000 and 0xFFFFFFFF. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. 
+ * @note Macro IS_TIM_CC4_INSTANCE(TIMx) can be used to check whether or not + * output channel 4 is supported by a timer instance. + * @rmtoll CCR4 CCR4 LL_TIM_OC_GetCompareCH4 + * @param TIMx Timer instance + * @retval CompareValue (between Min_Data=0 and Max_Data=65535) + */ +__STATIC_INLINE uint32_t LL_TIM_OC_GetCompareCH4(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_REG(TIMx->CCR4)); +} + +/** + * @} + */ + +/** @defgroup TIM_LL_EF_Input_Channel Input channel configuration + * @{ + */ +/** + * @brief Configure input channel. + * @rmtoll CCMR1 CC1S LL_TIM_IC_Config\n + * CCMR1 IC1PSC LL_TIM_IC_Config\n + * CCMR1 IC1F LL_TIM_IC_Config\n + * CCMR1 CC2S LL_TIM_IC_Config\n + * CCMR1 IC2PSC LL_TIM_IC_Config\n + * CCMR1 IC2F LL_TIM_IC_Config\n + * CCMR2 CC3S LL_TIM_IC_Config\n + * CCMR2 IC3PSC LL_TIM_IC_Config\n + * CCMR2 IC3F LL_TIM_IC_Config\n + * CCMR2 CC4S LL_TIM_IC_Config\n + * CCMR2 IC4PSC LL_TIM_IC_Config\n + * CCMR2 IC4F LL_TIM_IC_Config\n + * CCER CC1P LL_TIM_IC_Config\n + * CCER CC1NP LL_TIM_IC_Config\n + * CCER CC2P LL_TIM_IC_Config\n + * CCER CC2NP LL_TIM_IC_Config\n + * CCER CC3P LL_TIM_IC_Config\n + * CCER CC3NP LL_TIM_IC_Config\n + * CCER CC4P LL_TIM_IC_Config\n + * CCER CC4NP LL_TIM_IC_Config + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @param Configuration This parameter must be a combination of all the following values: + * @arg @ref LL_TIM_ACTIVEINPUT_DIRECTTI or @ref LL_TIM_ACTIVEINPUT_INDIRECTTI or @ref LL_TIM_ACTIVEINPUT_TRC + * @arg @ref LL_TIM_ICPSC_DIV1 or ... or @ref LL_TIM_ICPSC_DIV8 + * @arg @ref LL_TIM_IC_FILTER_FDIV1 or ... or @ref LL_TIM_IC_FILTER_FDIV32_N8 + * @arg @ref LL_TIM_IC_POLARITY_RISING or @ref LL_TIM_IC_POLARITY_FALLING or @ref LL_TIM_IC_POLARITY_BOTHEDGE + * @retval None + */ +__STATIC_INLINE void LL_TIM_IC_Config(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t Configuration) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + MODIFY_REG(*pReg, ((TIM_CCMR1_IC1F | TIM_CCMR1_IC1PSC | TIM_CCMR1_CC1S) << SHIFT_TAB_ICxx[iChannel]), + ((Configuration >> 16U) & (TIM_CCMR1_IC1F | TIM_CCMR1_IC1PSC | TIM_CCMR1_CC1S)) \ + << SHIFT_TAB_ICxx[iChannel]); + MODIFY_REG(TIMx->CCER, ((TIM_CCER_CC1NP | TIM_CCER_CC1P) << SHIFT_TAB_CCxP[iChannel]), + (Configuration & (TIM_CCER_CC1NP | TIM_CCER_CC1P)) << SHIFT_TAB_CCxP[iChannel]); +} + +/** + * @brief Set the active input. 
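A short sketch of the combined input-channel configuration shown above, assuming TIM2 exists on the target device; the Configuration word ORs one value from each of the four groups:

#include "stm32f4xx_ll_tim.h"

static void ic_config_sketch(void)
{
  /* TI1 mapped directly to channel 1, no prescaler or filter, capture on rising edges */
  LL_TIM_IC_Config(TIM2, LL_TIM_CHANNEL_CH1,
                   LL_TIM_ACTIVEINPUT_DIRECTTI | LL_TIM_ICPSC_DIV1 |
                   LL_TIM_IC_FILTER_FDIV1 | LL_TIM_IC_POLARITY_RISING);
}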
+ * @rmtoll CCMR1 CC1S LL_TIM_IC_SetActiveInput\n + * CCMR1 CC2S LL_TIM_IC_SetActiveInput\n + * CCMR2 CC3S LL_TIM_IC_SetActiveInput\n + * CCMR2 CC4S LL_TIM_IC_SetActiveInput + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @param ICActiveInput This parameter can be one of the following values: + * @arg @ref LL_TIM_ACTIVEINPUT_DIRECTTI + * @arg @ref LL_TIM_ACTIVEINPUT_INDIRECTTI + * @arg @ref LL_TIM_ACTIVEINPUT_TRC + * @retval None + */ +__STATIC_INLINE void LL_TIM_IC_SetActiveInput(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t ICActiveInput) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + MODIFY_REG(*pReg, ((TIM_CCMR1_CC1S) << SHIFT_TAB_ICxx[iChannel]), (ICActiveInput >> 16U) << SHIFT_TAB_ICxx[iChannel]); +} + +/** + * @brief Get the current active input. + * @rmtoll CCMR1 CC1S LL_TIM_IC_GetActiveInput\n + * CCMR1 CC2S LL_TIM_IC_GetActiveInput\n + * CCMR2 CC3S LL_TIM_IC_GetActiveInput\n + * CCMR2 CC4S LL_TIM_IC_GetActiveInput + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_ACTIVEINPUT_DIRECTTI + * @arg @ref LL_TIM_ACTIVEINPUT_INDIRECTTI + * @arg @ref LL_TIM_ACTIVEINPUT_TRC + */ +__STATIC_INLINE uint32_t LL_TIM_IC_GetActiveInput(const TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + const __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + return ((READ_BIT(*pReg, ((TIM_CCMR1_CC1S) << SHIFT_TAB_ICxx[iChannel])) >> SHIFT_TAB_ICxx[iChannel]) << 16U); +} + +/** + * @brief Set the prescaler of input channel. + * @rmtoll CCMR1 IC1PSC LL_TIM_IC_SetPrescaler\n + * CCMR1 IC2PSC LL_TIM_IC_SetPrescaler\n + * CCMR2 IC3PSC LL_TIM_IC_SetPrescaler\n + * CCMR2 IC4PSC LL_TIM_IC_SetPrescaler + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @param ICPrescaler This parameter can be one of the following values: + * @arg @ref LL_TIM_ICPSC_DIV1 + * @arg @ref LL_TIM_ICPSC_DIV2 + * @arg @ref LL_TIM_ICPSC_DIV4 + * @arg @ref LL_TIM_ICPSC_DIV8 + * @retval None + */ +__STATIC_INLINE void LL_TIM_IC_SetPrescaler(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t ICPrescaler) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + MODIFY_REG(*pReg, ((TIM_CCMR1_IC1PSC) << SHIFT_TAB_ICxx[iChannel]), (ICPrescaler >> 16U) << SHIFT_TAB_ICxx[iChannel]); +} + +/** + * @brief Get the current prescaler value acting on an input channel. 
+ * @rmtoll CCMR1 IC1PSC LL_TIM_IC_GetPrescaler\n + * CCMR1 IC2PSC LL_TIM_IC_GetPrescaler\n + * CCMR2 IC3PSC LL_TIM_IC_GetPrescaler\n + * CCMR2 IC4PSC LL_TIM_IC_GetPrescaler + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_ICPSC_DIV1 + * @arg @ref LL_TIM_ICPSC_DIV2 + * @arg @ref LL_TIM_ICPSC_DIV4 + * @arg @ref LL_TIM_ICPSC_DIV8 + */ +__STATIC_INLINE uint32_t LL_TIM_IC_GetPrescaler(const TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + const __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + return ((READ_BIT(*pReg, ((TIM_CCMR1_IC1PSC) << SHIFT_TAB_ICxx[iChannel])) >> SHIFT_TAB_ICxx[iChannel]) << 16U); +} + +/** + * @brief Set the input filter duration. + * @rmtoll CCMR1 IC1F LL_TIM_IC_SetFilter\n + * CCMR1 IC2F LL_TIM_IC_SetFilter\n + * CCMR2 IC3F LL_TIM_IC_SetFilter\n + * CCMR2 IC4F LL_TIM_IC_SetFilter + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @param ICFilter This parameter can be one of the following values: + * @arg @ref LL_TIM_IC_FILTER_FDIV1 + * @arg @ref LL_TIM_IC_FILTER_FDIV1_N2 + * @arg @ref LL_TIM_IC_FILTER_FDIV1_N4 + * @arg @ref LL_TIM_IC_FILTER_FDIV1_N8 + * @arg @ref LL_TIM_IC_FILTER_FDIV2_N6 + * @arg @ref LL_TIM_IC_FILTER_FDIV2_N8 + * @arg @ref LL_TIM_IC_FILTER_FDIV4_N6 + * @arg @ref LL_TIM_IC_FILTER_FDIV4_N8 + * @arg @ref LL_TIM_IC_FILTER_FDIV8_N6 + * @arg @ref LL_TIM_IC_FILTER_FDIV8_N8 + * @arg @ref LL_TIM_IC_FILTER_FDIV16_N5 + * @arg @ref LL_TIM_IC_FILTER_FDIV16_N6 + * @arg @ref LL_TIM_IC_FILTER_FDIV16_N8 + * @arg @ref LL_TIM_IC_FILTER_FDIV32_N5 + * @arg @ref LL_TIM_IC_FILTER_FDIV32_N6 + * @arg @ref LL_TIM_IC_FILTER_FDIV32_N8 + * @retval None + */ +__STATIC_INLINE void LL_TIM_IC_SetFilter(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t ICFilter) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + MODIFY_REG(*pReg, ((TIM_CCMR1_IC1F) << SHIFT_TAB_ICxx[iChannel]), (ICFilter >> 16U) << SHIFT_TAB_ICxx[iChannel]); +} + +/** + * @brief Get the input filter duration. 
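A sketch of setting the input prescaler and filter individually, under the same assumption that TIM2 is the instance in use:

#include "stm32f4xx_ll_tim.h"

static void ic_prescaler_filter_sketch(void)
{
  LL_TIM_IC_SetPrescaler(TIM2, LL_TIM_CHANNEL_CH2, LL_TIM_ICPSC_DIV4);      /* capture every 4th event */
  LL_TIM_IC_SetFilter(TIM2, LL_TIM_CHANNEL_CH2, LL_TIM_IC_FILTER_FDIV8_N6); /* debounce a noisy input */
}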
+ * @rmtoll CCMR1 IC1F LL_TIM_IC_GetFilter\n + * CCMR1 IC2F LL_TIM_IC_GetFilter\n + * CCMR2 IC3F LL_TIM_IC_GetFilter\n + * CCMR2 IC4F LL_TIM_IC_GetFilter + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_IC_FILTER_FDIV1 + * @arg @ref LL_TIM_IC_FILTER_FDIV1_N2 + * @arg @ref LL_TIM_IC_FILTER_FDIV1_N4 + * @arg @ref LL_TIM_IC_FILTER_FDIV1_N8 + * @arg @ref LL_TIM_IC_FILTER_FDIV2_N6 + * @arg @ref LL_TIM_IC_FILTER_FDIV2_N8 + * @arg @ref LL_TIM_IC_FILTER_FDIV4_N6 + * @arg @ref LL_TIM_IC_FILTER_FDIV4_N8 + * @arg @ref LL_TIM_IC_FILTER_FDIV8_N6 + * @arg @ref LL_TIM_IC_FILTER_FDIV8_N8 + * @arg @ref LL_TIM_IC_FILTER_FDIV16_N5 + * @arg @ref LL_TIM_IC_FILTER_FDIV16_N6 + * @arg @ref LL_TIM_IC_FILTER_FDIV16_N8 + * @arg @ref LL_TIM_IC_FILTER_FDIV32_N5 + * @arg @ref LL_TIM_IC_FILTER_FDIV32_N6 + * @arg @ref LL_TIM_IC_FILTER_FDIV32_N8 + */ +__STATIC_INLINE uint32_t LL_TIM_IC_GetFilter(const TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + const __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + return ((READ_BIT(*pReg, ((TIM_CCMR1_IC1F) << SHIFT_TAB_ICxx[iChannel])) >> SHIFT_TAB_ICxx[iChannel]) << 16U); +} + +/** + * @brief Set the input channel polarity. + * @rmtoll CCER CC1P LL_TIM_IC_SetPolarity\n + * CCER CC1NP LL_TIM_IC_SetPolarity\n + * CCER CC2P LL_TIM_IC_SetPolarity\n + * CCER CC2NP LL_TIM_IC_SetPolarity\n + * CCER CC3P LL_TIM_IC_SetPolarity\n + * CCER CC3NP LL_TIM_IC_SetPolarity\n + * CCER CC4P LL_TIM_IC_SetPolarity\n + * CCER CC4NP LL_TIM_IC_SetPolarity + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @param ICPolarity This parameter can be one of the following values: + * @arg @ref LL_TIM_IC_POLARITY_RISING + * @arg @ref LL_TIM_IC_POLARITY_FALLING + * @arg @ref LL_TIM_IC_POLARITY_BOTHEDGE + * @retval None + */ +__STATIC_INLINE void LL_TIM_IC_SetPolarity(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t ICPolarity) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + MODIFY_REG(TIMx->CCER, ((TIM_CCER_CC1NP | TIM_CCER_CC1P) << SHIFT_TAB_CCxP[iChannel]), + ICPolarity << SHIFT_TAB_CCxP[iChannel]); +} + +/** + * @brief Get the current input channel polarity. 
+ * @rmtoll CCER CC1P LL_TIM_IC_GetPolarity\n + * CCER CC1NP LL_TIM_IC_GetPolarity\n + * CCER CC2P LL_TIM_IC_GetPolarity\n + * CCER CC2NP LL_TIM_IC_GetPolarity\n + * CCER CC3P LL_TIM_IC_GetPolarity\n + * CCER CC3NP LL_TIM_IC_GetPolarity\n + * CCER CC4P LL_TIM_IC_GetPolarity\n + * CCER CC4NP LL_TIM_IC_GetPolarity + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_IC_POLARITY_RISING + * @arg @ref LL_TIM_IC_POLARITY_FALLING + * @arg @ref LL_TIM_IC_POLARITY_BOTHEDGE + */ +__STATIC_INLINE uint32_t LL_TIM_IC_GetPolarity(const TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + return (READ_BIT(TIMx->CCER, ((TIM_CCER_CC1NP | TIM_CCER_CC1P) << SHIFT_TAB_CCxP[iChannel])) >> + SHIFT_TAB_CCxP[iChannel]); +} + +/** + * @brief Connect the TIMx_CH1, CH2 and CH3 pins to the TI1 input (XOR combination). + * @note Macro IS_TIM_XOR_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides an XOR input. + * @rmtoll CR2 TI1S LL_TIM_IC_EnableXORCombination + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_IC_EnableXORCombination(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->CR2, TIM_CR2_TI1S); +} + +/** + * @brief Disconnect the TIMx_CH1, CH2 and CH3 pins from the TI1 input. + * @note Macro IS_TIM_XOR_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides an XOR input. + * @rmtoll CR2 TI1S LL_TIM_IC_DisableXORCombination + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_IC_DisableXORCombination(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->CR2, TIM_CR2_TI1S); +} + +/** + * @brief Indicates whether the TIMx_CH1, CH2 and CH3 pins are connected to the TI1 input. + * @note Macro IS_TIM_XOR_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides an XOR input. + * @rmtoll CR2 TI1S LL_TIM_IC_IsEnabledXORCombination + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IC_IsEnabledXORCombination(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->CR2, TIM_CR2_TI1S) == (TIM_CR2_TI1S)) ? 1UL : 0UL); +} + +/** + * @brief Get captured value for input channel 1. + * @note In 32-bit timer implementations returned captured value can be between 0x00000000 and 0xFFFFFFFF. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @note Macro IS_TIM_CC1_INSTANCE(TIMx) can be used to check whether or not + * input channel 1 is supported by a timer instance. + * @rmtoll CCR1 CCR1 LL_TIM_IC_GetCaptureCH1 + * @param TIMx Timer instance + * @retval CapturedValue (between Min_Data=0 and Max_Data=65535) + */ +__STATIC_INLINE uint32_t LL_TIM_IC_GetCaptureCH1(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_REG(TIMx->CCR1)); +} + +/** + * @brief Get captured value for input channel 2. + * @note In 32-bit timer implementations returned captured value can be between 0x00000000 and 0xFFFFFFFF. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @note Macro IS_TIM_CC2_INSTANCE(TIMx) can be used to check whether or not + * input channel 2 is supported by a timer instance.
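A sketch of period measurement with the capture getters above, assuming TIM2 runs as a free-running 32-bit counter so the unsigned subtraction wraps correctly:

#include "stm32f4xx_ll_tim.h"

static uint32_t measure_period_sketch(void)
{
  uint32_t first = LL_TIM_IC_GetCaptureCH1(TIM2);
  /* ... wait for the next capture event ... */
  uint32_t second = LL_TIM_IC_GetCaptureCH1(TIM2);
  return second - first; /* period in counter ticks */
}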
+ * @rmtoll CCR2 CCR2 LL_TIM_IC_GetCaptureCH2 + * @param TIMx Timer instance + * @retval CapturedValue (between Min_Data=0 and Max_Data=65535) + */ +__STATIC_INLINE uint32_t LL_TIM_IC_GetCaptureCH2(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_REG(TIMx->CCR2)); +} + +/** + * @brief Get captured value for input channel 3. + * @note In 32-bit timer implementations returned captured value can be between 0x00000000 and 0xFFFFFFFF. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @note Macro IS_TIM_CC3_INSTANCE(TIMx) can be used to check whether or not + * input channel 3 is supported by a timer instance. + * @rmtoll CCR3 CCR3 LL_TIM_IC_GetCaptureCH3 + * @param TIMx Timer instance + * @retval CapturedValue (between Min_Data=0 and Max_Data=65535) + */ +__STATIC_INLINE uint32_t LL_TIM_IC_GetCaptureCH3(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_REG(TIMx->CCR3)); +} + +/** + * @brief Get captured value for input channel 4. + * @note In 32-bit timer implementations returned captured value can be between 0x00000000 and 0xFFFFFFFF. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @note Macro IS_TIM_CC4_INSTANCE(TIMx) can be used to check whether or not + * input channel 4 is supported by a timer instance. + * @rmtoll CCR4 CCR4 LL_TIM_IC_GetCaptureCH4 + * @param TIMx Timer instance + * @retval CapturedValue (between Min_Data=0 and Max_Data=65535) + */ +__STATIC_INLINE uint32_t LL_TIM_IC_GetCaptureCH4(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_REG(TIMx->CCR4)); +} + +/** + * @} + */ + +/** @defgroup TIM_LL_EF_Clock_Selection Counter clock selection + * @{ + */ +/** + * @brief Enable external clock mode 2. + * @note When external clock mode 2 is enabled the counter is clocked by any active edge on the ETRF signal. + * @note Macro IS_TIM_CLOCKSOURCE_ETRMODE2_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports external clock mode2. + * @rmtoll SMCR ECE LL_TIM_EnableExternalClock + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableExternalClock(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->SMCR, TIM_SMCR_ECE); +} + +/** + * @brief Disable external clock mode 2. + * @note Macro IS_TIM_CLOCKSOURCE_ETRMODE2_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports external clock mode2. + * @rmtoll SMCR ECE LL_TIM_DisableExternalClock + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableExternalClock(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->SMCR, TIM_SMCR_ECE); +} + +/** + * @brief Indicate whether external clock mode 2 is enabled. + * @note Macro IS_TIM_CLOCKSOURCE_ETRMODE2_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports external clock mode2. + * @rmtoll SMCR ECE LL_TIM_IsEnabledExternalClock + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledExternalClock(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SMCR, TIM_SMCR_ECE) == (TIM_SMCR_ECE)) ? 1UL : 0UL); +} + +/** + * @brief Set the clock source of the counter clock. + * @note when selected clock source is external clock mode 1, the timer input + * the external clock is applied is selected by calling the @ref LL_TIM_SetTriggerInput() + * function. This timer input must be configured by calling + * the @ref LL_TIM_IC_Config() function. 
+ * @note Macro IS_TIM_CLOCKSOURCE_ETRMODE1_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports external clock mode1. + * @note Macro IS_TIM_CLOCKSOURCE_ETRMODE2_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports external clock mode2. + * @rmtoll SMCR SMS LL_TIM_SetClockSource\n + * SMCR ECE LL_TIM_SetClockSource + * @param TIMx Timer instance + * @param ClockSource This parameter can be one of the following values: + * @arg @ref LL_TIM_CLOCKSOURCE_INTERNAL + * @arg @ref LL_TIM_CLOCKSOURCE_EXT_MODE1 + * @arg @ref LL_TIM_CLOCKSOURCE_EXT_MODE2 + * @retval None + */ +__STATIC_INLINE void LL_TIM_SetClockSource(TIM_TypeDef *TIMx, uint32_t ClockSource) +{ + MODIFY_REG(TIMx->SMCR, TIM_SMCR_SMS | TIM_SMCR_ECE, ClockSource); +} + +/** + * @brief Set the encoder interface mode. + * @note Macro IS_TIM_ENCODER_INTERFACE_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports the encoder mode. + * @rmtoll SMCR SMS LL_TIM_SetEncoderMode + * @param TIMx Timer instance + * @param EncoderMode This parameter can be one of the following values: + * @arg @ref LL_TIM_ENCODERMODE_X2_TI1 + * @arg @ref LL_TIM_ENCODERMODE_X2_TI2 + * @arg @ref LL_TIM_ENCODERMODE_X4_TI12 + * @retval None + */ +__STATIC_INLINE void LL_TIM_SetEncoderMode(TIM_TypeDef *TIMx, uint32_t EncoderMode) +{ + MODIFY_REG(TIMx->SMCR, TIM_SMCR_SMS, EncoderMode); +} + +/** + * @} + */ + +/** @defgroup TIM_LL_EF_Timer_Synchronization Timer synchronisation configuration + * @{ + */ +/** + * @brief Set the trigger output (TRGO) used for timer synchronization. + * @note Macro IS_TIM_MASTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance can operate as a master timer. + * @rmtoll CR2 MMS LL_TIM_SetTriggerOutput + * @param TIMx Timer instance + * @param TimerSynchronization This parameter can be one of the following values: + * @arg @ref LL_TIM_TRGO_RESET + * @arg @ref LL_TIM_TRGO_ENABLE + * @arg @ref LL_TIM_TRGO_UPDATE + * @arg @ref LL_TIM_TRGO_CC1IF + * @arg @ref LL_TIM_TRGO_OC1REF + * @arg @ref LL_TIM_TRGO_OC2REF + * @arg @ref LL_TIM_TRGO_OC3REF + * @arg @ref LL_TIM_TRGO_OC4REF + * @retval None + */ +__STATIC_INLINE void LL_TIM_SetTriggerOutput(TIM_TypeDef *TIMx, uint32_t TimerSynchronization) +{ + MODIFY_REG(TIMx->CR2, TIM_CR2_MMS, TimerSynchronization); +} + +/** + * @brief Set the synchronization mode of a slave timer. + * @note Macro IS_TIM_SLAVE_INSTANCE(TIMx) can be used to check whether or not + * a timer instance can operate as a slave timer. + * @rmtoll SMCR SMS LL_TIM_SetSlaveMode + * @param TIMx Timer instance + * @param SlaveMode This parameter can be one of the following values: + * @arg @ref LL_TIM_SLAVEMODE_DISABLED + * @arg @ref LL_TIM_SLAVEMODE_RESET + * @arg @ref LL_TIM_SLAVEMODE_GATED + * @arg @ref LL_TIM_SLAVEMODE_TRIGGER + * @retval None + */ +__STATIC_INLINE void LL_TIM_SetSlaveMode(TIM_TypeDef *TIMx, uint32_t SlaveMode) +{ + MODIFY_REG(TIMx->SMCR, TIM_SMCR_SMS, SlaveMode); +} + +/** + * @brief Select the trigger input to be used to synchronize the counter. + * @note Macro IS_TIM_SLAVE_INSTANCE(TIMx) can be used to check whether or not + * a timer instance can operate as a slave timer.
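A sketch of the encoder interface configuration above, assuming TIM3 supports the encoder mode and its TI1/TI2 inputs have already been configured with LL_TIM_IC_Config(); LL_TIM_EnableCounter() is defined earlier in this header:

#include "stm32f4xx_ll_tim.h"

static void encoder_sketch(void)
{
  LL_TIM_SetEncoderMode(TIM3, LL_TIM_ENCODERMODE_X4_TI12); /* count on both TI1 and TI2 edges */
  LL_TIM_EnableCounter(TIM3);
}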
+ * @rmtoll SMCR TS LL_TIM_SetTriggerInput + * @param TIMx Timer instance + * @param TriggerInput This parameter can be one of the following values: + * @arg @ref LL_TIM_TS_ITR0 + * @arg @ref LL_TIM_TS_ITR1 + * @arg @ref LL_TIM_TS_ITR2 + * @arg @ref LL_TIM_TS_ITR3 + * @arg @ref LL_TIM_TS_TI1F_ED + * @arg @ref LL_TIM_TS_TI1FP1 + * @arg @ref LL_TIM_TS_TI2FP2 + * @arg @ref LL_TIM_TS_ETRF + * @retval None + */ +__STATIC_INLINE void LL_TIM_SetTriggerInput(TIM_TypeDef *TIMx, uint32_t TriggerInput) +{ + MODIFY_REG(TIMx->SMCR, TIM_SMCR_TS, TriggerInput); +} + +/** + * @brief Enable the Master/Slave mode. + * @note Macro IS_TIM_SLAVE_INSTANCE(TIMx) can be used to check whether or not + * a timer instance can operate as a slave timer. + * @rmtoll SMCR MSM LL_TIM_EnableMasterSlaveMode + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableMasterSlaveMode(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->SMCR, TIM_SMCR_MSM); +} + +/** + * @brief Disable the Master/Slave mode. + * @note Macro IS_TIM_SLAVE_INSTANCE(TIMx) can be used to check whether or not + * a timer instance can operate as a slave timer. + * @rmtoll SMCR MSM LL_TIM_DisableMasterSlaveMode + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableMasterSlaveMode(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->SMCR, TIM_SMCR_MSM); +} + +/** + * @brief Indicates whether the Master/Slave mode is enabled. + * @note Macro IS_TIM_SLAVE_INSTANCE(TIMx) can be used to check whether or not + * a timer instance can operate as a slave timer. + * @rmtoll SMCR MSM LL_TIM_IsEnabledMasterSlaveMode + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledMasterSlaveMode(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SMCR, TIM_SMCR_MSM) == (TIM_SMCR_MSM)) ? 1UL : 0UL); +} + +/** + * @brief Configure the external trigger (ETR) input. + * @note Macro IS_TIM_ETR_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides an external trigger input. 
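A sketch of master/slave synchronization with the functions above; which ITRx maps to which master instance is device-specific (see the reference manual), so ITR0 is only an assumption here:

#include "stm32f4xx_ll_tim.h"

static void master_slave_sketch(void)
{
  LL_TIM_SetTriggerOutput(TIM1, LL_TIM_TRGO_UPDATE);   /* master: update event drives TRGO */
  LL_TIM_EnableMasterSlaveMode(TIM1);
  LL_TIM_SetTriggerInput(TIM3, LL_TIM_TS_ITR0);        /* slave: assumed wired to TIM1 TRGO */
  LL_TIM_SetSlaveMode(TIM3, LL_TIM_SLAVEMODE_TRIGGER); /* start counting on the trigger */
}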
+ * @rmtoll SMCR ETP LL_TIM_ConfigETR\n + * SMCR ETPS LL_TIM_ConfigETR\n + * SMCR ETF LL_TIM_ConfigETR + * @param TIMx Timer instance + * @param ETRPolarity This parameter can be one of the following values: + * @arg @ref LL_TIM_ETR_POLARITY_NONINVERTED + * @arg @ref LL_TIM_ETR_POLARITY_INVERTED + * @param ETRPrescaler This parameter can be one of the following values: + * @arg @ref LL_TIM_ETR_PRESCALER_DIV1 + * @arg @ref LL_TIM_ETR_PRESCALER_DIV2 + * @arg @ref LL_TIM_ETR_PRESCALER_DIV4 + * @arg @ref LL_TIM_ETR_PRESCALER_DIV8 + * @param ETRFilter This parameter can be one of the following values: + * @arg @ref LL_TIM_ETR_FILTER_FDIV1 + * @arg @ref LL_TIM_ETR_FILTER_FDIV1_N2 + * @arg @ref LL_TIM_ETR_FILTER_FDIV1_N4 + * @arg @ref LL_TIM_ETR_FILTER_FDIV1_N8 + * @arg @ref LL_TIM_ETR_FILTER_FDIV2_N6 + * @arg @ref LL_TIM_ETR_FILTER_FDIV2_N8 + * @arg @ref LL_TIM_ETR_FILTER_FDIV4_N6 + * @arg @ref LL_TIM_ETR_FILTER_FDIV4_N8 + * @arg @ref LL_TIM_ETR_FILTER_FDIV8_N6 + * @arg @ref LL_TIM_ETR_FILTER_FDIV8_N8 + * @arg @ref LL_TIM_ETR_FILTER_FDIV16_N5 + * @arg @ref LL_TIM_ETR_FILTER_FDIV16_N6 + * @arg @ref LL_TIM_ETR_FILTER_FDIV16_N8 + * @arg @ref LL_TIM_ETR_FILTER_FDIV32_N5 + * @arg @ref LL_TIM_ETR_FILTER_FDIV32_N6 + * @arg @ref LL_TIM_ETR_FILTER_FDIV32_N8 + * @retval None + */ +__STATIC_INLINE void LL_TIM_ConfigETR(TIM_TypeDef *TIMx, uint32_t ETRPolarity, uint32_t ETRPrescaler, + uint32_t ETRFilter) +{ + MODIFY_REG(TIMx->SMCR, TIM_SMCR_ETP | TIM_SMCR_ETPS | TIM_SMCR_ETF, ETRPolarity | ETRPrescaler | ETRFilter); +} + +/** + * @} + */ + +/** @defgroup TIM_LL_EF_Break_Function Break function configuration + * @{ + */ +/** + * @brief Enable the break function. + * @note Macro IS_TIM_BREAK_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides a break input. + * @rmtoll BDTR BKE LL_TIM_EnableBRK + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableBRK(TIM_TypeDef *TIMx) +{ + __IO uint32_t tmpreg; + SET_BIT(TIMx->BDTR, TIM_BDTR_BKE); + /* Note: Any write operation to this bit takes a delay of 1 APB clock cycle to become effective. */ + tmpreg = READ_REG(TIMx->BDTR); + (void)(tmpreg); +} + +/** + * @brief Disable the break function. + * @rmtoll BDTR BKE LL_TIM_DisableBRK + * @param TIMx Timer instance + * @note Macro IS_TIM_BREAK_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides a break input. + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableBRK(TIM_TypeDef *TIMx) +{ + __IO uint32_t tmpreg; + CLEAR_BIT(TIMx->BDTR, TIM_BDTR_BKE); + /* Note: Any write operation to this bit takes a delay of 1 APB clock cycle to become effective. */ + tmpreg = READ_REG(TIMx->BDTR); + (void)(tmpreg); +} + +/** + * @brief Configure the break input. + * @note Macro IS_TIM_BREAK_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides a break input. + * @rmtoll BDTR BKP LL_TIM_ConfigBRK + * @param TIMx Timer instance + * @param BreakPolarity This parameter can be one of the following values: + * @arg @ref LL_TIM_BREAK_POLARITY_LOW + * @arg @ref LL_TIM_BREAK_POLARITY_HIGH + * @retval None + */ +__STATIC_INLINE void LL_TIM_ConfigBRK(TIM_TypeDef *TIMx, uint32_t BreakPolarity) +{ + __IO uint32_t tmpreg; + MODIFY_REG(TIMx->BDTR, TIM_BDTR_BKP, BreakPolarity); + /* Note: Any write operation to BKP bit takes a delay of 1 APB clock cycle to become effective. */ + tmpreg = READ_REG(TIMx->BDTR); + (void)(tmpreg); +} + +/** + * @brief Select the outputs off state (enabled v.s. disabled) in Idle and Run modes. 
+ * @note Macro IS_TIM_BREAK_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides a break input. + * @rmtoll BDTR OSSI LL_TIM_SetOffStates\n + * BDTR OSSR LL_TIM_SetOffStates + * @param TIMx Timer instance + * @param OffStateIdle This parameter can be one of the following values: + * @arg @ref LL_TIM_OSSI_DISABLE + * @arg @ref LL_TIM_OSSI_ENABLE + * @param OffStateRun This parameter can be one of the following values: + * @arg @ref LL_TIM_OSSR_DISABLE + * @arg @ref LL_TIM_OSSR_ENABLE + * @retval None + */ +__STATIC_INLINE void LL_TIM_SetOffStates(TIM_TypeDef *TIMx, uint32_t OffStateIdle, uint32_t OffStateRun) +{ + MODIFY_REG(TIMx->BDTR, TIM_BDTR_OSSI | TIM_BDTR_OSSR, OffStateIdle | OffStateRun); +} + +/** + * @brief Enable automatic output (MOE can be set by software or automatically when a break input is active). + * @note Macro IS_TIM_BREAK_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides a break input. + * @rmtoll BDTR AOE LL_TIM_EnableAutomaticOutput + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableAutomaticOutput(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->BDTR, TIM_BDTR_AOE); +} + +/** + * @brief Disable automatic output (MOE can be set only by software). + * @note Macro IS_TIM_BREAK_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides a break input. + * @rmtoll BDTR AOE LL_TIM_DisableAutomaticOutput + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableAutomaticOutput(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->BDTR, TIM_BDTR_AOE); +} + +/** + * @brief Indicate whether automatic output is enabled. + * @note Macro IS_TIM_BREAK_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides a break input. + * @rmtoll BDTR AOE LL_TIM_IsEnabledAutomaticOutput + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledAutomaticOutput(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->BDTR, TIM_BDTR_AOE) == (TIM_BDTR_AOE)) ? 1UL : 0UL); +} + +/** + * @brief Enable the outputs (set the MOE bit in TIMx_BDTR register). + * @note The MOE bit in TIMx_BDTR register allows to enable /disable the outputs by + * software and is reset in case of break or break2 event + * @note Macro IS_TIM_BREAK_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides a break input. + * @rmtoll BDTR MOE LL_TIM_EnableAllOutputs + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableAllOutputs(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->BDTR, TIM_BDTR_MOE); +} + +/** + * @brief Disable the outputs (reset the MOE bit in TIMx_BDTR register). + * @note The MOE bit in TIMx_BDTR register allows to enable /disable the outputs by + * software and is reset in case of break or break2 event. + * @note Macro IS_TIM_BREAK_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides a break input. + * @rmtoll BDTR MOE LL_TIM_DisableAllOutputs + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableAllOutputs(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->BDTR, TIM_BDTR_MOE); +} + +/** + * @brief Indicates whether outputs are enabled. + * @note Macro IS_TIM_BREAK_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides a break input. + * @rmtoll BDTR MOE LL_TIM_IsEnabledAllOutputs + * @param TIMx Timer instance + * @retval State of bit (1 or 0). 
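A sketch of the break and main-output controls, assuming an advanced-control timer such as TIM1 with its break input wired externally:

#include "stm32f4xx_ll_tim.h"

static void break_moe_sketch(void)
{
  LL_TIM_ConfigBRK(TIM1, LL_TIM_BREAK_POLARITY_HIGH); /* break input active high */
  LL_TIM_EnableBRK(TIM1);
  LL_TIM_EnableAutomaticOutput(TIM1);                 /* MOE re-asserted after the break condition clears */
  LL_TIM_EnableAllOutputs(TIM1);                      /* set MOE so configured channels drive their pins */
}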
+ */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledAllOutputs(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->BDTR, TIM_BDTR_MOE) == (TIM_BDTR_MOE)) ? 1UL : 0UL); +} + +/** + * @} + */ + +/** @defgroup TIM_LL_EF_DMA_Burst_Mode DMA burst mode configuration + * @{ + */ +/** + * @brief Configures the timer DMA burst feature. + * @note Macro IS_TIM_DMABURST_INSTANCE(TIMx) can be used to check whether or + * not a timer instance supports the DMA burst mode. + * @rmtoll DCR DBL LL_TIM_ConfigDMABurst\n + * DCR DBA LL_TIM_ConfigDMABurst + * @param TIMx Timer instance + * @param DMABurstBaseAddress This parameter can be one of the following values: + * @arg @ref LL_TIM_DMABURST_BASEADDR_CR1 + * @arg @ref LL_TIM_DMABURST_BASEADDR_CR2 + * @arg @ref LL_TIM_DMABURST_BASEADDR_SMCR + * @arg @ref LL_TIM_DMABURST_BASEADDR_DIER + * @arg @ref LL_TIM_DMABURST_BASEADDR_SR + * @arg @ref LL_TIM_DMABURST_BASEADDR_EGR + * @arg @ref LL_TIM_DMABURST_BASEADDR_CCMR1 + * @arg @ref LL_TIM_DMABURST_BASEADDR_CCMR2 + * @arg @ref LL_TIM_DMABURST_BASEADDR_CCER + * @arg @ref LL_TIM_DMABURST_BASEADDR_CNT + * @arg @ref LL_TIM_DMABURST_BASEADDR_PSC + * @arg @ref LL_TIM_DMABURST_BASEADDR_ARR + * @arg @ref LL_TIM_DMABURST_BASEADDR_RCR + * @arg @ref LL_TIM_DMABURST_BASEADDR_CCR1 + * @arg @ref LL_TIM_DMABURST_BASEADDR_CCR2 + * @arg @ref LL_TIM_DMABURST_BASEADDR_CCR3 + * @arg @ref LL_TIM_DMABURST_BASEADDR_CCR4 + * @arg @ref LL_TIM_DMABURST_BASEADDR_BDTR + * @param DMABurstLength This parameter can be one of the following values: + * @arg @ref LL_TIM_DMABURST_LENGTH_1TRANSFER + * @arg @ref LL_TIM_DMABURST_LENGTH_2TRANSFERS + * @arg @ref LL_TIM_DMABURST_LENGTH_3TRANSFERS + * @arg @ref LL_TIM_DMABURST_LENGTH_4TRANSFERS + * @arg @ref LL_TIM_DMABURST_LENGTH_5TRANSFERS + * @arg @ref LL_TIM_DMABURST_LENGTH_6TRANSFERS + * @arg @ref LL_TIM_DMABURST_LENGTH_7TRANSFERS + * @arg @ref LL_TIM_DMABURST_LENGTH_8TRANSFERS + * @arg @ref LL_TIM_DMABURST_LENGTH_9TRANSFERS + * @arg @ref LL_TIM_DMABURST_LENGTH_10TRANSFERS + * @arg @ref LL_TIM_DMABURST_LENGTH_11TRANSFERS + * @arg @ref LL_TIM_DMABURST_LENGTH_12TRANSFERS + * @arg @ref LL_TIM_DMABURST_LENGTH_13TRANSFERS + * @arg @ref LL_TIM_DMABURST_LENGTH_14TRANSFERS + * @arg @ref LL_TIM_DMABURST_LENGTH_15TRANSFERS + * @arg @ref LL_TIM_DMABURST_LENGTH_16TRANSFERS + * @arg @ref LL_TIM_DMABURST_LENGTH_17TRANSFERS + * @arg @ref LL_TIM_DMABURST_LENGTH_18TRANSFERS + * @retval None + */ +__STATIC_INLINE void LL_TIM_ConfigDMABurst(TIM_TypeDef *TIMx, uint32_t DMABurstBaseAddress, uint32_t DMABurstLength) +{ + MODIFY_REG(TIMx->DCR, (TIM_DCR_DBL | TIM_DCR_DBA), (DMABurstBaseAddress | DMABurstLength)); +} + +/** + * @} + */ + +/** @defgroup TIM_LL_EF_Timer_Inputs_Remapping Timer input remapping + * @{ + */ +/** + * @brief Remap TIM inputs (input channel, internal/external triggers). + * @note Macro IS_TIM_REMAP_INSTANCE(TIMx) can be used to check whether or not + * a some timer inputs can be remapped. + * @rmtoll TIM1_OR ITR2_RMP LL_TIM_SetRemap\n + * TIM2_OR ITR1_RMP LL_TIM_SetRemap\n + * TIM5_OR ITR1_RMP LL_TIM_SetRemap\n + * TIM5_OR TI4_RMP LL_TIM_SetRemap\n + * TIM9_OR ITR1_RMP LL_TIM_SetRemap\n + * TIM11_OR TI1_RMP LL_TIM_SetRemap\n + * LPTIM1_OR OR LL_TIM_SetRemap + * @param TIMx Timer instance + * @param Remap Remap param depends on the TIMx. Description available only + * in CHM version of the User Manual (not in .pdf). + * Otherwise see Reference Manual description of OR registers. 
+ * + * Below description summarizes "Timer Instance" and "Remap" param combinations: + * + * TIM1: one of the following values + * + * ITR2_RMP can be one of the following values + * @arg @ref LL_TIM_TIM1_ITR2_RMP_TIM3_TRGO (*) + * @arg @ref LL_TIM_TIM1_ITR2_RMP_LPTIM (*) + * + * TIM2: one of the following values + * + * ITR1_RMP can be one of the following values + * @arg @ref LL_TIM_TIM2_ITR1_RMP_TIM8_TRGO + * @arg @ref LL_TIM_TIM2_ITR1_RMP_OTG_FS_SOF + * @arg @ref LL_TIM_TIM2_ITR1_RMP_OTG_HS_SOF + * + * TIM5: one of the following values + * + * @arg @ref LL_TIM_TIM5_TI4_RMP_GPIO + * @arg @ref LL_TIM_TIM5_TI4_RMP_LSI + * @arg @ref LL_TIM_TIM5_TI4_RMP_LSE + * @arg @ref LL_TIM_TIM5_TI4_RMP_RTC + * @arg @ref LL_TIM_TIM5_ITR1_RMP_TIM3_TRGO (*) + * @arg @ref LL_TIM_TIM5_ITR1_RMP_LPTIM (*) + * + * TIM9: one of the following values + * + * ITR1_RMP can be one of the following values + * @arg @ref LL_TIM_TIM9_ITR1_RMP_TIM3_TRGO (*) + * @arg @ref LL_TIM_TIM9_ITR1_RMP_LPTIM (*) + * + * TIM11: one of the following values + * + * @arg @ref LL_TIM_TIM11_TI1_RMP_GPIO + * @arg @ref LL_TIM_TIM11_TI1_RMP_GPIO1 (*) + * @arg @ref LL_TIM_TIM11_TI1_RMP_HSE_RTC + * @arg @ref LL_TIM_TIM11_TI1_RMP_GPIO2 + * @arg @ref LL_TIM_TIM11_TI1_RMP_SPDIFRX (*) + * + * (*) Value not defined in all devices. \n + * + * @retval None + */ +__STATIC_INLINE void LL_TIM_SetRemap(TIM_TypeDef *TIMx, uint32_t Remap) +{ +#if defined(LPTIM_OR_TIM1_ITR2_RMP) && defined(LPTIM_OR_TIM5_ITR1_RMP) && defined(LPTIM_OR_TIM9_ITR1_RMP) + if ((Remap & LL_TIM_LPTIM_REMAP_MASK) == LL_TIM_LPTIM_REMAP_MASK) + { + /* Connect TIMx internal trigger to LPTIM1 output */ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_LPTIM1EN); + MODIFY_REG(LPTIM1->OR, + (LPTIM_OR_TIM1_ITR2_RMP | LPTIM_OR_TIM5_ITR1_RMP | LPTIM_OR_TIM9_ITR1_RMP), + Remap & ~(LL_TIM_LPTIM_REMAP_MASK)); + } + else + { + MODIFY_REG(TIMx->OR, (Remap >> TIMx_OR_RMP_SHIFT), (Remap & TIMx_OR_RMP_MASK)); + } +#else + MODIFY_REG(TIMx->OR, (Remap >> TIMx_OR_RMP_SHIFT), (Remap & TIMx_OR_RMP_MASK)); +#endif /* LPTIM_OR_TIM1_ITR2_RMP && LPTIM_OR_TIM5_ITR1_RMP && LPTIM_OR_TIM9_ITR1_RMP */ +} + +/** + * @} + */ + +/** @defgroup TIM_LL_EF_FLAG_Management FLAG-Management + * @{ + */ +/** + * @brief Clear the update interrupt flag (UIF). + * @rmtoll SR UIF LL_TIM_ClearFlag_UPDATE + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_ClearFlag_UPDATE(TIM_TypeDef *TIMx) +{ + WRITE_REG(TIMx->SR, ~(TIM_SR_UIF)); +} + +/** + * @brief Indicate whether update interrupt flag (UIF) is set (update interrupt is pending). + * @rmtoll SR UIF LL_TIM_IsActiveFlag_UPDATE + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsActiveFlag_UPDATE(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SR, TIM_SR_UIF) == (TIM_SR_UIF)) ? 1UL : 0UL); +} + +/** + * @brief Clear the Capture/Compare 1 interrupt flag (CC1F). + * @rmtoll SR CC1IF LL_TIM_ClearFlag_CC1 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_ClearFlag_CC1(TIM_TypeDef *TIMx) +{ + WRITE_REG(TIMx->SR, ~(TIM_SR_CC1IF)); +} + +/** + * @brief Indicate whether Capture/Compare 1 interrupt flag (CC1F) is set (Capture/Compare 1 interrupt is pending). + * @rmtoll SR CC1IF LL_TIM_IsActiveFlag_CC1 + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsActiveFlag_CC1(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SR, TIM_SR_CC1IF) == (TIM_SR_CC1IF)) ? 
1UL : 0UL); +} + +/** + * @brief Clear the Capture/Compare 2 interrupt flag (CC2F). + * @rmtoll SR CC2IF LL_TIM_ClearFlag_CC2 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_ClearFlag_CC2(TIM_TypeDef *TIMx) +{ + WRITE_REG(TIMx->SR, ~(TIM_SR_CC2IF)); +} + +/** + * @brief Indicate whether Capture/Compare 2 interrupt flag (CC2F) is set (Capture/Compare 2 interrupt is pending). + * @rmtoll SR CC2IF LL_TIM_IsActiveFlag_CC2 + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsActiveFlag_CC2(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SR, TIM_SR_CC2IF) == (TIM_SR_CC2IF)) ? 1UL : 0UL); +} + +/** + * @brief Clear the Capture/Compare 3 interrupt flag (CC3F). + * @rmtoll SR CC3IF LL_TIM_ClearFlag_CC3 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_ClearFlag_CC3(TIM_TypeDef *TIMx) +{ + WRITE_REG(TIMx->SR, ~(TIM_SR_CC3IF)); +} + +/** + * @brief Indicate whether Capture/Compare 3 interrupt flag (CC3F) is set (Capture/Compare 3 interrupt is pending). + * @rmtoll SR CC3IF LL_TIM_IsActiveFlag_CC3 + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsActiveFlag_CC3(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SR, TIM_SR_CC3IF) == (TIM_SR_CC3IF)) ? 1UL : 0UL); +} + +/** + * @brief Clear the Capture/Compare 4 interrupt flag (CC4F). + * @rmtoll SR CC4IF LL_TIM_ClearFlag_CC4 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_ClearFlag_CC4(TIM_TypeDef *TIMx) +{ + WRITE_REG(TIMx->SR, ~(TIM_SR_CC4IF)); +} + +/** + * @brief Indicate whether Capture/Compare 4 interrupt flag (CC4F) is set (Capture/Compare 4 interrupt is pending). + * @rmtoll SR CC4IF LL_TIM_IsActiveFlag_CC4 + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsActiveFlag_CC4(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SR, TIM_SR_CC4IF) == (TIM_SR_CC4IF)) ? 1UL : 0UL); +} + +/** + * @brief Clear the commutation interrupt flag (COMIF). + * @rmtoll SR COMIF LL_TIM_ClearFlag_COM + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_ClearFlag_COM(TIM_TypeDef *TIMx) +{ + WRITE_REG(TIMx->SR, ~(TIM_SR_COMIF)); +} + +/** + * @brief Indicate whether commutation interrupt flag (COMIF) is set (commutation interrupt is pending). + * @rmtoll SR COMIF LL_TIM_IsActiveFlag_COM + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsActiveFlag_COM(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SR, TIM_SR_COMIF) == (TIM_SR_COMIF)) ? 1UL : 0UL); +} + +/** + * @brief Clear the trigger interrupt flag (TIF). + * @rmtoll SR TIF LL_TIM_ClearFlag_TRIG + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_ClearFlag_TRIG(TIM_TypeDef *TIMx) +{ + WRITE_REG(TIMx->SR, ~(TIM_SR_TIF)); +} + +/** + * @brief Indicate whether trigger interrupt flag (TIF) is set (trigger interrupt is pending). + * @rmtoll SR TIF LL_TIM_IsActiveFlag_TRIG + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsActiveFlag_TRIG(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SR, TIM_SR_TIF) == (TIM_SR_TIF)) ? 1UL : 0UL); +} + +/** + * @brief Clear the break interrupt flag (BIF). 
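A sketch of the flag check/clear pattern above, as it might appear in a polling loop or an IRQ handler (TIM2 assumed as the instance):

#include "stm32f4xx_ll_tim.h"

static void cc1_flag_sketch(void)
{
  if (LL_TIM_IsActiveFlag_CC1(TIM2) != 0U)
  {
    LL_TIM_ClearFlag_CC1(TIM2);
    uint32_t capture = LL_TIM_IC_GetCaptureCH1(TIM2); /* in capture mode, reading CCR1 also clears CC1IF */
    (void)capture;
  }
}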
+ * @rmtoll SR BIF LL_TIM_ClearFlag_BRK + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_ClearFlag_BRK(TIM_TypeDef *TIMx) +{ + WRITE_REG(TIMx->SR, ~(TIM_SR_BIF)); +} + +/** + * @brief Indicate whether break interrupt flag (BIF) is set (break interrupt is pending). + * @rmtoll SR BIF LL_TIM_IsActiveFlag_BRK + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsActiveFlag_BRK(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SR, TIM_SR_BIF) == (TIM_SR_BIF)) ? 1UL : 0UL); +} + +/** + * @brief Clear the Capture/Compare 1 over-capture interrupt flag (CC1OF). + * @rmtoll SR CC1OF LL_TIM_ClearFlag_CC1OVR + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_ClearFlag_CC1OVR(TIM_TypeDef *TIMx) +{ + WRITE_REG(TIMx->SR, ~(TIM_SR_CC1OF)); +} + +/** + * @brief Indicate whether Capture/Compare 1 over-capture interrupt flag (CC1OF) is set + * (Capture/Compare 1 interrupt is pending). + * @rmtoll SR CC1OF LL_TIM_IsActiveFlag_CC1OVR + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsActiveFlag_CC1OVR(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SR, TIM_SR_CC1OF) == (TIM_SR_CC1OF)) ? 1UL : 0UL); +} + +/** + * @brief Clear the Capture/Compare 2 over-capture interrupt flag (CC2OF). + * @rmtoll SR CC2OF LL_TIM_ClearFlag_CC2OVR + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_ClearFlag_CC2OVR(TIM_TypeDef *TIMx) +{ + WRITE_REG(TIMx->SR, ~(TIM_SR_CC2OF)); +} + +/** + * @brief Indicate whether Capture/Compare 2 over-capture interrupt flag (CC2OF) is set + * (Capture/Compare 2 over-capture interrupt is pending). + * @rmtoll SR CC2OF LL_TIM_IsActiveFlag_CC2OVR + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsActiveFlag_CC2OVR(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SR, TIM_SR_CC2OF) == (TIM_SR_CC2OF)) ? 1UL : 0UL); +} + +/** + * @brief Clear the Capture/Compare 3 over-capture interrupt flag (CC3OF). + * @rmtoll SR CC3OF LL_TIM_ClearFlag_CC3OVR + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_ClearFlag_CC3OVR(TIM_TypeDef *TIMx) +{ + WRITE_REG(TIMx->SR, ~(TIM_SR_CC3OF)); +} + +/** + * @brief Indicate whether Capture/Compare 3 over-capture interrupt flag (CC3OF) is set + * (Capture/Compare 3 over-capture interrupt is pending). + * @rmtoll SR CC3OF LL_TIM_IsActiveFlag_CC3OVR + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsActiveFlag_CC3OVR(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SR, TIM_SR_CC3OF) == (TIM_SR_CC3OF)) ? 1UL : 0UL); +} + +/** + * @brief Clear the Capture/Compare 4 over-capture interrupt flag (CC4OF). + * @rmtoll SR CC4OF LL_TIM_ClearFlag_CC4OVR + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_ClearFlag_CC4OVR(TIM_TypeDef *TIMx) +{ + WRITE_REG(TIMx->SR, ~(TIM_SR_CC4OF)); +} + +/** + * @brief Indicate whether Capture/Compare 4 over-capture interrupt flag (CC4OF) is set + * (Capture/Compare 4 over-capture interrupt is pending). + * @rmtoll SR CC4OF LL_TIM_IsActiveFlag_CC4OVR + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsActiveFlag_CC4OVR(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SR, TIM_SR_CC4OF) == (TIM_SR_CC4OF)) ? 
1UL : 0UL); +} + +/** + * @} + */ + +/** @defgroup TIM_LL_EF_IT_Management IT-Management + * @{ + */ +/** + * @brief Enable update interrupt (UIE). + * @rmtoll DIER UIE LL_TIM_EnableIT_UPDATE + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableIT_UPDATE(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_UIE); +} + +/** + * @brief Disable update interrupt (UIE). + * @rmtoll DIER UIE LL_TIM_DisableIT_UPDATE + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableIT_UPDATE(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_UIE); +} + +/** + * @brief Indicates whether the update interrupt (UIE) is enabled. + * @rmtoll DIER UIE LL_TIM_IsEnabledIT_UPDATE + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledIT_UPDATE(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_UIE) == (TIM_DIER_UIE)) ? 1UL : 0UL); +} + +/** + * @brief Enable capture/compare 1 interrupt (CC1IE). + * @rmtoll DIER CC1IE LL_TIM_EnableIT_CC1 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableIT_CC1(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_CC1IE); +} + +/** + * @brief Disable capture/compare 1 interrupt (CC1IE). + * @rmtoll DIER CC1IE LL_TIM_DisableIT_CC1 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableIT_CC1(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_CC1IE); +} + +/** + * @brief Indicates whether the capture/compare 1 interrupt (CC1IE) is enabled. + * @rmtoll DIER CC1IE LL_TIM_IsEnabledIT_CC1 + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledIT_CC1(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_CC1IE) == (TIM_DIER_CC1IE)) ? 1UL : 0UL); +} + +/** + * @brief Enable capture/compare 2 interrupt (CC2IE). + * @rmtoll DIER CC2IE LL_TIM_EnableIT_CC2 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableIT_CC2(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_CC2IE); +} + +/** + * @brief Disable capture/compare 2 interrupt (CC2IE). + * @rmtoll DIER CC2IE LL_TIM_DisableIT_CC2 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableIT_CC2(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_CC2IE); +} + +/** + * @brief Indicates whether the capture/compare 2 interrupt (CC2IE) is enabled. + * @rmtoll DIER CC2IE LL_TIM_IsEnabledIT_CC2 + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledIT_CC2(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_CC2IE) == (TIM_DIER_CC2IE)) ? 1UL : 0UL); +} + +/** + * @brief Enable capture/compare 3 interrupt (CC3IE). + * @rmtoll DIER CC3IE LL_TIM_EnableIT_CC3 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableIT_CC3(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_CC3IE); +} + +/** + * @brief Disable capture/compare 3 interrupt (CC3IE). + * @rmtoll DIER CC3IE LL_TIM_DisableIT_CC3 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableIT_CC3(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_CC3IE); +} + +/** + * @brief Indicates whether the capture/compare 3 interrupt (CC3IE) is enabled. + * @rmtoll DIER CC3IE LL_TIM_IsEnabledIT_CC3 + * @param TIMx Timer instance + * @retval State of bit (1 or 0). 
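Setting a DIER bit only unmasks the request at the peripheral; the corresponding NVIC line still has to be enabled. A short sketch, assuming TIM3 with its peripheral clock already enabled through RCC:

#include "stm32f4xx_ll_tim.h"

/* Illustrative helper: arm the TIM3 update interrupt and start the counter. */
static void tim3_start_with_update_irq(void)
{
  LL_TIM_EnableIT_UPDATE(TIM3);        /* set UIE in TIM3->DIER            */
  NVIC_SetPriority(TIM3_IRQn, 5U);     /* CMSIS core function              */
  NVIC_EnableIRQ(TIM3_IRQn);           /* unmask the TIM3 line in the NVIC */
  LL_TIM_EnableCounter(TIM3);          /* start counting                   */
}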
+ */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledIT_CC3(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_CC3IE) == (TIM_DIER_CC3IE)) ? 1UL : 0UL); +} + +/** + * @brief Enable capture/compare 4 interrupt (CC4IE). + * @rmtoll DIER CC4IE LL_TIM_EnableIT_CC4 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableIT_CC4(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_CC4IE); +} + +/** + * @brief Disable capture/compare 4 interrupt (CC4IE). + * @rmtoll DIER CC4IE LL_TIM_DisableIT_CC4 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableIT_CC4(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_CC4IE); +} + +/** + * @brief Indicates whether the capture/compare 4 interrupt (CC4IE) is enabled. + * @rmtoll DIER CC4IE LL_TIM_IsEnabledIT_CC4 + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledIT_CC4(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_CC4IE) == (TIM_DIER_CC4IE)) ? 1UL : 0UL); +} + +/** + * @brief Enable commutation interrupt (COMIE). + * @rmtoll DIER COMIE LL_TIM_EnableIT_COM + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableIT_COM(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_COMIE); +} + +/** + * @brief Disable commutation interrupt (COMIE). + * @rmtoll DIER COMIE LL_TIM_DisableIT_COM + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableIT_COM(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_COMIE); +} + +/** + * @brief Indicates whether the commutation interrupt (COMIE) is enabled. + * @rmtoll DIER COMIE LL_TIM_IsEnabledIT_COM + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledIT_COM(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_COMIE) == (TIM_DIER_COMIE)) ? 1UL : 0UL); +} + +/** + * @brief Enable trigger interrupt (TIE). + * @rmtoll DIER TIE LL_TIM_EnableIT_TRIG + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableIT_TRIG(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_TIE); +} + +/** + * @brief Disable trigger interrupt (TIE). + * @rmtoll DIER TIE LL_TIM_DisableIT_TRIG + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableIT_TRIG(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_TIE); +} + +/** + * @brief Indicates whether the trigger interrupt (TIE) is enabled. + * @rmtoll DIER TIE LL_TIM_IsEnabledIT_TRIG + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledIT_TRIG(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_TIE) == (TIM_DIER_TIE)) ? 1UL : 0UL); +} + +/** + * @brief Enable break interrupt (BIE). + * @rmtoll DIER BIE LL_TIM_EnableIT_BRK + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableIT_BRK(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_BIE); +} + +/** + * @brief Disable break interrupt (BIE). + * @rmtoll DIER BIE LL_TIM_DisableIT_BRK + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableIT_BRK(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_BIE); +} + +/** + * @brief Indicates whether the break interrupt (BIE) is enabled. + * @rmtoll DIER BIE LL_TIM_IsEnabledIT_BRK + * @param TIMx Timer instance + * @retval State of bit (1 or 0). 
+ */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledIT_BRK(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_BIE) == (TIM_DIER_BIE)) ? 1UL : 0UL); +} + +/** + * @} + */ + +/** @defgroup TIM_LL_EF_DMA_Management DMA Management + * @{ + */ +/** + * @brief Enable update DMA request (UDE). + * @rmtoll DIER UDE LL_TIM_EnableDMAReq_UPDATE + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableDMAReq_UPDATE(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_UDE); +} + +/** + * @brief Disable update DMA request (UDE). + * @rmtoll DIER UDE LL_TIM_DisableDMAReq_UPDATE + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableDMAReq_UPDATE(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_UDE); +} + +/** + * @brief Indicates whether the update DMA request (UDE) is enabled. + * @rmtoll DIER UDE LL_TIM_IsEnabledDMAReq_UPDATE + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledDMAReq_UPDATE(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_UDE) == (TIM_DIER_UDE)) ? 1UL : 0UL); +} + +/** + * @brief Enable capture/compare 1 DMA request (CC1DE). + * @rmtoll DIER CC1DE LL_TIM_EnableDMAReq_CC1 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableDMAReq_CC1(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_CC1DE); +} + +/** + * @brief Disable capture/compare 1 DMA request (CC1DE). + * @rmtoll DIER CC1DE LL_TIM_DisableDMAReq_CC1 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableDMAReq_CC1(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_CC1DE); +} + +/** + * @brief Indicates whether the capture/compare 1 DMA request (CC1DE) is enabled. + * @rmtoll DIER CC1DE LL_TIM_IsEnabledDMAReq_CC1 + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledDMAReq_CC1(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_CC1DE) == (TIM_DIER_CC1DE)) ? 1UL : 0UL); +} + +/** + * @brief Enable capture/compare 2 DMA request (CC2DE). + * @rmtoll DIER CC2DE LL_TIM_EnableDMAReq_CC2 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableDMAReq_CC2(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_CC2DE); +} + +/** + * @brief Disable capture/compare 2 DMA request (CC2DE). + * @rmtoll DIER CC2DE LL_TIM_DisableDMAReq_CC2 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableDMAReq_CC2(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_CC2DE); +} + +/** + * @brief Indicates whether the capture/compare 2 DMA request (CC2DE) is enabled. + * @rmtoll DIER CC2DE LL_TIM_IsEnabledDMAReq_CC2 + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledDMAReq_CC2(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_CC2DE) == (TIM_DIER_CC2DE)) ? 1UL : 0UL); +} + +/** + * @brief Enable capture/compare 3 DMA request (CC3DE). + * @rmtoll DIER CC3DE LL_TIM_EnableDMAReq_CC3 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableDMAReq_CC3(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_CC3DE); +} + +/** + * @brief Disable capture/compare 3 DMA request (CC3DE). 
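The UDE/CCxDE bits only route the timer event to the DMA controller; the matching DMA stream/channel is device specific and must be configured separately with the LL_DMA API. A hedged sketch, assuming TIM1:

#include "stm32f4xx_ll_tim.h"

/* Illustrative helper: let TIM1 update events trigger DMA transfers, e.g. to
 * reload CCR1 from a waveform table (DMA stream setup not shown here). */
static void tim1_enable_update_dma(void)
{
  LL_TIM_EnableDMAReq_UPDATE(TIM1);    /* set UDE in TIM1->DIER */
  LL_TIM_EnableCounter(TIM1);
}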
+ * @rmtoll DIER CC3DE LL_TIM_DisableDMAReq_CC3 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableDMAReq_CC3(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_CC3DE); +} + +/** + * @brief Indicates whether the capture/compare 3 DMA request (CC3DE) is enabled. + * @rmtoll DIER CC3DE LL_TIM_IsEnabledDMAReq_CC3 + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledDMAReq_CC3(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_CC3DE) == (TIM_DIER_CC3DE)) ? 1UL : 0UL); +} + +/** + * @brief Enable capture/compare 4 DMA request (CC4DE). + * @rmtoll DIER CC4DE LL_TIM_EnableDMAReq_CC4 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableDMAReq_CC4(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_CC4DE); +} + +/** + * @brief Disable capture/compare 4 DMA request (CC4DE). + * @rmtoll DIER CC4DE LL_TIM_DisableDMAReq_CC4 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableDMAReq_CC4(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_CC4DE); +} + +/** + * @brief Indicates whether the capture/compare 4 DMA request (CC4DE) is enabled. + * @rmtoll DIER CC4DE LL_TIM_IsEnabledDMAReq_CC4 + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledDMAReq_CC4(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_CC4DE) == (TIM_DIER_CC4DE)) ? 1UL : 0UL); +} + +/** + * @brief Enable commutation DMA request (COMDE). + * @rmtoll DIER COMDE LL_TIM_EnableDMAReq_COM + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableDMAReq_COM(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_COMDE); +} + +/** + * @brief Disable commutation DMA request (COMDE). + * @rmtoll DIER COMDE LL_TIM_DisableDMAReq_COM + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableDMAReq_COM(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_COMDE); +} + +/** + * @brief Indicates whether the commutation DMA request (COMDE) is enabled. + * @rmtoll DIER COMDE LL_TIM_IsEnabledDMAReq_COM + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledDMAReq_COM(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_COMDE) == (TIM_DIER_COMDE)) ? 1UL : 0UL); +} + +/** + * @brief Enable trigger interrupt (TDE). + * @rmtoll DIER TDE LL_TIM_EnableDMAReq_TRIG + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableDMAReq_TRIG(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_TDE); +} + +/** + * @brief Disable trigger interrupt (TDE). + * @rmtoll DIER TDE LL_TIM_DisableDMAReq_TRIG + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableDMAReq_TRIG(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_TDE); +} + +/** + * @brief Indicates whether the trigger interrupt (TDE) is enabled. + * @rmtoll DIER TDE LL_TIM_IsEnabledDMAReq_TRIG + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledDMAReq_TRIG(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_TDE) == (TIM_DIER_TDE)) ? 1UL : 0UL); +} + +/** + * @} + */ + +/** @defgroup TIM_LL_EF_EVENT_Management EVENT-Management + * @{ + */ +/** + * @brief Generate an update event. 
+ * @rmtoll EGR UG LL_TIM_GenerateEvent_UPDATE + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_GenerateEvent_UPDATE(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->EGR, TIM_EGR_UG); +} + +/** + * @brief Generate Capture/Compare 1 event. + * @rmtoll EGR CC1G LL_TIM_GenerateEvent_CC1 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_GenerateEvent_CC1(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->EGR, TIM_EGR_CC1G); +} + +/** + * @brief Generate Capture/Compare 2 event. + * @rmtoll EGR CC2G LL_TIM_GenerateEvent_CC2 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_GenerateEvent_CC2(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->EGR, TIM_EGR_CC2G); +} + +/** + * @brief Generate Capture/Compare 3 event. + * @rmtoll EGR CC3G LL_TIM_GenerateEvent_CC3 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_GenerateEvent_CC3(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->EGR, TIM_EGR_CC3G); +} + +/** + * @brief Generate Capture/Compare 4 event. + * @rmtoll EGR CC4G LL_TIM_GenerateEvent_CC4 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_GenerateEvent_CC4(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->EGR, TIM_EGR_CC4G); +} + +/** + * @brief Generate commutation event. + * @rmtoll EGR COMG LL_TIM_GenerateEvent_COM + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_GenerateEvent_COM(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->EGR, TIM_EGR_COMG); +} + +/** + * @brief Generate trigger event. + * @rmtoll EGR TG LL_TIM_GenerateEvent_TRIG + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_GenerateEvent_TRIG(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->EGR, TIM_EGR_TG); +} + +/** + * @brief Generate break event. 
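A common use of the EGR helpers is to force an update event right after changing the prescaler or auto-reload value, so the preloaded values are taken into account immediately instead of at the next natural rollover. A sketch, assuming TIM4 fed by a typical 84 MHz APB1 timer clock:

#include "stm32f4xx_ll_tim.h"

/* Illustrative helper: retune TIM4 to a 1 kHz update rate and apply it now. */
static void tim4_retune_1khz(void)
{
  LL_TIM_SetPrescaler(TIM4, 83U);      /* 84 MHz / (83 + 1) = 1 MHz tick     */
  LL_TIM_SetAutoReload(TIM4, 999U);    /* 1000 ticks -> 1 kHz update rate    */
  LL_TIM_GenerateEvent_UPDATE(TIM4);   /* set UG: reload PSC/ARR immediately */
}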
+ * @rmtoll EGR BG LL_TIM_GenerateEvent_BRK + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_GenerateEvent_BRK(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->EGR, TIM_EGR_BG); +} + +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup TIM_LL_EF_Init Initialisation and deinitialisation functions + * @{ + */ + +ErrorStatus LL_TIM_DeInit(const TIM_TypeDef *TIMx); +void LL_TIM_StructInit(LL_TIM_InitTypeDef *TIM_InitStruct); +ErrorStatus LL_TIM_Init(TIM_TypeDef *TIMx, const LL_TIM_InitTypeDef *TIM_InitStruct); +void LL_TIM_OC_StructInit(LL_TIM_OC_InitTypeDef *TIM_OC_InitStruct); +ErrorStatus LL_TIM_OC_Init(TIM_TypeDef *TIMx, uint32_t Channel, const LL_TIM_OC_InitTypeDef *TIM_OC_InitStruct); +void LL_TIM_IC_StructInit(LL_TIM_IC_InitTypeDef *TIM_ICInitStruct); +ErrorStatus LL_TIM_IC_Init(TIM_TypeDef *TIMx, uint32_t Channel, const LL_TIM_IC_InitTypeDef *TIM_IC_InitStruct); +void LL_TIM_ENCODER_StructInit(LL_TIM_ENCODER_InitTypeDef *TIM_EncoderInitStruct); +ErrorStatus LL_TIM_ENCODER_Init(TIM_TypeDef *TIMx, const LL_TIM_ENCODER_InitTypeDef *TIM_EncoderInitStruct); +void LL_TIM_HALLSENSOR_StructInit(LL_TIM_HALLSENSOR_InitTypeDef *TIM_HallSensorInitStruct); +ErrorStatus LL_TIM_HALLSENSOR_Init(TIM_TypeDef *TIMx, const LL_TIM_HALLSENSOR_InitTypeDef *TIM_HallSensorInitStruct); +void LL_TIM_BDTR_StructInit(LL_TIM_BDTR_InitTypeDef *TIM_BDTRInitStruct); +ErrorStatus LL_TIM_BDTR_Init(TIM_TypeDef *TIMx, const LL_TIM_BDTR_InitTypeDef *TIM_BDTRInitStruct); +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* TIM1 || TIM2 || TIM3 || TIM4 || TIM5 || TIM6 || TIM7 || TIM8 || TIM9 || TIM10 || TIM11 || TIM12 || TIM13 || TIM14 */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_LL_TIM_H */ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_usart.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_usart.h new file mode 100644 index 0000000..ed83b6c --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_usart.h @@ -0,0 +1,2521 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_usart.h + * @author MCD Application Team + * @brief Header file of USART LL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_LL_USART_H +#define __STM32F4xx_LL_USART_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +#if defined (USART1) || defined (USART2) || defined (USART3) || defined (USART6) || defined (UART4) || defined (UART5) || defined (UART7) || defined (UART8) || defined (UART9) || defined (UART10) + +/** @defgroup USART_LL USART + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup USART_LL_Private_Constants USART Private Constants + * @{ + */ + +/* Defines used for the bit position in the register and perform offsets*/ +#define USART_POSITION_GTPR_GT USART_GTPR_GT_Pos +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup USART_LL_Private_Macros USART Private Macros + * @{ + */ +/** + * @} + */ +#endif /*USE_FULL_LL_DRIVER*/ + +/* Exported types ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup USART_LL_ES_INIT USART Exported Init structures + * @{ + */ + +/** + * @brief LL USART Init Structure definition + */ +typedef struct +{ + uint32_t BaudRate; /*!< This field defines expected Usart communication baud rate. + + This feature can be modified afterwards using unitary function @ref LL_USART_SetBaudRate().*/ + + uint32_t DataWidth; /*!< Specifies the number of data bits transmitted or received in a frame. + This parameter can be a value of @ref USART_LL_EC_DATAWIDTH. + + This feature can be modified afterwards using unitary function @ref LL_USART_SetDataWidth().*/ + + uint32_t StopBits; /*!< Specifies the number of stop bits transmitted. + This parameter can be a value of @ref USART_LL_EC_STOPBITS. + + This feature can be modified afterwards using unitary function @ref LL_USART_SetStopBitsLength().*/ + + uint32_t Parity; /*!< Specifies the parity mode. + This parameter can be a value of @ref USART_LL_EC_PARITY. + + This feature can be modified afterwards using unitary function @ref LL_USART_SetParity().*/ + + uint32_t TransferDirection; /*!< Specifies whether the Receive and/or Transmit mode is enabled or disabled. + This parameter can be a value of @ref USART_LL_EC_DIRECTION. + + This feature can be modified afterwards using unitary function @ref LL_USART_SetTransferDirection().*/ + + uint32_t HardwareFlowControl; /*!< Specifies whether the hardware flow control mode is enabled or disabled. + This parameter can be a value of @ref USART_LL_EC_HWCONTROL. + + This feature can be modified afterwards using unitary function @ref LL_USART_SetHWFlowCtrl().*/ + + uint32_t OverSampling; /*!< Specifies whether USART oversampling mode is 16 or 8. + This parameter can be a value of @ref USART_LL_EC_OVERSAMPLING. 
+ + This feature can be modified afterwards using unitary function @ref LL_USART_SetOverSampling().*/ + +} LL_USART_InitTypeDef; + +/** + * @brief LL USART Clock Init Structure definition + */ +typedef struct +{ + uint32_t ClockOutput; /*!< Specifies whether the USART clock is enabled or disabled. + This parameter can be a value of @ref USART_LL_EC_CLOCK. + + USART HW configuration can be modified afterwards using unitary functions + @ref LL_USART_EnableSCLKOutput() or @ref LL_USART_DisableSCLKOutput(). + For more details, refer to description of this function. */ + + uint32_t ClockPolarity; /*!< Specifies the steady state of the serial clock. + This parameter can be a value of @ref USART_LL_EC_POLARITY. + + USART HW configuration can be modified afterwards using unitary functions @ref LL_USART_SetClockPolarity(). + For more details, refer to description of this function. */ + + uint32_t ClockPhase; /*!< Specifies the clock transition on which the bit capture is made. + This parameter can be a value of @ref USART_LL_EC_PHASE. + + USART HW configuration can be modified afterwards using unitary functions @ref LL_USART_SetClockPhase(). + For more details, refer to description of this function. */ + + uint32_t LastBitClockPulse; /*!< Specifies whether the clock pulse corresponding to the last transmitted + data bit (MSB) has to be output on the SCLK pin in synchronous mode. + This parameter can be a value of @ref USART_LL_EC_LASTCLKPULSE. + + USART HW configuration can be modified afterwards using unitary functions @ref LL_USART_SetLastClkPulseOutput(). + For more details, refer to description of this function. */ + +} LL_USART_ClockInitTypeDef; + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup USART_LL_Exported_Constants USART Exported Constants + * @{ + */ + +/** @defgroup USART_LL_EC_GET_FLAG Get Flags Defines + * @brief Flags defines which can be used with LL_USART_ReadReg function + * @{ + */ +#define LL_USART_SR_PE USART_SR_PE /*!< Parity error flag */ +#define LL_USART_SR_FE USART_SR_FE /*!< Framing error flag */ +#define LL_USART_SR_NE USART_SR_NE /*!< Noise detected flag */ +#define LL_USART_SR_ORE USART_SR_ORE /*!< Overrun error flag */ +#define LL_USART_SR_IDLE USART_SR_IDLE /*!< Idle line detected flag */ +#define LL_USART_SR_RXNE USART_SR_RXNE /*!< Read data register not empty flag */ +#define LL_USART_SR_TC USART_SR_TC /*!< Transmission complete flag */ +#define LL_USART_SR_TXE USART_SR_TXE /*!< Transmit data register empty flag */ +#define LL_USART_SR_LBD USART_SR_LBD /*!< LIN break detection flag */ +#define LL_USART_SR_CTS USART_SR_CTS /*!< CTS flag */ +/** + * @} + */ + +/** @defgroup USART_LL_EC_IT IT Defines + * @brief IT defines which can be used with LL_USART_ReadReg and LL_USART_WriteReg functions + * @{ + */ +#define LL_USART_CR1_IDLEIE USART_CR1_IDLEIE /*!< IDLE interrupt enable */ +#define LL_USART_CR1_RXNEIE USART_CR1_RXNEIE /*!< Read data register not empty interrupt enable */ +#define LL_USART_CR1_TCIE USART_CR1_TCIE /*!< Transmission complete interrupt enable */ +#define LL_USART_CR1_TXEIE USART_CR1_TXEIE /*!< Transmit data register empty interrupt enable */ +#define LL_USART_CR1_PEIE USART_CR1_PEIE /*!< Parity error */ +#define LL_USART_CR2_LBDIE USART_CR2_LBDIE /*!< LIN break detection interrupt enable */ +#define LL_USART_CR3_EIE USART_CR3_EIE /*!< Error interrupt enable */ +#define LL_USART_CR3_CTSIE USART_CR3_CTSIE /*!< CTS interrupt enable */ +/** + * @} + */ 
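The LL_USART_InitTypeDef fields map one-to-one onto the unitary CR1/CR2/CR3/BRR setters defined later in this header. A sketch of a common 115200-8N1 configuration; LL_USART_StructInit()/LL_USART_Init() are the helpers declared near the end of this header when USE_FULL_LL_DRIVER is defined, and USART2 is only an example instance:

#include "stm32f4xx_ll_usart.h"

/* Illustrative helper: describe a 115200-8N1, full-duplex, no-flow-control setup. */
static void usart2_fill_init_struct(LL_USART_InitTypeDef *cfg)
{
  LL_USART_StructInit(cfg);                          /* load the default values */
  cfg->BaudRate            = 115200U;
  cfg->DataWidth           = LL_USART_DATAWIDTH_8B;
  cfg->StopBits            = LL_USART_STOPBITS_1;
  cfg->Parity              = LL_USART_PARITY_NONE;
  cfg->TransferDirection   = LL_USART_DIRECTION_TX_RX;
  cfg->HardwareFlowControl = LL_USART_HWCONTROL_NONE;
  cfg->OverSampling        = LL_USART_OVERSAMPLING_16;
  /* ... then: LL_USART_Init(USART2, cfg); LL_USART_Enable(USART2); ... */
}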
+ +/** @defgroup USART_LL_EC_DIRECTION Communication Direction + * @{ + */ +#define LL_USART_DIRECTION_NONE 0x00000000U /*!< Transmitter and Receiver are disabled */ +#define LL_USART_DIRECTION_RX USART_CR1_RE /*!< Transmitter is disabled and Receiver is enabled */ +#define LL_USART_DIRECTION_TX USART_CR1_TE /*!< Transmitter is enabled and Receiver is disabled */ +#define LL_USART_DIRECTION_TX_RX (USART_CR1_TE |USART_CR1_RE) /*!< Transmitter and Receiver are enabled */ +/** + * @} + */ + +/** @defgroup USART_LL_EC_PARITY Parity Control + * @{ + */ +#define LL_USART_PARITY_NONE 0x00000000U /*!< Parity control disabled */ +#define LL_USART_PARITY_EVEN USART_CR1_PCE /*!< Parity control enabled and Even Parity is selected */ +#define LL_USART_PARITY_ODD (USART_CR1_PCE | USART_CR1_PS) /*!< Parity control enabled and Odd Parity is selected */ +/** + * @} + */ + +/** @defgroup USART_LL_EC_WAKEUP Wakeup + * @{ + */ +#define LL_USART_WAKEUP_IDLELINE 0x00000000U /*!< USART wake up from Mute mode on Idle Line */ +#define LL_USART_WAKEUP_ADDRESSMARK USART_CR1_WAKE /*!< USART wake up from Mute mode on Address Mark */ +/** + * @} + */ + +/** @defgroup USART_LL_EC_DATAWIDTH Datawidth + * @{ + */ +#define LL_USART_DATAWIDTH_8B 0x00000000U /*!< 8 bits word length : Start bit, 8 data bits, n stop bits */ +#define LL_USART_DATAWIDTH_9B USART_CR1_M /*!< 9 bits word length : Start bit, 9 data bits, n stop bits */ +/** + * @} + */ + +/** @defgroup USART_LL_EC_OVERSAMPLING Oversampling + * @{ + */ +#define LL_USART_OVERSAMPLING_16 0x00000000U /*!< Oversampling by 16 */ +#define LL_USART_OVERSAMPLING_8 USART_CR1_OVER8 /*!< Oversampling by 8 */ +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup USART_LL_EC_CLOCK Clock Signal + * @{ + */ + +#define LL_USART_CLOCK_DISABLE 0x00000000U /*!< Clock signal not provided */ +#define LL_USART_CLOCK_ENABLE USART_CR2_CLKEN /*!< Clock signal provided */ +/** + * @} + */ +#endif /*USE_FULL_LL_DRIVER*/ + +/** @defgroup USART_LL_EC_LASTCLKPULSE Last Clock Pulse + * @{ + */ +#define LL_USART_LASTCLKPULSE_NO_OUTPUT 0x00000000U /*!< The clock pulse of the last data bit is not output to the SCLK pin */ +#define LL_USART_LASTCLKPULSE_OUTPUT USART_CR2_LBCL /*!< The clock pulse of the last data bit is output to the SCLK pin */ +/** + * @} + */ + +/** @defgroup USART_LL_EC_PHASE Clock Phase + * @{ + */ +#define LL_USART_PHASE_1EDGE 0x00000000U /*!< The first clock transition is the first data capture edge */ +#define LL_USART_PHASE_2EDGE USART_CR2_CPHA /*!< The second clock transition is the first data capture edge */ +/** + * @} + */ + +/** @defgroup USART_LL_EC_POLARITY Clock Polarity + * @{ + */ +#define LL_USART_POLARITY_LOW 0x00000000U /*!< Steady low value on SCLK pin outside transmission window*/ +#define LL_USART_POLARITY_HIGH USART_CR2_CPOL /*!< Steady high value on SCLK pin outside transmission window */ +/** + * @} + */ + +/** @defgroup USART_LL_EC_STOPBITS Stop Bits + * @{ + */ +#define LL_USART_STOPBITS_0_5 USART_CR2_STOP_0 /*!< 0.5 stop bit */ +#define LL_USART_STOPBITS_1 0x00000000U /*!< 1 stop bit */ +#define LL_USART_STOPBITS_1_5 (USART_CR2_STOP_0 | USART_CR2_STOP_1) /*!< 1.5 stop bits */ +#define LL_USART_STOPBITS_2 USART_CR2_STOP_1 /*!< 2 stop bits */ +/** + * @} + */ + +/** @defgroup USART_LL_EC_HWCONTROL Hardware Control + * @{ + */ +#define LL_USART_HWCONTROL_NONE 0x00000000U /*!< CTS and RTS hardware flow control disabled */ +#define LL_USART_HWCONTROL_RTS USART_CR3_RTSE /*!< RTS output enabled, data is only requested when there is space in the 
receive buffer */ +#define LL_USART_HWCONTROL_CTS USART_CR3_CTSE /*!< CTS mode enabled, data is only transmitted when the nCTS input is asserted (tied to 0) */ +#define LL_USART_HWCONTROL_RTS_CTS (USART_CR3_RTSE | USART_CR3_CTSE) /*!< CTS and RTS hardware flow control enabled */ +/** + * @} + */ + +/** @defgroup USART_LL_EC_IRDA_POWER IrDA Power + * @{ + */ +#define LL_USART_IRDA_POWER_NORMAL 0x00000000U /*!< IrDA normal power mode */ +#define LL_USART_IRDA_POWER_LOW USART_CR3_IRLP /*!< IrDA low power mode */ +/** + * @} + */ + +/** @defgroup USART_LL_EC_LINBREAK_DETECT LIN Break Detection Length + * @{ + */ +#define LL_USART_LINBREAK_DETECT_10B 0x00000000U /*!< 10-bit break detection method selected */ +#define LL_USART_LINBREAK_DETECT_11B USART_CR2_LBDL /*!< 11-bit break detection method selected */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup USART_LL_Exported_Macros USART Exported Macros + * @{ + */ + +/** @defgroup USART_LL_EM_WRITE_READ Common Write and read registers Macros + * @{ + */ + +/** + * @brief Write a value in USART register + * @param __INSTANCE__ USART Instance + * @param __REG__ Register to be written + * @param __VALUE__ Value to be written in the register + * @retval None + */ +#define LL_USART_WriteReg(__INSTANCE__, __REG__, __VALUE__) WRITE_REG(__INSTANCE__->__REG__, (__VALUE__)) + +/** + * @brief Read a value in USART register + * @param __INSTANCE__ USART Instance + * @param __REG__ Register to be read + * @retval Register value + */ +#define LL_USART_ReadReg(__INSTANCE__, __REG__) READ_REG(__INSTANCE__->__REG__) +/** + * @} + */ + +/** @defgroup USART_LL_EM_Exported_Macros_Helper Exported Macros Helper + * @{ + */ + +/** + * @brief Compute USARTDIV value according to Peripheral Clock and + * expected Baud Rate in 8 bits sampling mode (32 bits value of USARTDIV is returned) + * @param __PERIPHCLK__ Peripheral Clock frequency used for USART instance + * @param __BAUDRATE__ Baud rate value to achieve + * @retval USARTDIV value to be used for BRR register filling in OverSampling_8 case + */ +#define __LL_USART_DIV_SAMPLING8_100(__PERIPHCLK__, __BAUDRATE__) ((uint32_t)((((uint64_t)(__PERIPHCLK__))*25)/(2*((uint64_t)(__BAUDRATE__))))) +#define __LL_USART_DIVMANT_SAMPLING8(__PERIPHCLK__, __BAUDRATE__) (__LL_USART_DIV_SAMPLING8_100((__PERIPHCLK__), (__BAUDRATE__))/100) +#define __LL_USART_DIVFRAQ_SAMPLING8(__PERIPHCLK__, __BAUDRATE__) ((((__LL_USART_DIV_SAMPLING8_100((__PERIPHCLK__), (__BAUDRATE__)) - (__LL_USART_DIVMANT_SAMPLING8((__PERIPHCLK__), (__BAUDRATE__)) * 100)) * 8)\ + + 50) / 100) +/* UART BRR = mantissa + overflow + fraction + = (UART DIVMANT << 4) + ((UART DIVFRAQ & 0xF8) << 1) + (UART DIVFRAQ & 0x07) */ +#define __LL_USART_DIV_SAMPLING8(__PERIPHCLK__, __BAUDRATE__) (((__LL_USART_DIVMANT_SAMPLING8((__PERIPHCLK__), (__BAUDRATE__)) << 4) + \ + ((__LL_USART_DIVFRAQ_SAMPLING8((__PERIPHCLK__), (__BAUDRATE__)) & 0xF8) << 1)) + \ + (__LL_USART_DIVFRAQ_SAMPLING8((__PERIPHCLK__), (__BAUDRATE__)) & 0x07)) + +/** + * @brief Compute USARTDIV value according to Peripheral Clock and + * expected Baud Rate in 16 bits sampling mode (32 bits value of USARTDIV is returned) + * @param __PERIPHCLK__ Peripheral Clock frequency used for USART instance + * @param __BAUDRATE__ Baud rate value to achieve + * @retval USARTDIV value to be used for BRR register filling in OverSampling_16 case + */ +#define __LL_USART_DIV_SAMPLING16_100(__PERIPHCLK__, __BAUDRATE__) 
((uint32_t)((((uint64_t)(__PERIPHCLK__))*25)/(4*((uint64_t)(__BAUDRATE__))))) +#define __LL_USART_DIVMANT_SAMPLING16(__PERIPHCLK__, __BAUDRATE__) (__LL_USART_DIV_SAMPLING16_100((__PERIPHCLK__), (__BAUDRATE__))/100) +#define __LL_USART_DIVFRAQ_SAMPLING16(__PERIPHCLK__, __BAUDRATE__) ((((__LL_USART_DIV_SAMPLING16_100((__PERIPHCLK__), (__BAUDRATE__)) - (__LL_USART_DIVMANT_SAMPLING16((__PERIPHCLK__), (__BAUDRATE__)) * 100)) * 16)\ + + 50) / 100) +/* USART BRR = mantissa + overflow + fraction + = (USART DIVMANT << 4) + (USART DIVFRAQ & 0xF0) + (USART DIVFRAQ & 0x0F) */ +#define __LL_USART_DIV_SAMPLING16(__PERIPHCLK__, __BAUDRATE__) (((__LL_USART_DIVMANT_SAMPLING16((__PERIPHCLK__), (__BAUDRATE__)) << 4) + \ + (__LL_USART_DIVFRAQ_SAMPLING16((__PERIPHCLK__), (__BAUDRATE__)) & 0xF0)) + \ + (__LL_USART_DIVFRAQ_SAMPLING16((__PERIPHCLK__), (__BAUDRATE__)) & 0x0F)) + +/** + * @} + */ + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup USART_LL_Exported_Functions USART Exported Functions + * @{ + */ + +/** @defgroup USART_LL_EF_Configuration Configuration functions + * @{ + */ + +/** + * @brief USART Enable + * @rmtoll CR1 UE LL_USART_Enable + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_Enable(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR1, USART_CR1_UE); +} + +/** + * @brief USART Disable (all USART prescalers and outputs are disabled) + * @note When USART is disabled, USART prescalers and outputs are stopped immediately, + * and current operations are discarded. The configuration of the USART is kept, but all the status + * flags, in the USARTx_SR are set to their default values. + * @rmtoll CR1 UE LL_USART_Disable + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_Disable(USART_TypeDef *USARTx) +{ + CLEAR_BIT(USARTx->CR1, USART_CR1_UE); +} + +/** + * @brief Indicate if USART is enabled + * @rmtoll CR1 UE LL_USART_IsEnabled + * @param USARTx USART Instance + * @retval State of bit (1 or 0). 
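Worked example of the oversampling-by-16 divider arithmetic: with an 84 MHz peripheral clock and 115200 baud, USARTDIV = 84 MHz / (16 * 115200) = 45.57, i.e. mantissa 45 and fraction round(0.57 * 16) = 9, so BRR = (45 << 4) | 9 = 0x2D9. The macro reproduces this value:

#include "stm32f4xx_ll_usart.h"

/* Illustrative check: 84 MHz source clock (e.g. the APB2 clock feeding USART1 on
 * a 168 MHz STM32F4), 115200 baud, oversampling by 16.  The macro yields 0x2D9
 * (729), giving an actual rate of 84 MHz / 729, roughly 115226 baud. */
static const uint32_t usart1_brr_115200 = __LL_USART_DIV_SAMPLING16(84000000U, 115200U);
/* LL_USART_SetBaudRate(), defined further below, performs the same computation
 * and writes the result into the BRR register. */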
+ */ +__STATIC_INLINE uint32_t LL_USART_IsEnabled(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR1, USART_CR1_UE) == (USART_CR1_UE)); +} + +/** + * @brief Receiver Enable (Receiver is enabled and begins searching for a start bit) + * @rmtoll CR1 RE LL_USART_EnableDirectionRx + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableDirectionRx(USART_TypeDef *USARTx) +{ + ATOMIC_SET_BIT(USARTx->CR1, USART_CR1_RE); +} + +/** + * @brief Receiver Disable + * @rmtoll CR1 RE LL_USART_DisableDirectionRx + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableDirectionRx(USART_TypeDef *USARTx) +{ + ATOMIC_CLEAR_BIT(USARTx->CR1, USART_CR1_RE); +} + +/** + * @brief Transmitter Enable + * @rmtoll CR1 TE LL_USART_EnableDirectionTx + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableDirectionTx(USART_TypeDef *USARTx) +{ + ATOMIC_SET_BIT(USARTx->CR1, USART_CR1_TE); +} + +/** + * @brief Transmitter Disable + * @rmtoll CR1 TE LL_USART_DisableDirectionTx + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableDirectionTx(USART_TypeDef *USARTx) +{ + ATOMIC_CLEAR_BIT(USARTx->CR1, USART_CR1_TE); +} + +/** + * @brief Configure simultaneously enabled/disabled states + * of Transmitter and Receiver + * @rmtoll CR1 RE LL_USART_SetTransferDirection\n + * CR1 TE LL_USART_SetTransferDirection + * @param USARTx USART Instance + * @param TransferDirection This parameter can be one of the following values: + * @arg @ref LL_USART_DIRECTION_NONE + * @arg @ref LL_USART_DIRECTION_RX + * @arg @ref LL_USART_DIRECTION_TX + * @arg @ref LL_USART_DIRECTION_TX_RX + * @retval None + */ +__STATIC_INLINE void LL_USART_SetTransferDirection(USART_TypeDef *USARTx, uint32_t TransferDirection) +{ + ATOMIC_MODIFY_REG(USARTx->CR1, USART_CR1_RE | USART_CR1_TE, TransferDirection); +} + +/** + * @brief Return enabled/disabled states of Transmitter and Receiver + * @rmtoll CR1 RE LL_USART_GetTransferDirection\n + * CR1 TE LL_USART_GetTransferDirection + * @param USARTx USART Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_USART_DIRECTION_NONE + * @arg @ref LL_USART_DIRECTION_RX + * @arg @ref LL_USART_DIRECTION_TX + * @arg @ref LL_USART_DIRECTION_TX_RX + */ +__STATIC_INLINE uint32_t LL_USART_GetTransferDirection(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR1, USART_CR1_RE | USART_CR1_TE)); +} + +/** + * @brief Configure Parity (enabled/disabled and parity mode if enabled). + * @note This function selects if hardware parity control (generation and detection) is enabled or disabled. + * When the parity control is enabled (Odd or Even), computed parity bit is inserted at the MSB position + * (9th or 8th bit depending on data width) and parity is checked on the received data. 
+ * @rmtoll CR1 PS LL_USART_SetParity\n + * CR1 PCE LL_USART_SetParity + * @param USARTx USART Instance + * @param Parity This parameter can be one of the following values: + * @arg @ref LL_USART_PARITY_NONE + * @arg @ref LL_USART_PARITY_EVEN + * @arg @ref LL_USART_PARITY_ODD + * @retval None + */ +__STATIC_INLINE void LL_USART_SetParity(USART_TypeDef *USARTx, uint32_t Parity) +{ + MODIFY_REG(USARTx->CR1, USART_CR1_PS | USART_CR1_PCE, Parity); +} + +/** + * @brief Return Parity configuration (enabled/disabled and parity mode if enabled) + * @rmtoll CR1 PS LL_USART_GetParity\n + * CR1 PCE LL_USART_GetParity + * @param USARTx USART Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_USART_PARITY_NONE + * @arg @ref LL_USART_PARITY_EVEN + * @arg @ref LL_USART_PARITY_ODD + */ +__STATIC_INLINE uint32_t LL_USART_GetParity(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR1, USART_CR1_PS | USART_CR1_PCE)); +} + +/** + * @brief Set Receiver Wake Up method from Mute mode. + * @rmtoll CR1 WAKE LL_USART_SetWakeUpMethod + * @param USARTx USART Instance + * @param Method This parameter can be one of the following values: + * @arg @ref LL_USART_WAKEUP_IDLELINE + * @arg @ref LL_USART_WAKEUP_ADDRESSMARK + * @retval None + */ +__STATIC_INLINE void LL_USART_SetWakeUpMethod(USART_TypeDef *USARTx, uint32_t Method) +{ + MODIFY_REG(USARTx->CR1, USART_CR1_WAKE, Method); +} + +/** + * @brief Return Receiver Wake Up method from Mute mode + * @rmtoll CR1 WAKE LL_USART_GetWakeUpMethod + * @param USARTx USART Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_USART_WAKEUP_IDLELINE + * @arg @ref LL_USART_WAKEUP_ADDRESSMARK + */ +__STATIC_INLINE uint32_t LL_USART_GetWakeUpMethod(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR1, USART_CR1_WAKE)); +} + +/** + * @brief Set Word length (i.e. nb of data bits, excluding start and stop bits) + * @rmtoll CR1 M LL_USART_SetDataWidth + * @param USARTx USART Instance + * @param DataWidth This parameter can be one of the following values: + * @arg @ref LL_USART_DATAWIDTH_8B + * @arg @ref LL_USART_DATAWIDTH_9B + * @retval None + */ +__STATIC_INLINE void LL_USART_SetDataWidth(USART_TypeDef *USARTx, uint32_t DataWidth) +{ + MODIFY_REG(USARTx->CR1, USART_CR1_M, DataWidth); +} + +/** + * @brief Return Word length (i.e. 
nb of data bits, excluding start and stop bits) + * @rmtoll CR1 M LL_USART_GetDataWidth + * @param USARTx USART Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_USART_DATAWIDTH_8B + * @arg @ref LL_USART_DATAWIDTH_9B + */ +__STATIC_INLINE uint32_t LL_USART_GetDataWidth(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR1, USART_CR1_M)); +} + +/** + * @brief Set Oversampling to 8-bit or 16-bit mode + * @rmtoll CR1 OVER8 LL_USART_SetOverSampling + * @param USARTx USART Instance + * @param OverSampling This parameter can be one of the following values: + * @arg @ref LL_USART_OVERSAMPLING_16 + * @arg @ref LL_USART_OVERSAMPLING_8 + * @retval None + */ +__STATIC_INLINE void LL_USART_SetOverSampling(USART_TypeDef *USARTx, uint32_t OverSampling) +{ + MODIFY_REG(USARTx->CR1, USART_CR1_OVER8, OverSampling); +} + +/** + * @brief Return Oversampling mode + * @rmtoll CR1 OVER8 LL_USART_GetOverSampling + * @param USARTx USART Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_USART_OVERSAMPLING_16 + * @arg @ref LL_USART_OVERSAMPLING_8 + */ +__STATIC_INLINE uint32_t LL_USART_GetOverSampling(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR1, USART_CR1_OVER8)); +} + +/** + * @brief Configure if Clock pulse of the last data bit is output to the SCLK pin or not + * @note Macro IS_USART_INSTANCE(USARTx) can be used to check whether or not + * Synchronous mode is supported by the USARTx instance. + * @rmtoll CR2 LBCL LL_USART_SetLastClkPulseOutput + * @param USARTx USART Instance + * @param LastBitClockPulse This parameter can be one of the following values: + * @arg @ref LL_USART_LASTCLKPULSE_NO_OUTPUT + * @arg @ref LL_USART_LASTCLKPULSE_OUTPUT + * @retval None + */ +__STATIC_INLINE void LL_USART_SetLastClkPulseOutput(USART_TypeDef *USARTx, uint32_t LastBitClockPulse) +{ + MODIFY_REG(USARTx->CR2, USART_CR2_LBCL, LastBitClockPulse); +} + +/** + * @brief Retrieve Clock pulse of the last data bit output configuration + * (Last bit Clock pulse output to the SCLK pin or not) + * @note Macro IS_USART_INSTANCE(USARTx) can be used to check whether or not + * Synchronous mode is supported by the USARTx instance. + * @rmtoll CR2 LBCL LL_USART_GetLastClkPulseOutput + * @param USARTx USART Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_USART_LASTCLKPULSE_NO_OUTPUT + * @arg @ref LL_USART_LASTCLKPULSE_OUTPUT + */ +__STATIC_INLINE uint32_t LL_USART_GetLastClkPulseOutput(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR2, USART_CR2_LBCL)); +} + +/** + * @brief Select the phase of the clock output on the SCLK pin in synchronous mode + * @note Macro IS_USART_INSTANCE(USARTx) can be used to check whether or not + * Synchronous mode is supported by the USARTx instance. + * @rmtoll CR2 CPHA LL_USART_SetClockPhase + * @param USARTx USART Instance + * @param ClockPhase This parameter can be one of the following values: + * @arg @ref LL_USART_PHASE_1EDGE + * @arg @ref LL_USART_PHASE_2EDGE + * @retval None + */ +__STATIC_INLINE void LL_USART_SetClockPhase(USART_TypeDef *USARTx, uint32_t ClockPhase) +{ + MODIFY_REG(USARTx->CR2, USART_CR2_CPHA, ClockPhase); +} + +/** + * @brief Return phase of the clock output on the SCLK pin in synchronous mode + * @note Macro IS_USART_INSTANCE(USARTx) can be used to check whether or not + * Synchronous mode is supported by the USARTx instance. 
+ * @rmtoll CR2 CPHA LL_USART_GetClockPhase + * @param USARTx USART Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_USART_PHASE_1EDGE + * @arg @ref LL_USART_PHASE_2EDGE + */ +__STATIC_INLINE uint32_t LL_USART_GetClockPhase(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR2, USART_CR2_CPHA)); +} + +/** + * @brief Select the polarity of the clock output on the SCLK pin in synchronous mode + * @note Macro IS_USART_INSTANCE(USARTx) can be used to check whether or not + * Synchronous mode is supported by the USARTx instance. + * @rmtoll CR2 CPOL LL_USART_SetClockPolarity + * @param USARTx USART Instance + * @param ClockPolarity This parameter can be one of the following values: + * @arg @ref LL_USART_POLARITY_LOW + * @arg @ref LL_USART_POLARITY_HIGH + * @retval None + */ +__STATIC_INLINE void LL_USART_SetClockPolarity(USART_TypeDef *USARTx, uint32_t ClockPolarity) +{ + MODIFY_REG(USARTx->CR2, USART_CR2_CPOL, ClockPolarity); +} + +/** + * @brief Return polarity of the clock output on the SCLK pin in synchronous mode + * @note Macro IS_USART_INSTANCE(USARTx) can be used to check whether or not + * Synchronous mode is supported by the USARTx instance. + * @rmtoll CR2 CPOL LL_USART_GetClockPolarity + * @param USARTx USART Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_USART_POLARITY_LOW + * @arg @ref LL_USART_POLARITY_HIGH + */ +__STATIC_INLINE uint32_t LL_USART_GetClockPolarity(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR2, USART_CR2_CPOL)); +} + +/** + * @brief Configure Clock signal format (Phase Polarity and choice about output of last bit clock pulse) + * @note Macro IS_USART_INSTANCE(USARTx) can be used to check whether or not + * Synchronous mode is supported by the USARTx instance. + * @note Call of this function is equivalent to following function call sequence : + * - Clock Phase configuration using @ref LL_USART_SetClockPhase() function + * - Clock Polarity configuration using @ref LL_USART_SetClockPolarity() function + * - Output of Last bit Clock pulse configuration using @ref LL_USART_SetLastClkPulseOutput() function + * @rmtoll CR2 CPHA LL_USART_ConfigClock\n + * CR2 CPOL LL_USART_ConfigClock\n + * CR2 LBCL LL_USART_ConfigClock + * @param USARTx USART Instance + * @param Phase This parameter can be one of the following values: + * @arg @ref LL_USART_PHASE_1EDGE + * @arg @ref LL_USART_PHASE_2EDGE + * @param Polarity This parameter can be one of the following values: + * @arg @ref LL_USART_POLARITY_LOW + * @arg @ref LL_USART_POLARITY_HIGH + * @param LBCPOutput This parameter can be one of the following values: + * @arg @ref LL_USART_LASTCLKPULSE_NO_OUTPUT + * @arg @ref LL_USART_LASTCLKPULSE_OUTPUT + * @retval None + */ +__STATIC_INLINE void LL_USART_ConfigClock(USART_TypeDef *USARTx, uint32_t Phase, uint32_t Polarity, uint32_t LBCPOutput) +{ + MODIFY_REG(USARTx->CR2, USART_CR2_CPHA | USART_CR2_CPOL | USART_CR2_LBCL, Phase | Polarity | LBCPOutput); +} + +/** + * @brief Enable Clock output on SCLK pin + * @note Macro IS_USART_INSTANCE(USARTx) can be used to check whether or not + * Synchronous mode is supported by the USARTx instance. 
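In synchronous mode the SCLK behaviour is set with the CPOL/CPHA/LBCL helpers before the clock output is enabled (LL_USART_EnableSCLKOutput() is defined just below). A sketch, assuming USART1, an instance that supports synchronous mode:

#include "stm32f4xx_ll_usart.h"

/* Illustrative helper: SPI-mode-0-like clock on the USART1 SCLK pin. */
static void usart1_config_sync_clock(void)
{
  LL_USART_ConfigClock(USART1,
                       LL_USART_PHASE_1EDGE,           /* capture on first edge  */
                       LL_USART_POLARITY_LOW,          /* SCLK idles low         */
                       LL_USART_LASTCLKPULSE_OUTPUT);  /* clock out the last bit */
  LL_USART_EnableSCLKOutput(USART1);
}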
+ * @rmtoll CR2 CLKEN LL_USART_EnableSCLKOutput + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableSCLKOutput(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR2, USART_CR2_CLKEN); +} + +/** + * @brief Disable Clock output on SCLK pin + * @note Macro IS_USART_INSTANCE(USARTx) can be used to check whether or not + * Synchronous mode is supported by the USARTx instance. + * @rmtoll CR2 CLKEN LL_USART_DisableSCLKOutput + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableSCLKOutput(USART_TypeDef *USARTx) +{ + CLEAR_BIT(USARTx->CR2, USART_CR2_CLKEN); +} + +/** + * @brief Indicate if Clock output on SCLK pin is enabled + * @note Macro IS_USART_INSTANCE(USARTx) can be used to check whether or not + * Synchronous mode is supported by the USARTx instance. + * @rmtoll CR2 CLKEN LL_USART_IsEnabledSCLKOutput + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledSCLKOutput(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR2, USART_CR2_CLKEN) == (USART_CR2_CLKEN)); +} + +/** + * @brief Set the length of the stop bits + * @rmtoll CR2 STOP LL_USART_SetStopBitsLength + * @param USARTx USART Instance + * @param StopBits This parameter can be one of the following values: + * @arg @ref LL_USART_STOPBITS_0_5 + * @arg @ref LL_USART_STOPBITS_1 + * @arg @ref LL_USART_STOPBITS_1_5 + * @arg @ref LL_USART_STOPBITS_2 + * @retval None + */ +__STATIC_INLINE void LL_USART_SetStopBitsLength(USART_TypeDef *USARTx, uint32_t StopBits) +{ + MODIFY_REG(USARTx->CR2, USART_CR2_STOP, StopBits); +} + +/** + * @brief Retrieve the length of the stop bits + * @rmtoll CR2 STOP LL_USART_GetStopBitsLength + * @param USARTx USART Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_USART_STOPBITS_0_5 + * @arg @ref LL_USART_STOPBITS_1 + * @arg @ref LL_USART_STOPBITS_1_5 + * @arg @ref LL_USART_STOPBITS_2 + */ +__STATIC_INLINE uint32_t LL_USART_GetStopBitsLength(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR2, USART_CR2_STOP)); +} + +/** + * @brief Configure Character frame format (Datawidth, Parity control, Stop Bits) + * @note Call of this function is equivalent to following function call sequence : + * - Data Width configuration using @ref LL_USART_SetDataWidth() function + * - Parity Control and mode configuration using @ref LL_USART_SetParity() function + * - Stop bits configuration using @ref LL_USART_SetStopBitsLength() function + * @rmtoll CR1 PS LL_USART_ConfigCharacter\n + * CR1 PCE LL_USART_ConfigCharacter\n + * CR1 M LL_USART_ConfigCharacter\n + * CR2 STOP LL_USART_ConfigCharacter + * @param USARTx USART Instance + * @param DataWidth This parameter can be one of the following values: + * @arg @ref LL_USART_DATAWIDTH_8B + * @arg @ref LL_USART_DATAWIDTH_9B + * @param Parity This parameter can be one of the following values: + * @arg @ref LL_USART_PARITY_NONE + * @arg @ref LL_USART_PARITY_EVEN + * @arg @ref LL_USART_PARITY_ODD + * @param StopBits This parameter can be one of the following values: + * @arg @ref LL_USART_STOPBITS_0_5 + * @arg @ref LL_USART_STOPBITS_1 + * @arg @ref LL_USART_STOPBITS_1_5 + * @arg @ref LL_USART_STOPBITS_2 + * @retval None + */ +__STATIC_INLINE void LL_USART_ConfigCharacter(USART_TypeDef *USARTx, uint32_t DataWidth, uint32_t Parity, + uint32_t StopBits) +{ + MODIFY_REG(USARTx->CR1, USART_CR1_PS | USART_CR1_PCE | USART_CR1_M, Parity | DataWidth); + MODIFY_REG(USARTx->CR2, USART_CR2_STOP, 
StopBits); +} + +/** + * @brief Set Address of the USART node. + * @note This is used in multiprocessor communication during Mute mode or Stop mode, + * for wake up with address mark detection. + * @rmtoll CR2 ADD LL_USART_SetNodeAddress + * @param USARTx USART Instance + * @param NodeAddress 4 bit Address of the USART node. + * @retval None + */ +__STATIC_INLINE void LL_USART_SetNodeAddress(USART_TypeDef *USARTx, uint32_t NodeAddress) +{ + MODIFY_REG(USARTx->CR2, USART_CR2_ADD, (NodeAddress & USART_CR2_ADD)); +} + +/** + * @brief Return 4 bit Address of the USART node as set in ADD field of CR2. + * @note only 4bits (b3-b0) of returned value are relevant (b31-b4 are not relevant) + * @rmtoll CR2 ADD LL_USART_GetNodeAddress + * @param USARTx USART Instance + * @retval Address of the USART node (Value between Min_Data=0 and Max_Data=255) + */ +__STATIC_INLINE uint32_t LL_USART_GetNodeAddress(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR2, USART_CR2_ADD)); +} + +/** + * @brief Enable RTS HW Flow Control + * @note Macro IS_UART_HWFLOW_INSTANCE(USARTx) can be used to check whether or not + * Hardware Flow control feature is supported by the USARTx instance. + * @rmtoll CR3 RTSE LL_USART_EnableRTSHWFlowCtrl + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableRTSHWFlowCtrl(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR3, USART_CR3_RTSE); +} + +/** + * @brief Disable RTS HW Flow Control + * @note Macro IS_UART_HWFLOW_INSTANCE(USARTx) can be used to check whether or not + * Hardware Flow control feature is supported by the USARTx instance. + * @rmtoll CR3 RTSE LL_USART_DisableRTSHWFlowCtrl + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableRTSHWFlowCtrl(USART_TypeDef *USARTx) +{ + CLEAR_BIT(USARTx->CR3, USART_CR3_RTSE); +} + +/** + * @brief Enable CTS HW Flow Control + * @note Macro IS_UART_HWFLOW_INSTANCE(USARTx) can be used to check whether or not + * Hardware Flow control feature is supported by the USARTx instance. + * @rmtoll CR3 CTSE LL_USART_EnableCTSHWFlowCtrl + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableCTSHWFlowCtrl(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR3, USART_CR3_CTSE); +} + +/** + * @brief Disable CTS HW Flow Control + * @note Macro IS_UART_HWFLOW_INSTANCE(USARTx) can be used to check whether or not + * Hardware Flow control feature is supported by the USARTx instance. + * @rmtoll CR3 CTSE LL_USART_DisableCTSHWFlowCtrl + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableCTSHWFlowCtrl(USART_TypeDef *USARTx) +{ + CLEAR_BIT(USARTx->CR3, USART_CR3_CTSE); +} + +/** + * @brief Configure HW Flow Control mode (both CTS and RTS) + * @note Macro IS_UART_HWFLOW_INSTANCE(USARTx) can be used to check whether or not + * Hardware Flow control feature is supported by the USARTx instance. 
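RTS and CTS can be switched individually with the helpers above or together with LL_USART_SetHWFlowCtrl() shown next; flow control is normally configured before the USART is enabled. A sketch, assuming USART3 with both handshake lines wired:

#include "stm32f4xx_ll_usart.h"

/* Illustrative helper: enable both hardware flow-control lines on USART3
 * (the instance must pass IS_UART_HWFLOW_INSTANCE). */
static void usart3_enable_flow_control(void)
{
  LL_USART_EnableRTSHWFlowCtrl(USART3);  /* request data only while RX has room */
  LL_USART_EnableCTSHWFlowCtrl(USART3);  /* transmit only while nCTS is low     */
  /* single-call equivalent:
   * LL_USART_SetHWFlowCtrl(USART3, LL_USART_HWCONTROL_RTS_CTS); */
}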
+ * @rmtoll CR3 RTSE LL_USART_SetHWFlowCtrl\n + * CR3 CTSE LL_USART_SetHWFlowCtrl + * @param USARTx USART Instance + * @param HardwareFlowControl This parameter can be one of the following values: + * @arg @ref LL_USART_HWCONTROL_NONE + * @arg @ref LL_USART_HWCONTROL_RTS + * @arg @ref LL_USART_HWCONTROL_CTS + * @arg @ref LL_USART_HWCONTROL_RTS_CTS + * @retval None + */ +__STATIC_INLINE void LL_USART_SetHWFlowCtrl(USART_TypeDef *USARTx, uint32_t HardwareFlowControl) +{ + MODIFY_REG(USARTx->CR3, USART_CR3_RTSE | USART_CR3_CTSE, HardwareFlowControl); +} + +/** + * @brief Return HW Flow Control configuration (both CTS and RTS) + * @note Macro IS_UART_HWFLOW_INSTANCE(USARTx) can be used to check whether or not + * Hardware Flow control feature is supported by the USARTx instance. + * @rmtoll CR3 RTSE LL_USART_GetHWFlowCtrl\n + * CR3 CTSE LL_USART_GetHWFlowCtrl + * @param USARTx USART Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_USART_HWCONTROL_NONE + * @arg @ref LL_USART_HWCONTROL_RTS + * @arg @ref LL_USART_HWCONTROL_CTS + * @arg @ref LL_USART_HWCONTROL_RTS_CTS + */ +__STATIC_INLINE uint32_t LL_USART_GetHWFlowCtrl(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR3, USART_CR3_RTSE | USART_CR3_CTSE)); +} + +/** + * @brief Enable One bit sampling method + * @rmtoll CR3 ONEBIT LL_USART_EnableOneBitSamp + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableOneBitSamp(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR3, USART_CR3_ONEBIT); +} + +/** + * @brief Disable One bit sampling method + * @rmtoll CR3 ONEBIT LL_USART_DisableOneBitSamp + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableOneBitSamp(USART_TypeDef *USARTx) +{ + CLEAR_BIT(USARTx->CR3, USART_CR3_ONEBIT); +} + +/** + * @brief Indicate if One bit sampling method is enabled + * @rmtoll CR3 ONEBIT LL_USART_IsEnabledOneBitSamp + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledOneBitSamp(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR3, USART_CR3_ONEBIT) == (USART_CR3_ONEBIT)); +} + +/** + * @brief Configure USART BRR register for achieving expected Baud Rate value. + * @note Compute and set USARTDIV value in BRR Register (full BRR content) + * according to used Peripheral Clock, Oversampling mode, and expected Baud Rate values + * @note Peripheral clock and Baud rate values provided as function parameters should be valid + * (Baud rate value != 0) + * @rmtoll BRR BRR LL_USART_SetBaudRate + * @param USARTx USART Instance + * @param PeriphClk Peripheral Clock + * @param OverSampling This parameter can be one of the following values: + * @arg @ref LL_USART_OVERSAMPLING_16 + * @arg @ref LL_USART_OVERSAMPLING_8 + * @param BaudRate Baud Rate + * @retval None + */ +__STATIC_INLINE void LL_USART_SetBaudRate(USART_TypeDef *USARTx, uint32_t PeriphClk, uint32_t OverSampling, + uint32_t BaudRate) +{ + if (OverSampling == LL_USART_OVERSAMPLING_8) + { + USARTx->BRR = (uint16_t)(__LL_USART_DIV_SAMPLING8(PeriphClk, BaudRate)); + } + else + { + USARTx->BRR = (uint16_t)(__LL_USART_DIV_SAMPLING16(PeriphClk, BaudRate)); + } +} + +/** + * @brief Return current Baud Rate value, according to USARTDIV present in BRR register + * (full BRR content), and to used Peripheral Clock and Oversampling mode values + * @note In case of non-initialized or invalid value stored in BRR register, value 0 will be returned. 
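Putting the unitary calls together gives a compact register-level bring-up; GPIO alternate-function setup and RCC clock enabling are assumed to be done elsewhere, and USART1 fed by an 84 MHz APB2 clock is only an example:

#include "stm32f4xx_ll_usart.h"

/* Illustrative helper: minimal 115200-8N1 bring-up of USART1 with unitary LL calls. */
static void usart1_console_init(void)
{
  LL_USART_SetTransferDirection(USART1, LL_USART_DIRECTION_TX_RX);
  LL_USART_ConfigCharacter(USART1, LL_USART_DATAWIDTH_8B,
                           LL_USART_PARITY_NONE, LL_USART_STOPBITS_1);
  LL_USART_SetBaudRate(USART1, 84000000U, LL_USART_OVERSAMPLING_16, 115200U);
  LL_USART_Enable(USART1);
}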
+ * @rmtoll BRR BRR LL_USART_GetBaudRate + * @param USARTx USART Instance + * @param PeriphClk Peripheral Clock + * @param OverSampling This parameter can be one of the following values: + * @arg @ref LL_USART_OVERSAMPLING_16 + * @arg @ref LL_USART_OVERSAMPLING_8 + * @retval Baud Rate + */ +__STATIC_INLINE uint32_t LL_USART_GetBaudRate(const USART_TypeDef *USARTx, uint32_t PeriphClk, uint32_t OverSampling) +{ + uint32_t usartdiv = 0x0U; + uint32_t brrresult = 0x0U; + + usartdiv = USARTx->BRR; + + if (OverSampling == LL_USART_OVERSAMPLING_8) + { + if ((usartdiv & 0xFFF7U) != 0U) + { + usartdiv = (uint16_t)((usartdiv & 0xFFF0U) | ((usartdiv & 0x0007U) << 1U)) ; + brrresult = (PeriphClk * 2U) / usartdiv; + } + } + else + { + if ((usartdiv & 0xFFFFU) != 0U) + { + brrresult = PeriphClk / usartdiv; + } + } + return (brrresult); +} + +/** + * @} + */ + +/** @defgroup USART_LL_EF_Configuration_IRDA Configuration functions related to Irda feature + * @{ + */ + +/** + * @brief Enable IrDA mode + * @note Macro IS_IRDA_INSTANCE(USARTx) can be used to check whether or not + * IrDA feature is supported by the USARTx instance. + * @rmtoll CR3 IREN LL_USART_EnableIrda + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableIrda(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR3, USART_CR3_IREN); +} + +/** + * @brief Disable IrDA mode + * @note Macro IS_IRDA_INSTANCE(USARTx) can be used to check whether or not + * IrDA feature is supported by the USARTx instance. + * @rmtoll CR3 IREN LL_USART_DisableIrda + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableIrda(USART_TypeDef *USARTx) +{ + CLEAR_BIT(USARTx->CR3, USART_CR3_IREN); +} + +/** + * @brief Indicate if IrDA mode is enabled + * @note Macro IS_IRDA_INSTANCE(USARTx) can be used to check whether or not + * IrDA feature is supported by the USARTx instance. + * @rmtoll CR3 IREN LL_USART_IsEnabledIrda + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledIrda(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR3, USART_CR3_IREN) == (USART_CR3_IREN)); +} + +/** + * @brief Configure IrDA Power Mode (Normal or Low Power) + * @note Macro IS_IRDA_INSTANCE(USARTx) can be used to check whether or not + * IrDA feature is supported by the USARTx instance. + * @rmtoll CR3 IRLP LL_USART_SetIrdaPowerMode + * @param USARTx USART Instance + * @param PowerMode This parameter can be one of the following values: + * @arg @ref LL_USART_IRDA_POWER_NORMAL + * @arg @ref LL_USART_IRDA_POWER_LOW + * @retval None + */ +__STATIC_INLINE void LL_USART_SetIrdaPowerMode(USART_TypeDef *USARTx, uint32_t PowerMode) +{ + MODIFY_REG(USARTx->CR3, USART_CR3_IRLP, PowerMode); +} + +/** + * @brief Retrieve IrDA Power Mode configuration (Normal or Low Power) + * @note Macro IS_IRDA_INSTANCE(USARTx) can be used to check whether or not + * IrDA feature is supported by the USARTx instance. 
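An IrDA usage sketch, assuming UART4 supports the feature; the prescaler setter shown just below only matters in low-power mode, and the values here are illustrative:

#include "stm32f4xx_ll_usart.h"

/* Illustrative helper: switch UART4 to IrDA low-power mode (baud rate and
 * character format assumed already configured, USART still disabled). */
static void uart4_enable_irda_low_power(void)
{
  LL_USART_SetIrdaPowerMode(UART4, LL_USART_IRDA_POWER_LOW);
  LL_USART_SetIrdaPrescaler(UART4, 1U);  /* divides the source clock in low-power mode */
  LL_USART_EnableIrda(UART4);
  LL_USART_Enable(UART4);
}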
+ * @rmtoll CR3 IRLP LL_USART_GetIrdaPowerMode + * @param USARTx USART Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_USART_IRDA_POWER_NORMAL + * @arg @ref LL_USART_IRDA_POWER_LOW + */ +__STATIC_INLINE uint32_t LL_USART_GetIrdaPowerMode(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR3, USART_CR3_IRLP)); +} + +/** + * @brief Set Irda prescaler value, used for dividing the USART clock source + * to achieve the Irda Low Power frequency (8 bits value) + * @note Macro IS_IRDA_INSTANCE(USARTx) can be used to check whether or not + * IrDA feature is supported by the USARTx instance. + * @rmtoll GTPR PSC LL_USART_SetIrdaPrescaler + * @param USARTx USART Instance + * @param PrescalerValue Value between Min_Data=0x00 and Max_Data=0xFF + * @retval None + */ +__STATIC_INLINE void LL_USART_SetIrdaPrescaler(USART_TypeDef *USARTx, uint32_t PrescalerValue) +{ + MODIFY_REG(USARTx->GTPR, USART_GTPR_PSC, PrescalerValue); +} + +/** + * @brief Return Irda prescaler value, used for dividing the USART clock source + * to achieve the Irda Low Power frequency (8 bits value) + * @note Macro IS_IRDA_INSTANCE(USARTx) can be used to check whether or not + * IrDA feature is supported by the USARTx instance. + * @rmtoll GTPR PSC LL_USART_GetIrdaPrescaler + * @param USARTx USART Instance + * @retval Irda prescaler value (Value between Min_Data=0x00 and Max_Data=0xFF) + */ +__STATIC_INLINE uint32_t LL_USART_GetIrdaPrescaler(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->GTPR, USART_GTPR_PSC)); +} + +/** + * @} + */ + +/** @defgroup USART_LL_EF_Configuration_Smartcard Configuration functions related to Smartcard feature + * @{ + */ + +/** + * @brief Enable Smartcard NACK transmission + * @note Macro IS_SMARTCARD_INSTANCE(USARTx) can be used to check whether or not + * Smartcard feature is supported by the USARTx instance. + * @rmtoll CR3 NACK LL_USART_EnableSmartcardNACK + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableSmartcardNACK(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR3, USART_CR3_NACK); +} + +/** + * @brief Disable Smartcard NACK transmission + * @note Macro IS_SMARTCARD_INSTANCE(USARTx) can be used to check whether or not + * Smartcard feature is supported by the USARTx instance. + * @rmtoll CR3 NACK LL_USART_DisableSmartcardNACK + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableSmartcardNACK(USART_TypeDef *USARTx) +{ + CLEAR_BIT(USARTx->CR3, USART_CR3_NACK); +} + +/** + * @brief Indicate if Smartcard NACK transmission is enabled + * @note Macro IS_SMARTCARD_INSTANCE(USARTx) can be used to check whether or not + * Smartcard feature is supported by the USARTx instance. + * @rmtoll CR3 NACK LL_USART_IsEnabledSmartcardNACK + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledSmartcardNACK(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR3, USART_CR3_NACK) == (USART_CR3_NACK)); +} + +/** + * @brief Enable Smartcard mode + * @note Macro IS_SMARTCARD_INSTANCE(USARTx) can be used to check whether or not + * Smartcard feature is supported by the USARTx instance.
+ * @rmtoll CR3 SCEN LL_USART_EnableSmartcard + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableSmartcard(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR3, USART_CR3_SCEN); +} + +/** + * @brief Disable Smartcard mode + * @note Macro IS_SMARTCARD_INSTANCE(USARTx) can be used to check whether or not + * Smartcard feature is supported by the USARTx instance. + * @rmtoll CR3 SCEN LL_USART_DisableSmartcard + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableSmartcard(USART_TypeDef *USARTx) +{ + CLEAR_BIT(USARTx->CR3, USART_CR3_SCEN); +} + +/** + * @brief Indicate if Smartcard mode is enabled + * @note Macro IS_SMARTCARD_INSTANCE(USARTx) can be used to check whether or not + * Smartcard feature is supported by the USARTx instance. + * @rmtoll CR3 SCEN LL_USART_IsEnabledSmartcard + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledSmartcard(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR3, USART_CR3_SCEN) == (USART_CR3_SCEN)); +} + +/** + * @brief Set Smartcard prescaler value, used for dividing the USART clock + * source to provide the SMARTCARD Clock (5 bits value) + * @note Macro IS_SMARTCARD_INSTANCE(USARTx) can be used to check whether or not + * Smartcard feature is supported by the USARTx instance. + * @rmtoll GTPR PSC LL_USART_SetSmartcardPrescaler + * @param USARTx USART Instance + * @param PrescalerValue Value between Min_Data=0 and Max_Data=31 + * @retval None + */ +__STATIC_INLINE void LL_USART_SetSmartcardPrescaler(USART_TypeDef *USARTx, uint32_t PrescalerValue) +{ + MODIFY_REG(USARTx->GTPR, USART_GTPR_PSC, PrescalerValue); +} + +/** + * @brief Return Smartcard prescaler value, used for dividing the USART clock + * source to provide the SMARTCARD Clock (5 bits value) + * @note Macro IS_SMARTCARD_INSTANCE(USARTx) can be used to check whether or not + * Smartcard feature is supported by the USARTx instance. + * @rmtoll GTPR PSC LL_USART_GetSmartcardPrescaler + * @param USARTx USART Instance + * @retval Smartcard prescaler value (Value between Min_Data=0 and Max_Data=31) + */ +__STATIC_INLINE uint32_t LL_USART_GetSmartcardPrescaler(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->GTPR, USART_GTPR_PSC)); +} + +/** + * @brief Set Smartcard Guard time value, expressed in nb of baud clocks periods + * (GT[7:0] bits : Guard time value) + * @note Macro IS_SMARTCARD_INSTANCE(USARTx) can be used to check whether or not + * Smartcard feature is supported by the USARTx instance. + * @rmtoll GTPR GT LL_USART_SetSmartcardGuardTime + * @param USARTx USART Instance + * @param GuardTime Value between Min_Data=0x00 and Max_Data=0xFF + * @retval None + */ +__STATIC_INLINE void LL_USART_SetSmartcardGuardTime(USART_TypeDef *USARTx, uint32_t GuardTime) +{ + MODIFY_REG(USARTx->GTPR, USART_GTPR_GT, GuardTime << USART_POSITION_GTPR_GT); +} + +/** + * @brief Return Smartcard Guard time value, expressed in nb of baud clocks periods + * (GT[7:0] bits : Guard time value) + * @note Macro IS_SMARTCARD_INSTANCE(USARTx) can be used to check whether or not + * Smartcard feature is supported by the USARTx instance. 
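+ * @note   Illustrative usage sketch (editorial addition, not part of the original ST header):
+ *         Smartcard bring-up combining the prescaler, guard-time and NACK helpers above with
+ *         LL_USART_ConfigSmartcardMode() defined further down. USART1 and the numeric values
+ *         are placeholder assumptions that depend on the card and its clock.
+ * @code
+ *         void app_smartcard_setup(void)                             // hypothetical helper
+ *         {
+ *           LL_USART_SetSmartcardPrescaler(USART1, 5U);              // assumed SCLK divider (5-bit field)
+ *           LL_USART_SetSmartcardGuardTime(USART1, 16U);             // assumed guard time, in baud clocks
+ *           LL_USART_EnableSmartcardNACK(USART1);                    // NACK on parity error
+ *           LL_USART_ConfigSmartcardMode(USART1);                    // 1.5 stop bits, CLKEN, SCEN
+ *         }
+ * @endcode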
+ * @rmtoll GTPR GT LL_USART_GetSmartcardGuardTime + * @param USARTx USART Instance + * @retval Smartcard Guard time value (Value between Min_Data=0x00 and Max_Data=0xFF) + */ +__STATIC_INLINE uint32_t LL_USART_GetSmartcardGuardTime(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->GTPR, USART_GTPR_GT) >> USART_POSITION_GTPR_GT); +} + +/** + * @} + */ + +/** @defgroup USART_LL_EF_Configuration_HalfDuplex Configuration functions related to Half Duplex feature + * @{ + */ + +/** + * @brief Enable Single Wire Half-Duplex mode + * @note Macro IS_UART_HALFDUPLEX_INSTANCE(USARTx) can be used to check whether or not + * Half-Duplex mode is supported by the USARTx instance. + * @rmtoll CR3 HDSEL LL_USART_EnableHalfDuplex + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableHalfDuplex(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR3, USART_CR3_HDSEL); +} + +/** + * @brief Disable Single Wire Half-Duplex mode + * @note Macro IS_UART_HALFDUPLEX_INSTANCE(USARTx) can be used to check whether or not + * Half-Duplex mode is supported by the USARTx instance. + * @rmtoll CR3 HDSEL LL_USART_DisableHalfDuplex + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableHalfDuplex(USART_TypeDef *USARTx) +{ + CLEAR_BIT(USARTx->CR3, USART_CR3_HDSEL); +} + +/** + * @brief Indicate if Single Wire Half-Duplex mode is enabled + * @note Macro IS_UART_HALFDUPLEX_INSTANCE(USARTx) can be used to check whether or not + * Half-Duplex mode is supported by the USARTx instance. + * @rmtoll CR3 HDSEL LL_USART_IsEnabledHalfDuplex + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledHalfDuplex(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR3, USART_CR3_HDSEL) == (USART_CR3_HDSEL)); +} + +/** + * @} + */ + +/** @defgroup USART_LL_EF_Configuration_LIN Configuration functions related to LIN feature + * @{ + */ + +/** + * @brief Set LIN Break Detection Length + * @note Macro IS_UART_LIN_INSTANCE(USARTx) can be used to check whether or not + * LIN feature is supported by the USARTx instance. + * @rmtoll CR2 LBDL LL_USART_SetLINBrkDetectionLen + * @param USARTx USART Instance + * @param LINBDLength This parameter can be one of the following values: + * @arg @ref LL_USART_LINBREAK_DETECT_10B + * @arg @ref LL_USART_LINBREAK_DETECT_11B + * @retval None + */ +__STATIC_INLINE void LL_USART_SetLINBrkDetectionLen(USART_TypeDef *USARTx, uint32_t LINBDLength) +{ + MODIFY_REG(USARTx->CR2, USART_CR2_LBDL, LINBDLength); +} + +/** + * @brief Return LIN Break Detection Length + * @note Macro IS_UART_LIN_INSTANCE(USARTx) can be used to check whether or not + * LIN feature is supported by the USARTx instance. + * @rmtoll CR2 LBDL LL_USART_GetLINBrkDetectionLen + * @param USARTx USART Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_USART_LINBREAK_DETECT_10B + * @arg @ref LL_USART_LINBREAK_DETECT_11B + */ +__STATIC_INLINE uint32_t LL_USART_GetLINBrkDetectionLen(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR2, USART_CR2_LBDL)); +} + +/** + * @brief Enable LIN mode + * @note Macro IS_UART_LIN_INSTANCE(USARTx) can be used to check whether or not + * LIN feature is supported by the USARTx instance. 
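+ * @note   Illustrative usage sketch (editorial addition, not part of the original ST header):
+ *         enabling LIN with 11-bit break detection. UART4 is used only as a placeholder for a
+ *         LIN-capable instance, and the helper name is an assumption.
+ * @code
+ *         void app_lin_setup(void)                                   // hypothetical helper
+ *         {
+ *           LL_USART_SetLINBrkDetectionLen(UART4, LL_USART_LINBREAK_DETECT_11B);
+ *           LL_USART_EnableLIN(UART4);                               // LINEN = 1
+ *         }
+ * @endcode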
+ * @rmtoll CR2 LINEN LL_USART_EnableLIN + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableLIN(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR2, USART_CR2_LINEN); +} + +/** + * @brief Disable LIN mode + * @note Macro IS_UART_LIN_INSTANCE(USARTx) can be used to check whether or not + * LIN feature is supported by the USARTx instance. + * @rmtoll CR2 LINEN LL_USART_DisableLIN + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableLIN(USART_TypeDef *USARTx) +{ + CLEAR_BIT(USARTx->CR2, USART_CR2_LINEN); +} + +/** + * @brief Indicate if LIN mode is enabled + * @note Macro IS_UART_LIN_INSTANCE(USARTx) can be used to check whether or not + * LIN feature is supported by the USARTx instance. + * @rmtoll CR2 LINEN LL_USART_IsEnabledLIN + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledLIN(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR2, USART_CR2_LINEN) == (USART_CR2_LINEN)); +} + +/** + * @} + */ + +/** @defgroup USART_LL_EF_AdvancedConfiguration Advanced Configurations services + * @{ + */ + +/** + * @brief Perform basic configuration of USART for enabling use in Asynchronous Mode (UART) + * @note In UART mode, the following bits must be kept cleared: + * - LINEN bit in the USART_CR2 register, + * - CLKEN bit in the USART_CR2 register, + * - SCEN bit in the USART_CR3 register, + * - IREN bit in the USART_CR3 register, + * - HDSEL bit in the USART_CR3 register. + * @note Call of this function is equivalent to following function call sequence : + * - Clear LINEN in CR2 using @ref LL_USART_DisableLIN() function + * - Clear CLKEN in CR2 using @ref LL_USART_DisableSCLKOutput() function + * - Clear SCEN in CR3 using @ref LL_USART_DisableSmartcard() function + * - Clear IREN in CR3 using @ref LL_USART_DisableIrda() function + * - Clear HDSEL in CR3 using @ref LL_USART_DisableHalfDuplex() function + * @note Other remaining configurations items related to Asynchronous Mode + * (as Baud Rate, Word length, Parity, ...) should be set using + * dedicated functions + * @rmtoll CR2 LINEN LL_USART_ConfigAsyncMode\n + * CR2 CLKEN LL_USART_ConfigAsyncMode\n + * CR3 SCEN LL_USART_ConfigAsyncMode\n + * CR3 IREN LL_USART_ConfigAsyncMode\n + * CR3 HDSEL LL_USART_ConfigAsyncMode + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ConfigAsyncMode(USART_TypeDef *USARTx) +{ + /* In Asynchronous mode, the following bits must be kept cleared: + - LINEN, CLKEN bits in the USART_CR2 register, + - SCEN, IREN and HDSEL bits in the USART_CR3 register.*/ + CLEAR_BIT(USARTx->CR2, (USART_CR2_LINEN | USART_CR2_CLKEN)); + CLEAR_BIT(USARTx->CR3, (USART_CR3_SCEN | USART_CR3_IREN | USART_CR3_HDSEL)); +} + +/** + * @brief Perform basic configuration of USART for enabling use in Synchronous Mode + * @note In Synchronous mode, the following bits must be kept cleared: + * - LINEN bit in the USART_CR2 register, + * - SCEN bit in the USART_CR3 register, + * - IREN bit in the USART_CR3 register, + * - HDSEL bit in the USART_CR3 register. + * This function also sets the USART in Synchronous mode. + * @note Macro IS_USART_INSTANCE(USARTx) can be used to check whether or not + * Synchronous mode is supported by the USARTx instance. 
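+ * @note   Illustrative usage sketch (editorial addition, not part of the original ST header):
+ *         a complete polled UART bring-up built on LL_USART_ConfigAsyncMode() above. USART2,
+ *         the 42 MHz PCLK1 figure and the helper name are placeholder assumptions; the
+ *         character, direction and enable helpers come from earlier parts of this header, and
+ *         clock/GPIO setup is assumed to be done elsewhere.
+ * @code
+ *         void app_uart_init(void)                                   // hypothetical helper
+ *         {
+ *           LL_USART_ConfigAsyncMode(USART2);                        // LINEN/CLKEN/SCEN/IREN/HDSEL cleared
+ *           LL_USART_ConfigCharacter(USART2, LL_USART_DATAWIDTH_8B,
+ *                                    LL_USART_PARITY_NONE, LL_USART_STOPBITS_1);
+ *           LL_USART_SetTransferDirection(USART2, LL_USART_DIRECTION_TX_RX);
+ *           LL_USART_SetBaudRate(USART2, 42000000U, LL_USART_OVERSAMPLING_16, 115200U);
+ *           LL_USART_Enable(USART2);                                 // UE = 1
+ *         }
+ * @endcode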
+ * @note Call of this function is equivalent to following function call sequence : + * - Clear LINEN in CR2 using @ref LL_USART_DisableLIN() function + * - Clear IREN in CR3 using @ref LL_USART_DisableIrda() function + * - Clear SCEN in CR3 using @ref LL_USART_DisableSmartcard() function + * - Clear HDSEL in CR3 using @ref LL_USART_DisableHalfDuplex() function + * - Set CLKEN in CR2 using @ref LL_USART_EnableSCLKOutput() function + * @note Other remaining configurations items related to Synchronous Mode + * (as Baud Rate, Word length, Parity, Clock Polarity, ...) should be set using + * dedicated functions + * @rmtoll CR2 LINEN LL_USART_ConfigSyncMode\n + * CR2 CLKEN LL_USART_ConfigSyncMode\n + * CR3 SCEN LL_USART_ConfigSyncMode\n + * CR3 IREN LL_USART_ConfigSyncMode\n + * CR3 HDSEL LL_USART_ConfigSyncMode + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ConfigSyncMode(USART_TypeDef *USARTx) +{ + /* In Synchronous mode, the following bits must be kept cleared: + - LINEN bit in the USART_CR2 register, + - SCEN, IREN and HDSEL bits in the USART_CR3 register.*/ + CLEAR_BIT(USARTx->CR2, (USART_CR2_LINEN)); + CLEAR_BIT(USARTx->CR3, (USART_CR3_SCEN | USART_CR3_IREN | USART_CR3_HDSEL)); + /* set the UART/USART in Synchronous mode */ + SET_BIT(USARTx->CR2, USART_CR2_CLKEN); +} + +/** + * @brief Perform basic configuration of USART for enabling use in LIN Mode + * @note In LIN mode, the following bits must be kept cleared: + * - STOP and CLKEN bits in the USART_CR2 register, + * - SCEN bit in the USART_CR3 register, + * - IREN bit in the USART_CR3 register, + * - HDSEL bit in the USART_CR3 register. + * This function also set the UART/USART in LIN mode. + * @note Macro IS_UART_LIN_INSTANCE(USARTx) can be used to check whether or not + * LIN feature is supported by the USARTx instance. + * @note Call of this function is equivalent to following function call sequence : + * - Clear CLKEN in CR2 using @ref LL_USART_DisableSCLKOutput() function + * - Clear STOP in CR2 using @ref LL_USART_SetStopBitsLength() function + * - Clear SCEN in CR3 using @ref LL_USART_DisableSmartcard() function + * - Clear IREN in CR3 using @ref LL_USART_DisableIrda() function + * - Clear HDSEL in CR3 using @ref LL_USART_DisableHalfDuplex() function + * - Set LINEN in CR2 using @ref LL_USART_EnableLIN() function + * @note Other remaining configurations items related to LIN Mode + * (as Baud Rate, Word length, LIN Break Detection Length, ...) 
should be set using + * dedicated functions + * @rmtoll CR2 CLKEN LL_USART_ConfigLINMode\n + * CR2 STOP LL_USART_ConfigLINMode\n + * CR2 LINEN LL_USART_ConfigLINMode\n + * CR3 IREN LL_USART_ConfigLINMode\n + * CR3 SCEN LL_USART_ConfigLINMode\n + * CR3 HDSEL LL_USART_ConfigLINMode + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ConfigLINMode(USART_TypeDef *USARTx) +{ + /* In LIN mode, the following bits must be kept cleared: + - STOP and CLKEN bits in the USART_CR2 register, + - IREN, SCEN and HDSEL bits in the USART_CR3 register.*/ + CLEAR_BIT(USARTx->CR2, (USART_CR2_CLKEN | USART_CR2_STOP)); + CLEAR_BIT(USARTx->CR3, (USART_CR3_IREN | USART_CR3_SCEN | USART_CR3_HDSEL)); + /* Set the UART/USART in LIN mode */ + SET_BIT(USARTx->CR2, USART_CR2_LINEN); +} + +/** + * @brief Perform basic configuration of USART for enabling use in Half Duplex Mode + * @note In Half Duplex mode, the following bits must be kept cleared: + * - LINEN bit in the USART_CR2 register, + * - CLKEN bit in the USART_CR2 register, + * - SCEN bit in the USART_CR3 register, + * - IREN bit in the USART_CR3 register, + * This function also sets the UART/USART in Half Duplex mode. + * @note Macro IS_UART_HALFDUPLEX_INSTANCE(USARTx) can be used to check whether or not + * Half-Duplex mode is supported by the USARTx instance. + * @note Call of this function is equivalent to following function call sequence : + * - Clear LINEN in CR2 using @ref LL_USART_DisableLIN() function + * - Clear CLKEN in CR2 using @ref LL_USART_DisableSCLKOutput() function + * - Clear SCEN in CR3 using @ref LL_USART_DisableSmartcard() function + * - Clear IREN in CR3 using @ref LL_USART_DisableIrda() function + * - Set HDSEL in CR3 using @ref LL_USART_EnableHalfDuplex() function + * @note Other remaining configurations items related to Half Duplex Mode + * (as Baud Rate, Word length, Parity, ...) should be set using + * dedicated functions + * @rmtoll CR2 LINEN LL_USART_ConfigHalfDuplexMode\n + * CR2 CLKEN LL_USART_ConfigHalfDuplexMode\n + * CR3 HDSEL LL_USART_ConfigHalfDuplexMode\n + * CR3 SCEN LL_USART_ConfigHalfDuplexMode\n + * CR3 IREN LL_USART_ConfigHalfDuplexMode + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ConfigHalfDuplexMode(USART_TypeDef *USARTx) +{ + /* In Half Duplex mode, the following bits must be kept cleared: + - LINEN and CLKEN bits in the USART_CR2 register, + - SCEN and IREN bits in the USART_CR3 register.*/ + CLEAR_BIT(USARTx->CR2, (USART_CR2_LINEN | USART_CR2_CLKEN)); + CLEAR_BIT(USARTx->CR3, (USART_CR3_SCEN | USART_CR3_IREN)); + /* set the UART/USART in Half Duplex mode */ + SET_BIT(USARTx->CR3, USART_CR3_HDSEL); +} + +/** + * @brief Perform basic configuration of USART for enabling use in Smartcard Mode + * @note In Smartcard mode, the following bits must be kept cleared: + * - LINEN bit in the USART_CR2 register, + * - IREN bit in the USART_CR3 register, + * - HDSEL bit in the USART_CR3 register. + * This function also configures Stop bits to 1.5 bits and + * sets the USART in Smartcard mode (SCEN bit). + * Clock Output is also enabled (CLKEN). + * @note Macro IS_SMARTCARD_INSTANCE(USARTx) can be used to check whether or not + * Smartcard feature is supported by the USARTx instance. 
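+ * @note   Illustrative usage sketch (editorial addition, not part of the original ST header):
+ *         single-wire half-duplex bring-up using LL_USART_ConfigHalfDuplexMode() above. USART1,
+ *         the 84 MHz PCLK2 figure, the 9600 baud rate and the helper name are placeholder
+ *         assumptions.
+ * @code
+ *         void app_onewire_init(void)                                // hypothetical helper
+ *         {
+ *           LL_USART_ConfigHalfDuplexMode(USART1);                   // HDSEL = 1, TX and RX share one pin
+ *           LL_USART_SetBaudRate(USART1, 84000000U, LL_USART_OVERSAMPLING_16, 9600U);
+ *           LL_USART_Enable(USART1);
+ *         }
+ * @endcode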
+ * @note Call of this function is equivalent to following function call sequence : + * - Clear LINEN in CR2 using @ref LL_USART_DisableLIN() function + * - Clear IREN in CR3 using @ref LL_USART_DisableIrda() function + * - Clear HDSEL in CR3 using @ref LL_USART_DisableHalfDuplex() function + * - Configure STOP in CR2 using @ref LL_USART_SetStopBitsLength() function + * - Set CLKEN in CR2 using @ref LL_USART_EnableSCLKOutput() function + * - Set SCEN in CR3 using @ref LL_USART_EnableSmartcard() function + * @note Other remaining configurations items related to Smartcard Mode + * (as Baud Rate, Word length, Parity, ...) should be set using + * dedicated functions + * @rmtoll CR2 LINEN LL_USART_ConfigSmartcardMode\n + * CR2 STOP LL_USART_ConfigSmartcardMode\n + * CR2 CLKEN LL_USART_ConfigSmartcardMode\n + * CR3 HDSEL LL_USART_ConfigSmartcardMode\n + * CR3 SCEN LL_USART_ConfigSmartcardMode + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ConfigSmartcardMode(USART_TypeDef *USARTx) +{ + /* In Smartcard mode, the following bits must be kept cleared: + - LINEN bit in the USART_CR2 register, + - IREN and HDSEL bits in the USART_CR3 register.*/ + CLEAR_BIT(USARTx->CR2, (USART_CR2_LINEN)); + CLEAR_BIT(USARTx->CR3, (USART_CR3_IREN | USART_CR3_HDSEL)); + /* Configure Stop bits to 1.5 bits */ + /* Synchronous mode is activated by default */ + SET_BIT(USARTx->CR2, (USART_CR2_STOP_0 | USART_CR2_STOP_1 | USART_CR2_CLKEN)); + /* set the UART/USART in Smartcard mode */ + SET_BIT(USARTx->CR3, USART_CR3_SCEN); +} + +/** + * @brief Perform basic configuration of USART for enabling use in Irda Mode + * @note In IRDA mode, the following bits must be kept cleared: + * - LINEN bit in the USART_CR2 register, + * - STOP and CLKEN bits in the USART_CR2 register, + * - SCEN bit in the USART_CR3 register, + * - HDSEL bit in the USART_CR3 register. + * This function also sets the UART/USART in IRDA mode (IREN bit). + * @note Macro IS_IRDA_INSTANCE(USARTx) can be used to check whether or not + * IrDA feature is supported by the USARTx instance. + * @note Call of this function is equivalent to following function call sequence : + * - Clear LINEN in CR2 using @ref LL_USART_DisableLIN() function + * - Clear CLKEN in CR2 using @ref LL_USART_DisableSCLKOutput() function + * - Clear SCEN in CR3 using @ref LL_USART_DisableSmartcard() function + * - Clear HDSEL in CR3 using @ref LL_USART_DisableHalfDuplex() function + * - Configure STOP in CR2 using @ref LL_USART_SetStopBitsLength() function + * - Set IREN in CR3 using @ref LL_USART_EnableIrda() function + * @note Other remaining configurations items related to Irda Mode + * (as Baud Rate, Word length, Power mode, ...) 
should be set using + * dedicated functions + * @rmtoll CR2 LINEN LL_USART_ConfigIrdaMode\n + * CR2 CLKEN LL_USART_ConfigIrdaMode\n + * CR2 STOP LL_USART_ConfigIrdaMode\n + * CR3 SCEN LL_USART_ConfigIrdaMode\n + * CR3 HDSEL LL_USART_ConfigIrdaMode\n + * CR3 IREN LL_USART_ConfigIrdaMode + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ConfigIrdaMode(USART_TypeDef *USARTx) +{ + /* In IRDA mode, the following bits must be kept cleared: + - LINEN, STOP and CLKEN bits in the USART_CR2 register, + - SCEN and HDSEL bits in the USART_CR3 register.*/ + CLEAR_BIT(USARTx->CR2, (USART_CR2_LINEN | USART_CR2_CLKEN | USART_CR2_STOP)); + CLEAR_BIT(USARTx->CR3, (USART_CR3_SCEN | USART_CR3_HDSEL)); + /* set the UART/USART in IRDA mode */ + SET_BIT(USARTx->CR3, USART_CR3_IREN); +} + +/** + * @brief Perform basic configuration of USART for enabling use in Multi processor Mode + * (several USARTs connected in a network, one of the USARTs can be the master, + * its TX output connected to the RX inputs of the other slaves USARTs). + * @note In MultiProcessor mode, the following bits must be kept cleared: + * - LINEN bit in the USART_CR2 register, + * - CLKEN bit in the USART_CR2 register, + * - SCEN bit in the USART_CR3 register, + * - IREN bit in the USART_CR3 register, + * - HDSEL bit in the USART_CR3 register. + * @note Call of this function is equivalent to following function call sequence : + * - Clear LINEN in CR2 using @ref LL_USART_DisableLIN() function + * - Clear CLKEN in CR2 using @ref LL_USART_DisableSCLKOutput() function + * - Clear SCEN in CR3 using @ref LL_USART_DisableSmartcard() function + * - Clear IREN in CR3 using @ref LL_USART_DisableIrda() function + * - Clear HDSEL in CR3 using @ref LL_USART_DisableHalfDuplex() function + * @note Other remaining configurations items related to Multi processor Mode + * (as Baud Rate, Wake Up Method, Node address, ...) should be set using + * dedicated functions + * @rmtoll CR2 LINEN LL_USART_ConfigMultiProcessMode\n + * CR2 CLKEN LL_USART_ConfigMultiProcessMode\n + * CR3 SCEN LL_USART_ConfigMultiProcessMode\n + * CR3 HDSEL LL_USART_ConfigMultiProcessMode\n + * CR3 IREN LL_USART_ConfigMultiProcessMode + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ConfigMultiProcessMode(USART_TypeDef *USARTx) +{ + /* In Multi Processor mode, the following bits must be kept cleared: + - LINEN and CLKEN bits in the USART_CR2 register, + - IREN, SCEN and HDSEL bits in the USART_CR3 register.*/ + CLEAR_BIT(USARTx->CR2, (USART_CR2_LINEN | USART_CR2_CLKEN)); + CLEAR_BIT(USARTx->CR3, (USART_CR3_SCEN | USART_CR3_HDSEL | USART_CR3_IREN)); +} + +/** + * @} + */ + +/** @defgroup USART_LL_EF_FLAG_Management FLAG_Management + * @{ + */ + +/** + * @brief Check if the USART Parity Error Flag is set or not + * @rmtoll SR PE LL_USART_IsActiveFlag_PE + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsActiveFlag_PE(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->SR, USART_SR_PE) == (USART_SR_PE)); +} + +/** + * @brief Check if the USART Framing Error Flag is set or not + * @rmtoll SR FE LL_USART_IsActiveFlag_FE + * @param USARTx USART Instance + * @retval State of bit (1 or 0). 
+ */ +__STATIC_INLINE uint32_t LL_USART_IsActiveFlag_FE(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->SR, USART_SR_FE) == (USART_SR_FE)); +} + +/** + * @brief Check if the USART Noise error detected Flag is set or not + * @rmtoll SR NF LL_USART_IsActiveFlag_NE + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsActiveFlag_NE(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->SR, USART_SR_NE) == (USART_SR_NE)); +} + +/** + * @brief Check if the USART OverRun Error Flag is set or not + * @rmtoll SR ORE LL_USART_IsActiveFlag_ORE + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsActiveFlag_ORE(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->SR, USART_SR_ORE) == (USART_SR_ORE)); +} + +/** + * @brief Check if the USART IDLE line detected Flag is set or not + * @rmtoll SR IDLE LL_USART_IsActiveFlag_IDLE + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsActiveFlag_IDLE(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->SR, USART_SR_IDLE) == (USART_SR_IDLE)); +} + +/** + * @brief Check if the USART Read Data Register Not Empty Flag is set or not + * @rmtoll SR RXNE LL_USART_IsActiveFlag_RXNE + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsActiveFlag_RXNE(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->SR, USART_SR_RXNE) == (USART_SR_RXNE)); +} + +/** + * @brief Check if the USART Transmission Complete Flag is set or not + * @rmtoll SR TC LL_USART_IsActiveFlag_TC + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsActiveFlag_TC(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->SR, USART_SR_TC) == (USART_SR_TC)); +} + +/** + * @brief Check if the USART Transmit Data Register Empty Flag is set or not + * @rmtoll SR TXE LL_USART_IsActiveFlag_TXE + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsActiveFlag_TXE(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->SR, USART_SR_TXE) == (USART_SR_TXE)); +} + +/** + * @brief Check if the USART LIN Break Detection Flag is set or not + * @note Macro IS_UART_LIN_INSTANCE(USARTx) can be used to check whether or not + * LIN feature is supported by the USARTx instance. + * @rmtoll SR LBD LL_USART_IsActiveFlag_LBD + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsActiveFlag_LBD(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->SR, USART_SR_LBD) == (USART_SR_LBD)); +} + +/** + * @brief Check if the USART CTS Flag is set or not + * @note Macro IS_UART_HWFLOW_INSTANCE(USARTx) can be used to check whether or not + * Hardware Flow control feature is supported by the USARTx instance. + * @rmtoll SR CTS LL_USART_IsActiveFlag_nCTS + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsActiveFlag_nCTS(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->SR, USART_SR_CTS) == (USART_SR_CTS)); +} + +/** + * @brief Check if the USART Send Break Flag is set or not + * @rmtoll CR1 SBK LL_USART_IsActiveFlag_SBK + * @param USARTx USART Instance + * @retval State of bit (1 or 0). 
+ */ +__STATIC_INLINE uint32_t LL_USART_IsActiveFlag_SBK(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR1, USART_CR1_SBK) == (USART_CR1_SBK)); +} + +/** + * @brief Check if the USART Receive Wake Up from mute mode Flag is set or not + * @rmtoll CR1 RWU LL_USART_IsActiveFlag_RWU + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsActiveFlag_RWU(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR1, USART_CR1_RWU) == (USART_CR1_RWU)); +} + +/** + * @brief Clear Parity Error Flag + * @note Clearing this flag is done by a read access to the USARTx_SR + * register followed by a read access to the USARTx_DR register. + * @note Please also consider that when clearing this flag, other flags as + * NE, FE, ORE, IDLE would also be cleared. + * @rmtoll SR PE LL_USART_ClearFlag_PE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ClearFlag_PE(USART_TypeDef *USARTx) +{ + __IO uint32_t tmpreg; + tmpreg = USARTx->SR; + (void) tmpreg; + tmpreg = USARTx->DR; + (void) tmpreg; +} + +/** + * @brief Clear Framing Error Flag + * @note Clearing this flag is done by a read access to the USARTx_SR + * register followed by a read access to the USARTx_DR register. + * @note Please also consider that when clearing this flag, other flags as + * PE, NE, ORE, IDLE would also be cleared. + * @rmtoll SR FE LL_USART_ClearFlag_FE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ClearFlag_FE(USART_TypeDef *USARTx) +{ + __IO uint32_t tmpreg; + tmpreg = USARTx->SR; + (void) tmpreg; + tmpreg = USARTx->DR; + (void) tmpreg; +} + +/** + * @brief Clear Noise detected Flag + * @note Clearing this flag is done by a read access to the USARTx_SR + * register followed by a read access to the USARTx_DR register. + * @note Please also consider that when clearing this flag, other flags as + * PE, FE, ORE, IDLE would also be cleared. + * @rmtoll SR NF LL_USART_ClearFlag_NE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ClearFlag_NE(USART_TypeDef *USARTx) +{ + __IO uint32_t tmpreg; + tmpreg = USARTx->SR; + (void) tmpreg; + tmpreg = USARTx->DR; + (void) tmpreg; +} + +/** + * @brief Clear OverRun Error Flag + * @note Clearing this flag is done by a read access to the USARTx_SR + * register followed by a read access to the USARTx_DR register. + * @note Please also consider that when clearing this flag, other flags as + * PE, NE, FE, IDLE would also be cleared. + * @rmtoll SR ORE LL_USART_ClearFlag_ORE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ClearFlag_ORE(USART_TypeDef *USARTx) +{ + __IO uint32_t tmpreg; + tmpreg = USARTx->SR; + (void) tmpreg; + tmpreg = USARTx->DR; + (void) tmpreg; +} + +/** + * @brief Clear IDLE line detected Flag + * @note Clearing this flag is done by a read access to the USARTx_SR + * register followed by a read access to the USARTx_DR register. + * @note Please also consider that when clearing this flag, other flags as + * PE, NE, FE, ORE would also be cleared. 
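+ * @note   Illustrative usage sketch (editorial addition, not part of the original ST header):
+ *         a polled receive that services an overrun with the SR-then-DR clearing sequence used
+ *         by the functions above. USART2 and the helper name are placeholder assumptions.
+ * @code
+ *         uint8_t app_uart_getc(void)                                // hypothetical helper
+ *         {
+ *           if (LL_USART_IsActiveFlag_ORE(USART2))
+ *           {
+ *             LL_USART_ClearFlag_ORE(USART2);                        // read SR, then read DR
+ *           }
+ *           while (!LL_USART_IsActiveFlag_RXNE(USART2)) { }          // wait for a received byte
+ *           return LL_USART_ReceiveData8(USART2);                    // reading DR clears RXNE
+ *         }
+ * @endcode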
+ * @rmtoll SR IDLE LL_USART_ClearFlag_IDLE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ClearFlag_IDLE(USART_TypeDef *USARTx) +{ + __IO uint32_t tmpreg; + tmpreg = USARTx->SR; + (void) tmpreg; + tmpreg = USARTx->DR; + (void) tmpreg; +} + +/** + * @brief Clear Transmission Complete Flag + * @rmtoll SR TC LL_USART_ClearFlag_TC + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ClearFlag_TC(USART_TypeDef *USARTx) +{ + WRITE_REG(USARTx->SR, ~(USART_SR_TC)); +} + +/** + * @brief Clear RX Not Empty Flag + * @rmtoll SR RXNE LL_USART_ClearFlag_RXNE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ClearFlag_RXNE(USART_TypeDef *USARTx) +{ + WRITE_REG(USARTx->SR, ~(USART_SR_RXNE)); +} + +/** + * @brief Clear LIN Break Detection Flag + * @note Macro IS_UART_LIN_INSTANCE(USARTx) can be used to check whether or not + * LIN feature is supported by the USARTx instance. + * @rmtoll SR LBD LL_USART_ClearFlag_LBD + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ClearFlag_LBD(USART_TypeDef *USARTx) +{ + WRITE_REG(USARTx->SR, ~(USART_SR_LBD)); +} + +/** + * @brief Clear CTS Interrupt Flag + * @note Macro IS_UART_HWFLOW_INSTANCE(USARTx) can be used to check whether or not + * Hardware Flow control feature is supported by the USARTx instance. + * @rmtoll SR CTS LL_USART_ClearFlag_nCTS + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ClearFlag_nCTS(USART_TypeDef *USARTx) +{ + WRITE_REG(USARTx->SR, ~(USART_SR_CTS)); +} + +/** + * @} + */ + +/** @defgroup USART_LL_EF_IT_Management IT_Management + * @{ + */ + +/** + * @brief Enable IDLE Interrupt + * @rmtoll CR1 IDLEIE LL_USART_EnableIT_IDLE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableIT_IDLE(USART_TypeDef *USARTx) +{ + ATOMIC_SET_BIT(USARTx->CR1, USART_CR1_IDLEIE); +} + +/** + * @brief Enable RX Not Empty Interrupt + * @rmtoll CR1 RXNEIE LL_USART_EnableIT_RXNE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableIT_RXNE(USART_TypeDef *USARTx) +{ + ATOMIC_SET_BIT(USARTx->CR1, USART_CR1_RXNEIE); +} + +/** + * @brief Enable Transmission Complete Interrupt + * @rmtoll CR1 TCIE LL_USART_EnableIT_TC + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableIT_TC(USART_TypeDef *USARTx) +{ + ATOMIC_SET_BIT(USARTx->CR1, USART_CR1_TCIE); +} + +/** + * @brief Enable TX Empty Interrupt + * @rmtoll CR1 TXEIE LL_USART_EnableIT_TXE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableIT_TXE(USART_TypeDef *USARTx) +{ + ATOMIC_SET_BIT(USARTx->CR1, USART_CR1_TXEIE); +} + +/** + * @brief Enable Parity Error Interrupt + * @rmtoll CR1 PEIE LL_USART_EnableIT_PE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableIT_PE(USART_TypeDef *USARTx) +{ + ATOMIC_SET_BIT(USARTx->CR1, USART_CR1_PEIE); +} + +/** + * @brief Enable LIN Break Detection Interrupt + * @note Macro IS_UART_LIN_INSTANCE(USARTx) can be used to check whether or not + * LIN feature is supported by the USARTx instance. 
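+ * @note   Illustrative usage sketch (editorial addition, not part of the original ST header):
+ *         interrupt-driven reception with the enable functions above. USART2, USART2_IRQn, the
+ *         CMSIS NVIC_EnableIRQ() call and the handler body are placeholder assumptions;
+ *         LL_USART_EnableIT_ERROR() appears further down in this header.
+ * @code
+ *         void app_uart_irq_setup(void)                              // hypothetical helper
+ *         {
+ *           LL_USART_EnableIT_RXNE(USART2);                          // interrupt on received data
+ *           LL_USART_EnableIT_ERROR(USART2);                         // interrupt on FE/ORE/NF
+ *           NVIC_EnableIRQ(USART2_IRQn);                             // CMSIS call, assumed available
+ *         }
+ *
+ *         void USART2_IRQHandler(void)
+ *         {
+ *           if (LL_USART_IsActiveFlag_RXNE(USART2))
+ *           {
+ *             uint8_t ch = LL_USART_ReceiveData8(USART2);            // clears RXNE
+ *             (void)ch;                                              // hand the byte to the application
+ *           }
+ *         }
+ * @endcode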
+ * @rmtoll CR2 LBDIE LL_USART_EnableIT_LBD + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableIT_LBD(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR2, USART_CR2_LBDIE); +} + +/** + * @brief Enable Error Interrupt + * @note When set, Error Interrupt Enable Bit is enabling interrupt generation in case of a framing + * error, overrun error or noise flag (FE=1 or ORE=1 or NF=1 in the USARTx_SR register). + * 0: Interrupt is inhibited + * 1: An interrupt is generated when FE=1 or ORE=1 or NF=1 in the USARTx_SR register. + * @rmtoll CR3 EIE LL_USART_EnableIT_ERROR + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableIT_ERROR(USART_TypeDef *USARTx) +{ + ATOMIC_SET_BIT(USARTx->CR3, USART_CR3_EIE); +} + +/** + * @brief Enable CTS Interrupt + * @note Macro IS_UART_HWFLOW_INSTANCE(USARTx) can be used to check whether or not + * Hardware Flow control feature is supported by the USARTx instance. + * @rmtoll CR3 CTSIE LL_USART_EnableIT_CTS + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableIT_CTS(USART_TypeDef *USARTx) +{ + ATOMIC_SET_BIT(USARTx->CR3, USART_CR3_CTSIE); +} + +/** + * @brief Disable IDLE Interrupt + * @rmtoll CR1 IDLEIE LL_USART_DisableIT_IDLE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableIT_IDLE(USART_TypeDef *USARTx) +{ + ATOMIC_CLEAR_BIT(USARTx->CR1, USART_CR1_IDLEIE); +} + +/** + * @brief Disable RX Not Empty Interrupt + * @rmtoll CR1 RXNEIE LL_USART_DisableIT_RXNE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableIT_RXNE(USART_TypeDef *USARTx) +{ + ATOMIC_CLEAR_BIT(USARTx->CR1, USART_CR1_RXNEIE); +} + +/** + * @brief Disable Transmission Complete Interrupt + * @rmtoll CR1 TCIE LL_USART_DisableIT_TC + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableIT_TC(USART_TypeDef *USARTx) +{ + ATOMIC_CLEAR_BIT(USARTx->CR1, USART_CR1_TCIE); +} + +/** + * @brief Disable TX Empty Interrupt + * @rmtoll CR1 TXEIE LL_USART_DisableIT_TXE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableIT_TXE(USART_TypeDef *USARTx) +{ + ATOMIC_CLEAR_BIT(USARTx->CR1, USART_CR1_TXEIE); +} + +/** + * @brief Disable Parity Error Interrupt + * @rmtoll CR1 PEIE LL_USART_DisableIT_PE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableIT_PE(USART_TypeDef *USARTx) +{ + ATOMIC_CLEAR_BIT(USARTx->CR1, USART_CR1_PEIE); +} + +/** + * @brief Disable LIN Break Detection Interrupt + * @note Macro IS_UART_LIN_INSTANCE(USARTx) can be used to check whether or not + * LIN feature is supported by the USARTx instance. + * @rmtoll CR2 LBDIE LL_USART_DisableIT_LBD + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableIT_LBD(USART_TypeDef *USARTx) +{ + CLEAR_BIT(USARTx->CR2, USART_CR2_LBDIE); +} + +/** + * @brief Disable Error Interrupt + * @note When set, Error Interrupt Enable Bit is enabling interrupt generation in case of a framing + * error, overrun error or noise flag (FE=1 or ORE=1 or NF=1 in the USARTx_SR register). + * 0: Interrupt is inhibited + * 1: An interrupt is generated when FE=1 or ORE=1 or NF=1 in the USARTx_SR register. 
+ * @rmtoll CR3 EIE LL_USART_DisableIT_ERROR + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableIT_ERROR(USART_TypeDef *USARTx) +{ + ATOMIC_CLEAR_BIT(USARTx->CR3, USART_CR3_EIE); +} + +/** + * @brief Disable CTS Interrupt + * @note Macro IS_UART_HWFLOW_INSTANCE(USARTx) can be used to check whether or not + * Hardware Flow control feature is supported by the USARTx instance. + * @rmtoll CR3 CTSIE LL_USART_DisableIT_CTS + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableIT_CTS(USART_TypeDef *USARTx) +{ + ATOMIC_CLEAR_BIT(USARTx->CR3, USART_CR3_CTSIE); +} + +/** + * @brief Check if the USART IDLE Interrupt source is enabled or disabled. + * @rmtoll CR1 IDLEIE LL_USART_IsEnabledIT_IDLE + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledIT_IDLE(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR1, USART_CR1_IDLEIE) == (USART_CR1_IDLEIE)); +} + +/** + * @brief Check if the USART RX Not Empty Interrupt is enabled or disabled. + * @rmtoll CR1 RXNEIE LL_USART_IsEnabledIT_RXNE + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledIT_RXNE(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR1, USART_CR1_RXNEIE) == (USART_CR1_RXNEIE)); +} + +/** + * @brief Check if the USART Transmission Complete Interrupt is enabled or disabled. + * @rmtoll CR1 TCIE LL_USART_IsEnabledIT_TC + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledIT_TC(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR1, USART_CR1_TCIE) == (USART_CR1_TCIE)); +} + +/** + * @brief Check if the USART TX Empty Interrupt is enabled or disabled. + * @rmtoll CR1 TXEIE LL_USART_IsEnabledIT_TXE + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledIT_TXE(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR1, USART_CR1_TXEIE) == (USART_CR1_TXEIE)); +} + +/** + * @brief Check if the USART Parity Error Interrupt is enabled or disabled. + * @rmtoll CR1 PEIE LL_USART_IsEnabledIT_PE + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledIT_PE(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR1, USART_CR1_PEIE) == (USART_CR1_PEIE)); +} + +/** + * @brief Check if the USART LIN Break Detection Interrupt is enabled or disabled. + * @note Macro IS_UART_LIN_INSTANCE(USARTx) can be used to check whether or not + * LIN feature is supported by the USARTx instance. + * @rmtoll CR2 LBDIE LL_USART_IsEnabledIT_LBD + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledIT_LBD(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR2, USART_CR2_LBDIE) == (USART_CR2_LBDIE)); +} + +/** + * @brief Check if the USART Error Interrupt is enabled or disabled. + * @rmtoll CR3 EIE LL_USART_IsEnabledIT_ERROR + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledIT_ERROR(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR3, USART_CR3_EIE) == (USART_CR3_EIE)); +} + +/** + * @brief Check if the USART CTS Interrupt is enabled or disabled. + * @note Macro IS_UART_HWFLOW_INSTANCE(USARTx) can be used to check whether or not + * Hardware Flow control feature is supported by the USARTx instance. 
+ * @rmtoll CR3 CTSIE LL_USART_IsEnabledIT_CTS + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledIT_CTS(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR3, USART_CR3_CTSIE) == (USART_CR3_CTSIE)); +} + +/** + * @} + */ + +/** @defgroup USART_LL_EF_DMA_Management DMA_Management + * @{ + */ + +/** + * @brief Enable DMA Mode for reception + * @rmtoll CR3 DMAR LL_USART_EnableDMAReq_RX + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableDMAReq_RX(USART_TypeDef *USARTx) +{ + ATOMIC_SET_BIT(USARTx->CR3, USART_CR3_DMAR); +} + +/** + * @brief Disable DMA Mode for reception + * @rmtoll CR3 DMAR LL_USART_DisableDMAReq_RX + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableDMAReq_RX(USART_TypeDef *USARTx) +{ + ATOMIC_CLEAR_BIT(USARTx->CR3, USART_CR3_DMAR); +} + +/** + * @brief Check if DMA Mode is enabled for reception + * @rmtoll CR3 DMAR LL_USART_IsEnabledDMAReq_RX + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledDMAReq_RX(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR3, USART_CR3_DMAR) == (USART_CR3_DMAR)); +} + +/** + * @brief Enable DMA Mode for transmission + * @rmtoll CR3 DMAT LL_USART_EnableDMAReq_TX + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableDMAReq_TX(USART_TypeDef *USARTx) +{ + ATOMIC_SET_BIT(USARTx->CR3, USART_CR3_DMAT); +} + +/** + * @brief Disable DMA Mode for transmission + * @rmtoll CR3 DMAT LL_USART_DisableDMAReq_TX + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableDMAReq_TX(USART_TypeDef *USARTx) +{ + ATOMIC_CLEAR_BIT(USARTx->CR3, USART_CR3_DMAT); +} + +/** + * @brief Check if DMA Mode is enabled for transmission + * @rmtoll CR3 DMAT LL_USART_IsEnabledDMAReq_TX + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledDMAReq_TX(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR3, USART_CR3_DMAT) == (USART_CR3_DMAT)); +} + +/** + * @brief Get the data register address used for DMA transfer + * @rmtoll DR DR LL_USART_DMA_GetRegAddr + * @note Address of Data Register is valid for both Transmit and Receive transfers. 
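+ * @note   Illustrative usage sketch (editorial addition, not part of the original ST header):
+ *         preparing a transmit DMA transfer. USART2 and the helper name are placeholder
+ *         assumptions; programming the DMA stream itself belongs to the LL DMA driver and is
+ *         only hinted at here.
+ * @code
+ *         void app_uart_dma_tx_setup(void)                           // hypothetical helper
+ *         {
+ *           uint32_t dr_addr = LL_USART_DMA_GetRegAddr(USART2);      // peripheral address for the stream
+ *           // ...configure the DMA stream with dr_addr as peripheral address and the
+ *           //    application buffer as memory address (LL DMA driver, not shown)...
+ *           LL_USART_EnableDMAReq_TX(USART2);                        // DMAT = 1
+ *         }
+ * @endcode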
+ * @param USARTx USART Instance + * @retval Address of data register + */ +__STATIC_INLINE uint32_t LL_USART_DMA_GetRegAddr(const USART_TypeDef *USARTx) +{ + /* return address of DR register */ + return ((uint32_t) &(USARTx->DR)); +} + +/** + * @} + */ + +/** @defgroup USART_LL_EF_Data_Management Data_Management + * @{ + */ + +/** + * @brief Read Receiver Data register (Receive Data value, 8 bits) + * @rmtoll DR DR LL_USART_ReceiveData8 + * @param USARTx USART Instance + * @retval Value between Min_Data=0x00 and Max_Data=0xFF + */ +__STATIC_INLINE uint8_t LL_USART_ReceiveData8(const USART_TypeDef *USARTx) +{ + return (uint8_t)(READ_BIT(USARTx->DR, USART_DR_DR)); +} + +/** + * @brief Read Receiver Data register (Receive Data value, 9 bits) + * @rmtoll DR DR LL_USART_ReceiveData9 + * @param USARTx USART Instance + * @retval Value between Min_Data=0x00 and Max_Data=0x1FF + */ +__STATIC_INLINE uint16_t LL_USART_ReceiveData9(const USART_TypeDef *USARTx) +{ + return (uint16_t)(READ_BIT(USARTx->DR, USART_DR_DR)); +} + +/** + * @brief Write in Transmitter Data Register (Transmit Data value, 8 bits) + * @rmtoll DR DR LL_USART_TransmitData8 + * @param USARTx USART Instance + * @param Value between Min_Data=0x00 and Max_Data=0xFF + * @retval None + */ +__STATIC_INLINE void LL_USART_TransmitData8(USART_TypeDef *USARTx, uint8_t Value) +{ + USARTx->DR = Value; +} + +/** + * @brief Write in Transmitter Data Register (Transmit Data value, 9 bits) + * @rmtoll DR DR LL_USART_TransmitData9 + * @param USARTx USART Instance + * @param Value between Min_Data=0x00 and Max_Data=0x1FF + * @retval None + */ +__STATIC_INLINE void LL_USART_TransmitData9(USART_TypeDef *USARTx, uint16_t Value) +{ + USARTx->DR = Value & 0x1FFU; +} + +/** + * @} + */ + +/** @defgroup USART_LL_EF_Execution Execution + * @{ + */ + +/** + * @brief Request Break sending + * @rmtoll CR1 SBK LL_USART_RequestBreakSending + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_RequestBreakSending(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR1, USART_CR1_SBK); +} + +/** + * @brief Put USART in Mute mode + * @rmtoll CR1 RWU LL_USART_RequestEnterMuteMode + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_RequestEnterMuteMode(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR1, USART_CR1_RWU); +} + +/** + * @brief Put USART in Active mode + * @rmtoll CR1 RWU LL_USART_RequestExitMuteMode + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_RequestExitMuteMode(USART_TypeDef *USARTx) +{ + CLEAR_BIT(USARTx->CR1, USART_CR1_RWU); +} + +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup USART_LL_EF_Init Initialization and de-initialization functions + * @{ + */ +ErrorStatus LL_USART_DeInit(const USART_TypeDef *USARTx); +ErrorStatus LL_USART_Init(USART_TypeDef *USARTx, const LL_USART_InitTypeDef *USART_InitStruct); +void LL_USART_StructInit(LL_USART_InitTypeDef *USART_InitStruct); +ErrorStatus LL_USART_ClockInit(USART_TypeDef *USARTx, const LL_USART_ClockInitTypeDef *USART_ClockInitStruct); +void LL_USART_ClockStructInit(LL_USART_ClockInitTypeDef *USART_ClockInitStruct); +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* USART1 || USART2 || USART3 || USART6 || UART4 || UART5 || UART7 || UART8 || UART9 || UART10 */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_LL_USART_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_utils.h 
b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_utils.h new file mode 100644 index 0000000..accdac7 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_utils.h @@ -0,0 +1,307 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_utils.h + * @author MCD Application Team + * @brief Header file of UTILS LL module. + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + The LL UTILS driver contains a set of generic APIs that can be + used by user: + (+) Device electronic signature + (+) Timing functions + (+) PLL configuration functions + + @endverbatim + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_LL_UTILS_H +#define __STM32F4xx_LL_UTILS_H + +#ifdef __cplusplus extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +/** @defgroup UTILS_LL UTILS + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup UTILS_LL_Private_Constants UTILS Private Constants + * @{ + */ + +/* Max delay can be used in LL_mDelay */ +#define LL_MAX_DELAY 0xFFFFFFFFU + +/** + * @brief Unique device ID register base address + */ +#define UID_BASE_ADDRESS UID_BASE + +/** + * @brief Flash size data register base address + */ +#define FLASHSIZE_BASE_ADDRESS FLASHSIZE_BASE + +/** + * @brief Package data register base address + */ +#define PACKAGE_BASE_ADDRESS PACKAGE_BASE + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup UTILS_LL_Private_Macros UTILS Private Macros + * @{ + */ +/** + * @} + */ +/* Exported types ------------------------------------------------------------*/ +/** @defgroup UTILS_LL_ES_INIT UTILS Exported structures + * @{ + */ +/** + * @brief UTILS PLL structure definition + */ +typedef struct +{ + uint32_t PLLM; /*!< Division factor for PLL VCO input clock. + This parameter can be a value of @ref RCC_LL_EC_PLLM_DIV + + This feature can be modified afterwards using unitary function + @ref LL_RCC_PLL_ConfigDomain_SYS(). */ + + uint32_t PLLN; /*!< Multiplication factor for PLL VCO output clock. + This parameter must be a number between Min_Data = @ref RCC_PLLN_MIN_VALUE + and Max_Data = @ref RCC_PLLN_MAX_VALUE + + This feature can be modified afterwards using unitary function + @ref LL_RCC_PLL_ConfigDomain_SYS(). */ + + uint32_t PLLP; /*!< Division for the main system clock. + This parameter can be a value of @ref RCC_LL_EC_PLLP_DIV + + This feature can be modified afterwards using unitary function + @ref LL_RCC_PLL_ConfigDomain_SYS().
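+                             @note Illustrative usage sketch (editorial addition, not part of the
+                             original ST header): a common 168 MHz configuration from an 8 MHz
+                             crystal using LL_PLL_ConfigSystemClock_HSE() declared later in this
+                             file. Every divider and frequency here is a board-specific
+                             assumption, and the LL_RCC_* constants come from the LL RCC driver.
+                             @code
+                             void app_clock_init(void)               // hypothetical helper
+                             {
+                               LL_UTILS_PLLInitTypeDef pll = {.PLLM = LL_RCC_PLLM_DIV_8,
+                                                              .PLLN = 336U,
+                                                              .PLLP = LL_RCC_PLLP_DIV_2};
+                               LL_UTILS_ClkInitTypeDef clk = {.AHBCLKDivider  = LL_RCC_SYSCLK_DIV_1,
+                                                              .APB1CLKDivider = LL_RCC_APB1_DIV_4,
+                                                              .APB2CLKDivider = LL_RCC_APB2_DIV_2};
+                               (void)LL_PLL_ConfigSystemClock_HSE(8000000U, LL_UTILS_HSEBYPASS_OFF,
+                                                                  &pll, &clk);
+                             }
+                             @endcode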
*/ +} LL_UTILS_PLLInitTypeDef; + +/** + * @brief UTILS System, AHB and APB buses clock configuration structure definition + */ +typedef struct +{ + uint32_t AHBCLKDivider; /*!< The AHB clock (HCLK) divider. This clock is derived from the system clock (SYSCLK). + This parameter can be a value of @ref RCC_LL_EC_SYSCLK_DIV + + This feature can be modified afterwards using unitary function + @ref LL_RCC_SetAHBPrescaler(). */ + + uint32_t APB1CLKDivider; /*!< The APB1 clock (PCLK1) divider. This clock is derived from the AHB clock (HCLK). + This parameter can be a value of @ref RCC_LL_EC_APB1_DIV + + This feature can be modified afterwards using unitary function + @ref LL_RCC_SetAPB1Prescaler(). */ + + uint32_t APB2CLKDivider; /*!< The APB2 clock (PCLK2) divider. This clock is derived from the AHB clock (HCLK). + This parameter can be a value of @ref RCC_LL_EC_APB2_DIV + + This feature can be modified afterwards using unitary function + @ref LL_RCC_SetAPB2Prescaler(). */ + +} LL_UTILS_ClkInitTypeDef; + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup UTILS_LL_Exported_Constants UTILS Exported Constants + * @{ + */ + +/** @defgroup UTILS_EC_HSE_BYPASS HSE Bypass activation + * @{ + */ +#define LL_UTILS_HSEBYPASS_OFF 0x00000000U /*!< HSE Bypass is not enabled */ +#define LL_UTILS_HSEBYPASS_ON 0x00000001U /*!< HSE Bypass is enabled */ +/** + * @} + */ + +/** @defgroup UTILS_EC_PACKAGETYPE PACKAGE TYPE + * @{ + */ +#define LL_UTILS_PACKAGETYPE_WLCSP36_UFQFPN48_LQFP64 0x00000000U /*!< WLCSP36 or UFQFPN48 or LQFP64 package type */ +#define LL_UTILS_PACKAGETYPE_WLCSP168_FBGA169_LQFP100_LQFP64_UFQFPN48 0x00000100U /*!< WLCSP168 or FBGA169 or LQFP100 or LQFP64 or UFQFPN48 package type */ +#define LL_UTILS_PACKAGETYPE_WLCSP64_WLCSP81_LQFP176_UFBGA176 0x00000200U /*!< WLCSP64 or WLCSP81 or LQFP176 or UFBGA176 package type */ +#define LL_UTILS_PACKAGETYPE_LQFP144_UFBGA144_UFBGA144_UFBGA100 0x00000300U /*!< LQFP144 or UFBGA144 or UFBGA144 or UFBGA100 package type */ +#define LL_UTILS_PACKAGETYPE_LQFP100_LQFP208_TFBGA216 0x00000400U /*!< LQFP100 or LQFP208 or TFBGA216 package type */ +#define LL_UTILS_PACKAGETYPE_LQFP208_TFBGA216 0x00000500U /*!< LQFP208 or TFBGA216 package type */ +#define LL_UTILS_PACKAGETYPE_TQFP64_UFBGA144_LQFP144 0x00000700U /*!< TQFP64 or UFBGA144 or LQFP144 package type */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup UTILS_LL_Exported_Functions UTILS Exported Functions + * @{ + */ + +/** @defgroup UTILS_EF_DEVICE_ELECTRONIC_SIGNATURE DEVICE ELECTRONIC SIGNATURE + * @{ + */ + +/** + * @brief Get Word0 of the unique device identifier (UID based on 96 bits) + * @retval UID[31:0] + */ +__STATIC_INLINE uint32_t LL_GetUID_Word0(void) +{ + return (uint32_t)(READ_REG(*((uint32_t *)UID_BASE_ADDRESS))); +} + +/** + * @brief Get Word1 of the unique device identifier (UID based on 96 bits) + * @retval UID[63:32] + */ +__STATIC_INLINE uint32_t LL_GetUID_Word1(void) +{ + return (uint32_t)(READ_REG(*((uint32_t *)(UID_BASE_ADDRESS + 4U)))); +} + +/** + * @brief Get Word2 of the unique device identifier (UID based on 96 bits) + * @retval UID[95:64] + */ +__STATIC_INLINE uint32_t LL_GetUID_Word2(void) +{ + return (uint32_t)(READ_REG(*((uint32_t *)(UID_BASE_ADDRESS + 8U)))); +} + +/** + * @brief Get Flash memory size + * @note This bitfield indicates the size of the 
device Flash memory expressed in + * Kbytes. As an example, 0x040 corresponds to 64 Kbytes. + * @retval FLASH_SIZE[15:0]: Flash memory size + */ +__STATIC_INLINE uint32_t LL_GetFlashSize(void) +{ + return (uint32_t)(READ_REG(*((uint32_t *)FLASHSIZE_BASE_ADDRESS)) & 0xFFFF); +} + +/** + * @brief Get Package type + * @retval Returned value can be one of the following values: + * @arg @ref LL_UTILS_PACKAGETYPE_WLCSP36_UFQFPN48_LQFP64 (*) + * @arg @ref LL_UTILS_PACKAGETYPE_WLCSP168_FBGA169_LQFP100_LQFP64_UFQFPN48 (*) + * @arg @ref LL_UTILS_PACKAGETYPE_WLCSP64_WLCSP81_LQFP176_UFBGA176 (*) + * @arg @ref LL_UTILS_PACKAGETYPE_LQFP144_UFBGA144_UFBGA144_UFBGA100 (*) + * @arg @ref LL_UTILS_PACKAGETYPE_LQFP100_LQFP208_TFBGA216 (*) + * @arg @ref LL_UTILS_PACKAGETYPE_LQFP208_TFBGA216 (*) + * @arg @ref LL_UTILS_PACKAGETYPE_TQFP64_UFBGA144_LQFP144 (*) + * + * (*) value not defined in all devices. + */ +__STATIC_INLINE uint32_t LL_GetPackageType(void) +{ + return (uint32_t)(READ_REG(*((uint32_t *)PACKAGE_BASE_ADDRESS)) & 0x0700U); +} + +/** + * @} + */ + +/** @defgroup UTILS_LL_EF_DELAY DELAY + * @{ + */ + +/** + * @brief This function configures the Cortex-M SysTick source of the time base. + * @param HCLKFrequency HCLK frequency in Hz (can be calculated thanks to RCC helper macro) + * @note When a RTOS is used, it is recommended to avoid changing the SysTick + * configuration by calling this function, for a delay use rather osDelay RTOS service. + * @param Ticks Frequency of Ticks (Hz) + * @retval None + */ +__STATIC_INLINE void LL_InitTick(uint32_t HCLKFrequency, uint32_t Ticks) +{ + /* Configure the SysTick to have interrupt in 1ms time base */ + SysTick->LOAD = (uint32_t)((HCLKFrequency / Ticks) - 1UL); /* set reload register */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable the Systick Timer */ +} + +void LL_Init1msTick(uint32_t HCLKFrequency); +void LL_mDelay(uint32_t Delay); + +/** + * @} + */ + +/** @defgroup UTILS_EF_SYSTEM SYSTEM + * @{ + */ + +void LL_SetSystemCoreClock(uint32_t HCLKFrequency); +ErrorStatus LL_SetFlashLatency(uint32_t HCLK_Frequency); +ErrorStatus LL_PLL_ConfigSystemClock_HSI(LL_UTILS_PLLInitTypeDef *UTILS_PLLInitStruct, + LL_UTILS_ClkInitTypeDef *UTILS_ClkInitStruct); +ErrorStatus LL_PLL_ConfigSystemClock_HSE(uint32_t HSEFrequency, uint32_t HSEBypass, + LL_UTILS_PLLInitTypeDef *UTILS_PLLInitStruct, LL_UTILS_ClkInitTypeDef *UTILS_ClkInitStruct); + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_LL_UTILS_H */ diff --git a/Drivers/STM32F4xx_HAL_Driver/LICENSE.txt b/Drivers/STM32F4xx_HAL_Driver/LICENSE.txt new file mode 100644 index 0000000..3edc4d1 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/LICENSE.txt @@ -0,0 +1,6 @@ +This software component is provided to you as part of a software package and +applicable license terms are in the Package_license file. If you received this +software component outside of a package or without applicable license terms, +the terms of the BSD-3-Clause license shall apply. 
+You may obtain a copy of the BSD-3-Clause at: +https://opensource.org/licenses/BSD-3-Clause diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal.c new file mode 100644 index 0000000..4497de6 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal.c @@ -0,0 +1,616 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal.c + * @author MCD Application Team + * @brief HAL module driver. + * This is the common part of the HAL initialization + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + The common HAL driver contains a set of generic and common APIs that can be + used by the PPP peripheral drivers and the user to start using the HAL. + [..] + The HAL contains two APIs' categories: + (+) Common HAL APIs + (+) Services HAL APIs + + @endverbatim + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @defgroup HAL HAL + * @brief HAL module driver. 
+ * @{ + */ + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @addtogroup HAL_Private_Constants + * @{ + */ +/** + * @brief STM32F4xx HAL Driver version number V1.8.3 + */ +#define __STM32F4xx_HAL_VERSION_MAIN (0x01U) /*!< [31:24] main version */ +#define __STM32F4xx_HAL_VERSION_SUB1 (0x08U) /*!< [23:16] sub1 version */ +#define __STM32F4xx_HAL_VERSION_SUB2 (0x03U) /*!< [15:8] sub2 version */ +#define __STM32F4xx_HAL_VERSION_RC (0x00U) /*!< [7:0] release candidate */ +#define __STM32F4xx_HAL_VERSION ((__STM32F4xx_HAL_VERSION_MAIN << 24U)\ + |(__STM32F4xx_HAL_VERSION_SUB1 << 16U)\ + |(__STM32F4xx_HAL_VERSION_SUB2 << 8U )\ + |(__STM32F4xx_HAL_VERSION_RC)) + +#define IDCODE_DEVID_MASK 0x00000FFFU + +/* ------------ RCC registers bit address in the alias region ----------- */ +#define SYSCFG_OFFSET (SYSCFG_BASE - PERIPH_BASE) +/* --- MEMRMP Register ---*/ +/* Alias word address of UFB_MODE bit */ +#define MEMRMP_OFFSET SYSCFG_OFFSET +#define UFB_MODE_BIT_NUMBER SYSCFG_MEMRMP_UFB_MODE_Pos +#define UFB_MODE_BB (uint32_t)(PERIPH_BB_BASE + (MEMRMP_OFFSET * 32U) + (UFB_MODE_BIT_NUMBER * 4U)) + +/* --- CMPCR Register ---*/ +/* Alias word address of CMP_PD bit */ +#define CMPCR_OFFSET (SYSCFG_OFFSET + 0x20U) +#define CMP_PD_BIT_NUMBER SYSCFG_CMPCR_CMP_PD_Pos +#define CMPCR_CMP_PD_BB (uint32_t)(PERIPH_BB_BASE + (CMPCR_OFFSET * 32U) + (CMP_PD_BIT_NUMBER * 4U)) + +/* --- MCHDLYCR Register ---*/ +/* Alias word address of BSCKSEL bit */ +#define MCHDLYCR_OFFSET (SYSCFG_OFFSET + 0x30U) +#define BSCKSEL_BIT_NUMBER SYSCFG_MCHDLYCR_BSCKSEL_Pos +#define MCHDLYCR_BSCKSEL_BB (uint32_t)(PERIPH_BB_BASE + (MCHDLYCR_OFFSET * 32U) + (BSCKSEL_BIT_NUMBER * 4U)) +/** + * @} + */ + +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/** @addtogroup HAL_Private_Variables + * @{ + */ +__IO uint32_t uwTick; +uint32_t uwTickPrio = (1UL << __NVIC_PRIO_BITS); /* Invalid PRIO */ +HAL_TickFreqTypeDef uwTickFreq = HAL_TICK_FREQ_DEFAULT; /* 1KHz */ +/** + * @} + */ +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ + +/** @defgroup HAL_Exported_Functions HAL Exported Functions + * @{ + */ + +/** @defgroup HAL_Exported_Functions_Group1 Initialization and de-initialization Functions + * @brief Initialization and de-initialization functions + * +@verbatim + =============================================================================== + ##### Initialization and Configuration functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Initializes the Flash interface the NVIC allocation and initial clock + configuration. It initializes the systick also when timeout is needed + and the backup domain when enabled. + (+) De-Initializes common part of the HAL. + (+) Configure the time base source to have 1ms time base with a dedicated + Tick interrupt priority. + (++) SysTick timer is used by default as source of time base, but user + can eventually implement his proper time base source (a general purpose + timer for example or other time source), keeping in mind that Time base + duration should be kept 1ms since PPP_TIMEOUT_VALUEs are defined and + handled in milliseconds basis. 
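/* Editor's sketch of the initialization flow described above (not part of the ST sources):
 * HAL_Init() sets up caches/prefetch, NVIC priority grouping, the 1 ms SysTick and HAL_MspInit();
 * SystemClock_Config() is a hypothetical user-provided function, not defined in this file. */
int main(void)
{
  HAL_Init();              /* caches/prefetch, NVIC grouping 4, 1 ms tick on HSI, HAL_MspInit() */
  SystemClock_Config();    /* hypothetical user clock setup; HAL_RCC_ClockConfig() re-runs HAL_InitTick() */

  while (1)
  {
    HAL_Delay(500U);       /* relies on the 1 ms time base configured by HAL_InitTick() */
  }
}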
+ (++) Time base configuration function (HAL_InitTick ()) is called automatically + at the beginning of the program after reset by HAL_Init() or at any time + when clock is configured, by HAL_RCC_ClockConfig(). + (++) Source of time base is configured to generate interrupts at regular + time intervals. Care must be taken if HAL_Delay() is called from a + peripheral ISR process, the Tick interrupt line must have higher priority + (numerically lower) than the peripheral interrupt. Otherwise the caller + ISR process will be blocked. + (++) functions affecting time base configurations are declared as __weak + to make override possible in case of other implementations in user file. +@endverbatim + * @{ + */ + +/** + * @brief This function is used to initialize the HAL Library; it must be the first + * instruction to be executed in the main program (before to call any other + * HAL function), it performs the following: + * Configure the Flash prefetch, instruction and Data caches. + * Configures the SysTick to generate an interrupt each 1 millisecond, + * which is clocked by the HSI (at this stage, the clock is not yet + * configured and thus the system is running from the internal HSI at 16 MHz). + * Set NVIC Group Priority to 4. + * Calls the HAL_MspInit() callback function defined in user file + * "stm32f4xx_hal_msp.c" to do the global low level hardware initialization + * + * @note SysTick is used as time base for the HAL_Delay() function, the application + * need to ensure that the SysTick time base is always set to 1 millisecond + * to have correct HAL operation. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_Init(void) +{ + /* Configure Flash prefetch, Instruction cache, Data cache */ +#if (INSTRUCTION_CACHE_ENABLE != 0U) + __HAL_FLASH_INSTRUCTION_CACHE_ENABLE(); +#endif /* INSTRUCTION_CACHE_ENABLE */ + +#if (DATA_CACHE_ENABLE != 0U) + __HAL_FLASH_DATA_CACHE_ENABLE(); +#endif /* DATA_CACHE_ENABLE */ + +#if (PREFETCH_ENABLE != 0U) + __HAL_FLASH_PREFETCH_BUFFER_ENABLE(); +#endif /* PREFETCH_ENABLE */ + + /* Set Interrupt Group Priority */ + HAL_NVIC_SetPriorityGrouping(NVIC_PRIORITYGROUP_4); + + /* Use systick as time base source and configure 1ms tick (default clock after Reset is HSI) */ + HAL_InitTick(TICK_INT_PRIORITY); + + /* Init the low level hardware */ + HAL_MspInit(); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief This function de-Initializes common part of the HAL and stops the systick. + * This function is optional. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DeInit(void) +{ + /* Reset of all peripherals */ + __HAL_RCC_APB1_FORCE_RESET(); + __HAL_RCC_APB1_RELEASE_RESET(); + + __HAL_RCC_APB2_FORCE_RESET(); + __HAL_RCC_APB2_RELEASE_RESET(); + + __HAL_RCC_AHB1_FORCE_RESET(); + __HAL_RCC_AHB1_RELEASE_RESET(); + + __HAL_RCC_AHB2_FORCE_RESET(); + __HAL_RCC_AHB2_RELEASE_RESET(); + + __HAL_RCC_AHB3_FORCE_RESET(); + __HAL_RCC_AHB3_RELEASE_RESET(); + + /* De-Init the low level hardware */ + HAL_MspDeInit(); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Initialize the MSP. + * @retval None + */ +__weak void HAL_MspInit(void) +{ + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes the MSP. 
+ * @retval None + */ +__weak void HAL_MspDeInit(void) +{ + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_MspDeInit could be implemented in the user file + */ +} + +/** + * @brief This function configures the source of the time base. + * The time source is configured to have 1ms time base with a dedicated + * Tick interrupt priority. + * @note This function is called automatically at the beginning of program after + * reset by HAL_Init() or at any time when clock is reconfigured by HAL_RCC_ClockConfig(). + * @note In the default implementation, SysTick timer is the source of time base. + * It is used to generate interrupts at regular time intervals. + * Care must be taken if HAL_Delay() is called from a peripheral ISR process, + * The SysTick interrupt must have higher priority (numerically lower) + * than the peripheral interrupt. Otherwise the caller ISR process will be blocked. + * The function is declared as __weak to be overwritten in case of other + * implementation in user file. + * @param TickPriority Tick interrupt priority. + * @retval HAL status + */ +__weak HAL_StatusTypeDef HAL_InitTick(uint32_t TickPriority) +{ + /* Configure the SysTick to have interrupt in 1ms time basis*/ + if (HAL_SYSTICK_Config(SystemCoreClock / (1000U / uwTickFreq)) > 0U) + { + return HAL_ERROR; + } + + /* Configure the SysTick IRQ priority */ + if (TickPriority < (1UL << __NVIC_PRIO_BITS)) + { + HAL_NVIC_SetPriority(SysTick_IRQn, TickPriority, 0U); + uwTickPrio = TickPriority; + } + else + { + return HAL_ERROR; + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup HAL_Exported_Functions_Group2 HAL Control functions + * @brief HAL Control functions + * +@verbatim + =============================================================================== + ##### HAL Control functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Provide a tick value in millisecond + (+) Provide a blocking delay in millisecond + (+) Suspend the time base source interrupt + (+) Resume the time base source interrupt + (+) Get the HAL API driver version + (+) Get the device identifier + (+) Get the device revision identifier + (+) Enable/Disable Debug module during SLEEP mode + (+) Enable/Disable Debug module during STOP mode + (+) Enable/Disable Debug module during STANDBY mode + +@endverbatim + * @{ + */ + +/** + * @brief This function is called to increment a global variable "uwTick" + * used as application time base. + * @note In the default implementation, this variable is incremented each 1ms + * in SysTick ISR. + * @note This function is declared as __weak to be overwritten in case of other + * implementations in user file. + * @retval None + */ +__weak void HAL_IncTick(void) +{ + uwTick += uwTickFreq; +} + +/** + * @brief Provides a tick value in millisecond. + * @note This function is declared as __weak to be overwritten in case of other + * implementations in user file. + * @retval tick value + */ +__weak uint32_t HAL_GetTick(void) +{ + return uwTick; +} + +/** + * @brief This function returns a tick priority. + * @retval tick priority + */ +uint32_t HAL_GetTickPrio(void) +{ + return uwTickPrio; +} + +/** + * @brief Set new tick Freq. 
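/* Editor's sketch of how the tick services above are typically wired (not part of the ST sources):
 * the SysTick ISR advances uwTick through HAL_IncTick(), and HAL_GetTick() drives timeouts.
 * FLAG_SET() is a placeholder condition, not a real API. */
void SysTick_Handler(void)          /* usually generated in stm32f4xx_it.c */
{
  HAL_IncTick();                    /* advances uwTick by uwTickFreq on every SysTick interrupt */
}

static HAL_StatusTypeDef wait_for_flag(uint32_t timeout_ms)
{
  uint32_t tickstart = HAL_GetTick();

  while (!FLAG_SET())               /* placeholder for the condition being polled */
  {
    if ((HAL_GetTick() - tickstart) > timeout_ms)
    {
      return HAL_TIMEOUT;
    }
  }
  return HAL_OK;
}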
+ * @retval Status + */ +HAL_StatusTypeDef HAL_SetTickFreq(HAL_TickFreqTypeDef Freq) +{ + HAL_StatusTypeDef status = HAL_OK; + HAL_TickFreqTypeDef prevTickFreq; + + assert_param(IS_TICKFREQ(Freq)); + + if (uwTickFreq != Freq) + { + /* Back up uwTickFreq frequency */ + prevTickFreq = uwTickFreq; + + /* Update uwTickFreq global variable used by HAL_InitTick() */ + uwTickFreq = Freq; + + /* Apply the new tick Freq */ + status = HAL_InitTick(uwTickPrio); + + if (status != HAL_OK) + { + /* Restore previous tick frequency */ + uwTickFreq = prevTickFreq; + } + } + + return status; +} + +/** + * @brief Return tick frequency. + * @retval Tick frequency. + * Value of @ref HAL_TickFreqTypeDef. + */ +HAL_TickFreqTypeDef HAL_GetTickFreq(void) +{ + return uwTickFreq; +} + +/** + * @brief This function provides minimum delay (in milliseconds) based + * on variable incremented. + * @note In the default implementation , SysTick timer is the source of time base. + * It is used to generate interrupts at regular time intervals where uwTick + * is incremented. + * @note This function is declared as __weak to be overwritten in case of other + * implementations in user file. + * @param Delay specifies the delay time length, in milliseconds. + * @retval None + */ +__weak void HAL_Delay(uint32_t Delay) +{ + uint32_t tickstart = HAL_GetTick(); + uint32_t wait = Delay; + + /* Add a freq to guarantee minimum wait */ + if (wait < HAL_MAX_DELAY) + { + wait += (uint32_t)(uwTickFreq); + } + + while((HAL_GetTick() - tickstart) < wait) + { + } +} + +/** + * @brief Suspend Tick increment. + * @note In the default implementation , SysTick timer is the source of time base. It is + * used to generate interrupts at regular time intervals. Once HAL_SuspendTick() + * is called, the SysTick interrupt will be disabled and so Tick increment + * is suspended. + * @note This function is declared as __weak to be overwritten in case of other + * implementations in user file. + * @retval None + */ +__weak void HAL_SuspendTick(void) +{ + /* Disable SysTick Interrupt */ + SysTick->CTRL &= ~SysTick_CTRL_TICKINT_Msk; +} + +/** + * @brief Resume Tick increment. + * @note In the default implementation , SysTick timer is the source of time base. It is + * used to generate interrupts at regular time intervals. Once HAL_ResumeTick() + * is called, the SysTick interrupt will be enabled and so Tick increment + * is resumed. + * @note This function is declared as __weak to be overwritten in case of other + * implementations in user file. + * @retval None + */ +__weak void HAL_ResumeTick(void) +{ + /* Enable SysTick Interrupt */ + SysTick->CTRL |= SysTick_CTRL_TICKINT_Msk; +} + +/** + * @brief Returns the HAL revision + * @retval version : 0xXYZR (8bits for each decimal, R for RC) + */ +uint32_t HAL_GetHalVersion(void) +{ + return __STM32F4xx_HAL_VERSION; +} + +/** + * @brief Returns the device revision identifier. + * @retval Device revision identifier + */ +uint32_t HAL_GetREVID(void) +{ + return((DBGMCU->IDCODE) >> 16U); +} + +/** + * @brief Returns the device identifier. 
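/* Editor's sketch of the tick control services above (not part of the ST sources).
 * HAL_TICK_FREQ_100HZ is assumed to be one of the HAL_TickFreqTypeDef values from stm32f4xx_hal.h. */
static void example_tick_control(void)
{
  /* Lower the tick rate to 100 Hz (10 ms per tick); HAL_SetTickFreq() restores the old rate on failure */
  if (HAL_SetTickFreq(HAL_TICK_FREQ_100HZ) != HAL_OK)
  {
    /* previous frequency was restored internally */
  }

  /* Stop the tick around a timing-critical section, then resume it.
     HAL_Delay() must not be called while the tick is suspended. */
  HAL_SuspendTick();
  /* ... timing-critical code ... */
  HAL_ResumeTick();
}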
+ * @retval Device identifier + */ +uint32_t HAL_GetDEVID(void) +{ + return((DBGMCU->IDCODE) & IDCODE_DEVID_MASK); +} + +/** + * @brief Enable the Debug Module during SLEEP mode + * @retval None + */ +void HAL_DBGMCU_EnableDBGSleepMode(void) +{ + SET_BIT(DBGMCU->CR, DBGMCU_CR_DBG_SLEEP); +} + +/** + * @brief Disable the Debug Module during SLEEP mode + * @retval None + */ +void HAL_DBGMCU_DisableDBGSleepMode(void) +{ + CLEAR_BIT(DBGMCU->CR, DBGMCU_CR_DBG_SLEEP); +} + +/** + * @brief Enable the Debug Module during STOP mode + * @retval None + */ +void HAL_DBGMCU_EnableDBGStopMode(void) +{ + SET_BIT(DBGMCU->CR, DBGMCU_CR_DBG_STOP); +} + +/** + * @brief Disable the Debug Module during STOP mode + * @retval None + */ +void HAL_DBGMCU_DisableDBGStopMode(void) +{ + CLEAR_BIT(DBGMCU->CR, DBGMCU_CR_DBG_STOP); +} + +/** + * @brief Enable the Debug Module during STANDBY mode + * @retval None + */ +void HAL_DBGMCU_EnableDBGStandbyMode(void) +{ + SET_BIT(DBGMCU->CR, DBGMCU_CR_DBG_STANDBY); +} + +/** + * @brief Disable the Debug Module during STANDBY mode + * @retval None + */ +void HAL_DBGMCU_DisableDBGStandbyMode(void) +{ + CLEAR_BIT(DBGMCU->CR, DBGMCU_CR_DBG_STANDBY); +} + +/** + * @brief Enables the I/O Compensation Cell. + * @note The I/O compensation cell can be used only when the device supply + * voltage ranges from 2.4 to 3.6 V. + * @retval None + */ +void HAL_EnableCompensationCell(void) +{ + *(__IO uint32_t *)CMPCR_CMP_PD_BB = (uint32_t)ENABLE; +} + +/** + * @brief Power-down the I/O Compensation Cell. + * @note The I/O compensation cell can be used only when the device supply + * voltage ranges from 2.4 to 3.6 V. + * @retval None + */ +void HAL_DisableCompensationCell(void) +{ + *(__IO uint32_t *)CMPCR_CMP_PD_BB = (uint32_t)DISABLE; +} + +/** + * @brief Returns first word of the unique device identifier (UID based on 96 bits) + * @retval Device identifier + */ +uint32_t HAL_GetUIDw0(void) +{ + return (READ_REG(*((uint32_t *)UID_BASE))); +} + +/** + * @brief Returns second word of the unique device identifier (UID based on 96 bits) + * @retval Device identifier + */ +uint32_t HAL_GetUIDw1(void) +{ + return (READ_REG(*((uint32_t *)(UID_BASE + 4U)))); +} + +/** + * @brief Returns third word of the unique device identifier (UID based on 96 bits) + * @retval Device identifier + */ +uint32_t HAL_GetUIDw2(void) +{ + return (READ_REG(*((uint32_t *)(UID_BASE + 8U)))); +} + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx)|| defined(STM32F439xx) ||\ + defined(STM32F469xx) || defined(STM32F479xx) +/** + * @brief Enables the Internal FLASH Bank Swapping. + * + * @note This function can be used only for STM32F42xxx/43xxx/469xx/479xx devices. + * + * @note Flash Bank2 mapped at 0x08000000 (and aliased @0x00000000) + * and Flash Bank1 mapped at 0x08100000 (and aliased at 0x00100000) + * + * @retval None + */ +void HAL_EnableMemorySwappingBank(void) +{ + *(__IO uint32_t *)UFB_MODE_BB = (uint32_t)ENABLE; +} + +/** + * @brief Disables the Internal FLASH Bank Swapping. + * + * @note This function can be used only for STM32F42xxx/43xxx/469xx/479xx devices. 
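/* Editor's sketch for the identification and debug helpers above (not part of the ST sources):
 * read the device/revision IDs and 96-bit UID, and keep the debugger attached in STOP mode
 * during development. */
static void example_id_and_debug(void)
{
  uint32_t devid = HAL_GetDEVID();     /* DBGMCU IDCODE[11:0]  */
  uint32_t revid = HAL_GetREVID();     /* DBGMCU IDCODE[31:16] */
  uint32_t uid0  = HAL_GetUIDw0();
  uint32_t uid1  = HAL_GetUIDw1();
  uint32_t uid2  = HAL_GetUIDw2();

  /* Keep the debug connection alive while the core is in STOP mode (development builds only) */
  HAL_DBGMCU_EnableDBGStopMode();

  (void)devid; (void)revid; (void)uid0; (void)uid1; (void)uid2;
}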
+ * + * @note The default state : Flash Bank1 mapped at 0x08000000 (and aliased @0x00000000) + * and Flash Bank2 mapped at 0x08100000 (and aliased at 0x00100000) + * + * @retval None + */ +void HAL_DisableMemorySwappingBank(void) +{ + *(__IO uint32_t *)UFB_MODE_BB = (uint32_t)DISABLE; +} +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + + diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_cortex.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_cortex.c new file mode 100644 index 0000000..c3d2ba8 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_cortex.c @@ -0,0 +1,538 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_cortex.c + * @author MCD Application Team + * @brief CORTEX HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the CORTEX: + * + Initialization and de-initialization functions + * + Peripheral Control functions + * + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + + [..] + *** How to configure Interrupts using CORTEX HAL driver *** + =========================================================== + [..] + This section provides functions allowing to configure the NVIC interrupts (IRQ). + The Cortex-M4 exceptions are managed by CMSIS functions. + + (#) Configure the NVIC Priority Grouping using HAL_NVIC_SetPriorityGrouping() + function according to the following table. + (#) Configure the priority of the selected IRQ Channels using HAL_NVIC_SetPriority(). + (#) Enable the selected IRQ Channels using HAL_NVIC_EnableIRQ(). + (#) please refer to programming manual for details in how to configure priority. + + -@- When the NVIC_PRIORITYGROUP_0 is selected, IRQ preemption is no more possible. + The pending IRQ priority will be managed only by the sub priority. + + -@- IRQ priority order (sorted by highest to lowest priority): + (+@) Lowest preemption priority + (+@) Lowest sub priority + (+@) Lowest hardware priority (IRQ number) + + [..] + *** How to configure Systick using CORTEX HAL driver *** + ======================================================== + [..] + Setup SysTick Timer for time base. + + (+) The HAL_SYSTICK_Config() function calls the SysTick_Config() function which + is a CMSIS function that: + (++) Configures the SysTick Reload register with value passed as function parameter. + (++) Configures the SysTick IRQ priority to the lowest value 0x0F. + (++) Resets the SysTick Counter register. + (++) Configures the SysTick Counter clock source to be Core Clock Source (HCLK). + (++) Enables the SysTick Interrupt. + (++) Starts the SysTick Counter. + + (+) You can change the SysTick Clock source to be HCLK_Div8 by calling the macro + __HAL_CORTEX_SYSTICKCLK_CONFIG(SYSTICK_CLKSOURCE_HCLK_DIV8) just after the + HAL_SYSTICK_Config() function call. The __HAL_CORTEX_SYSTICKCLK_CONFIG() macro is defined + inside the stm32f4xx_hal_cortex.h file. + + (+) You can change the SysTick IRQ priority by calling the + HAL_NVIC_SetPriority(SysTick_IRQn,...) function just after the HAL_SYSTICK_Config() function + call. The HAL_NVIC_SetPriority() call the NVIC_SetPriority() function which is a CMSIS function. 
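/* Editor's sketch of the NVIC configuration order described in the how-to above (not part of the
 * ST sources); EXTI0_IRQn is just an example channel taken from the CMSIS device header. */
static void example_nvic_config(void)
{
  /* 1. Priority grouping: 4 bits preemption, 0 bits subpriority (also set by HAL_Init()) */
  HAL_NVIC_SetPriorityGrouping(NVIC_PRIORITYGROUP_4);

  /* 2. Priority of the chosen IRQ channel (lower number = higher priority) */
  HAL_NVIC_SetPriority(EXTI0_IRQn, 5U, 0U);

  /* 3. Enable the IRQ channel */
  HAL_NVIC_EnableIRQ(EXTI0_IRQn);
}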
+ + (+) To adjust the SysTick time base, use the following formula: + + Reload Value = SysTick Counter Clock (Hz) x Desired Time base (s) + (++) Reload Value is the parameter to be passed for HAL_SYSTICK_Config() function + (++) Reload Value should not exceed 0xFFFFFF + + @endverbatim + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @defgroup CORTEX CORTEX + * @brief CORTEX HAL module driver + * @{ + */ + +#ifdef HAL_CORTEX_MODULE_ENABLED + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup CORTEX_Exported_Functions CORTEX Exported Functions + * @{ + */ + + +/** @defgroup CORTEX_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * +@verbatim + ============================================================================== + ##### Initialization and de-initialization functions ##### + ============================================================================== + [..] + This section provides the CORTEX HAL driver functions allowing to configure Interrupts + Systick functionalities + +@endverbatim + * @{ + */ + + +/** + * @brief Sets the priority grouping field (preemption priority and subpriority) + * using the required unlock sequence. + * @param PriorityGroup The priority grouping bits length. + * This parameter can be one of the following values: + * @arg NVIC_PRIORITYGROUP_0: 0 bits for preemption priority + * 4 bits for subpriority + * @arg NVIC_PRIORITYGROUP_1: 1 bits for preemption priority + * 3 bits for subpriority + * @arg NVIC_PRIORITYGROUP_2: 2 bits for preemption priority + * 2 bits for subpriority + * @arg NVIC_PRIORITYGROUP_3: 3 bits for preemption priority + * 1 bits for subpriority + * @arg NVIC_PRIORITYGROUP_4: 4 bits for preemption priority + * 0 bits for subpriority + * @note When the NVIC_PriorityGroup_0 is selected, IRQ preemption is no more possible. + * The pending IRQ priority will be managed only by the subpriority. + * @retval None + */ +void HAL_NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + /* Check the parameters */ + assert_param(IS_NVIC_PRIORITY_GROUP(PriorityGroup)); + + /* Set the PRIGROUP[10:8] bits according to the PriorityGroup parameter value */ + NVIC_SetPriorityGrouping(PriorityGroup); +} + +/** + * @brief Sets the priority of an interrupt. + * @param IRQn External interrupt number. 
+ * This parameter can be an enumerator of IRQn_Type enumeration + * (For the complete STM32 Devices IRQ Channels list, please refer to the appropriate CMSIS device file (stm32f4xxxx.h)) + * @param PreemptPriority The preemption priority for the IRQn channel. + * This parameter can be a value between 0 and 15 + * A lower priority value indicates a higher priority + * @param SubPriority the subpriority level for the IRQ channel. + * This parameter can be a value between 0 and 15 + * A lower priority value indicates a higher priority. + * @retval None + */ +void HAL_NVIC_SetPriority(IRQn_Type IRQn, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t prioritygroup = 0x00U; + + /* Check the parameters */ + assert_param(IS_NVIC_SUB_PRIORITY(SubPriority)); + assert_param(IS_NVIC_PREEMPTION_PRIORITY(PreemptPriority)); + + prioritygroup = NVIC_GetPriorityGrouping(); + + NVIC_SetPriority(IRQn, NVIC_EncodePriority(prioritygroup, PreemptPriority, SubPriority)); +} + +/** + * @brief Enables a device specific interrupt in the NVIC interrupt controller. + * @note To configure interrupts priority correctly, the NVIC_PriorityGroupConfig() + * function should be called before. + * @param IRQn External interrupt number. + * This parameter can be an enumerator of IRQn_Type enumeration + * (For the complete STM32 Devices IRQ Channels list, please refer to the appropriate CMSIS device file (stm32f4xxxx.h)) + * @retval None + */ +void HAL_NVIC_EnableIRQ(IRQn_Type IRQn) +{ + /* Check the parameters */ + assert_param(IS_NVIC_DEVICE_IRQ(IRQn)); + + /* Enable interrupt */ + NVIC_EnableIRQ(IRQn); +} + +/** + * @brief Disables a device specific interrupt in the NVIC interrupt controller. + * @param IRQn External interrupt number. + * This parameter can be an enumerator of IRQn_Type enumeration + * (For the complete STM32 Devices IRQ Channels list, please refer to the appropriate CMSIS device file (stm32f4xxxx.h)) + * @retval None + */ +void HAL_NVIC_DisableIRQ(IRQn_Type IRQn) +{ + /* Check the parameters */ + assert_param(IS_NVIC_DEVICE_IRQ(IRQn)); + + /* Disable interrupt */ + NVIC_DisableIRQ(IRQn); +} + +/** + * @brief Initiates a system reset request to reset the MCU. + * @retval None + */ +void HAL_NVIC_SystemReset(void) +{ + /* System Reset */ + NVIC_SystemReset(); +} + +/** + * @brief Initializes the System Timer and its interrupt, and starts the System Tick Timer. + * Counter is in free running mode to generate periodic interrupts. + * @param TicksNumb Specifies the ticks Number of ticks between two interrupts. + * @retval status: - 0 Function succeeded. + * - 1 Function failed. + */ +uint32_t HAL_SYSTICK_Config(uint32_t TicksNumb) +{ + return SysTick_Config(TicksNumb); +} +/** + * @} + */ + +/** @defgroup CORTEX_Exported_Functions_Group2 Peripheral Control functions + * @brief Cortex control functions + * +@verbatim + ============================================================================== + ##### Peripheral Control functions ##### + ============================================================================== + [..] + This subsection provides a set of functions allowing to control the CORTEX + (NVIC, SYSTICK, MPU) functionalities. 
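/* Editor's worked example of the reload formula above (Reload = SysTick counter clock x desired
 * time base), not part of the ST sources; SystemCoreClock = 168 MHz is an assumption. */
static void example_systick_config(void)
{
  /* 1 ms time base on HCLK: 168 000 000 Hz x 0.001 s = 168 000 ticks (must stay below 0xFFFFFF) */
  (void)HAL_SYSTICK_Config(SystemCoreClock / 1000U);

  /* Optional: clock SysTick from HCLK/8 instead; the reload value must then also be divided by 8 */
  /* HAL_SYSTICK_CLKSourceConfig(SYSTICK_CLKSOURCE_HCLK_DIV8); */
}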
+ + +@endverbatim + * @{ + */ + +#if (__MPU_PRESENT == 1U) +/** + * @brief Disables the MPU + * @retval None + */ +void HAL_MPU_Disable(void) +{ + /* Make sure outstanding transfers are done */ + __DMB(); + + /* Disable fault exceptions */ + SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk; + + /* Disable the MPU and clear the control register*/ + MPU->CTRL = 0U; +} + +/** + * @brief Enable the MPU. + * @param MPU_Control Specifies the control mode of the MPU during hard fault, + * NMI, FAULTMASK and privileged access to the default memory + * This parameter can be one of the following values: + * @arg MPU_HFNMI_PRIVDEF_NONE + * @arg MPU_HARDFAULT_NMI + * @arg MPU_PRIVILEGED_DEFAULT + * @arg MPU_HFNMI_PRIVDEF + * @retval None + */ +void HAL_MPU_Enable(uint32_t MPU_Control) +{ + /* Enable the MPU */ + MPU->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk; + + /* Enable fault exceptions */ + SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk; + + /* Ensure MPU setting take effects */ + __DSB(); + __ISB(); +} + +/** + * @brief Enables the MPU Region. + * @retval None + */ +void HAL_MPU_EnableRegion(uint32_t RegionNumber) +{ + /* Check the parameters */ + assert_param(IS_MPU_REGION_NUMBER(RegionNumber)); + + /* Set the Region number */ + MPU->RNR = RegionNumber; + + /* Enable the Region */ + SET_BIT(MPU->RASR, MPU_RASR_ENABLE_Msk); +} + +/** + * @brief Disables the MPU Region. + * @retval None + */ +void HAL_MPU_DisableRegion(uint32_t RegionNumber) +{ + /* Check the parameters */ + assert_param(IS_MPU_REGION_NUMBER(RegionNumber)); + + /* Set the Region number */ + MPU->RNR = RegionNumber; + + /* Disable the Region */ + CLEAR_BIT(MPU->RASR, MPU_RASR_ENABLE_Msk); +} + +/** + * @brief Initializes and configures the Region and the memory to be protected. + * @param MPU_Init Pointer to a MPU_Region_InitTypeDef structure that contains + * the initialization and configuration information. + * @retval None + */ +void HAL_MPU_ConfigRegion(MPU_Region_InitTypeDef *MPU_Init) +{ + /* Check the parameters */ + assert_param(IS_MPU_REGION_NUMBER(MPU_Init->Number)); + assert_param(IS_MPU_REGION_ENABLE(MPU_Init->Enable)); + assert_param(IS_MPU_INSTRUCTION_ACCESS(MPU_Init->DisableExec)); + assert_param(IS_MPU_REGION_PERMISSION_ATTRIBUTE(MPU_Init->AccessPermission)); + assert_param(IS_MPU_TEX_LEVEL(MPU_Init->TypeExtField)); + assert_param(IS_MPU_ACCESS_SHAREABLE(MPU_Init->IsShareable)); + assert_param(IS_MPU_ACCESS_CACHEABLE(MPU_Init->IsCacheable)); + assert_param(IS_MPU_ACCESS_BUFFERABLE(MPU_Init->IsBufferable)); + assert_param(IS_MPU_SUB_REGION_DISABLE(MPU_Init->SubRegionDisable)); + assert_param(IS_MPU_REGION_SIZE(MPU_Init->Size)); + + /* Set the Region number */ + MPU->RNR = MPU_Init->Number; + + /* Disable the Region */ + CLEAR_BIT(MPU->RASR, MPU_RASR_ENABLE_Msk); + + /* Apply configuration */ + MPU->RBAR = MPU_Init->BaseAddress; + MPU->RASR = ((uint32_t)MPU_Init->DisableExec << MPU_RASR_XN_Pos) | + ((uint32_t)MPU_Init->AccessPermission << MPU_RASR_AP_Pos) | + ((uint32_t)MPU_Init->TypeExtField << MPU_RASR_TEX_Pos) | + ((uint32_t)MPU_Init->IsShareable << MPU_RASR_S_Pos) | + ((uint32_t)MPU_Init->IsCacheable << MPU_RASR_C_Pos) | + ((uint32_t)MPU_Init->IsBufferable << MPU_RASR_B_Pos) | + ((uint32_t)MPU_Init->SubRegionDisable << MPU_RASR_SRD_Pos) | + ((uint32_t)MPU_Init->Size << MPU_RASR_SIZE_Pos) | + ((uint32_t)MPU_Init->Enable << MPU_RASR_ENABLE_Pos); +} +#endif /* __MPU_PRESENT */ + +/** + * @brief Clear pending events. 
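/* Editor's sketch of HAL_MPU_ConfigRegion()/HAL_MPU_Enable() usage (not part of the ST sources).
 * The region attributes are example values; constants such as MPU_REGION_SIZE_32KB are assumed
 * to come from stm32f4xx_hal_cortex.h. */
static void example_mpu_config(void)
{
  MPU_Region_InitTypeDef region = {0};

  HAL_MPU_Disable();                               /* MPU must be disabled while reconfiguring */

  region.Enable           = MPU_REGION_ENABLE;
  region.Number           = MPU_REGION_NUMBER0;
  region.BaseAddress      = 0x20000000U;           /* example: first 32 KB of SRAM */
  region.Size             = MPU_REGION_SIZE_32KB;
  region.AccessPermission = MPU_REGION_FULL_ACCESS;
  region.DisableExec      = MPU_INSTRUCTION_ACCESS_ENABLE;
  region.IsShareable      = MPU_ACCESS_NOT_SHAREABLE;
  region.IsCacheable      = MPU_ACCESS_CACHEABLE;
  region.IsBufferable     = MPU_ACCESS_NOT_BUFFERABLE;
  region.SubRegionDisable = 0x00U;
  region.TypeExtField     = MPU_TEX_LEVEL0;
  HAL_MPU_ConfigRegion(&region);

  HAL_MPU_Enable(MPU_PRIVILEGED_DEFAULT);          /* default map stays usable for privileged code */
}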
+ * @retval None + */ +void HAL_CORTEX_ClearEvent(void) +{ + __SEV(); + __WFE(); +} + +/** + * @brief Gets the priority grouping field from the NVIC Interrupt Controller. + * @retval Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field) + */ +uint32_t HAL_NVIC_GetPriorityGrouping(void) +{ + /* Get the PRIGROUP[10:8] field value */ + return NVIC_GetPriorityGrouping(); +} + +/** + * @brief Gets the priority of an interrupt. + * @param IRQn External interrupt number. + * This parameter can be an enumerator of IRQn_Type enumeration + * (For the complete STM32 Devices IRQ Channels list, please refer to the appropriate CMSIS device file (stm32f4xxxx.h)) + * @param PriorityGroup the priority grouping bits length. + * This parameter can be one of the following values: + * @arg NVIC_PRIORITYGROUP_0: 0 bits for preemption priority + * 4 bits for subpriority + * @arg NVIC_PRIORITYGROUP_1: 1 bits for preemption priority + * 3 bits for subpriority + * @arg NVIC_PRIORITYGROUP_2: 2 bits for preemption priority + * 2 bits for subpriority + * @arg NVIC_PRIORITYGROUP_3: 3 bits for preemption priority + * 1 bits for subpriority + * @arg NVIC_PRIORITYGROUP_4: 4 bits for preemption priority + * 0 bits for subpriority + * @param pPreemptPriority Pointer on the Preemptive priority value (starting from 0). + * @param pSubPriority Pointer on the Subpriority value (starting from 0). + * @retval None + */ +void HAL_NVIC_GetPriority(IRQn_Type IRQn, uint32_t PriorityGroup, uint32_t *pPreemptPriority, uint32_t *pSubPriority) +{ + /* Check the parameters */ + assert_param(IS_NVIC_PRIORITY_GROUP(PriorityGroup)); + /* Get priority for Cortex-M system or device specific interrupts */ + NVIC_DecodePriority(NVIC_GetPriority(IRQn), PriorityGroup, pPreemptPriority, pSubPriority); +} + +/** + * @brief Sets Pending bit of an external interrupt. + * @param IRQn External interrupt number + * This parameter can be an enumerator of IRQn_Type enumeration + * (For the complete STM32 Devices IRQ Channels list, please refer to the appropriate CMSIS device file (stm32f4xxxx.h)) + * @retval None + */ +void HAL_NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + /* Check the parameters */ + assert_param(IS_NVIC_DEVICE_IRQ(IRQn)); + + /* Set interrupt pending */ + NVIC_SetPendingIRQ(IRQn); +} + +/** + * @brief Gets Pending Interrupt (reads the pending register in the NVIC + * and returns the pending bit for the specified interrupt). + * @param IRQn External interrupt number. + * This parameter can be an enumerator of IRQn_Type enumeration + * (For the complete STM32 Devices IRQ Channels list, please refer to the appropriate CMSIS device file (stm32f4xxxx.h)) + * @retval status: - 0 Interrupt status is not pending. + * - 1 Interrupt status is pending. + */ +uint32_t HAL_NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + /* Check the parameters */ + assert_param(IS_NVIC_DEVICE_IRQ(IRQn)); + + /* Return 1 if pending else 0 */ + return NVIC_GetPendingIRQ(IRQn); +} + +/** + * @brief Clears the pending bit of an external interrupt. + * @param IRQn External interrupt number. 
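/* Editor's sketch of software-pending an interrupt with the services above (not part of the
 * ST sources); EXTI0_IRQn is an example channel. */
static void example_pending_irq(void)
{
  HAL_NVIC_SetPendingIRQ(EXTI0_IRQn);              /* force the IRQ from software */

  if (HAL_NVIC_GetPendingIRQ(EXTI0_IRQn) != 0U)    /* 1 if pending, 0 otherwise */
  {
    HAL_NVIC_ClearPendingIRQ(EXTI0_IRQn);          /* drop it again without the ISR running */
  }
}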
+ * This parameter can be an enumerator of IRQn_Type enumeration + * (For the complete STM32 Devices IRQ Channels list, please refer to the appropriate CMSIS device file (stm32f4xxxx.h)) + * @retval None + */ +void HAL_NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + /* Check the parameters */ + assert_param(IS_NVIC_DEVICE_IRQ(IRQn)); + + /* Clear pending interrupt */ + NVIC_ClearPendingIRQ(IRQn); +} + +/** + * @brief Gets active interrupt ( reads the active register in NVIC and returns the active bit). + * @param IRQn External interrupt number + * This parameter can be an enumerator of IRQn_Type enumeration + * (For the complete STM32 Devices IRQ Channels list, please refer to the appropriate CMSIS device file (stm32f4xxxx.h)) + * @retval status: - 0 Interrupt status is not pending. + * - 1 Interrupt status is pending. + */ +uint32_t HAL_NVIC_GetActive(IRQn_Type IRQn) +{ + /* Check the parameters */ + assert_param(IS_NVIC_DEVICE_IRQ(IRQn)); + + /* Return 1 if active else 0 */ + return NVIC_GetActive(IRQn); +} + +/** + * @brief Configures the SysTick clock source. + * @param CLKSource specifies the SysTick clock source. + * This parameter can be one of the following values: + * @arg SYSTICK_CLKSOURCE_HCLK_DIV8: AHB clock divided by 8 selected as SysTick clock source. + * @arg SYSTICK_CLKSOURCE_HCLK: AHB clock selected as SysTick clock source. + * @retval None + */ +void HAL_SYSTICK_CLKSourceConfig(uint32_t CLKSource) +{ + /* Check the parameters */ + assert_param(IS_SYSTICK_CLK_SOURCE(CLKSource)); + if (CLKSource == SYSTICK_CLKSOURCE_HCLK) + { + SysTick->CTRL |= SYSTICK_CLKSOURCE_HCLK; + } + else + { + SysTick->CTRL &= ~SYSTICK_CLKSOURCE_HCLK; + } +} + +/** + * @brief This function handles SYSTICK interrupt request. + * @retval None + */ +void HAL_SYSTICK_IRQHandler(void) +{ + HAL_SYSTICK_Callback(); +} + +/** + * @brief SYSTICK callback. + * @retval None + */ +__weak void HAL_SYSTICK_Callback(void) +{ + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_SYSTICK_Callback could be implemented in the user file + */ +} + +/** + * @} + */ + +/** + * @} + */ + +#endif /* HAL_CORTEX_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dma.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dma.c new file mode 100644 index 0000000..3dbb477 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dma.c @@ -0,0 +1,1305 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_dma.c + * @author MCD Application Team + * @brief DMA HAL module driver. + * + * This file provides firmware functions to manage the following + * functionalities of the Direct Memory Access (DMA) peripheral: + * + Initialization and de-initialization functions + * + IO operation functions + * + Peripheral State and errors functions + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + (#) Enable and configure the peripheral to be connected to the DMA Stream + (except for internal SRAM/FLASH memories: no initialization is + necessary) please refer to Reference manual for connection between peripherals + and DMA requests. 
+ + (#) For a given Stream, program the required configuration through the following parameters: + Transfer Direction, Source and Destination data formats, + Circular, Normal or peripheral flow control mode, Stream Priority level, + Source and Destination Increment mode, FIFO mode and its Threshold (if needed), + Burst mode for Source and/or Destination (if needed) using HAL_DMA_Init() function. + + -@- Prior to HAL_DMA_Init() the clock must be enabled for DMA through the following macros: + __HAL_RCC_DMA1_CLK_ENABLE() or __HAL_RCC_DMA2_CLK_ENABLE(). + + *** Polling mode IO operation *** + ================================= + [..] + (+) Use HAL_DMA_Start() to start DMA transfer after the configuration of Source + address and destination address and the Length of data to be transferred. + (+) Use HAL_DMA_PollForTransfer() to poll for the end of current transfer, in this + case a fixed Timeout can be configured by User depending from his application. + (+) Use HAL_DMA_Abort() function to abort the current transfer. + + *** Interrupt mode IO operation *** + =================================== + [..] + (+) Configure the DMA interrupt priority using HAL_NVIC_SetPriority() + (+) Enable the DMA IRQ handler using HAL_NVIC_EnableIRQ() + (+) Use HAL_DMA_Start_IT() to start DMA transfer after the configuration of + Source address and destination address and the Length of data to be transferred. In this + case the DMA interrupt is configured + (+) Use HAL_DMA_IRQHandler() called under DMA_IRQHandler() Interrupt subroutine + (+) At the end of data transfer HAL_DMA_IRQHandler() function is executed and user can + add his own function by customization of function pointer XferCpltCallback and + XferErrorCallback (i.e a member of DMA handle structure). + [..] + (#) Use HAL_DMA_GetState() function to return the DMA state and HAL_DMA_GetError() in case of error + detection. + + (#) Use HAL_DMA_Abort_IT() function to abort the current transfer + + -@- In Memory-to-Memory transfer mode, Circular mode is not allowed. + + -@- The FIFO is used mainly to reduce bus usage and to allow data packing/unpacking: it is + possible to set different Data Sizes for the Peripheral and the Memory (ie. you can set + Half-Word data size for the peripheral to access its data register and set Word data size + for the Memory to gain in access time. Each two half words will be packed and written in + a single access to a Word in the Memory). + + -@- When FIFO is disabled, it is not allowed to configure different Data Sizes for Source + and Destination. In this case the Peripheral Data Size will be applied to both Source + and Destination. + + *** DMA HAL driver macros list *** + ============================================= + [..] + Below the list of most used macros in DMA HAL driver. + + (+) __HAL_DMA_ENABLE: Enable the specified DMA Stream. + (+) __HAL_DMA_DISABLE: Disable the specified DMA Stream. + (+) __HAL_DMA_GET_IT_SOURCE: Check whether the specified DMA Stream interrupt has occurred or not. + + [..] + (@) You can refer to the DMA HAL driver header file for more useful macros + + @endverbatim + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
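/* Editor's sketch following the "How to use this driver" steps above (not part of the ST sources).
 * DMA2 Stream 7 / Channel 4 is only an example pairing; pick the stream/channel mapped to the
 * peripheral request actually used. */
static DMA_HandleTypeDef hdma_example;

static void example_dma_init(void)
{
  __HAL_RCC_DMA2_CLK_ENABLE();                           /* clock must be enabled before HAL_DMA_Init() */

  hdma_example.Instance                 = DMA2_Stream7;  /* example stream */
  hdma_example.Init.Channel             = DMA_CHANNEL_4; /* example channel */
  hdma_example.Init.Direction           = DMA_MEMORY_TO_PERIPH;
  hdma_example.Init.PeriphInc           = DMA_PINC_DISABLE;
  hdma_example.Init.MemInc              = DMA_MINC_ENABLE;
  hdma_example.Init.PeriphDataAlignment = DMA_PDATAALIGN_BYTE;
  hdma_example.Init.MemDataAlignment    = DMA_MDATAALIGN_BYTE;   /* must match when FIFO is disabled */
  hdma_example.Init.Mode                = DMA_NORMAL;
  hdma_example.Init.Priority            = DMA_PRIORITY_LOW;
  hdma_example.Init.FIFOMode            = DMA_FIFOMODE_DISABLE;

  if (HAL_DMA_Init(&hdma_example) != HAL_OK)
  {
    /* handle the error */
  }

  /* Interrupt mode additionally needs the stream IRQ enabled in the NVIC */
  HAL_NVIC_SetPriority(DMA2_Stream7_IRQn, 6U, 0U);
  HAL_NVIC_EnableIRQ(DMA2_Stream7_IRQn);
}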
+ * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @defgroup DMA DMA + * @brief DMA HAL module driver + * @{ + */ + +#ifdef HAL_DMA_MODULE_ENABLED + +/* Private types -------------------------------------------------------------*/ +typedef struct +{ + __IO uint32_t ISR; /*!< DMA interrupt status register */ + __IO uint32_t Reserved0; + __IO uint32_t IFCR; /*!< DMA interrupt flag clear register */ +} DMA_Base_Registers; + +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @addtogroup DMA_Private_Constants + * @{ + */ + #define HAL_TIMEOUT_DMA_ABORT 5U /* 5 ms */ +/** + * @} + */ +/* Private macros ------------------------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/** @addtogroup DMA_Private_Functions + * @{ + */ +static void DMA_SetConfig(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength); +static uint32_t DMA_CalcBaseAndBitshift(DMA_HandleTypeDef *hdma); +static HAL_StatusTypeDef DMA_CheckFifoParam(DMA_HandleTypeDef *hdma); + +/** + * @} + */ + +/* Exported functions ---------------------------------------------------------*/ +/** @addtogroup DMA_Exported_Functions + * @{ + */ + +/** @addtogroup DMA_Exported_Functions_Group1 + * +@verbatim + =============================================================================== + ##### Initialization and de-initialization functions ##### + =============================================================================== + [..] + This section provides functions allowing to initialize the DMA Stream source + and destination addresses, incrementation and data sizes, transfer direction, + circular/normal mode selection, memory-to-memory mode selection and Stream priority value. + [..] + The HAL_DMA_Init() function follows the DMA configuration procedures as described in + reference manual. + +@endverbatim + * @{ + */ + +/** + * @brief Initialize the DMA according to the specified + * parameters in the DMA_InitTypeDef and create the associated handle. + * @param hdma Pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_Init(DMA_HandleTypeDef *hdma) +{ + uint32_t tmp = 0U; + uint32_t tickstart = HAL_GetTick(); + DMA_Base_Registers *regs; + + /* Check the DMA peripheral state */ + if(hdma == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_DMA_STREAM_ALL_INSTANCE(hdma->Instance)); + assert_param(IS_DMA_CHANNEL(hdma->Init.Channel)); + assert_param(IS_DMA_DIRECTION(hdma->Init.Direction)); + assert_param(IS_DMA_PERIPHERAL_INC_STATE(hdma->Init.PeriphInc)); + assert_param(IS_DMA_MEMORY_INC_STATE(hdma->Init.MemInc)); + assert_param(IS_DMA_PERIPHERAL_DATA_SIZE(hdma->Init.PeriphDataAlignment)); + assert_param(IS_DMA_MEMORY_DATA_SIZE(hdma->Init.MemDataAlignment)); + assert_param(IS_DMA_MODE(hdma->Init.Mode)); + assert_param(IS_DMA_PRIORITY(hdma->Init.Priority)); + assert_param(IS_DMA_FIFO_MODE_STATE(hdma->Init.FIFOMode)); + /* Check the memory burst, peripheral burst and FIFO threshold parameters only + when FIFO mode is enabled */ + if(hdma->Init.FIFOMode != DMA_FIFOMODE_DISABLE) + { + assert_param(IS_DMA_FIFO_THRESHOLD(hdma->Init.FIFOThreshold)); + assert_param(IS_DMA_MEMORY_BURST(hdma->Init.MemBurst)); + assert_param(IS_DMA_PERIPHERAL_BURST(hdma->Init.PeriphBurst)); + } + + /* Change DMA peripheral state */ + hdma->State = HAL_DMA_STATE_BUSY; + + /* Allocate lock resource */ + __HAL_UNLOCK(hdma); + + /* Disable the peripheral */ + __HAL_DMA_DISABLE(hdma); + + /* Check if the DMA Stream is effectively disabled */ + while((hdma->Instance->CR & DMA_SxCR_EN) != RESET) + { + /* Check for the Timeout */ + if((HAL_GetTick() - tickstart ) > HAL_TIMEOUT_DMA_ABORT) + { + /* Update error code */ + hdma->ErrorCode = HAL_DMA_ERROR_TIMEOUT; + + /* Change the DMA state */ + hdma->State = HAL_DMA_STATE_TIMEOUT; + + return HAL_TIMEOUT; + } + } + + /* Get the CR register value */ + tmp = hdma->Instance->CR; + + /* Clear CHSEL, MBURST, PBURST, PL, MSIZE, PSIZE, MINC, PINC, CIRC, DIR, CT and DBM bits */ + tmp &= ((uint32_t)~(DMA_SxCR_CHSEL | DMA_SxCR_MBURST | DMA_SxCR_PBURST | \ + DMA_SxCR_PL | DMA_SxCR_MSIZE | DMA_SxCR_PSIZE | \ + DMA_SxCR_MINC | DMA_SxCR_PINC | DMA_SxCR_CIRC | \ + DMA_SxCR_DIR | DMA_SxCR_CT | DMA_SxCR_DBM)); + + /* Prepare the DMA Stream configuration */ + tmp |= hdma->Init.Channel | hdma->Init.Direction | + hdma->Init.PeriphInc | hdma->Init.MemInc | + hdma->Init.PeriphDataAlignment | hdma->Init.MemDataAlignment | + hdma->Init.Mode | hdma->Init.Priority; + + /* the Memory burst and peripheral burst are not used when the FIFO is disabled */ + if(hdma->Init.FIFOMode == DMA_FIFOMODE_ENABLE) + { + /* Get memory burst and peripheral burst */ + tmp |= hdma->Init.MemBurst | hdma->Init.PeriphBurst; + } + + /* Write to DMA Stream CR register */ + hdma->Instance->CR = tmp; + + /* Get the FCR register value */ + tmp = hdma->Instance->FCR; + + /* Clear Direct mode and FIFO threshold bits */ + tmp &= (uint32_t)~(DMA_SxFCR_DMDIS | DMA_SxFCR_FTH); + + /* Prepare the DMA Stream FIFO configuration */ + tmp |= hdma->Init.FIFOMode; + + /* The FIFO threshold is not used when the FIFO mode is disabled */ + if(hdma->Init.FIFOMode == DMA_FIFOMODE_ENABLE) + { + /* Get the FIFO threshold */ + tmp |= hdma->Init.FIFOThreshold; + + /* Check compatibility between FIFO threshold level and size of the memory burst */ + /* for INCR4, INCR8, INCR16 bursts */ + if (hdma->Init.MemBurst != DMA_MBURST_SINGLE) + { + if (DMA_CheckFifoParam(hdma) != HAL_OK) + { + /* Update error code */ + hdma->ErrorCode = HAL_DMA_ERROR_PARAM; + + /* Change the DMA state */ + 
hdma->State = HAL_DMA_STATE_READY; + + return HAL_ERROR; + } + } + } + + /* Write to DMA Stream FCR */ + hdma->Instance->FCR = tmp; + + /* Initialize StreamBaseAddress and StreamIndex parameters to be used to calculate + DMA steam Base Address needed by HAL_DMA_IRQHandler() and HAL_DMA_PollForTransfer() */ + regs = (DMA_Base_Registers *)DMA_CalcBaseAndBitshift(hdma); + + /* Clear all interrupt flags */ + regs->IFCR = 0x3FU << hdma->StreamIndex; + + /* Initialize the error code */ + hdma->ErrorCode = HAL_DMA_ERROR_NONE; + + /* Initialize the DMA state */ + hdma->State = HAL_DMA_STATE_READY; + + return HAL_OK; +} + +/** + * @brief DeInitializes the DMA peripheral + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_DeInit(DMA_HandleTypeDef *hdma) +{ + DMA_Base_Registers *regs; + + /* Check the DMA peripheral state */ + if(hdma == NULL) + { + return HAL_ERROR; + } + + /* Check the DMA peripheral state */ + if(hdma->State == HAL_DMA_STATE_BUSY) + { + /* Return error status */ + return HAL_BUSY; + } + + /* Check the parameters */ + assert_param(IS_DMA_STREAM_ALL_INSTANCE(hdma->Instance)); + + /* Disable the selected DMA Streamx */ + __HAL_DMA_DISABLE(hdma); + + /* Reset DMA Streamx control register */ + hdma->Instance->CR = 0U; + + /* Reset DMA Streamx number of data to transfer register */ + hdma->Instance->NDTR = 0U; + + /* Reset DMA Streamx peripheral address register */ + hdma->Instance->PAR = 0U; + + /* Reset DMA Streamx memory 0 address register */ + hdma->Instance->M0AR = 0U; + + /* Reset DMA Streamx memory 1 address register */ + hdma->Instance->M1AR = 0U; + + /* Reset DMA Streamx FIFO control register */ + hdma->Instance->FCR = 0x00000021U; + + /* Get DMA steam Base Address */ + regs = (DMA_Base_Registers *)DMA_CalcBaseAndBitshift(hdma); + + /* Clean all callbacks */ + hdma->XferCpltCallback = NULL; + hdma->XferHalfCpltCallback = NULL; + hdma->XferM1CpltCallback = NULL; + hdma->XferM1HalfCpltCallback = NULL; + hdma->XferErrorCallback = NULL; + hdma->XferAbortCallback = NULL; + + /* Clear all interrupt flags at correct offset within the register */ + regs->IFCR = 0x3FU << hdma->StreamIndex; + + /* Reset the error code */ + hdma->ErrorCode = HAL_DMA_ERROR_NONE; + + /* Reset the DMA state */ + hdma->State = HAL_DMA_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(hdma); + + return HAL_OK; +} + +/** + * @} + */ + +/** @addtogroup DMA_Exported_Functions_Group2 + * +@verbatim + =============================================================================== + ##### IO operation functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Configure the source, destination address and data length and Start DMA transfer + (+) Configure the source, destination address and data length and + Start DMA transfer with interrupt + (+) Abort DMA transfer + (+) Poll for transfer complete + (+) Handle DMA interrupt request + +@endverbatim + * @{ + */ + +/** + * @brief Starts the DMA Transfer. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. 
+ * @param SrcAddress The source memory Buffer address + * @param DstAddress The destination memory Buffer address + * @param DataLength The length of data to be transferred from source to destination + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_Start(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_DMA_BUFFER_SIZE(DataLength)); + + /* Process locked */ + __HAL_LOCK(hdma); + + if(HAL_DMA_STATE_READY == hdma->State) + { + /* Change DMA peripheral state */ + hdma->State = HAL_DMA_STATE_BUSY; + + /* Initialize the error code */ + hdma->ErrorCode = HAL_DMA_ERROR_NONE; + + /* Configure the source, destination address and the data length */ + DMA_SetConfig(hdma, SrcAddress, DstAddress, DataLength); + + /* Enable the Peripheral */ + __HAL_DMA_ENABLE(hdma); + } + else + { + /* Process unlocked */ + __HAL_UNLOCK(hdma); + + /* Return error status */ + status = HAL_BUSY; + } + return status; +} + +/** + * @brief Start the DMA Transfer with interrupt enabled. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @param SrcAddress The source memory Buffer address + * @param DstAddress The destination memory Buffer address + * @param DataLength The length of data to be transferred from source to destination + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_Start_IT(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* calculate DMA base and stream number */ + DMA_Base_Registers *regs = (DMA_Base_Registers *)hdma->StreamBaseAddress; + + /* Check the parameters */ + assert_param(IS_DMA_BUFFER_SIZE(DataLength)); + + /* Process locked */ + __HAL_LOCK(hdma); + + if(HAL_DMA_STATE_READY == hdma->State) + { + /* Change DMA peripheral state */ + hdma->State = HAL_DMA_STATE_BUSY; + + /* Initialize the error code */ + hdma->ErrorCode = HAL_DMA_ERROR_NONE; + + /* Configure the source, destination address and the data length */ + DMA_SetConfig(hdma, SrcAddress, DstAddress, DataLength); + + /* Clear all interrupt flags at correct offset within the register */ + regs->IFCR = 0x3FU << hdma->StreamIndex; + + /* Enable Common interrupts*/ + hdma->Instance->CR |= DMA_IT_TC | DMA_IT_TE | DMA_IT_DME; + + if(hdma->XferHalfCpltCallback != NULL) + { + hdma->Instance->CR |= DMA_IT_HT; + } + + /* Enable the Peripheral */ + __HAL_DMA_ENABLE(hdma); + } + else + { + /* Process unlocked */ + __HAL_UNLOCK(hdma); + + /* Return error status */ + status = HAL_BUSY; + } + + return status; +} + +/** + * @brief Aborts the DMA Transfer. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * + * @note After disabling a DMA Stream, a check for wait until the DMA Stream is + * effectively disabled is added. If a Stream is disabled + * while a data transfer is ongoing, the current data will be transferred + * and the Stream will be effectively disabled only after the transfer of + * this single data is finished. 
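/* Editor's sketch of an interrupt-mode transfer built on HAL_DMA_Start_IT() above (not part of
 * the ST sources); tx_done_cb/tx_error_cb and the hdma_example handle from the earlier init
 * sketch are illustrative names. */
extern DMA_HandleTypeDef hdma_example;

static void tx_done_cb(DMA_HandleTypeDef *hdma)  { (void)hdma; /* transfer complete */ }
static void tx_error_cb(DMA_HandleTypeDef *hdma) { (void)hdma; /* inspect hdma->ErrorCode */ }

static void example_dma_start_it(uint32_t src, uint32_t dst, uint32_t len)
{
  hdma_example.XferCpltCallback  = tx_done_cb;    /* called from HAL_DMA_IRQHandler() on completion */
  hdma_example.XferErrorCallback = tx_error_cb;

  (void)HAL_DMA_Start_IT(&hdma_example, src, dst, len);
}

void DMA2_Stream7_IRQHandler(void)                /* usually placed in stm32f4xx_it.c */
{
  HAL_DMA_IRQHandler(&hdma_example);
}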
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_Abort(DMA_HandleTypeDef *hdma) +{ + /* calculate DMA base and stream number */ + DMA_Base_Registers *regs = (DMA_Base_Registers *)hdma->StreamBaseAddress; + + uint32_t tickstart = HAL_GetTick(); + + if(hdma->State != HAL_DMA_STATE_BUSY) + { + hdma->ErrorCode = HAL_DMA_ERROR_NO_XFER; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + + return HAL_ERROR; + } + else + { + /* Disable all the transfer interrupts */ + hdma->Instance->CR &= ~(DMA_IT_TC | DMA_IT_TE | DMA_IT_DME); + hdma->Instance->FCR &= ~(DMA_IT_FE); + + if((hdma->XferHalfCpltCallback != NULL) || (hdma->XferM1HalfCpltCallback != NULL)) + { + hdma->Instance->CR &= ~(DMA_IT_HT); + } + + /* Disable the stream */ + __HAL_DMA_DISABLE(hdma); + + /* Check if the DMA Stream is effectively disabled */ + while((hdma->Instance->CR & DMA_SxCR_EN) != RESET) + { + /* Check for the Timeout */ + if((HAL_GetTick() - tickstart ) > HAL_TIMEOUT_DMA_ABORT) + { + /* Update error code */ + hdma->ErrorCode = HAL_DMA_ERROR_TIMEOUT; + + /* Change the DMA state */ + hdma->State = HAL_DMA_STATE_TIMEOUT; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + + return HAL_TIMEOUT; + } + } + + /* Clear all interrupt flags at correct offset within the register */ + regs->IFCR = 0x3FU << hdma->StreamIndex; + + /* Change the DMA state*/ + hdma->State = HAL_DMA_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + } + return HAL_OK; +} + +/** + * @brief Aborts the DMA Transfer in Interrupt mode. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_Abort_IT(DMA_HandleTypeDef *hdma) +{ + if(hdma->State != HAL_DMA_STATE_BUSY) + { + hdma->ErrorCode = HAL_DMA_ERROR_NO_XFER; + return HAL_ERROR; + } + else + { + /* Set Abort State */ + hdma->State = HAL_DMA_STATE_ABORT; + + /* Disable the stream */ + __HAL_DMA_DISABLE(hdma); + } + + return HAL_OK; +} + +/** + * @brief Polling for transfer complete. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @param CompleteLevel Specifies the DMA level complete. + * @note The polling mode is kept in this version for legacy. it is recommended to use the IT model instead. + * This model could be used for debug purpose. + * @note The HAL_DMA_PollForTransfer API cannot be used in circular and double buffering mode (automatic circular mode). + * @param Timeout Timeout duration. 
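/* Editor's sketch of a polled transfer, kept for legacy/debug use as noted above (not part of
 * the ST sources); the 100 ms timeout and the hdma_example handle are illustrative. */
static HAL_StatusTypeDef example_dma_polled(uint32_t src, uint32_t dst, uint32_t len)
{
  if (HAL_DMA_Start(&hdma_example, src, dst, len) != HAL_OK)
  {
    return HAL_ERROR;
  }

  /* Not usable in circular or double-buffer mode; aborts the stream internally on a transfer error */
  return HAL_DMA_PollForTransfer(&hdma_example, HAL_DMA_FULL_TRANSFER, 100U);
}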
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_PollForTransfer(DMA_HandleTypeDef *hdma, HAL_DMA_LevelCompleteTypeDef CompleteLevel, uint32_t Timeout) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t mask_cpltlevel; + uint32_t tickstart = HAL_GetTick(); + uint32_t tmpisr; + + /* calculate DMA base and stream number */ + DMA_Base_Registers *regs; + + if(HAL_DMA_STATE_BUSY != hdma->State) + { + /* No transfer ongoing */ + hdma->ErrorCode = HAL_DMA_ERROR_NO_XFER; + __HAL_UNLOCK(hdma); + return HAL_ERROR; + } + + /* Polling mode not supported in circular mode and double buffering mode */ + if ((hdma->Instance->CR & DMA_SxCR_CIRC) != RESET) + { + hdma->ErrorCode = HAL_DMA_ERROR_NOT_SUPPORTED; + return HAL_ERROR; + } + + /* Get the level transfer complete flag */ + if(CompleteLevel == HAL_DMA_FULL_TRANSFER) + { + /* Transfer Complete flag */ + mask_cpltlevel = DMA_FLAG_TCIF0_4 << hdma->StreamIndex; + } + else + { + /* Half Transfer Complete flag */ + mask_cpltlevel = DMA_FLAG_HTIF0_4 << hdma->StreamIndex; + } + + regs = (DMA_Base_Registers *)hdma->StreamBaseAddress; + tmpisr = regs->ISR; + + while(((tmpisr & mask_cpltlevel) == RESET) && ((hdma->ErrorCode & HAL_DMA_ERROR_TE) == RESET)) + { + /* Check for the Timeout (Not applicable in circular mode)*/ + if(Timeout != HAL_MAX_DELAY) + { + if((Timeout == 0U)||((HAL_GetTick() - tickstart ) > Timeout)) + { + /* Update error code */ + hdma->ErrorCode = HAL_DMA_ERROR_TIMEOUT; + + /* Change the DMA state */ + hdma->State = HAL_DMA_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + + return HAL_TIMEOUT; + } + } + + /* Get the ISR register value */ + tmpisr = regs->ISR; + + if((tmpisr & (DMA_FLAG_TEIF0_4 << hdma->StreamIndex)) != RESET) + { + /* Update error code */ + hdma->ErrorCode |= HAL_DMA_ERROR_TE; + + /* Clear the transfer error flag */ + regs->IFCR = DMA_FLAG_TEIF0_4 << hdma->StreamIndex; + } + + if((tmpisr & (DMA_FLAG_FEIF0_4 << hdma->StreamIndex)) != RESET) + { + /* Update error code */ + hdma->ErrorCode |= HAL_DMA_ERROR_FE; + + /* Clear the FIFO error flag */ + regs->IFCR = DMA_FLAG_FEIF0_4 << hdma->StreamIndex; + } + + if((tmpisr & (DMA_FLAG_DMEIF0_4 << hdma->StreamIndex)) != RESET) + { + /* Update error code */ + hdma->ErrorCode |= HAL_DMA_ERROR_DME; + + /* Clear the Direct Mode error flag */ + regs->IFCR = DMA_FLAG_DMEIF0_4 << hdma->StreamIndex; + } + } + + if(hdma->ErrorCode != HAL_DMA_ERROR_NONE) + { + if((hdma->ErrorCode & HAL_DMA_ERROR_TE) != RESET) + { + HAL_DMA_Abort(hdma); + + /* Clear the half transfer and transfer complete flags */ + regs->IFCR = (DMA_FLAG_HTIF0_4 | DMA_FLAG_TCIF0_4) << hdma->StreamIndex; + + /* Change the DMA state */ + hdma->State= HAL_DMA_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + + return HAL_ERROR; + } + } + + /* Get the level transfer complete flag */ + if(CompleteLevel == HAL_DMA_FULL_TRANSFER) + { + /* Clear the half transfer and transfer complete flags */ + regs->IFCR = (DMA_FLAG_HTIF0_4 | DMA_FLAG_TCIF0_4) << hdma->StreamIndex; + + hdma->State = HAL_DMA_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + } + else + { + /* Clear the half transfer and transfer complete flags */ + regs->IFCR = (DMA_FLAG_HTIF0_4) << hdma->StreamIndex; + } + + return status; +} + +/** + * @brief Handles DMA interrupt request. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. 
+ * @retval None + */ +void HAL_DMA_IRQHandler(DMA_HandleTypeDef *hdma) +{ + uint32_t tmpisr; + __IO uint32_t count = 0U; + uint32_t timeout = SystemCoreClock / 9600U; + + /* calculate DMA base and stream number */ + DMA_Base_Registers *regs = (DMA_Base_Registers *)hdma->StreamBaseAddress; + + tmpisr = regs->ISR; + + /* Transfer Error Interrupt management ***************************************/ + if ((tmpisr & (DMA_FLAG_TEIF0_4 << hdma->StreamIndex)) != RESET) + { + if(__HAL_DMA_GET_IT_SOURCE(hdma, DMA_IT_TE) != RESET) + { + /* Disable the transfer error interrupt */ + hdma->Instance->CR &= ~(DMA_IT_TE); + + /* Clear the transfer error flag */ + regs->IFCR = DMA_FLAG_TEIF0_4 << hdma->StreamIndex; + + /* Update error code */ + hdma->ErrorCode |= HAL_DMA_ERROR_TE; + } + } + /* FIFO Error Interrupt management ******************************************/ + if ((tmpisr & (DMA_FLAG_FEIF0_4 << hdma->StreamIndex)) != RESET) + { + if(__HAL_DMA_GET_IT_SOURCE(hdma, DMA_IT_FE) != RESET) + { + /* Clear the FIFO error flag */ + regs->IFCR = DMA_FLAG_FEIF0_4 << hdma->StreamIndex; + + /* Update error code */ + hdma->ErrorCode |= HAL_DMA_ERROR_FE; + } + } + /* Direct Mode Error Interrupt management ***********************************/ + if ((tmpisr & (DMA_FLAG_DMEIF0_4 << hdma->StreamIndex)) != RESET) + { + if(__HAL_DMA_GET_IT_SOURCE(hdma, DMA_IT_DME) != RESET) + { + /* Clear the direct mode error flag */ + regs->IFCR = DMA_FLAG_DMEIF0_4 << hdma->StreamIndex; + + /* Update error code */ + hdma->ErrorCode |= HAL_DMA_ERROR_DME; + } + } + /* Half Transfer Complete Interrupt management ******************************/ + if ((tmpisr & (DMA_FLAG_HTIF0_4 << hdma->StreamIndex)) != RESET) + { + if(__HAL_DMA_GET_IT_SOURCE(hdma, DMA_IT_HT) != RESET) + { + /* Clear the half transfer complete flag */ + regs->IFCR = DMA_FLAG_HTIF0_4 << hdma->StreamIndex; + + /* Multi_Buffering mode enabled */ + if(((hdma->Instance->CR) & (uint32_t)(DMA_SxCR_DBM)) != RESET) + { + /* Current memory buffer used is Memory 0 */ + if((hdma->Instance->CR & DMA_SxCR_CT) == RESET) + { + if(hdma->XferHalfCpltCallback != NULL) + { + /* Half transfer callback */ + hdma->XferHalfCpltCallback(hdma); + } + } + /* Current memory buffer used is Memory 1 */ + else + { + if(hdma->XferM1HalfCpltCallback != NULL) + { + /* Half transfer callback */ + hdma->XferM1HalfCpltCallback(hdma); + } + } + } + else + { + /* Disable the half transfer interrupt if the DMA mode is not CIRCULAR */ + if((hdma->Instance->CR & DMA_SxCR_CIRC) == RESET) + { + /* Disable the half transfer interrupt */ + hdma->Instance->CR &= ~(DMA_IT_HT); + } + + if(hdma->XferHalfCpltCallback != NULL) + { + /* Half transfer callback */ + hdma->XferHalfCpltCallback(hdma); + } + } + } + } + /* Transfer Complete Interrupt management ***********************************/ + if ((tmpisr & (DMA_FLAG_TCIF0_4 << hdma->StreamIndex)) != RESET) + { + if(__HAL_DMA_GET_IT_SOURCE(hdma, DMA_IT_TC) != RESET) + { + /* Clear the transfer complete flag */ + regs->IFCR = DMA_FLAG_TCIF0_4 << hdma->StreamIndex; + + if(HAL_DMA_STATE_ABORT == hdma->State) + { + /* Disable all the transfer interrupts */ + hdma->Instance->CR &= ~(DMA_IT_TC | DMA_IT_TE | DMA_IT_DME); + hdma->Instance->FCR &= ~(DMA_IT_FE); + + if((hdma->XferHalfCpltCallback != NULL) || (hdma->XferM1HalfCpltCallback != NULL)) + { + hdma->Instance->CR &= ~(DMA_IT_HT); + } + + /* Clear all interrupt flags at correct offset within the register */ + regs->IFCR = 0x3FU << hdma->StreamIndex; + + /* Change the DMA state */ + hdma->State = HAL_DMA_STATE_READY; + + /* 
Process Unlocked */ + __HAL_UNLOCK(hdma); + + if(hdma->XferAbortCallback != NULL) + { + hdma->XferAbortCallback(hdma); + } + return; + } + + if(((hdma->Instance->CR) & (uint32_t)(DMA_SxCR_DBM)) != RESET) + { + /* Current memory buffer used is Memory 0 */ + if((hdma->Instance->CR & DMA_SxCR_CT) == RESET) + { + if(hdma->XferM1CpltCallback != NULL) + { + /* Transfer complete Callback for memory1 */ + hdma->XferM1CpltCallback(hdma); + } + } + /* Current memory buffer used is Memory 1 */ + else + { + if(hdma->XferCpltCallback != NULL) + { + /* Transfer complete Callback for memory0 */ + hdma->XferCpltCallback(hdma); + } + } + } + /* Disable the transfer complete interrupt if the DMA mode is not CIRCULAR */ + else + { + if((hdma->Instance->CR & DMA_SxCR_CIRC) == RESET) + { + /* Disable the transfer complete interrupt */ + hdma->Instance->CR &= ~(DMA_IT_TC); + + /* Change the DMA state */ + hdma->State = HAL_DMA_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + } + + if(hdma->XferCpltCallback != NULL) + { + /* Transfer complete callback */ + hdma->XferCpltCallback(hdma); + } + } + } + } + + /* manage error case */ + if(hdma->ErrorCode != HAL_DMA_ERROR_NONE) + { + if((hdma->ErrorCode & HAL_DMA_ERROR_TE) != RESET) + { + hdma->State = HAL_DMA_STATE_ABORT; + + /* Disable the stream */ + __HAL_DMA_DISABLE(hdma); + + do + { + if (++count > timeout) + { + break; + } + } + while((hdma->Instance->CR & DMA_SxCR_EN) != RESET); + + /* Change the DMA state */ + hdma->State = HAL_DMA_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + } + + if(hdma->XferErrorCallback != NULL) + { + /* Transfer error callback */ + hdma->XferErrorCallback(hdma); + } + } +} + +/** + * @brief Register callbacks + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @param CallbackID User Callback identifier + * a DMA_HandleTypeDef structure as parameter. + * @param pCallback pointer to private callback function which has pointer to + * a DMA_HandleTypeDef structure as parameter. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_RegisterCallback(DMA_HandleTypeDef *hdma, HAL_DMA_CallbackIDTypeDef CallbackID, void (* pCallback)(DMA_HandleTypeDef *_hdma)) +{ + + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hdma); + + if(HAL_DMA_STATE_READY == hdma->State) + { + switch (CallbackID) + { + case HAL_DMA_XFER_CPLT_CB_ID: + hdma->XferCpltCallback = pCallback; + break; + + case HAL_DMA_XFER_HALFCPLT_CB_ID: + hdma->XferHalfCpltCallback = pCallback; + break; + + case HAL_DMA_XFER_M1CPLT_CB_ID: + hdma->XferM1CpltCallback = pCallback; + break; + + case HAL_DMA_XFER_M1HALFCPLT_CB_ID: + hdma->XferM1HalfCpltCallback = pCallback; + break; + + case HAL_DMA_XFER_ERROR_CB_ID: + hdma->XferErrorCallback = pCallback; + break; + + case HAL_DMA_XFER_ABORT_CB_ID: + hdma->XferAbortCallback = pCallback; + break; + + default: + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hdma); + + return status; +} + +/** + * @brief UnRegister callbacks + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @param CallbackID User Callback identifier + * a HAL_DMA_CallbackIDTypeDef ENUM as parameter. 
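 * @note  Both HAL_DMA_RegisterCallback() and HAL_DMA_UnRegisterCallback() only
 *        succeed while the stream is in HAL_DMA_STATE_READY. A minimal pairing
 *        sketch (the callback and handle names are hypothetical):
 * @code
 *        void MyXferCpltCallback(DMA_HandleTypeDef *hdma);
 *
 *        HAL_DMA_RegisterCallback(&hdma_example, HAL_DMA_XFER_CPLT_CB_ID, MyXferCpltCallback);
 *        ...
 *        HAL_DMA_UnRegisterCallback(&hdma_example, HAL_DMA_XFER_CPLT_CB_ID);
 * @endcode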
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_UnRegisterCallback(DMA_HandleTypeDef *hdma, HAL_DMA_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hdma); + + if(HAL_DMA_STATE_READY == hdma->State) + { + switch (CallbackID) + { + case HAL_DMA_XFER_CPLT_CB_ID: + hdma->XferCpltCallback = NULL; + break; + + case HAL_DMA_XFER_HALFCPLT_CB_ID: + hdma->XferHalfCpltCallback = NULL; + break; + + case HAL_DMA_XFER_M1CPLT_CB_ID: + hdma->XferM1CpltCallback = NULL; + break; + + case HAL_DMA_XFER_M1HALFCPLT_CB_ID: + hdma->XferM1HalfCpltCallback = NULL; + break; + + case HAL_DMA_XFER_ERROR_CB_ID: + hdma->XferErrorCallback = NULL; + break; + + case HAL_DMA_XFER_ABORT_CB_ID: + hdma->XferAbortCallback = NULL; + break; + + case HAL_DMA_XFER_ALL_CB_ID: + hdma->XferCpltCallback = NULL; + hdma->XferHalfCpltCallback = NULL; + hdma->XferM1CpltCallback = NULL; + hdma->XferM1HalfCpltCallback = NULL; + hdma->XferErrorCallback = NULL; + hdma->XferAbortCallback = NULL; + break; + + default: + status = HAL_ERROR; + break; + } + } + else + { + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hdma); + + return status; +} + +/** + * @} + */ + +/** @addtogroup DMA_Exported_Functions_Group3 + * +@verbatim + =============================================================================== + ##### State and Errors functions ##### + =============================================================================== + [..] + This subsection provides functions allowing to + (+) Check the DMA state + (+) Get error code + +@endverbatim + * @{ + */ + +/** + * @brief Returns the DMA state. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @retval HAL state + */ +HAL_DMA_StateTypeDef HAL_DMA_GetState(DMA_HandleTypeDef *hdma) +{ + return hdma->State; +} + +/** + * @brief Return the DMA error code + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @retval DMA Error Code + */ +uint32_t HAL_DMA_GetError(DMA_HandleTypeDef *hdma) +{ + return hdma->ErrorCode; +} + +/** + * @} + */ + +/** + * @} + */ + +/** @addtogroup DMA_Private_Functions + * @{ + */ + +/** + * @brief Sets the DMA Transfer parameter. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. 
+ * @param SrcAddress The source memory Buffer address + * @param DstAddress The destination memory Buffer address + * @param DataLength The length of data to be transferred from source to destination + * @retval HAL status + */ +static void DMA_SetConfig(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength) +{ + /* Clear DBM bit */ + hdma->Instance->CR &= (uint32_t)(~DMA_SxCR_DBM); + + /* Configure DMA Stream data length */ + hdma->Instance->NDTR = DataLength; + + /* Memory to Peripheral */ + if((hdma->Init.Direction) == DMA_MEMORY_TO_PERIPH) + { + /* Configure DMA Stream destination address */ + hdma->Instance->PAR = DstAddress; + + /* Configure DMA Stream source address */ + hdma->Instance->M0AR = SrcAddress; + } + /* Peripheral to Memory */ + else + { + /* Configure DMA Stream source address */ + hdma->Instance->PAR = SrcAddress; + + /* Configure DMA Stream destination address */ + hdma->Instance->M0AR = DstAddress; + } +} + +/** + * @brief Returns the DMA Stream base address depending on stream number + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @retval Stream base address + */ +static uint32_t DMA_CalcBaseAndBitshift(DMA_HandleTypeDef *hdma) +{ + uint32_t stream_number = (((uint32_t)hdma->Instance & 0xFFU) - 16U) / 24U; + + /* lookup table for necessary bitshift of flags within status registers */ + static const uint8_t flagBitshiftOffset[8U] = {0U, 6U, 16U, 22U, 0U, 6U, 16U, 22U}; + hdma->StreamIndex = flagBitshiftOffset[stream_number]; + + if (stream_number > 3U) + { + /* return pointer to HISR and HIFCR */ + hdma->StreamBaseAddress = (((uint32_t)hdma->Instance & (uint32_t)(~0x3FFU)) + 4U); + } + else + { + /* return pointer to LISR and LIFCR */ + hdma->StreamBaseAddress = ((uint32_t)hdma->Instance & (uint32_t)(~0x3FFU)); + } + + return hdma->StreamBaseAddress; +} + +/** + * @brief Check compatibility between FIFO threshold level and size of the memory burst + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. 
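 * @note  The check rejects FIFO threshold / memory burst combinations that the
 *        stream FIFO cannot serve. As an illustrative example (the handle name and
 *        field values are hypothetical), the following configuration passes the check:
 * @code
 *        hdma_example.Init.FIFOMode         = DMA_FIFOMODE_ENABLE;
 *        hdma_example.Init.FIFOThreshold    = DMA_FIFO_THRESHOLD_FULL;
 *        hdma_example.Init.MemDataAlignment = DMA_MDATAALIGN_BYTE;
 *        hdma_example.Init.MemBurst         = DMA_MBURST_INC4;
 * @endcode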
+ * @retval HAL status + */ +static HAL_StatusTypeDef DMA_CheckFifoParam(DMA_HandleTypeDef *hdma) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmp = hdma->Init.FIFOThreshold; + + /* Memory Data size equal to Byte */ + if(hdma->Init.MemDataAlignment == DMA_MDATAALIGN_BYTE) + { + switch (tmp) + { + case DMA_FIFO_THRESHOLD_1QUARTERFULL: + case DMA_FIFO_THRESHOLD_3QUARTERSFULL: + if ((hdma->Init.MemBurst & DMA_SxCR_MBURST_1) == DMA_SxCR_MBURST_1) + { + status = HAL_ERROR; + } + break; + case DMA_FIFO_THRESHOLD_HALFFULL: + if (hdma->Init.MemBurst == DMA_MBURST_INC16) + { + status = HAL_ERROR; + } + break; + case DMA_FIFO_THRESHOLD_FULL: + break; + default: + break; + } + } + + /* Memory Data size equal to Half-Word */ + else if (hdma->Init.MemDataAlignment == DMA_MDATAALIGN_HALFWORD) + { + switch (tmp) + { + case DMA_FIFO_THRESHOLD_1QUARTERFULL: + case DMA_FIFO_THRESHOLD_3QUARTERSFULL: + status = HAL_ERROR; + break; + case DMA_FIFO_THRESHOLD_HALFFULL: + if ((hdma->Init.MemBurst & DMA_SxCR_MBURST_1) == DMA_SxCR_MBURST_1) + { + status = HAL_ERROR; + } + break; + case DMA_FIFO_THRESHOLD_FULL: + if (hdma->Init.MemBurst == DMA_MBURST_INC16) + { + status = HAL_ERROR; + } + break; + default: + break; + } + } + + /* Memory Data size equal to Word */ + else + { + switch (tmp) + { + case DMA_FIFO_THRESHOLD_1QUARTERFULL: + case DMA_FIFO_THRESHOLD_HALFFULL: + case DMA_FIFO_THRESHOLD_3QUARTERSFULL: + status = HAL_ERROR; + break; + case DMA_FIFO_THRESHOLD_FULL: + if ((hdma->Init.MemBurst & DMA_SxCR_MBURST_1) == DMA_SxCR_MBURST_1) + { + status = HAL_ERROR; + } + break; + default: + break; + } + } + + return status; +} + +/** + * @} + */ + +#endif /* HAL_DMA_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dma_ex.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dma_ex.c new file mode 100644 index 0000000..7167e77 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dma_ex.c @@ -0,0 +1,313 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_dma_ex.c + * @author MCD Application Team + * @brief DMA Extension HAL module driver + * This file provides firmware functions to manage the following + * functionalities of the DMA Extension peripheral: + * + Extended features functions + * + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + The DMA Extension HAL driver can be used as follows: + (#) Start a multi buffer transfer using the HAL_DMA_MultiBufferStart() function + for polling mode or HAL_DMA_MultiBufferStart_IT() for interrupt mode. + + -@- In Memory-to-Memory transfer mode, Multi (Double) Buffer mode is not allowed. + -@- When Multi (Double) Buffer mode is enabled the, transfer is circular by default. + -@- In Multi (Double) buffer mode, it is possible to update the base address for + the AHB memory port on the fly (DMA_SxM0AR or DMA_SxM1AR) when the stream is enabled. + + @endverbatim + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @defgroup DMAEx DMAEx + * @brief DMA Extended HAL module driver + * @{ + */ + +#ifdef HAL_DMA_MODULE_ENABLED + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private Constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/** @addtogroup DMAEx_Private_Functions + * @{ + */ +static void DMA_MultiBufferSetConfig(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength); +/** + * @} + */ + +/* Exported functions ---------------------------------------------------------*/ + +/** @addtogroup DMAEx_Exported_Functions + * @{ + */ + + +/** @addtogroup DMAEx_Exported_Functions_Group1 + * +@verbatim + =============================================================================== + ##### Extended features functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Configure the source, destination address and data length and + Start MultiBuffer DMA transfer + (+) Configure the source, destination address and data length and + Start MultiBuffer DMA transfer with interrupt + (+) Change on the fly the memory0 or memory1 address. + +@endverbatim + * @{ + */ + + +/** + * @brief Starts the multi_buffer DMA Transfer. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @param SrcAddress The source memory Buffer address + * @param DstAddress The destination memory Buffer address + * @param SecondMemAddress The second memory Buffer address in case of multi buffer Transfer + * @param DataLength The length of data to be transferred from source to destination + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMAEx_MultiBufferStart(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t SecondMemAddress, uint32_t DataLength) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_DMA_BUFFER_SIZE(DataLength)); + + /* Memory-to-memory transfer not supported in double buffering mode */ + if (hdma->Init.Direction == DMA_MEMORY_TO_MEMORY) + { + hdma->ErrorCode = HAL_DMA_ERROR_NOT_SUPPORTED; + status = HAL_ERROR; + } + else + { + /* Process Locked */ + __HAL_LOCK(hdma); + + if(HAL_DMA_STATE_READY == hdma->State) + { + /* Change DMA peripheral state */ + hdma->State = HAL_DMA_STATE_BUSY; + + /* Enable the double buffer mode */ + hdma->Instance->CR |= (uint32_t)DMA_SxCR_DBM; + + /* Configure DMA Stream destination address */ + hdma->Instance->M1AR = SecondMemAddress; + + /* Configure the source, destination address and the data length */ + DMA_MultiBufferSetConfig(hdma, SrcAddress, DstAddress, DataLength); + + /* Enable the peripheral */ + __HAL_DMA_ENABLE(hdma); + } + else + { + /* Return error status */ + status = HAL_BUSY; + } + } + return status; +} + +/** + * @brief Starts the multi_buffer DMA Transfer with interrupt enabled. 
+ * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @param SrcAddress The source memory Buffer address + * @param DstAddress The destination memory Buffer address + * @param SecondMemAddress The second memory Buffer address in case of multi buffer Transfer + * @param DataLength The length of data to be transferred from source to destination + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMAEx_MultiBufferStart_IT(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t SecondMemAddress, uint32_t DataLength) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_DMA_BUFFER_SIZE(DataLength)); + + /* Memory-to-memory transfer not supported in double buffering mode */ + if (hdma->Init.Direction == DMA_MEMORY_TO_MEMORY) + { + hdma->ErrorCode = HAL_DMA_ERROR_NOT_SUPPORTED; + return HAL_ERROR; + } + + /* Check callback functions */ + if ((NULL == hdma->XferCpltCallback) || (NULL == hdma->XferM1CpltCallback) || (NULL == hdma->XferErrorCallback)) + { + hdma->ErrorCode = HAL_DMA_ERROR_PARAM; + return HAL_ERROR; + } + + /* Process locked */ + __HAL_LOCK(hdma); + + if(HAL_DMA_STATE_READY == hdma->State) + { + /* Change DMA peripheral state */ + hdma->State = HAL_DMA_STATE_BUSY; + + /* Initialize the error code */ + hdma->ErrorCode = HAL_DMA_ERROR_NONE; + + /* Enable the Double buffer mode */ + hdma->Instance->CR |= (uint32_t)DMA_SxCR_DBM; + + /* Configure DMA Stream destination address */ + hdma->Instance->M1AR = SecondMemAddress; + + /* Configure the source, destination address and the data length */ + DMA_MultiBufferSetConfig(hdma, SrcAddress, DstAddress, DataLength); + + /* Clear all flags */ + __HAL_DMA_CLEAR_FLAG (hdma, __HAL_DMA_GET_TC_FLAG_INDEX(hdma)); + __HAL_DMA_CLEAR_FLAG (hdma, __HAL_DMA_GET_HT_FLAG_INDEX(hdma)); + __HAL_DMA_CLEAR_FLAG (hdma, __HAL_DMA_GET_TE_FLAG_INDEX(hdma)); + __HAL_DMA_CLEAR_FLAG (hdma, __HAL_DMA_GET_DME_FLAG_INDEX(hdma)); + __HAL_DMA_CLEAR_FLAG (hdma, __HAL_DMA_GET_FE_FLAG_INDEX(hdma)); + + /* Enable Common interrupts*/ + hdma->Instance->CR |= DMA_IT_TC | DMA_IT_TE | DMA_IT_DME; + hdma->Instance->FCR |= DMA_IT_FE; + + if((hdma->XferHalfCpltCallback != NULL) || (hdma->XferM1HalfCpltCallback != NULL)) + { + hdma->Instance->CR |= DMA_IT_HT; + } + + /* Enable the peripheral */ + __HAL_DMA_ENABLE(hdma); + } + else + { + /* Process unlocked */ + __HAL_UNLOCK(hdma); + + /* Return error status */ + status = HAL_BUSY; + } + return status; +} + +/** + * @brief Change the memory0 or memory1 address on the fly. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @param Address The new address + * @param memory the memory to be changed, This parameter can be one of + * the following values: + * MEMORY0 / + * MEMORY1 + * @note The MEMORY0 address can be changed only when the current transfer use + * MEMORY1 and the MEMORY1 address can be changed only when the current + * transfer use MEMORY0. 
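 * @note  A minimal double-buffer sketch (the handle, peripheral address, buffers and
 *        length names are hypothetical), assuming XferCpltCallback, XferM1CpltCallback
 *        and XferErrorCallback were registered beforehand as required by
 *        HAL_DMAEx_MultiBufferStart_IT():
 * @code
 *        HAL_DMAEx_MultiBufferStart_IT(&hdma_example, (uint32_t)PERIPH_ADDR,
 *                                      (uint32_t)buf0, (uint32_t)buf1, BUF_LEN);
 *
 *        // while the stream fills buf1 (memory 1), memory 0 may be re-targeted:
 *        HAL_DMAEx_ChangeMemory(&hdma_example, (uint32_t)buf0_next, MEMORY0);
 * @endcode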
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMAEx_ChangeMemory(DMA_HandleTypeDef *hdma, uint32_t Address, HAL_DMA_MemoryTypeDef memory) +{ + if(memory == MEMORY0) + { + /* change the memory0 address */ + hdma->Instance->M0AR = Address; + } + else + { + /* change the memory1 address */ + hdma->Instance->M1AR = Address; + } + + return HAL_OK; +} + +/** + * @} + */ + +/** + * @} + */ + +/** @addtogroup DMAEx_Private_Functions + * @{ + */ + +/** + * @brief Set the DMA Transfer parameter. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @param SrcAddress The source memory Buffer address + * @param DstAddress The destination memory Buffer address + * @param DataLength The length of data to be transferred from source to destination + * @retval HAL status + */ +static void DMA_MultiBufferSetConfig(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength) +{ + /* Configure DMA Stream data length */ + hdma->Instance->NDTR = DataLength; + + /* Peripheral to Memory */ + if((hdma->Init.Direction) == DMA_MEMORY_TO_PERIPH) + { + /* Configure DMA Stream destination address */ + hdma->Instance->PAR = DstAddress; + + /* Configure DMA Stream source address */ + hdma->Instance->M0AR = SrcAddress; + } + /* Memory to Peripheral */ + else + { + /* Configure DMA Stream source address */ + hdma->Instance->PAR = SrcAddress; + + /* Configure DMA Stream destination address */ + hdma->Instance->M0AR = DstAddress; + } +} + +/** + * @} + */ + +#endif /* HAL_DMA_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_eth.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_eth.c new file mode 100644 index 0000000..f1a00e3 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_eth.c @@ -0,0 +1,3238 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_eth.c + * @author MCD Application Team + * @brief ETH HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Ethernet (ETH) peripheral: + * + Initialization and deinitialization functions + * + IO operation functions + * + Peripheral Control functions + * + Peripheral State and Errors functions + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + The ETH HAL driver can be used as follows: + + (#)Declare a ETH_HandleTypeDef handle structure, for example: + ETH_HandleTypeDef heth; + + (#)Fill parameters of Init structure in heth handle + + (#)Call HAL_ETH_Init() API to initialize the Ethernet peripheral (MAC, DMA, ...) 
+
+  (#)Initialize the ETH low level resources through the HAL_ETH_MspInit() API:
+      (##) Enable the Ethernet interface clock using
+            (+++)  __HAL_RCC_ETH1MAC_CLK_ENABLE()
+            (+++)  __HAL_RCC_ETH1TX_CLK_ENABLE()
+            (+++)  __HAL_RCC_ETH1RX_CLK_ENABLE()
+
+      (##) Initialize the related GPIO clocks
+      (##) Configure Ethernet pinout
+      (##) Configure Ethernet NVIC interrupt (in Interrupt mode)
+
+  (#) Ethernet data reception is asynchronous, so call the following API
+      to start the listening mode:
+      (##) HAL_ETH_Start():
+           This API starts the MAC and DMA transmission and reception process,
+           without enabling end of transfer interrupts; in this mode the user
+           has to poll for data reception by calling HAL_ETH_ReadData()
+      (##) HAL_ETH_Start_IT():
+           This API starts the MAC and DMA transmission and reception process,
+           end of transfer interrupts are enabled in this mode,
+           HAL_ETH_RxCpltCallback() will be executed when an Ethernet packet is received
+
+  (#) When data is received, the user can call the following API to get the received data:
+      (##) HAL_ETH_ReadData(): Read a received packet
+
+  (#) For the transmission path, two APIs are available:
+      (##) HAL_ETH_Transmit(): Transmit an ETH frame in blocking mode
+      (##) HAL_ETH_Transmit_IT(): Transmit an ETH frame in interrupt mode,
+           HAL_ETH_TxCpltCallback() will be executed when the end of transfer occurs
+
+  (#) Communication with an external PHY device:
+      (##) HAL_ETH_ReadPHYRegister(): Read a register from an external PHY
+      (##) HAL_ETH_WritePHYRegister(): Write data to an external PHY register
+
+  (#) Configure the Ethernet MAC after ETH peripheral initialization
+      (##) HAL_ETH_GetMACConfig(): Get the actual MAC configuration into ETH_MACConfigTypeDef
+      (##) HAL_ETH_SetMACConfig(): Set the MAC configuration based on ETH_MACConfigTypeDef
+
+  (#) Configure the Ethernet DMA after ETH peripheral initialization
+      (##) HAL_ETH_GetDMAConfig(): Get the actual DMA configuration into ETH_DMAConfigTypeDef
+      (##) HAL_ETH_SetDMAConfig(): Set the DMA configuration based on ETH_DMAConfigTypeDef
+
+  (#) Configure the Ethernet PTP after ETH peripheral initialization
+      (##) Define HAL_ETH_USE_PTP to use the PTP APIs.
+      (##) HAL_ETH_PTP_GetConfig(): Get the actual PTP configuration into ETH_PTP_ConfigTypeDef
+      (##) HAL_ETH_PTP_SetConfig(): Set the PTP configuration based on ETH_PTP_ConfigTypeDef
+      (##) HAL_ETH_PTP_GetTime(): Get Seconds and Nanoseconds from the Ethernet PTP registers
+      (##) HAL_ETH_PTP_SetTime(): Set Seconds and Nanoseconds in the Ethernet PTP registers
+      (##) HAL_ETH_PTP_AddTimeOffset(): Add a Seconds and Nanoseconds offset to the Ethernet PTP registers
+      (##) HAL_ETH_PTP_InsertTxTimestamp(): Insert a Timestamp in transmission
+      (##) HAL_ETH_PTP_GetTxTimestamp(): Get the transmission timestamp
+      (##) HAL_ETH_PTP_GetRxTimestamp(): Get the reception timestamp
+
+  -@- The ARP offload feature is not supported in this driver.
+
+  -@- The PTP offload feature is not supported in this driver.
+
+  *** Callback registration ***
+  =============================================
+
+  The compilation define USE_HAL_ETH_REGISTER_CALLBACKS, when set to 1,
+  allows the user to configure the driver callbacks dynamically.
+  Use function HAL_ETH_RegisterCallback() to register an interrupt callback.
+
+  Function HAL_ETH_RegisterCallback() allows registering the following callbacks:
+    (+) TxCpltCallback   : Tx Complete Callback.
+    (+) RxCpltCallback   : Rx Complete Callback.
+    (+) ErrorCallback    : Error Callback.
+    (+) PMTCallback      : Power Management Callback.
+    (+) EEECallback      : EEE Callback.
+    (+) WakeUpCallback   : Wake Up Callback.
+    (+) MspInitCallback  : MspInit Callback.
+    (+) MspDeInitCallback: MspDeInit Callback.
+
+  This function takes as parameters the HAL peripheral handle, the Callback ID
+  and a pointer to the user callback function.
+
+  The RxAllocateCallback, RxLinkCallback, TxFreeCallback and TxPtpCallback
+  callbacks use dedicated register functions: HAL_ETH_RegisterRxAllocateCallback(),
+  HAL_ETH_RegisterRxLinkCallback(), HAL_ETH_RegisterTxFreeCallback() and
+  HAL_ETH_RegisterTxPtpCallback() respectively.
+
+  Use function HAL_ETH_UnRegisterCallback() to reset a callback to the default
+  weak function.
+  HAL_ETH_UnRegisterCallback() takes as parameters the HAL peripheral handle
+  and the Callback ID.
+  This function allows resetting the following callbacks:
+    (+) TxCpltCallback   : Tx Complete Callback.
+    (+) RxCpltCallback   : Rx Complete Callback.
+    (+) ErrorCallback    : Error Callback.
+    (+) PMTCallback      : Power Management Callback.
+    (+) EEECallback      : EEE Callback.
+    (+) WakeUpCallback   : Wake Up Callback.
+    (+) MspInitCallback  : MspInit Callback.
+    (+) MspDeInitCallback: MspDeInit Callback.
+
+  The RxAllocateCallback, RxLinkCallback, TxFreeCallback and TxPtpCallback
+  callbacks use dedicated unregister functions: HAL_ETH_UnRegisterRxAllocateCallback(),
+  HAL_ETH_UnRegisterRxLinkCallback(), HAL_ETH_UnRegisterTxFreeCallback() and
+  HAL_ETH_UnRegisterTxPtpCallback() respectively.
+
+  By default, after HAL_ETH_Init() and when the state is HAL_ETH_STATE_RESET,
+  all callbacks are set to the corresponding weak functions,
+  for example HAL_ETH_TxCpltCallback() and HAL_ETH_RxCpltCallback().
+  An exception is made for the MspInit and MspDeInit callbacks, which are
+  reset to the legacy weak functions in HAL_ETH_Init()/HAL_ETH_DeInit() only when
+  these callbacks are null (not registered beforehand).
+  If MspInit or MspDeInit are not null, HAL_ETH_Init()/HAL_ETH_DeInit()
+  keep and use the user MspInit/MspDeInit callbacks (registered beforehand).
+
+  Callbacks can be registered/unregistered in the HAL_ETH_STATE_READY state only.
+  The exception is MspInit/MspDeInit, which can be registered/unregistered
+  in the HAL_ETH_STATE_READY or HAL_ETH_STATE_RESET state,
+  so that registered (user) MspInit/MspDeInit callbacks can be used during Init/DeInit.
+  In that case, first register the MspInit/MspDeInit user callbacks
+  using HAL_ETH_RegisterCallback() before calling HAL_ETH_DeInit()
+  or HAL_ETH_Init().
+
+  When the compilation define USE_HAL_ETH_REGISTER_CALLBACKS is set to 0 or
+  not defined, the callback registration feature is not available and all callbacks
+  are set to the corresponding weak functions.
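  *** Minimal usage sketch ***
  =============================================
  The sequence below is a minimal sketch only; the handle, descriptor tables,
  MAC address, buffer length and the clock/pin setup done in HAL_ETH_MspInit()
  are hypothetical placeholders and must be adapted to the target board:

    static ETH_HandleTypeDef heth;
    static ETH_DMADescTypeDef DMARxDscrTab[ETH_RX_DESC_CNT];
    static ETH_DMADescTypeDef DMATxDscrTab[ETH_TX_DESC_CNT];
    static uint8_t MACAddr[6] = {0x00, 0x80, 0xE1, 0x00, 0x00, 0x00};

    heth.Instance            = ETH;
    heth.Init.MACAddr        = MACAddr;
    heth.Init.MediaInterface = HAL_ETH_RMII_MODE;
    heth.Init.RxDesc         = DMARxDscrTab;
    heth.Init.TxDesc         = DMATxDscrTab;
    heth.Init.RxBuffLen      = 1524U;

    if (HAL_ETH_Init(&heth) == HAL_OK)   // MSP init provides clocks, GPIOs, NVIC
    {
      HAL_ETH_Start_IT(&heth);           // reception signalled via HAL_ETH_RxCpltCallback()
    }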
+ + @endverbatim + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ +#ifdef HAL_ETH_MODULE_ENABLED + +#if defined(ETH) + +/** @defgroup ETH ETH + * @brief ETH HAL module driver + * @{ + */ + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @addtogroup ETH_Private_Constants ETH Private Constants + * @{ + */ +#define ETH_MACCR_MASK 0xFFFB7F7CU +#define ETH_MACECR_MASK 0x3F077FFFU +#define ETH_MACFFR_MASK 0x800007FFU +#define ETH_MACWTR_MASK 0x0000010FU +#define ETH_MACTFCR_MASK 0xFFFF00F2U +#define ETH_MACRFCR_MASK 0x00000003U +#define ETH_MTLTQOMR_MASK 0x00000072U +#define ETH_MTLRQOMR_MASK 0x0000007BU + +#define ETH_DMAMR_MASK 0x00007802U +#define ETH_DMASBMR_MASK 0x0000D001U +#define ETH_DMACCR_MASK 0x00013FFFU +#define ETH_DMACTCR_MASK 0x003F1010U +#define ETH_DMACRCR_MASK 0x803F0000U +#define ETH_MACPMTCSR_MASK (ETH_MACPMTCSR_PD | ETH_MACPMTCSR_WFE | \ + ETH_MACPMTCSR_MPE | ETH_MACPMTCSR_GU) + +/* Timeout values */ +#define ETH_SWRESET_TIMEOUT 500U +#define ETH_MDIO_BUS_TIMEOUT 1000U + +#define ETH_DMARXDESC_ERRORS_MASK ((uint32_t)(ETH_DMARXDESC_DBE | ETH_DMARXDESC_RE | \ + ETH_DMARXDESC_OE | ETH_DMARXDESC_RWT |\ + ETH_DMARXDESC_LC | ETH_DMARXDESC_CE |\ + ETH_DMARXDESC_DE | ETH_DMARXDESC_IPV4HCE)) + +#define ETH_MAC_US_TICK 1000000U + +#define ETH_MACTSCR_MASK 0x0087FF2FU + +#define ETH_PTPTSHR_VALUE 0xFFFFFFFFU +#define ETH_PTPTSLR_VALUE 0xBB9ACA00U + +/* Ethernet MACMIIAR register Mask */ +#define ETH_MACMIIAR_CR_MASK 0xFFFFFFE3U + +/* Delay to wait when writing to some Ethernet registers */ +#define ETH_REG_WRITE_DELAY 0x00000001U + +/* ETHERNET MACCR register Mask */ +#define ETH_MACCR_CLEAR_MASK 0xFD20810FU + +/* ETHERNET MACFCR register Mask */ +#define ETH_MACFCR_CLEAR_MASK 0x0000FF41U + +/* ETHERNET DMAOMR register Mask */ +#define ETH_DMAOMR_CLEAR_MASK 0xF8DE3F23U + +/* ETHERNET MAC address offsets */ +#define ETH_MAC_ADDR_HBASE (uint32_t)(ETH_MAC_BASE + 0x40U) /* ETHERNET MAC address high offset */ +#define ETH_MAC_ADDR_LBASE (uint32_t)(ETH_MAC_BASE + 0x44U) /* ETHERNET MAC address low offset */ + +/* ETHERNET DMA Rx descriptors Frame length Shift */ +#define ETH_DMARXDESC_FRAMELENGTHSHIFT 16U +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup ETH_Private_Macros ETH Private Macros + * @{ + */ +/* Helper macros for TX descriptor handling */ +#define INCR_TX_DESC_INDEX(inx, offset) do {\ + (inx) += (offset);\ + if ((inx) >= (uint32_t)ETH_TX_DESC_CNT){\ + (inx) = ((inx) - (uint32_t)ETH_TX_DESC_CNT);}\ + } while (0) + +/* Helper macros for RX descriptor handling */ +#define INCR_RX_DESC_INDEX(inx, offset) do {\ + (inx) += (offset);\ + if ((inx) >= (uint32_t)ETH_RX_DESC_CNT){\ + (inx) = ((inx) - (uint32_t)ETH_RX_DESC_CNT);}\ + } while (0) +/** + * @} + */ +/* Private function prototypes -----------------------------------------------*/ +/** @defgroup ETH_Private_Functions ETH Private Functions + * @{ + */ +static void ETH_SetMACConfig(ETH_HandleTypeDef *heth, const ETH_MACConfigTypeDef *macconf); +static void ETH_SetDMAConfig(ETH_HandleTypeDef *heth, const ETH_DMAConfigTypeDef *dmaconf); +static void ETH_MACDMAConfig(ETH_HandleTypeDef *heth); +static void ETH_DMATxDescListInit(ETH_HandleTypeDef *heth); 
+static void ETH_DMARxDescListInit(ETH_HandleTypeDef *heth); +static uint32_t ETH_Prepare_Tx_Descriptors(ETH_HandleTypeDef *heth, const ETH_TxPacketConfigTypeDef *pTxConfig, + uint32_t ItMode); +static void ETH_UpdateDescriptor(ETH_HandleTypeDef *heth); +static void ETH_FlushTransmitFIFO(ETH_HandleTypeDef *heth); +static void ETH_MACAddressConfig(ETH_HandleTypeDef *heth, uint32_t MacAddr, uint8_t *Addr); + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) +static void ETH_InitCallbacksToDefault(ETH_HandleTypeDef *heth); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ +/** + * @} + */ + +/* Exported functions ---------------------------------------------------------*/ +/** @defgroup ETH_Exported_Functions ETH Exported Functions + * @{ + */ + +/** @defgroup ETH_Exported_Functions_Group1 Initialization and deinitialization functions + * @brief Initialization and Configuration functions + * +@verbatim +=============================================================================== + ##### Initialization and Configuration functions ##### + =============================================================================== + [..] This subsection provides a set of functions allowing to initialize and + deinitialize the ETH peripheral: + + (+) User must Implement HAL_ETH_MspInit() function in which he configures + all related peripherals resources (CLOCK, GPIO and NVIC ). + + (+) Call the function HAL_ETH_Init() to configure the selected device with + the selected configuration: + (++) MAC address + (++) Media interface (MII or RMII) + (++) Rx DMA Descriptors Tab + (++) Tx DMA Descriptors Tab + (++) Length of Rx Buffers + + (+) Call the function HAL_ETH_DeInit() to restore the default configuration + of the selected ETH peripheral. + +@endverbatim + * @{ + */ + +/** + * @brief Initialize the Ethernet peripheral registers. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_Init(ETH_HandleTypeDef *heth) +{ + uint32_t tickstart; + + if (heth == NULL) + { + return HAL_ERROR; + } + if (heth->gState == HAL_ETH_STATE_RESET) + { + heth->gState = HAL_ETH_STATE_BUSY; + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + + ETH_InitCallbacksToDefault(heth); + + if (heth->MspInitCallback == NULL) + { + heth->MspInitCallback = HAL_ETH_MspInit; + } + + /* Init the low level hardware */ + heth->MspInitCallback(heth); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC. 
*/ + HAL_ETH_MspInit(heth); + +#endif /* (USE_HAL_ETH_REGISTER_CALLBACKS) */ + } + + __HAL_RCC_SYSCFG_CLK_ENABLE(); + + /* Select MII or RMII Mode*/ + SYSCFG->PMC &= ~(SYSCFG_PMC_MII_RMII_SEL); + SYSCFG->PMC |= (uint32_t)heth->Init.MediaInterface; + /* Dummy read to sync SYSCFG with ETH */ + (void)SYSCFG->PMC; + + /* Ethernet Software reset */ + /* Set the SWR bit: resets all MAC subsystem internal registers and logic */ + /* After reset all the registers holds their respective reset values */ + SET_BIT(heth->Instance->DMABMR, ETH_DMABMR_SR); + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait for software reset */ + while (READ_BIT(heth->Instance->DMABMR, ETH_DMABMR_SR) > 0U) + { + if (((HAL_GetTick() - tickstart) > ETH_SWRESET_TIMEOUT)) + { + /* Set Error Code */ + heth->ErrorCode = HAL_ETH_ERROR_TIMEOUT; + /* Set State as Error */ + heth->gState = HAL_ETH_STATE_ERROR; + /* Return Error */ + return HAL_ERROR; + } + } + + + /*------------------ MAC, MTL and DMA default Configuration ----------------*/ + ETH_MACDMAConfig(heth); + + + /*------------------ DMA Tx Descriptors Configuration ----------------------*/ + ETH_DMATxDescListInit(heth); + + /*------------------ DMA Rx Descriptors Configuration ----------------------*/ + ETH_DMARxDescListInit(heth); + + /*--------------------- ETHERNET MAC Address Configuration ------------------*/ + ETH_MACAddressConfig(heth, ETH_MAC_ADDRESS0, heth->Init.MACAddr); + + /* Disable MMC Interrupts */ + SET_BIT(heth->Instance->MACIMR, ETH_MACIMR_TSTIM | ETH_MACIMR_PMTIM); + + /* Disable Rx MMC Interrupts */ + SET_BIT(heth->Instance->MMCRIMR, ETH_MMCRIMR_RGUFM | ETH_MMCRIMR_RFAEM | \ + ETH_MMCRIMR_RFCEM); + + /* Disable Tx MMC Interrupts */ + SET_BIT(heth->Instance->MMCTIMR, ETH_MMCTIMR_TGFM | ETH_MMCTIMR_TGFMSCM | \ + ETH_MMCTIMR_TGFSCM); + + heth->ErrorCode = HAL_ETH_ERROR_NONE; + heth->gState = HAL_ETH_STATE_READY; + + return HAL_OK; +} + +/** + * @brief DeInitializes the ETH peripheral. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_DeInit(ETH_HandleTypeDef *heth) +{ + /* Set the ETH peripheral state to BUSY */ + heth->gState = HAL_ETH_STATE_BUSY; + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + + if (heth->MspDeInitCallback == NULL) + { + heth->MspDeInitCallback = HAL_ETH_MspDeInit; + } + /* DeInit the low level hardware */ + heth->MspDeInitCallback(heth); +#else + + /* De-Init the low level hardware : GPIO, CLOCK, NVIC. */ + HAL_ETH_MspDeInit(heth); + +#endif /* (USE_HAL_ETH_REGISTER_CALLBACKS) */ + + /* Set ETH HAL state to Disabled */ + heth->gState = HAL_ETH_STATE_RESET; + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Initializes the ETH MSP. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +__weak void HAL_ETH_MspInit(ETH_HandleTypeDef *heth) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes ETH MSP. 
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +__weak void HAL_ETH_MspDeInit(ETH_HandleTypeDef *heth) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_MspDeInit could be implemented in the user file + */ +} + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) +/** + * @brief Register a User ETH Callback + * To be used instead of the weak predefined callback + * @param heth eth handle + * @param CallbackID ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_ETH_TX_COMPLETE_CB_ID Tx Complete Callback ID + * @arg @ref HAL_ETH_RX_COMPLETE_CB_ID Rx Complete Callback ID + * @arg @ref HAL_ETH_ERROR_CB_ID Error Callback ID + * @arg @ref HAL_ETH_PMT_CB_ID Power Management Callback ID + * @arg @ref HAL_ETH_WAKEUP_CB_ID Wake UP Callback ID + * @arg @ref HAL_ETH_MSPINIT_CB_ID MspInit callback ID + * @arg @ref HAL_ETH_MSPDEINIT_CB_ID MspDeInit callback ID + * @param pCallback pointer to the Callback function + * @retval status + */ +HAL_StatusTypeDef HAL_ETH_RegisterCallback(ETH_HandleTypeDef *heth, HAL_ETH_CallbackIDTypeDef CallbackID, + pETH_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + heth->ErrorCode |= HAL_ETH_ERROR_INVALID_CALLBACK; + return HAL_ERROR; + } + + if (heth->gState == HAL_ETH_STATE_READY) + { + switch (CallbackID) + { + case HAL_ETH_TX_COMPLETE_CB_ID : + heth->TxCpltCallback = pCallback; + break; + + case HAL_ETH_RX_COMPLETE_CB_ID : + heth->RxCpltCallback = pCallback; + break; + + case HAL_ETH_ERROR_CB_ID : + heth->ErrorCallback = pCallback; + break; + + case HAL_ETH_PMT_CB_ID : + heth->PMTCallback = pCallback; + break; + + + case HAL_ETH_WAKEUP_CB_ID : + heth->WakeUpCallback = pCallback; + break; + + case HAL_ETH_MSPINIT_CB_ID : + heth->MspInitCallback = pCallback; + break; + + case HAL_ETH_MSPDEINIT_CB_ID : + heth->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + heth->ErrorCode |= HAL_ETH_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (heth->gState == HAL_ETH_STATE_RESET) + { + switch (CallbackID) + { + case HAL_ETH_MSPINIT_CB_ID : + heth->MspInitCallback = pCallback; + break; + + case HAL_ETH_MSPDEINIT_CB_ID : + heth->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + heth->ErrorCode |= HAL_ETH_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + heth->ErrorCode |= HAL_ETH_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + } + + return status; +} + +/** + * @brief Unregister an ETH Callback + * ETH callback is redirected to the weak predefined callback + * @param heth eth handle + * @param CallbackID ID of the callback to be unregistered + * This parameter can be one of the following values: + * @arg @ref HAL_ETH_TX_COMPLETE_CB_ID Tx Complete Callback ID + * @arg @ref HAL_ETH_RX_COMPLETE_CB_ID Rx Complete Callback ID + * @arg @ref HAL_ETH_ERROR_CB_ID Error Callback ID + * @arg @ref HAL_ETH_PMT_CB_ID Power Management Callback ID + * @arg @ref HAL_ETH_WAKEUP_CB_ID Wake UP Callback ID + * @arg @ref HAL_ETH_MSPINIT_CB_ID MspInit callback ID + * @arg @ref HAL_ETH_MSPDEINIT_CB_ID MspDeInit callback ID + * @retval 
status + */ +HAL_StatusTypeDef HAL_ETH_UnRegisterCallback(ETH_HandleTypeDef *heth, HAL_ETH_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (heth->gState == HAL_ETH_STATE_READY) + { + switch (CallbackID) + { + case HAL_ETH_TX_COMPLETE_CB_ID : + heth->TxCpltCallback = HAL_ETH_TxCpltCallback; + break; + + case HAL_ETH_RX_COMPLETE_CB_ID : + heth->RxCpltCallback = HAL_ETH_RxCpltCallback; + break; + + case HAL_ETH_ERROR_CB_ID : + heth->ErrorCallback = HAL_ETH_ErrorCallback; + break; + + case HAL_ETH_PMT_CB_ID : + heth->PMTCallback = HAL_ETH_PMTCallback; + break; + + + case HAL_ETH_WAKEUP_CB_ID : + heth->WakeUpCallback = HAL_ETH_WakeUpCallback; + break; + + case HAL_ETH_MSPINIT_CB_ID : + heth->MspInitCallback = HAL_ETH_MspInit; + break; + + case HAL_ETH_MSPDEINIT_CB_ID : + heth->MspDeInitCallback = HAL_ETH_MspDeInit; + break; + + default : + /* Update the error code */ + heth->ErrorCode |= HAL_ETH_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (heth->gState == HAL_ETH_STATE_RESET) + { + switch (CallbackID) + { + case HAL_ETH_MSPINIT_CB_ID : + heth->MspInitCallback = HAL_ETH_MspInit; + break; + + case HAL_ETH_MSPDEINIT_CB_ID : + heth->MspDeInitCallback = HAL_ETH_MspDeInit; + break; + + default : + /* Update the error code */ + heth->ErrorCode |= HAL_ETH_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + heth->ErrorCode |= HAL_ETH_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + } + + return status; +} +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @defgroup ETH_Exported_Functions_Group2 IO operation functions + * @brief ETH Transmit and Receive functions + * +@verbatim + ============================================================================== + ##### IO operation functions ##### + ============================================================================== + [..] + This subsection provides a set of functions allowing to manage the ETH + data transfer. 
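    A minimal polling-mode sketch (assuming the heth handle was initialized as in
    the usage sketch above, and that HAL_ETH_RxAllocateCallback()/HAL_ETH_RxLinkCallback()
    are implemented by the application to provide and chain receive buffers; all
    other names are hypothetical):

      void *rxpacket;
      ETH_TxPacketConfigTypeDef TxConfig;
      ETH_BufferTypeDef TxBuffer;

      HAL_ETH_Start(&heth);

      if (HAL_ETH_ReadData(&heth, &rxpacket) == HAL_OK)
      {
        // rxpacket points to the buffer chain built by HAL_ETH_RxLinkCallback()
      }

      memset(&TxConfig, 0, sizeof(TxConfig));
      TxBuffer.buffer   = txdata;
      TxBuffer.len      = txlength;
      TxBuffer.next     = NULL;
      TxConfig.Length   = txlength;
      TxConfig.TxBuffer = &TxBuffer;
      HAL_ETH_Transmit(&heth, &TxConfig, 100U);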
+ +@endverbatim + * @{ + */ + +/** + * @brief Enables Ethernet MAC and DMA reception and transmission + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_Start(ETH_HandleTypeDef *heth) +{ + uint32_t tmpreg1; + + if (heth->gState == HAL_ETH_STATE_READY) + { + heth->gState = HAL_ETH_STATE_BUSY; + + /* Set number of descriptors to build */ + heth->RxDescList.RxBuildDescCnt = ETH_RX_DESC_CNT; + + /* Build all descriptors */ + ETH_UpdateDescriptor(heth); + + /* Enable the MAC transmission */ + SET_BIT(heth->Instance->MACCR, ETH_MACCR_TE); + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg1; + + /* Enable the MAC reception */ + SET_BIT(heth->Instance->MACCR, ETH_MACCR_RE); + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg1; + + /* Flush Transmit FIFO */ + ETH_FlushTransmitFIFO(heth); + + /* Enable the DMA transmission */ + SET_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_ST); + + /* Enable the DMA reception */ + SET_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_SR); + + heth->gState = HAL_ETH_STATE_STARTED; + + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Enables Ethernet MAC and DMA reception/transmission in Interrupt mode + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_Start_IT(ETH_HandleTypeDef *heth) +{ + uint32_t tmpreg1; + + if (heth->gState == HAL_ETH_STATE_READY) + { + heth->gState = HAL_ETH_STATE_BUSY; + + /* save IT mode to ETH Handle */ + heth->RxDescList.ItMode = 1U; + + /* Set number of descriptors to build */ + heth->RxDescList.RxBuildDescCnt = ETH_RX_DESC_CNT; + + /* Build all descriptors */ + ETH_UpdateDescriptor(heth); + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg1; + + /* Enable the DMA transmission */ + SET_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_ST); + + /* Enable the DMA reception */ + SET_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_SR); + + /* Flush Transmit FIFO */ + ETH_FlushTransmitFIFO(heth); + + + /* Enable the MAC transmission */ + SET_BIT(heth->Instance->MACCR, ETH_MACCR_TE); + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg1; + + /* Enable the MAC reception */ + SET_BIT(heth->Instance->MACCR, ETH_MACCR_RE); + + /* Enable ETH DMA interrupts: + - Tx complete interrupt + - Rx complete interrupt + - Fatal bus interrupt + */ + __HAL_ETH_DMA_ENABLE_IT(heth, (ETH_DMAIER_NISE | ETH_DMAIER_RIE | ETH_DMAIER_TIE | + ETH_DMAIER_FBEIE | ETH_DMAIER_AISE | ETH_DMAIER_RBUIE)); + + heth->gState = HAL_ETH_STATE_STARTED; + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Stop Ethernet MAC and DMA reception/transmission + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information 
for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_Stop(ETH_HandleTypeDef *heth) +{ + uint32_t tmpreg1; + + if (heth->gState == HAL_ETH_STATE_STARTED) + { + /* Set the ETH peripheral state to BUSY */ + heth->gState = HAL_ETH_STATE_BUSY; + + /* Disable the DMA transmission */ + CLEAR_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_ST); + + /* Disable the DMA reception */ + CLEAR_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_SR); + + /* Disable the MAC reception */ + CLEAR_BIT(heth->Instance->MACCR, ETH_MACCR_RE); + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg1; + + /* Flush Transmit FIFO */ + ETH_FlushTransmitFIFO(heth); + + /* Disable the MAC transmission */ + CLEAR_BIT(heth->Instance->MACCR, ETH_MACCR_TE); + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg1; + + heth->gState = HAL_ETH_STATE_READY; + + /* Return function status */ + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Stop Ethernet MAC and DMA reception/transmission in Interrupt mode + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_Stop_IT(ETH_HandleTypeDef *heth) +{ + ETH_DMADescTypeDef *dmarxdesc; + uint32_t descindex; + uint32_t tmpreg1; + + if (heth->gState == HAL_ETH_STATE_STARTED) + { + /* Set the ETH peripheral state to BUSY */ + heth->gState = HAL_ETH_STATE_BUSY; + + __HAL_ETH_DMA_DISABLE_IT(heth, (ETH_DMAIER_NISE | ETH_DMAIER_RIE | ETH_DMAIER_TIE | + ETH_DMAIER_FBEIE | ETH_DMAIER_AISE | ETH_DMAIER_RBUIE)); + + /* Disable the DMA transmission */ + CLEAR_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_ST); + + /* Disable the DMA reception */ + CLEAR_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_SR); + + /* Disable the MAC reception */ + CLEAR_BIT(heth->Instance->MACCR, ETH_MACCR_RE); + + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg1; + + /* Flush Transmit FIFO */ + ETH_FlushTransmitFIFO(heth); + + /* Disable the MAC transmission */ + CLEAR_BIT(heth->Instance->MACCR, ETH_MACCR_TE); + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg1; + + /* Clear IOC bit to all Rx descriptors */ + for (descindex = 0; descindex < (uint32_t)ETH_RX_DESC_CNT; descindex++) + { + dmarxdesc = (ETH_DMADescTypeDef *)heth->RxDescList.RxDesc[descindex]; + SET_BIT(dmarxdesc->DESC1, ETH_DMARXDESC_DIC); + } + + heth->RxDescList.ItMode = 0U; + + heth->gState = HAL_ETH_STATE_READY; + + /* Return function status */ + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Sends an Ethernet Packet in polling mode. 
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param pTxConfig: Hold the configuration of packet to be transmitted + * @param Timeout: timeout value + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_Transmit(ETH_HandleTypeDef *heth, ETH_TxPacketConfigTypeDef *pTxConfig, uint32_t Timeout) +{ + uint32_t tickstart; + ETH_DMADescTypeDef *dmatxdesc; + + if (pTxConfig == NULL) + { + heth->ErrorCode |= HAL_ETH_ERROR_PARAM; + return HAL_ERROR; + } + + if (heth->gState == HAL_ETH_STATE_STARTED) + { + /* Config DMA Tx descriptor by Tx Packet info */ + if (ETH_Prepare_Tx_Descriptors(heth, pTxConfig, 0) != HAL_ETH_ERROR_NONE) + { + /* Set the ETH error code */ + heth->ErrorCode |= HAL_ETH_ERROR_BUSY; + return HAL_ERROR; + } + + /* Ensure completion of descriptor preparation before transmission start */ + __DSB(); + + dmatxdesc = (ETH_DMADescTypeDef *)(&heth->TxDescList)->TxDesc[heth->TxDescList.CurTxDesc]; + + /* Incr current tx desc index */ + INCR_TX_DESC_INDEX(heth->TxDescList.CurTxDesc, 1U); + + /* Start transmission */ + /* issue a poll command to Tx DMA by writing address of next immediate free descriptor */ + WRITE_REG(heth->Instance->DMATPDR, (uint32_t)(heth->TxDescList.TxDesc[heth->TxDescList.CurTxDesc])); + + tickstart = HAL_GetTick(); + + /* Wait for data to be transmitted or timeout occurred */ + while ((dmatxdesc->DESC0 & ETH_DMATXDESC_OWN) != (uint32_t)RESET) + { + if ((heth->Instance->DMASR & ETH_DMASR_FBES) != (uint32_t)RESET) + { + heth->ErrorCode |= HAL_ETH_ERROR_DMA; + heth->DMAErrorCode = heth->Instance->DMASR; + /* Return function status */ + return HAL_ERROR; + } + + /* Check for the Timeout */ + if (Timeout != HAL_MAX_DELAY) + { + if (((HAL_GetTick() - tickstart) > Timeout) || (Timeout == 0U)) + { + heth->ErrorCode |= HAL_ETH_ERROR_TIMEOUT; + /* Clear TX descriptor so that we can proceed */ + dmatxdesc->DESC0 = (ETH_DMATXDESC_FS | ETH_DMATXDESC_LS); + return HAL_ERROR; + } + } + } + + /* Return function status */ + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Sends an Ethernet Packet in interrupt mode. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param pTxConfig: Hold the configuration of packet to be transmitted + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_Transmit_IT(ETH_HandleTypeDef *heth, ETH_TxPacketConfigTypeDef *pTxConfig) +{ + if (pTxConfig == NULL) + { + heth->ErrorCode |= HAL_ETH_ERROR_PARAM; + return HAL_ERROR; + } + + if (heth->gState == HAL_ETH_STATE_STARTED) + { + /* Save the packet pointer to release. 
*/ + heth->TxDescList.CurrentPacketAddress = (uint32_t *)pTxConfig->pData; + + /* Config DMA Tx descriptor by Tx Packet info */ + if (ETH_Prepare_Tx_Descriptors(heth, pTxConfig, 1) != HAL_ETH_ERROR_NONE) + { + heth->ErrorCode |= HAL_ETH_ERROR_BUSY; + return HAL_ERROR; + } + + /* Ensure completion of descriptor preparation before transmission start */ + __DSB(); + + /* Incr current tx desc index */ + INCR_TX_DESC_INDEX(heth->TxDescList.CurTxDesc, 1U); + + /* Start transmission */ + /* issue a poll command to Tx DMA by writing address of next immediate free descriptor */ + if (((heth->Instance)->DMASR & ETH_DMASR_TBUS) != (uint32_t)RESET) + { + /* Clear TBUS ETHERNET DMA flag */ + (heth->Instance)->DMASR = ETH_DMASR_TBUS; + /* Resume DMA transmission*/ + (heth->Instance)->DMATPDR = 0U; + } + + return HAL_OK; + + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Read a received packet. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param pAppBuff: Pointer to an application buffer to receive the packet. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_ReadData(ETH_HandleTypeDef *heth, void **pAppBuff) +{ + uint32_t descidx; + ETH_DMADescTypeDef *dmarxdesc; + uint32_t desccnt = 0U; + uint32_t desccntmax; + uint32_t bufflength; + uint8_t rxdataready = 0U; + + if (pAppBuff == NULL) + { + heth->ErrorCode |= HAL_ETH_ERROR_PARAM; + return HAL_ERROR; + } + + if (heth->gState != HAL_ETH_STATE_STARTED) + { + return HAL_ERROR; + } + + descidx = heth->RxDescList.RxDescIdx; + dmarxdesc = (ETH_DMADescTypeDef *)heth->RxDescList.RxDesc[descidx]; + desccntmax = ETH_RX_DESC_CNT - heth->RxDescList.RxBuildDescCnt; + + /* Check if descriptor is not owned by DMA */ + while ((READ_BIT(dmarxdesc->DESC0, ETH_DMARXDESC_OWN) == (uint32_t)RESET) && (desccnt < desccntmax) + && (rxdataready == 0U)) + { + if (READ_BIT(dmarxdesc->DESC0, ETH_DMARXDESC_LS) != (uint32_t)RESET) + { + /* Get timestamp high */ + heth->RxDescList.TimeStamp.TimeStampHigh = dmarxdesc->DESC7; + /* Get timestamp low */ + heth->RxDescList.TimeStamp.TimeStampLow = dmarxdesc->DESC6; + } + if ((READ_BIT(dmarxdesc->DESC0, ETH_DMARXDESC_FS) != (uint32_t)RESET) || (heth->RxDescList.pRxStart != NULL)) + { + /* Check first descriptor */ + if (READ_BIT(dmarxdesc->DESC0, ETH_DMARXDESC_FS) != (uint32_t)RESET) + { + heth->RxDescList.RxDescCnt = 0; + heth->RxDescList.RxDataLength = 0; + } + + /* Get the Frame Length of the received packet: substruct 4 bytes of the CRC */ + bufflength = ((dmarxdesc->DESC0 & ETH_DMARXDESC_FL) >> ETH_DMARXDESC_FRAMELENGTHSHIFT); + + /* Check if last descriptor */ + if (READ_BIT(dmarxdesc->DESC0, ETH_DMARXDESC_LS) != (uint32_t)RESET) + { + /* Save Last descriptor index */ + heth->RxDescList.pRxLastRxDesc = dmarxdesc->DESC0; + + /* Packet ready */ + rxdataready = 1; + } + + /* Link data */ + WRITE_REG(dmarxdesc->BackupAddr0, dmarxdesc->DESC2); +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + /*Call registered Link callback*/ + heth->rxLinkCallback(&heth->RxDescList.pRxStart, &heth->RxDescList.pRxEnd, + (uint8_t *)dmarxdesc->BackupAddr0, bufflength); +#else + /* Link callback */ + HAL_ETH_RxLinkCallback(&heth->RxDescList.pRxStart, &heth->RxDescList.pRxEnd, + (uint8_t *)dmarxdesc->BackupAddr0, (uint16_t) bufflength); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + heth->RxDescList.RxDescCnt++; + heth->RxDescList.RxDataLength += bufflength; + + /* Clear buffer pointer */ + dmarxdesc->BackupAddr0 = 0; + } + + /* Increment current rx descriptor index 
*/ + INCR_RX_DESC_INDEX(descidx, 1U); + /* Get current descriptor address */ + dmarxdesc = (ETH_DMADescTypeDef *)heth->RxDescList.RxDesc[descidx]; + desccnt++; + } + + heth->RxDescList.RxBuildDescCnt += desccnt; + if ((heth->RxDescList.RxBuildDescCnt) != 0U) + { + /* Update Descriptors */ + ETH_UpdateDescriptor(heth); + } + + heth->RxDescList.RxDescIdx = descidx; + + if (rxdataready == 1U) + { + /* Return received packet */ + *pAppBuff = heth->RxDescList.pRxStart; + /* Reset first element */ + heth->RxDescList.pRxStart = NULL; + + return HAL_OK; + } + + /* Packet not ready */ + return HAL_ERROR; +} + +/** + * @brief This function gives back Rx Desc of the last received Packet + * to the DMA, so ETH DMA will be able to use these descriptors + * to receive next Packets. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +static void ETH_UpdateDescriptor(ETH_HandleTypeDef *heth) +{ + uint32_t descidx; + uint32_t tailidx; + uint32_t desccount; + ETH_DMADescTypeDef *dmarxdesc; + uint8_t *buff = NULL; + uint8_t allocStatus = 1U; + + descidx = heth->RxDescList.RxBuildDescIdx; + dmarxdesc = (ETH_DMADescTypeDef *)heth->RxDescList.RxDesc[descidx]; + desccount = heth->RxDescList.RxBuildDescCnt; + + while ((desccount > 0U) && (allocStatus != 0U)) + { + /* Check if a buffer's attached the descriptor */ + if (READ_REG(dmarxdesc->BackupAddr0) == 0U) + { + /* Get a new buffer. */ +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + /*Call registered Allocate callback*/ + heth->rxAllocateCallback(&buff); +#else + /* Allocate callback */ + HAL_ETH_RxAllocateCallback(&buff); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + if (buff == NULL) + { + allocStatus = 0U; + } + else + { + WRITE_REG(dmarxdesc->BackupAddr0, (uint32_t)buff); + WRITE_REG(dmarxdesc->DESC2, (uint32_t)buff); + } + } + + if (allocStatus != 0U) + { + if (heth->RxDescList.ItMode == 0U) + { + WRITE_REG(dmarxdesc->DESC1, heth->Init.RxBuffLen | ETH_DMARXDESC_DIC | ETH_DMARXDESC_RCH); + } + else + { + WRITE_REG(dmarxdesc->DESC1, heth->Init.RxBuffLen | ETH_DMARXDESC_RCH); + } + + SET_BIT(dmarxdesc->DESC0, ETH_DMARXDESC_OWN); + + /* Increment current rx descriptor index */ + INCR_RX_DESC_INDEX(descidx, 1U); + /* Get current descriptor address */ + dmarxdesc = (ETH_DMADescTypeDef *)heth->RxDescList.RxDesc[descidx]; + desccount--; + } + } + + if (heth->RxDescList.RxBuildDescCnt != desccount) + { + /* Set the tail pointer index */ + tailidx = (descidx + 1U) % ETH_RX_DESC_CNT; + + /* DMB instruction to avoid race condition */ + __DMB(); + + /* Set the Tail pointer address */ + WRITE_REG(heth->Instance->DMARPDR, ((uint32_t)(heth->Init.RxDesc + (tailidx)))); + + heth->RxDescList.RxBuildDescIdx = descidx; + heth->RxDescList.RxBuildDescCnt = desccount; + } +} + +/** + * @brief Register the Rx alloc callback. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param rxAllocateCallback: pointer to function to alloc buffer + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_RegisterRxAllocateCallback(ETH_HandleTypeDef *heth, + pETH_rxAllocateCallbackTypeDef rxAllocateCallback) +{ + if (rxAllocateCallback == NULL) + { + /* No buffer to save */ + return HAL_ERROR; + } + + /* Set function to allocate buffer */ + heth->rxAllocateCallback = rxAllocateCallback; + + return HAL_OK; +} + +/** + * @brief Unregister the Rx alloc callback. 
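+ * @note  (Editor's illustrative note, not part of the original ST sources.)
+ *        Unregistering restores the __weak HAL_ETH_RxAllocateCallback(). A real
+ *        application normally supplies its own allocator, registered with
+ *        HAL_ETH_RegisterRxAllocateCallback(); heth denotes the application's
+ *        Ethernet handle. The sketch below hands out buffers from a purely
+ *        hypothetical static pool; the 1536-byte size and round-robin reuse are
+ *        assumptions, and a real allocator must ensure a buffer is no longer
+ *        referenced before handing it out again.
+ * @code
+ *        static uint8_t  RxPool[ETH_RX_DESC_CNT][1536];   /* hypothetical pool */
+ *        static uint32_t RxPoolIdx;
+ *
+ *        static void App_RxAllocateCallback(uint8_t **buff)
+ *        {
+ *          *buff = RxPool[RxPoolIdx];                      /* stored into DESC2 by the driver */
+ *          RxPoolIdx = (RxPoolIdx + 1U) % ETH_RX_DESC_CNT;
+ *        }
+ *
+ *        (void)HAL_ETH_RegisterRxAllocateCallback(&heth, App_RxAllocateCallback);
+ * @endcode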
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_UnRegisterRxAllocateCallback(ETH_HandleTypeDef *heth) +{ + /* Set function to allocate buffer */ + heth->rxAllocateCallback = HAL_ETH_RxAllocateCallback; + + return HAL_OK; +} + +/** + * @brief Rx Allocate callback. + * @param buff: pointer to allocated buffer + * @retval None + */ +__weak void HAL_ETH_RxAllocateCallback(uint8_t **buff) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(buff); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_RxAllocateCallback could be implemented in the user file + */ +} + +/** + * @brief Rx Link callback. + * @param pStart: pointer to packet start + * @param pEnd: pointer to packet end + * @param buff: pointer to received data + * @param Length: received data length + * @retval None + */ +__weak void HAL_ETH_RxLinkCallback(void **pStart, void **pEnd, uint8_t *buff, uint16_t Length) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(pStart); + UNUSED(pEnd); + UNUSED(buff); + UNUSED(Length); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_RxLinkCallback could be implemented in the user file + */ +} + +/** + * @brief Set the Rx link data function. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param rxLinkCallback: pointer to function to link data + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_RegisterRxLinkCallback(ETH_HandleTypeDef *heth, pETH_rxLinkCallbackTypeDef rxLinkCallback) +{ + if (rxLinkCallback == NULL) + { + /* No buffer to save */ + return HAL_ERROR; + } + + /* Set function to link data */ + heth->rxLinkCallback = rxLinkCallback; + + return HAL_OK; +} + +/** + * @brief Unregister the Rx link callback. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_UnRegisterRxLinkCallback(ETH_HandleTypeDef *heth) +{ + /* Set function to allocate buffer */ + heth->rxLinkCallback = HAL_ETH_RxLinkCallback; + + return HAL_OK; +} + +/** + * @brief Get the error state of the last received packet. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param pErrorCode: pointer to uint32_t to hold the error code + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_GetRxDataErrorCode(const ETH_HandleTypeDef *heth, uint32_t *pErrorCode) +{ + /* Get error bits. */ + *pErrorCode = READ_BIT(heth->RxDescList.pRxLastRxDesc, ETH_DMARXDESC_ERRORS_MASK); + + return HAL_OK; +} + +/** + * @brief Set the Tx free function. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param txFreeCallback: pointer to function to release the packet + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_RegisterTxFreeCallback(ETH_HandleTypeDef *heth, pETH_txFreeCallbackTypeDef txFreeCallback) +{ + if (txFreeCallback == NULL) + { + /* No buffer to save */ + return HAL_ERROR; + } + + /* Set function to free transmmitted packet */ + heth->txFreeCallback = txFreeCallback; + + return HAL_OK; +} + +/** + * @brief Unregister the Tx free callback. 
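+ * @note  (Editor's illustrative note, not part of the original ST sources.)
+ *        Unregistering restores the __weak HAL_ETH_TxFreeCallback(). The free
+ *        callback is invoked from HAL_ETH_ReleaseTxPacket() for every packet
+ *        whose descriptors have been given back by the DMA; with LwIP it would
+ *        typically release the pbuf that was passed as pTxConfig->pData:
+ * @code
+ *        void HAL_ETH_TxFreeCallback(uint32_t *buff)
+ *        {
+ *          pbuf_free((struct pbuf *)buff);   /* assumes pData pointed to a pbuf */
+ *        }
+ * @endcode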
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_UnRegisterTxFreeCallback(ETH_HandleTypeDef *heth) +{ + /* Set function to allocate buffer */ + heth->txFreeCallback = HAL_ETH_TxFreeCallback; + + return HAL_OK; +} + +/** + * @brief Tx Free callback. + * @param buff: pointer to buffer to free + * @retval None + */ +__weak void HAL_ETH_TxFreeCallback(uint32_t *buff) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(buff); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_TxFreeCallback could be implemented in the user file + */ +} + +/** + * @brief Release transmitted Tx packets. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_ReleaseTxPacket(ETH_HandleTypeDef *heth) +{ + ETH_TxDescListTypeDef *dmatxdesclist = &heth->TxDescList; + uint32_t numOfBuf = dmatxdesclist->BuffersInUse; + uint32_t idx = dmatxdesclist->releaseIndex; + uint8_t pktTxStatus = 1U; + uint8_t pktInUse; +#ifdef HAL_ETH_USE_PTP + ETH_TimeStampTypeDef *timestamp = &heth->TxTimestamp; +#endif /* HAL_ETH_USE_PTP */ + + /* Loop through buffers in use. */ + while ((numOfBuf != 0U) && (pktTxStatus != 0U)) + { + pktInUse = 1U; + numOfBuf--; + /* If no packet, just examine the next packet. */ + if (dmatxdesclist->PacketAddress[idx] == NULL) + { + /* No packet in use, skip to next. */ + INCR_TX_DESC_INDEX(idx, 1U); + pktInUse = 0U; + } + + if (pktInUse != 0U) + { + /* Determine if the packet has been transmitted. */ + if ((heth->Init.TxDesc[idx].DESC0 & ETH_DMATXDESC_OWN) == 0U) + { +#ifdef HAL_ETH_USE_PTP + if ((heth->Init.TxDesc[idx].DESC3 & ETH_DMATXDESC_LS) + && (heth->Init.TxDesc[idx].DESC3 & ETH_DMATXDESC_TTSS)) + { + /* Get timestamp low */ + timestamp->TimeStampLow = heth->Init.TxDesc[idx].DESC6; + /* Get timestamp high */ + timestamp->TimeStampHigh = heth->Init.TxDesc[idx].DESC7; + } + else + { + timestamp->TimeStampHigh = timestamp->TimeStampLow = UINT32_MAX; + } +#endif /* HAL_ETH_USE_PTP */ + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + /*Call registered callbacks*/ +#ifdef HAL_ETH_USE_PTP + /* Handle Ptp */ + if (timestamp->TimeStampHigh != UINT32_MAX && timestamp->TimeStampLow != UINT32_MAX) + { + heth->txPtpCallback(dmatxdesclist->PacketAddress[idx], timestamp); + } +#endif /* HAL_ETH_USE_PTP */ + /* Release the packet. */ + heth->txFreeCallback(dmatxdesclist->PacketAddress[idx]); +#else + /* Call callbacks */ +#ifdef HAL_ETH_USE_PTP + /* Handle Ptp */ + if (timestamp->TimeStampHigh != UINT32_MAX && timestamp->TimeStampLow != UINT32_MAX) + { + HAL_ETH_TxPtpCallback(dmatxdesclist->PacketAddress[idx], timestamp); + } +#endif /* HAL_ETH_USE_PTP */ + /* Release the packet. */ + HAL_ETH_TxFreeCallback(dmatxdesclist->PacketAddress[idx]); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + + /* Clear the entry in the in-use array. */ + dmatxdesclist->PacketAddress[idx] = NULL; + + /* Update the transmit relesae index and number of buffers in use. */ + INCR_TX_DESC_INDEX(idx, 1U); + dmatxdesclist->BuffersInUse = numOfBuf; + dmatxdesclist->releaseIndex = idx; + } + else + { + /* Get out of the loop! */ + pktTxStatus = 0U; + } + } + } + return HAL_OK; +} + +#ifdef HAL_ETH_USE_PTP +/** + * @brief Set the Ethernet PTP configuration. 
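+ * @note  (Editor's illustrative sketch, not part of the original ST sources.)
+ *        Minimal configuration enabling timestamping of all received packets,
+ *        with heth being the application's Ethernet handle. The sub-second
+ *        increment and addend below are placeholders only; real values must be
+ *        derived from the PTP reference clock and the required resolution.
+ * @code
+ *        ETH_PTP_ConfigTypeDef ptpconf = {0};
+ *        ptpconf.Timestamp             = ENABLE;
+ *        ptpconf.TimestampAll          = ENABLE;       /* timestamp every frame  */
+ *        ptpconf.TimestampUpdate       = ENABLE;       /* fine correction method */
+ *        ptpconf.TimestampAddendUpdate = ENABLE;
+ *        ptpconf.TimestampSubsecondInc = 0x14U;        /* placeholder value      */
+ *        ptpconf.TimestampAddend       = 0x80000000U;  /* placeholder value      */
+ *        (void)HAL_ETH_PTP_SetConfig(&heth, &ptpconf);
+ * @endcode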
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param ptpconfig: pointer to a ETH_PTP_ConfigTypeDef structure that contains + * the configuration information for PTP + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_PTP_SetConfig(ETH_HandleTypeDef *heth, ETH_PTP_ConfigTypeDef *ptpconfig) +{ + uint32_t tmpTSCR; + ETH_TimeTypeDef time; + + if (ptpconfig == NULL) + { + return HAL_ERROR; + } + + tmpTSCR = ptpconfig->Timestamp | + ((uint32_t)ptpconfig->TimestampUpdate << ETH_PTPTSCR_TSFCU_Pos) | + ((uint32_t)ptpconfig->TimestampAll << ETH_PTPTSCR_TSSARFE_Pos) | + ((uint32_t)ptpconfig->TimestampRolloverMode << ETH_PTPTSCR_TSSSR_Pos) | + ((uint32_t)ptpconfig->TimestampV2 << ETH_PTPTSCR_TSPTPPSV2E_Pos) | + ((uint32_t)ptpconfig->TimestampEthernet << ETH_PTPTSCR_TSSPTPOEFE_Pos) | + ((uint32_t)ptpconfig->TimestampIPv6 << ETH_PTPTSCR_TSSIPV6FE_Pos) | + ((uint32_t)ptpconfig->TimestampIPv4 << ETH_PTPTSCR_TSSIPV4FE_Pos) | + ((uint32_t)ptpconfig->TimestampEvent << ETH_PTPTSCR_TSSEME_Pos) | + ((uint32_t)ptpconfig->TimestampMaster << ETH_PTPTSCR_TSSMRME_Pos) | + ((uint32_t)ptpconfig->TimestampFilter << ETH_PTPTSCR_TSPFFMAE_Pos) | + ((uint32_t)ptpconfig->TimestampClockType << ETH_PTPTSCR_TSCNT_Pos); + + /* Write to MACTSCR */ + MODIFY_REG(heth->Instance->PTPTSCR, ETH_MACTSCR_MASK, tmpTSCR); + + /* Enable Timestamp */ + SET_BIT(heth->Instance->PTPTSCR, ETH_PTPTSCR_TSE); + WRITE_REG(heth->Instance->PTPSSIR, ptpconfig->TimestampSubsecondInc); + WRITE_REG(heth->Instance->PTPTSAR, ptpconfig->TimestampAddend); + + /* Enable Timestamp */ + if (ptpconfig->TimestampAddendUpdate == ENABLE) + { + SET_BIT(heth->Instance->PTPTSCR, ETH_PTPTSCR_TSARU); + while ((heth->Instance->PTPTSCR & ETH_PTPTSCR_TSARU) != 0) + { + + } + } + + /* Ptp Init */ + SET_BIT(heth->Instance->PTPTSCR, ETH_PTPTSCR_TSSTI); + + /* Set PTP Configuration done */ + heth->IsPtpConfigured = HAL_ETH_PTP_CONFIGURED; + + /* Set Seconds */ + time.Seconds = heth->Instance->PTPTSHR; + /* Set NanoSeconds */ + time.NanoSeconds = heth->Instance->PTPTSLR; + + HAL_ETH_PTP_SetTime(heth, &time); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Get the Ethernet PTP configuration. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param ptpconfig: pointer to a ETH_PTP_ConfigTypeDef structure that contains + * the configuration information for PTP + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_PTP_GetConfig(ETH_HandleTypeDef *heth, ETH_PTP_ConfigTypeDef *ptpconfig) +{ + if (ptpconfig == NULL) + { + return HAL_ERROR; + } + ptpconfig->Timestamp = READ_BIT(heth->Instance->PTPTSCR, ETH_PTPTSCR_TSE); + ptpconfig->TimestampUpdate = ((READ_BIT(heth->Instance->PTPTSCR, + ETH_PTPTSCR_TSFCU) >> ETH_PTPTSCR_TSFCU_Pos) > 0U) ? ENABLE : DISABLE; + ptpconfig->TimestampAll = ((READ_BIT(heth->Instance->PTPTSCR, + ETH_PTPTSCR_TSSARFE) >> ETH_PTPTSCR_TSSARFE_Pos) > 0U) ? ENABLE : DISABLE; + ptpconfig->TimestampRolloverMode = ((READ_BIT(heth->Instance->PTPTSCR, + ETH_PTPTSCR_TSSSR) >> ETH_PTPTSCR_TSSSR_Pos) > 0U) + ? ENABLE : DISABLE; + ptpconfig->TimestampV2 = ((READ_BIT(heth->Instance->PTPTSCR, + ETH_PTPTSCR_TSPTPPSV2E) >> ETH_PTPTSCR_TSPTPPSV2E_Pos) > 0U) ? ENABLE : DISABLE; + ptpconfig->TimestampEthernet = ((READ_BIT(heth->Instance->PTPTSCR, + ETH_PTPTSCR_TSSPTPOEFE) >> ETH_PTPTSCR_TSSPTPOEFE_Pos) > 0U) + ? 
ENABLE : DISABLE; + ptpconfig->TimestampIPv6 = ((READ_BIT(heth->Instance->PTPTSCR, + ETH_PTPTSCR_TSSIPV6FE) >> ETH_PTPTSCR_TSSIPV6FE_Pos) > 0U) ? ENABLE : DISABLE; + ptpconfig->TimestampIPv4 = ((READ_BIT(heth->Instance->PTPTSCR, + ETH_PTPTSCR_TSSIPV4FE) >> ETH_PTPTSCR_TSSIPV4FE_Pos) > 0U) ? ENABLE : DISABLE; + ptpconfig->TimestampEvent = ((READ_BIT(heth->Instance->PTPTSCR, + ETH_PTPTSCR_TSSEME) >> ETH_PTPTSCR_TSSEME_Pos) > 0U) ? ENABLE : DISABLE; + ptpconfig->TimestampMaster = ((READ_BIT(heth->Instance->PTPTSCR, + ETH_PTPTSCR_TSSMRME) >> ETH_PTPTSCR_TSSMRME_Pos) > 0U) ? ENABLE : DISABLE; + ptpconfig->TimestampFilter = ((READ_BIT(heth->Instance->PTPTSCR, + ETH_PTPTSCR_TSPFFMAE) >> ETH_PTPTSCR_TSPFFMAE_Pos) > 0U) ? ENABLE : DISABLE; + ptpconfig->TimestampClockType = ((READ_BIT(heth->Instance->PTPTSCR, + ETH_PTPTSCR_TSCNT) >> ETH_PTPTSCR_TSCNT_Pos) > 0U) ? ENABLE : DISABLE; + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Set Seconds and Nanoseconds for the Ethernet PTP registers. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param time: pointer to a ETH_TimeTypeDef structure that contains + * time to set + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_PTP_SetTime(ETH_HandleTypeDef *heth, ETH_TimeTypeDef *time) +{ + if (heth->IsPtpConfigured == HAL_ETH_PTP_CONFIGURED) + { + /* Set Seconds */ + heth->Instance->PTPTSHUR = time->Seconds; + + /* Set NanoSeconds */ + heth->Instance->PTPTSLUR = time->NanoSeconds; + + /* the system time is updated */ + SET_BIT(heth->Instance->PTPTSCR, ETH_PTPTSCR_TSSTU); + + /* Return function status */ + return HAL_OK; + } + else + { + /* Return function status */ + return HAL_ERROR; + } +} + +/** + * @brief Get Seconds and Nanoseconds for the Ethernet PTP registers. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param time: pointer to a ETH_TimeTypeDef structure that contains + * time to get + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_PTP_GetTime(ETH_HandleTypeDef *heth, ETH_TimeTypeDef *time) +{ + if (heth->IsPtpConfigured == HAL_ETH_PTP_CONFIGURED) + { + /* Get Seconds */ + time->Seconds = heth->Instance->PTPTSHR; + /* Get NanoSeconds */ + time->NanoSeconds = heth->Instance->PTPTSLR; + + /* Return function status */ + return HAL_OK; + } + else + { + /* Return function status */ + return HAL_ERROR; + } +} + +/** + * @brief Update time for the Ethernet PTP registers. 
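+ * @note  (Editor's illustrative sketch, not part of the original ST sources.)
+ *        Example of a coarse correction of minus one second, assuming PTP has
+ *        already been configured with HAL_ETH_PTP_SetConfig():
+ * @code
+ *        ETH_TimeTypeDef offset;
+ *        offset.Seconds     = 1U;
+ *        offset.NanoSeconds = 0U;
+ *        (void)HAL_ETH_PTP_AddTimeOffset(&heth, HAL_ETH_PTP_NEGATIVE_UPDATE, &offset);
+ * @endcode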
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param timeoffset: pointer to a ETH_PtpUpdateTypeDef structure that contains + * the time update information + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_PTP_AddTimeOffset(ETH_HandleTypeDef *heth, ETH_PtpUpdateTypeDef ptpoffsettype, + ETH_TimeTypeDef *timeoffset) +{ + if (heth->IsPtpConfigured == HAL_ETH_PTP_CONFIGURED) + { + if (ptpoffsettype == HAL_ETH_PTP_NEGATIVE_UPDATE) + { + /* Set Seconds update */ + heth->Instance->PTPTSHUR = ETH_PTPTSHR_VALUE - timeoffset->Seconds + 1U; + + if (READ_BIT(heth->Instance->PTPTSCR, ETH_PTPTSCR_TSSSR) == ETH_PTPTSCR_TSSSR) + { + /* Set nanoSeconds update */ + heth->Instance->PTPTSLUR = ETH_PTPTSLR_VALUE - timeoffset->NanoSeconds; + } + else + { + heth->Instance->PTPTSLUR = ETH_PTPTSHR_VALUE - timeoffset->NanoSeconds + 1U; + } + } + else + { + /* Set Seconds update */ + heth->Instance->PTPTSHUR = timeoffset->Seconds; + /* Set nanoSeconds update */ + heth->Instance->PTPTSLUR = timeoffset->NanoSeconds; + } + + SET_BIT(heth->Instance->PTPTSCR, ETH_PTPTSCR_TSSTU); + + /* Return function status */ + return HAL_OK; + } + else + { + /* Return function status */ + return HAL_ERROR; + } +} + +/** + * @brief Insert Timestamp in transmission. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_PTP_InsertTxTimestamp(ETH_HandleTypeDef *heth) +{ + ETH_TxDescListTypeDef *dmatxdesclist = &heth->TxDescList; + uint32_t descidx = dmatxdesclist->CurTxDesc; + ETH_DMADescTypeDef *dmatxdesc = (ETH_DMADescTypeDef *)dmatxdesclist->TxDesc[descidx]; + + if (heth->IsPtpConfigured == HAL_ETH_PTP_CONFIGURED) + { + /* Enable Time Stamp transmission */ + SET_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_TTSE); + + /* Return function status */ + return HAL_OK; + } + else + { + /* Return function status */ + return HAL_ERROR; + } +} + +/** + * @brief Get transmission timestamp. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param timestamp: pointer to ETH_TIMESTAMPTypeDef structure that contains + * transmission timestamp + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_PTP_GetTxTimestamp(ETH_HandleTypeDef *heth, ETH_TimeStampTypeDef *timestamp) +{ + ETH_TxDescListTypeDef *dmatxdesclist = &heth->TxDescList; + uint32_t idx = dmatxdesclist->releaseIndex; + ETH_DMADescTypeDef *dmatxdesc = (ETH_DMADescTypeDef *)dmatxdesclist->TxDesc[idx]; + + if (heth->IsPtpConfigured == HAL_ETH_PTP_CONFIGURED) + { + /* Get timestamp low */ + timestamp->TimeStampLow = dmatxdesc->DESC0; + /* Get timestamp high */ + timestamp->TimeStampHigh = dmatxdesc->DESC1; + + /* Return function status */ + return HAL_OK; + } + else + { + /* Return function status */ + return HAL_ERROR; + } +} + +/** + * @brief Get receive timestamp. 
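+ * @note  (Editor's illustrative sketch, not part of the original ST sources.)
+ *        The Rx timestamp latched while HAL_ETH_ReadData() walks the
+ *        descriptors can be read back right after a packet is returned:
+ * @code
+ *        void *pkt;
+ *        ETH_TimeStampTypeDef ts;
+ *        if (HAL_ETH_ReadData(&heth, &pkt) == HAL_OK)
+ *        {
+ *          (void)HAL_ETH_PTP_GetRxTimestamp(&heth, &ts);
+ *        }
+ * @endcode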
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param timestamp: pointer to ETH_TIMESTAMPTypeDef structure that contains + * receive timestamp + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_PTP_GetRxTimestamp(ETH_HandleTypeDef *heth, ETH_TimeStampTypeDef *timestamp) +{ + if (heth->IsPtpConfigured == HAL_ETH_PTP_CONFIGURED) + { + /* Get timestamp low */ + timestamp->TimeStampLow = heth->RxDescList.TimeStamp.TimeStampLow; + /* Get timestamp high */ + timestamp->TimeStampHigh = heth->RxDescList.TimeStamp.TimeStampHigh; + + /* Return function status */ + return HAL_OK; + } + else + { + /* Return function status */ + return HAL_ERROR; + } +} + +/** + * @brief Register the Tx Ptp callback. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param txPtpCallback: Function to handle Ptp transmission + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_RegisterTxPtpCallback(ETH_HandleTypeDef *heth, pETH_txPtpCallbackTypeDef txPtpCallback) +{ + if (txPtpCallback == NULL) + { + /* No buffer to save */ + return HAL_ERROR; + } + /* Set Function to handle Tx Ptp */ + heth->txPtpCallback = txPtpCallback; + + return HAL_OK; +} + +/** + * @brief Unregister the Tx Ptp callback. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_UnRegisterTxPtpCallback(ETH_HandleTypeDef *heth) +{ + /* Set function to allocate buffer */ + heth->txPtpCallback = HAL_ETH_TxPtpCallback; + + return HAL_OK; +} + +/** + * @brief Tx Ptp callback. + * @param buff: pointer to application buffer + * @param timestamp: pointer to ETH_TimeStampTypeDef structure that contains + * transmission timestamp + * @retval None + */ +__weak void HAL_ETH_TxPtpCallback(uint32_t *buff, ETH_TimeStampTypeDef *timestamp) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(buff); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_TxPtpCallback could be implemented in the user file + */ +} +#endif /* HAL_ETH_USE_PTP */ + +/** + * @brief This function handles ETH interrupt request. 
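+ * @note  (Editor's illustrative note, not part of the original ST sources.)
+ *        This handler is intended to be called from the ETH interrupt vector,
+ *        e.g. in the CubeMX-generated stm32f4xx_it.c:
+ * @code
+ *        void ETH_IRQHandler(void)
+ *        {
+ *          HAL_ETH_IRQHandler(&heth);
+ *        }
+ * @endcode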
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +void HAL_ETH_IRQHandler(ETH_HandleTypeDef *heth) +{ + uint32_t mac_flag = READ_REG(heth->Instance->MACSR); + uint32_t dma_flag = READ_REG(heth->Instance->DMASR); + uint32_t dma_itsource = READ_REG(heth->Instance->DMAIER); + uint32_t exti_flag = READ_REG(EXTI->PR); + + /* Packet received */ + if (((dma_flag & ETH_DMASR_RS) != 0U) && ((dma_itsource & ETH_DMAIER_RIE) != 0U)) + { + /* Clear the Eth DMA Rx IT pending bits */ + __HAL_ETH_DMA_CLEAR_IT(heth, ETH_DMASR_RS | ETH_DMASR_NIS); + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + /*Call registered Receive complete callback*/ + heth->RxCpltCallback(heth); +#else + /* Receive complete callback */ + HAL_ETH_RxCpltCallback(heth); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + } + + /* Packet transmitted */ + if (((dma_flag & ETH_DMASR_TS) != 0U) && ((dma_itsource & ETH_DMAIER_TIE) != 0U)) + { + /* Clear the Eth DMA Tx IT pending bits */ + __HAL_ETH_DMA_CLEAR_IT(heth, ETH_DMASR_TS | ETH_DMASR_NIS); + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + /*Call registered Transmit complete callback*/ + heth->TxCpltCallback(heth); +#else + /* Transfer complete callback */ + HAL_ETH_TxCpltCallback(heth); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + } + + /* ETH DMA Error */ + if (((dma_flag & ETH_DMASR_AIS) != 0U) && ((dma_itsource & ETH_DMAIER_AISE) != 0U)) + { + heth->ErrorCode |= HAL_ETH_ERROR_DMA; + /* if fatal bus error occurred */ + if ((dma_flag & ETH_DMASR_FBES) != 0U) + { + /* Get DMA error code */ + heth->DMAErrorCode = READ_BIT(heth->Instance->DMASR, (ETH_DMASR_FBES | ETH_DMASR_TPS | ETH_DMASR_RPS)); + + /* Disable all interrupts */ + __HAL_ETH_DMA_DISABLE_IT(heth, ETH_DMAIER_NISE | ETH_DMAIER_AISE); + + /* Set HAL state to ERROR */ + heth->gState = HAL_ETH_STATE_ERROR; + } + else + { + /* Get DMA error status */ + heth->DMAErrorCode = READ_BIT(heth->Instance->DMASR, (ETH_DMASR_ETS | ETH_DMASR_RWTS | + ETH_DMASR_RBUS | ETH_DMASR_AIS)); + + /* Clear the interrupt summary flag */ + __HAL_ETH_DMA_CLEAR_IT(heth, (ETH_DMASR_ETS | ETH_DMASR_RWTS | + ETH_DMASR_RBUS | ETH_DMASR_AIS)); + } +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + /* Call registered Error callback*/ + heth->ErrorCallback(heth); +#else + /* Ethernet DMA Error callback */ + HAL_ETH_ErrorCallback(heth); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + } + + + /* ETH PMT IT */ + if ((mac_flag & ETH_MAC_PMT_IT) != 0U) + { + /* Get MAC Wake-up source and clear the status register pending bit */ + heth->MACWakeUpEvent = READ_BIT(heth->Instance->MACPMTCSR, (ETH_MACPMTCSR_WFR | ETH_MACPMTCSR_MPR)); + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + /* Call registered PMT callback*/ + heth->PMTCallback(heth); +#else + /* Ethernet PMT callback */ + HAL_ETH_PMTCallback(heth); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + + heth->MACWakeUpEvent = (uint32_t)(0x0U); + } + + + /* check ETH WAKEUP exti flag */ + if ((exti_flag & ETH_WAKEUP_EXTI_LINE) != 0U) + { + /* Clear ETH WAKEUP Exti pending bit */ + __HAL_ETH_WAKEUP_EXTI_CLEAR_FLAG(ETH_WAKEUP_EXTI_LINE); +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + /* Call registered WakeUp callback*/ + heth->WakeUpCallback(heth); +#else + /* ETH WAKEUP callback */ + HAL_ETH_WakeUpCallback(heth); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + } +} + +/** + * @brief Tx Transfer completed callbacks. 
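+ * @note  (Editor's illustrative sketch, not part of the original ST sources.)
+ *        A typical override wakes the network task, e.g. by releasing a
+ *        CMSIS-RTOS semaphore; TxPktSemaphore is a hypothetical handle:
+ * @code
+ *        void HAL_ETH_TxCpltCallback(ETH_HandleTypeDef *heth)
+ *        {
+ *          (void)heth;
+ *          (void)osSemaphoreRelease(TxPktSemaphore);
+ *        }
+ * @endcode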
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +__weak void HAL_ETH_TxCpltCallback(ETH_HandleTypeDef *heth) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_TxCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Rx Transfer completed callbacks. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +__weak void HAL_ETH_RxCpltCallback(ETH_HandleTypeDef *heth) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_RxCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Ethernet transfer error callbacks + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +__weak void HAL_ETH_ErrorCallback(ETH_HandleTypeDef *heth) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_ErrorCallback could be implemented in the user file + */ +} + +/** + * @brief Ethernet Power Management module IT callback + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +__weak void HAL_ETH_PMTCallback(ETH_HandleTypeDef *heth) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_PMTCallback could be implemented in the user file + */ +} + + +/** + * @brief ETH WAKEUP interrupt callback + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +__weak void HAL_ETH_WakeUpCallback(ETH_HandleTypeDef *heth) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_WakeUpCallback could be implemented in the user file + */ +} + +/** + * @brief Read a PHY register + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param PHYAddr: PHY port address, must be a value from 0 to 31 + * @param PHYReg: PHY register address, must be a value from 0 to 31 + * @param pRegValue: parameter to hold read value + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_ReadPHYRegister(ETH_HandleTypeDef *heth, uint32_t PHYAddr, uint32_t PHYReg, + uint32_t *pRegValue) +{ + uint32_t tmpreg1; + uint32_t tickstart; + + /* Get the ETHERNET MACMIIAR value */ + tmpreg1 = heth->Instance->MACMIIAR; + + /* Keep only the CSR Clock Range CR[2:0] bits value */ + tmpreg1 &= ~ETH_MACMIIAR_CR_MASK; + + /* Prepare the MII address register value */ + tmpreg1 |= ((PHYAddr << 11U) & ETH_MACMIIAR_PA); /* Set the PHY device address */ + tmpreg1 |= (((uint32_t)PHYReg << 6U) & ETH_MACMIIAR_MR); /* Set the PHY register address */ + tmpreg1 &= ~ETH_MACMIIAR_MW; /* Set the read mode */ + tmpreg1 |= ETH_MACMIIAR_MB; /* Set the MII Busy bit */ + + /* Write the result value into the MII Address register */ + heth->Instance->MACMIIAR = 
tmpreg1; + + + tickstart = HAL_GetTick(); + + /* Check for the Busy flag */ + while ((tmpreg1 & ETH_MACMIIAR_MB) == ETH_MACMIIAR_MB) + { + /* Check for the Timeout */ + if ((HAL_GetTick() - tickstart) > PHY_READ_TO) + { + return HAL_ERROR; + } + + tmpreg1 = heth->Instance->MACMIIAR; + } + + /* Get MACMIIDR value */ + *pRegValue = (uint16_t)(heth->Instance->MACMIIDR); + + return HAL_OK; +} + +/** + * @brief Writes to a PHY register. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param PHYAddr: PHY port address, must be a value from 0 to 31 + * @param PHYReg: PHY register address, must be a value from 0 to 31 + * @param RegValue: the value to write + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_WritePHYRegister(const ETH_HandleTypeDef *heth, uint32_t PHYAddr, uint32_t PHYReg, + uint32_t RegValue) +{ + uint32_t tmpreg1; + uint32_t tickstart; + + /* Get the ETHERNET MACMIIAR value */ + tmpreg1 = heth->Instance->MACMIIAR; + + /* Keep only the CSR Clock Range CR[2:0] bits value */ + tmpreg1 &= ~ETH_MACMIIAR_CR_MASK; + + /* Prepare the MII register address value */ + tmpreg1 |= ((PHYAddr << 11U) & ETH_MACMIIAR_PA); /* Set the PHY device address */ + tmpreg1 |= (((uint32_t)PHYReg << 6U) & ETH_MACMIIAR_MR); /* Set the PHY register address */ + tmpreg1 |= ETH_MACMIIAR_MW; /* Set the write mode */ + tmpreg1 |= ETH_MACMIIAR_MB; /* Set the MII Busy bit */ + + /* Give the value to the MII data register */ + heth->Instance->MACMIIDR = (uint16_t)RegValue; + + /* Write the result value into the MII Address register */ + heth->Instance->MACMIIAR = tmpreg1; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Check for the Busy flag */ + while ((tmpreg1 & ETH_MACMIIAR_MB) == ETH_MACMIIAR_MB) + { + /* Check for the Timeout */ + if ((HAL_GetTick() - tickstart) > PHY_WRITE_TO) + { + return HAL_ERROR; + } + + tmpreg1 = heth->Instance->MACMIIAR; + } + + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup ETH_Exported_Functions_Group3 Peripheral Control functions + * @brief ETH control functions + * +@verbatim + ============================================================================== + ##### Peripheral Control functions ##### + ============================================================================== + [..] + This subsection provides a set of functions allowing to control the ETH + peripheral. + +@endverbatim + * @{ + */ +/** + * @brief Get the configuration of the MAC and MTL subsystems. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param macconf: pointer to a ETH_MACConfigTypeDef structure that will hold + * the configuration of the MAC. + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_ETH_GetMACConfig(const ETH_HandleTypeDef *heth, ETH_MACConfigTypeDef *macconf) +{ + if (macconf == NULL) + { + return HAL_ERROR; + } + + /* Get MAC parameters */ + macconf->DeferralCheck = ((READ_BIT(heth->Instance->MACCR, ETH_MACCR_DC) >> 4) > 0U) ? ENABLE : DISABLE; + macconf->BackOffLimit = READ_BIT(heth->Instance->MACCR, ETH_MACCR_BL); + macconf->RetryTransmission = ((READ_BIT(heth->Instance->MACCR, ETH_MACCR_RD) >> 9) == 0U) ? ENABLE : DISABLE; + macconf->CarrierSenseDuringTransmit = ((READ_BIT(heth->Instance->MACCR, ETH_MACCR_CSD) >> 16) > 0U) + ? ENABLE : DISABLE; + macconf->ReceiveOwn = ((READ_BIT(heth->Instance->MACCR, ETH_MACCR_ROD) >> 13) == 0U) ? 
ENABLE : DISABLE; + macconf->LoopbackMode = ((READ_BIT(heth->Instance->MACCR, ETH_MACCR_LM) >> 12) > 0U) ? ENABLE : DISABLE; + macconf->DuplexMode = READ_BIT(heth->Instance->MACCR, ETH_MACCR_DM); + macconf->Speed = READ_BIT(heth->Instance->MACCR, ETH_MACCR_FES); + macconf->Jabber = ((READ_BIT(heth->Instance->MACCR, ETH_MACCR_JD) >> 22) == 0U) ? ENABLE : DISABLE; + macconf->Watchdog = ((READ_BIT(heth->Instance->MACCR, ETH_MACCR_WD) >> 23) == 0U) ? ENABLE : DISABLE; + macconf->AutomaticPadCRCStrip = ((READ_BIT(heth->Instance->MACCR, ETH_MACCR_APCS) >> 7) > 0U) ? ENABLE : DISABLE; + macconf->InterPacketGapVal = READ_BIT(heth->Instance->MACCR, ETH_MACCR_IFG); + macconf->ChecksumOffload = ((READ_BIT(heth->Instance->MACCR, ETH_MACCR_IPCO) >> 10U) > 0U) ? ENABLE : DISABLE; + macconf->CRCStripTypePacket = ((READ_BIT(heth->Instance->MACCR, ETH_MACCR_CSTF) >> 25U) > 0U) ? ENABLE : DISABLE; + + macconf->TransmitFlowControl = ((READ_BIT(heth->Instance->MACFCR, ETH_MACFCR_TFCE) >> 1) > 0U) ? ENABLE : DISABLE; + macconf->ZeroQuantaPause = ((READ_BIT(heth->Instance->MACFCR, ETH_MACFCR_ZQPD) >> 7) == 0U) ? ENABLE : DISABLE; + macconf->PauseLowThreshold = READ_BIT(heth->Instance->MACFCR, ETH_MACFCR_PLT); + macconf->PauseTime = (READ_BIT(heth->Instance->MACFCR, ETH_MACFCR_PT) >> 16); + macconf->ReceiveFlowControl = ((READ_BIT(heth->Instance->MACFCR, ETH_MACFCR_RFCE) >> 2U) > 0U) ? ENABLE : DISABLE; + macconf->UnicastPausePacketDetect = ((READ_BIT(heth->Instance->MACFCR, ETH_MACFCR_UPFD) >> 3U) > 0U) + ? ENABLE : DISABLE; + + return HAL_OK; +} + +/** + * @brief Get the configuration of the DMA. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param dmaconf: pointer to a ETH_DMAConfigTypeDef structure that will hold + * the configuration of the ETH DMA. + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_ETH_GetDMAConfig(const ETH_HandleTypeDef *heth, ETH_DMAConfigTypeDef *dmaconf) +{ + if (dmaconf == NULL) + { + return HAL_ERROR; + } + + dmaconf->DMAArbitration = READ_BIT(heth->Instance->DMABMR, + (ETH_DMAARBITRATION_RXPRIORTX | ETH_DMAARBITRATION_ROUNDROBIN_RXTX_4_1)); + dmaconf->AddressAlignedBeats = ((READ_BIT(heth->Instance->DMABMR, ETH_DMABMR_AAB) >> 25U) > 0U) ? ENABLE : DISABLE; + dmaconf->BurstMode = READ_BIT(heth->Instance->DMABMR, ETH_DMABMR_FB | ETH_DMABMR_MB); + dmaconf->RxDMABurstLength = READ_BIT(heth->Instance->DMABMR, ETH_DMABMR_RDP); + dmaconf->TxDMABurstLength = READ_BIT(heth->Instance->DMABMR, ETH_DMABMR_PBL); + dmaconf->EnhancedDescriptorFormat = ((READ_BIT(heth->Instance->DMABMR, ETH_DMABMR_EDE) >> 7) > 0U) ? ENABLE : DISABLE; + dmaconf->DescriptorSkipLength = READ_BIT(heth->Instance->DMABMR, ETH_DMABMR_DSL) >> 2; + + dmaconf->DropTCPIPChecksumErrorFrame = ((READ_BIT(heth->Instance->DMAOMR, + ETH_DMAOMR_DTCEFD) >> 26) > 0U) ? DISABLE : ENABLE; + dmaconf->ReceiveStoreForward = ((READ_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_RSF) >> 25) > 0U) ? ENABLE : DISABLE; + dmaconf->FlushRxPacket = ((READ_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_FTF) >> 20) > 0U) ? DISABLE : ENABLE; + dmaconf->TransmitStoreForward = ((READ_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_TSF) >> 21) > 0U) ? ENABLE : DISABLE; + dmaconf->TransmitThresholdControl = READ_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_TTC); + dmaconf->ForwardErrorFrames = ((READ_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_FEF) >> 7) > 0U) ? ENABLE : DISABLE; + dmaconf->ForwardUndersizedGoodFrames = ((READ_BIT(heth->Instance->DMAOMR, + ETH_DMAOMR_FUGF) >> 6) > 0U) ? 
ENABLE : DISABLE; + dmaconf->ReceiveThresholdControl = READ_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_RTC); + dmaconf->SecondFrameOperate = ((READ_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_OSF) >> 2) > 0U) ? ENABLE : DISABLE; + + return HAL_OK; +} + +/** + * @brief Set the MAC configuration. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param macconf: pointer to a ETH_MACConfigTypeDef structure that contains + * the configuration of the MAC. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_SetMACConfig(ETH_HandleTypeDef *heth, ETH_MACConfigTypeDef *macconf) +{ + if (macconf == NULL) + { + return HAL_ERROR; + } + + if (heth->gState == HAL_ETH_STATE_READY) + { + ETH_SetMACConfig(heth, macconf); + + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Set the ETH DMA configuration. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param dmaconf: pointer to a ETH_DMAConfigTypeDef structure that will hold + * the configuration of the ETH DMA. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_SetDMAConfig(ETH_HandleTypeDef *heth, ETH_DMAConfigTypeDef *dmaconf) +{ + if (dmaconf == NULL) + { + return HAL_ERROR; + } + + if (heth->gState == HAL_ETH_STATE_READY) + { + ETH_SetDMAConfig(heth, dmaconf); + + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Configures the Clock range of ETH MDIO interface. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +void HAL_ETH_SetMDIOClockRange(ETH_HandleTypeDef *heth) +{ + uint32_t hclk; + uint32_t tmpreg; + + /* Get the ETHERNET MACMIIAR value */ + tmpreg = (heth->Instance)->MACMIIAR; + /* Clear CSR Clock Range CR[2:0] bits */ + tmpreg &= ETH_MACMIIAR_CR_MASK; + + /* Get hclk frequency value */ + hclk = HAL_RCC_GetHCLKFreq(); + + /* Set CR bits depending on hclk value */ + if (hclk < 35000000U) + { + /* CSR Clock Range between 0-35 MHz */ + tmpreg |= (uint32_t)ETH_MACMIIAR_CR_Div16; + } + else if (hclk < 60000000U) + { + /* CSR Clock Range between 35-60 MHz */ + tmpreg |= (uint32_t)ETH_MACMIIAR_CR_Div26; + } + else if (hclk < 100000000U) + { + /* CSR Clock Range between 60-100 MHz */ + tmpreg |= (uint32_t)ETH_MACMIIAR_CR_Div42; + } + else if (hclk < 150000000U) + { + /* CSR Clock Range between 100-150 MHz */ + tmpreg |= (uint32_t)ETH_MACMIIAR_CR_Div62; + } + else /* (hclk >= 150000000) */ + { + /* CSR Clock >= 150 MHz */ + tmpreg |= (uint32_t)ETH_MACMIIAR_CR_Div102; + } + + /* Write to ETHERNET MAC MIIAR: Configure the ETHERNET CSR Clock Range */ + (heth->Instance)->MACMIIAR = (uint32_t)tmpreg; +} + +/** + * @brief Set the ETH MAC (L2) Filters configuration. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param pFilterConfig: pointer to a ETH_MACFilterConfigTypeDef structure that contains + * the configuration of the ETH MAC filters. 
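+ * @note  (Editor's illustrative sketch, not part of the original ST sources.)
+ *        A common pattern is read-modify-write of the current filter setup,
+ *        for example to accept all multicast traffic:
+ * @code
+ *        ETH_MACFilterConfigTypeDef filt;
+ *        (void)HAL_ETH_GetMACFilterConfig(&heth, &filt);
+ *        filt.PassAllMulticast = ENABLE;
+ *        (void)HAL_ETH_SetMACFilterConfig(&heth, &filt);
+ * @endcode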
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_SetMACFilterConfig(ETH_HandleTypeDef *heth, const ETH_MACFilterConfigTypeDef *pFilterConfig) +{ + uint32_t filterconfig; + uint32_t tmpreg1; + + if (pFilterConfig == NULL) + { + return HAL_ERROR; + } + + filterconfig = ((uint32_t)pFilterConfig->PromiscuousMode | + ((uint32_t)pFilterConfig->HashUnicast << 1) | + ((uint32_t)pFilterConfig->HashMulticast << 2) | + ((uint32_t)pFilterConfig->DestAddrInverseFiltering << 3) | + ((uint32_t)pFilterConfig->PassAllMulticast << 4) | + ((uint32_t)((pFilterConfig->BroadcastFilter == DISABLE) ? 1U : 0U) << 5) | + ((uint32_t)pFilterConfig->SrcAddrInverseFiltering << 8) | + ((uint32_t)pFilterConfig->SrcAddrFiltering << 9) | + ((uint32_t)pFilterConfig->HachOrPerfectFilter << 10) | + ((uint32_t)pFilterConfig->ReceiveAllMode << 31) | + pFilterConfig->ControlPacketsFilter); + + MODIFY_REG(heth->Instance->MACFFR, ETH_MACFFR_MASK, filterconfig); + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACFFR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACFFR = tmpreg1; + + return HAL_OK; +} + +/** + * @brief Get the ETH MAC (L2) Filters configuration. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param pFilterConfig: pointer to a ETH_MACFilterConfigTypeDef structure that will hold + * the configuration of the ETH MAC filters. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_GetMACFilterConfig(const ETH_HandleTypeDef *heth, ETH_MACFilterConfigTypeDef *pFilterConfig) +{ + if (pFilterConfig == NULL) + { + return HAL_ERROR; + } + + pFilterConfig->PromiscuousMode = ((READ_BIT(heth->Instance->MACFFR, ETH_MACFFR_PM)) > 0U) ? ENABLE : DISABLE; + pFilterConfig->HashUnicast = ((READ_BIT(heth->Instance->MACFFR, ETH_MACFFR_HU) >> 1) > 0U) ? ENABLE : DISABLE; + pFilterConfig->HashMulticast = ((READ_BIT(heth->Instance->MACFFR, ETH_MACFFR_HM) >> 2) > 0U) ? ENABLE : DISABLE; + pFilterConfig->DestAddrInverseFiltering = ((READ_BIT(heth->Instance->MACFFR, + ETH_MACFFR_DAIF) >> 3) > 0U) ? ENABLE : DISABLE; + pFilterConfig->PassAllMulticast = ((READ_BIT(heth->Instance->MACFFR, ETH_MACFFR_PAM) >> 4) > 0U) ? ENABLE : DISABLE; + pFilterConfig->BroadcastFilter = ((READ_BIT(heth->Instance->MACFFR, ETH_MACFFR_BFD) >> 5) == 0U) ? ENABLE : DISABLE; + pFilterConfig->ControlPacketsFilter = READ_BIT(heth->Instance->MACFFR, ETH_MACFFR_PCF); + pFilterConfig->SrcAddrInverseFiltering = ((READ_BIT(heth->Instance->MACFFR, + ETH_MACFFR_SAIF) >> 8) > 0U) ? ENABLE : DISABLE; + pFilterConfig->SrcAddrFiltering = ((READ_BIT(heth->Instance->MACFFR, ETH_MACFFR_SAF) >> 9) > 0U) ? ENABLE : DISABLE; + pFilterConfig->HachOrPerfectFilter = ((READ_BIT(heth->Instance->MACFFR, ETH_MACFFR_HPF) >> 10) > 0U) + ? ENABLE : DISABLE; + pFilterConfig->ReceiveAllMode = ((READ_BIT(heth->Instance->MACFFR, ETH_MACFFR_RA) >> 31) > 0U) ? ENABLE : DISABLE; + + return HAL_OK; +} + +/** + * @brief Set the source MAC Address to be matched. 
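+ * @note  (Editor's illustrative sketch, not part of the original ST sources.)
+ *        Match on one specific source address using the MAC address 1
+ *        registers; the address bytes below are arbitrary:
+ * @code
+ *        static const uint8_t srcAddr[6] = {0x00U, 0x80U, 0xE1U, 0x00U, 0x00U, 0x01U};
+ *        (void)HAL_ETH_SetSourceMACAddrMatch(&heth, ETH_MAC_ADDRESS1, srcAddr);
+ * @endcode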
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param AddrNbr: The MAC address to configure + * This parameter must be a value of the following: + * ETH_MAC_ADDRESS1 + * ETH_MAC_ADDRESS2 + * ETH_MAC_ADDRESS3 + * @param pMACAddr: Pointer to MAC address buffer data (6 bytes) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_SetSourceMACAddrMatch(const ETH_HandleTypeDef *heth, uint32_t AddrNbr, + const uint8_t *pMACAddr) +{ + uint32_t macaddrlr; + uint32_t macaddrhr; + + if (pMACAddr == NULL) + { + return HAL_ERROR; + } + + /* Get mac addr high reg offset */ + macaddrhr = ((uint32_t) &(heth->Instance->MACA0HR) + AddrNbr); + /* Get mac addr low reg offset */ + macaddrlr = ((uint32_t) &(heth->Instance->MACA0LR) + AddrNbr); + + /* Set MAC addr bits 32 to 47 */ + (*(__IO uint32_t *)macaddrhr) = (((uint32_t)(pMACAddr[5]) << 8) | (uint32_t)pMACAddr[4]); + /* Set MAC addr bits 0 to 31 */ + (*(__IO uint32_t *)macaddrlr) = (((uint32_t)(pMACAddr[3]) << 24) | ((uint32_t)(pMACAddr[2]) << 16) | + ((uint32_t)(pMACAddr[1]) << 8) | (uint32_t)pMACAddr[0]); + + /* Enable address and set source address bit */ + (*(__IO uint32_t *)macaddrhr) |= (ETH_MACA1HR_AE | ETH_MACA1HR_SA); + + return HAL_OK; +} + +/** + * @brief Set the ETH Hash Table Value. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param pHashTable: pointer to a table of two 32 bit values, that contains + * the 64 bits of the hash table. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_SetHashTable(ETH_HandleTypeDef *heth, uint32_t *pHashTable) +{ + uint32_t tmpreg1; + if (pHashTable == NULL) + { + return HAL_ERROR; + } + + heth->Instance->MACHTHR = pHashTable[0]; + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACHTHR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACHTHR = tmpreg1; + + heth->Instance->MACHTLR = pHashTable[1]; + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACHTLR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACHTLR = tmpreg1; + + return HAL_OK; +} + +/** + * @brief Set the VLAN Identifier for Rx packets + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param ComparisonBits: 12 or 16 bit comparison mode + must be a value of @ref ETH_VLAN_Tag_Comparison + * @param VLANIdentifier: VLAN Identifier value + * @retval None + */ +void HAL_ETH_SetRxVLANIdentifier(ETH_HandleTypeDef *heth, uint32_t ComparisonBits, uint32_t VLANIdentifier) +{ + uint32_t tmpreg1; + MODIFY_REG(heth->Instance->MACVLANTR, ETH_MACVLANTR_VLANTI, VLANIdentifier); + if (ComparisonBits == ETH_VLANTAGCOMPARISON_16BIT) + { + CLEAR_BIT(heth->Instance->MACVLANTR, ETH_MACVLANTR_VLANTC); + } + else + { + SET_BIT(heth->Instance->MACVLANTR, ETH_MACVLANTR_VLANTC); + } + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACVLANTR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACVLANTR = tmpreg1; +} + +/** + * @brief Enters the Power down mode. 
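+ * @note  (Editor's illustrative sketch, not part of the original ST sources.)
+ *        Enter power-down with Magic Packet wake-up only:
+ * @code
+ *        ETH_PowerDownConfigTypeDef pdcfg = {0};
+ *        pdcfg.MagicPacket   = ENABLE;
+ *        pdcfg.WakeUpPacket  = DISABLE;
+ *        pdcfg.GlobalUnicast = DISABLE;
+ *        HAL_ETH_EnterPowerDownMode(&heth, &pdcfg);
+ * @endcode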
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param pPowerDownConfig: a pointer to ETH_PowerDownConfigTypeDef structure + * that contains the Power Down configuration + * @retval None. + */ +void HAL_ETH_EnterPowerDownMode(ETH_HandleTypeDef *heth, const ETH_PowerDownConfigTypeDef *pPowerDownConfig) +{ + uint32_t powerdownconfig; + + powerdownconfig = (((uint32_t)pPowerDownConfig->MagicPacket << ETH_MACPMTCSR_MPE_Pos) | + ((uint32_t)pPowerDownConfig->WakeUpPacket << ETH_MACPMTCSR_WFE_Pos) | + ((uint32_t)pPowerDownConfig->GlobalUnicast << ETH_MACPMTCSR_GU_Pos) | + ETH_MACPMTCSR_PD); + + MODIFY_REG(heth->Instance->MACPMTCSR, ETH_MACPMTCSR_MASK, powerdownconfig); +} + +/** + * @brief Exits from the Power down mode. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None. + */ +void HAL_ETH_ExitPowerDownMode(ETH_HandleTypeDef *heth) +{ + uint32_t tmpreg1; + + /* clear wake up sources */ + CLEAR_BIT(heth->Instance->MACPMTCSR, ETH_MACPMTCSR_WFE | ETH_MACPMTCSR_MPE | ETH_MACPMTCSR_GU); + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACPMTCSR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACPMTCSR = tmpreg1; + + if (READ_BIT(heth->Instance->MACPMTCSR, ETH_MACPMTCSR_PD) != 0U) + { + /* Exit power down mode */ + CLEAR_BIT(heth->Instance->MACPMTCSR, ETH_MACPMTCSR_PD); + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACPMTCSR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACPMTCSR = tmpreg1; + } + + /* Disable PMT interrupt */ + SET_BIT(heth->Instance->MACIMR, ETH_MACIMR_PMTIM); +} + +/** + * @brief Set the WakeUp filter. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param pFilter: pointer to filter registers values + * @param Count: number of filter registers, must be from 1 to 8. + * @retval None. + */ +HAL_StatusTypeDef HAL_ETH_SetWakeUpFilter(ETH_HandleTypeDef *heth, uint32_t *pFilter, uint32_t Count) +{ + uint32_t regindex; + + if (pFilter == NULL) + { + return HAL_ERROR; + } + + /* Reset Filter Pointer */ + SET_BIT(heth->Instance->MACPMTCSR, ETH_MACPMTCSR_WFFRPR); + + /* Wake up packet filter config */ + for (regindex = 0; regindex < Count; regindex++) + { + /* Write filter regs */ + WRITE_REG(heth->Instance->MACRWUFFR, pFilter[regindex]); + } + + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup ETH_Exported_Functions_Group4 Peripheral State and Errors functions + * @brief ETH State and Errors functions + * +@verbatim + ============================================================================== + ##### Peripheral State and Errors functions ##### + ============================================================================== + [..] + This subsection provides a set of functions allowing to return the State of + ETH communication process, return Peripheral Errors occurred during communication + process + + +@endverbatim + * @{ + */ + +/** + * @brief Returns the ETH state. 
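+ * @note  (Editor's illustrative sketch, not part of the original ST sources.)
+ *        The state and error getters are typically polled before queueing a
+ *        new transmit request:
+ * @code
+ *        if (HAL_ETH_GetState(&heth) == HAL_ETH_STATE_STARTED)
+ *        {
+ *          /* MAC/DMA are running, HAL_ETH_Transmit_IT() may be called */
+ *        }
+ *        else if (HAL_ETH_GetError(&heth) != HAL_ETH_ERROR_NONE)
+ *        {
+ *          /* inspect HAL_ETH_GetDMAError() / HAL_ETH_GetMACError() */
+ *        }
+ * @endcode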
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL state + */ +HAL_ETH_StateTypeDef HAL_ETH_GetState(const ETH_HandleTypeDef *heth) +{ + return heth->gState; +} + +/** + * @brief Returns the ETH error code + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval ETH Error Code + */ +uint32_t HAL_ETH_GetError(const ETH_HandleTypeDef *heth) +{ + return heth->ErrorCode; +} + +/** + * @brief Returns the ETH DMA error code + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval ETH DMA Error Code + */ +uint32_t HAL_ETH_GetDMAError(const ETH_HandleTypeDef *heth) +{ + return heth->DMAErrorCode; +} + +/** + * @brief Returns the ETH MAC error code + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval ETH MAC Error Code + */ +uint32_t HAL_ETH_GetMACError(const ETH_HandleTypeDef *heth) +{ + return heth->MACErrorCode; +} + +/** + * @brief Returns the ETH MAC WakeUp event source + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval ETH MAC WakeUp event source + */ +uint32_t HAL_ETH_GetMACWakeUpSource(const ETH_HandleTypeDef *heth) +{ + return heth->MACWakeUpEvent; +} + +/** + * @} + */ + +/** + * @} + */ + +/** @addtogroup ETH_Private_Functions ETH Private Functions + * @{ + */ + +/** + * @brief Clears the ETHERNET transmit FIFO. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +static void ETH_FlushTransmitFIFO(ETH_HandleTypeDef *heth) +{ + __IO uint32_t tmpreg = 0; + + /* Set the Flush Transmit FIFO bit */ + (heth->Instance)->DMAOMR |= ETH_DMAOMR_FTF; + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->DMAOMR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->DMAOMR = tmpreg; +} + +static void ETH_SetMACConfig(ETH_HandleTypeDef *heth, const ETH_MACConfigTypeDef *macconf) +{ + uint32_t tmpreg1; + + /*------------------------ ETHERNET MACCR Configuration --------------------*/ + /* Get the ETHERNET MACCR value */ + tmpreg1 = (heth->Instance)->MACCR; + /* Clear CSTF, WD, PCE, PS, TE and RE bits */ + tmpreg1 &= ETH_MACCR_CLEAR_MASK; + + tmpreg1 |= (uint32_t)(((uint32_t)macconf->CRCStripTypePacket << 25U) | + ((uint32_t)((macconf->Watchdog == DISABLE) ? 1U : 0U) << 23U) | + ((uint32_t)((macconf->Jabber == DISABLE) ? 1U : 0U) << 22U) | + (uint32_t)macconf->InterPacketGapVal | + ((uint32_t)macconf->CarrierSenseDuringTransmit << 16U) | + macconf->Speed | + ((uint32_t)((macconf->ReceiveOwn == DISABLE) ? 1U : 0U) << 13U) | + ((uint32_t)macconf->LoopbackMode << 12U) | + macconf->DuplexMode | + ((uint32_t)macconf->ChecksumOffload << 10U) | + ((uint32_t)((macconf->RetryTransmission == DISABLE) ? 
1U : 0U) << 9U) | + ((uint32_t)macconf->AutomaticPadCRCStrip << 7U) | + macconf->BackOffLimit | + ((uint32_t)macconf->DeferralCheck << 4U)); + + /* Write to ETHERNET MACCR */ + (heth->Instance)->MACCR = (uint32_t)tmpreg1; + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg1; + + /*----------------------- ETHERNET MACFCR Configuration --------------------*/ + + /* Get the ETHERNET MACFCR value */ + tmpreg1 = (heth->Instance)->MACFCR; + /* Clear xx bits */ + tmpreg1 &= ETH_MACFCR_CLEAR_MASK; + + tmpreg1 |= (uint32_t)((macconf->PauseTime << 16U) | + ((uint32_t)((macconf->ZeroQuantaPause == DISABLE) ? 1U : 0U) << 7U) | + macconf->PauseLowThreshold | + ((uint32_t)((macconf->UnicastPausePacketDetect == ENABLE) ? 1U : 0U) << 3U) | + ((uint32_t)((macconf->ReceiveFlowControl == ENABLE) ? 1U : 0U) << 2U) | + ((uint32_t)((macconf->TransmitFlowControl == ENABLE) ? 1U : 0U) << 1U)); + + /* Write to ETHERNET MACFCR */ + (heth->Instance)->MACFCR = (uint32_t)tmpreg1; + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACFCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACFCR = tmpreg1; +} + +static void ETH_SetDMAConfig(ETH_HandleTypeDef *heth, const ETH_DMAConfigTypeDef *dmaconf) +{ + uint32_t tmpreg1; + + /*----------------------- ETHERNET DMAOMR Configuration --------------------*/ + /* Get the ETHERNET DMAOMR value */ + tmpreg1 = (heth->Instance)->DMAOMR; + /* Clear xx bits */ + tmpreg1 &= ETH_DMAOMR_CLEAR_MASK; + + tmpreg1 |= (uint32_t)(((uint32_t)((dmaconf->DropTCPIPChecksumErrorFrame == DISABLE) ? 1U : 0U) << 26U) | + ((uint32_t)dmaconf->ReceiveStoreForward << 25U) | + ((uint32_t)((dmaconf->FlushRxPacket == DISABLE) ? 1U : 0U) << 20U) | + ((uint32_t)dmaconf->TransmitStoreForward << 21U) | + dmaconf->TransmitThresholdControl | + ((uint32_t)dmaconf->ForwardErrorFrames << 7U) | + ((uint32_t)dmaconf->ForwardUndersizedGoodFrames << 6U) | + dmaconf->ReceiveThresholdControl | + ((uint32_t)dmaconf->SecondFrameOperate << 2U)); + + /* Write to ETHERNET DMAOMR */ + (heth->Instance)->DMAOMR = (uint32_t)tmpreg1; + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->DMAOMR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->DMAOMR = tmpreg1; + + /*----------------------- ETHERNET DMABMR Configuration --------------------*/ + (heth->Instance)->DMABMR = (uint32_t)(((uint32_t)dmaconf->AddressAlignedBeats << 25U) | + dmaconf->BurstMode | + dmaconf->RxDMABurstLength | /* !! if 4xPBL is selected for Tx or + Rx it is applied for the other */ + dmaconf->TxDMABurstLength | + ((uint32_t)dmaconf->EnhancedDescriptorFormat << 7U) | + (dmaconf->DescriptorSkipLength << 2U) | + dmaconf->DMAArbitration | + ETH_DMABMR_USP); /* Enable use of separate PBL for Rx and Tx */ + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->DMABMR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->DMABMR = tmpreg1; +} + +/** + * @brief Configures Ethernet MAC and DMA with default parameters. + * called by HAL_ETH_Init() API. 
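+ * @note  (Editor's illustrative sketch, not part of the original ST sources.)
+ *        The defaults applied here can be adjusted after HAL_ETH_Init() and
+ *        before the peripheral is started, using the public configuration
+ *        functions, e.g. to disable hardware checksum offload:
+ * @code
+ *        ETH_MACConfigTypeDef macconf;
+ *        (void)HAL_ETH_GetMACConfig(&heth, &macconf);
+ *        macconf.ChecksumOffload = DISABLE;
+ *        (void)HAL_ETH_SetMACConfig(&heth, &macconf);
+ * @endcode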
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +static void ETH_MACDMAConfig(ETH_HandleTypeDef *heth) +{ + ETH_MACConfigTypeDef macDefaultConf; + ETH_DMAConfigTypeDef dmaDefaultConf; + + /*--------------- ETHERNET MAC registers default Configuration --------------*/ + macDefaultConf.Watchdog = ENABLE; + macDefaultConf.Jabber = ENABLE; + macDefaultConf.InterPacketGapVal = ETH_INTERFRAMEGAP_96BIT; + macDefaultConf.CarrierSenseDuringTransmit = DISABLE; + macDefaultConf.ReceiveOwn = ENABLE; + macDefaultConf.LoopbackMode = DISABLE; + macDefaultConf.CRCStripTypePacket = ENABLE; + macDefaultConf.ChecksumOffload = ENABLE; + macDefaultConf.RetryTransmission = DISABLE; + macDefaultConf.AutomaticPadCRCStrip = DISABLE; + macDefaultConf.BackOffLimit = ETH_BACKOFFLIMIT_10; + macDefaultConf.DeferralCheck = DISABLE; + macDefaultConf.PauseTime = 0x0U; + macDefaultConf.ZeroQuantaPause = DISABLE; + macDefaultConf.PauseLowThreshold = ETH_PAUSELOWTHRESHOLD_MINUS4; + macDefaultConf.ReceiveFlowControl = DISABLE; + macDefaultConf.TransmitFlowControl = DISABLE; + macDefaultConf.Speed = ETH_SPEED_100M; + macDefaultConf.DuplexMode = ETH_FULLDUPLEX_MODE; + macDefaultConf.UnicastPausePacketDetect = DISABLE; + + /* MAC default configuration */ + ETH_SetMACConfig(heth, &macDefaultConf); + + /*--------------- ETHERNET DMA registers default Configuration --------------*/ + dmaDefaultConf.DropTCPIPChecksumErrorFrame = ENABLE; + dmaDefaultConf.ReceiveStoreForward = ENABLE; + dmaDefaultConf.FlushRxPacket = ENABLE; + dmaDefaultConf.TransmitStoreForward = ENABLE; + dmaDefaultConf.TransmitThresholdControl = ETH_TRANSMITTHRESHOLDCONTROL_64BYTES; + dmaDefaultConf.ForwardErrorFrames = DISABLE; + dmaDefaultConf.ForwardUndersizedGoodFrames = DISABLE; + dmaDefaultConf.ReceiveThresholdControl = ETH_RECEIVEDTHRESHOLDCONTROL_64BYTES; + dmaDefaultConf.SecondFrameOperate = ENABLE; + dmaDefaultConf.AddressAlignedBeats = ENABLE; + dmaDefaultConf.BurstMode = ETH_BURSTLENGTH_FIXED; + dmaDefaultConf.RxDMABurstLength = ETH_RXDMABURSTLENGTH_32BEAT; + dmaDefaultConf.TxDMABurstLength = ETH_TXDMABURSTLENGTH_32BEAT; + dmaDefaultConf.EnhancedDescriptorFormat = ENABLE; + dmaDefaultConf.DescriptorSkipLength = 0x0U; + dmaDefaultConf.DMAArbitration = ETH_DMAARBITRATION_ROUNDROBIN_RXTX_1_1; + + /* DMA default configuration */ + ETH_SetDMAConfig(heth, &dmaDefaultConf); +} +/** + * @brief Configures the selected MAC address. 
+ * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param MacAddr The MAC address to configure + * This parameter can be one of the following values: + * @arg ETH_MAC_Address0: MAC Address0 + * @arg ETH_MAC_Address1: MAC Address1 + * @arg ETH_MAC_Address2: MAC Address2 + * @arg ETH_MAC_Address3: MAC Address3 + * @param Addr Pointer to MAC address buffer data (6 bytes) + * @retval HAL status + */ +static void ETH_MACAddressConfig(ETH_HandleTypeDef *heth, uint32_t MacAddr, uint8_t *Addr) +{ + uint32_t tmpreg1; + + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + + /* Calculate the selected MAC address high register */ + tmpreg1 = ((uint32_t)Addr[5U] << 8U) | (uint32_t)Addr[4U]; + /* Load the selected MAC address high register */ + (*(__IO uint32_t *)((uint32_t)(ETH_MAC_ADDR_HBASE + MacAddr))) = tmpreg1; + /* Calculate the selected MAC address low register */ + tmpreg1 = ((uint32_t)Addr[3U] << 24U) | ((uint32_t)Addr[2U] << 16U) | ((uint32_t)Addr[1U] << 8U) | Addr[0U]; + + /* Load the selected MAC address low register */ + (*(__IO uint32_t *)((uint32_t)(ETH_MAC_ADDR_LBASE + MacAddr))) = tmpreg1; +} + +/** + * @brief Initializes the DMA Tx descriptors. + * called by HAL_ETH_Init() API. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +static void ETH_DMATxDescListInit(ETH_HandleTypeDef *heth) +{ + ETH_DMADescTypeDef *dmatxdesc; + uint32_t i; + + /* Fill each DMATxDesc descriptor with the right values */ + for (i = 0; i < (uint32_t)ETH_TX_DESC_CNT; i++) + { + dmatxdesc = heth->Init.TxDesc + i; + + WRITE_REG(dmatxdesc->DESC0, 0x0U); + WRITE_REG(dmatxdesc->DESC1, 0x0U); + WRITE_REG(dmatxdesc->DESC2, 0x0U); + WRITE_REG(dmatxdesc->DESC3, 0x0U); + + WRITE_REG(heth->TxDescList.TxDesc[i], (uint32_t)dmatxdesc); + + /* Set Second Address Chained bit */ + SET_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_TCH); + + if (i < ((uint32_t)ETH_TX_DESC_CNT - 1U)) + { + WRITE_REG(dmatxdesc->DESC3, (uint32_t)(heth->Init.TxDesc + i + 1U)); + } + else + { + WRITE_REG(dmatxdesc->DESC3, (uint32_t)(heth->Init.TxDesc)); + } + + /* Set the DMA Tx descriptors checksum insertion */ + SET_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_CHECKSUMTCPUDPICMPFULL); + } + + heth->TxDescList.CurTxDesc = 0; + + /* Set Transmit Descriptor List Address */ + WRITE_REG(heth->Instance->DMATDLAR, (uint32_t) heth->Init.TxDesc); +} + +/** + * @brief Initializes the DMA Rx descriptors in chain mode. + * called by HAL_ETH_Init() API. 
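+ * @note  Each descriptor is chained to the next through DESC3 (RCH set in DESC1) and the
+ *        last one points back to the first, so the DMA walks the list as a ring. The OWN
+ *        bit is set so the descriptors belong to the DMA from the start; buffer addresses
+ *        are expected to be attached later through the RxAllocateCallback.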
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +static void ETH_DMARxDescListInit(ETH_HandleTypeDef *heth) +{ + ETH_DMADescTypeDef *dmarxdesc; + uint32_t i; + + for (i = 0; i < (uint32_t)ETH_RX_DESC_CNT; i++) + { + dmarxdesc = heth->Init.RxDesc + i; + + WRITE_REG(dmarxdesc->DESC0, 0x0U); + WRITE_REG(dmarxdesc->DESC1, 0x0U); + WRITE_REG(dmarxdesc->DESC2, 0x0U); + WRITE_REG(dmarxdesc->DESC3, 0x0U); + WRITE_REG(dmarxdesc->BackupAddr0, 0x0U); + WRITE_REG(dmarxdesc->BackupAddr1, 0x0U); + + /* Set Own bit of the Rx descriptor Status */ + dmarxdesc->DESC0 = ETH_DMARXDESC_OWN; + + /* Set Buffer1 size and Second Address Chained bit */ + dmarxdesc->DESC1 = heth->Init.RxBuffLen | ETH_DMARXDESC_RCH; + + /* Enable Ethernet DMA Rx Descriptor interrupt */ + dmarxdesc->DESC1 &= ~ETH_DMARXDESC_DIC; + /* Set Rx descritors addresses */ + WRITE_REG(heth->RxDescList.RxDesc[i], (uint32_t)dmarxdesc); + + if (i < ((uint32_t)ETH_RX_DESC_CNT - 1U)) + { + WRITE_REG(dmarxdesc->DESC3, (uint32_t)(heth->Init.RxDesc + i + 1U)); + } + else + { + WRITE_REG(dmarxdesc->DESC3, (uint32_t)(heth->Init.RxDesc)); + } + } + + WRITE_REG(heth->RxDescList.RxDescIdx, 0U); + WRITE_REG(heth->RxDescList.RxDescCnt, 0U); + WRITE_REG(heth->RxDescList.RxBuildDescIdx, 0U); + WRITE_REG(heth->RxDescList.RxBuildDescCnt, 0U); + WRITE_REG(heth->RxDescList.ItMode, 0U); + + /* Set Receive Descriptor List Address */ + WRITE_REG(heth->Instance->DMARDLAR, (uint32_t) heth->Init.RxDesc); +} + +/** + * @brief Prepare Tx DMA descriptor before transmission. + * called by HAL_ETH_Transmit_IT and HAL_ETH_Transmit_IT() API. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param pTxConfig: Tx packet configuration + * @param ItMode: Enable or disable Tx EOT interrept + * @retval Status + */ +static uint32_t ETH_Prepare_Tx_Descriptors(ETH_HandleTypeDef *heth, const ETH_TxPacketConfigTypeDef *pTxConfig, + uint32_t ItMode) +{ + ETH_TxDescListTypeDef *dmatxdesclist = &heth->TxDescList; + uint32_t descidx = dmatxdesclist->CurTxDesc; + uint32_t firstdescidx = dmatxdesclist->CurTxDesc; + uint32_t idx; + uint32_t descnbr = 0; + ETH_DMADescTypeDef *dmatxdesc = (ETH_DMADescTypeDef *)dmatxdesclist->TxDesc[descidx]; + + ETH_BufferTypeDef *txbuffer = pTxConfig->TxBuffer; + uint32_t bd_count = 0; + uint32_t primask_bit; + + /* Current Tx Descriptor Owned by DMA: cannot be used by the application */ + if ((READ_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_OWN) == ETH_DMATXDESC_OWN) + || (dmatxdesclist->PacketAddress[descidx] != NULL)) + { + return HAL_ETH_ERROR_BUSY; + } + + + descnbr += 1U; + + /* Set header or buffer 1 address */ + WRITE_REG(dmatxdesc->DESC2, (uint32_t)txbuffer->buffer); + + /* Set header or buffer 1 Length */ + MODIFY_REG(dmatxdesc->DESC1, ETH_DMATXDESC_TBS1, txbuffer->len); + + if (READ_BIT(pTxConfig->Attributes, ETH_TX_PACKETS_FEATURES_CSUM) != 0U) + { + MODIFY_REG(dmatxdesc->DESC0, ETH_DMATXDESC_CIC, pTxConfig->ChecksumCtrl); + } + + if (READ_BIT(pTxConfig->Attributes, ETH_TX_PACKETS_FEATURES_CRCPAD) != 0U) + { + MODIFY_REG(dmatxdesc->DESC0, ETH_CRC_PAD_DISABLE, pTxConfig->CRCPadCtrl); + } + + + if (READ_BIT(pTxConfig->Attributes, ETH_TX_PACKETS_FEATURES_VLANTAG) != 0U) + { + /* Set Vlan Type */ + SET_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_VF); + } + + /* Mark it as First Descriptor */ + SET_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_FS); + + /* only if the packet is split into more than one 
descriptors > 1 */ + while (txbuffer->next != NULL) + { + /* Clear the LD bit of previous descriptor */ + CLEAR_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_LS); + if (ItMode != ((uint32_t)RESET)) + { + /* Set Interrupt on completion bit */ + SET_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_IC); + } + else + { + /* Clear Interrupt on completion bit */ + CLEAR_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_IC); + } + /* Increment current tx descriptor index */ + INCR_TX_DESC_INDEX(descidx, 1U); + /* Get current descriptor address */ + dmatxdesc = (ETH_DMADescTypeDef *)dmatxdesclist->TxDesc[descidx]; + + /* Current Tx Descriptor Owned by DMA: cannot be used by the application */ + if ((READ_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_OWN) == ETH_DMATXDESC_OWN) + || (dmatxdesclist->PacketAddress[descidx] != NULL)) + { + descidx = firstdescidx; + dmatxdesc = (ETH_DMADescTypeDef *)dmatxdesclist->TxDesc[descidx]; + + /* clear previous desc own bit */ + for (idx = 0; idx < descnbr; idx ++) + { + /* Ensure rest of descriptor is written to RAM before the OWN bit */ + __DMB(); + + CLEAR_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_OWN); + + /* Increment current tx descriptor index */ + INCR_TX_DESC_INDEX(descidx, 1U); + /* Get current descriptor address */ + dmatxdesc = (ETH_DMADescTypeDef *)dmatxdesclist->TxDesc[descidx]; + } + + return HAL_ETH_ERROR_BUSY; + } + + /* Clear the FD bit of new Descriptor */ + CLEAR_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_FS); + + descnbr += 1U; + + /* Get the next Tx buffer in the list */ + txbuffer = txbuffer->next; + + /* Set header or buffer 1 address */ + WRITE_REG(dmatxdesc->DESC2, (uint32_t)txbuffer->buffer); + + /* Set header or buffer 1 Length */ + MODIFY_REG(dmatxdesc->DESC1, ETH_DMATXDESC_TBS1, txbuffer->len); + + bd_count += 1U; + + /* Ensure rest of descriptor is written to RAM before the OWN bit */ + __DMB(); + /* Set Own bit */ + SET_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_OWN); + } + + if (ItMode != ((uint32_t)RESET)) + { + /* Set Interrupt on completion bit */ + SET_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_IC); + } + else + { + /* Clear Interrupt on completion bit */ + CLEAR_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_IC); + } + + /* Mark it as LAST descriptor */ + SET_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_LS); + + /* Get address of first descriptor */ + dmatxdesc = (ETH_DMADescTypeDef *)dmatxdesclist->TxDesc[firstdescidx]; + /* Ensure rest of descriptor is written to RAM before the OWN bit */ + __DMB(); + /* set OWN bit of FIRST descriptor */ + SET_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_OWN); + /* Save the current packet address to expose it to the application */ + dmatxdesclist->PacketAddress[descidx] = dmatxdesclist->CurrentPacketAddress; + + dmatxdesclist->CurTxDesc = descidx; + + /* Enter critical section */ + primask_bit = __get_PRIMASK(); + __set_PRIMASK(1); + + dmatxdesclist->BuffersInUse += bd_count + 1U; + + /* Exit critical section: restore previous priority mask */ + __set_PRIMASK(primask_bit); + + /* Return function status */ + return HAL_ETH_ERROR_NONE; +} + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) +static void ETH_InitCallbacksToDefault(ETH_HandleTypeDef *heth) +{ + /* Init the ETH Callback settings */ + heth->TxCpltCallback = HAL_ETH_TxCpltCallback; /* Legacy weak TxCpltCallback */ + heth->RxCpltCallback = HAL_ETH_RxCpltCallback; /* Legacy weak RxCpltCallback */ + heth->ErrorCallback = HAL_ETH_ErrorCallback; /* Legacy weak ErrorCallback */ + heth->PMTCallback = HAL_ETH_PMTCallback; /* Legacy weak PMTCallback */ + heth->WakeUpCallback = HAL_ETH_WakeUpCallback; /* Legacy weak WakeUpCallback */ + 
heth->rxLinkCallback = HAL_ETH_RxLinkCallback; /* Legacy weak RxLinkCallback */ + heth->txFreeCallback = HAL_ETH_TxFreeCallback; /* Legacy weak TxFreeCallback */ +#ifdef HAL_ETH_USE_PTP + heth->txPtpCallback = HAL_ETH_TxPtpCallback; /* Legacy weak TxPtpCallback */ +#endif /* HAL_ETH_USE_PTP */ + heth->rxAllocateCallback = HAL_ETH_RxAllocateCallback; /* Legacy weak RxAllocateCallback */ +} +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* ETH */ + +#endif /* HAL_ETH_MODULE_ENABLED */ + +/** + * @} + */ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_exti.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_exti.c new file mode 100644 index 0000000..3e46312 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_exti.c @@ -0,0 +1,553 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_exti.c + * @author MCD Application Team + * @brief EXTI HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Extended Interrupts and events controller (EXTI) peripheral: + * + Initialization and de-initialization functions + * + IO operation functions + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2018 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + @verbatim + ============================================================================== + ##### EXTI Peripheral features ##### + ============================================================================== + [..] + (+) Each Exti line can be configured within this driver. + + (+) Exti line can be configured in 3 different modes + (++) Interrupt + (++) Event + (++) Both of them + + (+) Configurable Exti lines can be configured with 3 different triggers + (++) Rising + (++) Falling + (++) Both of them + + (+) When set in interrupt mode, configurable Exti lines have two different + interrupts pending registers which allow to distinguish which transition + occurs: + (++) Rising edge pending interrupt + (++) Falling + + (+) Exti lines 0 to 15 are linked to gpio pin number 0 to 15. Gpio port can + be selected through multiplexer. + + ##### How to use this driver ##### + ============================================================================== + [..] + + (#) Configure the EXTI line using HAL_EXTI_SetConfigLine(). + (++) Choose the interrupt line number by setting "Line" member from + EXTI_ConfigTypeDef structure. + (++) Configure the interrupt and/or event mode using "Mode" member from + EXTI_ConfigTypeDef structure. + (++) For configurable lines, configure rising and/or falling trigger + "Trigger" member from EXTI_ConfigTypeDef structure. + (++) For Exti lines linked to gpio, choose gpio port using "GPIOSel" + member from GPIO_InitTypeDef structure. + + (#) Get current Exti configuration of a dedicated line using + HAL_EXTI_GetConfigLine(). + (++) Provide exiting handle as parameter. + (++) Provide pointer on EXTI_ConfigTypeDef structure as second parameter. + + (#) Clear Exti configuration of a dedicated line using HAL_EXTI_ClearConfigLine(). + (++) Provide exiting handle as parameter. 
+ + (#) Register callback to treat Exti interrupts using HAL_EXTI_RegisterCallback(). + (++) Provide exiting handle as first parameter. + (++) Provide which callback will be registered using one value from + EXTI_CallbackIDTypeDef. + (++) Provide callback function pointer. + + (#) Get interrupt pending bit using HAL_EXTI_GetPending(). + + (#) Clear interrupt pending bit using HAL_EXTI_ClearPending(). + + (#) Generate software interrupt using HAL_EXTI_GenerateSWI(). + + @endverbatim + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @addtogroup EXTI + * @{ + */ +/** MISRA C:2012 deviation rule has been granted for following rule: + * Rule-18.1_b - Medium: Array `EXTICR' 1st subscript interval [0,7] may be out + * of bounds [0,3] in following API : + * HAL_EXTI_SetConfigLine + * HAL_EXTI_GetConfigLine + * HAL_EXTI_ClearConfigLine + */ + +#ifdef HAL_EXTI_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private defines -----------------------------------------------------------*/ +/** @defgroup EXTI_Private_Constants EXTI Private Constants + * @{ + */ + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ + +/** @addtogroup EXTI_Exported_Functions + * @{ + */ + +/** @addtogroup EXTI_Exported_Functions_Group1 + * @brief Configuration functions + * +@verbatim + =============================================================================== + ##### Configuration functions ##### + =============================================================================== + +@endverbatim + * @{ + */ + +/** + * @brief Set configuration of a dedicated Exti line. + * @param hexti Exti handle. + * @param pExtiConfig Pointer on EXTI configuration to be set. + * @retval HAL Status. 
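+ * @note  Usage sketch (application side; EXTI_LINE_13, EXTI_GPIOC, etc. are the usual
+ *        definitions from stm32f4xx_hal_exti.h and are examples only):
+ * @code
+ *   EXTI_HandleTypeDef hexti_btn;
+ *   EXTI_ConfigTypeDef cfg;
+ *
+ *   cfg.Line    = EXTI_LINE_13;
+ *   cfg.Mode    = EXTI_MODE_INTERRUPT;
+ *   cfg.Trigger = EXTI_TRIGGER_FALLING;
+ *   cfg.GPIOSel = EXTI_GPIOC;
+ *   (void)HAL_EXTI_SetConfigLine(&hexti_btn, &cfg);
+ * @endcode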
+ */ +HAL_StatusTypeDef HAL_EXTI_SetConfigLine(EXTI_HandleTypeDef *hexti, EXTI_ConfigTypeDef *pExtiConfig) +{ + uint32_t regval; + uint32_t linepos; + uint32_t maskline; + + /* Check null pointer */ + if ((hexti == NULL) || (pExtiConfig == NULL)) + { + return HAL_ERROR; + } + + /* Check parameters */ + assert_param(IS_EXTI_LINE(pExtiConfig->Line)); + assert_param(IS_EXTI_MODE(pExtiConfig->Mode)); + + /* Assign line number to handle */ + hexti->Line = pExtiConfig->Line; + + /* Compute line mask */ + linepos = (pExtiConfig->Line & EXTI_PIN_MASK); + maskline = (1uL << linepos); + + /* Configure triggers for configurable lines */ + if ((pExtiConfig->Line & EXTI_CONFIG) != 0x00u) + { + assert_param(IS_EXTI_TRIGGER(pExtiConfig->Trigger)); + + /* Configure rising trigger */ + /* Mask or set line */ + if ((pExtiConfig->Trigger & EXTI_TRIGGER_RISING) != 0x00u) + { + EXTI->RTSR |= maskline; + } + else + { + EXTI->RTSR &= ~maskline; + } + + /* Configure falling trigger */ + /* Mask or set line */ + if ((pExtiConfig->Trigger & EXTI_TRIGGER_FALLING) != 0x00u) + { + EXTI->FTSR |= maskline; + } + else + { + EXTI->FTSR &= ~maskline; + } + + + /* Configure gpio port selection in case of gpio exti line */ + if ((pExtiConfig->Line & EXTI_GPIO) == EXTI_GPIO) + { + assert_param(IS_EXTI_GPIO_PORT(pExtiConfig->GPIOSel)); + assert_param(IS_EXTI_GPIO_PIN(linepos)); + + regval = SYSCFG->EXTICR[linepos >> 2u]; + regval &= ~(SYSCFG_EXTICR1_EXTI0 << (SYSCFG_EXTICR1_EXTI1_Pos * (linepos & 0x03u))); + regval |= (pExtiConfig->GPIOSel << (SYSCFG_EXTICR1_EXTI1_Pos * (linepos & 0x03u))); + SYSCFG->EXTICR[linepos >> 2u] = regval; + } + } + + /* Configure interrupt mode : read current mode */ + /* Mask or set line */ + if ((pExtiConfig->Mode & EXTI_MODE_INTERRUPT) != 0x00u) + { + EXTI->IMR |= maskline; + } + else + { + EXTI->IMR &= ~maskline; + } + + /* Configure event mode : read current mode */ + /* Mask or set line */ + if ((pExtiConfig->Mode & EXTI_MODE_EVENT) != 0x00u) + { + EXTI->EMR |= maskline; + } + else + { + EXTI->EMR &= ~maskline; + } + + return HAL_OK; +} + +/** + * @brief Get configuration of a dedicated Exti line. + * @param hexti Exti handle. + * @param pExtiConfig Pointer on structure to store Exti configuration. + * @retval HAL Status. 
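+ * @note  Read-back sketch (continuing the hexti_btn example above; illustration only):
+ * @code
+ *   EXTI_ConfigTypeDef current;
+ *   if (HAL_EXTI_GetConfigLine(&hexti_btn, &current) == HAL_OK)
+ *   {
+ *     // current.Mode, current.Trigger and current.GPIOSel now mirror the registers
+ *   }
+ * @endcode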
+ */ +HAL_StatusTypeDef HAL_EXTI_GetConfigLine(EXTI_HandleTypeDef *hexti, EXTI_ConfigTypeDef *pExtiConfig) +{ + uint32_t regval; + uint32_t linepos; + uint32_t maskline; + + /* Check null pointer */ + if ((hexti == NULL) || (pExtiConfig == NULL)) + { + return HAL_ERROR; + } + + /* Check the parameter */ + assert_param(IS_EXTI_LINE(hexti->Line)); + + /* Store handle line number to configuration structure */ + pExtiConfig->Line = hexti->Line; + + /* Compute line mask */ + linepos = (pExtiConfig->Line & EXTI_PIN_MASK); + maskline = (1uL << linepos); + + /* 1] Get core mode : interrupt */ + + /* Check if selected line is enable */ + if ((EXTI->IMR & maskline) != 0x00u) + { + pExtiConfig->Mode = EXTI_MODE_INTERRUPT; + } + else + { + pExtiConfig->Mode = EXTI_MODE_NONE; + } + + /* Get event mode */ + /* Check if selected line is enable */ + if ((EXTI->EMR & maskline) != 0x00u) + { + pExtiConfig->Mode |= EXTI_MODE_EVENT; + } + + /* Get default Trigger and GPIOSel configuration */ + pExtiConfig->Trigger = EXTI_TRIGGER_NONE; + pExtiConfig->GPIOSel = 0x00u; + + /* 2] Get trigger for configurable lines : rising */ + if ((pExtiConfig->Line & EXTI_CONFIG) != 0x00u) + { + /* Check if configuration of selected line is enable */ + if ((EXTI->RTSR & maskline) != 0x00u) + { + pExtiConfig->Trigger = EXTI_TRIGGER_RISING; + } + + /* Get falling configuration */ + /* Check if configuration of selected line is enable */ + if ((EXTI->FTSR & maskline) != 0x00u) + { + pExtiConfig->Trigger |= EXTI_TRIGGER_FALLING; + } + + /* Get Gpio port selection for gpio lines */ + if ((pExtiConfig->Line & EXTI_GPIO) == EXTI_GPIO) + { + assert_param(IS_EXTI_GPIO_PIN(linepos)); + + regval = SYSCFG->EXTICR[linepos >> 2u]; + pExtiConfig->GPIOSel = (regval >> (SYSCFG_EXTICR1_EXTI1_Pos * (linepos & 0x03u))) & SYSCFG_EXTICR1_EXTI0; + } + } + + return HAL_OK; +} + +/** + * @brief Clear whole configuration of a dedicated Exti line. + * @param hexti Exti handle. + * @retval HAL Status. + */ +HAL_StatusTypeDef HAL_EXTI_ClearConfigLine(EXTI_HandleTypeDef *hexti) +{ + uint32_t regval; + uint32_t linepos; + uint32_t maskline; + + /* Check null pointer */ + if (hexti == NULL) + { + return HAL_ERROR; + } + + /* Check the parameter */ + assert_param(IS_EXTI_LINE(hexti->Line)); + + /* compute line mask */ + linepos = (hexti->Line & EXTI_PIN_MASK); + maskline = (1uL << linepos); + + /* 1] Clear interrupt mode */ + EXTI->IMR = (EXTI->IMR & ~maskline); + + /* 2] Clear event mode */ + EXTI->EMR = (EXTI->EMR & ~maskline); + + /* 3] Clear triggers in case of configurable lines */ + if ((hexti->Line & EXTI_CONFIG) != 0x00u) + { + EXTI->RTSR = (EXTI->RTSR & ~maskline); + EXTI->FTSR = (EXTI->FTSR & ~maskline); + + /* Get Gpio port selection for gpio lines */ + if ((hexti->Line & EXTI_GPIO) == EXTI_GPIO) + { + assert_param(IS_EXTI_GPIO_PIN(linepos)); + + regval = SYSCFG->EXTICR[linepos >> 2u]; + regval &= ~(SYSCFG_EXTICR1_EXTI0 << (SYSCFG_EXTICR1_EXTI1_Pos * (linepos & 0x03u))); + SYSCFG->EXTICR[linepos >> 2u] = regval; + } + } + + return HAL_OK; +} + +/** + * @brief Register callback for a dedicated Exti line. + * @param hexti Exti handle. + * @param CallbackID User callback identifier. + * This parameter can be one of @arg @ref EXTI_CallbackIDTypeDef values. + * @param pPendingCbfn function pointer to be stored as callback. + * @retval HAL Status. 
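+ * @note  Sketch (hypothetical user callback; on this family EXTI lines 10..15 share the
+ *        EXTI15_10_IRQHandler vector):
+ * @code
+ *   static void Button_Pressed(void) { }   // user action goes here
+ *
+ *   HAL_EXTI_RegisterCallback(&hexti_btn, HAL_EXTI_COMMON_CB_ID, Button_Pressed);
+ *
+ *   void EXTI15_10_IRQHandler(void)
+ *   {
+ *     HAL_EXTI_IRQHandler(&hexti_btn);     // clears PR and invokes Button_Pressed
+ *   }
+ * @endcode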
+ */ +HAL_StatusTypeDef HAL_EXTI_RegisterCallback(EXTI_HandleTypeDef *hexti, EXTI_CallbackIDTypeDef CallbackID, void (*pPendingCbfn)(void)) +{ + HAL_StatusTypeDef status = HAL_OK; + + switch (CallbackID) + { + case HAL_EXTI_COMMON_CB_ID: + hexti->PendingCallback = pPendingCbfn; + break; + + default: + status = HAL_ERROR; + break; + } + + return status; +} + +/** + * @brief Store line number as handle private field. + * @param hexti Exti handle. + * @param ExtiLine Exti line number. + * This parameter can be from 0 to @ref EXTI_LINE_NB. + * @retval HAL Status. + */ +HAL_StatusTypeDef HAL_EXTI_GetHandle(EXTI_HandleTypeDef *hexti, uint32_t ExtiLine) +{ + /* Check the parameters */ + assert_param(IS_EXTI_LINE(ExtiLine)); + + /* Check null pointer */ + if (hexti == NULL) + { + return HAL_ERROR; + } + else + { + /* Store line number as handle private field */ + hexti->Line = ExtiLine; + + return HAL_OK; + } +} + +/** + * @} + */ + +/** @addtogroup EXTI_Exported_Functions_Group2 + * @brief EXTI IO functions. + * +@verbatim + =============================================================================== + ##### IO operation functions ##### + =============================================================================== + +@endverbatim + * @{ + */ + +/** + * @brief Handle EXTI interrupt request. + * @param hexti Exti handle. + * @retval none. + */ +void HAL_EXTI_IRQHandler(EXTI_HandleTypeDef *hexti) +{ + uint32_t regval; + uint32_t maskline; + + /* Compute line mask */ + maskline = (1uL << (hexti->Line & EXTI_PIN_MASK)); + + /* Get pending bit */ + regval = (EXTI->PR & maskline); + if (regval != 0x00u) + { + /* Clear pending bit */ + EXTI->PR = maskline; + + /* Call callback */ + if (hexti->PendingCallback != NULL) + { + hexti->PendingCallback(); + } + } +} + +/** + * @brief Get interrupt pending bit of a dedicated line. + * @param hexti Exti handle. + * @param Edge Specify which pending edge as to be checked. + * This parameter can be one of the following values: + * @arg @ref EXTI_TRIGGER_RISING_FALLING + * This parameter is kept for compatibility with other series. + * @retval 1 if interrupt is pending else 0. + */ +uint32_t HAL_EXTI_GetPending(EXTI_HandleTypeDef *hexti, uint32_t Edge) +{ + uint32_t regval; + uint32_t linepos; + uint32_t maskline; + + /* Prevent unused argument(s) compilation warning */ + UNUSED(Edge); + + /* Check parameters */ + assert_param(IS_EXTI_LINE(hexti->Line)); + assert_param(IS_EXTI_CONFIG_LINE(hexti->Line)); + assert_param(IS_EXTI_PENDING_EDGE(Edge)); + + /* Compute line mask */ + linepos = (hexti->Line & EXTI_PIN_MASK); + maskline = (1uL << linepos); + + /* return 1 if bit is set else 0 */ + regval = ((EXTI->PR & maskline) >> linepos); + return regval; +} + +/** + * @brief Clear interrupt pending bit of a dedicated line. + * @param hexti Exti handle. + * @param Edge Specify which pending edge as to be clear. + * This parameter can be one of the following values: + * @arg @ref EXTI_TRIGGER_RISING_FALLING + * This parameter is kept for compatibility with other series. + * @retval None. 
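+ * @note  On this series the PR register is write-1-to-clear, so writing the computed line
+ *        mask clears only the selected pending bit and leaves the other lines untouched.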
+ */ +void HAL_EXTI_ClearPending(EXTI_HandleTypeDef *hexti, uint32_t Edge) +{ + uint32_t maskline; + + /* Prevent unused argument(s) compilation warning */ + UNUSED(Edge); + + /* Check parameters */ + assert_param(IS_EXTI_LINE(hexti->Line)); + assert_param(IS_EXTI_CONFIG_LINE(hexti->Line)); + assert_param(IS_EXTI_PENDING_EDGE(Edge)); + + /* Compute line mask */ + maskline = (1uL << (hexti->Line & EXTI_PIN_MASK)); + + /* Clear Pending bit */ + EXTI->PR = maskline; +} + +/** + * @brief Generate a software interrupt for a dedicated line. + * @param hexti Exti handle. + * @retval None. + */ +void HAL_EXTI_GenerateSWI(EXTI_HandleTypeDef *hexti) +{ + uint32_t maskline; + + /* Check parameters */ + assert_param(IS_EXTI_LINE(hexti->Line)); + assert_param(IS_EXTI_CONFIG_LINE(hexti->Line)); + + /* Compute line mask */ + maskline = (1uL << (hexti->Line & EXTI_PIN_MASK)); + + /* Generate Software interrupt */ + EXTI->SWIER = maskline; +} + +/** + * @} + */ + +/** + * @} + */ + +#endif /* HAL_EXTI_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash.c new file mode 100644 index 0000000..39f19f2 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash.c @@ -0,0 +1,769 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_flash.c + * @author MCD Application Team + * @brief FLASH HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the internal FLASH memory: + * + Program operations functions + * + Memory Control functions + * + Peripheral Errors functions + * + @verbatim + ============================================================================== + ##### FLASH peripheral features ##### + ============================================================================== + + [..] The Flash memory interface manages CPU AHB I-Code and D-Code accesses + to the Flash memory. It implements the erase and program Flash memory operations + and the read and write protection mechanisms. + + [..] The Flash memory interface accelerates code execution with a system of instruction + prefetch and cache lines. + + [..] The FLASH main features are: + (+) Flash memory read operations + (+) Flash memory program/erase operations + (+) Read / write protections + (+) Prefetch on I-Code + (+) 64 cache lines of 128 bits on I-Code + (+) 8 cache lines of 128 bits on D-Code + + + ##### How to use this driver ##### + ============================================================================== + [..] + This driver provides functions and macros to configure and program the FLASH + memory of all STM32F4xx devices. + + (#) FLASH Memory IO Programming functions: + (++) Lock and Unlock the FLASH interface using HAL_FLASH_Unlock() and + HAL_FLASH_Lock() functions + (++) Program functions: byte, half word, word and double word + (++) There Two modes of programming : + (+++) Polling mode using HAL_FLASH_Program() function + (+++) Interrupt mode using HAL_FLASH_Program_IT() function + + (#) Interrupts and flags management functions : + (++) Handle FLASH interrupts by calling HAL_FLASH_IRQHandler() + (++) Wait for last FLASH operation according to its status + (++) Get error flag status by calling HAL_SetErrorCode() + + [..] 
+ In addition to these functions, this driver includes a set of macros allowing + to handle the following operations: + (+) Set the latency + (+) Enable/Disable the prefetch buffer + (+) Enable/Disable the Instruction cache and the Data cache + (+) Reset the Instruction cache and the Data cache + (+) Enable/Disable the FLASH interrupts + (+) Monitor the FLASH flags status + + @endverbatim + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @defgroup FLASH FLASH + * @brief FLASH HAL module driver + * @{ + */ + +#ifdef HAL_FLASH_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @addtogroup FLASH_Private_Constants + * @{ + */ +#define FLASH_TIMEOUT_VALUE 50000U /* 50 s */ +/** + * @} + */ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/** @addtogroup FLASH_Private_Variables + * @{ + */ +/* Variable used for Erase sectors under interruption */ +FLASH_ProcessTypeDef pFlash; +/** + * @} + */ + +/* Private function prototypes -----------------------------------------------*/ +/** @addtogroup FLASH_Private_Functions + * @{ + */ +/* Program operations */ +static void FLASH_Program_DoubleWord(uint32_t Address, uint64_t Data); +static void FLASH_Program_Word(uint32_t Address, uint32_t Data); +static void FLASH_Program_HalfWord(uint32_t Address, uint16_t Data); +static void FLASH_Program_Byte(uint32_t Address, uint8_t Data); +static void FLASH_SetErrorCode(void); + +HAL_StatusTypeDef FLASH_WaitForLastOperation(uint32_t Timeout); +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup FLASH_Exported_Functions FLASH Exported Functions + * @{ + */ + +/** @defgroup FLASH_Exported_Functions_Group1 Programming operation functions + * @brief Programming operation functions + * +@verbatim + =============================================================================== + ##### Programming operation functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to manage the FLASH + program operations. + +@endverbatim + * @{ + */ + +/** + * @brief Program byte, halfword, word or double word at a specified address + * @param TypeProgram Indicate the way to program at a specified address. + * This parameter can be a value of @ref FLASH_Type_Program + * @param Address specifies the address to be programmed. 
+ * @param Data specifies the data to be programmed + * + * @retval HAL_StatusTypeDef HAL Status + */ +HAL_StatusTypeDef HAL_FLASH_Program(uint32_t TypeProgram, uint32_t Address, uint64_t Data) +{ + HAL_StatusTypeDef status = HAL_ERROR; + + /* Process Locked */ + __HAL_LOCK(&pFlash); + + /* Check the parameters */ + assert_param(IS_FLASH_TYPEPROGRAM(TypeProgram)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if (status == HAL_OK) + { + if (TypeProgram == FLASH_TYPEPROGRAM_BYTE) + { + /*Program byte (8-bit) at a specified address.*/ + FLASH_Program_Byte(Address, (uint8_t) Data); + } + else if (TypeProgram == FLASH_TYPEPROGRAM_HALFWORD) + { + /*Program halfword (16-bit) at a specified address.*/ + FLASH_Program_HalfWord(Address, (uint16_t) Data); + } + else if (TypeProgram == FLASH_TYPEPROGRAM_WORD) + { + /*Program word (32-bit) at a specified address.*/ + FLASH_Program_Word(Address, (uint32_t) Data); + } + else + { + /*Program double word (64-bit) at a specified address.*/ + FLASH_Program_DoubleWord(Address, Data); + } + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + /* If the program operation is completed, disable the PG Bit */ + FLASH->CR &= (~FLASH_CR_PG); + } + + /* Process Unlocked */ + __HAL_UNLOCK(&pFlash); + + return status; +} + +/** + * @brief Program byte, halfword, word or double word at a specified address with interrupt enabled. + * @param TypeProgram Indicate the way to program at a specified address. + * This parameter can be a value of @ref FLASH_Type_Program + * @param Address specifies the address to be programmed. + * @param Data specifies the data to be programmed + * + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASH_Program_IT(uint32_t TypeProgram, uint32_t Address, uint64_t Data) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_FLASH_TYPEPROGRAM(TypeProgram)); + + /* Enable End of FLASH Operation interrupt */ + __HAL_FLASH_ENABLE_IT(FLASH_IT_EOP); + + /* Enable Error source interrupt */ + __HAL_FLASH_ENABLE_IT(FLASH_IT_ERR); + + pFlash.ProcedureOnGoing = FLASH_PROC_PROGRAM; + pFlash.Address = Address; + + if (TypeProgram == FLASH_TYPEPROGRAM_BYTE) + { + /*Program byte (8-bit) at a specified address.*/ + FLASH_Program_Byte(Address, (uint8_t) Data); + } + else if (TypeProgram == FLASH_TYPEPROGRAM_HALFWORD) + { + /*Program halfword (16-bit) at a specified address.*/ + FLASH_Program_HalfWord(Address, (uint16_t) Data); + } + else if (TypeProgram == FLASH_TYPEPROGRAM_WORD) + { + /*Program word (32-bit) at a specified address.*/ + FLASH_Program_Word(Address, (uint32_t) Data); + } + else + { + /*Program double word (64-bit) at a specified address.*/ + FLASH_Program_DoubleWord(Address, Data); + } + + return status; +} + +/** + * @brief This function handles FLASH interrupt request. 
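+ * @note  Expected to be called from the FLASH interrupt vector, e.g. in stm32f4xx_it.c
+ *        (sketch):
+ * @code
+ *   void FLASH_IRQHandler(void)
+ *   {
+ *     HAL_FLASH_IRQHandler();
+ *   }
+ * @endcode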
+ * @retval None + */ +void HAL_FLASH_IRQHandler(void) +{ + uint32_t addresstmp = 0U; + + /* Check FLASH operation error flags */ +#if defined(FLASH_SR_RDERR) + if (__HAL_FLASH_GET_FLAG((FLASH_FLAG_OPERR | FLASH_FLAG_WRPERR | FLASH_FLAG_PGAERR | \ + FLASH_FLAG_PGPERR | FLASH_FLAG_PGSERR | FLASH_FLAG_RDERR)) != RESET) +#else + if (__HAL_FLASH_GET_FLAG((FLASH_FLAG_OPERR | FLASH_FLAG_WRPERR | FLASH_FLAG_PGAERR | \ + FLASH_FLAG_PGPERR | FLASH_FLAG_PGSERR)) != RESET) +#endif /* FLASH_SR_RDERR */ + { + if (pFlash.ProcedureOnGoing == FLASH_PROC_SECTERASE) + { + /*return the faulty sector*/ + addresstmp = pFlash.Sector; + pFlash.Sector = 0xFFFFFFFFU; + } + else if (pFlash.ProcedureOnGoing == FLASH_PROC_MASSERASE) + { + /*return the faulty bank*/ + addresstmp = pFlash.Bank; + } + else + { + /*return the faulty address*/ + addresstmp = pFlash.Address; + } + + /*Save the Error code*/ + FLASH_SetErrorCode(); + + /* FLASH error interrupt user callback */ + HAL_FLASH_OperationErrorCallback(addresstmp); + + /*Stop the procedure ongoing*/ + pFlash.ProcedureOnGoing = FLASH_PROC_NONE; + } + + /* Check FLASH End of Operation flag */ + if (__HAL_FLASH_GET_FLAG(FLASH_FLAG_EOP) != RESET) + { + /* Clear FLASH End of Operation pending bit */ + __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_EOP); + + if (pFlash.ProcedureOnGoing == FLASH_PROC_SECTERASE) + { + /*Nb of sector to erased can be decreased*/ + pFlash.NbSectorsToErase--; + + /* Check if there are still sectors to erase*/ + if (pFlash.NbSectorsToErase != 0U) + { + addresstmp = pFlash.Sector; + /*Indicate user which sector has been erased*/ + HAL_FLASH_EndOfOperationCallback(addresstmp); + + /*Increment sector number*/ + pFlash.Sector++; + addresstmp = pFlash.Sector; + FLASH_Erase_Sector(addresstmp, pFlash.VoltageForErase); + } + else + { + /*No more sectors to Erase, user callback can be called.*/ + /*Reset Sector and stop Erase sectors procedure*/ + pFlash.Sector = addresstmp = 0xFFFFFFFFU; + pFlash.ProcedureOnGoing = FLASH_PROC_NONE; + + /* Flush the caches to be sure of the data consistency */ + FLASH_FlushCaches(); + + /* FLASH EOP interrupt user callback */ + HAL_FLASH_EndOfOperationCallback(addresstmp); + } + } + else + { + if (pFlash.ProcedureOnGoing == FLASH_PROC_MASSERASE) + { + /* MassErase ended. Return the selected bank */ + /* Flush the caches to be sure of the data consistency */ + FLASH_FlushCaches(); + + /* FLASH EOP interrupt user callback */ + HAL_FLASH_EndOfOperationCallback(pFlash.Bank); + } + else + { + /*Program ended. 
Return the selected address*/ + /* FLASH EOP interrupt user callback */ + HAL_FLASH_EndOfOperationCallback(pFlash.Address); + } + pFlash.ProcedureOnGoing = FLASH_PROC_NONE; + } + } + + if (pFlash.ProcedureOnGoing == FLASH_PROC_NONE) + { + /* Operation is completed, disable the PG, SER, SNB and MER Bits */ + CLEAR_BIT(FLASH->CR, (FLASH_CR_PG | FLASH_CR_SER | FLASH_CR_SNB | FLASH_MER_BIT)); + + /* Disable End of FLASH Operation interrupt */ + __HAL_FLASH_DISABLE_IT(FLASH_IT_EOP); + + /* Disable Error source interrupt */ + __HAL_FLASH_DISABLE_IT(FLASH_IT_ERR); + } +} + +/** + * @brief FLASH end of operation interrupt callback + * @param ReturnValue The value saved in this parameter depends on the ongoing procedure + * Mass Erase: Bank number which has been requested to erase + * Sectors Erase: Sector which has been erased + * (if 0xFFFFFFFFU, it means that all the selected sectors have been erased) + * Program: Address which was selected for data program + * @retval None + */ +__weak void HAL_FLASH_EndOfOperationCallback(uint32_t ReturnValue) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(ReturnValue); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_FLASH_EndOfOperationCallback could be implemented in the user file + */ +} + +/** + * @brief FLASH operation error interrupt callback + * @param ReturnValue The value saved in this parameter depends on the ongoing procedure + * Mass Erase: Bank number which has been requested to erase + * Sectors Erase: Sector number which returned an error + * Program: Address which was selected for data program + * @retval None + */ +__weak void HAL_FLASH_OperationErrorCallback(uint32_t ReturnValue) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(ReturnValue); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_FLASH_OperationErrorCallback could be implemented in the user file + */ +} + +/** + * @} + */ + +/** @defgroup FLASH_Exported_Functions_Group2 Peripheral Control functions + * @brief management functions + * +@verbatim + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to control the FLASH + memory operations. + +@endverbatim + * @{ + */ + +/** + * @brief Unlock the FLASH control register access + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASH_Unlock(void) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (READ_BIT(FLASH->CR, FLASH_CR_LOCK) != RESET) + { + /* Authorize the FLASH Registers access */ + WRITE_REG(FLASH->KEYR, FLASH_KEY1); + WRITE_REG(FLASH->KEYR, FLASH_KEY2); + + /* Verify Flash is unlocked */ + if (READ_BIT(FLASH->CR, FLASH_CR_LOCK) != RESET) + { + status = HAL_ERROR; + } + } + + return status; +} + +/** + * @brief Locks the FLASH control register access + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASH_Lock(void) +{ + /* Set the LOCK Bit to lock the FLASH Registers access */ + FLASH->CR |= FLASH_CR_LOCK; + + return HAL_OK; +} + +/** + * @brief Unlock the FLASH Option Control Registers access. 
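+ * @note  Typical option-byte update sequence (sketch; ob is an application-side
+ *        FLASH_OBProgramInitTypeDef filled beforehand, see HAL_FLASHEx_OBProgram()):
+ * @code
+ *   HAL_FLASH_OB_Unlock();
+ *   HAL_FLASHEx_OBProgram(&ob);   // program the requested option bytes
+ *   HAL_FLASH_OB_Launch();        // reload the option bytes
+ *   HAL_FLASH_OB_Lock();
+ * @endcode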
+ * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASH_OB_Unlock(void) +{ + if ((FLASH->OPTCR & FLASH_OPTCR_OPTLOCK) != RESET) + { + /* Authorizes the Option Byte register programming */ + FLASH->OPTKEYR = FLASH_OPT_KEY1; + FLASH->OPTKEYR = FLASH_OPT_KEY2; + } + else + { + return HAL_ERROR; + } + + return HAL_OK; +} + +/** + * @brief Lock the FLASH Option Control Registers access. + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASH_OB_Lock(void) +{ + /* Set the OPTLOCK Bit to lock the FLASH Option Byte Registers access */ + FLASH->OPTCR |= FLASH_OPTCR_OPTLOCK; + + return HAL_OK; +} + +/** + * @brief Launch the option byte loading. + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASH_OB_Launch(void) +{ + /* Set the OPTSTRT bit in OPTCR register */ + *(__IO uint8_t *)OPTCR_BYTE0_ADDRESS |= FLASH_OPTCR_OPTSTRT; + + /* Wait for last operation to be completed */ + return (FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE)); +} + +/** + * @} + */ + +/** @defgroup FLASH_Exported_Functions_Group3 Peripheral State and Errors functions + * @brief Peripheral Errors functions + * +@verbatim + =============================================================================== + ##### Peripheral Errors functions ##### + =============================================================================== + [..] + This subsection permits to get in run-time Errors of the FLASH peripheral. + +@endverbatim + * @{ + */ + +/** + * @brief Get the specific FLASH error flag. + * @retval FLASH_ErrorCode: The returned value can be a combination of: + * @arg HAL_FLASH_ERROR_RD: FLASH Read Protection error flag (PCROP) + * @arg HAL_FLASH_ERROR_PGS: FLASH Programming Sequence error flag + * @arg HAL_FLASH_ERROR_PGP: FLASH Programming Parallelism error flag + * @arg HAL_FLASH_ERROR_PGA: FLASH Programming Alignment error flag + * @arg HAL_FLASH_ERROR_WRP: FLASH Write protected error flag + * @arg HAL_FLASH_ERROR_OPERATION: FLASH operation Error flag + */ +uint32_t HAL_FLASH_GetError(void) +{ + return pFlash.ErrorCode; +} + +/** + * @} + */ + +/** + * @brief Wait for a FLASH operation to complete. + * @param Timeout maximum flash operationtimeout + * @retval HAL Status + */ +HAL_StatusTypeDef FLASH_WaitForLastOperation(uint32_t Timeout) +{ + uint32_t tickstart = 0U; + + /* Clear Error Code */ + pFlash.ErrorCode = HAL_FLASH_ERROR_NONE; + + /* Wait for the FLASH operation to complete by polling on BUSY flag to be reset. 
+ Even if the FLASH operation fails, the BUSY flag will be reset and an error + flag will be set */ + /* Get tick */ + tickstart = HAL_GetTick(); + + while (__HAL_FLASH_GET_FLAG(FLASH_FLAG_BSY) != RESET) + { + if (Timeout != HAL_MAX_DELAY) + { + if ((Timeout == 0U) || ((HAL_GetTick() - tickstart) > Timeout)) + { + return HAL_TIMEOUT; + } + } + } + + /* Check FLASH End of Operation flag */ + if (__HAL_FLASH_GET_FLAG(FLASH_FLAG_EOP) != RESET) + { + /* Clear FLASH End of Operation pending bit */ + __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_EOP); + } +#if defined(FLASH_SR_RDERR) + if (__HAL_FLASH_GET_FLAG((FLASH_FLAG_OPERR | FLASH_FLAG_WRPERR | FLASH_FLAG_PGAERR | \ + FLASH_FLAG_PGPERR | FLASH_FLAG_PGSERR | FLASH_FLAG_RDERR)) != RESET) +#else + if (__HAL_FLASH_GET_FLAG((FLASH_FLAG_OPERR | FLASH_FLAG_WRPERR | FLASH_FLAG_PGAERR | \ + FLASH_FLAG_PGPERR | FLASH_FLAG_PGSERR)) != RESET) +#endif /* FLASH_SR_RDERR */ + { + /*Save the error code*/ + FLASH_SetErrorCode(); + return HAL_ERROR; + } + + /* If there is no error flag set */ + return HAL_OK; + +} + +/** + * @brief Program a double word (64-bit) at a specified address. + * @note This function must be used when the device voltage range is from + * 2.7V to 3.6V and Vpp in the range 7V to 9V. + * + * @note If an erase and a program operations are requested simultaneously, + * the erase operation is performed before the program one. + * + * @param Address specifies the address to be programmed. + * @param Data specifies the data to be programmed. + * @retval None + */ +static void FLASH_Program_DoubleWord(uint32_t Address, uint64_t Data) +{ + /* Check the parameters */ + assert_param(IS_FLASH_ADDRESS(Address)); + + /* If the previous operation is completed, proceed to program the new data */ + CLEAR_BIT(FLASH->CR, FLASH_CR_PSIZE); + FLASH->CR |= FLASH_PSIZE_DOUBLE_WORD; + FLASH->CR |= FLASH_CR_PG; + + /* Program first word */ + *(__IO uint32_t *)Address = (uint32_t)Data; + + /* Barrier to ensure programming is performed in 2 steps, in right order + (independently of compiler optimization behavior) */ + __ISB(); + + /* Program second word */ + *(__IO uint32_t *)(Address + 4) = (uint32_t)(Data >> 32); +} + + +/** + * @brief Program word (32-bit) at a specified address. + * @note This function must be used when the device voltage range is from + * 2.7V to 3.6V. + * + * @note If an erase and a program operations are requested simultaneously, + * the erase operation is performed before the program one. + * + * @param Address specifies the address to be programmed. + * @param Data specifies the data to be programmed. + * @retval None + */ +static void FLASH_Program_Word(uint32_t Address, uint32_t Data) +{ + /* Check the parameters */ + assert_param(IS_FLASH_ADDRESS(Address)); + + /* If the previous operation is completed, proceed to program the new data */ + CLEAR_BIT(FLASH->CR, FLASH_CR_PSIZE); + FLASH->CR |= FLASH_PSIZE_WORD; + FLASH->CR |= FLASH_CR_PG; + + *(__IO uint32_t *)Address = Data; +} + +/** + * @brief Program a half-word (16-bit) at a specified address. + * @note This function must be used when the device voltage range is from + * 2.1V to 3.6V. + * + * @note If an erase and a program operations are requested simultaneously, + * the erase operation is performed before the program one. + * + * @param Address specifies the address to be programmed. + * @param Data specifies the data to be programmed. 
+ * @retval None + */ +static void FLASH_Program_HalfWord(uint32_t Address, uint16_t Data) +{ + /* Check the parameters */ + assert_param(IS_FLASH_ADDRESS(Address)); + + /* If the previous operation is completed, proceed to program the new data */ + CLEAR_BIT(FLASH->CR, FLASH_CR_PSIZE); + FLASH->CR |= FLASH_PSIZE_HALF_WORD; + FLASH->CR |= FLASH_CR_PG; + + *(__IO uint16_t *)Address = Data; +} + +/** + * @brief Program byte (8-bit) at a specified address. + * @note This function must be used when the device voltage range is from + * 1.8V to 3.6V. + * + * @note If an erase and a program operations are requested simultaneously, + * the erase operation is performed before the program one. + * + * @param Address specifies the address to be programmed. + * @param Data specifies the data to be programmed. + * @retval None + */ +static void FLASH_Program_Byte(uint32_t Address, uint8_t Data) +{ + /* Check the parameters */ + assert_param(IS_FLASH_ADDRESS(Address)); + + /* If the previous operation is completed, proceed to program the new data */ + CLEAR_BIT(FLASH->CR, FLASH_CR_PSIZE); + FLASH->CR |= FLASH_PSIZE_BYTE; + FLASH->CR |= FLASH_CR_PG; + + *(__IO uint8_t *)Address = Data; +} + +/** + * @brief Set the specific FLASH error flag. + * @retval None + */ +static void FLASH_SetErrorCode(void) +{ + if (__HAL_FLASH_GET_FLAG(FLASH_FLAG_WRPERR) != RESET) + { + pFlash.ErrorCode |= HAL_FLASH_ERROR_WRP; + + /* Clear FLASH write protection error pending bit */ + __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_WRPERR); + } + + if (__HAL_FLASH_GET_FLAG(FLASH_FLAG_PGAERR) != RESET) + { + pFlash.ErrorCode |= HAL_FLASH_ERROR_PGA; + + /* Clear FLASH Programming alignment error pending bit */ + __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_PGAERR); + } + + if (__HAL_FLASH_GET_FLAG(FLASH_FLAG_PGPERR) != RESET) + { + pFlash.ErrorCode |= HAL_FLASH_ERROR_PGP; + + /* Clear FLASH Programming parallelism error pending bit */ + __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_PGPERR); + } + + if (__HAL_FLASH_GET_FLAG(FLASH_FLAG_PGSERR) != RESET) + { + pFlash.ErrorCode |= HAL_FLASH_ERROR_PGS; + + /* Clear FLASH Programming sequence error pending bit */ + __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_PGSERR); + } +#if defined(FLASH_SR_RDERR) + if (__HAL_FLASH_GET_FLAG(FLASH_FLAG_RDERR) != RESET) + { + pFlash.ErrorCode |= HAL_FLASH_ERROR_RD; + + /* Clear FLASH Proprietary readout protection error pending bit */ + __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_RDERR); + } +#endif /* FLASH_SR_RDERR */ + if (__HAL_FLASH_GET_FLAG(FLASH_FLAG_OPERR) != RESET) + { + pFlash.ErrorCode |= HAL_FLASH_ERROR_OPERATION; + + /* Clear FLASH Operation error pending bit */ + __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_OPERR); + } +} + +/** + * @} + */ + +#endif /* HAL_FLASH_MODULE_ENABLED */ + +/** + * @} + */ + +/** + * @} + */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash_ex.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash_ex.c new file mode 100644 index 0000000..f919fea --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash_ex.c @@ -0,0 +1,1344 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_flash_ex.c + * @author MCD Application Team + * @brief Extended FLASH HAL module driver. 
+ * This file provides firmware functions to manage the following + * functionalities of the FLASH extension peripheral: + * + Extended programming operations functions + * + @verbatim + ============================================================================== + ##### Flash Extension features ##### + ============================================================================== + + [..] Comparing to other previous devices, the FLASH interface for STM32F427xx/437xx and + STM32F429xx/439xx devices contains the following additional features + + (+) Capacity up to 2 Mbyte with dual bank architecture supporting read-while-write + capability (RWW) + (+) Dual bank memory organization + (+) PCROP protection for all banks + + ##### How to use this driver ##### + ============================================================================== + [..] This driver provides functions to configure and program the FLASH memory + of all STM32F427xx/437xx, STM32F429xx/439xx, STM32F469xx/479xx and STM32F446xx + devices. It includes + (#) FLASH Memory Erase functions: + (++) Lock and Unlock the FLASH interface using HAL_FLASH_Unlock() and + HAL_FLASH_Lock() functions + (++) Erase function: Erase sector, erase all sectors + (++) There are two modes of erase : + (+++) Polling Mode using HAL_FLASHEx_Erase() + (+++) Interrupt Mode using HAL_FLASHEx_Erase_IT() + + (#) Option Bytes Programming functions: Use HAL_FLASHEx_OBProgram() to : + (++) Set/Reset the write protection + (++) Set the Read protection Level + (++) Set the BOR level + (++) Program the user Option Bytes + (#) Advanced Option Bytes Programming functions: Use HAL_FLASHEx_AdvOBProgram() to : + (++) Extended space (bank 2) erase function + (++) Full FLASH space (2 Mo) erase (bank 1 and bank 2) + (++) Dual Boot activation + (++) Write protection configuration for bank 2 + (++) PCROP protection configuration and control for both banks + + @endverbatim + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @defgroup FLASHEx FLASHEx + * @brief FLASH HAL Extension module driver + * @{ + */ + +#ifdef HAL_FLASH_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @addtogroup FLASHEx_Private_Constants + * @{ + */ +#define FLASH_TIMEOUT_VALUE 50000U /* 50 s */ +/** + * @} + */ + +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/** @addtogroup FLASHEx_Private_Variables + * @{ + */ +extern FLASH_ProcessTypeDef pFlash; +/** + * @} + */ + +/* Private function prototypes -----------------------------------------------*/ +/** @addtogroup FLASHEx_Private_Functions + * @{ + */ +/* Option bytes control */ +static void FLASH_MassErase(uint8_t VoltageRange, uint32_t Banks); +static HAL_StatusTypeDef FLASH_OB_EnableWRP(uint32_t WRPSector, uint32_t Banks); +static HAL_StatusTypeDef FLASH_OB_DisableWRP(uint32_t WRPSector, uint32_t Banks); +static HAL_StatusTypeDef FLASH_OB_RDP_LevelConfig(uint8_t Level); +static HAL_StatusTypeDef FLASH_OB_UserConfig(uint8_t Iwdg, uint8_t Stop, uint8_t Stdby); +static HAL_StatusTypeDef FLASH_OB_BOR_LevelConfig(uint8_t Level); +static uint8_t FLASH_OB_GetUser(void); +static uint16_t FLASH_OB_GetWRP(void); +static uint8_t FLASH_OB_GetRDP(void); +static uint8_t FLASH_OB_GetBOR(void); + +#if defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) || defined(STM32F411xE) ||\ + defined(STM32F446xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) ||\ + defined(STM32F423xx) +static HAL_StatusTypeDef FLASH_OB_EnablePCROP(uint32_t Sector); +static HAL_StatusTypeDef FLASH_OB_DisablePCROP(uint32_t Sector); +#endif /* STM32F401xC || STM32F401xE || STM32F410xx || STM32F411xE || STM32F446xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx + STM32F413xx || STM32F423xx */ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx)|| defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) +static HAL_StatusTypeDef FLASH_OB_EnablePCROP(uint32_t SectorBank1, uint32_t SectorBank2, uint32_t Banks); +static HAL_StatusTypeDef FLASH_OB_DisablePCROP(uint32_t SectorBank1, uint32_t SectorBank2, uint32_t Banks); +static HAL_StatusTypeDef FLASH_OB_BootConfig(uint8_t BootConfig); +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ + +extern HAL_StatusTypeDef FLASH_WaitForLastOperation(uint32_t Timeout); +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup FLASHEx_Exported_Functions FLASHEx Exported Functions + * @{ + */ + +/** @defgroup FLASHEx_Exported_Functions_Group1 Extended IO operation functions + * @brief Extended IO operation functions + * +@verbatim + =============================================================================== + ##### Extended programming operation functions ##### + =============================================================================== + [..] 
+ This subsection provides a set of functions allowing to manage the Extension FLASH + programming operations. + +@endverbatim + * @{ + */ +/** + * @brief Perform a mass erase or erase the specified FLASH memory sectors + * @param[in] pEraseInit pointer to an FLASH_EraseInitTypeDef structure that + * contains the configuration information for the erasing. + * + * @param[out] SectorError pointer to variable that + * contains the configuration information on faulty sector in case of error + * (0xFFFFFFFFU means that all the sectors have been correctly erased) + * + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASHEx_Erase(FLASH_EraseInitTypeDef *pEraseInit, uint32_t *SectorError) +{ + HAL_StatusTypeDef status = HAL_ERROR; + uint32_t index = 0U; + + /* Process Locked */ + __HAL_LOCK(&pFlash); + + /* Check the parameters */ + assert_param(IS_FLASH_TYPEERASE(pEraseInit->TypeErase)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if (status == HAL_OK) + { + /*Initialization of SectorError variable*/ + *SectorError = 0xFFFFFFFFU; + + if (pEraseInit->TypeErase == FLASH_TYPEERASE_MASSERASE) + { + /*Mass erase to be done*/ + FLASH_MassErase((uint8_t) pEraseInit->VoltageRange, pEraseInit->Banks); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + /* if the erase operation is completed, disable the MER Bit */ + FLASH->CR &= (~FLASH_MER_BIT); + } + else + { + /* Check the parameters */ + assert_param(IS_FLASH_NBSECTORS(pEraseInit->NbSectors + pEraseInit->Sector)); + + /* Erase by sector by sector to be done*/ + for (index = pEraseInit->Sector; index < (pEraseInit->NbSectors + pEraseInit->Sector); index++) + { + FLASH_Erase_Sector(index, (uint8_t) pEraseInit->VoltageRange); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + /* If the erase operation is completed, disable the SER and SNB Bits */ + CLEAR_BIT(FLASH->CR, (FLASH_CR_SER | FLASH_CR_SNB)); + + if (status != HAL_OK) + { + /* In case of error, stop erase procedure and return the faulty sector*/ + *SectorError = index; + break; + } + } + } + /* Flush the caches to be sure of the data consistency */ + FLASH_FlushCaches(); + } + + /* Process Unlocked */ + __HAL_UNLOCK(&pFlash); + + return status; +} + +/** + * @brief Perform a mass erase or erase the specified FLASH memory sectors with interrupt enabled + * @param pEraseInit pointer to an FLASH_EraseInitTypeDef structure that + * contains the configuration information for the erasing. 
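+ * @note  Usage sketch (sector and voltage values are examples; the FLASH global interrupt
+ *        must be enabled and completion is reported through
+ *        HAL_FLASH_EndOfOperationCallback()/HAL_FLASH_OperationErrorCallback()):
+ * @code
+ *   FLASH_EraseInitTypeDef erase;
+ *
+ *   erase.TypeErase    = FLASH_TYPEERASE_SECTORS;
+ *   erase.Sector       = FLASH_SECTOR_5;
+ *   erase.NbSectors    = 2U;
+ *   erase.VoltageRange = FLASH_VOLTAGE_RANGE_3;
+ *
+ *   HAL_NVIC_EnableIRQ(FLASH_IRQn);
+ *   HAL_FLASH_Unlock();
+ *   (void)HAL_FLASHEx_Erase_IT(&erase);
+ * @endcode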
+ * + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASHEx_Erase_IT(FLASH_EraseInitTypeDef *pEraseInit) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_FLASH_TYPEERASE(pEraseInit->TypeErase)); + + /* Enable End of FLASH Operation interrupt */ + __HAL_FLASH_ENABLE_IT(FLASH_IT_EOP); + + /* Enable Error source interrupt */ + __HAL_FLASH_ENABLE_IT(FLASH_IT_ERR); + + /* Clear pending flags (if any) */ + __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_EOP | FLASH_FLAG_OPERR | FLASH_FLAG_WRPERR | \ + FLASH_FLAG_PGAERR | FLASH_FLAG_PGPERR | FLASH_FLAG_PGSERR); + + if (pEraseInit->TypeErase == FLASH_TYPEERASE_MASSERASE) + { + /*Mass erase to be done*/ + pFlash.ProcedureOnGoing = FLASH_PROC_MASSERASE; + pFlash.Bank = pEraseInit->Banks; + FLASH_MassErase((uint8_t) pEraseInit->VoltageRange, pEraseInit->Banks); + } + else + { + /* Erase by sector to be done*/ + + /* Check the parameters */ + assert_param(IS_FLASH_NBSECTORS(pEraseInit->NbSectors + pEraseInit->Sector)); + + pFlash.ProcedureOnGoing = FLASH_PROC_SECTERASE; + pFlash.NbSectorsToErase = pEraseInit->NbSectors; + pFlash.Sector = pEraseInit->Sector; + pFlash.VoltageForErase = (uint8_t)pEraseInit->VoltageRange; + + /*Erase 1st sector and wait for IT*/ + FLASH_Erase_Sector(pEraseInit->Sector, pEraseInit->VoltageRange); + } + + return status; +} + +/** + * @brief Program option bytes + * @param pOBInit pointer to an FLASH_OBInitStruct structure that + * contains the configuration information for the programming. + * + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASHEx_OBProgram(FLASH_OBProgramInitTypeDef *pOBInit) +{ + HAL_StatusTypeDef status = HAL_ERROR; + + /* Process Locked */ + __HAL_LOCK(&pFlash); + + /* Check the parameters */ + assert_param(IS_OPTIONBYTE(pOBInit->OptionType)); + + /*Write protection configuration*/ + if ((pOBInit->OptionType & OPTIONBYTE_WRP) == OPTIONBYTE_WRP) + { + assert_param(IS_WRPSTATE(pOBInit->WRPState)); + if (pOBInit->WRPState == OB_WRPSTATE_ENABLE) + { + /*Enable of Write protection on the selected Sector*/ + status = FLASH_OB_EnableWRP(pOBInit->WRPSector, pOBInit->Banks); + } + else + { + /*Disable of Write protection on the selected Sector*/ + status = FLASH_OB_DisableWRP(pOBInit->WRPSector, pOBInit->Banks); + } + } + + /*Read protection configuration*/ + if ((pOBInit->OptionType & OPTIONBYTE_RDP) == OPTIONBYTE_RDP) + { + status = FLASH_OB_RDP_LevelConfig(pOBInit->RDPLevel); + } + + /*USER configuration*/ + if ((pOBInit->OptionType & OPTIONBYTE_USER) == OPTIONBYTE_USER) + { + status = FLASH_OB_UserConfig(pOBInit->USERConfig & OB_IWDG_SW, + pOBInit->USERConfig & OB_STOP_NO_RST, + pOBInit->USERConfig & OB_STDBY_NO_RST); + } + + /*BOR Level configuration*/ + if ((pOBInit->OptionType & OPTIONBYTE_BOR) == OPTIONBYTE_BOR) + { + status = FLASH_OB_BOR_LevelConfig(pOBInit->BORLevel); + } + + /* Process Unlocked */ + __HAL_UNLOCK(&pFlash); + + return status; +} + +/** + * @brief Get the Option byte configuration + * @param pOBInit pointer to an FLASH_OBInitStruct structure that + * contains the configuration information for the programming. 
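+ * @note  Read-back sketch (OB_RDP_LEVEL_0 is the standard HAL define; illustration only):
+ * @code
+ *   FLASH_OBProgramInitTypeDef ob;
+ *
+ *   HAL_FLASHEx_OBGetConfig(&ob);
+ *   if (ob.RDPLevel != OB_RDP_LEVEL_0)
+ *   {
+ *     // device readout protection is active
+ *   }
+ * @endcode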
+ * + * @retval None + */ +void HAL_FLASHEx_OBGetConfig(FLASH_OBProgramInitTypeDef *pOBInit) +{ + pOBInit->OptionType = OPTIONBYTE_WRP | OPTIONBYTE_RDP | OPTIONBYTE_USER | OPTIONBYTE_BOR; + + /*Get WRP*/ + pOBInit->WRPSector = (uint32_t)FLASH_OB_GetWRP(); + + /*Get RDP Level*/ + pOBInit->RDPLevel = (uint32_t)FLASH_OB_GetRDP(); + + /*Get USER*/ + pOBInit->USERConfig = (uint8_t)FLASH_OB_GetUser(); + + /*Get BOR Level*/ + pOBInit->BORLevel = (uint32_t)FLASH_OB_GetBOR(); +} + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) ||\ + defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F410Tx) || defined(STM32F410Cx) ||\ + defined(STM32F410Rx) || defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F469xx) ||\ + defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) ||\ + defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +/** + * @brief Program option bytes + * @param pAdvOBInit pointer to an FLASH_AdvOBProgramInitTypeDef structure that + * contains the configuration information for the programming. + * + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASHEx_AdvOBProgram(FLASH_AdvOBProgramInitTypeDef *pAdvOBInit) +{ + HAL_StatusTypeDef status = HAL_ERROR; + + /* Check the parameters */ + assert_param(IS_OBEX(pAdvOBInit->OptionType)); + + /*Program PCROP option byte*/ + if (((pAdvOBInit->OptionType) & OPTIONBYTE_PCROP) == OPTIONBYTE_PCROP) + { + /* Check the parameters */ + assert_param(IS_PCROPSTATE(pAdvOBInit->PCROPState)); + if ((pAdvOBInit->PCROPState) == OB_PCROP_STATE_ENABLE) + { + /*Enable of Write protection on the selected Sector*/ +#if defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) ||\ + defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) ||\ + defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) + status = FLASH_OB_EnablePCROP(pAdvOBInit->Sectors); +#else /* STM32F427xx || STM32F437xx || STM32F429xx|| STM32F439xx || STM32F469xx || STM32F479xx */ + status = FLASH_OB_EnablePCROP(pAdvOBInit->SectorsBank1, pAdvOBInit->SectorsBank2, pAdvOBInit->Banks); +#endif /* STM32F401xC || STM32F401xE || STM32F410xx || STM32F411xE || STM32F446xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || + STM32F413xx || STM32F423xx */ + } + else + { + /*Disable of Write protection on the selected Sector*/ +#if defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) ||\ + defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) ||\ + defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) + status = FLASH_OB_DisablePCROP(pAdvOBInit->Sectors); +#else /* STM32F427xx || STM32F437xx || STM32F429xx|| STM32F439xx || STM32F469xx || STM32F479xx */ + status = FLASH_OB_DisablePCROP(pAdvOBInit->SectorsBank1, pAdvOBInit->SectorsBank2, pAdvOBInit->Banks); +#endif /* STM32F401xC || STM32F401xE || STM32F410xx || STM32F411xE || STM32F446xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || + STM32F413xx || STM32F423xx */ + } + } + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) + /*Program BOOT config option byte*/ + if (((pAdvOBInit->OptionType) & OPTIONBYTE_BOOTCONFIG) == OPTIONBYTE_BOOTCONFIG) + { + 
status = FLASH_OB_BootConfig(pAdvOBInit->BootConfig);
+  }
+#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */
+
+  return status;
+}
+
+/**
+  * @brief  Get the OBEX byte configuration
+  * @param  pAdvOBInit pointer to a FLASH_AdvOBProgramInitTypeDef structure that
+  *         contains the configuration information for the programming.
+  *
+  * @retval None
+  */
+void HAL_FLASHEx_AdvOBGetConfig(FLASH_AdvOBProgramInitTypeDef *pAdvOBInit)
+{
+#if defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) ||\
+    defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) ||\
+    defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx)
+  /*Get Sector*/
+  pAdvOBInit->Sectors = (*(__IO uint16_t *)(OPTCR_BYTE2_ADDRESS));
+#else /* STM32F427xx || STM32F437xx || STM32F429xx|| STM32F439xx || STM32F469xx || STM32F479xx */
+  /*Get Sector for Bank1*/
+  pAdvOBInit->SectorsBank1 = (*(__IO uint16_t *)(OPTCR_BYTE2_ADDRESS));
+
+  /*Get Sector for Bank2*/
+  pAdvOBInit->SectorsBank2 = (*(__IO uint16_t *)(OPTCR1_BYTE2_ADDRESS));
+
+  /*Get Boot config OB*/
+  pAdvOBInit->BootConfig = *(__IO uint8_t *)OPTCR_BYTE0_ADDRESS;
+#endif /* STM32F401xC || STM32F401xE || STM32F410xx || STM32F411xE || STM32F446xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx ||
+          STM32F413xx || STM32F423xx */
+}
+
+/**
+  * @brief  Select the Protection Mode
+  *
+  * @note   Once PCROP is activated, option byte modification is no longer possible,
+  *         except for the global read-out protection (from Level 1 back to Level 0).
+  * @note   Once the SPRMOD bit is active, a protected sector can no longer be unprotected.
+  * @note   Reading a protected sector sets the RDERR flag and writing a protected sector
+  *         sets the WRPERR flag.
+  * @note   This function can be used only for STM32F42xxx/STM32F43xxx/STM32F401xx/STM32F411xx/STM32F446xx/
+  *         STM32F469xx/STM32F479xx/STM32F412xx/STM32F413xx devices.
+  *
+  * @retval HAL Status
+  */
+HAL_StatusTypeDef HAL_FLASHEx_OB_SelectPCROP(void)
+{
+  uint8_t optiontmp = 0xFF;
+
+  /* Mask SPRMOD bit */
+  optiontmp = (uint8_t)((*(__IO uint8_t *)OPTCR_BYTE3_ADDRESS) & (uint8_t)0x7F);
+
+  /* Update Option Byte */
+  *(__IO uint8_t *)OPTCR_BYTE3_ADDRESS = (uint8_t)(OB_PCROP_SELECTED | optiontmp);
+
+  return HAL_OK;
+}
+
+/**
+  * @brief  Deselect the Protection Mode
+  *
+  * @note   Once PCROP is activated, option byte modification is no longer possible,
+  *         except for the global read-out protection (from Level 1 back to Level 0).
+  * @note   Once the SPRMOD bit is active, a protected sector can no longer be unprotected.
+  * @note   Reading a protected sector sets the RDERR flag and writing a protected sector
+  *         sets the WRPERR flag.
+  * @note   This function can be used only for STM32F42xxx/STM32F43xxx/STM32F401xx/STM32F411xx/STM32F446xx/
+  *         STM32F469xx/STM32F479xx/STM32F412xx/STM32F413xx devices.
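+  *
+  * @note   Illustrative sequence sketch only (not part of the original driver); the
+  *         sector value is an assumption, the field and macro names differ between
+  *         single-bank and dual-bank devices (see HAL_FLASHEx_AdvOBProgram), and the
+  *         exact ordering should be checked against the device reference manual:
+  * @code
+  *   FLASH_AdvOBProgramInitTypeDef adv_ob = {0};
+  *
+  *   HAL_FLASH_OB_Unlock();
+  *   HAL_FLASHEx_OB_SelectPCROP();                 /* activate SPRMOD            */
+  *
+  *   adv_ob.OptionType = OPTIONBYTE_PCROP;
+  *   adv_ob.PCROPState = OB_PCROP_STATE_ENABLE;
+  *   adv_ob.Sectors    = OB_PCROP_Sector0;         /* hypothetical, single-bank  */
+  *   HAL_FLASHEx_AdvOBProgram(&adv_ob);
+  *
+  *   HAL_FLASH_OB_Launch();
+  *   HAL_FLASH_OB_Lock();
+  * @endcode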
+ * + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASHEx_OB_DeSelectPCROP(void) +{ + uint8_t optiontmp = 0xFF; + + /* Mask SPRMOD bit */ + optiontmp = (uint8_t)((*(__IO uint8_t *)OPTCR_BYTE3_ADDRESS) & (uint8_t)0x7F); + + /* Update Option Byte */ + *(__IO uint8_t *)OPTCR_BYTE3_ADDRESS = (uint8_t)(OB_PCROP_DESELECTED | optiontmp); + + return HAL_OK; +} +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F401xC || STM32F401xE || STM32F410xx ||\ + STM32F411xE || STM32F469xx || STM32F479xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || + STM32F413xx || STM32F423xx */ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx)|| defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) +/** + * @brief Returns the FLASH Write Protection Option Bytes value for Bank 2 + * @note This function can be used only for STM32F42xxx/STM32F43xxx/STM32F469xx/STM32F479xx devices. + * @retval The FLASH Write Protection Option Bytes value + */ +uint16_t HAL_FLASHEx_OB_GetBank2WRP(void) +{ + /* Return the FLASH write protection Register value */ + return (*(__IO uint16_t *)(OPTCR1_BYTE2_ADDRESS)); +} +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ + +/** + * @} + */ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) +/** + * @brief Full erase of FLASH memory sectors + * @param VoltageRange The device voltage range which defines the erase parallelism. + * This parameter can be one of the following values: + * @arg FLASH_VOLTAGE_RANGE_1: when the device voltage range is 1.8V to 2.1V, + * the operation will be done by byte (8-bit) + * @arg FLASH_VOLTAGE_RANGE_2: when the device voltage range is 2.1V to 2.7V, + * the operation will be done by half word (16-bit) + * @arg FLASH_VOLTAGE_RANGE_3: when the device voltage range is 2.7V to 3.6V, + * the operation will be done by word (32-bit) + * @arg FLASH_VOLTAGE_RANGE_4: when the device voltage range is 2.7V to 3.6V + External Vpp, + * the operation will be done by double word (64-bit) + * + * @param Banks Banks to be erased + * This parameter can be one of the following values: + * @arg FLASH_BANK_1: Bank1 to be erased + * @arg FLASH_BANK_2: Bank2 to be erased + * @arg FLASH_BANK_BOTH: Bank1 and Bank2 to be erased + * + * @retval HAL Status + */ +static void FLASH_MassErase(uint8_t VoltageRange, uint32_t Banks) +{ + /* Check the parameters */ + assert_param(IS_VOLTAGERANGE(VoltageRange)); + assert_param(IS_FLASH_BANK(Banks)); + + /* if the previous operation is completed, proceed to erase all sectors */ + CLEAR_BIT(FLASH->CR, FLASH_CR_PSIZE); + + if (Banks == FLASH_BANK_BOTH) + { + /* bank1 & bank2 will be erased*/ + FLASH->CR |= FLASH_MER_BIT; + } + else if (Banks == FLASH_BANK_1) + { + /*Only bank1 will be erased*/ + FLASH->CR |= FLASH_CR_MER1; + } + else + { + /*Only bank2 will be erased*/ + FLASH->CR |= FLASH_CR_MER2; + } + FLASH->CR |= FLASH_CR_STRT | ((uint32_t)VoltageRange << 8U); +} + +/** + * @brief Erase the specified FLASH memory sector + * @param Sector FLASH sector to erase + * The value of this parameter depend on device used within the same series + * @param VoltageRange The device voltage range which defines the erase parallelism. 
+ * This parameter can be one of the following values: + * @arg FLASH_VOLTAGE_RANGE_1: when the device voltage range is 1.8V to 2.1V, + * the operation will be done by byte (8-bit) + * @arg FLASH_VOLTAGE_RANGE_2: when the device voltage range is 2.1V to 2.7V, + * the operation will be done by half word (16-bit) + * @arg FLASH_VOLTAGE_RANGE_3: when the device voltage range is 2.7V to 3.6V, + * the operation will be done by word (32-bit) + * @arg FLASH_VOLTAGE_RANGE_4: when the device voltage range is 2.7V to 3.6V + External Vpp, + * the operation will be done by double word (64-bit) + * + * @retval None + */ +void FLASH_Erase_Sector(uint32_t Sector, uint8_t VoltageRange) +{ + uint32_t tmp_psize = 0U; + + /* Check the parameters */ + assert_param(IS_FLASH_SECTOR(Sector)); + assert_param(IS_VOLTAGERANGE(VoltageRange)); + + if (VoltageRange == FLASH_VOLTAGE_RANGE_1) + { + tmp_psize = FLASH_PSIZE_BYTE; + } + else if (VoltageRange == FLASH_VOLTAGE_RANGE_2) + { + tmp_psize = FLASH_PSIZE_HALF_WORD; + } + else if (VoltageRange == FLASH_VOLTAGE_RANGE_3) + { + tmp_psize = FLASH_PSIZE_WORD; + } + else + { + tmp_psize = FLASH_PSIZE_DOUBLE_WORD; + } + + /* Need to add offset of 4 when sector higher than FLASH_SECTOR_11 */ + if (Sector > FLASH_SECTOR_11) + { + Sector += 4U; + } + /* If the previous operation is completed, proceed to erase the sector */ + CLEAR_BIT(FLASH->CR, FLASH_CR_PSIZE); + FLASH->CR |= tmp_psize; + CLEAR_BIT(FLASH->CR, FLASH_CR_SNB); + FLASH->CR |= FLASH_CR_SER | (Sector << FLASH_CR_SNB_Pos); + FLASH->CR |= FLASH_CR_STRT; +} + +/** + * @brief Enable the write protection of the desired bank1 or bank 2 sectors + * + * @note When the memory read protection level is selected (RDP level = 1), + * it is not possible to program or erase the flash sector i if CortexM4 + * debug features are connected or boot code is executed in RAM, even if nWRPi = 1 + * @note Active value of nWRPi bits is inverted when PCROP mode is active (SPRMOD =1). + * + * @param WRPSector specifies the sector(s) to be write protected. 
+ * This parameter can be one of the following values: + * @arg WRPSector: A value between OB_WRP_SECTOR_0 and OB_WRP_SECTOR_23 + * @arg OB_WRP_SECTOR_All + * @note BANK2 starts from OB_WRP_SECTOR_12 + * + * @param Banks Enable write protection on all the sectors for the specific bank + * This parameter can be one of the following values: + * @arg FLASH_BANK_1: WRP on all sectors of bank1 + * @arg FLASH_BANK_2: WRP on all sectors of bank2 + * @arg FLASH_BANK_BOTH: WRP on all sectors of bank1 & bank2 + * + * @retval HAL FLASH State + */ +static HAL_StatusTypeDef FLASH_OB_EnableWRP(uint32_t WRPSector, uint32_t Banks) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_WRP_SECTOR(WRPSector)); + assert_param(IS_FLASH_BANK(Banks)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if (status == HAL_OK) + { + if (((WRPSector == OB_WRP_SECTOR_All) && ((Banks == FLASH_BANK_1) || (Banks == FLASH_BANK_BOTH))) || + (WRPSector < OB_WRP_SECTOR_12)) + { + if (WRPSector == OB_WRP_SECTOR_All) + { + /*Write protection on all sector of BANK1*/ + *(__IO uint16_t *)OPTCR_BYTE2_ADDRESS &= (~(WRPSector >> 12)); + } + else + { + /*Write protection done on sectors of BANK1*/ + *(__IO uint16_t *)OPTCR_BYTE2_ADDRESS &= (~WRPSector); + } + } + else + { + /*Write protection done on sectors of BANK2*/ + *(__IO uint16_t *)OPTCR1_BYTE2_ADDRESS &= (~(WRPSector >> 12)); + } + + /*Write protection on all sector of BANK2*/ + if ((WRPSector == OB_WRP_SECTOR_All) && (Banks == FLASH_BANK_BOTH)) + { + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if (status == HAL_OK) + { + *(__IO uint16_t *)OPTCR1_BYTE2_ADDRESS &= (~(WRPSector >> 12)); + } + } + + } + return status; +} + +/** + * @brief Disable the write protection of the desired bank1 or bank 2 sectors + * + * @note When the memory read protection level is selected (RDP level = 1), + * it is not possible to program or erase the flash sector i if CortexM4 + * debug features are connected or boot code is executed in RAM, even if nWRPi = 1 + * @note Active value of nWRPi bits is inverted when PCROP mode is active (SPRMOD =1). + * + * @param WRPSector specifies the sector(s) to be write protected. 
+ * This parameter can be one of the following values: + * @arg WRPSector: A value between OB_WRP_SECTOR_0 and OB_WRP_SECTOR_23 + * @arg OB_WRP_Sector_All + * @note BANK2 starts from OB_WRP_SECTOR_12 + * + * @param Banks Disable write protection on all the sectors for the specific bank + * This parameter can be one of the following values: + * @arg FLASH_BANK_1: Bank1 to be erased + * @arg FLASH_BANK_2: Bank2 to be erased + * @arg FLASH_BANK_BOTH: Bank1 and Bank2 to be erased + * + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_DisableWRP(uint32_t WRPSector, uint32_t Banks) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_WRP_SECTOR(WRPSector)); + assert_param(IS_FLASH_BANK(Banks)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if (status == HAL_OK) + { + if (((WRPSector == OB_WRP_SECTOR_All) && ((Banks == FLASH_BANK_1) || (Banks == FLASH_BANK_BOTH))) || + (WRPSector < OB_WRP_SECTOR_12)) + { + if (WRPSector == OB_WRP_SECTOR_All) + { + /*Write protection on all sector of BANK1*/ + *(__IO uint16_t *)OPTCR_BYTE2_ADDRESS |= (uint16_t)(WRPSector >> 12); + } + else + { + /*Write protection done on sectors of BANK1*/ + *(__IO uint16_t *)OPTCR_BYTE2_ADDRESS |= (uint16_t)WRPSector; + } + } + else + { + /*Write protection done on sectors of BANK2*/ + *(__IO uint16_t *)OPTCR1_BYTE2_ADDRESS |= (uint16_t)(WRPSector >> 12); + } + + /*Write protection on all sector of BANK2*/ + if ((WRPSector == OB_WRP_SECTOR_All) && (Banks == FLASH_BANK_BOTH)) + { + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if (status == HAL_OK) + { + *(__IO uint16_t *)OPTCR1_BYTE2_ADDRESS |= (uint16_t)(WRPSector >> 12); + } + } + + } + + return status; +} + +/** + * @brief Configure the Dual Bank Boot. + * + * @note This function can be used only for STM32F42xxx/43xxx devices. + * + * @param BootConfig specifies the Dual Bank Boot Option byte. + * This parameter can be one of the following values: + * @arg OB_Dual_BootEnabled: Dual Bank Boot Enable + * @arg OB_Dual_BootDisabled: Dual Bank Boot Disabled + * @retval None + */ +static HAL_StatusTypeDef FLASH_OB_BootConfig(uint8_t BootConfig) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_BOOT(BootConfig)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if (status == HAL_OK) + { + /* Set Dual Bank Boot */ + *(__IO uint8_t *)OPTCR_BYTE0_ADDRESS &= (~FLASH_OPTCR_BFB2); + *(__IO uint8_t *)OPTCR_BYTE0_ADDRESS |= BootConfig; + } + + return status; +} + +/** + * @brief Enable the read/write protection (PCROP) of the desired + * sectors of Bank 1 and/or Bank 2. + * @note This function can be used only for STM32F42xxx/43xxx devices. + * @param SectorBank1 Specifies the sector(s) to be read/write protected or unprotected for bank1. + * This parameter can be one of the following values: + * @arg OB_PCROP: A value between OB_PCROP_SECTOR_0 and OB_PCROP_SECTOR_11 + * @arg OB_PCROP_SECTOR__All + * @param SectorBank2 Specifies the sector(s) to be read/write protected or unprotected for bank2. 
+ * This parameter can be one of the following values: + * @arg OB_PCROP: A value between OB_PCROP_SECTOR_12 and OB_PCROP_SECTOR_23 + * @arg OB_PCROP_SECTOR__All + * @param Banks Enable PCROP protection on all the sectors for the specific bank + * This parameter can be one of the following values: + * @arg FLASH_BANK_1: WRP on all sectors of bank1 + * @arg FLASH_BANK_2: WRP on all sectors of bank2 + * @arg FLASH_BANK_BOTH: WRP on all sectors of bank1 & bank2 + * + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_EnablePCROP(uint32_t SectorBank1, uint32_t SectorBank2, uint32_t Banks) +{ + HAL_StatusTypeDef status = HAL_OK; + + assert_param(IS_FLASH_BANK(Banks)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if (status == HAL_OK) + { + if ((Banks == FLASH_BANK_1) || (Banks == FLASH_BANK_BOTH)) + { + assert_param(IS_OB_PCROP(SectorBank1)); + /*Write protection done on sectors of BANK1*/ + *(__IO uint16_t *)OPTCR_BYTE2_ADDRESS |= (uint16_t)SectorBank1; + } + else + { + assert_param(IS_OB_PCROP(SectorBank2)); + /*Write protection done on sectors of BANK2*/ + *(__IO uint16_t *)OPTCR1_BYTE2_ADDRESS |= (uint16_t)SectorBank2; + } + + /*Write protection on all sector of BANK2*/ + if (Banks == FLASH_BANK_BOTH) + { + assert_param(IS_OB_PCROP(SectorBank2)); + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if (status == HAL_OK) + { + /*Write protection done on sectors of BANK2*/ + *(__IO uint16_t *)OPTCR1_BYTE2_ADDRESS |= (uint16_t)SectorBank2; + } + } + + } + + return status; +} + + +/** + * @brief Disable the read/write protection (PCROP) of the desired + * sectors of Bank 1 and/or Bank 2. + * @note This function can be used only for STM32F42xxx/43xxx devices. + * @param SectorBank1 specifies the sector(s) to be read/write protected or unprotected for bank1. + * This parameter can be one of the following values: + * @arg OB_PCROP: A value between OB_PCROP_SECTOR_0 and OB_PCROP_SECTOR_11 + * @arg OB_PCROP_SECTOR__All + * @param SectorBank2 Specifies the sector(s) to be read/write protected or unprotected for bank2. 
+ * This parameter can be one of the following values: + * @arg OB_PCROP: A value between OB_PCROP_SECTOR_12 and OB_PCROP_SECTOR_23 + * @arg OB_PCROP_SECTOR__All + * @param Banks Disable PCROP protection on all the sectors for the specific bank + * This parameter can be one of the following values: + * @arg FLASH_BANK_1: WRP on all sectors of bank1 + * @arg FLASH_BANK_2: WRP on all sectors of bank2 + * @arg FLASH_BANK_BOTH: WRP on all sectors of bank1 & bank2 + * + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_DisablePCROP(uint32_t SectorBank1, uint32_t SectorBank2, uint32_t Banks) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_FLASH_BANK(Banks)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if (status == HAL_OK) + { + if ((Banks == FLASH_BANK_1) || (Banks == FLASH_BANK_BOTH)) + { + assert_param(IS_OB_PCROP(SectorBank1)); + /*Write protection done on sectors of BANK1*/ + *(__IO uint16_t *)OPTCR_BYTE2_ADDRESS &= (~SectorBank1); + } + else + { + /*Write protection done on sectors of BANK2*/ + assert_param(IS_OB_PCROP(SectorBank2)); + *(__IO uint16_t *)OPTCR1_BYTE2_ADDRESS &= (~SectorBank2); + } + + /*Write protection on all sector of BANK2*/ + if (Banks == FLASH_BANK_BOTH) + { + assert_param(IS_OB_PCROP(SectorBank2)); + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if (status == HAL_OK) + { + /*Write protection done on sectors of BANK2*/ + *(__IO uint16_t *)OPTCR1_BYTE2_ADDRESS &= (~SectorBank2); + } + } + + } + + return status; + +} + +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ + +#if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx) || defined(STM32F417xx) ||\ + defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F410Tx) || defined(STM32F410Cx) ||\ + defined(STM32F410Rx) || defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F412Zx) ||\ + defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) ||\ + defined(STM32F423xx) +/** + * @brief Mass erase of FLASH memory + * @param VoltageRange The device voltage range which defines the erase parallelism. 
+ * This parameter can be one of the following values: + * @arg FLASH_VOLTAGE_RANGE_1: when the device voltage range is 1.8V to 2.1V, + * the operation will be done by byte (8-bit) + * @arg FLASH_VOLTAGE_RANGE_2: when the device voltage range is 2.1V to 2.7V, + * the operation will be done by half word (16-bit) + * @arg FLASH_VOLTAGE_RANGE_3: when the device voltage range is 2.7V to 3.6V, + * the operation will be done by word (32-bit) + * @arg FLASH_VOLTAGE_RANGE_4: when the device voltage range is 2.7V to 3.6V + External Vpp, + * the operation will be done by double word (64-bit) + * + * @param Banks Banks to be erased + * This parameter can be one of the following values: + * @arg FLASH_BANK_1: Bank1 to be erased + * + * @retval None + */ +static void FLASH_MassErase(uint8_t VoltageRange, uint32_t Banks) +{ + /* Check the parameters */ + assert_param(IS_VOLTAGERANGE(VoltageRange)); + assert_param(IS_FLASH_BANK(Banks)); + + /* If the previous operation is completed, proceed to erase all sectors */ + CLEAR_BIT(FLASH->CR, FLASH_CR_PSIZE); + FLASH->CR |= FLASH_CR_MER; + FLASH->CR |= FLASH_CR_STRT | ((uint32_t)VoltageRange << 8U); +} + +/** + * @brief Erase the specified FLASH memory sector + * @param Sector FLASH sector to erase + * The value of this parameter depend on device used within the same series + * @param VoltageRange The device voltage range which defines the erase parallelism. + * This parameter can be one of the following values: + * @arg FLASH_VOLTAGE_RANGE_1: when the device voltage range is 1.8V to 2.1V, + * the operation will be done by byte (8-bit) + * @arg FLASH_VOLTAGE_RANGE_2: when the device voltage range is 2.1V to 2.7V, + * the operation will be done by half word (16-bit) + * @arg FLASH_VOLTAGE_RANGE_3: when the device voltage range is 2.7V to 3.6V, + * the operation will be done by word (32-bit) + * @arg FLASH_VOLTAGE_RANGE_4: when the device voltage range is 2.7V to 3.6V + External Vpp, + * the operation will be done by double word (64-bit) + * + * @retval None + */ +void FLASH_Erase_Sector(uint32_t Sector, uint8_t VoltageRange) +{ + uint32_t tmp_psize = 0U; + + /* Check the parameters */ + assert_param(IS_FLASH_SECTOR(Sector)); + assert_param(IS_VOLTAGERANGE(VoltageRange)); + + if (VoltageRange == FLASH_VOLTAGE_RANGE_1) + { + tmp_psize = FLASH_PSIZE_BYTE; + } + else if (VoltageRange == FLASH_VOLTAGE_RANGE_2) + { + tmp_psize = FLASH_PSIZE_HALF_WORD; + } + else if (VoltageRange == FLASH_VOLTAGE_RANGE_3) + { + tmp_psize = FLASH_PSIZE_WORD; + } + else + { + tmp_psize = FLASH_PSIZE_DOUBLE_WORD; + } + + /* If the previous operation is completed, proceed to erase the sector */ + CLEAR_BIT(FLASH->CR, FLASH_CR_PSIZE); + FLASH->CR |= tmp_psize; + CLEAR_BIT(FLASH->CR, FLASH_CR_SNB); + FLASH->CR |= FLASH_CR_SER | (Sector << FLASH_CR_SNB_Pos); + FLASH->CR |= FLASH_CR_STRT; +} + +/** + * @brief Enable the write protection of the desired bank 1 sectors + * + * @note When the memory read protection level is selected (RDP level = 1), + * it is not possible to program or erase the flash sector i if CortexM4 + * debug features are connected or boot code is executed in RAM, even if nWRPi = 1 + * @note Active value of nWRPi bits is inverted when PCROP mode is active (SPRMOD =1). + * + * @param WRPSector specifies the sector(s) to be write protected. 
+ * The value of this parameter depend on device used within the same series + * + * @param Banks Enable write protection on all the sectors for the specific bank + * This parameter can be one of the following values: + * @arg FLASH_BANK_1: WRP on all sectors of bank1 + * + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_EnableWRP(uint32_t WRPSector, uint32_t Banks) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_WRP_SECTOR(WRPSector)); + assert_param(IS_FLASH_BANK(Banks)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if (status == HAL_OK) + { + *(__IO uint16_t *)OPTCR_BYTE2_ADDRESS &= (~WRPSector); + } + + return status; +} + +/** + * @brief Disable the write protection of the desired bank 1 sectors + * + * @note When the memory read protection level is selected (RDP level = 1), + * it is not possible to program or erase the flash sector i if CortexM4 + * debug features are connected or boot code is executed in RAM, even if nWRPi = 1 + * @note Active value of nWRPi bits is inverted when PCROP mode is active (SPRMOD =1). + * + * @param WRPSector specifies the sector(s) to be write protected. + * The value of this parameter depend on device used within the same series + * + * @param Banks Enable write protection on all the sectors for the specific bank + * This parameter can be one of the following values: + * @arg FLASH_BANK_1: WRP on all sectors of bank1 + * + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_DisableWRP(uint32_t WRPSector, uint32_t Banks) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_WRP_SECTOR(WRPSector)); + assert_param(IS_FLASH_BANK(Banks)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if (status == HAL_OK) + { + *(__IO uint16_t *)OPTCR_BYTE2_ADDRESS |= (uint16_t)WRPSector; + } + + return status; +} +#endif /* STM32F40xxx || STM32F41xxx || STM32F401xx || STM32F410xx || STM32F411xE || STM32F446xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx + STM32F413xx || STM32F423xx */ + +#if defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) ||\ + defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) ||\ + defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +/** + * @brief Enable the read/write protection (PCROP) of the desired sectors. + * @note This function can be used only for STM32F401xx devices. + * @param Sector specifies the sector(s) to be read/write protected or unprotected. + * This parameter can be one of the following values: + * @arg OB_PCROP: A value between OB_PCROP_Sector0 and OB_PCROP_Sector5 + * @arg OB_PCROP_Sector_All + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_EnablePCROP(uint32_t Sector) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_PCROP(Sector)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if (status == HAL_OK) + { + *(__IO uint16_t *)OPTCR_BYTE2_ADDRESS |= (uint16_t)Sector; + } + + return status; +} + + +/** + * @brief Disable the read/write protection (PCROP) of the desired sectors. + * @note This function can be used only for STM32F401xx devices. 
+ * @param Sector specifies the sector(s) to be read/write protected or unprotected. + * This parameter can be one of the following values: + * @arg OB_PCROP: A value between OB_PCROP_Sector0 and OB_PCROP_Sector5 + * @arg OB_PCROP_Sector_All + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_DisablePCROP(uint32_t Sector) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_PCROP(Sector)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if (status == HAL_OK) + { + *(__IO uint16_t *)OPTCR_BYTE2_ADDRESS &= (~Sector); + } + + return status; + +} +#endif /* STM32F401xC || STM32F401xE || STM32F411xE || STM32F446xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx + STM32F413xx || STM32F423xx */ + +/** + * @brief Set the read protection level. + * @param Level specifies the read protection level. + * This parameter can be one of the following values: + * @arg OB_RDP_LEVEL_0: No protection + * @arg OB_RDP_LEVEL_1: Read protection of the memory + * @arg OB_RDP_LEVEL_2: Full chip protection + * + * @note WARNING: When enabling OB_RDP level 2 it's no more possible to go back to level 1 or 0 + * + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_RDP_LevelConfig(uint8_t Level) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_RDP_LEVEL(Level)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if (status == HAL_OK) + { + *(__IO uint8_t *)OPTCR_BYTE1_ADDRESS = Level; + } + + return status; +} + +/** + * @brief Program the FLASH User Option Byte: IWDG_SW / RST_STOP / RST_STDBY. + * @param Iwdg Selects the IWDG mode + * This parameter can be one of the following values: + * @arg OB_IWDG_SW: Software IWDG selected + * @arg OB_IWDG_HW: Hardware IWDG selected + * @param Stop Reset event when entering STOP mode. + * This parameter can be one of the following values: + * @arg OB_STOP_NO_RST: No reset generated when entering in STOP + * @arg OB_STOP_RST: Reset generated when entering in STOP + * @param Stdby Reset event when entering Standby mode. + * This parameter can be one of the following values: + * @arg OB_STDBY_NO_RST: No reset generated when entering in STANDBY + * @arg OB_STDBY_RST: Reset generated when entering in STANDBY + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_UserConfig(uint8_t Iwdg, uint8_t Stop, uint8_t Stdby) +{ + uint8_t optiontmp = 0xFF; + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_IWDG_SOURCE(Iwdg)); + assert_param(IS_OB_STOP_SOURCE(Stop)); + assert_param(IS_OB_STDBY_SOURCE(Stdby)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if (status == HAL_OK) + { + /* Mask OPTLOCK, OPTSTRT, BOR_LEV and BFB2 bits */ + optiontmp = (uint8_t)((*(__IO uint8_t *)OPTCR_BYTE0_ADDRESS) & (uint8_t)0x1F); + + /* Update User Option Byte */ + *(__IO uint8_t *)OPTCR_BYTE0_ADDRESS = Iwdg | (uint8_t)(Stdby | (uint8_t)(Stop | ((uint8_t)optiontmp))); + } + + return status; +} + +/** + * @brief Set the BOR Level. + * @param Level specifies the Option Bytes BOR Reset Level. 
+ * This parameter can be one of the following values: + * @arg OB_BOR_LEVEL3: Supply voltage ranges from 2.7 to 3.6 V + * @arg OB_BOR_LEVEL2: Supply voltage ranges from 2.4 to 2.7 V + * @arg OB_BOR_LEVEL1: Supply voltage ranges from 2.1 to 2.4 V + * @arg OB_BOR_OFF: Supply voltage ranges from 1.62 to 2.1 V + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_BOR_LevelConfig(uint8_t Level) +{ + /* Check the parameters */ + assert_param(IS_OB_BOR_LEVEL(Level)); + + /* Set the BOR Level */ + *(__IO uint8_t *)OPTCR_BYTE0_ADDRESS &= (~FLASH_OPTCR_BOR_LEV); + *(__IO uint8_t *)OPTCR_BYTE0_ADDRESS |= Level; + + return HAL_OK; + +} + +/** + * @brief Return the FLASH User Option Byte value. + * @retval uint8_t FLASH User Option Bytes values: IWDG_SW(Bit0), RST_STOP(Bit1) + * and RST_STDBY(Bit2). + */ +static uint8_t FLASH_OB_GetUser(void) +{ + /* Return the User Option Byte */ + return ((uint8_t)(FLASH->OPTCR & 0xE0)); +} + +/** + * @brief Return the FLASH Write Protection Option Bytes value. + * @retval uint16_t FLASH Write Protection Option Bytes value + */ +static uint16_t FLASH_OB_GetWRP(void) +{ + /* Return the FLASH write protection Register value */ + return (*(__IO uint16_t *)(OPTCR_BYTE2_ADDRESS)); +} + +/** + * @brief Returns the FLASH Read Protection level. + * @retval FLASH ReadOut Protection Status: + * This parameter can be one of the following values: + * @arg OB_RDP_LEVEL_0: No protection + * @arg OB_RDP_LEVEL_1: Read protection of the memory + * @arg OB_RDP_LEVEL_2: Full chip protection + */ +static uint8_t FLASH_OB_GetRDP(void) +{ + uint8_t readstatus = OB_RDP_LEVEL_0; + + if (*(__IO uint8_t *)(OPTCR_BYTE1_ADDRESS) == (uint8_t)OB_RDP_LEVEL_2) + { + readstatus = OB_RDP_LEVEL_2; + } + else if (*(__IO uint8_t *)(OPTCR_BYTE1_ADDRESS) == (uint8_t)OB_RDP_LEVEL_0) + { + readstatus = OB_RDP_LEVEL_0; + } + else + { + readstatus = OB_RDP_LEVEL_1; + } + + return readstatus; +} + +/** + * @brief Returns the FLASH BOR level. 
+ * @retval uint8_t The FLASH BOR level: + * - OB_BOR_LEVEL3: Supply voltage ranges from 2.7 to 3.6 V + * - OB_BOR_LEVEL2: Supply voltage ranges from 2.4 to 2.7 V + * - OB_BOR_LEVEL1: Supply voltage ranges from 2.1 to 2.4 V + * - OB_BOR_OFF : Supply voltage ranges from 1.62 to 2.1 V + */ +static uint8_t FLASH_OB_GetBOR(void) +{ + /* Return the FLASH BOR level */ + return (uint8_t)(*(__IO uint8_t *)(OPTCR_BYTE0_ADDRESS) & (uint8_t)0x0C); +} + +/** + * @brief Flush the instruction and data caches + * @retval None + */ +void FLASH_FlushCaches(void) +{ + /* Flush instruction cache */ + if (READ_BIT(FLASH->ACR, FLASH_ACR_ICEN) != RESET) + { + /* Disable instruction cache */ + __HAL_FLASH_INSTRUCTION_CACHE_DISABLE(); + /* Reset instruction cache */ + __HAL_FLASH_INSTRUCTION_CACHE_RESET(); + /* Enable instruction cache */ + __HAL_FLASH_INSTRUCTION_CACHE_ENABLE(); + } + + /* Flush data cache */ + if (READ_BIT(FLASH->ACR, FLASH_ACR_DCEN) != RESET) + { + /* Disable data cache */ + __HAL_FLASH_DATA_CACHE_DISABLE(); + /* Reset data cache */ + __HAL_FLASH_DATA_CACHE_RESET(); + /* Enable data cache */ + __HAL_FLASH_DATA_CACHE_ENABLE(); + } +} + +/** + * @} + */ + +#endif /* HAL_FLASH_MODULE_ENABLED */ + +/** + * @} + */ + +/** + * @} + */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash_ramfunc.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash_ramfunc.c new file mode 100644 index 0000000..e6ab3ac --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash_ramfunc.c @@ -0,0 +1,172 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_flash_ramfunc.c + * @author MCD Application Team + * @brief FLASH RAMFUNC module driver. + * This file provides a FLASH firmware functions which should be + * executed from internal SRAM + * + Stop/Start the flash interface while System Run + * + Enable/Disable the flash sleep while System Run + @verbatim + ============================================================================== + ##### APIs executed from Internal RAM ##### + ============================================================================== + [..] + *** ARM Compiler *** + -------------------- + [..] RAM functions are defined using the toolchain options. + Functions that are be executed in RAM should reside in a separate + source module. Using the 'Options for File' dialog you can simply change + the 'Code / Const' area of a module to a memory space in physical RAM. + Available memory areas are declared in the 'Target' tab of the + Options for Target' dialog. + + *** ICCARM Compiler *** + ----------------------- + [..] RAM functions are defined using a specific toolchain keyword "__ramfunc". + + *** GNU Compiler *** + -------------------- + [..] RAM functions are defined using a specific toolchain attribute + "__attribute__((section(".RamFunc")))". + + @endverbatim + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @defgroup FLASH_RAMFUNC FLASH RAMFUNC + * @brief FLASH functions executed from RAM + * @{ + */ +#ifdef HAL_FLASH_MODULE_ENABLED +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) || defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || \ + defined(STM32F412Rx) || defined(STM32F412Cx) + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ +/** @defgroup FLASH_RAMFUNC_Exported_Functions FLASH RAMFUNC Exported Functions + * @{ + */ + +/** @defgroup FLASH_RAMFUNC_Exported_Functions_Group1 Peripheral features functions executed from internal RAM + * @brief Peripheral Extended features functions + * +@verbatim + + =============================================================================== + ##### ramfunc functions ##### + =============================================================================== + [..] + This subsection provides a set of functions that should be executed from RAM + transfers. + +@endverbatim + * @{ + */ + +/** + * @brief Stop the flash interface while System Run + * @note This mode is only available for STM32F41xxx/STM32F446xx devices. + * @note This mode couldn't be set while executing with the flash itself. + * It should be done with specific routine executed from RAM. + * @retval HAL status + */ +__RAM_FUNC HAL_StatusTypeDef HAL_FLASHEx_StopFlashInterfaceClk(void) +{ + /* Enable Power ctrl clock */ + __HAL_RCC_PWR_CLK_ENABLE(); + /* Stop the flash interface while System Run */ + SET_BIT(PWR->CR, PWR_CR_FISSR); + + return HAL_OK; +} + +/** + * @brief Start the flash interface while System Run + * @note This mode is only available for STM32F411xx/STM32F446xx devices. + * @note This mode couldn't be set while executing with the flash itself. + * It should be done with specific routine executed from RAM. + * @retval HAL status + */ +__RAM_FUNC HAL_StatusTypeDef HAL_FLASHEx_StartFlashInterfaceClk(void) +{ + /* Enable Power ctrl clock */ + __HAL_RCC_PWR_CLK_ENABLE(); + /* Start the flash interface while System Run */ + CLEAR_BIT(PWR->CR, PWR_CR_FISSR); + + return HAL_OK; +} + +/** + * @brief Enable the flash sleep while System Run + * @note This mode is only available for STM32F41xxx/STM32F446xx devices. + * @note This mode could n't be set while executing with the flash itself. + * It should be done with specific routine executed from RAM. + * @retval HAL status + */ +__RAM_FUNC HAL_StatusTypeDef HAL_FLASHEx_EnableFlashSleepMode(void) +{ + /* Enable Power ctrl clock */ + __HAL_RCC_PWR_CLK_ENABLE(); + /* Enable the flash sleep while System Run */ + SET_BIT(PWR->CR, PWR_CR_FMSSR); + + return HAL_OK; +} + +/** + * @brief Disable the flash sleep while System Run + * @note This mode is only available for STM32F41xxx/STM32F446xx devices. + * @note This mode couldn't be set while executing with the flash itself. 
+ * It should be done with specific routine executed from RAM. + * @retval HAL status + */ +__RAM_FUNC HAL_StatusTypeDef HAL_FLASHEx_DisableFlashSleepMode(void) +{ + /* Enable Power ctrl clock */ + __HAL_RCC_PWR_CLK_ENABLE(); + /* Disable the flash sleep while System Run */ + CLEAR_BIT(PWR->CR, PWR_CR_FMSSR); + + return HAL_OK; +} + +/** + * @} + */ + +/** + * @} + */ + +#endif /* STM32F410xx || STM32F411xE || STM32F446xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ +#endif /* HAL_FLASH_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_gpio.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_gpio.c new file mode 100644 index 0000000..b3ce9bb --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_gpio.c @@ -0,0 +1,533 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_gpio.c + * @author MCD Application Team + * @brief GPIO HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the General Purpose Input/Output (GPIO) peripheral: + * + Initialization and de-initialization functions + * + IO operation functions + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + @verbatim + ============================================================================== + ##### GPIO Peripheral features ##### + ============================================================================== + [..] + Subject to the specific hardware characteristics of each I/O port listed in the datasheet, each + port bit of the General Purpose IO (GPIO) Ports, can be individually configured by software + in several modes: + (+) Input mode + (+) Analog mode + (+) Output mode + (+) Alternate function mode + (+) External interrupt/event lines + + [..] + During and just after reset, the alternate functions and external interrupt + lines are not active and the I/O ports are configured in input floating mode. + + [..] + All GPIO pins have weak internal pull-up and pull-down resistors, which can be + activated or not. + + [..] + In Output or Alternate mode, each IO can be configured on open-drain or push-pull + type and the IO speed can be selected depending on the VDD value. + + [..] + All ports have external interrupt/event capability. To use external interrupt + lines, the port must be configured in input mode. All available GPIO pins are + connected to the 16 external interrupt/event lines from EXTI0 to EXTI15. + + [..] + The external interrupt/event controller consists of up to 23 edge detectors + (16 lines are connected to GPIO) for generating event/interrupt requests (each + input line can be independently configured to select the type (interrupt or event) + and the corresponding trigger event (rising or falling or both). Each line can + also be masked independently. + + ##### How to use this driver ##### + ============================================================================== + [..] + (#) Enable the GPIO AHB clock using the following function: __HAL_RCC_GPIOx_CLK_ENABLE(). 
+
+    (#) Configure the GPIO pin(s) using HAL_GPIO_Init().
+        (++) Configure the IO mode using the "Mode" member from the GPIO_InitTypeDef structure.
+        (++) Activate Pull-up or Pull-down resistor using the "Pull" member from the
+             GPIO_InitTypeDef structure.
+        (++) In case of Output or Alternate function mode selection: the speed is
+             configured through the "Speed" member from the GPIO_InitTypeDef structure.
+        (++) In case of Alternate function mode selection, the alternate function connected
+             to the IO is configured through the "Alternate" member from the GPIO_InitTypeDef
+             structure.
+        (++) Analog mode is required when a pin is to be used as ADC channel
+             or DAC output.
+        (++) In case of external interrupt/event selection, the "Mode" member from the
+             GPIO_InitTypeDef structure selects the type (interrupt or event) and
+             the corresponding trigger event (rising, falling or both).
+
+    (#) In case of external interrupt/event mode selection, configure the NVIC IRQ priority
+        mapped to the EXTI line using HAL_NVIC_SetPriority() and enable it using
+        HAL_NVIC_EnableIRQ().
+
+    (#) To get the level of a pin configured in input mode, use HAL_GPIO_ReadPin().
+
+    (#) To set/reset the level of a pin configured in output mode, use
+        HAL_GPIO_WritePin()/HAL_GPIO_TogglePin().
+
+    (#) To lock the pin configuration until the next reset, use HAL_GPIO_LockPin().
+
+    (#) During and just after reset, the alternate functions are not
+        active and the GPIO pins are configured in input floating mode (except JTAG
+        pins).
+
+    (#) The LSE oscillator pins OSC32_IN and OSC32_OUT can be used as general purpose
+        (PC14 and PC15, respectively) when the LSE oscillator is off. The LSE has
+        priority over the GPIO function.
+
+    (#) The HSE oscillator pins OSC_IN/OSC_OUT can be used as
+        general purpose PH0 and PH1, respectively, when the HSE oscillator is off.
+        The HSE has priority over the GPIO function.
+
+  @endverbatim
+  ******************************************************************************
+  */
+
+/* Includes ------------------------------------------------------------------*/
+#include "stm32f4xx_hal.h"
+
+/** @addtogroup STM32F4xx_HAL_Driver
+  * @{
+  */
+
+/** @defgroup GPIO GPIO
+  * @brief GPIO HAL module driver
+  * @{
+  */
+
+#ifdef HAL_GPIO_MODULE_ENABLED
+
+/* Private typedef -----------------------------------------------------------*/
+/* Private define ------------------------------------------------------------*/
+/** @addtogroup GPIO_Private_Constants GPIO Private Constants
+  * @{
+  */
+
+#define GPIO_NUMBER           16U
+/**
+  * @}
+  */
+/* Private macro -------------------------------------------------------------*/
+/* Private variables ---------------------------------------------------------*/
+/* Private function prototypes -----------------------------------------------*/
+/* Private functions ---------------------------------------------------------*/
+/* Exported functions --------------------------------------------------------*/
+/** @defgroup GPIO_Exported_Functions GPIO Exported Functions
+  * @{
+  */
+
+/** @defgroup GPIO_Exported_Functions_Group1 Initialization and de-initialization functions
+  *  @brief    Initialization and Configuration functions
+  *
+@verbatim
+ ===============================================================================
+              ##### Initialization and de-initialization functions #####
+ ===============================================================================
+  [..]
+    This section provides functions to initialize and de-initialize the GPIOs
+    so that they are ready for use.
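+
+  [..]
+    A minimal configuration sketch (illustrative only; the port, pin and mode below
+    are assumptions, not a fixed board mapping):
+
+      __HAL_RCC_GPIOA_CLK_ENABLE();
+
+      GPIO_InitTypeDef gpio = {0};
+      gpio.Pin   = GPIO_PIN_5;
+      gpio.Mode  = GPIO_MODE_OUTPUT_PP;
+      gpio.Pull  = GPIO_NOPULL;
+      gpio.Speed = GPIO_SPEED_FREQ_LOW;
+      HAL_GPIO_Init(GPIOA, &gpio);
+
+      HAL_GPIO_WritePin(GPIOA, GPIO_PIN_5, GPIO_PIN_SET);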
+ +@endverbatim + * @{ + */ + + +/** + * @brief Initializes the GPIOx peripheral according to the specified parameters in the GPIO_Init. + * @param GPIOx where x can be (A..K) to select the GPIO peripheral for STM32F429X device or + * x can be (A..I) to select the GPIO peripheral for STM32F40XX and STM32F427X devices. + * @param GPIO_Init pointer to a GPIO_InitTypeDef structure that contains + * the configuration information for the specified GPIO peripheral. + * @retval None + */ +void HAL_GPIO_Init(GPIO_TypeDef *GPIOx, GPIO_InitTypeDef *GPIO_Init) +{ + uint32_t position; + uint32_t ioposition = 0x00U; + uint32_t iocurrent = 0x00U; + uint32_t temp = 0x00U; + + /* Check the parameters */ + assert_param(IS_GPIO_ALL_INSTANCE(GPIOx)); + assert_param(IS_GPIO_PIN(GPIO_Init->Pin)); + assert_param(IS_GPIO_MODE(GPIO_Init->Mode)); + + /* Configure the port pins */ + for(position = 0U; position < GPIO_NUMBER; position++) + { + /* Get the IO position */ + ioposition = 0x01U << position; + /* Get the current IO position */ + iocurrent = (uint32_t)(GPIO_Init->Pin) & ioposition; + + if(iocurrent == ioposition) + { + /*--------------------- GPIO Mode Configuration ------------------------*/ + /* In case of Output or Alternate function mode selection */ + if(((GPIO_Init->Mode & GPIO_MODE) == MODE_OUTPUT) || \ + (GPIO_Init->Mode & GPIO_MODE) == MODE_AF) + { + /* Check the Speed parameter */ + assert_param(IS_GPIO_SPEED(GPIO_Init->Speed)); + /* Configure the IO Speed */ + temp = GPIOx->OSPEEDR; + temp &= ~(GPIO_OSPEEDER_OSPEEDR0 << (position * 2U)); + temp |= (GPIO_Init->Speed << (position * 2U)); + GPIOx->OSPEEDR = temp; + + /* Configure the IO Output Type */ + temp = GPIOx->OTYPER; + temp &= ~(GPIO_OTYPER_OT_0 << position) ; + temp |= (((GPIO_Init->Mode & OUTPUT_TYPE) >> OUTPUT_TYPE_Pos) << position); + GPIOx->OTYPER = temp; + } + + if((GPIO_Init->Mode & GPIO_MODE) != MODE_ANALOG) + { + /* Check the parameters */ + assert_param(IS_GPIO_PULL(GPIO_Init->Pull)); + + /* Activate the Pull-up or Pull down resistor for the current IO */ + temp = GPIOx->PUPDR; + temp &= ~(GPIO_PUPDR_PUPDR0 << (position * 2U)); + temp |= ((GPIO_Init->Pull) << (position * 2U)); + GPIOx->PUPDR = temp; + } + + /* In case of Alternate function mode selection */ + if((GPIO_Init->Mode & GPIO_MODE) == MODE_AF) + { + /* Check the Alternate function parameter */ + assert_param(IS_GPIO_AF(GPIO_Init->Alternate)); + /* Configure Alternate function mapped with the current IO */ + temp = GPIOx->AFR[position >> 3U]; + temp &= ~(0xFU << ((uint32_t)(position & 0x07U) * 4U)) ; + temp |= ((uint32_t)(GPIO_Init->Alternate) << (((uint32_t)position & 0x07U) * 4U)); + GPIOx->AFR[position >> 3U] = temp; + } + + /* Configure IO Direction mode (Input, Output, Alternate or Analog) */ + temp = GPIOx->MODER; + temp &= ~(GPIO_MODER_MODER0 << (position * 2U)); + temp |= ((GPIO_Init->Mode & GPIO_MODE) << (position * 2U)); + GPIOx->MODER = temp; + + /*--------------------- EXTI Mode Configuration ------------------------*/ + /* Configure the External Interrupt or event for the current IO */ + if((GPIO_Init->Mode & EXTI_MODE) != 0x00U) + { + /* Enable SYSCFG Clock */ + __HAL_RCC_SYSCFG_CLK_ENABLE(); + + temp = SYSCFG->EXTICR[position >> 2U]; + temp &= ~(0x0FU << (4U * (position & 0x03U))); + temp |= ((uint32_t)(GPIO_GET_INDEX(GPIOx)) << (4U * (position & 0x03U))); + SYSCFG->EXTICR[position >> 2U] = temp; + + /* Clear Rising Falling edge configuration */ + temp = EXTI->RTSR; + temp &= ~((uint32_t)iocurrent); + if((GPIO_Init->Mode & TRIGGER_RISING) != 0x00U) + { + 
temp |= iocurrent; + } + EXTI->RTSR = temp; + + temp = EXTI->FTSR; + temp &= ~((uint32_t)iocurrent); + if((GPIO_Init->Mode & TRIGGER_FALLING) != 0x00U) + { + temp |= iocurrent; + } + EXTI->FTSR = temp; + + temp = EXTI->EMR; + temp &= ~((uint32_t)iocurrent); + if((GPIO_Init->Mode & EXTI_EVT) != 0x00U) + { + temp |= iocurrent; + } + EXTI->EMR = temp; + + /* Clear EXTI line configuration */ + temp = EXTI->IMR; + temp &= ~((uint32_t)iocurrent); + if((GPIO_Init->Mode & EXTI_IT) != 0x00U) + { + temp |= iocurrent; + } + EXTI->IMR = temp; + } + } + } +} + +/** + * @brief De-initializes the GPIOx peripheral registers to their default reset values. + * @param GPIOx where x can be (A..K) to select the GPIO peripheral for STM32F429X device or + * x can be (A..I) to select the GPIO peripheral for STM32F40XX and STM32F427X devices. + * @param GPIO_Pin specifies the port bit to be written. + * This parameter can be one of GPIO_PIN_x where x can be (0..15). + * @retval None + */ +void HAL_GPIO_DeInit(GPIO_TypeDef *GPIOx, uint32_t GPIO_Pin) +{ + uint32_t position; + uint32_t ioposition = 0x00U; + uint32_t iocurrent = 0x00U; + uint32_t tmp = 0x00U; + + /* Check the parameters */ + assert_param(IS_GPIO_ALL_INSTANCE(GPIOx)); + + /* Configure the port pins */ + for(position = 0U; position < GPIO_NUMBER; position++) + { + /* Get the IO position */ + ioposition = 0x01U << position; + /* Get the current IO position */ + iocurrent = (GPIO_Pin) & ioposition; + + if(iocurrent == ioposition) + { + /*------------------------- EXTI Mode Configuration --------------------*/ + tmp = SYSCFG->EXTICR[position >> 2U]; + tmp &= (0x0FU << (4U * (position & 0x03U))); + if(tmp == ((uint32_t)(GPIO_GET_INDEX(GPIOx)) << (4U * (position & 0x03U)))) + { + /* Clear EXTI line configuration */ + EXTI->IMR &= ~((uint32_t)iocurrent); + EXTI->EMR &= ~((uint32_t)iocurrent); + + /* Clear Rising Falling edge configuration */ + EXTI->FTSR &= ~((uint32_t)iocurrent); + EXTI->RTSR &= ~((uint32_t)iocurrent); + + /* Configure the External Interrupt or event for the current IO */ + tmp = 0x0FU << (4U * (position & 0x03U)); + SYSCFG->EXTICR[position >> 2U] &= ~tmp; + } + + /*------------------------- GPIO Mode Configuration --------------------*/ + /* Configure IO Direction in Input Floating Mode */ + GPIOx->MODER &= ~(GPIO_MODER_MODER0 << (position * 2U)); + + /* Configure the default Alternate Function in current IO */ + GPIOx->AFR[position >> 3U] &= ~(0xFU << ((uint32_t)(position & 0x07U) * 4U)) ; + + /* Deactivate the Pull-up and Pull-down resistor for the current IO */ + GPIOx->PUPDR &= ~(GPIO_PUPDR_PUPDR0 << (position * 2U)); + + /* Configure the default value IO Output Type */ + GPIOx->OTYPER &= ~(GPIO_OTYPER_OT_0 << position) ; + + /* Configure the default value for IO Speed */ + GPIOx->OSPEEDR &= ~(GPIO_OSPEEDER_OSPEEDR0 << (position * 2U)); + } + } +} + +/** + * @} + */ + +/** @defgroup GPIO_Exported_Functions_Group2 IO operation functions + * @brief GPIO Read and Write + * +@verbatim + =============================================================================== + ##### IO operation functions ##### + =============================================================================== + +@endverbatim + * @{ + */ + +/** + * @brief Reads the specified input port pin. + * @param GPIOx where x can be (A..K) to select the GPIO peripheral for STM32F429X device or + * x can be (A..I) to select the GPIO peripheral for STM32F40XX and STM32F427X devices. + * @param GPIO_Pin specifies the port bit to read. 
+ * This parameter can be GPIO_PIN_x where x can be (0..15). + * @retval The input port pin value. + */ +GPIO_PinState HAL_GPIO_ReadPin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin) +{ + GPIO_PinState bitstatus; + + /* Check the parameters */ + assert_param(IS_GPIO_PIN(GPIO_Pin)); + + if((GPIOx->IDR & GPIO_Pin) != (uint32_t)GPIO_PIN_RESET) + { + bitstatus = GPIO_PIN_SET; + } + else + { + bitstatus = GPIO_PIN_RESET; + } + return bitstatus; +} + +/** + * @brief Sets or clears the selected data port bit. + * + * @note This function uses GPIOx_BSRR register to allow atomic read/modify + * accesses. In this way, there is no risk of an IRQ occurring between + * the read and the modify access. + * + * @param GPIOx where x can be (A..K) to select the GPIO peripheral for STM32F429X device or + * x can be (A..I) to select the GPIO peripheral for STM32F40XX and STM32F427X devices. + * @param GPIO_Pin specifies the port bit to be written. + * This parameter can be one of GPIO_PIN_x where x can be (0..15). + * @param PinState specifies the value to be written to the selected bit. + * This parameter can be one of the GPIO_PinState enum values: + * @arg GPIO_PIN_RESET: to clear the port pin + * @arg GPIO_PIN_SET: to set the port pin + * @retval None + */ +void HAL_GPIO_WritePin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin, GPIO_PinState PinState) +{ + /* Check the parameters */ + assert_param(IS_GPIO_PIN(GPIO_Pin)); + assert_param(IS_GPIO_PIN_ACTION(PinState)); + + if(PinState != GPIO_PIN_RESET) + { + GPIOx->BSRR = GPIO_Pin; + } + else + { + GPIOx->BSRR = (uint32_t)GPIO_Pin << 16U; + } +} + +/** + * @brief Toggles the specified GPIO pins. + * @param GPIOx Where x can be (A..K) to select the GPIO peripheral for STM32F429X device or + * x can be (A..I) to select the GPIO peripheral for STM32F40XX and STM32F427X devices. + * @param GPIO_Pin Specifies the pins to be toggled. + * @retval None + */ +void HAL_GPIO_TogglePin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin) +{ + uint32_t odr; + + /* Check the parameters */ + assert_param(IS_GPIO_PIN(GPIO_Pin)); + + /* get current Output Data Register value */ + odr = GPIOx->ODR; + + /* Set selected pins that were at low level, and reset ones that were high */ + GPIOx->BSRR = ((odr & GPIO_Pin) << GPIO_NUMBER) | (~odr & GPIO_Pin); +} + +/** + * @brief Locks GPIO Pins configuration registers. + * @note The locked registers are GPIOx_MODER, GPIOx_OTYPER, GPIOx_OSPEEDR, + * GPIOx_PUPDR, GPIOx_AFRL and GPIOx_AFRH. + * @note The configuration of the locked GPIO pins can no longer be modified + * until the next reset. + * @param GPIOx where x can be (A..F) to select the GPIO peripheral for STM32F4 family + * @param GPIO_Pin specifies the port bit to be locked. + * This parameter can be any combination of GPIO_PIN_x where x can be (0..15). + * @retval None + */ +HAL_StatusTypeDef HAL_GPIO_LockPin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin) +{ + __IO uint32_t tmp = GPIO_LCKR_LCKK; + + /* Check the parameters */ + assert_param(IS_GPIO_PIN(GPIO_Pin)); + + /* Apply lock key write sequence */ + tmp |= GPIO_Pin; + /* Set LCKx bit(s): LCKK='1' + LCK[15-0] */ + GPIOx->LCKR = tmp; + /* Reset LCKx bit(s): LCKK='0' + LCK[15-0] */ + GPIOx->LCKR = GPIO_Pin; + /* Set LCKx bit(s): LCKK='1' + LCK[15-0] */ + GPIOx->LCKR = tmp; + /* Read LCKR register. 
This read is mandatory to complete key lock sequence */ + tmp = GPIOx->LCKR; + + /* Read again in order to confirm lock is active */ + if((GPIOx->LCKR & GPIO_LCKR_LCKK) != RESET) + { + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief This function handles EXTI interrupt request. + * @param GPIO_Pin Specifies the pins connected EXTI line + * @retval None + */ +void HAL_GPIO_EXTI_IRQHandler(uint16_t GPIO_Pin) +{ + /* EXTI line interrupt detected */ + if(__HAL_GPIO_EXTI_GET_IT(GPIO_Pin) != RESET) + { + __HAL_GPIO_EXTI_CLEAR_IT(GPIO_Pin); + HAL_GPIO_EXTI_Callback(GPIO_Pin); + } +} + +/** + * @brief EXTI line detection callbacks. + * @param GPIO_Pin Specifies the pins connected EXTI line + * @retval None + */ +__weak void HAL_GPIO_EXTI_Callback(uint16_t GPIO_Pin) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(GPIO_Pin); + /* NOTE: This function Should not be modified, when the callback is needed, + the HAL_GPIO_EXTI_Callback could be implemented in the user file + */ +} + +/** + * @} + */ + + +/** + * @} + */ + +#endif /* HAL_GPIO_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pwr.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pwr.c new file mode 100644 index 0000000..5ccde64 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pwr.c @@ -0,0 +1,598 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_pwr.c + * @author MCD Application Team + * @brief PWR HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Power Controller (PWR) peripheral: + * + Initialization and de-initialization functions + * + Peripheral Control functions + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @defgroup PWR PWR + * @brief PWR HAL module driver + * @{ + */ + +#ifdef HAL_PWR_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @addtogroup PWR_Private_Constants + * @{ + */ + +/** @defgroup PWR_PVD_Mode_Mask PWR PVD Mode Mask + * @{ + */ +#define PVD_MODE_IT 0x00010000U +#define PVD_MODE_EVT 0x00020000U +#define PVD_RISING_EDGE 0x00000001U +#define PVD_FALLING_EDGE 0x00000002U +/** + * @} + */ + +/** + * @} + */ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ + +/** @defgroup PWR_Exported_Functions PWR Exported Functions + * @{ + */ + +/** @defgroup PWR_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and de-initialization functions + * +@verbatim + =============================================================================== + ##### Initialization and de-initialization functions ##### + =============================================================================== + [..] + After reset, the backup domain (RTC registers, RTC backup data + registers and backup SRAM) is protected against possible unwanted + write accesses. + To enable access to the RTC Domain and RTC registers, proceed as follows: + (+) Enable the Power Controller (PWR) APB1 interface clock using the + __HAL_RCC_PWR_CLK_ENABLE() macro. + (+) Enable access to RTC domain using the HAL_PWR_EnableBkUpAccess() function. + +@endverbatim + * @{ + */ + +/** + * @brief Deinitializes the HAL PWR peripheral registers to their default reset values. + * @retval None + */ +void HAL_PWR_DeInit(void) +{ + __HAL_RCC_PWR_FORCE_RESET(); + __HAL_RCC_PWR_RELEASE_RESET(); +} + +/** + * @brief Enables access to the backup domain (RTC registers, RTC + * backup data registers and backup SRAM). + * @note If the HSE divided by 2, 3, ..31 is used as the RTC clock, the + * Backup Domain Access should be kept enabled. + * @note The following sequence is required to bypass the delay between + * DBP bit programming and the effective enabling of the backup domain. + * Please check the Errata Sheet for more details under "Possible delay + * in backup domain protection disabling/enabling after programming the + * DBP bit" section. + * @retval None + */ +void HAL_PWR_EnableBkUpAccess(void) +{ + __IO uint32_t dummyread; + *(__IO uint32_t *) CR_DBP_BB = (uint32_t)ENABLE; + dummyread = PWR->CR; + UNUSED(dummyread); +} + +/** + * @brief Disables access to the backup domain (RTC registers, RTC + * backup data registers and backup SRAM). + * @note If the HSE divided by 2, 3, ..31 is used as the RTC clock, the + * Backup Domain Access should be kept enabled. + * @note The following sequence is required to bypass the delay between + * DBP bit programming and the effective disabling of the backup domain. 
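A typical call order for the backup-domain access functions above might look like the following sketch (what is accessed in between is an assumption):

#include "stm32f4xx_hal.h"

void backup_access_example(void)
{
  __HAL_RCC_PWR_CLK_ENABLE();      /* PWR APB1 interface clock must be enabled first */
  HAL_PWR_EnableBkUpAccess();      /* sets DBP, including the errata dummy read */

  /* ... access RTC registers, RTC backup registers or backup SRAM here ... */

  HAL_PWR_DisableBkUpAccess();     /* optionally re-protect the backup domain */
}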
+ * Please check the Errata Sheet for more details under "Possible delay + * in backup domain protection disabling/enabling after programming the + * DBP bit" section. + * @retval None + */ +void HAL_PWR_DisableBkUpAccess(void) +{ + __IO uint32_t dummyread; + *(__IO uint32_t *) CR_DBP_BB = (uint32_t)DISABLE; + dummyread = PWR->CR; + UNUSED(dummyread); +} + +/** + * @} + */ + +/** @defgroup PWR_Exported_Functions_Group2 Peripheral Control functions + * @brief Low Power modes configuration functions + * +@verbatim + + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + + *** PVD configuration *** + ========================= + [..] + (+) The PVD is used to monitor the VDD power supply by comparing it to a + threshold selected by the PVD Level (PLS[2:0] bits in the PWR_CR). + (+) A PVDO flag is available to indicate if VDD/VDDA is higher or lower + than the PVD threshold. This event is internally connected to the EXTI + line16 and can generate an interrupt if enabled. This is done through + __HAL_PWR_PVD_EXTI_ENABLE_IT() macro. + (+) The PVD is stopped in Standby mode. + + *** Wake-up pin configuration *** + ================================ + [..] + (+) Wake-up pin is used to wake up the system from Standby mode. This pin is + forced in input pull-down configuration and is active on rising edges. + (+) There is one Wake-up pin: Wake-up Pin 1 on PA.00. + (++) For STM32F446xx there are two Wake-Up pins: Pin1 on PA.00 and Pin2 on PC.13 + (++) For STM32F410xx/STM32F412xx/STM32F413xx/STM32F423xx there are three Wake-Up pins: Pin1 on PA.00, Pin2 on PC.00 and Pin3 on PC.01 + + *** Low Power modes configuration *** + ===================================== + [..] + The devices feature 3 low-power modes: + (+) Sleep mode: Cortex-M4 core stopped, peripherals kept running. + (+) Stop mode: all clocks are stopped, regulator running, regulator + in low power mode + (+) Standby mode: 1.2V domain powered off. + + *** Sleep mode *** + ================== + [..] + (+) Entry: + The Sleep mode is entered by using the HAL_PWR_EnterSLEEPMode(Regulator, SLEEPEntry) + functions with + (++) PWR_SLEEPENTRY_WFI: enter SLEEP mode with WFI instruction + (++) PWR_SLEEPENTRY_WFE: enter SLEEP mode with WFE instruction + (++) PWR_SLEEPENTRY_WFE_NO_EVT_CLEAR: Enter SLEEP mode with WFE instruction and + no clear of pending event before. + + -@@- The Regulator parameter is not used for the STM32F4 family + and is kept as parameter just to maintain compatibility with the + lower power families (STM32L). + (+) Exit: + Any peripheral interrupt acknowledged by the nested vectored interrupt + controller (NVIC) can wake up the device from Sleep mode. + + *** Stop mode *** + ================= + [..] + In Stop mode, all clocks in the 1.2V domain are stopped, the PLL, the HSI, + and the HSE RC oscillators are disabled. Internal SRAM and register contents + are preserved. + The voltage regulator can be configured either in normal or low-power mode. + To minimize the consumption In Stop mode, FLASH can be powered off before + entering the Stop mode using the HAL_PWREx_EnableFlashPowerDown() function. + It can be switched on again by software after exiting the Stop mode using + the HAL_PWREx_DisableFlashPowerDown() function. + + (+) Entry: + The Stop mode is entered using the HAL_PWR_EnterSTOPMode(Regulator, STOPEntry) + function with: + (++) Regulator: + (+++) Main regulator ON. 
+ (+++) Low Power regulator ON. + (++) STOPEntry: + (+++) PWR_STOPENTRY_WFI : Enter STOP mode with WFI instruction. + (+++) PWR_STOPENTRY_WFE : Enter STOP mode with WFE instruction and + clear of pending events before. + (+++) PWR_STOPENTRY_WFE_NO_EVT_CLEAR : Enter STOP mode with WFE instruction and + no clear of pending event before. + (+) Exit: + Any EXTI Line (Internal or External) configured in Interrupt/Event mode. + + *** Standby mode *** + ==================== + [..] + (+) + The Standby mode allows to achieve the lowest power consumption. It is based + on the Cortex-M4 deep sleep mode, with the voltage regulator disabled. + The 1.2V domain is consequently powered off. The PLL, the HSI oscillator and + the HSE oscillator are also switched off. SRAM and register contents are lost + except for the RTC registers, RTC backup registers, backup SRAM and Standby + circuitry. + + The voltage regulator is OFF. + + (++) Entry: + (+++) The Standby mode is entered using the HAL_PWR_EnterSTANDBYMode() function. + (++) Exit: + (+++) WKUP pin rising edge, RTC alarm (Alarm A and Alarm B), RTC wake-up, + tamper event, time-stamp event, external reset in NRST pin, IWDG reset. + + *** Auto-wake-up (AWU) from low-power mode *** + ============================================= + [..] + + (+) The MCU can be woken up from low-power mode by an RTC Alarm event, an RTC + Wake-up event, a tamper event or a time-stamp event, without depending on + an external interrupt (Auto-wake-up mode). + + (+) RTC auto-wake-up (AWU) from the Stop and Standby modes + + (++) To wake up from the Stop mode with an RTC alarm event, it is necessary to + configure the RTC to generate the RTC alarm using the HAL_RTC_SetAlarm_IT() function. + + (++) To wake up from the Stop mode with an RTC Tamper or time stamp event, it + is necessary to configure the RTC to detect the tamper or time stamp event using the + HAL_RTCEx_SetTimeStamp_IT() or HAL_RTCEx_SetTamper_IT() functions. + + (++) To wake up from the Stop mode with an RTC Wake-up event, it is necessary to + configure the RTC to generate the RTC Wake-up event using the HAL_RTCEx_SetWakeUpTimer_IT() function. + +@endverbatim + * @{ + */ + +/** + * @brief Configures the voltage threshold detected by the Power Voltage Detector(PVD). + * @param sConfigPVD pointer to an PWR_PVDTypeDef structure that contains the configuration + * information for the PVD. + * @note Refer to the electrical characteristics of your device datasheet for + * more details about the voltage threshold corresponding to each + * detection level. + * @retval None + */ +void HAL_PWR_ConfigPVD(PWR_PVDTypeDef *sConfigPVD) +{ + /* Check the parameters */ + assert_param(IS_PWR_PVD_LEVEL(sConfigPVD->PVDLevel)); + assert_param(IS_PWR_PVD_MODE(sConfigPVD->Mode)); + + /* Set PLS[7:5] bits according to PVDLevel value */ + MODIFY_REG(PWR->CR, PWR_CR_PLS, sConfigPVD->PVDLevel); + + /* Clear any previous config. 
Keep it clear if no event or IT mode is selected */ + __HAL_PWR_PVD_EXTI_DISABLE_EVENT(); + __HAL_PWR_PVD_EXTI_DISABLE_IT(); + __HAL_PWR_PVD_EXTI_DISABLE_RISING_EDGE(); + __HAL_PWR_PVD_EXTI_DISABLE_FALLING_EDGE(); + + /* Configure interrupt mode */ + if((sConfigPVD->Mode & PVD_MODE_IT) == PVD_MODE_IT) + { + __HAL_PWR_PVD_EXTI_ENABLE_IT(); + } + + /* Configure event mode */ + if((sConfigPVD->Mode & PVD_MODE_EVT) == PVD_MODE_EVT) + { + __HAL_PWR_PVD_EXTI_ENABLE_EVENT(); + } + + /* Configure the edge */ + if((sConfigPVD->Mode & PVD_RISING_EDGE) == PVD_RISING_EDGE) + { + __HAL_PWR_PVD_EXTI_ENABLE_RISING_EDGE(); + } + + if((sConfigPVD->Mode & PVD_FALLING_EDGE) == PVD_FALLING_EDGE) + { + __HAL_PWR_PVD_EXTI_ENABLE_FALLING_EDGE(); + } +} + +/** + * @brief Enables the Power Voltage Detector(PVD). + * @retval None + */ +void HAL_PWR_EnablePVD(void) +{ + *(__IO uint32_t *) CR_PVDE_BB = (uint32_t)ENABLE; +} + +/** + * @brief Disables the Power Voltage Detector(PVD). + * @retval None + */ +void HAL_PWR_DisablePVD(void) +{ + *(__IO uint32_t *) CR_PVDE_BB = (uint32_t)DISABLE; +} + +/** + * @brief Enables the Wake-up PINx functionality. + * @param WakeUpPinx Specifies the Power Wake-Up pin to enable. + * This parameter can be one of the following values: + * @arg PWR_WAKEUP_PIN1 + * @arg PWR_WAKEUP_PIN2 available only on STM32F410xx/STM32F446xx/STM32F412xx/STM32F413xx/STM32F423xx devices + * @arg PWR_WAKEUP_PIN3 available only on STM32F410xx/STM32F412xx/STM32F413xx/STM32F423xx devices + * @retval None + */ +void HAL_PWR_EnableWakeUpPin(uint32_t WakeUpPinx) +{ + /* Check the parameter */ + assert_param(IS_PWR_WAKEUP_PIN(WakeUpPinx)); + + /* Enable the wake up pin */ + SET_BIT(PWR->CSR, WakeUpPinx); +} + +/** + * @brief Disables the Wake-up PINx functionality. + * @param WakeUpPinx Specifies the Power Wake-Up pin to disable. + * This parameter can be one of the following values: + * @arg PWR_WAKEUP_PIN1 + * @arg PWR_WAKEUP_PIN2 available only on STM32F410xx/STM32F446xx/STM32F412xx/STM32F413xx/STM32F423xx devices + * @arg PWR_WAKEUP_PIN3 available only on STM32F410xx/STM32F412xx/STM32F413xx/STM32F423xx devices + * @retval None + */ +void HAL_PWR_DisableWakeUpPin(uint32_t WakeUpPinx) +{ + /* Check the parameter */ + assert_param(IS_PWR_WAKEUP_PIN(WakeUpPinx)); + + /* Disable the wake up pin */ + CLEAR_BIT(PWR->CSR, WakeUpPinx); +} + +/** + * @brief Enters Sleep mode. + * + * @note In Sleep mode, all I/O pins keep the same state as in Run mode. + * + * @note In Sleep mode, the systick is stopped to avoid exit from this mode with + * systick interrupt when used as time base for Timeout + * + * @param Regulator Specifies the regulator state in SLEEP mode. + * This parameter can be one of the following values: + * @arg PWR_MAINREGULATOR_ON: SLEEP mode with regulator ON + * @arg PWR_LOWPOWERREGULATOR_ON: SLEEP mode with low power regulator ON + * @note This parameter is not used for the STM32F4 family and is kept as parameter + * just to maintain compatibility with the lower power families. + * @param SLEEPEntry Specifies if SLEEP mode in entered with WFI or WFE instruction. + * This parameter can be one of the following values: + * @arg PWR_SLEEPENTRY_WFI : Enter SLEEP mode with WFI instruction + * @arg PWR_SLEEPENTRY_WFE : Enter SLEEP mode with WFE instruction and + * clear of pending events before. + * @arg PWR_SLEEPENTRY_WFE_NO_EVT_CLEAR : Enter SLEEP mode with WFE instruction and + * no clear of pending event before. 
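A minimal sketch of the Sleep entry just described, with the SysTick suspended as the note above recommends:

#include "stm32f4xx_hal.h"

void enter_sleep_example(void)
{
  HAL_SuspendTick();                                                /* keep SysTick from waking us */
  HAL_PWR_EnterSLEEPMode(PWR_MAINREGULATOR_ON, PWR_SLEEPENTRY_WFI);
  HAL_ResumeTick();                                                 /* resumes here after any enabled IRQ */
}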
+ * @retval None + */ +void HAL_PWR_EnterSLEEPMode(uint32_t Regulator, uint8_t SLEEPEntry) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(Regulator); + + /* Check the parameters */ + assert_param(IS_PWR_REGULATOR(Regulator)); + assert_param(IS_PWR_SLEEP_ENTRY(SLEEPEntry)); + + /* Clear SLEEPDEEP bit of Cortex System Control Register */ + CLEAR_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SLEEPDEEP_Msk)); + + /* Select SLEEP mode entry -------------------------------------------------*/ + if(SLEEPEntry == PWR_SLEEPENTRY_WFI) + { + /* Request Wait For Interrupt */ + __WFI(); + } + else + { + if(SLEEPEntry != PWR_SLEEPENTRY_WFE_NO_EVT_CLEAR) + { + /* Clear all pending event */ + __SEV(); + __WFE(); + } + + /* Request Wait For Event */ + __WFE(); + } +} + +/** + * @brief Enters Stop mode. + * @note In Stop mode, all I/O pins keep the same state as in Run mode. + * @note When exiting Stop mode by issuing an interrupt or a wake-up event, + * the HSI RC oscillator is selected as system clock. + * @note When the voltage regulator operates in low power mode, an additional + * startup delay is incurred when waking up from Stop mode. + * By keeping the internal regulator ON during Stop mode, the consumption + * is higher although the startup time is reduced. + * @param Regulator Specifies the regulator state in Stop mode. + * This parameter can be one of the following values: + * @arg PWR_MAINREGULATOR_ON: Stop mode with regulator ON + * @arg PWR_LOWPOWERREGULATOR_ON: Stop mode with low power regulator ON + * @param STOPEntry Specifies if Stop mode in entered with WFI or WFE instruction. + * This parameter can be one of the following values: + * @arg PWR_STOPENTRY_WFI : Enter Stop mode with WFI instruction + * @arg PWR_STOPENTRY_WFE : Enter Stop mode with WFE instruction and + * clear of pending events before. + * @arg PWR_STOPENTRY_WFE_NO_EVT_CLEAR : Enter STOP mode with WFE instruction and + * no clear of pending event before. + * @retval None + */ +void HAL_PWR_EnterSTOPMode(uint32_t Regulator, uint8_t STOPEntry) +{ + /* Check the parameters */ + assert_param(IS_PWR_REGULATOR(Regulator)); + assert_param(IS_PWR_STOP_ENTRY(STOPEntry)); + + /* Select the regulator state in Stop mode: Set PDDS and LPDS bits according to PWR_Regulator value */ + MODIFY_REG(PWR->CR, (PWR_CR_PDDS | PWR_CR_LPDS), Regulator); + + /* Set SLEEPDEEP bit of Cortex System Control Register */ + SET_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SLEEPDEEP_Msk)); + + /* Select Stop mode entry --------------------------------------------------*/ + if(STOPEntry == PWR_STOPENTRY_WFI) + { + /* Request Wait For Interrupt */ + __WFI(); + } + else + { + if(STOPEntry != PWR_STOPENTRY_WFE_NO_EVT_CLEAR) + { + /* Clear all pending event */ + __SEV(); + __WFE(); + } + /* Request Wait For Event */ + __WFE(); + } + /* Reset SLEEPDEEP bit of Cortex System Control Register */ + CLEAR_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SLEEPDEEP_Msk)); +} + +/** + * @brief Enters Standby mode. + * @note In Standby mode, all I/O pins are high impedance except for: + * - Reset pad (still available) + * - RTC_AF1 pin (PC13) if configured for tamper, time-stamp, RTC + * Alarm out, or RTC clock calibration out. + * - RTC_AF2 pin (PI8) if configured for tamper or time-stamp. + * - WKUP pin 1 (PA0) if enabled. 
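Following the Standby description above, a representative entry sequence is sketched below (clearing the wake-up flag first is common practice; whether the wake-up pin is needed depends on the application):

#include "stm32f4xx_hal.h"

void enter_standby_example(void)
{
  __HAL_RCC_PWR_CLK_ENABLE();

  HAL_PWR_EnableWakeUpPin(PWR_WAKEUP_PIN1);   /* PA0 rising edge will wake the device */
  __HAL_PWR_CLEAR_FLAG(PWR_FLAG_WU);          /* clear a possibly pending wake-up flag */

  HAL_PWR_EnterSTANDBYMode();                 /* does not return; device restarts from reset */
}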
+ * @retval None + */ +void HAL_PWR_EnterSTANDBYMode(void) +{ + /* Select Standby mode */ + SET_BIT(PWR->CR, PWR_CR_PDDS); + + /* Set SLEEPDEEP bit of Cortex System Control Register */ + SET_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SLEEPDEEP_Msk)); + + /* This option is used to ensure that store operations are completed */ +#if defined ( __CC_ARM) + __force_stores(); +#endif + /* Request Wait For Interrupt */ + __WFI(); +} + +/** + * @brief This function handles the PWR PVD interrupt request. + * @note This API should be called under the PVD_IRQHandler(). + * @retval None + */ +void HAL_PWR_PVD_IRQHandler(void) +{ + /* Check PWR Exti flag */ + if(__HAL_PWR_PVD_EXTI_GET_FLAG() != RESET) + { + /* PWR PVD interrupt user callback */ + HAL_PWR_PVDCallback(); + + /* Clear PWR Exti pending bit */ + __HAL_PWR_PVD_EXTI_CLEAR_FLAG(); + } +} + +/** + * @brief PWR PVD interrupt callback + * @retval None + */ +__weak void HAL_PWR_PVDCallback(void) +{ + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_PWR_PVDCallback could be implemented in the user file + */ +} + +/** + * @brief Indicates Sleep-On-Exit when returning from Handler mode to Thread mode. + * @note Set SLEEPONEXIT bit of SCR register. When this bit is set, the processor + * re-enters SLEEP mode when an interruption handling is over. + * Setting this bit is useful when the processor is expected to run only on + * interruptions handling. + * @retval None + */ +void HAL_PWR_EnableSleepOnExit(void) +{ + /* Set SLEEPONEXIT bit of Cortex System Control Register */ + SET_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SLEEPONEXIT_Msk)); +} + +/** + * @brief Disables Sleep-On-Exit feature when returning from Handler mode to Thread mode. + * @note Clears SLEEPONEXIT bit of SCR register. When this bit is set, the processor + * re-enters SLEEP mode when an interruption handling is over. + * @retval None + */ +void HAL_PWR_DisableSleepOnExit(void) +{ + /* Clear SLEEPONEXIT bit of Cortex System Control Register */ + CLEAR_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SLEEPONEXIT_Msk)); +} + +/** + * @brief Enables CORTEX M4 SEVONPEND bit. + * @note Sets SEVONPEND bit of SCR register. When this bit is set, this causes + * WFE to wake up when an interrupt moves from inactive to pended. + * @retval None + */ +void HAL_PWR_EnableSEVOnPend(void) +{ + /* Set SEVONPEND bit of Cortex System Control Register */ + SET_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SEVONPEND_Msk)); +} + +/** + * @brief Disables CORTEX M4 SEVONPEND bit. + * @note Clears SEVONPEND bit of SCR register. When this bit is set, this causes + * WFE to wake up when an interrupt moves from inactive to pended. + * @retval None + */ +void HAL_PWR_DisableSEVOnPend(void) +{ + /* Clear SEVONPEND bit of Cortex System Control Register */ + CLEAR_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SEVONPEND_Msk)); +} + +/** + * @} + */ + +/** + * @} + */ + +#endif /* HAL_PWR_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pwr_ex.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pwr_ex.c new file mode 100644 index 0000000..77f9c35 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pwr_ex.c @@ -0,0 +1,600 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_pwr_ex.c + * @author MCD Application Team + * @brief Extended PWR HAL module driver. 
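For the PVD interrupt plumbing shown above, the user-side pieces are usually just the two functions below (a sketch; the interrupt priority and what the callback does are assumptions):

#include "stm32f4xx_hal.h"

/* In stm32f4xx_it.c: forward the EXTI line 16 (PVD) interrupt to the HAL. */
void PVD_IRQHandler(void)
{
  HAL_PWR_PVD_IRQHandler();
}

/* User override of the weak callback declared above. */
void HAL_PWR_PVDCallback(void)
{
  /* VDD crossed the threshold programmed with HAL_PWR_ConfigPVD() */
}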
+ * This file provides firmware functions to manage the following + * functionalities of PWR extension peripheral: + * + Peripheral Extended features functions + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @defgroup PWREx PWREx + * @brief PWR HAL module driver + * @{ + */ + +#ifdef HAL_PWR_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @addtogroup PWREx_Private_Constants + * @{ + */ +#define PWR_OVERDRIVE_TIMEOUT_VALUE 1000U +#define PWR_UDERDRIVE_TIMEOUT_VALUE 1000U +#define PWR_BKPREG_TIMEOUT_VALUE 1000U +#define PWR_VOSRDY_TIMEOUT_VALUE 1000U +/** + * @} + */ + + +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/** @defgroup PWREx_Exported_Functions PWREx Exported Functions + * @{ + */ + +/** @defgroup PWREx_Exported_Functions_Group1 Peripheral Extended features functions + * @brief Peripheral Extended features functions + * +@verbatim + + =============================================================================== + ##### Peripheral extended features functions ##### + =============================================================================== + + *** Main and Backup Regulators configuration *** + ================================================ + [..] + (+) The backup domain includes 4 Kbytes of backup SRAM accessible only from + the CPU, and address in 32-bit, 16-bit or 8-bit mode. Its content is + retained even in Standby or VBAT mode when the low power backup regulator + is enabled. It can be considered as an internal EEPROM when VBAT is + always present. You can use the HAL_PWREx_EnableBkUpReg() function to + enable the low power backup regulator. + + (+) When the backup domain is supplied by VDD (analog switch connected to VDD) + the backup SRAM is powered from VDD which replaces the VBAT power supply to + save battery life. + + (+) The backup SRAM is not mass erased by a tamper event. It is read + protected to prevent confidential data, such as cryptographic private + key, from being accessed. The backup SRAM can be erased only through + the Flash interface when a protection level change from level 1 to + level 0 is requested. + -@- Refer to the description of Read protection (RDP) in the Flash + programming manual. + + (+) The main internal regulator can be configured to have a tradeoff between + performance and power consumption when the device does not operate at + the maximum frequency. This is done through __HAL_PWR_MAINREGULATORMODE_CONFIG() + macro which configure VOS bit in PWR_CR register + + Refer to the product datasheets for more details. 
+ + *** FLASH Power Down configuration **** + ======================================= + [..] + (+) By setting the FPDS bit in the PWR_CR register by using the + HAL_PWREx_EnableFlashPowerDown() function, the Flash memory also enters power + down mode when the device enters Stop mode. When the Flash memory + is in power down mode, an additional startup delay is incurred when + waking up from Stop mode. + + (+) For STM32F42xxx/43xxx/446xx/469xx/479xx Devices, the scale can be modified only when the PLL + is OFF and the HSI or HSE clock source is selected as system clock. + The new value programmed is active only when the PLL is ON. + When the PLL is OFF, the voltage scale 3 is automatically selected. + Refer to the datasheets for more details. + + *** Over-Drive and Under-Drive configuration **** + ================================================= + [..] + (+) For STM32F42xxx/43xxx/446xx/469xx/479xx Devices, in Run mode: the main regulator has + 2 operating modes available: + (++) Normal mode: The CPU and core logic operate at maximum frequency at a given + voltage scaling (scale 1, scale 2 or scale 3) + (++) Over-drive mode: This mode allows the CPU and the core logic to operate at a + higher frequency than the normal mode for a given voltage scaling (scale 1, + scale 2 or scale 3). This mode is enabled through HAL_PWREx_EnableOverDrive() function and + disabled by HAL_PWREx_DisableOverDrive() function, to enter or exit from Over-drive mode please follow + the sequence described in Reference manual. + + (+) For STM32F42xxx/43xxx/446xx/469xx/479xx Devices, in Stop mode: the main regulator or low power regulator + supplies a low power voltage to the 1.2V domain, thus preserving the content of registers + and internal SRAM. 2 operating modes are available: + (++) Normal mode: the 1.2V domain is preserved in nominal leakage mode. This mode is only + available when the main regulator or the low power regulator is used in Scale 3 or + low voltage mode. + (++) Under-drive mode: the 1.2V domain is preserved in reduced leakage mode. This mode is only + available when the main regulator or the low power regulator is in low voltage mode. + +@endverbatim + * @{ + */ + +/** + * @brief Enables the Backup Regulator. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PWREx_EnableBkUpReg(void) +{ + uint32_t tickstart = 0U; + + *(__IO uint32_t *) CSR_BRE_BB = (uint32_t)ENABLE; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait till Backup regulator ready flag is set */ + while(__HAL_PWR_GET_FLAG(PWR_FLAG_BRR) == RESET) + { + if((HAL_GetTick() - tickstart ) > PWR_BKPREG_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + return HAL_OK; +} + +/** + * @brief Disables the Backup Regulator. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PWREx_DisableBkUpReg(void) +{ + uint32_t tickstart = 0U; + + *(__IO uint32_t *) CSR_BRE_BB = (uint32_t)DISABLE; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait till Backup regulator ready flag is set */ + while(__HAL_PWR_GET_FLAG(PWR_FLAG_BRR) != RESET) + { + if((HAL_GetTick() - tickstart ) > PWR_BKPREG_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + return HAL_OK; +} + +/** + * @brief Enables the Flash Power Down in Stop mode. + * @retval None + */ +void HAL_PWREx_EnableFlashPowerDown(void) +{ + *(__IO uint32_t *) CR_FPDS_BB = (uint32_t)ENABLE; +} + +/** + * @brief Disables the Flash Power Down in Stop mode. 
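To illustrate the backup-regulator and Flash power-down features above together, a low-power Stop sequence might read roughly as follows (error handling kept minimal; the regulator choice is an assumption):

#include "stm32f4xx_hal.h"

void low_power_stop_example(void)
{
  __HAL_RCC_PWR_CLK_ENABLE();

  /* Retain backup SRAM content in Standby/VBAT by enabling the backup regulator. */
  HAL_PWR_EnableBkUpAccess();
  if (HAL_PWREx_EnableBkUpReg() != HAL_OK)
  {
    /* BRR flag never set within the timeout */
  }

  /* Power the Flash down while in Stop mode (lower consumption, slower wake-up). */
  HAL_PWREx_EnableFlashPowerDown();
  HAL_PWR_EnterSTOPMode(PWR_LOWPOWERREGULATOR_ON, PWR_STOPENTRY_WFI);
  HAL_PWREx_DisableFlashPowerDown();

  /* After Stop the HSI is the system clock: reconfigure the clock tree here if needed. */
}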
+ * @retval None + */ +void HAL_PWREx_DisableFlashPowerDown(void) +{ + *(__IO uint32_t *) CR_FPDS_BB = (uint32_t)DISABLE; +} + +/** + * @brief Return Voltage Scaling Range. + * @retval The configured scale for the regulator voltage(VOS bit field). + * The returned value can be one of the following: + * - @arg PWR_REGULATOR_VOLTAGE_SCALE1: Regulator voltage output Scale 1 mode + * - @arg PWR_REGULATOR_VOLTAGE_SCALE2: Regulator voltage output Scale 2 mode + * - @arg PWR_REGULATOR_VOLTAGE_SCALE3: Regulator voltage output Scale 3 mode + */ +uint32_t HAL_PWREx_GetVoltageRange(void) +{ + return (PWR->CR & PWR_CR_VOS); +} + +#if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx) || defined(STM32F417xx) +/** + * @brief Configures the main internal regulator output voltage. + * @param VoltageScaling specifies the regulator output voltage to achieve + * a tradeoff between performance and power consumption. + * This parameter can be one of the following values: + * @arg PWR_REGULATOR_VOLTAGE_SCALE1: Regulator voltage output range 1 mode, + * the maximum value of fHCLK = 168 MHz. + * @arg PWR_REGULATOR_VOLTAGE_SCALE2: Regulator voltage output range 2 mode, + * the maximum value of fHCLK = 144 MHz. + * @note When moving from Range 1 to Range 2, the system frequency must be decreased to + * a value below 144 MHz before calling HAL_PWREx_ConfigVoltageScaling() API. + * When moving from Range 2 to Range 1, the system frequency can be increased to + * a value up to 168 MHz after calling HAL_PWREx_ConfigVoltageScaling() API. + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_PWREx_ControlVoltageScaling(uint32_t VoltageScaling) +{ + uint32_t tickstart = 0U; + + assert_param(IS_PWR_VOLTAGE_SCALING_RANGE(VoltageScaling)); + + /* Enable PWR RCC Clock Peripheral */ + __HAL_RCC_PWR_CLK_ENABLE(); + + /* Set Range */ + __HAL_PWR_VOLTAGESCALING_CONFIG(VoltageScaling); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + while((__HAL_PWR_GET_FLAG(PWR_FLAG_VOSRDY) == RESET)) + { + if((HAL_GetTick() - tickstart ) > PWR_VOSRDY_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + return HAL_OK; +} + +#elif defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || \ + defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F410Tx) || defined(STM32F410Cx) || \ + defined(STM32F410Rx) || defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F469xx) || \ + defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || \ + defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +/** + * @brief Configures the main internal regulator output voltage. + * @param VoltageScaling specifies the regulator output voltage to achieve + * a tradeoff between performance and power consumption. + * This parameter can be one of the following values: + * @arg PWR_REGULATOR_VOLTAGE_SCALE1: Regulator voltage output range 1 mode, + * the maximum value of fHCLK is 168 MHz. It can be extended to + * 180 MHz by activating the over-drive mode. + * @arg PWR_REGULATOR_VOLTAGE_SCALE2: Regulator voltage output range 2 mode, + * the maximum value of fHCLK is 144 MHz. It can be extended to, + * 168 MHz by activating the over-drive mode. + * @arg PWR_REGULATOR_VOLTAGE_SCALE3: Regulator voltage output range 3 mode, + * the maximum value of fHCLK is 120 MHz. + * @note To update the system clock frequency(SYSCLK): + * - Set the HSI or HSE as system clock frequency using the HAL_RCC_ClockConfig(). 
+ * - Call the HAL_RCC_OscConfig() to configure the PLL. + * - Call HAL_PWREx_ConfigVoltageScaling() API to adjust the voltage scale. + * - Set the new system clock frequency using the HAL_RCC_ClockConfig(). + * @note The scale can be modified only when the HSI or HSE clock source is selected + * as system clock source, otherwise the API returns HAL_ERROR. + * @note When the PLL is OFF, the voltage scale 3 is automatically selected and the VOS bits + * value in the PWR_CR1 register are not taken in account. + * @note This API forces the PLL state ON to allow the possibility to configure the voltage scale 1 or 2. + * @note The new voltage scale is active only when the PLL is ON. + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_PWREx_ControlVoltageScaling(uint32_t VoltageScaling) +{ + uint32_t tickstart = 0U; + + assert_param(IS_PWR_VOLTAGE_SCALING_RANGE(VoltageScaling)); + + /* Enable PWR RCC Clock Peripheral */ + __HAL_RCC_PWR_CLK_ENABLE(); + + /* Check if the PLL is used as system clock or not */ + if(__HAL_RCC_GET_SYSCLK_SOURCE() != RCC_CFGR_SWS_PLL) + { + /* Disable the main PLL */ + __HAL_RCC_PLL_DISABLE(); + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + /* Wait till PLL is disabled */ + while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) != RESET) + { + if((HAL_GetTick() - tickstart ) > PLL_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Set Range */ + __HAL_PWR_VOLTAGESCALING_CONFIG(VoltageScaling); + + /* Enable the main PLL */ + __HAL_RCC_PLL_ENABLE(); + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + /* Wait till PLL is ready */ + while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) == RESET) + { + if((HAL_GetTick() - tickstart ) > PLL_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + while((__HAL_PWR_GET_FLAG(PWR_FLAG_VOSRDY) == RESET)) + { + if((HAL_GetTick() - tickstart ) > PWR_VOSRDY_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + else + { + return HAL_ERROR; + } + + return HAL_OK; +} +#endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx */ + +#if defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) ||\ + defined(STM32F411xE) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) ||\ + defined(STM32F413xx) || defined(STM32F423xx) +/** + * @brief Enables Main Regulator low voltage mode. + * @note This mode is only available for STM32F401xx/STM32F410xx/STM32F411xx/STM32F412Zx/STM32F412Rx/STM32F412Vx/STM32F412Cx/ + * STM32F413xx/STM32F423xx devices. + * @retval None + */ +void HAL_PWREx_EnableMainRegulatorLowVoltage(void) +{ + *(__IO uint32_t *) CR_MRLVDS_BB = (uint32_t)ENABLE; +} + +/** + * @brief Disables Main Regulator low voltage mode. + * @note This mode is only available for STM32F401xx/STM32F410xx/STM32F411xx/STM32F412Zx/STM32F412Rx/STM32F412Vx/STM32F412Cx/ + * STM32F413xx/STM32F423xxdevices. + * @retval None + */ +void HAL_PWREx_DisableMainRegulatorLowVoltage(void) +{ + *(__IO uint32_t *) CR_MRLVDS_BB = (uint32_t)DISABLE; +} + +/** + * @brief Enables Low Power Regulator low voltage mode. + * @note This mode is only available for STM32F401xx/STM32F410xx/STM32F411xx/STM32F412Zx/STM32F412Rx/STM32F412Vx/STM32F412Cx/ + * STM32F413xx/STM32F423xx devices. + * @retval None + */ +void HAL_PWREx_EnableLowRegulatorLowVoltage(void) +{ + *(__IO uint32_t *) CR_LPLVDS_BB = (uint32_t)ENABLE; +} + +/** + * @brief Disables Low Power Regulator low voltage mode. 
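A sketch of the sequence implied by the notes above (the scale must be changed while HSI or HSE drives SYSCLK; the target scale is an assumption):

#include "stm32f4xx_hal.h"

void voltage_scale_example(void)
{
  __HAL_RCC_PWR_CLK_ENABLE();

  /* Must be called while HSI or HSE is the system clock source; on these devices the
     API toggles the PLL itself and returns HAL_ERROR if the PLL already drives SYSCLK. */
  if (HAL_PWREx_ControlVoltageScaling(PWR_REGULATOR_VOLTAGE_SCALE1) != HAL_OK)
  {
    /* scale not applied: keep HCLK at the lower limit or report the error */
  }

  /* Scale 1 is effective once the PLL is ON; the clock tree can then be raised. */
}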
+ * @note This mode is only available for STM32F401xx/STM32F410xx/STM32F411xx/STM32F412Zx/STM32F412Rx/STM32F412Vx/STM32F412Cx/ + * STM32F413xx/STM32F423xx devices. + * @retval None + */ +void HAL_PWREx_DisableLowRegulatorLowVoltage(void) +{ + *(__IO uint32_t *) CR_LPLVDS_BB = (uint32_t)DISABLE; +} + +#endif /* STM32F401xC || STM32F401xE || STM32F410xx || STM32F411xE || STM32F412Zx || STM32F412Rx || STM32F412Vx || STM32F412Cx || + STM32F413xx || STM32F423xx */ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) ||\ + defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) +/** + * @brief Activates the Over-Drive mode. + * @note This function can be used only for STM32F42xx/STM32F43xx/STM32F446xx/STM32F469xx/STM32F479xx devices. + * This mode allows the CPU and the core logic to operate at a higher frequency + * than the normal mode for a given voltage scaling (scale 1, scale 2 or scale 3). + * @note It is recommended to enter or exit Over-drive mode when the application is not running + * critical tasks and when the system clock source is either HSI or HSE. + * During the Over-drive switch activation, no peripheral clocks should be enabled. + * The peripheral clocks must be enabled once the Over-drive mode is activated. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PWREx_EnableOverDrive(void) +{ + uint32_t tickstart = 0U; + + __HAL_RCC_PWR_CLK_ENABLE(); + + /* Enable the Over-drive to extend the clock frequency to 180 Mhz */ + __HAL_PWR_OVERDRIVE_ENABLE(); + + /* Get tick */ + tickstart = HAL_GetTick(); + + while(!__HAL_PWR_GET_FLAG(PWR_FLAG_ODRDY)) + { + if((HAL_GetTick() - tickstart) > PWR_OVERDRIVE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Enable the Over-drive switch */ + __HAL_PWR_OVERDRIVESWITCHING_ENABLE(); + + /* Get tick */ + tickstart = HAL_GetTick(); + + while(!__HAL_PWR_GET_FLAG(PWR_FLAG_ODSWRDY)) + { + if((HAL_GetTick() - tickstart ) > PWR_OVERDRIVE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + return HAL_OK; +} + +/** + * @brief Deactivates the Over-Drive mode. + * @note This function can be used only for STM32F42xx/STM32F43xx/STM32F446xx/STM32F469xx/STM32F479xx devices. + * This mode allows the CPU and the core logic to operate at a higher frequency + * than the normal mode for a given voltage scaling (scale 1, scale 2 or scale 3). + * @note It is recommended to enter or exit Over-drive mode when the application is not running + * critical tasks and when the system clock source is either HSI or HSE. + * During the Over-drive switch activation, no peripheral clocks should be enabled. + * The peripheral clocks must be enabled once the Over-drive mode is activated. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PWREx_DisableOverDrive(void) +{ + uint32_t tickstart = 0U; + + __HAL_RCC_PWR_CLK_ENABLE(); + + /* Disable the Over-drive switch */ + __HAL_PWR_OVERDRIVESWITCHING_DISABLE(); + + /* Get tick */ + tickstart = HAL_GetTick(); + + while(__HAL_PWR_GET_FLAG(PWR_FLAG_ODSWRDY)) + { + if((HAL_GetTick() - tickstart) > PWR_OVERDRIVE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Disable the Over-drive */ + __HAL_PWR_OVERDRIVE_DISABLE(); + + /* Get tick */ + tickstart = HAL_GetTick(); + + while(__HAL_PWR_GET_FLAG(PWR_FLAG_ODRDY)) + { + if((HAL_GetTick() - tickstart) > PWR_OVERDRIVE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + return HAL_OK; +} + +/** + * @brief Enters in Under-Drive STOP mode. 
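A common bring-up order for the Over-drive functions above, assuming an STM32F42x/43x-class part targeting 180 MHz (clock steps shown only as comments):

#include "stm32f4xx_hal.h"

void overdrive_example(void)
{
  __HAL_RCC_PWR_CLK_ENABLE();
  __HAL_PWR_VOLTAGESCALING_CONFIG(PWR_REGULATOR_VOLTAGE_SCALE1);

  /* 1. Configure HSE + PLL for 180 MHz with HAL_RCC_OscConfig() (not shown).      */
  /* 2. Enable Over-drive while the system still runs from HSI or HSE.             */
  if (HAL_PWREx_EnableOverDrive() != HAL_OK)
  {
    /* ODRDY/ODSWRDY never set within the timeout */
  }
  /* 3. Only then switch SYSCLK to the PLL with HAL_RCC_ClockConfig().             */
}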
+ * + * @note This mode is only available for STM32F42xxx/STM32F43xxx/STM32F446xx/STM32F469xx/STM32F479xx devices. + * + * @note This mode can be selected only when the Under-Drive is already active + * + * @note This mode is enabled only with STOP low power mode. + * In this mode, the 1.2V domain is preserved in reduced leakage mode. This + * mode is only available when the main regulator or the low power regulator + * is in low voltage mode + * + * @note If the Under-drive mode was enabled, it is automatically disabled after + * exiting Stop mode. + * When the voltage regulator operates in Under-drive mode, an additional + * startup delay is induced when waking up from Stop mode. + * + * @note In Stop mode, all I/O pins keep the same state as in Run mode. + * + * @note When exiting Stop mode by issuing an interrupt or a wake-up event, + * the HSI RC oscillator is selected as system clock. + * + * @note When the voltage regulator operates in low power mode, an additional + * startup delay is incurred when waking up from Stop mode. + * By keeping the internal regulator ON during Stop mode, the consumption + * is higher although the startup time is reduced. + * + * @param Regulator specifies the regulator state in STOP mode. + * This parameter can be one of the following values: + * @arg PWR_MAINREGULATOR_UNDERDRIVE_ON: Main Regulator in under-drive mode + * and Flash memory in power-down when the device is in Stop under-drive mode + * @arg PWR_LOWPOWERREGULATOR_UNDERDRIVE_ON: Low Power Regulator in under-drive mode + * and Flash memory in power-down when the device is in Stop under-drive mode + * @param STOPEntry specifies if STOP mode in entered with WFI or WFE instruction. + * This parameter can be one of the following values: + * @arg PWR_SLEEPENTRY_WFI: enter STOP mode with WFI instruction + * @arg PWR_SLEEPENTRY_WFE: enter STOP mode with WFE instruction + * @retval None + */ +HAL_StatusTypeDef HAL_PWREx_EnterUnderDriveSTOPMode(uint32_t Regulator, uint8_t STOPEntry) +{ + uint32_t tmpreg1 = 0U; + + /* Check the parameters */ + assert_param(IS_PWR_REGULATOR_UNDERDRIVE(Regulator)); + assert_param(IS_PWR_STOP_ENTRY(STOPEntry)); + + /* Enable Power ctrl clock */ + __HAL_RCC_PWR_CLK_ENABLE(); + /* Enable the Under-drive Mode ---------------------------------------------*/ + /* Clear Under-drive flag */ + __HAL_PWR_CLEAR_ODRUDR_FLAG(); + + /* Enable the Under-drive */ + __HAL_PWR_UNDERDRIVE_ENABLE(); + + /* Select the regulator state in STOP mode ---------------------------------*/ + tmpreg1 = PWR->CR; + /* Clear PDDS, LPDS, MRLUDS and LPLUDS bits */ + tmpreg1 &= (uint32_t)~(PWR_CR_PDDS | PWR_CR_LPDS | PWR_CR_LPUDS | PWR_CR_MRUDS); + + /* Set LPDS, MRLUDS and LPLUDS bits according to PWR_Regulator value */ + tmpreg1 |= Regulator; + + /* Store the new value */ + PWR->CR = tmpreg1; + + /* Set SLEEPDEEP bit of Cortex System Control Register */ + SCB->SCR |= SCB_SCR_SLEEPDEEP_Msk; + + /* Select STOP mode entry --------------------------------------------------*/ + if(STOPEntry == PWR_SLEEPENTRY_WFI) + { + /* Request Wait For Interrupt */ + __WFI(); + } + else + { + /* Request Wait For Event */ + __WFE(); + } + /* Reset SLEEPDEEP bit of Cortex System Control Register */ + SCB->SCR &= (uint32_t)~((uint32_t)SCB_SCR_SLEEPDEEP_Msk); + + return HAL_OK; +} + +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F446xx || STM32F469xx || STM32F479xx */ +/** + * @} + */ + +/** + * @} + */ + +#endif /* HAL_PWR_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ diff --git 
a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rcc.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rcc.c new file mode 100644 index 0000000..c04d33a --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rcc.c @@ -0,0 +1,1124 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_rcc.c + * @author MCD Application Team + * @brief RCC HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Reset and Clock Control (RCC) peripheral: + * + Initialization and de-initialization functions + * + Peripheral Control functions + * + @verbatim + ============================================================================== + ##### RCC specific features ##### + ============================================================================== + [..] + After reset the device is running from Internal High Speed oscillator + (HSI 16MHz) with Flash 0 wait state, Flash prefetch buffer, D-Cache + and I-Cache are disabled, and all peripherals are off except internal + SRAM, Flash and JTAG. + (+) There is no prescaler on High speed (AHB) and Low speed (APB) busses; + all peripherals mapped on these busses are running at HSI speed. + (+) The clock for all peripherals is switched off, except the SRAM and FLASH. + (+) All GPIOs are in input floating state, except the JTAG pins which + are assigned to be used for debug purpose. + + [..] + Once the device started from reset, the user application has to: + (+) Configure the clock source to be used to drive the System clock + (if the application needs higher frequency/performance) + (+) Configure the System clock frequency and Flash settings + (+) Configure the AHB and APB busses prescalers + (+) Enable the clock for the peripheral(s) to be used + (+) Configure the clock source(s) for peripherals which clocks are not + derived from the System clock (I2S, RTC, ADC, USB OTG FS/SDIO/RNG) + + ##### RCC Limitations ##### + ============================================================================== + [..] + A delay between an RCC peripheral clock enable and the effective peripheral + enabling should be taken into account in order to manage the peripheral read/write + from/to registers. + (+) This delay depends on the peripheral mapping. + (+) If peripheral is mapped on AHB: the delay is 2 AHB clock cycle + after the clock enable bit is set on the hardware register + (+) If peripheral is mapped on APB: the delay is 2 APB clock cycle + after the clock enable bit is set on the hardware register + + [..] + Implemented Workaround: + (+) For AHB & APB peripherals, a dummy read to the peripheral register has been + inserted in each __HAL_RCC_PPP_CLK_ENABLE() macro. + + @endverbatim + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @defgroup RCC RCC + * @brief RCC HAL module driver + * @{ + */ + +#ifdef HAL_RCC_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @addtogroup RCC_Private_Constants + * @{ + */ + +/* Private macro -------------------------------------------------------------*/ +#define __MCO1_CLK_ENABLE() __HAL_RCC_GPIOA_CLK_ENABLE() +#define MCO1_GPIO_PORT GPIOA +#define MCO1_PIN GPIO_PIN_8 + +#define __MCO2_CLK_ENABLE() __HAL_RCC_GPIOC_CLK_ENABLE() +#define MCO2_GPIO_PORT GPIOC +#define MCO2_PIN GPIO_PIN_9 +/** + * @} + */ + +/* Private variables ---------------------------------------------------------*/ +/** @defgroup RCC_Private_Variables RCC Private Variables + * @{ + */ +/** + * @} + */ +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ + +/** @defgroup RCC_Exported_Functions RCC Exported Functions + * @{ + */ + +/** @defgroup RCC_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * +@verbatim + =============================================================================== + ##### Initialization and de-initialization functions ##### + =============================================================================== + [..] + This section provides functions allowing to configure the internal/external oscillators + (HSE, HSI, LSE, LSI, PLL, CSS and MCO) and the System busses clocks (SYSCLK, AHB, APB1 + and APB2). + + [..] Internal/external clock and PLL configuration + (#) HSI (high-speed internal), 16 MHz factory-trimmed RC used directly or through + the PLL as System clock source. + + (#) LSI (low-speed internal), 32 KHz low consumption RC used as IWDG and/or RTC + clock source. + + (#) HSE (high-speed external), 4 to 26 MHz crystal oscillator used directly or + through the PLL as System clock source. Can be used also as RTC clock source. + + (#) LSE (low-speed external), 32 KHz oscillator used as RTC clock source. + + (#) PLL (clocked by HSI or HSE), featuring two different output clocks: + (++) The first output is used to generate the high speed system clock (up to 168 MHz) + (++) The second output is used to generate the clock for the USB OTG FS (48 MHz), + the random analog generator (<=48 MHz) and the SDIO (<= 48 MHz). + + (#) CSS (Clock security system), once enable using the macro __HAL_RCC_CSS_ENABLE() + and if a HSE clock failure occurs(HSE used directly or through PLL as System + clock source), the System clocks automatically switched to HSI and an interrupt + is generated if enabled. The interrupt is linked to the Cortex-M4 NMI + (Non-Maskable Interrupt) exception vector. + + (#) MCO1 (microcontroller clock output), used to output HSI, LSE, HSE or PLL + clock (through a configurable prescaler) on PA8 pin. + + (#) MCO2 (microcontroller clock output), used to output HSE, PLL, SYSCLK or PLLI2S + clock (through a configurable prescaler) on PC9 pin. + + [..] System, AHB and APB busses clocks configuration + (#) Several clock sources can be used to drive the System clock (SYSCLK): HSI, + HSE and PLL. 
+ The AHB clock (HCLK) is derived from System clock through configurable + prescaler and used to clock the CPU, memory and peripherals mapped + on AHB bus (DMA, GPIO...). APB1 (PCLK1) and APB2 (PCLK2) clocks are derived + from AHB clock through configurable prescalers and used to clock + the peripherals mapped on these busses. You can use + "HAL_RCC_GetSysClockFreq()" function to retrieve the frequencies of these clocks. + + (#) For the STM32F405xx/07xx and STM32F415xx/17xx devices, the maximum + frequency of the SYSCLK and HCLK is 168 MHz, PCLK2 84 MHz and PCLK1 42 MHz. + Depending on the device voltage range, the maximum frequency should + be adapted accordingly (refer to the product datasheets for more details). + + (#) For the STM32F42xxx, STM32F43xxx, STM32F446xx, STM32F469xx and STM32F479xx devices, + the maximum frequency of the SYSCLK and HCLK is 180 MHz, PCLK2 90 MHz and PCLK1 45 MHz. + Depending on the device voltage range, the maximum frequency should + be adapted accordingly (refer to the product datasheets for more details). + + (#) For the STM32F401xx, the maximum frequency of the SYSCLK and HCLK is 84 MHz, + PCLK2 84 MHz and PCLK1 42 MHz. + Depending on the device voltage range, the maximum frequency should + be adapted accordingly (refer to the product datasheets for more details). + + (#) For the STM32F41xxx, the maximum frequency of the SYSCLK and HCLK is 100 MHz, + PCLK2 100 MHz and PCLK1 50 MHz. + Depending on the device voltage range, the maximum frequency should + be adapted accordingly (refer to the product datasheets for more details). + +@endverbatim + * @{ + */ + +/** + * @brief Resets the RCC clock configuration to the default reset state. + * @note The default reset state of the clock configuration is given below: + * - HSI ON and used as system clock source + * - HSE and PLL OFF + * - AHB, APB1 and APB2 prescaler set to 1. + * - CSS, MCO1 and MCO2 OFF + * - All interrupts disabled + * @note This function doesn't modify the configuration of the + * - Peripheral clocks + * - LSI, LSE and RTC clocks + * @retval HAL status + */ +__weak HAL_StatusTypeDef HAL_RCC_DeInit(void) +{ + return HAL_OK; +} + +/** + * @brief Initializes the RCC Oscillators according to the specified parameters in the + * RCC_OscInitTypeDef. + * @param RCC_OscInitStruct pointer to an RCC_OscInitTypeDef structure that + * contains the configuration information for the RCC Oscillators. + * @note The PLL is not disabled when used as system clock. + * @note Transitions LSE Bypass to LSE On and LSE On to LSE Bypass are not + * supported by this API. User should request a transition to LSE Off + * first and then LSE On or LSE Bypass. + * @note Transition HSE Bypass to HSE On and HSE On to HSE Bypass are not + * supported by this API. User should request a transition to HSE Off + * first and then HSE On or HSE Bypass. 
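Before the implementation, a representative oscillator setup for a 168 MHz STM32F407-class configuration is sketched here (an 8 MHz HSE crystal is an assumption); the divider arithmetic follows SYSCLK = HSE / PLLM * PLLN / PLLP:

#include "stm32f4xx_hal.h"

void osc_config_example(void)
{
  RCC_OscInitTypeDef osc = {0};

  /* 8 MHz HSE / PLLM(8) = 1 MHz VCO input; * PLLN(336) = 336 MHz VCO;
     / PLLP(2) = 168 MHz SYSCLK; / PLLQ(7) = 48 MHz for USB OTG FS / SDIO / RNG. */
  osc.OscillatorType = RCC_OSCILLATORTYPE_HSE;
  osc.HSEState       = RCC_HSE_ON;
  osc.PLL.PLLState   = RCC_PLL_ON;
  osc.PLL.PLLSource  = RCC_PLLSOURCE_HSE;
  osc.PLL.PLLM       = 8;
  osc.PLL.PLLN       = 336;
  osc.PLL.PLLP       = RCC_PLLP_DIV2;
  osc.PLL.PLLQ       = 7;

  if (HAL_RCC_OscConfig(&osc) != HAL_OK)
  {
    /* oscillator or PLL failed to become ready within the timeout */
  }
}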
+ * @retval HAL status + */ +__weak HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) +{ + uint32_t tickstart; + uint32_t pll_config; + /* Check Null pointer */ + if (RCC_OscInitStruct == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_RCC_OSCILLATORTYPE(RCC_OscInitStruct->OscillatorType)); + /*------------------------------- HSE Configuration ------------------------*/ + if (((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_HSE) == RCC_OSCILLATORTYPE_HSE) + { + /* Check the parameters */ + assert_param(IS_RCC_HSE(RCC_OscInitStruct->HSEState)); + /* When the HSE is used as system clock or clock source for PLL in these cases HSE will not disabled */ + if ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_HSE) || \ + ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLL) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSE))) + { + if ((__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) != RESET) && (RCC_OscInitStruct->HSEState == RCC_HSE_OFF)) + { + return HAL_ERROR; + } + } + else + { + /* Set the new HSE configuration ---------------------------------------*/ + __HAL_RCC_HSE_CONFIG(RCC_OscInitStruct->HSEState); + + /* Check the HSE State */ + if ((RCC_OscInitStruct->HSEState) != RCC_HSE_OFF) + { + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Wait till HSE is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > HSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + else + { + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Wait till HSE is bypassed or disabled */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > HSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + } + /*----------------------------- HSI Configuration --------------------------*/ + if (((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_HSI) == RCC_OSCILLATORTYPE_HSI) + { + /* Check the parameters */ + assert_param(IS_RCC_HSI(RCC_OscInitStruct->HSIState)); + assert_param(IS_RCC_CALIBRATION_VALUE(RCC_OscInitStruct->HSICalibrationValue)); + + /* Check if HSI is used as system clock or as PLL source when PLL is selected as system clock */ + if ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_HSI) || \ + ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLL) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSI))) + { + /* When HSI is used as system clock it will not disabled */ + if ((__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) != RESET) && (RCC_OscInitStruct->HSIState != RCC_HSI_ON)) + { + return HAL_ERROR; + } + /* Otherwise, just the calibration is allowed */ + else + { + /* Adjusts the Internal High Speed oscillator (HSI) calibration value.*/ + __HAL_RCC_HSI_CALIBRATIONVALUE_ADJUST(RCC_OscInitStruct->HSICalibrationValue); + } + } + else + { + /* Check the HSI State */ + if ((RCC_OscInitStruct->HSIState) != RCC_HSI_OFF) + { + /* Enable the Internal High Speed oscillator (HSI). */ + __HAL_RCC_HSI_ENABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till HSI is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > HSI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Adjusts the Internal High Speed oscillator (HSI) calibration value. */ + __HAL_RCC_HSI_CALIBRATIONVALUE_ADJUST(RCC_OscInitStruct->HSICalibrationValue); + } + else + { + /* Disable the Internal High Speed oscillator (HSI). 
*/ + __HAL_RCC_HSI_DISABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till HSI is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > HSI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + } + /*------------------------------ LSI Configuration -------------------------*/ + if (((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_LSI) == RCC_OSCILLATORTYPE_LSI) + { + /* Check the parameters */ + assert_param(IS_RCC_LSI(RCC_OscInitStruct->LSIState)); + + /* Check the LSI State */ + if ((RCC_OscInitStruct->LSIState) != RCC_LSI_OFF) + { + /* Enable the Internal Low Speed oscillator (LSI). */ + __HAL_RCC_LSI_ENABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till LSI is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSIRDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > LSI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + else + { + /* Disable the Internal Low Speed oscillator (LSI). */ + __HAL_RCC_LSI_DISABLE(); + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Wait till LSI is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSIRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > LSI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + /*------------------------------ LSE Configuration -------------------------*/ + if (((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_LSE) == RCC_OSCILLATORTYPE_LSE) + { + FlagStatus pwrclkchanged = RESET; + + /* Check the parameters */ + assert_param(IS_RCC_LSE(RCC_OscInitStruct->LSEState)); + + /* Update LSE configuration in Backup Domain control register */ + /* Requires to enable write access to Backup Domain of necessary */ + if (__HAL_RCC_PWR_IS_CLK_DISABLED()) + { + __HAL_RCC_PWR_CLK_ENABLE(); + pwrclkchanged = SET; + } + + if (HAL_IS_BIT_CLR(PWR->CR, PWR_CR_DBP)) + { + /* Enable write access to Backup domain */ + SET_BIT(PWR->CR, PWR_CR_DBP); + + /* Wait for Backup domain Write protection disable */ + tickstart = HAL_GetTick(); + + while (HAL_IS_BIT_CLR(PWR->CR, PWR_CR_DBP)) + { + if ((HAL_GetTick() - tickstart) > RCC_DBP_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + + /* Set the new LSE configuration -----------------------------------------*/ + __HAL_RCC_LSE_CONFIG(RCC_OscInitStruct->LSEState); + /* Check the LSE State */ + if ((RCC_OscInitStruct->LSEState) != RCC_LSE_OFF) + { + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till LSE is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + else + { + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Wait till LSE is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + + /* Restore clock configuration if changed */ + if (pwrclkchanged == SET) + { + __HAL_RCC_PWR_CLK_DISABLE(); + } + } + /*-------------------------------- PLL Configuration -----------------------*/ + /* Check the parameters */ + assert_param(IS_RCC_PLL(RCC_OscInitStruct->PLL.PLLState)); + if ((RCC_OscInitStruct->PLL.PLLState) != RCC_PLL_NONE) + { + /* Check if the PLL is used as system clock or not */ + if (__HAL_RCC_GET_SYSCLK_SOURCE() != RCC_CFGR_SWS_PLL) + { + if ((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_ON) + { + /* Check the parameters */ + assert_param(IS_RCC_PLLSOURCE(RCC_OscInitStruct->PLL.PLLSource)); + 
assert_param(IS_RCC_PLLM_VALUE(RCC_OscInitStruct->PLL.PLLM)); + assert_param(IS_RCC_PLLN_VALUE(RCC_OscInitStruct->PLL.PLLN)); + assert_param(IS_RCC_PLLP_VALUE(RCC_OscInitStruct->PLL.PLLP)); + assert_param(IS_RCC_PLLQ_VALUE(RCC_OscInitStruct->PLL.PLLQ)); + + /* Disable the main PLL. */ + __HAL_RCC_PLL_DISABLE(); + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Wait till PLL is disabled */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Configure the main PLL clock source, multiplication and division factors. */ + WRITE_REG(RCC->PLLCFGR, (RCC_OscInitStruct->PLL.PLLSource | \ + RCC_OscInitStruct->PLL.PLLM | \ + (RCC_OscInitStruct->PLL.PLLN << RCC_PLLCFGR_PLLN_Pos) | \ + (((RCC_OscInitStruct->PLL.PLLP >> 1U) - 1U) << RCC_PLLCFGR_PLLP_Pos) | \ + (RCC_OscInitStruct->PLL.PLLQ << RCC_PLLCFGR_PLLQ_Pos))); + /* Enable the main PLL. */ + __HAL_RCC_PLL_ENABLE(); + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Wait till PLL is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + else + { + /* Disable the main PLL. */ + __HAL_RCC_PLL_DISABLE(); + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Wait till PLL is disabled */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + else + { + /* Check if there is a request to disable the PLL used as System clock source */ + if ((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_OFF) + { + return HAL_ERROR; + } + else + { + /* Do not return HAL_ERROR if request repeats the current configuration */ + pll_config = RCC->PLLCFGR; +#if defined (RCC_PLLCFGR_PLLR) + if (((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_OFF) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLSRC) != RCC_OscInitStruct->PLL.PLLSource) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLM) != (RCC_OscInitStruct->PLL.PLLM) << RCC_PLLCFGR_PLLM_Pos) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLN) != (RCC_OscInitStruct->PLL.PLLN) << RCC_PLLCFGR_PLLN_Pos) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLP) != (((RCC_OscInitStruct->PLL.PLLP >> 1U) - 1U)) << RCC_PLLCFGR_PLLP_Pos) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLQ) != (RCC_OscInitStruct->PLL.PLLQ << RCC_PLLCFGR_PLLQ_Pos)) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLR) != (RCC_OscInitStruct->PLL.PLLR << RCC_PLLCFGR_PLLR_Pos))) +#else + if (((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_OFF) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLSRC) != RCC_OscInitStruct->PLL.PLLSource) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLM) != (RCC_OscInitStruct->PLL.PLLM) << RCC_PLLCFGR_PLLM_Pos) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLN) != (RCC_OscInitStruct->PLL.PLLN) << RCC_PLLCFGR_PLLN_Pos) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLP) != (((RCC_OscInitStruct->PLL.PLLP >> 1U) - 1U)) << RCC_PLLCFGR_PLLP_Pos) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLQ) != (RCC_OscInitStruct->PLL.PLLQ << RCC_PLLCFGR_PLLQ_Pos))) +#endif /* RCC_PLLCFGR_PLLR */ + { + return HAL_ERROR; + } + } + } + } + return HAL_OK; +} + +/** + * @brief Initializes the CPU, AHB and APB busses clocks according to the specified + * parameters in the RCC_ClkInitStruct. + * @param RCC_ClkInitStruct pointer to an RCC_OscInitTypeDef structure that + * contains the configuration information for the RCC peripheral. 
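 *         (Editor's addition, illustrative only; identifiers assumed from the
 *         STM32F4 HAL headers) A typical call once the PLL is running:
 *           RCC_ClkInitTypeDef clk = {0};
 *           clk.ClockType      = RCC_CLOCKTYPE_SYSCLK | RCC_CLOCKTYPE_HCLK |
 *                                RCC_CLOCKTYPE_PCLK1  | RCC_CLOCKTYPE_PCLK2;
 *           clk.SYSCLKSource   = RCC_SYSCLKSOURCE_PLLCLK;
 *           clk.AHBCLKDivider  = RCC_SYSCLK_DIV1;
 *           clk.APB1CLKDivider = RCC_HCLK_DIV4;
 *           clk.APB2CLKDivider = RCC_HCLK_DIV2;
 *           (void)HAL_RCC_ClockConfig(&clk, FLASH_LATENCY_5);
 *         (FLASH_LATENCY_5 suits 168 MHz at full supply voltage; use the
 *         latency required by the actual device and VDD range.)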
+ * @param FLatency FLASH Latency, this parameter depend on device selected + * + * @note The SystemCoreClock CMSIS variable is used to store System Clock Frequency + * and updated by HAL_RCC_GetHCLKFreq() function called within this function + * + * @note The HSI is used (enabled by hardware) as system clock source after + * startup from Reset, wake-up from STOP and STANDBY mode, or in case + * of failure of the HSE used directly or indirectly as system clock + * (if the Clock Security System CSS is enabled). + * + * @note A switch from one clock source to another occurs only if the target + * clock source is ready (clock stable after startup delay or PLL locked). + * If a clock source which is not yet ready is selected, the switch will + * occur when the clock source will be ready. + * + * @note Depending on the device voltage range, the software has to set correctly + * HPRE[3:0] bits to ensure that HCLK not exceed the maximum allowed frequency + * (for more details refer to section above "Initialization/de-initialization functions") + * @retval None + */ +HAL_StatusTypeDef HAL_RCC_ClockConfig(RCC_ClkInitTypeDef *RCC_ClkInitStruct, uint32_t FLatency) +{ + uint32_t tickstart; + + /* Check Null pointer */ + if (RCC_ClkInitStruct == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_RCC_CLOCKTYPE(RCC_ClkInitStruct->ClockType)); + assert_param(IS_FLASH_LATENCY(FLatency)); + + /* To correctly read data from FLASH memory, the number of wait states (LATENCY) + must be correctly programmed according to the frequency of the CPU clock + (HCLK) and the supply voltage of the device. */ + + /* Increasing the number of wait states because of higher CPU frequency */ + if (FLatency > __HAL_FLASH_GET_LATENCY()) + { + /* Program the new number of wait states to the LATENCY bits in the FLASH_ACR register */ + __HAL_FLASH_SET_LATENCY(FLatency); + + /* Check that the new number of wait states is taken into account to access the Flash + memory by reading the FLASH_ACR register */ + if (__HAL_FLASH_GET_LATENCY() != FLatency) + { + return HAL_ERROR; + } + } + + /*-------------------------- HCLK Configuration --------------------------*/ + if (((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_HCLK) == RCC_CLOCKTYPE_HCLK) + { + /* Set the highest APBx dividers in order to ensure that we do not go through + a non-spec phase whatever we decrease or increase HCLK. 
*/ + if (((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_PCLK1) == RCC_CLOCKTYPE_PCLK1) + { + MODIFY_REG(RCC->CFGR, RCC_CFGR_PPRE1, RCC_HCLK_DIV16); + } + + if (((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_PCLK2) == RCC_CLOCKTYPE_PCLK2) + { + MODIFY_REG(RCC->CFGR, RCC_CFGR_PPRE2, (RCC_HCLK_DIV16 << 3)); + } + + assert_param(IS_RCC_HCLK(RCC_ClkInitStruct->AHBCLKDivider)); + MODIFY_REG(RCC->CFGR, RCC_CFGR_HPRE, RCC_ClkInitStruct->AHBCLKDivider); + } + + /*------------------------- SYSCLK Configuration ---------------------------*/ + if (((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_SYSCLK) == RCC_CLOCKTYPE_SYSCLK) + { + assert_param(IS_RCC_SYSCLKSOURCE(RCC_ClkInitStruct->SYSCLKSource)); + + /* HSE is selected as System Clock Source */ + if (RCC_ClkInitStruct->SYSCLKSource == RCC_SYSCLKSOURCE_HSE) + { + /* Check the HSE ready flag */ + if (__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) == RESET) + { + return HAL_ERROR; + } + } + /* PLL is selected as System Clock Source */ + else if ((RCC_ClkInitStruct->SYSCLKSource == RCC_SYSCLKSOURCE_PLLCLK) || + (RCC_ClkInitStruct->SYSCLKSource == RCC_SYSCLKSOURCE_PLLRCLK)) + { + /* Check the PLL ready flag */ + if (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) == RESET) + { + return HAL_ERROR; + } + } + /* HSI is selected as System Clock Source */ + else + { + /* Check the HSI ready flag */ + if (__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) == RESET) + { + return HAL_ERROR; + } + } + + __HAL_RCC_SYSCLK_CONFIG(RCC_ClkInitStruct->SYSCLKSource); + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + while (__HAL_RCC_GET_SYSCLK_SOURCE() != (RCC_ClkInitStruct->SYSCLKSource << RCC_CFGR_SWS_Pos)) + { + if ((HAL_GetTick() - tickstart) > CLOCKSWITCH_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + + /* Decreasing the number of wait states because of lower CPU frequency */ + if (FLatency < __HAL_FLASH_GET_LATENCY()) + { + /* Program the new number of wait states to the LATENCY bits in the FLASH_ACR register */ + __HAL_FLASH_SET_LATENCY(FLatency); + + /* Check that the new number of wait states is taken into account to access the Flash + memory by reading the FLASH_ACR register */ + if (__HAL_FLASH_GET_LATENCY() != FLatency) + { + return HAL_ERROR; + } + } + + /*-------------------------- PCLK1 Configuration ---------------------------*/ + if (((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_PCLK1) == RCC_CLOCKTYPE_PCLK1) + { + assert_param(IS_RCC_PCLK(RCC_ClkInitStruct->APB1CLKDivider)); + MODIFY_REG(RCC->CFGR, RCC_CFGR_PPRE1, RCC_ClkInitStruct->APB1CLKDivider); + } + + /*-------------------------- PCLK2 Configuration ---------------------------*/ + if (((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_PCLK2) == RCC_CLOCKTYPE_PCLK2) + { + assert_param(IS_RCC_PCLK(RCC_ClkInitStruct->APB2CLKDivider)); + MODIFY_REG(RCC->CFGR, RCC_CFGR_PPRE2, ((RCC_ClkInitStruct->APB2CLKDivider) << 3U)); + } + + /* Update the SystemCoreClock global variable */ + SystemCoreClock = HAL_RCC_GetSysClockFreq() >> AHBPrescTable[(RCC->CFGR & RCC_CFGR_HPRE) >> RCC_CFGR_HPRE_Pos]; + + /* Configure the source of time base considering new system clocks settings */ + HAL_InitTick(uwTickPrio); + + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup RCC_Exported_Functions_Group2 Peripheral Control functions + * @brief RCC clocks control functions + * +@verbatim + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + [..] 
+ + This subsection provides a set of functions allowing to control the RCC Clocks + frequencies. + +@endverbatim + * @{ + */ + +/** + * @brief Selects the clock source to output on MCO1 pin(PA8) or on MCO2 pin(PC9). + * @note PA8/PC9 should be configured in alternate function mode. + * @param RCC_MCOx specifies the output direction for the clock source. + * This parameter can be one of the following values: + * @arg RCC_MCO1: Clock source to output on MCO1 pin(PA8). + * @arg RCC_MCO2: Clock source to output on MCO2 pin(PC9). + * @param RCC_MCOSource specifies the clock source to output. + * This parameter can be one of the following values: + * @arg RCC_MCO1SOURCE_HSI: HSI clock selected as MCO1 source + * @arg RCC_MCO1SOURCE_LSE: LSE clock selected as MCO1 source + * @arg RCC_MCO1SOURCE_HSE: HSE clock selected as MCO1 source + * @arg RCC_MCO1SOURCE_PLLCLK: main PLL clock selected as MCO1 source + * @arg RCC_MCO2SOURCE_SYSCLK: System clock (SYSCLK) selected as MCO2 source + * @arg RCC_MCO2SOURCE_PLLI2SCLK: PLLI2S clock selected as MCO2 source, available for all STM32F4 devices except STM32F410xx + * @arg RCC_MCO2SOURCE_I2SCLK: I2SCLK clock selected as MCO2 source, available only for STM32F410Rx devices + * @arg RCC_MCO2SOURCE_HSE: HSE clock selected as MCO2 source + * @arg RCC_MCO2SOURCE_PLLCLK: main PLL clock selected as MCO2 source + * @param RCC_MCODiv specifies the MCOx prescaler. + * This parameter can be one of the following values: + * @arg RCC_MCODIV_1: no division applied to MCOx clock + * @arg RCC_MCODIV_2: division by 2 applied to MCOx clock + * @arg RCC_MCODIV_3: division by 3 applied to MCOx clock + * @arg RCC_MCODIV_4: division by 4 applied to MCOx clock + * @arg RCC_MCODIV_5: division by 5 applied to MCOx clock + * @note For STM32F410Rx devices to output I2SCLK clock on MCO2 you should have + * at least one of the SPI clocks enabled (SPI1, SPI2 or SPI5). 
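 * @note  (Editor's addition, illustrative only) A typical call is
 *          HAL_RCC_MCOConfig(RCC_MCO1, RCC_MCO1SOURCE_HSE, RCC_MCODIV_4);
 *        which outputs the HSE clock divided by 4 on PA8; the function itself
 *        enables the GPIO clock and puts the pin in alternate function mode.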
+ * @retval None + */ +void HAL_RCC_MCOConfig(uint32_t RCC_MCOx, uint32_t RCC_MCOSource, uint32_t RCC_MCODiv) +{ + GPIO_InitTypeDef GPIO_InitStruct; + /* Check the parameters */ + assert_param(IS_RCC_MCO(RCC_MCOx)); + assert_param(IS_RCC_MCODIV(RCC_MCODiv)); + /* RCC_MCO1 */ + if (RCC_MCOx == RCC_MCO1) + { + assert_param(IS_RCC_MCO1SOURCE(RCC_MCOSource)); + + /* MCO1 Clock Enable */ + __MCO1_CLK_ENABLE(); + + /* Configure the MCO1 pin in alternate function mode */ + GPIO_InitStruct.Pin = MCO1_PIN; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Alternate = GPIO_AF0_MCO; + HAL_GPIO_Init(MCO1_GPIO_PORT, &GPIO_InitStruct); + + /* Mask MCO1 and MCO1PRE[2:0] bits then Select MCO1 clock source and prescaler */ + MODIFY_REG(RCC->CFGR, (RCC_CFGR_MCO1 | RCC_CFGR_MCO1PRE), (RCC_MCOSource | RCC_MCODiv)); + + /* This RCC MCO1 enable feature is available only on STM32F410xx devices */ +#if defined(RCC_CFGR_MCO1EN) + __HAL_RCC_MCO1_ENABLE(); +#endif /* RCC_CFGR_MCO1EN */ + } +#if defined(RCC_CFGR_MCO2) + else + { + assert_param(IS_RCC_MCO2SOURCE(RCC_MCOSource)); + + /* MCO2 Clock Enable */ + __MCO2_CLK_ENABLE(); + + /* Configure the MCO2 pin in alternate function mode */ + GPIO_InitStruct.Pin = MCO2_PIN; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Alternate = GPIO_AF0_MCO; + HAL_GPIO_Init(MCO2_GPIO_PORT, &GPIO_InitStruct); + + /* Mask MCO2 and MCO2PRE[2:0] bits then Select MCO2 clock source and prescaler */ + MODIFY_REG(RCC->CFGR, (RCC_CFGR_MCO2 | RCC_CFGR_MCO2PRE), (RCC_MCOSource | (RCC_MCODiv << 3U))); + + /* This RCC MCO2 enable feature is available only on STM32F410Rx devices */ +#if defined(RCC_CFGR_MCO2EN) + __HAL_RCC_MCO2_ENABLE(); +#endif /* RCC_CFGR_MCO2EN */ + } +#endif /* RCC_CFGR_MCO2 */ +} + +/** + * @brief Enables the Clock Security System. + * @note If a failure is detected on the HSE oscillator clock, this oscillator + * is automatically disabled and an interrupt is generated to inform the + * software about the failure (Clock Security System Interrupt, CSSI), + * allowing the MCU to perform rescue operations. The CSSI is linked to + * the Cortex-M4 NMI (Non-Maskable Interrupt) exception vector. + * @retval None + */ +void HAL_RCC_EnableCSS(void) +{ + *(__IO uint32_t *) RCC_CR_CSSON_BB = (uint32_t)ENABLE; +} + +/** + * @brief Disables the Clock Security System. + * @retval None + */ +void HAL_RCC_DisableCSS(void) +{ + *(__IO uint32_t *) RCC_CR_CSSON_BB = (uint32_t)DISABLE; +} + +/** + * @brief Returns the SYSCLK frequency + * + * @note The system frequency computed by this function is not the real + * frequency in the chip. It is calculated based on the predefined + * constant and the selected clock source: + * @note If SYSCLK source is HSI, function returns values based on HSI_VALUE(*) + * @note If SYSCLK source is HSE, function returns values based on HSE_VALUE(**) + * @note If SYSCLK source is PLL, function returns values based on HSE_VALUE(**) + * or HSI_VALUE(*) multiplied/divided by the PLL factors. + * @note (*) HSI_VALUE is a constant defined in stm32f4xx_hal_conf.h file (default value + * 16 MHz) but the real value may vary depending on the variations + * in voltage and temperature. + * @note (**) HSE_VALUE is a constant defined in stm32f4xx_hal_conf.h file (default value + * 25 MHz), user has to ensure that HSE_VALUE is same as the real + * frequency of the crystal used. 
Otherwise, this function may + * have wrong result. + * + * @note The result of this function could be not correct when using fractional + * value for HSE crystal. + * + * @note This function can be used by the user application to compute the + * baudrate for the communication peripherals or configure other parameters. + * + * @note Each time SYSCLK changes, this function must be called to update the + * right SYSCLK value. Otherwise, any configuration based on this function will be incorrect. + * + * + * @retval SYSCLK frequency + */ +__weak uint32_t HAL_RCC_GetSysClockFreq(void) +{ + uint32_t pllm = 0U; + uint32_t pllvco = 0U; + uint32_t pllp = 0U; + uint32_t sysclockfreq = 0U; + + /* Get SYSCLK source -------------------------------------------------------*/ + switch (RCC->CFGR & RCC_CFGR_SWS) + { + case RCC_CFGR_SWS_HSI: /* HSI used as system clock source */ + { + sysclockfreq = HSI_VALUE; + break; + } + case RCC_CFGR_SWS_HSE: /* HSE used as system clock source */ + { + sysclockfreq = HSE_VALUE; + break; + } + case RCC_CFGR_SWS_PLL: /* PLL used as system clock source */ + { + /* PLL_VCO = (HSE_VALUE or HSI_VALUE / PLLM) * PLLN + SYSCLK = PLL_VCO / PLLP */ + pllm = RCC->PLLCFGR & RCC_PLLCFGR_PLLM; + if (__HAL_RCC_GET_PLL_OSCSOURCE() != RCC_PLLSOURCE_HSI) + { + /* HSE used as PLL clock source */ + pllvco = (uint32_t)((((uint64_t) HSE_VALUE * ((uint64_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos)))) / (uint64_t)pllm); + } + else + { + /* HSI used as PLL clock source */ + pllvco = (uint32_t)((((uint64_t) HSI_VALUE * ((uint64_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos)))) / (uint64_t)pllm); + } + pllp = ((((RCC->PLLCFGR & RCC_PLLCFGR_PLLP) >> RCC_PLLCFGR_PLLP_Pos) + 1U) * 2U); + + sysclockfreq = pllvco / pllp; + break; + } + default: + { + sysclockfreq = HSI_VALUE; + break; + } + } + return sysclockfreq; +} + +/** + * @brief Returns the HCLK frequency + * @note Each time HCLK changes, this function must be called to update the + * right HCLK value. Otherwise, any configuration based on this function will be incorrect. + * + * @note The SystemCoreClock CMSIS variable is used to store System Clock Frequency + * and updated within this function + * @retval HCLK frequency + */ +uint32_t HAL_RCC_GetHCLKFreq(void) +{ + return SystemCoreClock; +} + +/** + * @brief Returns the PCLK1 frequency + * @note Each time PCLK1 changes, this function must be called to update the + * right PCLK1 value. Otherwise, any configuration based on this function will be incorrect. + * @retval PCLK1 frequency + */ +uint32_t HAL_RCC_GetPCLK1Freq(void) +{ + /* Get HCLK source and Compute PCLK1 frequency ---------------------------*/ + return (HAL_RCC_GetHCLKFreq() >> APBPrescTable[(RCC->CFGR & RCC_CFGR_PPRE1) >> RCC_CFGR_PPRE1_Pos]); +} + +/** + * @brief Returns the PCLK2 frequency + * @note Each time PCLK2 changes, this function must be called to update the + * right PCLK2 value. Otherwise, any configuration based on this function will be incorrect. + * @retval PCLK2 frequency + */ +uint32_t HAL_RCC_GetPCLK2Freq(void) +{ + /* Get HCLK source and Compute PCLK2 frequency ---------------------------*/ + return (HAL_RCC_GetHCLKFreq() >> APBPrescTable[(RCC->CFGR & RCC_CFGR_PPRE2) >> RCC_CFGR_PPRE2_Pos]); +} + +/** + * @brief Configures the RCC_OscInitStruct according to the internal + * RCC configuration registers. + * @param RCC_OscInitStruct pointer to an RCC_OscInitTypeDef structure that + * will be configured. 
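 * @note  (Editor's addition, illustrative only) Typical usage is to read back
 *        the current settings before changing a subset of them:
 *          RCC_OscInitTypeDef osc;
 *          HAL_RCC_GetOscConfig(&osc);
 *          osc.HSEState = RCC_HSE_ON;          (hypothetical change)
 *          (void)HAL_RCC_OscConfig(&osc);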
+ * @retval None + */ +__weak void HAL_RCC_GetOscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) +{ + /* Set all possible values for the Oscillator type parameter ---------------*/ + RCC_OscInitStruct->OscillatorType = RCC_OSCILLATORTYPE_HSE | RCC_OSCILLATORTYPE_HSI | RCC_OSCILLATORTYPE_LSE | RCC_OSCILLATORTYPE_LSI; + + /* Get the HSE configuration -----------------------------------------------*/ + if ((RCC->CR & RCC_CR_HSEBYP) == RCC_CR_HSEBYP) + { + RCC_OscInitStruct->HSEState = RCC_HSE_BYPASS; + } + else if ((RCC->CR & RCC_CR_HSEON) == RCC_CR_HSEON) + { + RCC_OscInitStruct->HSEState = RCC_HSE_ON; + } + else + { + RCC_OscInitStruct->HSEState = RCC_HSE_OFF; + } + + /* Get the HSI configuration -----------------------------------------------*/ + if ((RCC->CR & RCC_CR_HSION) == RCC_CR_HSION) + { + RCC_OscInitStruct->HSIState = RCC_HSI_ON; + } + else + { + RCC_OscInitStruct->HSIState = RCC_HSI_OFF; + } + + RCC_OscInitStruct->HSICalibrationValue = (uint32_t)((RCC->CR & RCC_CR_HSITRIM) >> RCC_CR_HSITRIM_Pos); + + /* Get the LSE configuration -----------------------------------------------*/ + if ((RCC->BDCR & RCC_BDCR_LSEBYP) == RCC_BDCR_LSEBYP) + { + RCC_OscInitStruct->LSEState = RCC_LSE_BYPASS; + } + else if ((RCC->BDCR & RCC_BDCR_LSEON) == RCC_BDCR_LSEON) + { + RCC_OscInitStruct->LSEState = RCC_LSE_ON; + } + else + { + RCC_OscInitStruct->LSEState = RCC_LSE_OFF; + } + + /* Get the LSI configuration -----------------------------------------------*/ + if ((RCC->CSR & RCC_CSR_LSION) == RCC_CSR_LSION) + { + RCC_OscInitStruct->LSIState = RCC_LSI_ON; + } + else + { + RCC_OscInitStruct->LSIState = RCC_LSI_OFF; + } + + /* Get the PLL configuration -----------------------------------------------*/ + if ((RCC->CR & RCC_CR_PLLON) == RCC_CR_PLLON) + { + RCC_OscInitStruct->PLL.PLLState = RCC_PLL_ON; + } + else + { + RCC_OscInitStruct->PLL.PLLState = RCC_PLL_OFF; + } + RCC_OscInitStruct->PLL.PLLSource = (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC); + RCC_OscInitStruct->PLL.PLLM = (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM); + RCC_OscInitStruct->PLL.PLLN = (uint32_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos); + RCC_OscInitStruct->PLL.PLLP = (uint32_t)((((RCC->PLLCFGR & RCC_PLLCFGR_PLLP) + RCC_PLLCFGR_PLLP_0) << 1U) >> RCC_PLLCFGR_PLLP_Pos); + RCC_OscInitStruct->PLL.PLLQ = (uint32_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLQ) >> RCC_PLLCFGR_PLLQ_Pos); +} + +/** + * @brief Configures the RCC_ClkInitStruct according to the internal + * RCC configuration registers. + * @param RCC_ClkInitStruct pointer to an RCC_ClkInitTypeDef structure that + * will be configured. + * @param pFLatency Pointer on the Flash Latency. 
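 * @note  (Editor's addition, illustrative only) Example of reading back the bus
 *        configuration together with the current Flash latency:
 *          RCC_ClkInitTypeDef clk;
 *          uint32_t latency;
 *          HAL_RCC_GetClockConfig(&clk, &latency);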
+ * @retval None + */ +void HAL_RCC_GetClockConfig(RCC_ClkInitTypeDef *RCC_ClkInitStruct, uint32_t *pFLatency) +{ + /* Set all possible values for the Clock type parameter --------------------*/ + RCC_ClkInitStruct->ClockType = RCC_CLOCKTYPE_SYSCLK | RCC_CLOCKTYPE_HCLK | RCC_CLOCKTYPE_PCLK1 | RCC_CLOCKTYPE_PCLK2; + + /* Get the SYSCLK configuration --------------------------------------------*/ + RCC_ClkInitStruct->SYSCLKSource = (uint32_t)(RCC->CFGR & RCC_CFGR_SW); + + /* Get the HCLK configuration ----------------------------------------------*/ + RCC_ClkInitStruct->AHBCLKDivider = (uint32_t)(RCC->CFGR & RCC_CFGR_HPRE); + + /* Get the APB1 configuration ----------------------------------------------*/ + RCC_ClkInitStruct->APB1CLKDivider = (uint32_t)(RCC->CFGR & RCC_CFGR_PPRE1); + + /* Get the APB2 configuration ----------------------------------------------*/ + RCC_ClkInitStruct->APB2CLKDivider = (uint32_t)((RCC->CFGR & RCC_CFGR_PPRE2) >> 3U); + + /* Get the Flash Wait State (Latency) configuration ------------------------*/ + *pFLatency = (uint32_t)(FLASH->ACR & FLASH_ACR_LATENCY); +} + +/** + * @brief This function handles the RCC CSS interrupt request. + * @note This API should be called under the NMI_Handler(). + * @retval None + */ +void HAL_RCC_NMI_IRQHandler(void) +{ + /* Check RCC CSSF flag */ + if (__HAL_RCC_GET_IT(RCC_IT_CSS)) + { + /* RCC Clock Security System interrupt user callback */ + HAL_RCC_CSSCallback(); + + /* Clear RCC CSS pending bit */ + __HAL_RCC_CLEAR_IT(RCC_IT_CSS); + } +} + +/** + * @brief RCC Clock Security System interrupt callback + * @retval None + */ +__weak void HAL_RCC_CSSCallback(void) +{ + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_RCC_CSSCallback could be implemented in the user file + */ +} + +/** + * @} + */ + +/** + * @} + */ + +#endif /* HAL_RCC_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rcc_ex.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rcc_ex.c new file mode 100644 index 0000000..7b3b20b --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rcc_ex.c @@ -0,0 +1,3833 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_rcc_ex.c + * @author MCD Application Team + * @brief Extension RCC HAL module driver. + * This file provides firmware functions to manage the following + * functionalities RCC extension peripheral: + * + Extended Peripheral Control functions + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @defgroup RCCEx RCCEx + * @brief RCCEx HAL module driver + * @{ + */ + +#ifdef HAL_RCC_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @addtogroup RCCEx_Private_Constants + * @{ + */ +/** + * @} + */ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/** @defgroup RCCEx_Exported_Functions RCCEx Exported Functions + * @{ + */ + +/** @defgroup RCCEx_Exported_Functions_Group1 Extended Peripheral Control functions + * @brief Extended Peripheral Control functions + * +@verbatim + =============================================================================== + ##### Extended Peripheral Control functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to control the RCC Clocks + frequencies. + [..] + (@) Important note: Care must be taken when HAL_RCCEx_PeriphCLKConfig() is used to + select the RTC clock source; in this case the Backup domain will be reset in + order to modify the RTC Clock source, as consequence RTC registers (including + the backup registers) and RCC_BDCR register are set to their reset values. + +@endverbatim + * @{ + */ + +#if defined(STM32F446xx) +/** + * @brief Initializes the RCC extended peripherals clocks according to the specified + * parameters in the RCC_PeriphCLKInitTypeDef. + * @param PeriphClkInit pointer to an RCC_PeriphCLKInitTypeDef structure that + * contains the configuration information for the Extended Peripherals + * clocks(I2S, SAI, LTDC RTC and TIM). + * + * @note Care must be taken when HAL_RCCEx_PeriphCLKConfig() is used to select + * the RTC clock source; in this case the Backup domain will be reset in + * order to modify the RTC Clock source, as consequence RTC registers (including + * the backup registers) and RCC_BDCR register are set to their reset values. 
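 * @note  (Editor's addition, illustrative only; identifiers assumed from the
 *        STM32F4 HAL headers) Selecting the LSE as RTC clock source typically
 *        looks like:
 *          RCC_PeriphCLKInitTypeDef PeriphClkInit = {0};
 *          PeriphClkInit.PeriphClockSelection = RCC_PERIPHCLK_RTC;
 *          PeriphClkInit.RTCClockSelection    = RCC_RTCCLKSOURCE_LSE;
 *          (void)HAL_RCCEx_PeriphCLKConfig(&PeriphClkInit);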
+ * + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit) +{ + uint32_t tickstart = 0U; + uint32_t tmpreg1 = 0U; + uint32_t plli2sp = 0U; + uint32_t plli2sq = 0U; + uint32_t plli2sr = 0U; + uint32_t pllsaip = 0U; + uint32_t pllsaiq = 0U; + uint32_t plli2sused = 0U; + uint32_t pllsaiused = 0U; + + /* Check the peripheral clock selection parameters */ + assert_param(IS_RCC_PERIPHCLOCK(PeriphClkInit->PeriphClockSelection)); + + /*------------------------ I2S APB1 configuration --------------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S_APB1) == (RCC_PERIPHCLK_I2S_APB1)) + { + /* Check the parameters */ + assert_param(IS_RCC_I2SAPB1CLKSOURCE(PeriphClkInit->I2sApb1ClockSelection)); + + /* Configure I2S Clock source */ + __HAL_RCC_I2S_APB1_CONFIG(PeriphClkInit->I2sApb1ClockSelection); + /* Enable the PLLI2S when it's used as clock source for I2S */ + if (PeriphClkInit->I2sApb1ClockSelection == RCC_I2SAPB1CLKSOURCE_PLLI2S) + { + plli2sused = 1U; + } + } + /*--------------------------------------------------------------------------*/ + + /*---------------------------- I2S APB2 configuration ----------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S_APB2) == (RCC_PERIPHCLK_I2S_APB2)) + { + /* Check the parameters */ + assert_param(IS_RCC_I2SAPB2CLKSOURCE(PeriphClkInit->I2sApb2ClockSelection)); + + /* Configure I2S Clock source */ + __HAL_RCC_I2S_APB2_CONFIG(PeriphClkInit->I2sApb2ClockSelection); + /* Enable the PLLI2S when it's used as clock source for I2S */ + if (PeriphClkInit->I2sApb2ClockSelection == RCC_I2SAPB2CLKSOURCE_PLLI2S) + { + plli2sused = 1U; + } + } + /*--------------------------------------------------------------------------*/ + + /*--------------------------- SAI1 configuration ---------------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI1) == (RCC_PERIPHCLK_SAI1)) + { + /* Check the parameters */ + assert_param(IS_RCC_SAI1CLKSOURCE(PeriphClkInit->Sai1ClockSelection)); + + /* Configure SAI1 Clock source */ + __HAL_RCC_SAI1_CONFIG(PeriphClkInit->Sai1ClockSelection); + /* Enable the PLLI2S when it's used as clock source for SAI */ + if (PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLI2S) + { + plli2sused = 1U; + } + /* Enable the PLLSAI when it's used as clock source for SAI */ + if (PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLSAI) + { + pllsaiused = 1U; + } + } + /*--------------------------------------------------------------------------*/ + + /*-------------------------- SAI2 configuration ----------------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI2) == (RCC_PERIPHCLK_SAI2)) + { + /* Check the parameters */ + assert_param(IS_RCC_SAI2CLKSOURCE(PeriphClkInit->Sai2ClockSelection)); + + /* Configure SAI2 Clock source */ + __HAL_RCC_SAI2_CONFIG(PeriphClkInit->Sai2ClockSelection); + + /* Enable the PLLI2S when it's used as clock source for SAI */ + if (PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLI2S) + { + plli2sused = 1U; + } + /* Enable the PLLSAI when it's used as clock source for SAI */ + if (PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLSAI) + { + pllsaiused = 1U; + } + } + /*--------------------------------------------------------------------------*/ + + /*----------------------------- RTC configuration --------------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_RTC) == (RCC_PERIPHCLK_RTC)) + { + /* Check for 
RTC Parameters used to output RTCCLK */ + assert_param(IS_RCC_RTCCLKSOURCE(PeriphClkInit->RTCClockSelection)); + + /* Enable Power Clock*/ + __HAL_RCC_PWR_CLK_ENABLE(); + + /* Enable write access to Backup domain */ + PWR->CR |= PWR_CR_DBP; + + /* Get tick */ + tickstart = HAL_GetTick(); + + while ((PWR->CR & PWR_CR_DBP) == RESET) + { + if ((HAL_GetTick() - tickstart) > RCC_DBP_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + /* Reset the Backup domain only if the RTC Clock source selection is modified from reset value */ + tmpreg1 = (RCC->BDCR & RCC_BDCR_RTCSEL); + if ((tmpreg1 != 0x00000000U) && ((tmpreg1) != (PeriphClkInit->RTCClockSelection & RCC_BDCR_RTCSEL))) + { + /* Store the content of BDCR register before the reset of Backup Domain */ + tmpreg1 = (RCC->BDCR & ~(RCC_BDCR_RTCSEL)); + /* RTC Clock selection can be changed only if the Backup Domain is reset */ + __HAL_RCC_BACKUPRESET_FORCE(); + __HAL_RCC_BACKUPRESET_RELEASE(); + /* Restore the Content of BDCR register */ + RCC->BDCR = tmpreg1; + + /* Wait for LSE reactivation if LSE was enable prior to Backup Domain reset */ + if (HAL_IS_BIT_SET(RCC->BDCR, RCC_BDCR_LSEON)) + { + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait till LSE is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + __HAL_RCC_RTC_CONFIG(PeriphClkInit->RTCClockSelection); + } + /*--------------------------------------------------------------------------*/ + + /*---------------------------- TIM configuration ---------------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_TIM) == (RCC_PERIPHCLK_TIM)) + { + /* Configure Timer Prescaler */ + __HAL_RCC_TIMCLKPRESCALER(PeriphClkInit->TIMPresSelection); + } + /*--------------------------------------------------------------------------*/ + + /*---------------------------- FMPI2C1 Configuration -----------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_FMPI2C1) == RCC_PERIPHCLK_FMPI2C1) + { + /* Check the parameters */ + assert_param(IS_RCC_FMPI2C1CLKSOURCE(PeriphClkInit->Fmpi2c1ClockSelection)); + + /* Configure the FMPI2C1 clock source */ + __HAL_RCC_FMPI2C1_CONFIG(PeriphClkInit->Fmpi2c1ClockSelection); + } + /*--------------------------------------------------------------------------*/ + + /*------------------------------ CEC Configuration -------------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CEC) == RCC_PERIPHCLK_CEC) + { + /* Check the parameters */ + assert_param(IS_RCC_CECCLKSOURCE(PeriphClkInit->CecClockSelection)); + + /* Configure the CEC clock source */ + __HAL_RCC_CEC_CONFIG(PeriphClkInit->CecClockSelection); + } + /*--------------------------------------------------------------------------*/ + + /*----------------------------- CLK48 Configuration ------------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) + { + /* Check the parameters */ + assert_param(IS_RCC_CLK48CLKSOURCE(PeriphClkInit->Clk48ClockSelection)); + + /* Configure the CLK48 clock source */ + __HAL_RCC_CLK48_CONFIG(PeriphClkInit->Clk48ClockSelection); + + /* Enable the PLLSAI when it's used as clock source for CLK48 */ + if (PeriphClkInit->Clk48ClockSelection == RCC_CLK48CLKSOURCE_PLLSAIP) + { + pllsaiused = 1U; + } + } + /*--------------------------------------------------------------------------*/ + + /*----------------------------- SDIO Configuration 
-------------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SDIO) == RCC_PERIPHCLK_SDIO) + { + /* Check the parameters */ + assert_param(IS_RCC_SDIOCLKSOURCE(PeriphClkInit->SdioClockSelection)); + + /* Configure the SDIO clock source */ + __HAL_RCC_SDIO_CONFIG(PeriphClkInit->SdioClockSelection); + } + /*--------------------------------------------------------------------------*/ + + /*------------------------------ SPDIFRX Configuration ---------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SPDIFRX) == RCC_PERIPHCLK_SPDIFRX) + { + /* Check the parameters */ + assert_param(IS_RCC_SPDIFRXCLKSOURCE(PeriphClkInit->SpdifClockSelection)); + + /* Configure the SPDIFRX clock source */ + __HAL_RCC_SPDIFRX_CONFIG(PeriphClkInit->SpdifClockSelection); + /* Enable the PLLI2S when it's used as clock source for SPDIFRX */ + if (PeriphClkInit->SpdifClockSelection == RCC_SPDIFRXCLKSOURCE_PLLI2SP) + { + plli2sused = 1U; + } + } + /*--------------------------------------------------------------------------*/ + + /*---------------------------- PLLI2S Configuration ------------------------*/ + /* PLLI2S is configured when a peripheral will use it as source clock : SAI1, SAI2, I2S on APB1, + I2S on APB2 or SPDIFRX */ + if ((plli2sused == 1U) || (PeriphClkInit->PeriphClockSelection == RCC_PERIPHCLK_PLLI2S)) + { + /* Disable the PLLI2S */ + __HAL_RCC_PLLI2S_DISABLE(); + /* Get tick */ + tickstart = HAL_GetTick(); + /* Wait till PLLI2S is disabled */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + /* check for common PLLI2S Parameters */ + assert_param(IS_RCC_PLLI2SM_VALUE(PeriphClkInit->PLLI2S.PLLI2SM)); + assert_param(IS_RCC_PLLI2SN_VALUE(PeriphClkInit->PLLI2S.PLLI2SN)); + + /*------ In Case of PLLI2S is selected as source clock for I2S -----------*/ + if (((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S_APB1) == RCC_PERIPHCLK_I2S_APB1) + && (PeriphClkInit->I2sApb1ClockSelection == RCC_I2SAPB1CLKSOURCE_PLLI2S)) || + ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S_APB2) == RCC_PERIPHCLK_I2S_APB2) && (PeriphClkInit->I2sApb2ClockSelection == RCC_I2SAPB2CLKSOURCE_PLLI2S))) + { + /* check for Parameters */ + assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); + + /* Read PLLI2SP/PLLI2SQ value from PLLI2SCFGR register (this value is not needed for I2S configuration) */ + plli2sp = ((((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SP) >> RCC_PLLI2SCFGR_PLLI2SP_Pos) + 1U) << 1U); + plli2sq = ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SQ) >> RCC_PLLI2SCFGR_PLLI2SQ_Pos); + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) * (PLLI2SN/PLLI2SM) */ + /* I2SCLK = f(PLLI2S clock output) = f(VCO clock) / PLLI2SR */ + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SM, PeriphClkInit->PLLI2S.PLLI2SN, plli2sp, plli2sq, + PeriphClkInit->PLLI2S.PLLI2SR); + } + + /*------- In Case of PLLI2S is selected as source clock for SAI ----------*/ + if (((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI1) == RCC_PERIPHCLK_SAI1) + && (PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLI2S)) || + ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI2) == RCC_PERIPHCLK_SAI2) && (PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLI2S))) + { + /* Check for PLLI2S Parameters */ + 
assert_param(IS_RCC_PLLI2SQ_VALUE(PeriphClkInit->PLLI2S.PLLI2SQ)); + /* Check for PLLI2S/DIVQ parameters */ + assert_param(IS_RCC_PLLI2S_DIVQ_VALUE(PeriphClkInit->PLLI2SDivQ)); + + /* Read PLLI2SP/PLLI2SR value from PLLI2SCFGR register (this value is not needed for SAI configuration) */ + plli2sp = ((((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SP) >> RCC_PLLI2SCFGR_PLLI2SP_Pos) + 1U) << 1U); + plli2sr = ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> RCC_PLLI2SCFGR_PLLI2SR_Pos); + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO Input = PLL_SOURCE/PLLI2SM */ + /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ + /* SAI_CLK(first level) = PLLI2S_VCO Output/PLLI2SQ */ + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SM, PeriphClkInit->PLLI2S.PLLI2SN, plli2sp, + PeriphClkInit->PLLI2S.PLLI2SQ, plli2sr); + + /* SAI_CLK_x = SAI_CLK(first level)/PLLI2SDIVQ */ + __HAL_RCC_PLLI2S_PLLSAICLKDIVQ_CONFIG(PeriphClkInit->PLLI2SDivQ); + } + + /*------ In Case of PLLI2S is selected as source clock for SPDIFRX -------*/ + if ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SPDIFRX) == RCC_PERIPHCLK_SPDIFRX) + && (PeriphClkInit->SpdifClockSelection == RCC_SPDIFRXCLKSOURCE_PLLI2SP)) + { + /* check for Parameters */ + assert_param(IS_RCC_PLLI2SP_VALUE(PeriphClkInit->PLLI2S.PLLI2SP)); + /* Read PLLI2SR value from PLLI2SCFGR register (this value is not need for SAI configuration) */ + plli2sq = ((((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SP) >> RCC_PLLI2SCFGR_PLLI2SP_Pos) + 1U) << 1U); + plli2sr = ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> RCC_PLLI2SCFGR_PLLI2SR_Pos); + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) * (PLLI2SN/PLLI2SM) */ + /* SPDIFRXCLK = f(PLLI2S clock output) = f(VCO clock) / PLLI2SP */ + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SM, PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SP, + plli2sq, plli2sr); + } + + /*----------------- In Case of PLLI2S is just selected -----------------*/ + if ((PeriphClkInit->PeriphClockSelection & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S) + { + /* Check for Parameters */ + assert_param(IS_RCC_PLLI2SP_VALUE(PeriphClkInit->PLLI2S.PLLI2SP)); + assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); + assert_param(IS_RCC_PLLI2SQ_VALUE(PeriphClkInit->PLLI2S.PLLI2SQ)); + + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) * (PLLI2SN/PLLI2SM) */ + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SM, PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SP, + PeriphClkInit->PLLI2S.PLLI2SQ, PeriphClkInit->PLLI2S.PLLI2SR); + } + + /* Enable the PLLI2S */ + __HAL_RCC_PLLI2S_ENABLE(); + /* Get tick */ + tickstart = HAL_GetTick(); + /* Wait till PLLI2S is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + } + /*--------------------------------------------------------------------------*/ + + /*----------------------------- PLLSAI Configuration -----------------------*/ + /* PLLSAI is configured when a peripheral will use it as source clock : SAI1, SAI2, CLK48 or SDIO */ + if (pllsaiused == 1U) + { + /* Disable PLLSAI Clock */ + __HAL_RCC_PLLSAI_DISABLE(); + /* Get tick */ + tickstart = HAL_GetTick(); + /* Wait till PLLSAI is disabled */ + while (__HAL_RCC_PLLSAI_GET_FLAG() != RESET) + { + if ((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) + { + /* 
return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + /* Check the PLLSAI division factors */ + assert_param(IS_RCC_PLLSAIM_VALUE(PeriphClkInit->PLLSAI.PLLSAIM)); + assert_param(IS_RCC_PLLSAIN_VALUE(PeriphClkInit->PLLSAI.PLLSAIN)); + + /*------ In Case of PLLSAI is selected as source clock for SAI -----------*/ + if (((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI1) == RCC_PERIPHCLK_SAI1) + && (PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLSAI)) || + ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI2) == RCC_PERIPHCLK_SAI2) && (PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLSAI))) + { + /* check for PLLSAIQ Parameter */ + assert_param(IS_RCC_PLLSAIQ_VALUE(PeriphClkInit->PLLSAI.PLLSAIQ)); + /* check for PLLSAI/DIVQ Parameter */ + assert_param(IS_RCC_PLLSAI_DIVQ_VALUE(PeriphClkInit->PLLSAIDivQ)); + + /* Read PLLSAIP value from PLLSAICFGR register (this value is not needed for SAI configuration) */ + pllsaip = ((((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIP) >> RCC_PLLSAICFGR_PLLSAIP_Pos) + 1U) << 1U); + /* PLLSAI_VCO Input = PLL_SOURCE/PLLM */ + /* PLLSAI_VCO Output = PLLSAI_VCO Input * PLLSAIN */ + /* SAI_CLK(first level) = PLLSAI_VCO Output/PLLSAIQ */ + __HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIM, PeriphClkInit->PLLSAI.PLLSAIN, pllsaip, + PeriphClkInit->PLLSAI.PLLSAIQ, 0U); + + /* SAI_CLK_x = SAI_CLK(first level)/PLLSAIDIVQ */ + __HAL_RCC_PLLSAI_PLLSAICLKDIVQ_CONFIG(PeriphClkInit->PLLSAIDivQ); + } + + /*------ In Case of PLLSAI is selected as source clock for CLK48 ---------*/ + /* In Case of PLLI2S is selected as source clock for CLK48 */ + if ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) + && (PeriphClkInit->Clk48ClockSelection == RCC_CLK48CLKSOURCE_PLLSAIP)) + { + /* check for Parameters */ + assert_param(IS_RCC_PLLSAIP_VALUE(PeriphClkInit->PLLSAI.PLLSAIP)); + /* Read PLLSAIQ value from PLLI2SCFGR register (this value is not need for SAI configuration) */ + pllsaiq = ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIQ) >> RCC_PLLSAICFGR_PLLSAIQ_Pos); + /* Configure the PLLSAI division factors */ + /* PLLSAI_VCO = f(VCO clock) = f(PLLSAI clock input) * (PLLI2SN/PLLSAIM) */ + /* 48CLK = f(PLLSAI clock output) = f(VCO clock) / PLLSAIP */ + __HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIM, PeriphClkInit->PLLSAI.PLLSAIN, PeriphClkInit->PLLSAI.PLLSAIP, + pllsaiq, 0U); + } + + /* Enable PLLSAI Clock */ + __HAL_RCC_PLLSAI_ENABLE(); + /* Get tick */ + tickstart = HAL_GetTick(); + /* Wait till PLLSAI is ready */ + while (__HAL_RCC_PLLSAI_GET_FLAG() == RESET) + { + if ((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + } + return HAL_OK; +} + +/** + * @brief Get the RCC_PeriphCLKInitTypeDef according to the internal + * RCC configuration registers. + * @param PeriphClkInit pointer to an RCC_PeriphCLKInitTypeDef structure that + * will be configured. 
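 * @note  (Editor's addition, illustrative only) Example of reading the current
 *        peripheral clock routing:
 *          RCC_PeriphCLKInitTypeDef periphCfg;
 *          HAL_RCCEx_GetPeriphCLKConfig(&periphCfg);
 *        after which fields such as periphCfg.RTCClockSelection or
 *        periphCfg.Clk48ClockSelection can be inspected.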
+ * @retval None + */ +void HAL_RCCEx_GetPeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit) +{ + uint32_t tempreg; + + /* Set all possible values for the extended clock type parameter------------*/ + PeriphClkInit->PeriphClockSelection = RCC_PERIPHCLK_I2S_APB1 | RCC_PERIPHCLK_I2S_APB2 | \ + RCC_PERIPHCLK_SAI1 | RCC_PERIPHCLK_SAI2 | \ + RCC_PERIPHCLK_TIM | RCC_PERIPHCLK_RTC | \ + RCC_PERIPHCLK_CEC | RCC_PERIPHCLK_FMPI2C1 | \ + RCC_PERIPHCLK_CLK48 | RCC_PERIPHCLK_SDIO | \ + RCC_PERIPHCLK_SPDIFRX; + + /* Get the PLLI2S Clock configuration --------------------------------------*/ + PeriphClkInit->PLLI2S.PLLI2SM = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM) >> RCC_PLLI2SCFGR_PLLI2SM_Pos); + PeriphClkInit->PLLI2S.PLLI2SN = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> RCC_PLLI2SCFGR_PLLI2SN_Pos); + PeriphClkInit->PLLI2S.PLLI2SP = (uint32_t)((((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SP) >> RCC_PLLI2SCFGR_PLLI2SP_Pos) + 1U) << 1U); + PeriphClkInit->PLLI2S.PLLI2SQ = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SQ) >> RCC_PLLI2SCFGR_PLLI2SQ_Pos); + PeriphClkInit->PLLI2S.PLLI2SR = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> RCC_PLLI2SCFGR_PLLI2SR_Pos); + /* Get the PLLSAI Clock configuration --------------------------------------*/ + PeriphClkInit->PLLSAI.PLLSAIM = (uint32_t)((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIM) >> RCC_PLLSAICFGR_PLLSAIM_Pos); + PeriphClkInit->PLLSAI.PLLSAIN = (uint32_t)((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIN) >> RCC_PLLSAICFGR_PLLSAIN_Pos); + PeriphClkInit->PLLSAI.PLLSAIP = (uint32_t)((((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIP) >> RCC_PLLSAICFGR_PLLSAIP_Pos) + 1U) << 1U); + PeriphClkInit->PLLSAI.PLLSAIQ = (uint32_t)((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIQ) >> RCC_PLLSAICFGR_PLLSAIQ_Pos); + /* Get the PLLSAI/PLLI2S division factors ----------------------------------*/ + PeriphClkInit->PLLI2SDivQ = (uint32_t)((RCC->DCKCFGR & RCC_DCKCFGR_PLLI2SDIVQ) >> RCC_DCKCFGR_PLLI2SDIVQ_Pos); + PeriphClkInit->PLLSAIDivQ = (uint32_t)((RCC->DCKCFGR & RCC_DCKCFGR_PLLSAIDIVQ) >> RCC_DCKCFGR_PLLSAIDIVQ_Pos); + + /* Get the SAI1 clock configuration ----------------------------------------*/ + PeriphClkInit->Sai1ClockSelection = __HAL_RCC_GET_SAI1_SOURCE(); + + /* Get the SAI2 clock configuration ----------------------------------------*/ + PeriphClkInit->Sai2ClockSelection = __HAL_RCC_GET_SAI2_SOURCE(); + + /* Get the I2S APB1 clock configuration ------------------------------------*/ + PeriphClkInit->I2sApb1ClockSelection = __HAL_RCC_GET_I2S_APB1_SOURCE(); + + /* Get the I2S APB2 clock configuration ------------------------------------*/ + PeriphClkInit->I2sApb2ClockSelection = __HAL_RCC_GET_I2S_APB2_SOURCE(); + + /* Get the RTC Clock configuration -----------------------------------------*/ + tempreg = (RCC->CFGR & RCC_CFGR_RTCPRE); + PeriphClkInit->RTCClockSelection = (uint32_t)((tempreg) | (RCC->BDCR & RCC_BDCR_RTCSEL)); + + /* Get the CEC clock configuration -----------------------------------------*/ + PeriphClkInit->CecClockSelection = __HAL_RCC_GET_CEC_SOURCE(); + + /* Get the FMPI2C1 clock configuration -------------------------------------*/ + PeriphClkInit->Fmpi2c1ClockSelection = __HAL_RCC_GET_FMPI2C1_SOURCE(); + + /* Get the CLK48 clock configuration ----------------------------------------*/ + PeriphClkInit->Clk48ClockSelection = __HAL_RCC_GET_CLK48_SOURCE(); + + /* Get the SDIO clock configuration ----------------------------------------*/ + PeriphClkInit->SdioClockSelection = __HAL_RCC_GET_SDIO_SOURCE(); + + /* Get the SPDIFRX clock 
configuration -------------------------------------*/ + PeriphClkInit->SpdifClockSelection = __HAL_RCC_GET_SPDIFRX_SOURCE(); + + /* Get the TIM Prescaler configuration -------------------------------------*/ + if ((RCC->DCKCFGR & RCC_DCKCFGR_TIMPRE) == RESET) + { + PeriphClkInit->TIMPresSelection = RCC_TIMPRES_DESACTIVATED; + } + else + { + PeriphClkInit->TIMPresSelection = RCC_TIMPRES_ACTIVATED; + } +} + +/** + * @brief Return the peripheral clock frequency for a given peripheral(SAI..) + * @note Return 0 if peripheral clock identifier not managed by this API + * @param PeriphClk Peripheral clock identifier + * This parameter can be one of the following values: + * @arg RCC_PERIPHCLK_SAI1: SAI1 peripheral clock + * @arg RCC_PERIPHCLK_SAI2: SAI2 peripheral clock + * @arg RCC_PERIPHCLK_I2S_APB1: I2S APB1 peripheral clock + * @arg RCC_PERIPHCLK_I2S_APB2: I2S APB2 peripheral clock + * @retval Frequency in KHz + */ +uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) +{ + uint32_t tmpreg1 = 0U; + /* This variable used to store the SAI clock frequency (value in Hz) */ + uint32_t frequency = 0U; + /* This variable used to store the VCO Input (value in Hz) */ + uint32_t vcoinput = 0U; + /* This variable used to store the SAI clock source */ + uint32_t saiclocksource = 0U; + uint32_t srcclk = 0U; + /* This variable used to store the VCO Output (value in Hz) */ + uint32_t vcooutput = 0U; + switch (PeriphClk) + { + case RCC_PERIPHCLK_SAI1: + case RCC_PERIPHCLK_SAI2: + { + saiclocksource = RCC->DCKCFGR; + saiclocksource &= (RCC_DCKCFGR_SAI1SRC | RCC_DCKCFGR_SAI2SRC); + switch (saiclocksource) + { + case 0U: /* PLLSAI is the clock source for SAI*/ + { + /* Configure the PLLSAI division factor */ + /* PLLSAI_VCO Input = PLL_SOURCE/PLLSAIM */ + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSI) + { + /* In Case the PLL Source is HSI (Internal Clock) */ + vcoinput = (HSI_VALUE / (uint32_t)(RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIM)); + } + else + { + /* In Case the PLL Source is HSE (External Clock) */ + vcoinput = ((HSE_VALUE / (uint32_t)(RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIM))); + } + /* PLLSAI_VCO Output = PLLSAI_VCO Input * PLLSAIN */ + /* SAI_CLK(first level) = PLLSAI_VCO Output/PLLSAIQ */ + tmpreg1 = (RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIQ) >> 24U; + frequency = (vcoinput * ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIN) >> 6U)) / (tmpreg1); + + /* SAI_CLK_x = SAI_CLK(first level)/PLLSAIDIVQ */ + tmpreg1 = (((RCC->DCKCFGR & RCC_DCKCFGR_PLLSAIDIVQ) >> 8U) + 1U); + frequency = frequency / (tmpreg1); + break; + } + case RCC_DCKCFGR_SAI1SRC_0: /* PLLI2S is the clock source for SAI*/ + case RCC_DCKCFGR_SAI2SRC_0: /* PLLI2S is the clock source for SAI*/ + { + /* Configure the PLLI2S division factor */ + /* PLLI2S_VCO Input = PLL_SOURCE/PLLI2SM */ + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSI) + { + /* In Case the PLL Source is HSI (Internal Clock) */ + vcoinput = (HSI_VALUE / (uint32_t)(RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM)); + } + else + { + /* In Case the PLL Source is HSE (External Clock) */ + vcoinput = ((HSE_VALUE / (uint32_t)(RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM))); + } + + /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ + /* SAI_CLK(first level) = PLLI2S_VCO Output/PLLI2SQ */ + tmpreg1 = (RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SQ) >> 24U; + frequency = (vcoinput * ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> 6U)) / (tmpreg1); + + /* SAI_CLK_x = SAI_CLK(first level)/PLLI2SDIVQ */ + tmpreg1 = ((RCC->DCKCFGR & RCC_DCKCFGR_PLLI2SDIVQ) + 1U); + frequency = 
frequency / (tmpreg1); + break; + } + case RCC_DCKCFGR_SAI1SRC_1: /* PLLR is the clock source for SAI*/ + case RCC_DCKCFGR_SAI2SRC_1: /* PLLR is the clock source for SAI*/ + { + /* Configure the PLLI2S division factor */ + /* PLL_VCO Input = PLL_SOURCE/PLLM */ + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSI) + { + /* In Case the PLL Source is HSI (Internal Clock) */ + vcoinput = (HSI_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } + else + { + /* In Case the PLL Source is HSE (External Clock) */ + vcoinput = ((HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM))); + } + + /* PLL_VCO Output = PLL_VCO Input * PLLN */ + /* SAI_CLK_x = PLL_VCO Output/PLLR */ + tmpreg1 = (RCC->PLLCFGR & RCC_PLLCFGR_PLLR) >> 28U; + frequency = (vcoinput * ((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 6U)) / (tmpreg1); + break; + } + case RCC_DCKCFGR_SAI1SRC: /* External clock is the clock source for SAI*/ + { + frequency = EXTERNAL_CLOCK_VALUE; + break; + } + case RCC_DCKCFGR_SAI2SRC: /* PLLSRC(HSE or HSI) is the clock source for SAI*/ + { + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSI) + { + /* In Case the PLL Source is HSI (Internal Clock) */ + frequency = (uint32_t)(HSI_VALUE); + } + else + { + /* In Case the PLL Source is HSE (External Clock) */ + frequency = (uint32_t)(HSE_VALUE); + } + break; + } + default : + { + break; + } + } + break; + } + case RCC_PERIPHCLK_I2S_APB1: + { + /* Get the current I2S source */ + srcclk = __HAL_RCC_GET_I2S_APB1_SOURCE(); + switch (srcclk) + { + /* Check if I2S clock selection is External clock mapped on the I2S_CKIN pin used as I2S clock */ + case RCC_I2SAPB1CLKSOURCE_EXT: + { + /* Set the I2S clock to the external clock value */ + frequency = EXTERNAL_CLOCK_VALUE; + break; + } + /* Check if I2S clock selection is PLLI2S VCO output clock divided by PLLI2SR used as I2S clock */ + case RCC_I2SAPB1CLKSOURCE_PLLI2S: + { + /* Configure the PLLI2S division factor */ + /* PLLI2S_VCO Input = PLL_SOURCE/PLLI2SM */ + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM)); + } + else + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSI_VALUE / (uint32_t)(RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM)); + } + + /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ + vcooutput = (uint32_t)(vcoinput * (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> 6U) & (RCC_PLLI2SCFGR_PLLI2SN >> 6U))); + /* I2S_CLK = PLLI2S_VCO Output/PLLI2SR */ + frequency = (uint32_t)(vcooutput / (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> 28U) & (RCC_PLLI2SCFGR_PLLI2SR >> 28U))); + break; + } + /* Check if I2S clock selection is PLL VCO Output divided by PLLR used as I2S clock */ + case RCC_I2SAPB1CLKSOURCE_PLLR: + { + /* Configure the PLL division factor R */ + /* PLL_VCO Input = PLL_SOURCE/PLLM */ + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } + else + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSI_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } + + /* PLL_VCO Output = PLL_VCO Input * PLLN */ + vcooutput = (uint32_t)(vcoinput * (((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 6U) & (RCC_PLLCFGR_PLLN >> 6U))); + /* I2S_CLK = PLL_VCO Output/PLLR */ + frequency = (uint32_t)(vcooutput / (((RCC->PLLCFGR & RCC_PLLCFGR_PLLR) >> 28U) & (RCC_PLLCFGR_PLLR >> 28U))); + 
break; + } + /* Check if I2S clock selection is HSI or HSE depending from PLL source Clock */ + case RCC_I2SAPB1CLKSOURCE_PLLSRC: + { + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + { + frequency = HSE_VALUE; + } + else + { + frequency = HSI_VALUE; + } + break; + } + /* Clock not enabled for I2S*/ + default: + { + frequency = 0U; + break; + } + } + break; + } + case RCC_PERIPHCLK_I2S_APB2: + { + /* Get the current I2S source */ + srcclk = __HAL_RCC_GET_I2S_APB2_SOURCE(); + switch (srcclk) + { + /* Check if I2S clock selection is External clock mapped on the I2S_CKIN pin used as I2S clock */ + case RCC_I2SAPB2CLKSOURCE_EXT: + { + /* Set the I2S clock to the external clock value */ + frequency = EXTERNAL_CLOCK_VALUE; + break; + } + /* Check if I2S clock selection is PLLI2S VCO output clock divided by PLLI2SR used as I2S clock */ + case RCC_I2SAPB2CLKSOURCE_PLLI2S: + { + /* Configure the PLLI2S division factor */ + /* PLLI2S_VCO Input = PLL_SOURCE/PLLI2SM */ + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM)); + } + else + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSI_VALUE / (uint32_t)(RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM)); + } + + /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ + vcooutput = (uint32_t)(vcoinput * (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> 6U) & (RCC_PLLI2SCFGR_PLLI2SN >> 6U))); + /* I2S_CLK = PLLI2S_VCO Output/PLLI2SR */ + frequency = (uint32_t)(vcooutput / (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> 28U) & (RCC_PLLI2SCFGR_PLLI2SR >> 28U))); + break; + } + /* Check if I2S clock selection is PLL VCO Output divided by PLLR used as I2S clock */ + case RCC_I2SAPB2CLKSOURCE_PLLR: + { + /* Configure the PLL division factor R */ + /* PLL_VCO Input = PLL_SOURCE/PLLM */ + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } + else + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSI_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } + + /* PLL_VCO Output = PLL_VCO Input * PLLN */ + vcooutput = (uint32_t)(vcoinput * (((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 6U) & (RCC_PLLCFGR_PLLN >> 6U))); + /* I2S_CLK = PLL_VCO Output/PLLR */ + frequency = (uint32_t)(vcooutput / (((RCC->PLLCFGR & RCC_PLLCFGR_PLLR) >> 28U) & (RCC_PLLCFGR_PLLR >> 28U))); + break; + } + /* Check if I2S clock selection is HSI or HSE depending from PLL source Clock */ + case RCC_I2SAPB2CLKSOURCE_PLLSRC: + { + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + { + frequency = HSE_VALUE; + } + else + { + frequency = HSI_VALUE; + } + break; + } + /* Clock not enabled for I2S*/ + default: + { + frequency = 0U; + break; + } + } + break; + } + default: + { + break; + } + } + return frequency; +} +#endif /* STM32F446xx */ + +#if defined(STM32F469xx) || defined(STM32F479xx) +/** + * @brief Initializes the RCC extended peripherals clocks according to the specified + * parameters in the RCC_PeriphCLKInitTypeDef. + * @param PeriphClkInit pointer to an RCC_PeriphCLKInitTypeDef structure that + * contains the configuration information for the Extended Peripherals + * clocks(I2S, SAI, LTDC, RTC and TIM). 
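 * @note  (Editor's addition, illustrative only; identifiers assumed from the
 *        STM32F4 HAL headers) Routing the 48 MHz domain from PLLSAI typically
 *        looks like:
 *          PeriphClkInit.PeriphClockSelection = RCC_PERIPHCLK_CLK48;
 *          PeriphClkInit.Clk48ClockSelection  = RCC_CLK48CLKSOURCE_PLLSAIP;
 *        together with suitable PLLSAI.PLLSAIN / PLLSAI.PLLSAIP factors
 *        (e.g. 384 and RCC_PLLSAIP_DIV8 for a 1 MHz PLL input) so that the
 *        PLLSAI "P" output is 48 MHz.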
+ * + * @note Care must be taken when HAL_RCCEx_PeriphCLKConfig() is used to select + * the RTC clock source; in this case the Backup domain will be reset in + * order to modify the RTC Clock source, as consequence RTC registers (including + * the backup registers) and RCC_BDCR register are set to their reset values. + * + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit) +{ + uint32_t tickstart = 0U; + uint32_t tmpreg1 = 0U; + uint32_t pllsaip = 0U; + uint32_t pllsaiq = 0U; + uint32_t pllsair = 0U; + + /* Check the parameters */ + assert_param(IS_RCC_PERIPHCLOCK(PeriphClkInit->PeriphClockSelection)); + + /*--------------------------- CLK48 Configuration --------------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) + { + /* Check the parameters */ + assert_param(IS_RCC_CLK48CLKSOURCE(PeriphClkInit->Clk48ClockSelection)); + + /* Configure the CLK48 clock source */ + __HAL_RCC_CLK48_CONFIG(PeriphClkInit->Clk48ClockSelection); + } + /*--------------------------------------------------------------------------*/ + + /*------------------------------ SDIO Configuration ------------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SDIO) == RCC_PERIPHCLK_SDIO) + { + /* Check the parameters */ + assert_param(IS_RCC_SDIOCLKSOURCE(PeriphClkInit->SdioClockSelection)); + + /* Configure the SDIO clock source */ + __HAL_RCC_SDIO_CONFIG(PeriphClkInit->SdioClockSelection); + } + /*--------------------------------------------------------------------------*/ + + /*----------------------- SAI/I2S Configuration (PLLI2S) -------------------*/ + /*------------------- Common configuration SAI/I2S -------------------------*/ + /* In Case of SAI or I2S Clock Configuration through PLLI2S, PLLI2SN division + factor is common parameters for both peripherals */ + if ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) || + (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI_PLLI2S) == RCC_PERIPHCLK_SAI_PLLI2S) || + (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S)) + { + /* check for Parameters */ + assert_param(IS_RCC_PLLI2SN_VALUE(PeriphClkInit->PLLI2S.PLLI2SN)); + + /* Disable the PLLI2S */ + __HAL_RCC_PLLI2S_DISABLE(); + /* Get tick */ + tickstart = HAL_GetTick(); + /* Wait till PLLI2S is disabled */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + /*---------------------- I2S configuration -------------------------------*/ + /* In Case of I2S Clock Configuration through PLLI2S, PLLI2SR must be added + only for I2S configuration */ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == (RCC_PERIPHCLK_I2S)) + { + /* check for Parameters */ + assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) x (PLLI2SN/PLLM) */ + /* I2SCLK = f(PLLI2S clock output) = f(VCO clock) / PLLI2SR */ + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SR); + } + + /*---------------------------- SAI configuration -------------------------*/ + /* In Case of SAI Clock Configuration through PLLI2S, PLLI2SQ and PLLI2S_DIVQ must + be added only for SAI configuration */ + if (((PeriphClkInit->PeriphClockSelection) & 
RCC_PERIPHCLK_SAI_PLLI2S) == (RCC_PERIPHCLK_SAI_PLLI2S)) + { + /* Check the PLLI2S division factors */ + assert_param(IS_RCC_PLLI2SQ_VALUE(PeriphClkInit->PLLI2S.PLLI2SQ)); + assert_param(IS_RCC_PLLI2S_DIVQ_VALUE(PeriphClkInit->PLLI2SDivQ)); + + /* Read PLLI2SR value from PLLI2SCFGR register (this value is not need for SAI configuration) */ + tmpreg1 = ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> RCC_PLLI2SCFGR_PLLI2SR_Pos); + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO Input = PLL_SOURCE/PLLM */ + /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ + /* SAI_CLK(first level) = PLLI2S_VCO Output/PLLI2SQ */ + __HAL_RCC_PLLI2S_SAICLK_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SQ, tmpreg1); + /* SAI_CLK_x = SAI_CLK(first level)/PLLI2SDIVQ */ + __HAL_RCC_PLLI2S_PLLSAICLKDIVQ_CONFIG(PeriphClkInit->PLLI2SDivQ); + } + + /*----------------- In Case of PLLI2S is just selected -----------------*/ + if ((PeriphClkInit->PeriphClockSelection & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S) + { + /* Check for Parameters */ + assert_param(IS_RCC_PLLI2SQ_VALUE(PeriphClkInit->PLLI2S.PLLI2SQ)); + assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); + + /* Configure the PLLI2S multiplication and division factors */ + __HAL_RCC_PLLI2S_SAICLK_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SQ, + PeriphClkInit->PLLI2S.PLLI2SR); + } + + /* Enable the PLLI2S */ + __HAL_RCC_PLLI2S_ENABLE(); + /* Get tick */ + tickstart = HAL_GetTick(); + /* Wait till PLLI2S is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + } + /*--------------------------------------------------------------------------*/ + + /*----------------------- SAI/LTDC Configuration (PLLSAI) ------------------*/ + /*----------------------- Common configuration SAI/LTDC --------------------*/ + /* In Case of SAI, LTDC or CLK48 Clock Configuration through PLLSAI, PLLSAIN division + factor is common parameters for these peripherals */ + if ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI_PLLSAI) == RCC_PERIPHCLK_SAI_PLLSAI) || + (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LTDC) == RCC_PERIPHCLK_LTDC) || + ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) && + (PeriphClkInit->Clk48ClockSelection == RCC_CLK48CLKSOURCE_PLLSAIP))) + { + /* Check the PLLSAI division factors */ + assert_param(IS_RCC_PLLSAIN_VALUE(PeriphClkInit->PLLSAI.PLLSAIN)); + + /* Disable PLLSAI Clock */ + __HAL_RCC_PLLSAI_DISABLE(); + /* Get tick */ + tickstart = HAL_GetTick(); + /* Wait till PLLSAI is disabled */ + while (__HAL_RCC_PLLSAI_GET_FLAG() != RESET) + { + if ((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + /*---------------------------- SAI configuration -------------------------*/ + /* In Case of SAI Clock Configuration through PLLSAI, PLLSAIQ and PLLSAI_DIVQ must + be added only for SAI configuration */ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI_PLLSAI) == (RCC_PERIPHCLK_SAI_PLLSAI)) + { + assert_param(IS_RCC_PLLSAIQ_VALUE(PeriphClkInit->PLLSAI.PLLSAIQ)); + assert_param(IS_RCC_PLLSAI_DIVQ_VALUE(PeriphClkInit->PLLSAIDivQ)); + + /* Read PLLSAIP value from PLLSAICFGR register (this value is not needed for SAI configuration) */ + pllsaip = ((((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIP) >> 
RCC_PLLSAICFGR_PLLSAIP_Pos) + 1U) << 1U); + /* Read PLLSAIR value from PLLSAICFGR register (this value is not need for SAI configuration) */ + pllsair = ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIR) >> RCC_PLLSAICFGR_PLLSAIR_Pos); + /* PLLSAI_VCO Input = PLL_SOURCE/PLLM */ + /* PLLSAI_VCO Output = PLLSAI_VCO Input * PLLSAIN */ + /* SAI_CLK(first level) = PLLSAI_VCO Output/PLLSAIQ */ + __HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIN, pllsaip, PeriphClkInit->PLLSAI.PLLSAIQ, pllsair); + /* SAI_CLK_x = SAI_CLK(first level)/PLLSAIDIVQ */ + __HAL_RCC_PLLSAI_PLLSAICLKDIVQ_CONFIG(PeriphClkInit->PLLSAIDivQ); + } + + /*---------------------------- LTDC configuration ------------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LTDC) == (RCC_PERIPHCLK_LTDC)) + { + assert_param(IS_RCC_PLLSAIR_VALUE(PeriphClkInit->PLLSAI.PLLSAIR)); + assert_param(IS_RCC_PLLSAI_DIVR_VALUE(PeriphClkInit->PLLSAIDivR)); + + /* Read PLLSAIP value from PLLSAICFGR register (this value is not needed for SAI configuration) */ + pllsaip = ((((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIP) >> RCC_PLLSAICFGR_PLLSAIP_Pos) + 1U) << 1U); + /* Read PLLSAIQ value from PLLSAICFGR register (this value is not need for SAI configuration) */ + pllsaiq = ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIQ) >> RCC_PLLSAICFGR_PLLSAIQ_Pos); + /* PLLSAI_VCO Input = PLL_SOURCE/PLLM */ + /* PLLSAI_VCO Output = PLLSAI_VCO Input * PLLSAIN */ + /* LTDC_CLK(first level) = PLLSAI_VCO Output/PLLSAIR */ + __HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIN, pllsaip, pllsaiq, PeriphClkInit->PLLSAI.PLLSAIR); + /* LTDC_CLK = LTDC_CLK(first level)/PLLSAIDIVR */ + __HAL_RCC_PLLSAI_PLLSAICLKDIVR_CONFIG(PeriphClkInit->PLLSAIDivR); + } + + /*---------------------------- CLK48 configuration ------------------------*/ + /* Configure the PLLSAI when it is used as clock source for CLK48 */ + if ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == (RCC_PERIPHCLK_CLK48)) && + (PeriphClkInit->Clk48ClockSelection == RCC_CLK48CLKSOURCE_PLLSAIP)) + { + assert_param(IS_RCC_PLLSAIP_VALUE(PeriphClkInit->PLLSAI.PLLSAIP)); + + /* Read PLLSAIQ value from PLLSAICFGR register (this value is not need for SAI configuration) */ + pllsaiq = ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIQ) >> RCC_PLLSAICFGR_PLLSAIQ_Pos); + /* Read PLLSAIR value from PLLSAICFGR register (this value is not need for SAI configuration) */ + pllsair = ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIR) >> RCC_PLLSAICFGR_PLLSAIR_Pos); + /* PLLSAI_VCO Input = PLL_SOURCE/PLLM */ + /* PLLSAI_VCO Output = PLLSAI_VCO Input * PLLSAIN */ + /* CLK48_CLK(first level) = PLLSAI_VCO Output/PLLSAIP */ + __HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIN, PeriphClkInit->PLLSAI.PLLSAIP, pllsaiq, pllsair); + } + + /* Enable PLLSAI Clock */ + __HAL_RCC_PLLSAI_ENABLE(); + /* Get tick */ + tickstart = HAL_GetTick(); + /* Wait till PLLSAI is ready */ + while (__HAL_RCC_PLLSAI_GET_FLAG() == RESET) + { + if ((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + } + + /*--------------------------------------------------------------------------*/ + + /*---------------------------- RTC configuration ---------------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_RTC) == (RCC_PERIPHCLK_RTC)) + { + /* Check for RTC Parameters used to output RTCCLK */ + assert_param(IS_RCC_RTCCLKSOURCE(PeriphClkInit->RTCClockSelection)); + + /* Enable Power Clock*/ + __HAL_RCC_PWR_CLK_ENABLE(); + + /* Enable write access to 
Backup domain */ + PWR->CR |= PWR_CR_DBP; + + /* Get tick */ + tickstart = HAL_GetTick(); + + while ((PWR->CR & PWR_CR_DBP) == RESET) + { + if ((HAL_GetTick() - tickstart) > RCC_DBP_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + /* Reset the Backup domain only if the RTC Clock source selection is modified from reset value */ + tmpreg1 = (RCC->BDCR & RCC_BDCR_RTCSEL); + if ((tmpreg1 != 0x00000000U) && ((tmpreg1) != (PeriphClkInit->RTCClockSelection & RCC_BDCR_RTCSEL))) + { + /* Store the content of BDCR register before the reset of Backup Domain */ + tmpreg1 = (RCC->BDCR & ~(RCC_BDCR_RTCSEL)); + /* RTC Clock selection can be changed only if the Backup Domain is reset */ + __HAL_RCC_BACKUPRESET_FORCE(); + __HAL_RCC_BACKUPRESET_RELEASE(); + /* Restore the Content of BDCR register */ + RCC->BDCR = tmpreg1; + + /* Wait for LSE reactivation if LSE was enable prior to Backup Domain reset */ + if (HAL_IS_BIT_SET(RCC->BDCR, RCC_BDCR_LSEON)) + { + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait till LSE is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + __HAL_RCC_RTC_CONFIG(PeriphClkInit->RTCClockSelection); + } + /*--------------------------------------------------------------------------*/ + + /*---------------------------- TIM configuration ---------------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_TIM) == (RCC_PERIPHCLK_TIM)) + { + __HAL_RCC_TIMCLKPRESCALER(PeriphClkInit->TIMPresSelection); + } + return HAL_OK; +} + +/** + * @brief Configures the RCC_PeriphCLKInitTypeDef according to the internal + * RCC configuration registers. + * @param PeriphClkInit pointer to an RCC_PeriphCLKInitTypeDef structure that + * will be configured. 
+ * @retval None + */ +void HAL_RCCEx_GetPeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit) +{ + uint32_t tempreg; + + /* Set all possible values for the extended clock type parameter------------*/ + PeriphClkInit->PeriphClockSelection = RCC_PERIPHCLK_I2S | RCC_PERIPHCLK_SAI_PLLSAI | \ + RCC_PERIPHCLK_SAI_PLLI2S | RCC_PERIPHCLK_LTDC | \ + RCC_PERIPHCLK_TIM | RCC_PERIPHCLK_RTC | \ + RCC_PERIPHCLK_CLK48 | RCC_PERIPHCLK_SDIO; + + /* Get the PLLI2S Clock configuration --------------------------------------*/ + PeriphClkInit->PLLI2S.PLLI2SN = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> RCC_PLLI2SCFGR_PLLI2SN_Pos); + PeriphClkInit->PLLI2S.PLLI2SR = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> RCC_PLLI2SCFGR_PLLI2SR_Pos); + PeriphClkInit->PLLI2S.PLLI2SQ = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SQ) >> RCC_PLLI2SCFGR_PLLI2SQ_Pos); + /* Get the PLLSAI Clock configuration --------------------------------------*/ + PeriphClkInit->PLLSAI.PLLSAIN = (uint32_t)((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIN) >> RCC_PLLSAICFGR_PLLSAIN_Pos); + PeriphClkInit->PLLSAI.PLLSAIR = (uint32_t)((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIR) >> RCC_PLLSAICFGR_PLLSAIR_Pos); + PeriphClkInit->PLLSAI.PLLSAIQ = (uint32_t)((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIQ) >> RCC_PLLSAICFGR_PLLSAIQ_Pos); + /* Get the PLLSAI/PLLI2S division factors ----------------------------------*/ + PeriphClkInit->PLLI2SDivQ = (uint32_t)((RCC->DCKCFGR & RCC_DCKCFGR_PLLI2SDIVQ) >> RCC_DCKCFGR_PLLI2SDIVQ_Pos); + PeriphClkInit->PLLSAIDivQ = (uint32_t)((RCC->DCKCFGR & RCC_DCKCFGR_PLLSAIDIVQ) >> RCC_DCKCFGR_PLLSAIDIVQ_Pos); + PeriphClkInit->PLLSAIDivR = (uint32_t)(RCC->DCKCFGR & RCC_DCKCFGR_PLLSAIDIVR); + /* Get the RTC Clock configuration -----------------------------------------*/ + tempreg = (RCC->CFGR & RCC_CFGR_RTCPRE); + PeriphClkInit->RTCClockSelection = (uint32_t)((tempreg) | (RCC->BDCR & RCC_BDCR_RTCSEL)); + + /* Get the CLK48 clock configuration -------------------------------------*/ + PeriphClkInit->Clk48ClockSelection = __HAL_RCC_GET_CLK48_SOURCE(); + + /* Get the SDIO clock configuration ----------------------------------------*/ + PeriphClkInit->SdioClockSelection = __HAL_RCC_GET_SDIO_SOURCE(); + + if ((RCC->DCKCFGR & RCC_DCKCFGR_TIMPRE) == RESET) + { + PeriphClkInit->TIMPresSelection = RCC_TIMPRES_DESACTIVATED; + } + else + { + PeriphClkInit->TIMPresSelection = RCC_TIMPRES_ACTIVATED; + } +} + +/** + * @brief Return the peripheral clock frequency for a given peripheral(SAI..) 
+ * @note Return 0 if peripheral clock identifier not managed by this API + * @param PeriphClk Peripheral clock identifier + * This parameter can be one of the following values: + * @arg RCC_PERIPHCLK_I2S: I2S peripheral clock + * @retval Frequency in KHz + */ +uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) +{ + /* This variable used to store the I2S clock frequency (value in Hz) */ + uint32_t frequency = 0U; + /* This variable used to store the VCO Input (value in Hz) */ + uint32_t vcoinput = 0U; + uint32_t srcclk = 0U; + /* This variable used to store the VCO Output (value in Hz) */ + uint32_t vcooutput = 0U; + switch (PeriphClk) + { + case RCC_PERIPHCLK_I2S: + { + /* Get the current I2S source */ + srcclk = __HAL_RCC_GET_I2S_SOURCE(); + switch (srcclk) + { + /* Check if I2S clock selection is External clock mapped on the I2S_CKIN pin used as I2S clock */ + case RCC_I2SCLKSOURCE_EXT: + { + /* Set the I2S clock to the external clock value */ + frequency = EXTERNAL_CLOCK_VALUE; + break; + } + /* Check if I2S clock selection is PLLI2S VCO output clock divided by PLLI2SR used as I2S clock */ + case RCC_I2SCLKSOURCE_PLLI2S: + { + /* Configure the PLLI2S division factor */ + /* PLLI2S_VCO Input = PLL_SOURCE/PLLI2SM */ + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } + else + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSI_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } + + /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ + vcooutput = (uint32_t)(vcoinput * (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> 6U) & (RCC_PLLI2SCFGR_PLLI2SN >> 6U))); + /* I2S_CLK = PLLI2S_VCO Output/PLLI2SR */ + frequency = (uint32_t)(vcooutput / (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> 28U) & (RCC_PLLI2SCFGR_PLLI2SR >> 28U))); + break; + } + /* Clock not enabled for I2S*/ + default: + { + frequency = 0U; + break; + } + } + break; + } + default: + { + break; + } + } + return frequency; +} +#endif /* STM32F469xx || STM32F479xx */ + +#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +/** + * @brief Initializes the RCC extended peripherals clocks according to the specified + * parameters in the RCC_PeriphCLKInitTypeDef. + * @param PeriphClkInit pointer to an RCC_PeriphCLKInitTypeDef structure that + * contains the configuration information for the Extended Peripherals + * clocks(I2S, LTDC RTC and TIM). + * + * @note Care must be taken when HAL_RCCEx_PeriphCLKConfig() is used to select + * the RTC clock source; in this case the Backup domain will be reset in + * order to modify the RTC Clock source, as consequence RTC registers (including + * the backup registers) and RCC_BDCR register are set to their reset values. 
+ * + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit) +{ + uint32_t tickstart = 0U; + uint32_t tmpreg1 = 0U; +#if defined(STM32F413xx) || defined(STM32F423xx) + uint32_t plli2sq = 0U; +#endif /* STM32F413xx || STM32F423xx */ + uint32_t plli2sused = 0U; + + /* Check the peripheral clock selection parameters */ + assert_param(IS_RCC_PERIPHCLOCK(PeriphClkInit->PeriphClockSelection)); + + /*----------------------------------- I2S APB1 configuration ---------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S_APB1) == (RCC_PERIPHCLK_I2S_APB1)) + { + /* Check the parameters */ + assert_param(IS_RCC_I2SAPB1CLKSOURCE(PeriphClkInit->I2sApb1ClockSelection)); + + /* Configure I2S Clock source */ + __HAL_RCC_I2S_APB1_CONFIG(PeriphClkInit->I2sApb1ClockSelection); + /* Enable the PLLI2S when it's used as clock source for I2S */ + if (PeriphClkInit->I2sApb1ClockSelection == RCC_I2SAPB1CLKSOURCE_PLLI2S) + { + plli2sused = 1U; + } + } + /*--------------------------------------------------------------------------*/ + + /*----------------------------------- I2S APB2 configuration ---------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S_APB2) == (RCC_PERIPHCLK_I2S_APB2)) + { + /* Check the parameters */ + assert_param(IS_RCC_I2SAPB2CLKSOURCE(PeriphClkInit->I2sApb2ClockSelection)); + + /* Configure I2S Clock source */ + __HAL_RCC_I2S_APB2_CONFIG(PeriphClkInit->I2sApb2ClockSelection); + /* Enable the PLLI2S when it's used as clock source for I2S */ + if (PeriphClkInit->I2sApb2ClockSelection == RCC_I2SAPB2CLKSOURCE_PLLI2S) + { + plli2sused = 1U; + } + } + /*--------------------------------------------------------------------------*/ + +#if defined(STM32F413xx) || defined(STM32F423xx) + /*----------------------- SAI1 Block A configuration -----------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAIA) == (RCC_PERIPHCLK_SAIA)) + { + /* Check the parameters */ + assert_param(IS_RCC_SAIACLKSOURCE(PeriphClkInit->SaiAClockSelection)); + + /* Configure SAI1 Clock source */ + __HAL_RCC_SAI_BLOCKACLKSOURCE_CONFIG(PeriphClkInit->SaiAClockSelection); + /* Enable the PLLI2S when it's used as clock source for SAI */ + if (PeriphClkInit->SaiAClockSelection == RCC_SAIACLKSOURCE_PLLI2SR) + { + plli2sused = 1U; + } + /* Enable the PLLSAI when it's used as clock source for SAI */ + if (PeriphClkInit->SaiAClockSelection == RCC_SAIACLKSOURCE_PLLR) + { + /* Check for PLL/DIVR parameters */ + assert_param(IS_RCC_PLL_DIVR_VALUE(PeriphClkInit->PLLDivR)); + + /* SAI_CLK_x = SAI_CLK(first level)/PLLDIVR */ + __HAL_RCC_PLL_PLLSAICLKDIVR_CONFIG(PeriphClkInit->PLLDivR); + } + } + /*--------------------------------------------------------------------------*/ + + /*---------------------- SAI1 Block B configuration ------------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAIB) == (RCC_PERIPHCLK_SAIB)) + { + /* Check the parameters */ + assert_param(IS_RCC_SAIBCLKSOURCE(PeriphClkInit->SaiBClockSelection)); + + /* Configure SAI1 Clock source */ + __HAL_RCC_SAI_BLOCKBCLKSOURCE_CONFIG(PeriphClkInit->SaiBClockSelection); + /* Enable the PLLI2S when it's used as clock source for SAI */ + if (PeriphClkInit->SaiBClockSelection == RCC_SAIBCLKSOURCE_PLLI2SR) + { + plli2sused = 1U; + } + /* Enable the PLLSAI when it's used as clock source for SAI */ + if (PeriphClkInit->SaiBClockSelection == RCC_SAIBCLKSOURCE_PLLR) + { + /* Check for PLL/DIVR parameters */ + 
assert_param(IS_RCC_PLL_DIVR_VALUE(PeriphClkInit->PLLDivR)); + + /* SAI_CLK_x = SAI_CLK(first level)/PLLDIVR */ + __HAL_RCC_PLL_PLLSAICLKDIVR_CONFIG(PeriphClkInit->PLLDivR); + } + } + /*--------------------------------------------------------------------------*/ +#endif /* STM32F413xx || STM32F423xx */ + + /*------------------------------------ RTC configuration -------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_RTC) == (RCC_PERIPHCLK_RTC)) + { + /* Check for RTC Parameters used to output RTCCLK */ + assert_param(IS_RCC_RTCCLKSOURCE(PeriphClkInit->RTCClockSelection)); + + /* Enable Power Clock*/ + __HAL_RCC_PWR_CLK_ENABLE(); + + /* Enable write access to Backup domain */ + PWR->CR |= PWR_CR_DBP; + + /* Get tick */ + tickstart = HAL_GetTick(); + + while ((PWR->CR & PWR_CR_DBP) == RESET) + { + if ((HAL_GetTick() - tickstart) > RCC_DBP_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + /* Reset the Backup domain only if the RTC Clock source selection is modified from reset value */ + tmpreg1 = (RCC->BDCR & RCC_BDCR_RTCSEL); + if ((tmpreg1 != 0x00000000U) && ((tmpreg1) != (PeriphClkInit->RTCClockSelection & RCC_BDCR_RTCSEL))) + { + /* Store the content of BDCR register before the reset of Backup Domain */ + tmpreg1 = (RCC->BDCR & ~(RCC_BDCR_RTCSEL)); + /* RTC Clock selection can be changed only if the Backup Domain is reset */ + __HAL_RCC_BACKUPRESET_FORCE(); + __HAL_RCC_BACKUPRESET_RELEASE(); + /* Restore the Content of BDCR register */ + RCC->BDCR = tmpreg1; + + /* Wait for LSE reactivation if LSE was enable prior to Backup Domain reset */ + if (HAL_IS_BIT_SET(RCC->BDCR, RCC_BDCR_LSEON)) + { + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait till LSE is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + __HAL_RCC_RTC_CONFIG(PeriphClkInit->RTCClockSelection); + } + /*--------------------------------------------------------------------------*/ + + /*------------------------------------ TIM configuration -------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_TIM) == (RCC_PERIPHCLK_TIM)) + { + /* Configure Timer Prescaler */ + __HAL_RCC_TIMCLKPRESCALER(PeriphClkInit->TIMPresSelection); + } + /*--------------------------------------------------------------------------*/ + + /*------------------------------------- FMPI2C1 Configuration --------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_FMPI2C1) == RCC_PERIPHCLK_FMPI2C1) + { + /* Check the parameters */ + assert_param(IS_RCC_FMPI2C1CLKSOURCE(PeriphClkInit->Fmpi2c1ClockSelection)); + + /* Configure the FMPI2C1 clock source */ + __HAL_RCC_FMPI2C1_CONFIG(PeriphClkInit->Fmpi2c1ClockSelection); + } + /*--------------------------------------------------------------------------*/ + + /*------------------------------------- CLK48 Configuration ----------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) + { + /* Check the parameters */ + assert_param(IS_RCC_CLK48CLKSOURCE(PeriphClkInit->Clk48ClockSelection)); + + /* Configure the SDIO clock source */ + __HAL_RCC_CLK48_CONFIG(PeriphClkInit->Clk48ClockSelection); + + /* Enable the PLLI2S when it's used as clock source for CLK48 */ + if (PeriphClkInit->Clk48ClockSelection == RCC_CLK48CLKSOURCE_PLLI2SQ) + { + plli2sused = 1U; + } + } + /*--------------------------------------------------------------------------*/ + + 
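For orientation, a minimal application-side usage sketch of the CLK48/SDIO path configured above (STM32F412/F413-class parts). The divider values are illustrative and assume an 8 MHz PLL source clock; RCC_PLLI2SCLKSOURCE_PLLSRC is assumed to be the matching PLLI2SSelection constant, since its definition does not appear in this excerpt.

  /* Route the 48 MHz clock from the PLLI2S Q output and feed SDIO from CLK48.
     Assumed: 8 MHz PLL source -> /PLLI2SM(4) = 2 MHz -> *PLLI2SN(192) = 384 MHz VCO
     -> /PLLI2SQ(8) = 48 MHz. */
  RCC_PeriphCLKInitTypeDef PeriphClkInit = {0};

  PeriphClkInit.PeriphClockSelection = RCC_PERIPHCLK_CLK48 | RCC_PERIPHCLK_SDIO;
  PeriphClkInit.Clk48ClockSelection  = RCC_CLK48CLKSOURCE_PLLI2SQ;
  PeriphClkInit.SdioClockSelection   = RCC_SDIOCLKSOURCE_CLK48;
  PeriphClkInit.PLLI2SSelection      = RCC_PLLI2SCLKSOURCE_PLLSRC; /* assumed constant, see lead-in */
  PeriphClkInit.PLLI2S.PLLI2SM = 4U;
  PeriphClkInit.PLLI2S.PLLI2SN = 192U;
  PeriphClkInit.PLLI2S.PLLI2SQ = 8U;
  PeriphClkInit.PLLI2S.PLLI2SR = 2U;   /* unused on this path, but kept at a valid value since the driver asserts it */

  if (HAL_RCCEx_PeriphCLKConfig(&PeriphClkInit) != HAL_OK)
  {
    /* Clock configuration error */
  }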
/*------------------------------------- SDIO Configuration -----------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SDIO) == RCC_PERIPHCLK_SDIO) + { + /* Check the parameters */ + assert_param(IS_RCC_SDIOCLKSOURCE(PeriphClkInit->SdioClockSelection)); + + /* Configure the SDIO clock source */ + __HAL_RCC_SDIO_CONFIG(PeriphClkInit->SdioClockSelection); + } + /*--------------------------------------------------------------------------*/ + + /*-------------------------------------- PLLI2S Configuration --------------*/ + /* PLLI2S is configured when a peripheral will use it as source clock : I2S on APB1 or + I2S on APB2*/ + if ((plli2sused == 1U) || (PeriphClkInit->PeriphClockSelection == RCC_PERIPHCLK_PLLI2S)) + { + /* Disable the PLLI2S */ + __HAL_RCC_PLLI2S_DISABLE(); + /* Get tick */ + tickstart = HAL_GetTick(); + /* Wait till PLLI2S is disabled */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + /* check for common PLLI2S Parameters */ + assert_param(IS_RCC_PLLI2SCLKSOURCE(PeriphClkInit->PLLI2SSelection)); + assert_param(IS_RCC_PLLI2SM_VALUE(PeriphClkInit->PLLI2S.PLLI2SM)); + assert_param(IS_RCC_PLLI2SN_VALUE(PeriphClkInit->PLLI2S.PLLI2SN)); + /*-------------------- Set the PLL I2S clock -----------------------------*/ + __HAL_RCC_PLL_I2S_CONFIG(PeriphClkInit->PLLI2SSelection); + + /*------- In Case of PLLI2S is selected as source clock for I2S ----------*/ + if (((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S_APB1) == RCC_PERIPHCLK_I2S_APB1) + && (PeriphClkInit->I2sApb1ClockSelection == RCC_I2SAPB1CLKSOURCE_PLLI2S)) || + ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S_APB2) == RCC_PERIPHCLK_I2S_APB2) && (PeriphClkInit->I2sApb2ClockSelection == RCC_I2SAPB2CLKSOURCE_PLLI2S)) || + ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) && (PeriphClkInit->Clk48ClockSelection == RCC_CLK48CLKSOURCE_PLLI2SQ)) || + ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SDIO) == RCC_PERIPHCLK_SDIO) && (PeriphClkInit->SdioClockSelection == RCC_SDIOCLKSOURCE_CLK48) && (PeriphClkInit->Clk48ClockSelection == RCC_CLK48CLKSOURCE_PLLI2SQ))) + { + /* check for Parameters */ + assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); + assert_param(IS_RCC_PLLI2SQ_VALUE(PeriphClkInit->PLLI2S.PLLI2SQ)); + + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) * (PLLI2SN/PLLI2SM)*/ + /* I2SCLK = f(PLLI2S clock output) = f(VCO clock) / PLLI2SR */ + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SM, PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SQ, + PeriphClkInit->PLLI2S.PLLI2SR); + } + +#if defined(STM32F413xx) || defined(STM32F423xx) + /*------- In Case of PLLI2S is selected as source clock for SAI ----------*/ + if (((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAIA) == RCC_PERIPHCLK_SAIA) + && (PeriphClkInit->SaiAClockSelection == RCC_SAIACLKSOURCE_PLLI2SR)) || + ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAIB) == RCC_PERIPHCLK_SAIB) && (PeriphClkInit->SaiBClockSelection == RCC_SAIBCLKSOURCE_PLLI2SR))) + { + /* Check for PLLI2S Parameters */ + assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); + /* Check for PLLI2S/DIVR parameters */ + assert_param(IS_RCC_PLLI2S_DIVR_VALUE(PeriphClkInit->PLLI2SDivR)); + + /* Read PLLI2SQ value from PLLI2SCFGR register (this value 
is not needed for SAI configuration) */ + plli2sq = ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SQ) >> RCC_PLLI2SCFGR_PLLI2SQ_Pos); + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO Input = PLL_SOURCE/PLLI2SM */ + /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ + /* SAI_CLK(first level) = PLLI2S_VCO Output/PLLI2SQ */ + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SM, PeriphClkInit->PLLI2S.PLLI2SN, plli2sq, + PeriphClkInit->PLLI2S.PLLI2SR); + + /* SAI_CLK_x = SAI_CLK(first level)/PLLI2SDIVR */ + __HAL_RCC_PLLI2S_PLLSAICLKDIVR_CONFIG(PeriphClkInit->PLLI2SDivR); + } +#endif /* STM32F413xx || STM32F423xx */ + + /*----------------- In Case of PLLI2S is just selected ------------------*/ + if ((PeriphClkInit->PeriphClockSelection & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S) + { + /* Check for Parameters */ + assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); + assert_param(IS_RCC_PLLI2SQ_VALUE(PeriphClkInit->PLLI2S.PLLI2SQ)); + + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) * (PLLI2SN/PLLI2SM)*/ + /* SPDIFRXCLK = f(PLLI2S clock output) = f(VCO clock) / PLLI2SP */ + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SM, PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SQ, + PeriphClkInit->PLLI2S.PLLI2SR); + } + + /* Enable the PLLI2S */ + __HAL_RCC_PLLI2S_ENABLE(); + /* Get tick */ + tickstart = HAL_GetTick(); + /* Wait till PLLI2S is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + } + /*--------------------------------------------------------------------------*/ + + /*-------------------- DFSDM1 clock source configuration -------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_DFSDM1) == RCC_PERIPHCLK_DFSDM1) + { + /* Check the parameters */ + assert_param(IS_RCC_DFSDM1CLKSOURCE(PeriphClkInit->Dfsdm1ClockSelection)); + + /* Configure the DFSDM1 interface clock source */ + __HAL_RCC_DFSDM1_CONFIG(PeriphClkInit->Dfsdm1ClockSelection); + } + /*--------------------------------------------------------------------------*/ + + /*-------------------- DFSDM1 Audio clock source configuration -------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_DFSDM1_AUDIO) == RCC_PERIPHCLK_DFSDM1_AUDIO) + { + /* Check the parameters */ + assert_param(IS_RCC_DFSDM1AUDIOCLKSOURCE(PeriphClkInit->Dfsdm1AudioClockSelection)); + + /* Configure the DFSDM1 Audio interface clock source */ + __HAL_RCC_DFSDM1AUDIO_CONFIG(PeriphClkInit->Dfsdm1AudioClockSelection); + } + /*--------------------------------------------------------------------------*/ + +#if defined(STM32F413xx) || defined(STM32F423xx) + /*-------------------- DFSDM2 clock source configuration -------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_DFSDM2) == RCC_PERIPHCLK_DFSDM2) + { + /* Check the parameters */ + assert_param(IS_RCC_DFSDM2CLKSOURCE(PeriphClkInit->Dfsdm2ClockSelection)); + + /* Configure the DFSDM1 interface clock source */ + __HAL_RCC_DFSDM2_CONFIG(PeriphClkInit->Dfsdm2ClockSelection); + } + /*--------------------------------------------------------------------------*/ + + /*-------------------- DFSDM2 Audio clock source configuration -------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_DFSDM2_AUDIO) == RCC_PERIPHCLK_DFSDM2_AUDIO) + { + /* Check the parameters */ + 
assert_param(IS_RCC_DFSDM2AUDIOCLKSOURCE(PeriphClkInit->Dfsdm2AudioClockSelection)); + + /* Configure the DFSDM1 Audio interface clock source */ + __HAL_RCC_DFSDM2AUDIO_CONFIG(PeriphClkInit->Dfsdm2AudioClockSelection); + } + /*--------------------------------------------------------------------------*/ + + /*---------------------------- LPTIM1 Configuration ------------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LPTIM1) == RCC_PERIPHCLK_LPTIM1) + { + /* Check the parameters */ + assert_param(IS_RCC_LPTIM1CLKSOURCE(PeriphClkInit->Lptim1ClockSelection)); + + /* Configure the LPTIM1 clock source */ + __HAL_RCC_LPTIM1_CONFIG(PeriphClkInit->Lptim1ClockSelection); + } + /*--------------------------------------------------------------------------*/ +#endif /* STM32F413xx || STM32F423xx */ + + return HAL_OK; +} + +/** + * @brief Get the RCC_PeriphCLKInitTypeDef according to the internal + * RCC configuration registers. + * @param PeriphClkInit pointer to an RCC_PeriphCLKInitTypeDef structure that + * will be configured. + * @retval None + */ +void HAL_RCCEx_GetPeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit) +{ + uint32_t tempreg; + + /* Set all possible values for the extended clock type parameter------------*/ +#if defined(STM32F413xx) || defined(STM32F423xx) + PeriphClkInit->PeriphClockSelection = RCC_PERIPHCLK_I2S_APB1 | RCC_PERIPHCLK_I2S_APB2 | \ + RCC_PERIPHCLK_TIM | RCC_PERIPHCLK_RTC | \ + RCC_PERIPHCLK_FMPI2C1 | RCC_PERIPHCLK_CLK48 | \ + RCC_PERIPHCLK_SDIO | RCC_PERIPHCLK_DFSDM1 | \ + RCC_PERIPHCLK_DFSDM1_AUDIO | RCC_PERIPHCLK_DFSDM2 | \ + RCC_PERIPHCLK_DFSDM2_AUDIO | RCC_PERIPHCLK_LPTIM1 | \ + RCC_PERIPHCLK_SAIA | RCC_PERIPHCLK_SAIB; +#else /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ + PeriphClkInit->PeriphClockSelection = RCC_PERIPHCLK_I2S_APB1 | RCC_PERIPHCLK_I2S_APB2 | \ + RCC_PERIPHCLK_TIM | RCC_PERIPHCLK_RTC | \ + RCC_PERIPHCLK_FMPI2C1 | RCC_PERIPHCLK_CLK48 | \ + RCC_PERIPHCLK_SDIO | RCC_PERIPHCLK_DFSDM1 | \ + RCC_PERIPHCLK_DFSDM1_AUDIO; +#endif /* STM32F413xx || STM32F423xx */ + + + + /* Get the PLLI2S Clock configuration --------------------------------------*/ + PeriphClkInit->PLLI2S.PLLI2SM = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM) >> RCC_PLLI2SCFGR_PLLI2SM_Pos); + PeriphClkInit->PLLI2S.PLLI2SN = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> RCC_PLLI2SCFGR_PLLI2SN_Pos); + PeriphClkInit->PLLI2S.PLLI2SQ = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SQ) >> RCC_PLLI2SCFGR_PLLI2SQ_Pos); + PeriphClkInit->PLLI2S.PLLI2SR = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> RCC_PLLI2SCFGR_PLLI2SR_Pos); +#if defined(STM32F413xx) || defined(STM32F423xx) + /* Get the PLL/PLLI2S division factors -------------------------------------*/ + PeriphClkInit->PLLI2SDivR = (uint32_t)((RCC->DCKCFGR & RCC_DCKCFGR_PLLI2SDIVR) >> RCC_DCKCFGR_PLLI2SDIVR_Pos); + PeriphClkInit->PLLDivR = (uint32_t)((RCC->DCKCFGR & RCC_DCKCFGR_PLLDIVR) >> RCC_DCKCFGR_PLLDIVR_Pos); +#endif /* STM32F413xx || STM32F423xx */ + + /* Get the I2S APB1 clock configuration ------------------------------------*/ + PeriphClkInit->I2sApb1ClockSelection = __HAL_RCC_GET_I2S_APB1_SOURCE(); + + /* Get the I2S APB2 clock configuration ------------------------------------*/ + PeriphClkInit->I2sApb2ClockSelection = __HAL_RCC_GET_I2S_APB2_SOURCE(); + + /* Get the RTC Clock configuration -----------------------------------------*/ + tempreg = (RCC->CFGR & RCC_CFGR_RTCPRE); + PeriphClkInit->RTCClockSelection = (uint32_t)((tempreg) | (RCC->BDCR & 
RCC_BDCR_RTCSEL)); + + /* Get the FMPI2C1 clock configuration -------------------------------------*/ + PeriphClkInit->Fmpi2c1ClockSelection = __HAL_RCC_GET_FMPI2C1_SOURCE(); + + /* Get the CLK48 clock configuration ---------------------------------------*/ + PeriphClkInit->Clk48ClockSelection = __HAL_RCC_GET_CLK48_SOURCE(); + + /* Get the SDIO clock configuration ----------------------------------------*/ + PeriphClkInit->SdioClockSelection = __HAL_RCC_GET_SDIO_SOURCE(); + + /* Get the DFSDM1 clock configuration --------------------------------------*/ + PeriphClkInit->Dfsdm1ClockSelection = __HAL_RCC_GET_DFSDM1_SOURCE(); + + /* Get the DFSDM1 Audio clock configuration --------------------------------*/ + PeriphClkInit->Dfsdm1AudioClockSelection = __HAL_RCC_GET_DFSDM1AUDIO_SOURCE(); + +#if defined(STM32F413xx) || defined(STM32F423xx) + /* Get the DFSDM2 clock configuration --------------------------------------*/ + PeriphClkInit->Dfsdm2ClockSelection = __HAL_RCC_GET_DFSDM2_SOURCE(); + + /* Get the DFSDM2 Audio clock configuration --------------------------------*/ + PeriphClkInit->Dfsdm2AudioClockSelection = __HAL_RCC_GET_DFSDM2AUDIO_SOURCE(); + + /* Get the LPTIM1 clock configuration --------------------------------------*/ + PeriphClkInit->Lptim1ClockSelection = __HAL_RCC_GET_LPTIM1_SOURCE(); + + /* Get the SAI1 Block Aclock configuration ---------------------------------*/ + PeriphClkInit->SaiAClockSelection = __HAL_RCC_GET_SAI_BLOCKA_SOURCE(); + + /* Get the SAI1 Block B clock configuration --------------------------------*/ + PeriphClkInit->SaiBClockSelection = __HAL_RCC_GET_SAI_BLOCKB_SOURCE(); +#endif /* STM32F413xx || STM32F423xx */ + + /* Get the TIM Prescaler configuration -------------------------------------*/ + if ((RCC->DCKCFGR & RCC_DCKCFGR_TIMPRE) == RESET) + { + PeriphClkInit->TIMPresSelection = RCC_TIMPRES_DESACTIVATED; + } + else + { + PeriphClkInit->TIMPresSelection = RCC_TIMPRES_ACTIVATED; + } +} + +/** + * @brief Return the peripheral clock frequency for a given peripheral(I2S..) 
+ * @note Return 0 if peripheral clock identifier not managed by this API + * @param PeriphClk Peripheral clock identifier + * This parameter can be one of the following values: + * @arg RCC_PERIPHCLK_I2S_APB1: I2S APB1 peripheral clock + * @arg RCC_PERIPHCLK_I2S_APB2: I2S APB2 peripheral clock + * @retval Frequency in KHz + */ +uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) +{ + /* This variable used to store the I2S clock frequency (value in Hz) */ + uint32_t frequency = 0U; + /* This variable used to store the VCO Input (value in Hz) */ + uint32_t vcoinput = 0U; + uint32_t srcclk = 0U; + /* This variable used to store the VCO Output (value in Hz) */ + uint32_t vcooutput = 0U; + switch (PeriphClk) + { + case RCC_PERIPHCLK_I2S_APB1: + { + /* Get the current I2S source */ + srcclk = __HAL_RCC_GET_I2S_APB1_SOURCE(); + switch (srcclk) + { + /* Check if I2S clock selection is External clock mapped on the I2S_CKIN pin used as I2S clock */ + case RCC_I2SAPB1CLKSOURCE_EXT: + { + /* Set the I2S clock to the external clock value */ + frequency = EXTERNAL_CLOCK_VALUE; + break; + } + /* Check if I2S clock selection is PLLI2S VCO output clock divided by PLLI2SR used as I2S clock */ + case RCC_I2SAPB1CLKSOURCE_PLLI2S: + { + if ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SSRC) == RCC_PLLI2SCFGR_PLLI2SSRC) + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(EXTERNAL_CLOCK_VALUE / (uint32_t)(RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM)); + } + else + { + /* Configure the PLLI2S division factor */ + /* PLLI2S_VCO Input = PLL_SOURCE/PLLI2SM */ + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM)); + } + else + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSI_VALUE / (uint32_t)(RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM)); + } + } + /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ + vcooutput = (uint32_t)(vcoinput * (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> 6U) & (RCC_PLLI2SCFGR_PLLI2SN >> 6U))); + /* I2S_CLK = PLLI2S_VCO Output/PLLI2SR */ + frequency = (uint32_t)(vcooutput / (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> 28U) & (RCC_PLLI2SCFGR_PLLI2SR >> 28U))); + break; + } + /* Check if I2S clock selection is PLL VCO Output divided by PLLR used as I2S clock */ + case RCC_I2SAPB1CLKSOURCE_PLLR: + { + /* Configure the PLL division factor R */ + /* PLL_VCO Input = PLL_SOURCE/PLLM */ + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } + else + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSI_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } + + /* PLL_VCO Output = PLL_VCO Input * PLLN */ + vcooutput = (uint32_t)(vcoinput * (((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 6U) & (RCC_PLLCFGR_PLLN >> 6U))); + /* I2S_CLK = PLL_VCO Output/PLLR */ + frequency = (uint32_t)(vcooutput / (((RCC->PLLCFGR & RCC_PLLCFGR_PLLR) >> 28U) & (RCC_PLLCFGR_PLLR >> 28U))); + break; + } + /* Check if I2S clock selection is HSI or HSE depending from PLL source Clock */ + case RCC_I2SAPB1CLKSOURCE_PLLSRC: + { + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + { + frequency = HSE_VALUE; + } + else + { + frequency = HSI_VALUE; + } + break; + } + /* Clock not enabled for I2S*/ + default: + { + frequency = 0U; + break; + } + } + break; + } + case RCC_PERIPHCLK_I2S_APB2: + { + /* 
Get the current I2S source */ + srcclk = __HAL_RCC_GET_I2S_APB2_SOURCE(); + switch (srcclk) + { + /* Check if I2S clock selection is External clock mapped on the I2S_CKIN pin used as I2S clock */ + case RCC_I2SAPB2CLKSOURCE_EXT: + { + /* Set the I2S clock to the external clock value */ + frequency = EXTERNAL_CLOCK_VALUE; + break; + } + /* Check if I2S clock selection is PLLI2S VCO output clock divided by PLLI2SR used as I2S clock */ + case RCC_I2SAPB2CLKSOURCE_PLLI2S: + { + if ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SSRC) == RCC_PLLI2SCFGR_PLLI2SSRC) + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(EXTERNAL_CLOCK_VALUE / (uint32_t)(RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM)); + } + else + { + /* Configure the PLLI2S division factor */ + /* PLLI2S_VCO Input = PLL_SOURCE/PLLI2SM */ + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM)); + } + else + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSI_VALUE / (uint32_t)(RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM)); + } + } + /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ + vcooutput = (uint32_t)(vcoinput * (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> 6U) & (RCC_PLLI2SCFGR_PLLI2SN >> 6U))); + /* I2S_CLK = PLLI2S_VCO Output/PLLI2SR */ + frequency = (uint32_t)(vcooutput / (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> 28U) & (RCC_PLLI2SCFGR_PLLI2SR >> 28U))); + break; + } + /* Check if I2S clock selection is PLL VCO Output divided by PLLR used as I2S clock */ + case RCC_I2SAPB2CLKSOURCE_PLLR: + { + /* Configure the PLL division factor R */ + /* PLL_VCO Input = PLL_SOURCE/PLLM */ + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } + else + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSI_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } + + /* PLL_VCO Output = PLL_VCO Input * PLLN */ + vcooutput = (uint32_t)(vcoinput * (((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 6U) & (RCC_PLLCFGR_PLLN >> 6U))); + /* I2S_CLK = PLL_VCO Output/PLLR */ + frequency = (uint32_t)(vcooutput / (((RCC->PLLCFGR & RCC_PLLCFGR_PLLR) >> 28U) & (RCC_PLLCFGR_PLLR >> 28U))); + break; + } + /* Check if I2S clock selection is HSI or HSE depending from PLL source Clock */ + case RCC_I2SAPB2CLKSOURCE_PLLSRC: + { + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + { + frequency = HSE_VALUE; + } + else + { + frequency = HSI_VALUE; + } + break; + } + /* Clock not enabled for I2S*/ + default: + { + frequency = 0U; + break; + } + } + break; + } + default: + { + break; + } + } + return frequency; +} +#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ + +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) +/** + * @brief Initializes the RCC extended peripherals clocks according to the specified parameters in the + * RCC_PeriphCLKInitTypeDef. + * @param PeriphClkInit pointer to an RCC_PeriphCLKInitTypeDef structure that + * contains the configuration information for the Extended Peripherals clocks(I2S and RTC clocks). 
 + * + * @note Care must be taken when HAL_RCCEx_PeriphCLKConfig() is used to select the RTC clock source; in this case + * the Backup domain will be reset in order to modify the RTC Clock source, as consequence the whole Backup + * domain (RTC and RCC_BDCR register, except BKPSRAM) will be reset + * + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit) +{ + uint32_t tickstart = 0U; + uint32_t tmpreg1 = 0U; + + /* Check the parameters */ + assert_param(IS_RCC_PERIPHCLOCK(PeriphClkInit->PeriphClockSelection)); + + /*---------------------------- RTC configuration ---------------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_RTC) == (RCC_PERIPHCLK_RTC)) + { + /* Check for RTC Parameters used to output RTCCLK */ + assert_param(IS_RCC_RTCCLKSOURCE(PeriphClkInit->RTCClockSelection)); + + /* Enable Power Clock*/ + __HAL_RCC_PWR_CLK_ENABLE(); + + /* Enable write access to Backup domain */ + PWR->CR |= PWR_CR_DBP; + + /* Get tick */ + tickstart = HAL_GetTick(); + + while ((PWR->CR & PWR_CR_DBP) == RESET) + { + if ((HAL_GetTick() - tickstart) > RCC_DBP_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + /* Reset the Backup domain only if the RTC Clock source selection is modified from reset value */ + tmpreg1 = (RCC->BDCR & RCC_BDCR_RTCSEL); + if ((tmpreg1 != 0x00000000U) && ((tmpreg1) != (PeriphClkInit->RTCClockSelection & RCC_BDCR_RTCSEL))) + { + /* Store the content of BDCR register before the reset of Backup Domain */ + tmpreg1 = (RCC->BDCR & ~(RCC_BDCR_RTCSEL)); + /* RTC Clock selection can be changed only if the Backup Domain is reset */ + __HAL_RCC_BACKUPRESET_FORCE(); + __HAL_RCC_BACKUPRESET_RELEASE(); + /* Restore the Content of BDCR register */ + RCC->BDCR = tmpreg1; + + /* Wait for LSE reactivation if LSE was enabled prior to Backup Domain reset */ + if (HAL_IS_BIT_SET(RCC->BDCR, RCC_BDCR_LSEON)) + { + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait till LSE is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + __HAL_RCC_RTC_CONFIG(PeriphClkInit->RTCClockSelection); + } + /*--------------------------------------------------------------------------*/ + + /*---------------------------- TIM configuration ---------------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_TIM) == (RCC_PERIPHCLK_TIM)) + { + __HAL_RCC_TIMCLKPRESCALER(PeriphClkInit->TIMPresSelection); + } + /*--------------------------------------------------------------------------*/ + + /*---------------------------- FMPI2C1 Configuration -----------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_FMPI2C1) == RCC_PERIPHCLK_FMPI2C1) + { + /* Check the parameters */ + assert_param(IS_RCC_FMPI2C1CLKSOURCE(PeriphClkInit->Fmpi2c1ClockSelection)); + + /* Configure the FMPI2C1 clock source */ + __HAL_RCC_FMPI2C1_CONFIG(PeriphClkInit->Fmpi2c1ClockSelection); + } + /*--------------------------------------------------------------------------*/ + + /*---------------------------- LPTIM1 Configuration ------------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LPTIM1) == RCC_PERIPHCLK_LPTIM1) + { + /* Check the parameters */ + assert_param(IS_RCC_LPTIM1CLKSOURCE(PeriphClkInit->Lptim1ClockSelection)); + + /* Configure the LPTIM1 clock source */ + __HAL_RCC_LPTIM1_CONFIG(PeriphClkInit->Lptim1ClockSelection); + } + + /*---------------------------- I2S 
Configuration ---------------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) + { + /* Check the parameters */ + assert_param(IS_RCC_I2SAPBCLKSOURCE(PeriphClkInit->I2SClockSelection)); + + /* Configure the I2S clock source */ + __HAL_RCC_I2S_CONFIG(PeriphClkInit->I2SClockSelection); + } + + return HAL_OK; +} + +/** + * @brief Configures the RCC_PeriphCLKInitTypeDef according to the internal + * RCC configuration registers. + * @param PeriphClkInit pointer to an RCC_PeriphCLKInitTypeDef structure that + * will be configured. + * @retval None + */ +void HAL_RCCEx_GetPeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit) +{ + uint32_t tempreg; + + /* Set all possible values for the extended clock type parameter------------*/ + PeriphClkInit->PeriphClockSelection = RCC_PERIPHCLK_FMPI2C1 | RCC_PERIPHCLK_LPTIM1 | RCC_PERIPHCLK_TIM | RCC_PERIPHCLK_RTC; + + tempreg = (RCC->CFGR & RCC_CFGR_RTCPRE); + PeriphClkInit->RTCClockSelection = (uint32_t)((tempreg) | (RCC->BDCR & RCC_BDCR_RTCSEL)); + + if ((RCC->DCKCFGR & RCC_DCKCFGR_TIMPRE) == RESET) + { + PeriphClkInit->TIMPresSelection = RCC_TIMPRES_DESACTIVATED; + } + else + { + PeriphClkInit->TIMPresSelection = RCC_TIMPRES_ACTIVATED; + } + /* Get the FMPI2C1 clock configuration -------------------------------------*/ + PeriphClkInit->Fmpi2c1ClockSelection = __HAL_RCC_GET_FMPI2C1_SOURCE(); + + /* Get the I2S clock configuration -----------------------------------------*/ + PeriphClkInit->I2SClockSelection = __HAL_RCC_GET_I2S_SOURCE(); + + +} +/** + * @brief Return the peripheral clock frequency for a given peripheral(I2S..) + * @note Return 0 if peripheral clock identifier not managed by this API + * @param PeriphClk Peripheral clock identifier + * This parameter can be one of the following values: + * @arg RCC_PERIPHCLK_I2S: I2S peripheral clock + * @retval Frequency in Hz + */ +uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) +{ + /* This variable used to store the I2S clock frequency (value in Hz) */ + uint32_t frequency = 0U; + /* This variable used to store the VCO Input (value in Hz) */ + uint32_t vcoinput = 0U; + uint32_t srcclk = 0U; + /* This variable used to store the VCO Output (value in Hz) */ + uint32_t vcooutput = 0U; + switch (PeriphClk) + { + case RCC_PERIPHCLK_I2S: + { + /* Get the current I2S source */ + srcclk = __HAL_RCC_GET_I2S_SOURCE(); + switch (srcclk) + { + /* Check if I2S clock selection is External clock mapped on the I2S_CKIN pin used as I2S clock */ + case RCC_I2SAPBCLKSOURCE_EXT: + { + /* Set the I2S clock to the external clock value */ + frequency = EXTERNAL_CLOCK_VALUE; + break; + } + /* Check if I2S clock selection is PLL VCO Output divided by PLLR used as I2S clock */ + case RCC_I2SAPBCLKSOURCE_PLLR: + { + /* Configure the PLL division factor R */ + /* PLL_VCO Input = PLL_SOURCE/PLLM */ + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } + else + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSI_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } + + /* PLL_VCO Output = PLL_VCO Input * PLLN */ + vcooutput = (uint32_t)(vcoinput * (((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 6U) & (RCC_PLLCFGR_PLLN >> 6U))); + /* I2S_CLK = PLL_VCO Output/PLLR */ + frequency = (uint32_t)(vcooutput / (((RCC->PLLCFGR & RCC_PLLCFGR_PLLR) >> 28U) & (RCC_PLLCFGR_PLLR >> 28U))); + break; + } + /* Check if I2S clock selection is HSI 
or HSE depending from PLL source Clock */ + case RCC_I2SAPBCLKSOURCE_PLLSRC: + { + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + { + frequency = HSE_VALUE; + } + else + { + frequency = HSI_VALUE; + } + break; + } + /* Clock not enabled for I2S*/ + default: + { + frequency = 0U; + break; + } + } + break; + } + default: + { + break; + } + } + return frequency; +} +#endif /* STM32F410Tx || STM32F410Cx || STM32F410Rx */ + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) +/** + * @brief Initializes the RCC extended peripherals clocks according to the specified + * parameters in the RCC_PeriphCLKInitTypeDef. + * @param PeriphClkInit pointer to an RCC_PeriphCLKInitTypeDef structure that + * contains the configuration information for the Extended Peripherals + * clocks(I2S, SAI, LTDC RTC and TIM). + * + * @note Care must be taken when HAL_RCCEx_PeriphCLKConfig() is used to select + * the RTC clock source; in this case the Backup domain will be reset in + * order to modify the RTC Clock source, as consequence RTC registers (including + * the backup registers) and RCC_BDCR register are set to their reset values. + * + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit) +{ + uint32_t tickstart = 0U; + uint32_t tmpreg1 = 0U; + + /* Check the parameters */ + assert_param(IS_RCC_PERIPHCLOCK(PeriphClkInit->PeriphClockSelection)); + + /*----------------------- SAI/I2S Configuration (PLLI2S) -------------------*/ + /*----------------------- Common configuration SAI/I2S ---------------------*/ + /* In Case of SAI or I2S Clock Configuration through PLLI2S, PLLI2SN division + factor is common parameters for both peripherals */ + if ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) || + (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI_PLLI2S) == RCC_PERIPHCLK_SAI_PLLI2S) || + (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S)) + { + /* check for Parameters */ + assert_param(IS_RCC_PLLI2SN_VALUE(PeriphClkInit->PLLI2S.PLLI2SN)); + + /* Disable the PLLI2S */ + __HAL_RCC_PLLI2S_DISABLE(); + /* Get tick */ + tickstart = HAL_GetTick(); + /* Wait till PLLI2S is disabled */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + /*---------------------------- I2S configuration -------------------------*/ + /* In Case of I2S Clock Configuration through PLLI2S, PLLI2SR must be added + only for I2S configuration */ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == (RCC_PERIPHCLK_I2S)) + { + /* check for Parameters */ + assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) * (PLLI2SN/PLLM) */ + /* I2SCLK = f(PLLI2S clock output) = f(VCO clock) / PLLI2SR */ + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SR); + } + + /*---------------------------- SAI configuration -------------------------*/ + /* In Case of SAI Clock Configuration through PLLI2S, PLLI2SQ and PLLI2S_DIVQ must + be added only for SAI configuration */ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI_PLLI2S) == (RCC_PERIPHCLK_SAI_PLLI2S)) + { + /* Check the PLLI2S division factors */ + 
assert_param(IS_RCC_PLLI2SQ_VALUE(PeriphClkInit->PLLI2S.PLLI2SQ)); + assert_param(IS_RCC_PLLI2S_DIVQ_VALUE(PeriphClkInit->PLLI2SDivQ)); + + /* Read PLLI2SR value from PLLI2SCFGR register (this value is not need for SAI configuration) */ + tmpreg1 = ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> RCC_PLLI2SCFGR_PLLI2SR_Pos); + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO Input = PLL_SOURCE/PLLM */ + /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ + /* SAI_CLK(first level) = PLLI2S_VCO Output/PLLI2SQ */ + __HAL_RCC_PLLI2S_SAICLK_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SQ, tmpreg1); + /* SAI_CLK_x = SAI_CLK(first level)/PLLI2SDIVQ */ + __HAL_RCC_PLLI2S_PLLSAICLKDIVQ_CONFIG(PeriphClkInit->PLLI2SDivQ); + } + + /*----------------- In Case of PLLI2S is just selected -----------------*/ + if ((PeriphClkInit->PeriphClockSelection & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S) + { + /* Check for Parameters */ + assert_param(IS_RCC_PLLI2SQ_VALUE(PeriphClkInit->PLLI2S.PLLI2SQ)); + assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); + + /* Configure the PLLI2S multiplication and division factors */ + __HAL_RCC_PLLI2S_SAICLK_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SQ, + PeriphClkInit->PLLI2S.PLLI2SR); + } + + /* Enable the PLLI2S */ + __HAL_RCC_PLLI2S_ENABLE(); + /* Get tick */ + tickstart = HAL_GetTick(); + /* Wait till PLLI2S is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + } + /*--------------------------------------------------------------------------*/ + + /*----------------------- SAI/LTDC Configuration (PLLSAI) ------------------*/ + /*----------------------- Common configuration SAI/LTDC --------------------*/ + /* In Case of SAI or LTDC Clock Configuration through PLLSAI, PLLSAIN division + factor is common parameters for both peripherals */ + if ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI_PLLSAI) == RCC_PERIPHCLK_SAI_PLLSAI) || + (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LTDC) == RCC_PERIPHCLK_LTDC)) + { + /* Check the PLLSAI division factors */ + assert_param(IS_RCC_PLLSAIN_VALUE(PeriphClkInit->PLLSAI.PLLSAIN)); + + /* Disable PLLSAI Clock */ + __HAL_RCC_PLLSAI_DISABLE(); + /* Get tick */ + tickstart = HAL_GetTick(); + /* Wait till PLLSAI is disabled */ + while (__HAL_RCC_PLLSAI_GET_FLAG() != RESET) + { + if ((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + /*---------------------------- SAI configuration -------------------------*/ + /* In Case of SAI Clock Configuration through PLLSAI, PLLSAIQ and PLLSAI_DIVQ must + be added only for SAI configuration */ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI_PLLSAI) == (RCC_PERIPHCLK_SAI_PLLSAI)) + { + assert_param(IS_RCC_PLLSAIQ_VALUE(PeriphClkInit->PLLSAI.PLLSAIQ)); + assert_param(IS_RCC_PLLSAI_DIVQ_VALUE(PeriphClkInit->PLLSAIDivQ)); + + /* Read PLLSAIR value from PLLSAICFGR register (this value is not need for SAI configuration) */ + tmpreg1 = ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIR) >> RCC_PLLSAICFGR_PLLSAIR_Pos); + /* PLLSAI_VCO Input = PLL_SOURCE/PLLM */ + /* PLLSAI_VCO Output = PLLSAI_VCO Input * PLLSAIN */ + /* SAI_CLK(first level) = PLLSAI_VCO Output/PLLSAIQ */ + __HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIN, PeriphClkInit->PLLSAI.PLLSAIQ, tmpreg1); + /* 
SAI_CLK_x = SAI_CLK(first level)/PLLSAIDIVQ */ + __HAL_RCC_PLLSAI_PLLSAICLKDIVQ_CONFIG(PeriphClkInit->PLLSAIDivQ); + } + + /*---------------------------- LTDC configuration ------------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LTDC) == (RCC_PERIPHCLK_LTDC)) + { + assert_param(IS_RCC_PLLSAIR_VALUE(PeriphClkInit->PLLSAI.PLLSAIR)); + assert_param(IS_RCC_PLLSAI_DIVR_VALUE(PeriphClkInit->PLLSAIDivR)); + + /* Read PLLSAIR value from PLLSAICFGR register (this value is not need for SAI configuration) */ + tmpreg1 = ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIQ) >> RCC_PLLSAICFGR_PLLSAIQ_Pos); + /* PLLSAI_VCO Input = PLL_SOURCE/PLLM */ + /* PLLSAI_VCO Output = PLLSAI_VCO Input * PLLSAIN */ + /* LTDC_CLK(first level) = PLLSAI_VCO Output/PLLSAIR */ + __HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIN, tmpreg1, PeriphClkInit->PLLSAI.PLLSAIR); + /* LTDC_CLK = LTDC_CLK(first level)/PLLSAIDIVR */ + __HAL_RCC_PLLSAI_PLLSAICLKDIVR_CONFIG(PeriphClkInit->PLLSAIDivR); + } + /* Enable PLLSAI Clock */ + __HAL_RCC_PLLSAI_ENABLE(); + /* Get tick */ + tickstart = HAL_GetTick(); + /* Wait till PLLSAI is ready */ + while (__HAL_RCC_PLLSAI_GET_FLAG() == RESET) + { + if ((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + } + /*--------------------------------------------------------------------------*/ + + /*---------------------------- RTC configuration ---------------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_RTC) == (RCC_PERIPHCLK_RTC)) + { + /* Check for RTC Parameters used to output RTCCLK */ + assert_param(IS_RCC_RTCCLKSOURCE(PeriphClkInit->RTCClockSelection)); + + /* Enable Power Clock*/ + __HAL_RCC_PWR_CLK_ENABLE(); + + /* Enable write access to Backup domain */ + PWR->CR |= PWR_CR_DBP; + + /* Get tick */ + tickstart = HAL_GetTick(); + + while ((PWR->CR & PWR_CR_DBP) == RESET) + { + if ((HAL_GetTick() - tickstart) > RCC_DBP_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + /* Reset the Backup domain only if the RTC Clock source selection is modified from reset value */ + tmpreg1 = (RCC->BDCR & RCC_BDCR_RTCSEL); + if ((tmpreg1 != 0x00000000U) && ((tmpreg1) != (PeriphClkInit->RTCClockSelection & RCC_BDCR_RTCSEL))) + { + /* Store the content of BDCR register before the reset of Backup Domain */ + tmpreg1 = (RCC->BDCR & ~(RCC_BDCR_RTCSEL)); + /* RTC Clock selection can be changed only if the Backup Domain is reset */ + __HAL_RCC_BACKUPRESET_FORCE(); + __HAL_RCC_BACKUPRESET_RELEASE(); + /* Restore the Content of BDCR register */ + RCC->BDCR = tmpreg1; + + /* Wait for LSE reactivation if LSE was enable prior to Backup Domain reset */ + if (HAL_IS_BIT_SET(RCC->BDCR, RCC_BDCR_LSEON)) + { + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait till LSE is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + __HAL_RCC_RTC_CONFIG(PeriphClkInit->RTCClockSelection); + } + /*--------------------------------------------------------------------------*/ + + /*---------------------------- TIM configuration ---------------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_TIM) == (RCC_PERIPHCLK_TIM)) + { + __HAL_RCC_TIMCLKPRESCALER(PeriphClkInit->TIMPresSelection); + } + return HAL_OK; +} + +/** + * @brief Configures the PeriphClkInit according to the internal + * RCC configuration registers. 
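As a hedged illustration of the LTDC/PLLSAI branch implemented above (the divider values, the 1 MHz VCO input, and the Configure_LTDC_Clock()/Error_Handler() names are assumptions, not part of the committed sources):

#include "stm32f4xx_hal.h"

extern void Error_Handler(void);   /* application-defined error hook (assumed) */

/* Hypothetical LTDC pixel-clock setup on an STM32F429-class device; assumes a
   1 MHz PLLSAI VCO input (e.g. HSE = 8 MHz with PLLM = 8). */
static void Configure_LTDC_Clock(void)
{
  RCC_PeriphCLKInitTypeDef PeriphClkInit = {0};

  PeriphClkInit.PeriphClockSelection = RCC_PERIPHCLK_LTDC;
  PeriphClkInit.PLLSAI.PLLSAIN       = 192U;                /* PLLSAI VCO out = 1 MHz * 192 = 192 MHz */
  PeriphClkInit.PLLSAI.PLLSAIR       = 4U;                  /* LTDC_CLK (first level) = 48 MHz        */
  PeriphClkInit.PLLSAIDivR           = RCC_PLLSAIDIVR_8;    /* LCD pixel clock = 48 MHz / 8 = 6 MHz   */

  if (HAL_RCCEx_PeriphCLKConfig(&PeriphClkInit) != HAL_OK)
  {
    Error_Handler();
  }
}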
+ * @param PeriphClkInit pointer to an RCC_PeriphCLKInitTypeDef structure that + * will be configured. + * @retval None + */ +void HAL_RCCEx_GetPeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit) +{ + uint32_t tempreg; + + /* Set all possible values for the extended clock type parameter------------*/ + PeriphClkInit->PeriphClockSelection = RCC_PERIPHCLK_I2S | RCC_PERIPHCLK_SAI_PLLSAI | RCC_PERIPHCLK_SAI_PLLI2S | RCC_PERIPHCLK_LTDC | RCC_PERIPHCLK_TIM | RCC_PERIPHCLK_RTC; + + /* Get the PLLI2S Clock configuration -----------------------------------------------*/ + PeriphClkInit->PLLI2S.PLLI2SN = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> RCC_PLLI2SCFGR_PLLI2SN_Pos); + PeriphClkInit->PLLI2S.PLLI2SR = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> RCC_PLLI2SCFGR_PLLI2SR_Pos); + PeriphClkInit->PLLI2S.PLLI2SQ = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SQ) >> RCC_PLLI2SCFGR_PLLI2SQ_Pos); + /* Get the PLLSAI Clock configuration -----------------------------------------------*/ + PeriphClkInit->PLLSAI.PLLSAIN = (uint32_t)((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIN) >> RCC_PLLSAICFGR_PLLSAIN_Pos); + PeriphClkInit->PLLSAI.PLLSAIR = (uint32_t)((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIR) >> RCC_PLLSAICFGR_PLLSAIR_Pos); + PeriphClkInit->PLLSAI.PLLSAIQ = (uint32_t)((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIQ) >> RCC_PLLSAICFGR_PLLSAIQ_Pos); + /* Get the PLLSAI/PLLI2S division factors -----------------------------------------------*/ + PeriphClkInit->PLLI2SDivQ = (uint32_t)((RCC->DCKCFGR & RCC_DCKCFGR_PLLI2SDIVQ) >> RCC_DCKCFGR_PLLI2SDIVQ_Pos); + PeriphClkInit->PLLSAIDivQ = (uint32_t)((RCC->DCKCFGR & RCC_DCKCFGR_PLLSAIDIVQ) >> RCC_DCKCFGR_PLLSAIDIVQ_Pos); + PeriphClkInit->PLLSAIDivR = (uint32_t)(RCC->DCKCFGR & RCC_DCKCFGR_PLLSAIDIVR); + /* Get the RTC Clock configuration -----------------------------------------------*/ + tempreg = (RCC->CFGR & RCC_CFGR_RTCPRE); + PeriphClkInit->RTCClockSelection = (uint32_t)((tempreg) | (RCC->BDCR & RCC_BDCR_RTCSEL)); + + if ((RCC->DCKCFGR & RCC_DCKCFGR_TIMPRE) == RESET) + { + PeriphClkInit->TIMPresSelection = RCC_TIMPRES_DESACTIVATED; + } + else + { + PeriphClkInit->TIMPresSelection = RCC_TIMPRES_ACTIVATED; + } +} + +/** + * @brief Return the peripheral clock frequency for a given peripheral(SAI..) 
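A minimal hedged sketch of how an application might consume this query (the helper name is an assumption); per the implementation that follows, the value is computed in Hz and 0 means the clock is not enabled or not managed:

#include "stm32f4xx_hal.h"

/* Hypothetical helper: report the I2S kernel clock actually reached. */
static uint32_t Get_I2S_Kernel_Clock(void)
{
  return HAL_RCCEx_GetPeriphCLKFreq(RCC_PERIPHCLK_I2S);
}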
+ * @note Return 0 if peripheral clock identifier not managed by this API + * @param PeriphClk Peripheral clock identifier + * This parameter can be one of the following values: + * @arg RCC_PERIPHCLK_I2S: I2S peripheral clock + * @retval Frequency in KHz + */ +uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) +{ + /* This variable used to store the I2S clock frequency (value in Hz) */ + uint32_t frequency = 0U; + /* This variable used to store the VCO Input (value in Hz) */ + uint32_t vcoinput = 0U; + uint32_t srcclk = 0U; + /* This variable used to store the VCO Output (value in Hz) */ + uint32_t vcooutput = 0U; + switch (PeriphClk) + { + case RCC_PERIPHCLK_I2S: + { + /* Get the current I2S source */ + srcclk = __HAL_RCC_GET_I2S_SOURCE(); + switch (srcclk) + { + /* Check if I2S clock selection is External clock mapped on the I2S_CKIN pin used as I2S clock */ + case RCC_I2SCLKSOURCE_EXT: + { + /* Set the I2S clock to the external clock value */ + frequency = EXTERNAL_CLOCK_VALUE; + break; + } + /* Check if I2S clock selection is PLLI2S VCO output clock divided by PLLI2SR used as I2S clock */ + case RCC_I2SCLKSOURCE_PLLI2S: + { + /* Configure the PLLI2S division factor */ + /* PLLI2S_VCO Input = PLL_SOURCE/PLLM */ + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } + else + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSI_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } + + /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ + vcooutput = (uint32_t)(vcoinput * (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> 6U) & (RCC_PLLI2SCFGR_PLLI2SN >> 6U))); + /* I2S_CLK = PLLI2S_VCO Output/PLLI2SR */ + frequency = (uint32_t)(vcooutput / (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> 28U) & (RCC_PLLI2SCFGR_PLLI2SR >> 28U))); + break; + } + /* Clock not enabled for I2S*/ + default: + { + frequency = 0U; + break; + } + } + break; + } + default: + { + break; + } + } + return frequency; +} +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx */ + +#if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx)|| defined(STM32F417xx) ||\ + defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) +/** + * @brief Initializes the RCC extended peripherals clocks according to the specified parameters in the + * RCC_PeriphCLKInitTypeDef. + * @param PeriphClkInit pointer to an RCC_PeriphCLKInitTypeDef structure that + * contains the configuration information for the Extended Peripherals clocks(I2S and RTC clocks). 
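As a hedged usage sketch of this function for the I2S kernel clock (the multiplier/divider values and the Configure_I2S_Clock()/Error_Handler() names are assumptions, not part of the committed sources):

#include "stm32f4xx_hal.h"

extern void Error_Handler(void);   /* application-defined error hook (assumed) */

/* Hypothetical I2S kernel-clock setup; assumes a 1 MHz PLLI2S VCO input
   (e.g. HSE = 8 MHz with PLLM = 8). On STM32F411xE, PLLI2S.PLLI2SM must be set as well. */
static void Configure_I2S_Clock(void)
{
  RCC_PeriphCLKInitTypeDef PeriphClkInit = {0};

  PeriphClkInit.PeriphClockSelection = RCC_PERIPHCLK_I2S;
  PeriphClkInit.PLLI2S.PLLI2SN       = 192U;   /* PLLI2S VCO out = 1 MHz * 192 = 192 MHz */
  PeriphClkInit.PLLI2S.PLLI2SR       = 2U;     /* I2SCLK = 192 MHz / 2 = 96 MHz          */

  if (HAL_RCCEx_PeriphCLKConfig(&PeriphClkInit) != HAL_OK)
  {
    Error_Handler();
  }
}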
+ * + * @note A caution to be taken when HAL_RCCEx_PeriphCLKConfig() is used to select RTC clock selection, in this case + * the Reset of Backup domain will be applied in order to modify the RTC Clock source as consequence all backup + * domain (RTC and RCC_BDCR register expect BKPSRAM) will be reset + * + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit) +{ + uint32_t tickstart = 0U; + uint32_t tmpreg1 = 0U; + + /* Check the parameters */ + assert_param(IS_RCC_PERIPHCLOCK(PeriphClkInit->PeriphClockSelection)); + + /*---------------------------- I2S configuration ---------------------------*/ + if ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) || + (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S)) + { + /* check for Parameters */ + assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); + assert_param(IS_RCC_PLLI2SN_VALUE(PeriphClkInit->PLLI2S.PLLI2SN)); +#if defined(STM32F411xE) + assert_param(IS_RCC_PLLI2SM_VALUE(PeriphClkInit->PLLI2S.PLLI2SM)); +#endif /* STM32F411xE */ + /* Disable the PLLI2S */ + __HAL_RCC_PLLI2S_DISABLE(); + /* Get tick */ + tickstart = HAL_GetTick(); + /* Wait till PLLI2S is disabled */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + +#if defined(STM32F411xE) + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) * (PLLI2SN/PLLI2SM) */ + /* I2SCLK = f(PLLI2S clock output) = f(VCO clock) / PLLI2SR */ + __HAL_RCC_PLLI2S_I2SCLK_CONFIG(PeriphClkInit->PLLI2S.PLLI2SM, PeriphClkInit->PLLI2S.PLLI2SN, + PeriphClkInit->PLLI2S.PLLI2SR); +#else + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) * (PLLI2SN/PLLM) */ + /* I2SCLK = f(PLLI2S clock output) = f(VCO clock) / PLLI2SR */ + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SR); +#endif /* STM32F411xE */ + + /* Enable the PLLI2S */ + __HAL_RCC_PLLI2S_ENABLE(); + /* Get tick */ + tickstart = HAL_GetTick(); + /* Wait till PLLI2S is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + } + + /*---------------------------- RTC configuration ---------------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_RTC) == (RCC_PERIPHCLK_RTC)) + { + /* Check for RTC Parameters used to output RTCCLK */ + assert_param(IS_RCC_RTCCLKSOURCE(PeriphClkInit->RTCClockSelection)); + + /* Enable Power Clock*/ + __HAL_RCC_PWR_CLK_ENABLE(); + + /* Enable write access to Backup domain */ + PWR->CR |= PWR_CR_DBP; + + /* Get tick */ + tickstart = HAL_GetTick(); + + while ((PWR->CR & PWR_CR_DBP) == RESET) + { + if ((HAL_GetTick() - tickstart) > RCC_DBP_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + /* Reset the Backup domain only if the RTC Clock source selection is modified from reset value */ + tmpreg1 = (RCC->BDCR & RCC_BDCR_RTCSEL); + if ((tmpreg1 != 0x00000000U) && ((tmpreg1) != (PeriphClkInit->RTCClockSelection & RCC_BDCR_RTCSEL))) + { + /* Store the content of BDCR register before the reset of Backup Domain */ + tmpreg1 = (RCC->BDCR & ~(RCC_BDCR_RTCSEL)); + /* RTC Clock selection can be changed only if the Backup Domain is reset */ + 
__HAL_RCC_BACKUPRESET_FORCE(); + __HAL_RCC_BACKUPRESET_RELEASE(); + /* Restore the Content of BDCR register */ + RCC->BDCR = tmpreg1; + + /* Wait for LSE reactivation if LSE was enable prior to Backup Domain reset */ + if (HAL_IS_BIT_SET(RCC->BDCR, RCC_BDCR_LSEON)) + { + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait till LSE is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + __HAL_RCC_RTC_CONFIG(PeriphClkInit->RTCClockSelection); + } +#if defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) + /*---------------------------- TIM configuration ---------------------------*/ + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_TIM) == (RCC_PERIPHCLK_TIM)) + { + __HAL_RCC_TIMCLKPRESCALER(PeriphClkInit->TIMPresSelection); + } +#endif /* STM32F401xC || STM32F401xE || STM32F411xE */ + return HAL_OK; +} + +/** + * @brief Configures the RCC_OscInitStruct according to the internal + * RCC configuration registers. + * @param PeriphClkInit pointer to an RCC_PeriphCLKInitTypeDef structure that + * will be configured. + * @retval None + */ +void HAL_RCCEx_GetPeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit) +{ + uint32_t tempreg; + + /* Set all possible values for the extended clock type parameter------------*/ + PeriphClkInit->PeriphClockSelection = RCC_PERIPHCLK_I2S | RCC_PERIPHCLK_RTC; + + /* Get the PLLI2S Clock configuration --------------------------------------*/ + PeriphClkInit->PLLI2S.PLLI2SN = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> RCC_PLLI2SCFGR_PLLI2SN_Pos); + PeriphClkInit->PLLI2S.PLLI2SR = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> RCC_PLLI2SCFGR_PLLI2SR_Pos); +#if defined(STM32F411xE) + PeriphClkInit->PLLI2S.PLLI2SM = (uint32_t)(RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM); +#endif /* STM32F411xE */ + /* Get the RTC Clock configuration -----------------------------------------*/ + tempreg = (RCC->CFGR & RCC_CFGR_RTCPRE); + PeriphClkInit->RTCClockSelection = (uint32_t)((tempreg) | (RCC->BDCR & RCC_BDCR_RTCSEL)); + +#if defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) + /* Get the TIM Prescaler configuration -------------------------------------*/ + if ((RCC->DCKCFGR & RCC_DCKCFGR_TIMPRE) == RESET) + { + PeriphClkInit->TIMPresSelection = RCC_TIMPRES_DESACTIVATED; + } + else + { + PeriphClkInit->TIMPresSelection = RCC_TIMPRES_ACTIVATED; + } +#endif /* STM32F401xC || STM32F401xE || STM32F411xE */ +} + +/** + * @brief Return the peripheral clock frequency for a given peripheral(SAI..) 
+ * @note Return 0 if peripheral clock identifier not managed by this API + * @param PeriphClk Peripheral clock identifier + * This parameter can be one of the following values: + * @arg RCC_PERIPHCLK_I2S: I2S peripheral clock + * @retval Frequency in KHz + */ +uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) +{ + /* This variable used to store the I2S clock frequency (value in Hz) */ + uint32_t frequency = 0U; + /* This variable used to store the VCO Input (value in Hz) */ + uint32_t vcoinput = 0U; + uint32_t srcclk = 0U; + /* This variable used to store the VCO Output (value in Hz) */ + uint32_t vcooutput = 0U; + switch (PeriphClk) + { + case RCC_PERIPHCLK_I2S: + { + /* Get the current I2S source */ + srcclk = __HAL_RCC_GET_I2S_SOURCE(); + switch (srcclk) + { + /* Check if I2S clock selection is External clock mapped on the I2S_CKIN pin used as I2S clock */ + case RCC_I2SCLKSOURCE_EXT: + { + /* Set the I2S clock to the external clock value */ + frequency = EXTERNAL_CLOCK_VALUE; + break; + } + /* Check if I2S clock selection is PLLI2S VCO output clock divided by PLLI2SR used as I2S clock */ + case RCC_I2SCLKSOURCE_PLLI2S: + { +#if defined(STM32F411xE) + /* Configure the PLLI2S division factor */ + /* PLLI2S_VCO Input = PLL_SOURCE/PLLI2SM */ + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM)); + } + else + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSI_VALUE / (uint32_t)(RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM)); + } +#else + /* Configure the PLLI2S division factor */ + /* PLLI2S_VCO Input = PLL_SOURCE/PLLM */ + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } + else + { + /* Get the I2S source clock value */ + vcoinput = (uint32_t)(HSI_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } +#endif /* STM32F411xE */ + /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ + vcooutput = (uint32_t)(vcoinput * (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> 6U) & (RCC_PLLI2SCFGR_PLLI2SN >> 6U))); + /* I2S_CLK = PLLI2S_VCO Output/PLLI2SR */ + frequency = (uint32_t)(vcooutput / (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> 28U) & (RCC_PLLI2SCFGR_PLLI2SR >> 28U))); + break; + } + /* Clock not enabled for I2S*/ + default: + { + frequency = 0U; + break; + } + } + break; + } + default: + { + break; + } + } + return frequency; +} +#endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx || STM32F401xC || STM32F401xE || STM32F411xE */ + +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) || defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) ||\ + defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +/** + * @brief Select LSE mode + * + * @note This mode is only available for STM32F410xx/STM32F411xx/STM32F446xx/STM32F469xx/STM32F479xx/STM32F412Zx/STM32F412Vx/STM32F412Rx/STM32F412Cx devices. + * + * @param Mode specifies the LSE mode. 
+ * This parameter can be one of the following values: + * @arg RCC_LSE_LOWPOWER_MODE: LSE oscillator in low power mode selection + * @arg RCC_LSE_HIGHDRIVE_MODE: LSE oscillator in High Drive mode selection + * @retval None + */ +void HAL_RCCEx_SelectLSEMode(uint8_t Mode) +{ + /* Check the parameters */ + assert_param(IS_RCC_LSE_MODE(Mode)); + if (Mode == RCC_LSE_HIGHDRIVE_MODE) + { + SET_BIT(RCC->BDCR, RCC_BDCR_LSEMOD); + } + else + { + CLEAR_BIT(RCC->BDCR, RCC_BDCR_LSEMOD); + } +} + +#endif /* STM32F410xx || STM32F411xE || STM32F446xx || STM32F469xx || STM32F479xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ + +/** @defgroup RCCEx_Exported_Functions_Group2 Extended Clock management functions + * @brief Extended Clock management functions + * +@verbatim + =============================================================================== + ##### Extended clock management functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to control the + activation or deactivation of PLLI2S, PLLSAI. +@endverbatim + * @{ + */ + +#if defined(RCC_PLLI2S_SUPPORT) +/** + * @brief Enable PLLI2S. + * @param PLLI2SInit pointer to an RCC_PLLI2SInitTypeDef structure that + * contains the configuration information for the PLLI2S + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCCEx_EnablePLLI2S(RCC_PLLI2SInitTypeDef *PLLI2SInit) +{ + uint32_t tickstart; + + /* Check for parameters */ + assert_param(IS_RCC_PLLI2SN_VALUE(PLLI2SInit->PLLI2SN)); + assert_param(IS_RCC_PLLI2SR_VALUE(PLLI2SInit->PLLI2SR)); +#if defined(RCC_PLLI2SCFGR_PLLI2SM) + assert_param(IS_RCC_PLLI2SM_VALUE(PLLI2SInit->PLLI2SM)); +#endif /* RCC_PLLI2SCFGR_PLLI2SM */ +#if defined(RCC_PLLI2SCFGR_PLLI2SP) + assert_param(IS_RCC_PLLI2SP_VALUE(PLLI2SInit->PLLI2SP)); +#endif /* RCC_PLLI2SCFGR_PLLI2SP */ +#if defined(RCC_PLLI2SCFGR_PLLI2SQ) + assert_param(IS_RCC_PLLI2SQ_VALUE(PLLI2SInit->PLLI2SQ)); +#endif /* RCC_PLLI2SCFGR_PLLI2SQ */ + + /* Disable the PLLI2S */ + __HAL_RCC_PLLI2S_DISABLE(); + + /* Wait till PLLI2S is disabled */ + tickstart = HAL_GetTick(); + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + /* Configure the PLLI2S division factors */ +#if defined(STM32F446xx) + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) * (PLLI2SN/PLLI2SM) */ + /* I2SPCLK = PLLI2S_VCO / PLLI2SP */ + /* I2SQCLK = PLLI2S_VCO / PLLI2SQ */ + /* I2SRCLK = PLLI2S_VCO / PLLI2SR */ + __HAL_RCC_PLLI2S_CONFIG(PLLI2SInit->PLLI2SM, PLLI2SInit->PLLI2SN, \ + PLLI2SInit->PLLI2SP, PLLI2SInit->PLLI2SQ, PLLI2SInit->PLLI2SR); +#elif defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) ||\ + defined(STM32F413xx) || defined(STM32F423xx) + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) * (PLLI2SN/PLLI2SM)*/ + /* I2SQCLK = PLLI2S_VCO / PLLI2SQ */ + /* I2SRCLK = PLLI2S_VCO / PLLI2SR */ + __HAL_RCC_PLLI2S_CONFIG(PLLI2SInit->PLLI2SM, PLLI2SInit->PLLI2SN, \ + PLLI2SInit->PLLI2SQ, PLLI2SInit->PLLI2SR); +#elif defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) ||\ + defined(STM32F469xx) || defined(STM32F479xx) + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) * PLLI2SN */ + /* I2SQCLK = PLLI2S_VCO / PLLI2SQ */ + /* I2SRCLK = PLLI2S_VCO / PLLI2SR */ + __HAL_RCC_PLLI2S_SAICLK_CONFIG(PLLI2SInit->PLLI2SN, 
PLLI2SInit->PLLI2SQ, PLLI2SInit->PLLI2SR); +#elif defined(STM32F411xE) + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) * (PLLI2SN/PLLI2SM) */ + /* I2SRCLK = PLLI2S_VCO / PLLI2SR */ + __HAL_RCC_PLLI2S_I2SCLK_CONFIG(PLLI2SInit->PLLI2SM, PLLI2SInit->PLLI2SN, PLLI2SInit->PLLI2SR); +#else + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) x PLLI2SN */ + /* I2SRCLK = PLLI2S_VCO / PLLI2SR */ + __HAL_RCC_PLLI2S_CONFIG(PLLI2SInit->PLLI2SN, PLLI2SInit->PLLI2SR); +#endif /* STM32F446xx */ + + /* Enable the PLLI2S */ + __HAL_RCC_PLLI2S_ENABLE(); + + /* Wait till PLLI2S is ready */ + tickstart = HAL_GetTick(); + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + return HAL_OK; +} + +/** + * @brief Disable PLLI2S. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCCEx_DisablePLLI2S(void) +{ + uint32_t tickstart; + + /* Disable the PLLI2S */ + __HAL_RCC_PLLI2S_DISABLE(); + + /* Wait till PLLI2S is disabled */ + tickstart = HAL_GetTick(); + while (READ_BIT(RCC->CR, RCC_CR_PLLI2SRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + return HAL_OK; +} + +#endif /* RCC_PLLI2S_SUPPORT */ + +#if defined(RCC_PLLSAI_SUPPORT) +/** + * @brief Enable PLLSAI. + * @param PLLSAIInit pointer to an RCC_PLLSAIInitTypeDef structure that + * contains the configuration information for the PLLSAI + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCCEx_EnablePLLSAI(RCC_PLLSAIInitTypeDef *PLLSAIInit) +{ + uint32_t tickstart; + + /* Check for parameters */ + assert_param(IS_RCC_PLLSAIN_VALUE(PLLSAIInit->PLLSAIN)); + assert_param(IS_RCC_PLLSAIQ_VALUE(PLLSAIInit->PLLSAIQ)); +#if defined(RCC_PLLSAICFGR_PLLSAIM) + assert_param(IS_RCC_PLLSAIM_VALUE(PLLSAIInit->PLLSAIM)); +#endif /* RCC_PLLSAICFGR_PLLSAIM */ +#if defined(RCC_PLLSAICFGR_PLLSAIP) + assert_param(IS_RCC_PLLSAIP_VALUE(PLLSAIInit->PLLSAIP)); +#endif /* RCC_PLLSAICFGR_PLLSAIP */ +#if defined(RCC_PLLSAICFGR_PLLSAIR) + assert_param(IS_RCC_PLLSAIR_VALUE(PLLSAIInit->PLLSAIR)); +#endif /* RCC_PLLSAICFGR_PLLSAIR */ + + /* Disable the PLLSAI */ + __HAL_RCC_PLLSAI_DISABLE(); + + /* Wait till PLLSAI is disabled */ + tickstart = HAL_GetTick(); + while (__HAL_RCC_PLLSAI_GET_FLAG() != RESET) + { + if ((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + /* Configure the PLLSAI division factors */ +#if defined(STM32F446xx) + /* PLLSAI_VCO = f(VCO clock) = f(PLLSAI clock input) * (PLLSAIN/PLLSAIM) */ + /* SAIPCLK = PLLSAI_VCO / PLLSAIP */ + /* SAIQCLK = PLLSAI_VCO / PLLSAIQ */ + /* SAIRCLK = PLLSAI_VCO / PLLSAIR */ + __HAL_RCC_PLLSAI_CONFIG(PLLSAIInit->PLLSAIM, PLLSAIInit->PLLSAIN, \ + PLLSAIInit->PLLSAIP, PLLSAIInit->PLLSAIQ, 0U); +#elif defined(STM32F469xx) || defined(STM32F479xx) + /* PLLSAI_VCO = f(VCO clock) = f(PLLSAI clock input) * PLLSAIN */ + /* SAIPCLK = PLLSAI_VCO / PLLSAIP */ + /* SAIQCLK = PLLSAI_VCO / PLLSAIQ */ + /* SAIRCLK = PLLSAI_VCO / PLLSAIR */ + __HAL_RCC_PLLSAI_CONFIG(PLLSAIInit->PLLSAIN, PLLSAIInit->PLLSAIP, \ + PLLSAIInit->PLLSAIQ, PLLSAIInit->PLLSAIR); +#else + /* PLLSAI_VCO = f(VCO clock) = f(PLLSAI clock input) x PLLSAIN */ + /* SAIQCLK = PLLSAI_VCO / PLLSAIQ */ + /* SAIRCLK = PLLSAI_VCO / PLLSAIR */ + __HAL_RCC_PLLSAI_CONFIG(PLLSAIInit->PLLSAIN, PLLSAIInit->PLLSAIQ, PLLSAIInit->PLLSAIR); +#endif /* STM32F446xx */ + + /* 
Enable the PLLSAI */ + __HAL_RCC_PLLSAI_ENABLE(); + + /* Wait till PLLSAI is ready */ + tickstart = HAL_GetTick(); + while (__HAL_RCC_PLLSAI_GET_FLAG() == RESET) + { + if ((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + return HAL_OK; +} + +/** + * @brief Disable PLLSAI. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCCEx_DisablePLLSAI(void) +{ + uint32_t tickstart; + + /* Disable the PLLSAI */ + __HAL_RCC_PLLSAI_DISABLE(); + + /* Wait till PLLSAI is disabled */ + tickstart = HAL_GetTick(); + while (__HAL_RCC_PLLSAI_GET_FLAG() != RESET) + { + if ((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + return HAL_OK; +} + +#endif /* RCC_PLLSAI_SUPPORT */ + +/** + * @} + */ + +#if defined(STM32F446xx) +/** + * @brief Returns the SYSCLK frequency + * + * @note This function implementation is valid only for STM32F446xx devices. + * @note This function add the PLL/PLLR System clock source + * + * @note The system frequency computed by this function is not the real + * frequency in the chip. It is calculated based on the predefined + * constant and the selected clock source: + * @note If SYSCLK source is HSI, function returns values based on HSI_VALUE(*) + * @note If SYSCLK source is HSE, function returns values based on HSE_VALUE(**) + * @note If SYSCLK source is PLL or PLLR, function returns values based on HSE_VALUE(**) + * or HSI_VALUE(*) multiplied/divided by the PLL factors. + * @note (*) HSI_VALUE is a constant defined in stm32f4xx_hal_conf.h file (default value + * 16 MHz) but the real value may vary depending on the variations + * in voltage and temperature. + * @note (**) HSE_VALUE is a constant defined in stm32f4xx_hal_conf.h file (default value + * 25 MHz), user has to ensure that HSE_VALUE is same as the real + * frequency of the crystal used. Otherwise, this function may + * have wrong result. + * + * @note The result of this function could be not correct when using fractional + * value for HSE crystal. + * + * @note This function can be used by the user application to compute the + * baudrate for the communication peripherals or configure other parameters. + * + * @note Each time SYSCLK changes, this function must be called to update the + * right SYSCLK value. Otherwise, any configuration based on this function will be incorrect. 
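To make the note about recomputing SYSCLK concrete, a small hedged sketch; the oscillator and PLL factors quoted in the comment, and the helper name, are assumptions:

#include "stm32f4xx_hal.h"

/* Hypothetical helper: re-derive a per-millisecond cycle budget from the reported SYSCLK. */
static uint32_t Cycles_Per_Millisecond(void)
{
  /* With an assumed HSE = 8 MHz, PLLM = 8, PLLN = 360, PLLR = 2 and SYSCLK on PLL/PLLR:
       PLL_VCO = (8 MHz / 8) * 360 = 360 MHz
       SYSCLK  = 360 MHz / 2      = 180 MHz  */
  uint32_t sysclk = HAL_RCC_GetSysClockFreq();

  return sysclk / 1000U;
}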
+ * + * + * @retval SYSCLK frequency + */ +uint32_t HAL_RCC_GetSysClockFreq(void) +{ + uint32_t pllm = 0U; + uint32_t pllvco = 0U; + uint32_t pllp = 0U; + uint32_t pllr = 0U; + uint32_t sysclockfreq = 0U; + + /* Get SYSCLK source -------------------------------------------------------*/ + switch (RCC->CFGR & RCC_CFGR_SWS) + { + case RCC_CFGR_SWS_HSI: /* HSI used as system clock source */ + { + sysclockfreq = HSI_VALUE; + break; + } + case RCC_CFGR_SWS_HSE: /* HSE used as system clock source */ + { + sysclockfreq = HSE_VALUE; + break; + } + case RCC_CFGR_SWS_PLL: /* PLL/PLLP used as system clock source */ + { + /* PLL_VCO = (HSE_VALUE or HSI_VALUE / PLLM) * PLLN + SYSCLK = PLL_VCO / PLLP */ + pllm = RCC->PLLCFGR & RCC_PLLCFGR_PLLM; + if (__HAL_RCC_GET_PLL_OSCSOURCE() != RCC_PLLSOURCE_HSI) + { + /* HSE used as PLL clock source */ + pllvco = (uint32_t)((((uint64_t) HSE_VALUE * ((uint64_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos)))) / (uint64_t)pllm); + } + else + { + /* HSI used as PLL clock source */ + pllvco = (uint32_t)((((uint64_t) HSI_VALUE * ((uint64_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos)))) / (uint64_t)pllm); + } + pllp = ((((RCC->PLLCFGR & RCC_PLLCFGR_PLLP) >> RCC_PLLCFGR_PLLP_Pos) + 1U) * 2U); + + sysclockfreq = pllvco / pllp; + break; + } + case RCC_CFGR_SWS_PLLR: /* PLL/PLLR used as system clock source */ + { + /* PLL_VCO = (HSE_VALUE or HSI_VALUE / PLLM) * PLLN + SYSCLK = PLL_VCO / PLLR */ + pllm = RCC->PLLCFGR & RCC_PLLCFGR_PLLM; + if (__HAL_RCC_GET_PLL_OSCSOURCE() != RCC_PLLSOURCE_HSI) + { + /* HSE used as PLL clock source */ + pllvco = (uint32_t)((((uint64_t) HSE_VALUE * ((uint64_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos)))) / (uint64_t)pllm); + } + else + { + /* HSI used as PLL clock source */ + pllvco = (uint32_t)((((uint64_t) HSI_VALUE * ((uint64_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos)))) / (uint64_t)pllm); + } + pllr = ((RCC->PLLCFGR & RCC_PLLCFGR_PLLR) >> RCC_PLLCFGR_PLLR_Pos); + + sysclockfreq = pllvco / pllr; + break; + } + default: + { + sysclockfreq = HSI_VALUE; + break; + } + } + return sysclockfreq; +} +#endif /* STM32F446xx */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @brief Resets the RCC clock configuration to the default reset state. + * @note The default reset state of the clock configuration is given below: + * - HSI ON and used as system clock source + * - HSE, PLL, PLLI2S and PLLSAI OFF + * - AHB, APB1 and APB2 prescaler set to 1. 
+ * - CSS, MCO1 and MCO2 OFF + * - All interrupts disabled + * @note This function doesn't modify the configuration of the + * - Peripheral clocks + * - LSI, LSE and RTC clocks + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCC_DeInit(void) +{ + uint32_t tickstart; + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Set HSION bit to the reset value */ + SET_BIT(RCC->CR, RCC_CR_HSION); + + /* Wait till HSI is ready */ + while (READ_BIT(RCC->CR, RCC_CR_HSIRDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > HSI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Set HSITRIM[4:0] bits to the reset value */ + SET_BIT(RCC->CR, RCC_CR_HSITRIM_4); + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Reset CFGR register */ + CLEAR_REG(RCC->CFGR); + + /* Wait till clock switch is ready */ + while (READ_BIT(RCC->CFGR, RCC_CFGR_SWS) != RESET) + { + if ((HAL_GetTick() - tickstart) > CLOCKSWITCH_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Clear HSEON, HSEBYP and CSSON bits */ + CLEAR_BIT(RCC->CR, RCC_CR_HSEON | RCC_CR_HSEBYP | RCC_CR_CSSON); + + /* Wait till HSE is disabled */ + while (READ_BIT(RCC->CR, RCC_CR_HSERDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > HSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Clear PLLON bit */ + CLEAR_BIT(RCC->CR, RCC_CR_PLLON); + + /* Wait till PLL is disabled */ + while (READ_BIT(RCC->CR, RCC_CR_PLLRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + +#if defined(RCC_PLLI2S_SUPPORT) + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Reset PLLI2SON bit */ + CLEAR_BIT(RCC->CR, RCC_CR_PLLI2SON); + + /* Wait till PLLI2S is disabled */ + while (READ_BIT(RCC->CR, RCC_CR_PLLI2SRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } +#endif /* RCC_PLLI2S_SUPPORT */ + +#if defined(RCC_PLLSAI_SUPPORT) + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Reset PLLSAI bit */ + CLEAR_BIT(RCC->CR, RCC_CR_PLLSAION); + + /* Wait till PLLSAI is disabled */ + while (READ_BIT(RCC->CR, RCC_CR_PLLSAIRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } +#endif /* RCC_PLLSAI_SUPPORT */ + + /* Once PLL, PLLI2S and PLLSAI are OFF, reset PLLCFGR register to default value */ +#if defined(STM32F412Cx) || defined(STM32F412Rx) || defined(STM32F412Vx) || defined(STM32F412Zx) || defined(STM32F413xx) || \ + defined(STM32F423xx) || defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) + RCC->PLLCFGR = RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLN_6 | RCC_PLLCFGR_PLLN_7 | RCC_PLLCFGR_PLLQ_2 | RCC_PLLCFGR_PLLR_1; +#elif defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) + RCC->PLLCFGR = RCC_PLLCFGR_PLLR_0 | RCC_PLLCFGR_PLLR_1 | RCC_PLLCFGR_PLLR_2 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLN_6 | RCC_PLLCFGR_PLLN_7 | RCC_PLLCFGR_PLLQ_0 | RCC_PLLCFGR_PLLQ_1 | RCC_PLLCFGR_PLLQ_2 | RCC_PLLCFGR_PLLQ_3; +#else + RCC->PLLCFGR = RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLN_6 | RCC_PLLCFGR_PLLN_7 | RCC_PLLCFGR_PLLQ_2; +#endif /* STM32F412Cx || STM32F412Rx || STM32F412Vx || STM32F412Zx || STM32F413xx || STM32F423xx || STM32F446xx || STM32F469xx || STM32F479xx */ + + /* Reset PLLI2SCFGR register to default value */ +#if defined(STM32F412Cx) || defined(STM32F412Rx) || defined(STM32F412Vx) || defined(STM32F412Zx) || defined(STM32F413xx) || \ + 
defined(STM32F423xx) || defined(STM32F446xx) + RCC->PLLI2SCFGR = RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SN_6 | RCC_PLLI2SCFGR_PLLI2SN_7 | RCC_PLLI2SCFGR_PLLI2SQ_2 | RCC_PLLI2SCFGR_PLLI2SR_1; +#elif defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx) || defined(STM32F417xx) + RCC->PLLI2SCFGR = RCC_PLLI2SCFGR_PLLI2SN_6 | RCC_PLLI2SCFGR_PLLI2SN_7 | RCC_PLLI2SCFGR_PLLI2SR_1; +#elif defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) + RCC->PLLI2SCFGR = RCC_PLLI2SCFGR_PLLI2SN_6 | RCC_PLLI2SCFGR_PLLI2SN_7 | RCC_PLLI2SCFGR_PLLI2SQ_2 | RCC_PLLI2SCFGR_PLLI2SR_1; +#elif defined(STM32F411xE) + RCC->PLLI2SCFGR = RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SN_6 | RCC_PLLI2SCFGR_PLLI2SN_7 | RCC_PLLI2SCFGR_PLLI2SR_1; +#endif /* STM32F412Cx || STM32F412Rx || STM32F412Vx || STM32F412Zx || STM32F413xx || STM32F423xx || STM32F446xx */ + + /* Reset PLLSAICFGR register */ +#if defined(STM32F427xx) || defined(STM32F429xx) || defined(STM32F437xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) + RCC->PLLSAICFGR = RCC_PLLSAICFGR_PLLSAIN_6 | RCC_PLLSAICFGR_PLLSAIN_7 | RCC_PLLSAICFGR_PLLSAIQ_2 | RCC_PLLSAICFGR_PLLSAIR_1; +#elif defined(STM32F446xx) + RCC->PLLSAICFGR = RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIN_6 | RCC_PLLSAICFGR_PLLSAIN_7 | RCC_PLLSAICFGR_PLLSAIQ_2; +#endif /* STM32F427xx || STM32F429xx || STM32F437xx || STM32F439xx || STM32F469xx || STM32F479xx */ + + /* Disable all interrupts */ + CLEAR_BIT(RCC->CIR, RCC_CIR_LSIRDYIE | RCC_CIR_LSERDYIE | RCC_CIR_HSIRDYIE | RCC_CIR_HSERDYIE | RCC_CIR_PLLRDYIE); + +#if defined(RCC_CIR_PLLI2SRDYIE) + CLEAR_BIT(RCC->CIR, RCC_CIR_PLLI2SRDYIE); +#endif /* RCC_CIR_PLLI2SRDYIE */ + +#if defined(RCC_CIR_PLLSAIRDYIE) + CLEAR_BIT(RCC->CIR, RCC_CIR_PLLSAIRDYIE); +#endif /* RCC_CIR_PLLSAIRDYIE */ + + /* Clear all interrupt flags */ + SET_BIT(RCC->CIR, RCC_CIR_LSIRDYC | RCC_CIR_LSERDYC | RCC_CIR_HSIRDYC | RCC_CIR_HSERDYC | RCC_CIR_PLLRDYC | + RCC_CIR_CSSC); + +#if defined(RCC_CIR_PLLI2SRDYC) + SET_BIT(RCC->CIR, RCC_CIR_PLLI2SRDYC); +#endif /* RCC_CIR_PLLI2SRDYC */ + +#if defined(RCC_CIR_PLLSAIRDYC) + SET_BIT(RCC->CIR, RCC_CIR_PLLSAIRDYC); +#endif /* RCC_CIR_PLLSAIRDYC */ + + /* Clear LSION bit */ + CLEAR_BIT(RCC->CSR, RCC_CSR_LSION); + + /* Reset all CSR flags */ + SET_BIT(RCC->CSR, RCC_CSR_RMVF); + + /* Update the SystemCoreClock global variable */ + SystemCoreClock = HSI_VALUE; + + /* Adapt Systick interrupt period */ + if (HAL_InitTick(uwTickPrio) != HAL_OK) + { + return HAL_ERROR; + } + else + { + return HAL_OK; + } +} + +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) || defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) ||\ + defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +/** + * @brief Initializes the RCC Oscillators according to the specified parameters in the + * RCC_OscInitTypeDef. + * @param RCC_OscInitStruct pointer to an RCC_OscInitTypeDef structure that + * contains the configuration information for the RCC Oscillators. + * @note The PLL is not disabled when used as system clock. + * @note Transitions LSE Bypass to LSE On and LSE On to LSE Bypass are not + * supported by this API. User should request a transition to LSE Off + * first and then LSE On or LSE Bypass. 
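A hedged bring-up sketch for this variant of HAL_RCC_OscConfig(), including the PLLR factor it manages (the crystal value, PLL factors, and the SystemClock_Osc_Config()/Error_Handler() names are assumptions, not part of the committed sources):

#include "stm32f4xx_hal.h"

extern void Error_Handler(void);   /* application-defined error hook (assumed) */

/* Hypothetical configuration for an assumed 8 MHz HSE crystal; the factors below give
   PLL_VCO = (8 MHz / 8) * 336 = 336 MHz, SYSCLK candidate = 336 / 2 = 168 MHz, PLLQ out = 48 MHz. */
static void SystemClock_Osc_Config(void)
{
  RCC_OscInitTypeDef RCC_OscInitStruct = {0};

  RCC_OscInitStruct.OscillatorType = RCC_OSCILLATORTYPE_HSE;
  RCC_OscInitStruct.HSEState       = RCC_HSE_ON;
  RCC_OscInitStruct.PLL.PLLState   = RCC_PLL_ON;
  RCC_OscInitStruct.PLL.PLLSource  = RCC_PLLSOURCE_HSE;
  RCC_OscInitStruct.PLL.PLLM       = 8U;
  RCC_OscInitStruct.PLL.PLLN       = 336U;
  RCC_OscInitStruct.PLL.PLLP       = RCC_PLLP_DIV2;
  RCC_OscInitStruct.PLL.PLLQ       = 7U;
  RCC_OscInitStruct.PLL.PLLR       = 2U;   /* PLL/PLLR output, managed by this variant of the API */

  if (HAL_RCC_OscConfig(&RCC_OscInitStruct) != HAL_OK)
  {
    Error_Handler();
  }
}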
+ * @note Transition HSE Bypass to HSE On and HSE On to HSE Bypass are not + * supported by this API. User should request a transition to HSE Off + * first and then HSE On or HSE Bypass. + * @note This function add the PLL/PLLR factor management during PLL configuration this feature + * is only available in STM32F410xx/STM32F446xx/STM32F469xx/STM32F479xx/STM32F412Zx/STM32F412Vx/STM32F412Rx/STM32F412Cx devices + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) +{ + uint32_t tickstart; + uint32_t pll_config; + + /* Check Null pointer */ + if (RCC_OscInitStruct == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_RCC_OSCILLATORTYPE(RCC_OscInitStruct->OscillatorType)); + /*------------------------------- HSE Configuration ------------------------*/ + if (((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_HSE) == RCC_OSCILLATORTYPE_HSE) + { + /* Check the parameters */ + assert_param(IS_RCC_HSE(RCC_OscInitStruct->HSEState)); + /* When the HSE is used as system clock or clock source for PLL in these cases HSE will not disabled */ +#if defined(STM32F446xx) + if ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_HSE) + || \ + ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLL) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSE)) || \ + ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLLR) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSE))) +#else + if ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_HSE) + || \ + ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLL) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSE))) +#endif /* STM32F446xx */ + { + if ((__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) != RESET) && (RCC_OscInitStruct->HSEState == RCC_HSE_OFF)) + { + return HAL_ERROR; + } + } + else + { + /* Set the new HSE configuration ---------------------------------------*/ + __HAL_RCC_HSE_CONFIG(RCC_OscInitStruct->HSEState); + + /* Check the HSE State */ + if ((RCC_OscInitStruct->HSEState) != RCC_HSE_OFF) + { + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till HSE is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > HSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + else + { + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till HSE is bypassed or disabled */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > HSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + } + /*----------------------------- HSI Configuration --------------------------*/ + if (((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_HSI) == RCC_OSCILLATORTYPE_HSI) + { + /* Check the parameters */ + assert_param(IS_RCC_HSI(RCC_OscInitStruct->HSIState)); + assert_param(IS_RCC_CALIBRATION_VALUE(RCC_OscInitStruct->HSICalibrationValue)); + + /* Check if HSI is used as system clock or as PLL source when PLL is selected as system clock */ +#if defined(STM32F446xx) + if ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_HSI) + || \ + ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLL) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSI)) || \ + ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLLR) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSI))) +#else + if ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_HSI) + || \ + ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLL) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == 
RCC_PLLCFGR_PLLSRC_HSI))) +#endif /* STM32F446xx */ + { + /* When HSI is used as system clock it will not disabled */ + if ((__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) != RESET) && (RCC_OscInitStruct->HSIState != RCC_HSI_ON)) + { + return HAL_ERROR; + } + /* Otherwise, just the calibration is allowed */ + else + { + /* Adjusts the Internal High Speed oscillator (HSI) calibration value.*/ + __HAL_RCC_HSI_CALIBRATIONVALUE_ADJUST(RCC_OscInitStruct->HSICalibrationValue); + } + } + else + { + /* Check the HSI State */ + if ((RCC_OscInitStruct->HSIState) != RCC_HSI_OFF) + { + /* Enable the Internal High Speed oscillator (HSI). */ + __HAL_RCC_HSI_ENABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till HSI is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > HSI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Adjusts the Internal High Speed oscillator (HSI) calibration value.*/ + __HAL_RCC_HSI_CALIBRATIONVALUE_ADJUST(RCC_OscInitStruct->HSICalibrationValue); + } + else + { + /* Disable the Internal High Speed oscillator (HSI). */ + __HAL_RCC_HSI_DISABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till HSI is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > HSI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + } + /*------------------------------ LSI Configuration -------------------------*/ + if (((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_LSI) == RCC_OSCILLATORTYPE_LSI) + { + /* Check the parameters */ + assert_param(IS_RCC_LSI(RCC_OscInitStruct->LSIState)); + + /* Check the LSI State */ + if ((RCC_OscInitStruct->LSIState) != RCC_LSI_OFF) + { + /* Enable the Internal Low Speed oscillator (LSI). */ + __HAL_RCC_LSI_ENABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till LSI is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSIRDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > LSI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + else + { + /* Disable the Internal Low Speed oscillator (LSI). 
*/ + __HAL_RCC_LSI_DISABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till LSI is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSIRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > LSI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + /*------------------------------ LSE Configuration -------------------------*/ + if (((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_LSE) == RCC_OSCILLATORTYPE_LSE) + { + FlagStatus pwrclkchanged = RESET; + + /* Check the parameters */ + assert_param(IS_RCC_LSE(RCC_OscInitStruct->LSEState)); + + /* Update LSE configuration in Backup Domain control register */ + /* Requires to enable write access to Backup Domain of necessary */ + if (__HAL_RCC_PWR_IS_CLK_DISABLED()) + { + __HAL_RCC_PWR_CLK_ENABLE(); + pwrclkchanged = SET; + } + + if (HAL_IS_BIT_CLR(PWR->CR, PWR_CR_DBP)) + { + /* Enable write access to Backup domain */ + SET_BIT(PWR->CR, PWR_CR_DBP); + + /* Wait for Backup domain Write protection disable */ + tickstart = HAL_GetTick(); + + while (HAL_IS_BIT_CLR(PWR->CR, PWR_CR_DBP)) + { + if ((HAL_GetTick() - tickstart) > RCC_DBP_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + + /* Set the new LSE configuration -----------------------------------------*/ + __HAL_RCC_LSE_CONFIG(RCC_OscInitStruct->LSEState); + /* Check the LSE State */ + if ((RCC_OscInitStruct->LSEState) != RCC_LSE_OFF) + { + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till LSE is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + else + { + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till LSE is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + + /* Restore clock configuration if changed */ + if (pwrclkchanged == SET) + { + __HAL_RCC_PWR_CLK_DISABLE(); + } + } + /*-------------------------------- PLL Configuration -----------------------*/ + /* Check the parameters */ + assert_param(IS_RCC_PLL(RCC_OscInitStruct->PLL.PLLState)); + if ((RCC_OscInitStruct->PLL.PLLState) != RCC_PLL_NONE) + { + /* Check if the PLL is used as system clock or not */ + if (__HAL_RCC_GET_SYSCLK_SOURCE() != RCC_CFGR_SWS_PLL) + { + if ((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_ON) + { + /* Check the parameters */ + assert_param(IS_RCC_PLLSOURCE(RCC_OscInitStruct->PLL.PLLSource)); + assert_param(IS_RCC_PLLM_VALUE(RCC_OscInitStruct->PLL.PLLM)); + assert_param(IS_RCC_PLLN_VALUE(RCC_OscInitStruct->PLL.PLLN)); + assert_param(IS_RCC_PLLP_VALUE(RCC_OscInitStruct->PLL.PLLP)); + assert_param(IS_RCC_PLLQ_VALUE(RCC_OscInitStruct->PLL.PLLQ)); + assert_param(IS_RCC_PLLR_VALUE(RCC_OscInitStruct->PLL.PLLR)); + + /* Disable the main PLL. */ + __HAL_RCC_PLL_DISABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLL is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Configure the main PLL clock source, multiplication and division factors. 
*/ + WRITE_REG(RCC->PLLCFGR, (RCC_OscInitStruct->PLL.PLLSource | \ + RCC_OscInitStruct->PLL.PLLM | \ + (RCC_OscInitStruct->PLL.PLLN << RCC_PLLCFGR_PLLN_Pos) | \ + (((RCC_OscInitStruct->PLL.PLLP >> 1U) - 1U) << RCC_PLLCFGR_PLLP_Pos) | \ + (RCC_OscInitStruct->PLL.PLLQ << RCC_PLLCFGR_PLLQ_Pos) | \ + (RCC_OscInitStruct->PLL.PLLR << RCC_PLLCFGR_PLLR_Pos))); + /* Enable the main PLL. */ + __HAL_RCC_PLL_ENABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLL is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + else + { + /* Disable the main PLL. */ + __HAL_RCC_PLL_DISABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLL is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + else + { + /* Check if there is a request to disable the PLL used as System clock source */ + if ((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_OFF) + { + return HAL_ERROR; + } + else + { + /* Do not return HAL_ERROR if request repeats the current configuration */ + pll_config = RCC->PLLCFGR; +#if defined (RCC_PLLCFGR_PLLR) + if (((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_OFF) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLSRC) != RCC_OscInitStruct->PLL.PLLSource) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLM) != (RCC_OscInitStruct->PLL.PLLM) << RCC_PLLCFGR_PLLM_Pos) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLN) != (RCC_OscInitStruct->PLL.PLLN) << RCC_PLLCFGR_PLLN_Pos) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLP) != (((RCC_OscInitStruct->PLL.PLLP >> 1U) - 1U)) << RCC_PLLCFGR_PLLP_Pos) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLQ) != (RCC_OscInitStruct->PLL.PLLQ << RCC_PLLCFGR_PLLQ_Pos)) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLR) != (RCC_OscInitStruct->PLL.PLLR << RCC_PLLCFGR_PLLR_Pos))) +#else + if (((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_OFF) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLSRC) != RCC_OscInitStruct->PLL.PLLSource) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLM) != (RCC_OscInitStruct->PLL.PLLM) << RCC_PLLCFGR_PLLM_Pos) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLN) != (RCC_OscInitStruct->PLL.PLLN) << RCC_PLLCFGR_PLLN_Pos) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLP) != (((RCC_OscInitStruct->PLL.PLLP >> 1U) - 1U)) << RCC_PLLCFGR_PLLP_Pos) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLQ) != (RCC_OscInitStruct->PLL.PLLQ << RCC_PLLCFGR_PLLQ_Pos))) +#endif /* RCC_PLLCFGR_PLLR */ + { + return HAL_ERROR; + } + } + } + } + return HAL_OK; +} + +/** + * @brief Configures the RCC_OscInitStruct according to the internal + * RCC configuration registers. + * @param RCC_OscInitStruct pointer to an RCC_OscInitTypeDef structure that will be configured. + * + * @note This function is only available in case of STM32F410xx/STM32F446xx/STM32F469xx/STM32F479xx/STM32F412Zx/STM32F412Vx/STM32F412Rx/STM32F412Cx devices. 
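A short hedged sketch of reading the configuration back with HAL_RCC_GetOscConfig(), defined just below (the helper name is an assumption):

#include "stm32f4xx_hal.h"

/* Hypothetical helper: report whether the main PLL is currently running. */
static uint32_t Main_PLL_Is_On(void)
{
  RCC_OscInitTypeDef osc_config = {0};

  HAL_RCC_GetOscConfig(&osc_config);

  /* PLLM/PLLN/PLLP/PLLQ/PLLR are also filled in and can be logged or checked here. */
  return (osc_config.PLL.PLLState == RCC_PLL_ON) ? 1U : 0U;
}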
+ * @note This function add the PLL/PLLR factor management + * @retval None + */ +void HAL_RCC_GetOscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) +{ + /* Set all possible values for the Oscillator type parameter ---------------*/ + RCC_OscInitStruct->OscillatorType = RCC_OSCILLATORTYPE_HSE | RCC_OSCILLATORTYPE_HSI | RCC_OSCILLATORTYPE_LSE | RCC_OSCILLATORTYPE_LSI; + + /* Get the HSE configuration -----------------------------------------------*/ + if ((RCC->CR & RCC_CR_HSEBYP) == RCC_CR_HSEBYP) + { + RCC_OscInitStruct->HSEState = RCC_HSE_BYPASS; + } + else if ((RCC->CR & RCC_CR_HSEON) == RCC_CR_HSEON) + { + RCC_OscInitStruct->HSEState = RCC_HSE_ON; + } + else + { + RCC_OscInitStruct->HSEState = RCC_HSE_OFF; + } + + /* Get the HSI configuration -----------------------------------------------*/ + if ((RCC->CR & RCC_CR_HSION) == RCC_CR_HSION) + { + RCC_OscInitStruct->HSIState = RCC_HSI_ON; + } + else + { + RCC_OscInitStruct->HSIState = RCC_HSI_OFF; + } + + RCC_OscInitStruct->HSICalibrationValue = (uint32_t)((RCC->CR & RCC_CR_HSITRIM) >> RCC_CR_HSITRIM_Pos); + + /* Get the LSE configuration -----------------------------------------------*/ + if ((RCC->BDCR & RCC_BDCR_LSEBYP) == RCC_BDCR_LSEBYP) + { + RCC_OscInitStruct->LSEState = RCC_LSE_BYPASS; + } + else if ((RCC->BDCR & RCC_BDCR_LSEON) == RCC_BDCR_LSEON) + { + RCC_OscInitStruct->LSEState = RCC_LSE_ON; + } + else + { + RCC_OscInitStruct->LSEState = RCC_LSE_OFF; + } + + /* Get the LSI configuration -----------------------------------------------*/ + if ((RCC->CSR & RCC_CSR_LSION) == RCC_CSR_LSION) + { + RCC_OscInitStruct->LSIState = RCC_LSI_ON; + } + else + { + RCC_OscInitStruct->LSIState = RCC_LSI_OFF; + } + + /* Get the PLL configuration -----------------------------------------------*/ + if ((RCC->CR & RCC_CR_PLLON) == RCC_CR_PLLON) + { + RCC_OscInitStruct->PLL.PLLState = RCC_PLL_ON; + } + else + { + RCC_OscInitStruct->PLL.PLLState = RCC_PLL_OFF; + } + RCC_OscInitStruct->PLL.PLLSource = (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC); + RCC_OscInitStruct->PLL.PLLM = (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM); + RCC_OscInitStruct->PLL.PLLN = (uint32_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos); + RCC_OscInitStruct->PLL.PLLP = (uint32_t)((((RCC->PLLCFGR & RCC_PLLCFGR_PLLP) + RCC_PLLCFGR_PLLP_0) << 1U) >> RCC_PLLCFGR_PLLP_Pos); + RCC_OscInitStruct->PLL.PLLQ = (uint32_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLQ) >> RCC_PLLCFGR_PLLQ_Pos); + RCC_OscInitStruct->PLL.PLLR = (uint32_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLR) >> RCC_PLLCFGR_PLLR_Pos); +} +#endif /* STM32F410xx || STM32F446xx || STM32F469xx || STM32F479xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ + +#endif /* HAL_RCC_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_spi.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_spi.c new file mode 100644 index 0000000..99b5549 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_spi.c @@ -0,0 +1,3930 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_spi.c + * @author MCD Application Team + * @brief SPI HAL module driver. 
+ * This file provides firmware functions to manage the following + * functionalities of the Serial Peripheral Interface (SPI) peripheral: + * + Initialization and de-initialization functions + * + IO operation functions + * + Peripheral Control functions + * + Peripheral State functions + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + The SPI HAL driver can be used as follows: + + (#) Declare a SPI_HandleTypeDef handle structure, for example: + SPI_HandleTypeDef hspi; + + (#)Initialize the SPI low level resources by implementing the HAL_SPI_MspInit() API: + (##) Enable the SPIx interface clock + (##) SPI pins configuration + (+++) Enable the clock for the SPI GPIOs + (+++) Configure these SPI pins as alternate function push-pull + (##) NVIC configuration if you need to use interrupt process + (+++) Configure the SPIx interrupt priority + (+++) Enable the NVIC SPI IRQ handle + (##) DMA Configuration if you need to use DMA process + (+++) Declare a DMA_HandleTypeDef handle structure for the transmit or receive Stream/Channel + (+++) Enable the DMAx clock + (+++) Configure the DMA handle parameters + (+++) Configure the DMA Tx or Rx Stream/Channel + (+++) Associate the initialized hdma_tx(or _rx) handle to the hspi DMA Tx or Rx handle + (+++) Configure the priority and enable the NVIC for the transfer complete interrupt on the DMA Tx or Rx Stream/Channel + + (#) Program the Mode, BidirectionalMode , Data size, Baudrate Prescaler, NSS + management, Clock polarity and phase, FirstBit and CRC configuration in the hspi Init structure. + + (#) Initialize the SPI registers by calling the HAL_SPI_Init() API: + (++) This API configures also the low level Hardware GPIO, CLOCK, CORTEX...etc) + by calling the customized HAL_SPI_MspInit() API. + [..] + Circular mode restriction: + (#) The DMA circular mode cannot be used when the SPI is configured in these modes: + (##) Master 2Lines RxOnly + (##) Master 1Line Rx + (#) The CRC feature is not managed when the DMA circular mode is enabled + (#) When the SPI DMA Pause/Stop features are used, we must use the following APIs + the HAL_SPI_DMAPause()/ HAL_SPI_DMAStop() only under the SPI callbacks + [..] + Master Receive mode restriction: + (#) In Master unidirectional receive-only mode (MSTR =1, BIDIMODE=0, RXONLY=1) or + bidirectional receive mode (MSTR=1, BIDIMODE=1, BIDIOE=0), to ensure that the SPI + does not initiate a new transfer the following procedure has to be respected: + (##) HAL_SPI_DeInit() + (##) HAL_SPI_Init() + [..] + Callback registration: + + (#) The compilation flag USE_HAL_SPI_REGISTER_CALLBACKS when set to 1U + allows the user to configure dynamically the driver callbacks. + Use Functions HAL_SPI_RegisterCallback() to register an interrupt callback. 
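Before the detailed list of callback IDs below, a hedged end-to-end sketch of bringing up an SPI instance and registering a callback (the handle name, prescaler, and the App_SPI_TxCpltCallback()/Error_Handler() hooks are assumptions, not part of this file):

#include "stm32f4xx_hal.h"

extern void Error_Handler(void);   /* application-defined error hook (assumed) */

static SPI_HandleTypeDef hspi1;    /* handle name is an assumption */

#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U)
/* User callback (assumed name), invoked instead of the weak HAL_SPI_TxCpltCallback(). */
static void App_SPI_TxCpltCallback(SPI_HandleTypeDef *hspi)
{
  (void)hspi;   /* transfer finished, signal the application here */
}
#endif

static void SPI1_Config(void)
{
  hspi1.Instance               = SPI1;
  hspi1.Init.Mode              = SPI_MODE_MASTER;
  hspi1.Init.Direction         = SPI_DIRECTION_2LINES;
  hspi1.Init.DataSize          = SPI_DATASIZE_8BIT;
  hspi1.Init.CLKPolarity       = SPI_POLARITY_LOW;
  hspi1.Init.CLKPhase          = SPI_PHASE_1EDGE;
  hspi1.Init.NSS               = SPI_NSS_SOFT;
  hspi1.Init.BaudRatePrescaler = SPI_BAUDRATEPRESCALER_16;
  hspi1.Init.FirstBit          = SPI_FIRSTBIT_MSB;
  hspi1.Init.TIMode            = SPI_TIMODE_DISABLE;
  hspi1.Init.CRCCalculation    = SPI_CRCCALCULATION_DISABLE;
  hspi1.Init.CRCPolynomial     = 10U;

  if (HAL_SPI_Init(&hspi1) != HAL_OK)   /* also calls HAL_SPI_MspInit() for GPIO/clock/NVIC setup */
  {
    Error_Handler();
  }

#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U)
  /* Registration is done after init, while the handle is in HAL_SPI_STATE_READY. */
  (void)HAL_SPI_RegisterCallback(&hspi1, HAL_SPI_TX_COMPLETE_CB_ID, App_SPI_TxCpltCallback);
#endif
}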
+ + Function HAL_SPI_RegisterCallback() allows registering the following callbacks: + (++) TxCpltCallback : SPI Tx Completed callback + (++) RxCpltCallback : SPI Rx Completed callback + (++) TxRxCpltCallback : SPI TxRx Completed callback + (++) TxHalfCpltCallback : SPI Tx Half Completed callback + (++) RxHalfCpltCallback : SPI Rx Half Completed callback + (++) TxRxHalfCpltCallback : SPI TxRx Half Completed callback + (++) ErrorCallback : SPI Error callback + (++) AbortCpltCallback : SPI Abort callback + (++) MspInitCallback : SPI Msp Init callback + (++) MspDeInitCallback : SPI Msp DeInit callback + This function takes as parameters the HAL peripheral handle, the Callback ID + and a pointer to the user callback function. + + + (#) Use the HAL_SPI_UnRegisterCallback() function to reset a callback to the default + weak function. + HAL_SPI_UnRegisterCallback() takes as parameters the HAL peripheral handle + and the Callback ID. + This function allows resetting the following callbacks: + (++) TxCpltCallback : SPI Tx Completed callback + (++) RxCpltCallback : SPI Rx Completed callback + (++) TxRxCpltCallback : SPI TxRx Completed callback + (++) TxHalfCpltCallback : SPI Tx Half Completed callback + (++) RxHalfCpltCallback : SPI Rx Half Completed callback + (++) TxRxHalfCpltCallback : SPI TxRx Half Completed callback + (++) ErrorCallback : SPI Error callback + (++) AbortCpltCallback : SPI Abort callback + (++) MspInitCallback : SPI Msp Init callback + (++) MspDeInitCallback : SPI Msp DeInit callback + + [..] + By default, after HAL_SPI_Init() and when the state is HAL_SPI_STATE_RESET, + all callbacks are set to the corresponding weak functions: + for example HAL_SPI_TxCpltCallback() and HAL_SPI_RxCpltCallback(). + An exception is made for the MspInit and MspDeInit functions, which are + reset to the legacy weak functions in HAL_SPI_Init()/HAL_SPI_DeInit() only when + these callbacks are null (not registered beforehand). + If MspInit or MspDeInit is not null, HAL_SPI_Init()/HAL_SPI_DeInit() + keeps and uses the user MspInit/MspDeInit callbacks (registered beforehand) whatever the state. + + [..] + Callbacks can be registered/unregistered in the HAL_SPI_STATE_READY state only. + An exception is made for the MspInit/MspDeInit functions, which can be registered/unregistered + in the HAL_SPI_STATE_READY or HAL_SPI_STATE_RESET state; + thus, registered (user) MspInit/DeInit callbacks can be used during the Init/DeInit. + In that case, the user first registers the MspInit/MspDeInit user callbacks + using HAL_SPI_RegisterCallback() before calling HAL_SPI_DeInit() + or HAL_SPI_Init(). + + [..] + When the compilation define USE_HAL_PPP_REGISTER_CALLBACKS is set to 0 or + not defined, the callback registration feature is not available + and the weak (overridable) callbacks are used. + + [..] + Using the HAL it is not possible to reach all supported SPI frequencies with the different SPI modes; + the following tables summarize the maximum SPI frequency reached with a data size of 8 bits/16 bits, + according to the frequency of the APBx Peripheral Clock (fPCLK) used by the SPI instance. 
+
+    [..]
+      Using the HAL, it is not possible to reach all supported SPI frequencies with the
+      different SPI modes. The following table summarizes the maximum SPI frequency reached
+      with data size 8 bits/16 bits, according to the frequency of the APBx peripheral
+      clock (fPCLK) used by the SPI instance.
+
+  @endverbatim
+
+  Additional table :
+
+       DataSize = SPI_DATASIZE_8BIT:
+       +----------------------------------------------------------------------------------------------+
+       |         |                | 2Lines Fullduplex   |     2Lines RxOnly    |         1Line        |
+       | Process | Transfer mode  |---------------------|----------------------|----------------------|
+       |         |                |  Master  |  Slave   |  Master   |  Slave   |  Master   |  Slave   |
+       |==============================================================================================|
+       |    T    |     Polling    | Fpclk/2  | Fpclk/2  |    NA     |    NA    |    NA     |    NA    |
+       |    X    |----------------|----------|----------|-----------|----------|-----------|----------|
+       |    /    |     Interrupt  | Fpclk/4  | Fpclk/8  |    NA     |    NA    |    NA     |    NA    |
+       |    R    |----------------|----------|----------|-----------|----------|-----------|----------|
+       |    X    |       DMA      | Fpclk/2  | Fpclk/2  |    NA     |    NA    |    NA     |    NA    |
+       |=========|================|==========|==========|===========|==========|===========|==========|
+       |         |     Polling    | Fpclk/2  | Fpclk/2  | Fpclk/64  | Fpclk/2  | Fpclk/64  | Fpclk/2  |
+       |         |----------------|----------|----------|-----------|----------|-----------|----------|
+       |    R    |     Interrupt  | Fpclk/8  | Fpclk/8  | Fpclk/64  | Fpclk/2  | Fpclk/64  | Fpclk/2  |
+       |    X    |----------------|----------|----------|-----------|----------|-----------|----------|
+       |         |       DMA      | Fpclk/2  | Fpclk/2  | Fpclk/64  | Fpclk/2  | Fpclk/128 | Fpclk/2  |
+       |=========|================|==========|==========|===========|==========|===========|==========|
+       |         |     Polling    | Fpclk/2  | Fpclk/4  |     NA    |    NA    |  Fpclk/2  | Fpclk/64 |
+       |         |----------------|----------|----------|-----------|----------|-----------|----------|
+       |    T    |     Interrupt  | Fpclk/2  | Fpclk/4  |     NA    |    NA    |  Fpclk/2  | Fpclk/64 |
+       |    X    |----------------|----------|----------|-----------|----------|-----------|----------|
+       |         |       DMA      | Fpclk/2  | Fpclk/2  |     NA    |    NA    |  Fpclk/2  | Fpclk/128|
+       +----------------------------------------------------------------------------------------------+
+
+       DataSize = SPI_DATASIZE_16BIT:
+       +----------------------------------------------------------------------------------------------+
+       |         |                | 2Lines Fullduplex   |     2Lines RxOnly    |         1Line        |
+       | Process | Transfer mode  |---------------------|----------------------|----------------------|
+       |         |                |  Master  |  Slave   |  Master   |  Slave   |  Master   |  Slave   |
+       |==============================================================================================|
+       |    T    |     Polling    | Fpclk/2  | Fpclk/2  |    NA     |    NA    |    NA     |    NA    |
+       |    X    |----------------|----------|----------|-----------|----------|-----------|----------|
+       |    /    |     Interrupt  | Fpclk/4  | Fpclk/4  |    NA     |    NA    |    NA     |    NA    |
+       |    R    |----------------|----------|----------|-----------|----------|-----------|----------|
+       |    X    |       DMA      | Fpclk/2  | Fpclk/2  |    NA     |    NA    |    NA     |    NA    |
+       |=========|================|==========|==========|===========|==========|===========|==========|
+       |         |     Polling    | Fpclk/2  | Fpclk/2  | Fpclk/64  | Fpclk/2  | Fpclk/32  | Fpclk/2  |
+       |         |----------------|----------|----------|-----------|----------|-----------|----------|
+       |    R    |     Interrupt  | Fpclk/4  | Fpclk/4  | Fpclk/64  | Fpclk/2  | Fpclk/64  | Fpclk/2  |
+       |    X    |----------------|----------|----------|-----------|----------|-----------|----------|
+       |         |       DMA      | Fpclk/2  | Fpclk/2  | Fpclk/64  | Fpclk/2  | Fpclk/128 | Fpclk/2  |
+       |=========|================|==========|==========|===========|==========|===========|==========|
+       |         |     Polling    | Fpclk/2  | Fpclk/2  |     NA    |    NA    |  Fpclk/2  | Fpclk/32 |
+       |         |----------------|----------|----------|-----------|----------|-----------|----------|
+       |    T    |     Interrupt  | Fpclk/2  | Fpclk/2  |
NA | NA | Fpclk/2 | Fpclk/64 | + | X |----------------|----------|----------|-----------|----------|-----------|----------| + | | DMA | Fpclk/2 | Fpclk/2 | NA | NA | Fpclk/2 | Fpclk/128| + +----------------------------------------------------------------------------------------------+ + @note The max SPI frequency depend on SPI data size (8bits, 16bits), + SPI mode(2 Lines fullduplex, 2 lines RxOnly, 1 line TX/RX) and Process mode (Polling, IT, DMA). + @note + (#) TX/RX processes are HAL_SPI_TransmitReceive(), HAL_SPI_TransmitReceive_IT() and HAL_SPI_TransmitReceive_DMA() + (#) RX processes are HAL_SPI_Receive(), HAL_SPI_Receive_IT() and HAL_SPI_Receive_DMA() + (#) TX processes are HAL_SPI_Transmit(), HAL_SPI_Transmit_IT() and HAL_SPI_Transmit_DMA() + + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @defgroup SPI SPI + * @brief SPI HAL module driver + * @{ + */ +#ifdef HAL_SPI_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private defines -----------------------------------------------------------*/ +/** @defgroup SPI_Private_Constants SPI Private Constants + * @{ + */ +#define SPI_DEFAULT_TIMEOUT 100U +#define SPI_BSY_FLAG_WORKAROUND_TIMEOUT 1000U /*!< Timeout 1000 µs */ +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/** @defgroup SPI_Private_Functions SPI Private Functions + * @{ + */ +static void SPI_DMATransmitCplt(DMA_HandleTypeDef *hdma); +static void SPI_DMAReceiveCplt(DMA_HandleTypeDef *hdma); +static void SPI_DMATransmitReceiveCplt(DMA_HandleTypeDef *hdma); +static void SPI_DMAHalfTransmitCplt(DMA_HandleTypeDef *hdma); +static void SPI_DMAHalfReceiveCplt(DMA_HandleTypeDef *hdma); +static void SPI_DMAHalfTransmitReceiveCplt(DMA_HandleTypeDef *hdma); +static void SPI_DMAError(DMA_HandleTypeDef *hdma); +static void SPI_DMAAbortOnError(DMA_HandleTypeDef *hdma); +static void SPI_DMATxAbortCallback(DMA_HandleTypeDef *hdma); +static void SPI_DMARxAbortCallback(DMA_HandleTypeDef *hdma); +static HAL_StatusTypeDef SPI_WaitFlagStateUntilTimeout(SPI_HandleTypeDef *hspi, uint32_t Flag, FlagStatus State, + uint32_t Timeout, uint32_t Tickstart); +static void SPI_TxISR_8BIT(struct __SPI_HandleTypeDef *hspi); +static void SPI_TxISR_16BIT(struct __SPI_HandleTypeDef *hspi); +static void SPI_RxISR_8BIT(struct __SPI_HandleTypeDef *hspi); +static void SPI_RxISR_16BIT(struct __SPI_HandleTypeDef *hspi); +static void SPI_2linesRxISR_8BIT(struct __SPI_HandleTypeDef *hspi); +static void SPI_2linesTxISR_8BIT(struct __SPI_HandleTypeDef *hspi); +static void SPI_2linesTxISR_16BIT(struct __SPI_HandleTypeDef *hspi); +static void SPI_2linesRxISR_16BIT(struct __SPI_HandleTypeDef *hspi); +#if (USE_SPI_CRC != 0U) +static void SPI_RxISR_8BITCRC(struct __SPI_HandleTypeDef *hspi); +static void SPI_RxISR_16BITCRC(struct __SPI_HandleTypeDef *hspi); +static void SPI_2linesRxISR_8BITCRC(struct __SPI_HandleTypeDef *hspi); +static void SPI_2linesRxISR_16BITCRC(struct __SPI_HandleTypeDef *hspi); +#endif /* USE_SPI_CRC */ +static void SPI_AbortRx_ISR(SPI_HandleTypeDef *hspi); +static void SPI_AbortTx_ISR(SPI_HandleTypeDef *hspi); +static void SPI_CloseRxTx_ISR(SPI_HandleTypeDef *hspi); +static void SPI_CloseRx_ISR(SPI_HandleTypeDef 
*hspi); +static void SPI_CloseTx_ISR(SPI_HandleTypeDef *hspi); +static HAL_StatusTypeDef SPI_EndRxTransaction(SPI_HandleTypeDef *hspi, uint32_t Timeout, uint32_t Tickstart); +static HAL_StatusTypeDef SPI_EndRxTxTransaction(SPI_HandleTypeDef *hspi, uint32_t Timeout, uint32_t Tickstart); +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup SPI_Exported_Functions SPI Exported Functions + * @{ + */ + +/** @defgroup SPI_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * +@verbatim + =============================================================================== + ##### Initialization and de-initialization functions ##### + =============================================================================== + [..] This subsection provides a set of functions allowing to initialize and + de-initialize the SPIx peripheral: + + (+) User must implement HAL_SPI_MspInit() function in which he configures + all related peripherals resources (CLOCK, GPIO, DMA, IT and NVIC ). + + (+) Call the function HAL_SPI_Init() to configure the selected device with + the selected configuration: + (++) Mode + (++) Direction + (++) Data Size + (++) Clock Polarity and Phase + (++) NSS Management + (++) BaudRate Prescaler + (++) FirstBit + (++) TIMode + (++) CRC Calculation + (++) CRC Polynomial if CRC enabled + + (+) Call the function HAL_SPI_DeInit() to restore the default configuration + of the selected SPIx peripheral. + +@endverbatim + * @{ + */ + +/** + * @brief Initialize the SPI according to the specified parameters + * in the SPI_InitTypeDef and initialize the associated handle. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_Init(SPI_HandleTypeDef *hspi) +{ + /* Check the SPI handle allocation */ + if (hspi == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_SPI_ALL_INSTANCE(hspi->Instance)); + assert_param(IS_SPI_MODE(hspi->Init.Mode)); + assert_param(IS_SPI_DIRECTION(hspi->Init.Direction)); + assert_param(IS_SPI_DATASIZE(hspi->Init.DataSize)); + assert_param(IS_SPI_NSS(hspi->Init.NSS)); + assert_param(IS_SPI_BAUDRATE_PRESCALER(hspi->Init.BaudRatePrescaler)); + assert_param(IS_SPI_FIRST_BIT(hspi->Init.FirstBit)); + assert_param(IS_SPI_TIMODE(hspi->Init.TIMode)); + if (hspi->Init.TIMode == SPI_TIMODE_DISABLE) + { + assert_param(IS_SPI_CPOL(hspi->Init.CLKPolarity)); + assert_param(IS_SPI_CPHA(hspi->Init.CLKPhase)); + + if (hspi->Init.Mode == SPI_MODE_MASTER) + { + assert_param(IS_SPI_BAUDRATE_PRESCALER(hspi->Init.BaudRatePrescaler)); + } + else + { + /* Baudrate prescaler not use in Motoraola Slave mode. 
Force to default value */
+      hspi->Init.BaudRatePrescaler = SPI_BAUDRATEPRESCALER_2;
+    }
+  }
+  else
+  {
+    assert_param(IS_SPI_BAUDRATE_PRESCALER(hspi->Init.BaudRatePrescaler));
+
+    /* Force polarity and phase to TI protocol requirements */
+    hspi->Init.CLKPolarity = SPI_POLARITY_LOW;
+    hspi->Init.CLKPhase    = SPI_PHASE_1EDGE;
+  }
+#if (USE_SPI_CRC != 0U)
+  assert_param(IS_SPI_CRC_CALCULATION(hspi->Init.CRCCalculation));
+  if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE)
+  {
+    assert_param(IS_SPI_CRC_POLYNOMIAL(hspi->Init.CRCPolynomial));
+  }
+#else
+  hspi->Init.CRCCalculation = SPI_CRCCALCULATION_DISABLE;
+#endif /* USE_SPI_CRC */
+
+  if (hspi->State == HAL_SPI_STATE_RESET)
+  {
+    /* Allocate lock resource and initialize it */
+    hspi->Lock = HAL_UNLOCKED;
+
+#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U)
+    /* Init the SPI Callback settings */
+    hspi->TxCpltCallback       = HAL_SPI_TxCpltCallback;       /* Legacy weak TxCpltCallback       */
+    hspi->RxCpltCallback       = HAL_SPI_RxCpltCallback;       /* Legacy weak RxCpltCallback       */
+    hspi->TxRxCpltCallback     = HAL_SPI_TxRxCpltCallback;     /* Legacy weak TxRxCpltCallback     */
+    hspi->TxHalfCpltCallback   = HAL_SPI_TxHalfCpltCallback;   /* Legacy weak TxHalfCpltCallback   */
+    hspi->RxHalfCpltCallback   = HAL_SPI_RxHalfCpltCallback;   /* Legacy weak RxHalfCpltCallback   */
+    hspi->TxRxHalfCpltCallback = HAL_SPI_TxRxHalfCpltCallback; /* Legacy weak TxRxHalfCpltCallback */
+    hspi->ErrorCallback        = HAL_SPI_ErrorCallback;        /* Legacy weak ErrorCallback        */
+    hspi->AbortCpltCallback    = HAL_SPI_AbortCpltCallback;    /* Legacy weak AbortCpltCallback    */
+
+    if (hspi->MspInitCallback == NULL)
+    {
+      hspi->MspInitCallback = HAL_SPI_MspInit; /* Legacy weak MspInit */
+    }
+
+    /* Init the low level hardware : GPIO, CLOCK, NVIC... */
+    hspi->MspInitCallback(hspi);
+#else
+    /* Init the low level hardware : GPIO, CLOCK, NVIC...
*/ + HAL_SPI_MspInit(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + } + + hspi->State = HAL_SPI_STATE_BUSY; + + /* Disable the selected SPI peripheral */ + __HAL_SPI_DISABLE(hspi); + + /*----------------------- SPIx CR1 & CR2 Configuration ---------------------*/ + /* Configure : SPI Mode, Communication Mode, Data size, Clock polarity and phase, NSS management, + Communication speed, First bit and CRC calculation state */ + WRITE_REG(hspi->Instance->CR1, ((hspi->Init.Mode & (SPI_CR1_MSTR | SPI_CR1_SSI)) | + (hspi->Init.Direction & (SPI_CR1_RXONLY | SPI_CR1_BIDIMODE)) | + (hspi->Init.DataSize & SPI_CR1_DFF) | + (hspi->Init.CLKPolarity & SPI_CR1_CPOL) | + (hspi->Init.CLKPhase & SPI_CR1_CPHA) | + (hspi->Init.NSS & SPI_CR1_SSM) | + (hspi->Init.BaudRatePrescaler & SPI_CR1_BR_Msk) | + (hspi->Init.FirstBit & SPI_CR1_LSBFIRST) | + (hspi->Init.CRCCalculation & SPI_CR1_CRCEN))); + + /* Configure : NSS management, TI Mode */ + WRITE_REG(hspi->Instance->CR2, (((hspi->Init.NSS >> 16U) & SPI_CR2_SSOE) | (hspi->Init.TIMode & SPI_CR2_FRF))); + +#if (USE_SPI_CRC != 0U) + /*---------------------------- SPIx CRCPOLY Configuration ------------------*/ + /* Configure : CRC Polynomial */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + WRITE_REG(hspi->Instance->CRCPR, (hspi->Init.CRCPolynomial & SPI_CRCPR_CRCPOLY_Msk)); + } +#endif /* USE_SPI_CRC */ + +#if defined(SPI_I2SCFGR_I2SMOD) + /* Activate the SPI mode (Make sure that I2SMOD bit in I2SCFGR register is reset) */ + CLEAR_BIT(hspi->Instance->I2SCFGR, SPI_I2SCFGR_I2SMOD); +#endif /* SPI_I2SCFGR_I2SMOD */ + + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + hspi->State = HAL_SPI_STATE_READY; + + return HAL_OK; +} + +/** + * @brief De-Initialize the SPI peripheral. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_DeInit(SPI_HandleTypeDef *hspi) +{ + /* Check the SPI handle allocation */ + if (hspi == NULL) + { + return HAL_ERROR; + } + + /* Check SPI Instance parameter */ + assert_param(IS_SPI_ALL_INSTANCE(hspi->Instance)); + + hspi->State = HAL_SPI_STATE_BUSY; + + /* Disable the SPI Peripheral Clock */ + __HAL_SPI_DISABLE(hspi); + +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + if (hspi->MspDeInitCallback == NULL) + { + hspi->MspDeInitCallback = HAL_SPI_MspDeInit; /* Legacy weak MspDeInit */ + } + + /* DeInit the low level hardware: GPIO, CLOCK, NVIC... */ + hspi->MspDeInitCallback(hspi); +#else + /* DeInit the low level hardware: GPIO, CLOCK, NVIC... */ + HAL_SPI_MspDeInit(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + hspi->State = HAL_SPI_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(hspi); + + return HAL_OK; +} + +/** + * @brief Initialize the SPI MSP. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +__weak void HAL_SPI_MspInit(SPI_HandleTypeDef *hspi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hspi); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SPI_MspInit should be implemented in the user file + */ +} + +/** + * @brief De-Initialize the SPI MSP. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. 
+ * @retval None + */ +__weak void HAL_SPI_MspDeInit(SPI_HandleTypeDef *hspi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hspi); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SPI_MspDeInit should be implemented in the user file + */ +} + +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) +/** + * @brief Register a User SPI Callback + * To be used instead of the weak predefined callback + * @param hspi Pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for the specified SPI. + * @param CallbackID ID of the callback to be registered + * @param pCallback pointer to the Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_RegisterCallback(SPI_HandleTypeDef *hspi, HAL_SPI_CallbackIDTypeDef CallbackID, + pSPI_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hspi->ErrorCode |= HAL_SPI_ERROR_INVALID_CALLBACK; + + return HAL_ERROR; + } + /* Process locked */ + __HAL_LOCK(hspi); + + if (HAL_SPI_STATE_READY == hspi->State) + { + switch (CallbackID) + { + case HAL_SPI_TX_COMPLETE_CB_ID : + hspi->TxCpltCallback = pCallback; + break; + + case HAL_SPI_RX_COMPLETE_CB_ID : + hspi->RxCpltCallback = pCallback; + break; + + case HAL_SPI_TX_RX_COMPLETE_CB_ID : + hspi->TxRxCpltCallback = pCallback; + break; + + case HAL_SPI_TX_HALF_COMPLETE_CB_ID : + hspi->TxHalfCpltCallback = pCallback; + break; + + case HAL_SPI_RX_HALF_COMPLETE_CB_ID : + hspi->RxHalfCpltCallback = pCallback; + break; + + case HAL_SPI_TX_RX_HALF_COMPLETE_CB_ID : + hspi->TxRxHalfCpltCallback = pCallback; + break; + + case HAL_SPI_ERROR_CB_ID : + hspi->ErrorCallback = pCallback; + break; + + case HAL_SPI_ABORT_CB_ID : + hspi->AbortCpltCallback = pCallback; + break; + + case HAL_SPI_MSPINIT_CB_ID : + hspi->MspInitCallback = pCallback; + break; + + case HAL_SPI_MSPDEINIT_CB_ID : + hspi->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_INVALID_CALLBACK); + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (HAL_SPI_STATE_RESET == hspi->State) + { + switch (CallbackID) + { + case HAL_SPI_MSPINIT_CB_ID : + hspi->MspInitCallback = pCallback; + break; + + case HAL_SPI_MSPDEINIT_CB_ID : + hspi->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_INVALID_CALLBACK); + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_INVALID_CALLBACK); + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hspi); + return status; +} + +/** + * @brief Unregister an SPI Callback + * SPI callback is redirected to the weak predefined callback + * @param hspi Pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for the specified SPI. 
+ * @param CallbackID ID of the callback to be unregistered + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_UnRegisterCallback(SPI_HandleTypeDef *hspi, HAL_SPI_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hspi); + + if (HAL_SPI_STATE_READY == hspi->State) + { + switch (CallbackID) + { + case HAL_SPI_TX_COMPLETE_CB_ID : + hspi->TxCpltCallback = HAL_SPI_TxCpltCallback; /* Legacy weak TxCpltCallback */ + break; + + case HAL_SPI_RX_COMPLETE_CB_ID : + hspi->RxCpltCallback = HAL_SPI_RxCpltCallback; /* Legacy weak RxCpltCallback */ + break; + + case HAL_SPI_TX_RX_COMPLETE_CB_ID : + hspi->TxRxCpltCallback = HAL_SPI_TxRxCpltCallback; /* Legacy weak TxRxCpltCallback */ + break; + + case HAL_SPI_TX_HALF_COMPLETE_CB_ID : + hspi->TxHalfCpltCallback = HAL_SPI_TxHalfCpltCallback; /* Legacy weak TxHalfCpltCallback */ + break; + + case HAL_SPI_RX_HALF_COMPLETE_CB_ID : + hspi->RxHalfCpltCallback = HAL_SPI_RxHalfCpltCallback; /* Legacy weak RxHalfCpltCallback */ + break; + + case HAL_SPI_TX_RX_HALF_COMPLETE_CB_ID : + hspi->TxRxHalfCpltCallback = HAL_SPI_TxRxHalfCpltCallback; /* Legacy weak TxRxHalfCpltCallback */ + break; + + case HAL_SPI_ERROR_CB_ID : + hspi->ErrorCallback = HAL_SPI_ErrorCallback; /* Legacy weak ErrorCallback */ + break; + + case HAL_SPI_ABORT_CB_ID : + hspi->AbortCpltCallback = HAL_SPI_AbortCpltCallback; /* Legacy weak AbortCpltCallback */ + break; + + case HAL_SPI_MSPINIT_CB_ID : + hspi->MspInitCallback = HAL_SPI_MspInit; /* Legacy weak MspInit */ + break; + + case HAL_SPI_MSPDEINIT_CB_ID : + hspi->MspDeInitCallback = HAL_SPI_MspDeInit; /* Legacy weak MspDeInit */ + break; + + default : + /* Update the error code */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_INVALID_CALLBACK); + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (HAL_SPI_STATE_RESET == hspi->State) + { + switch (CallbackID) + { + case HAL_SPI_MSPINIT_CB_ID : + hspi->MspInitCallback = HAL_SPI_MspInit; /* Legacy weak MspInit */ + break; + + case HAL_SPI_MSPDEINIT_CB_ID : + hspi->MspDeInitCallback = HAL_SPI_MspDeInit; /* Legacy weak MspDeInit */ + break; + + default : + /* Update the error code */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_INVALID_CALLBACK); + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_INVALID_CALLBACK); + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hspi); + return status; +} +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +/** + * @} + */ + +/** @defgroup SPI_Exported_Functions_Group2 IO operation functions + * @brief Data transfers functions + * +@verbatim + ============================================================================== + ##### IO operation functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to manage the SPI + data transfers. + + [..] The SPI supports master and slave mode : + + (#) There are two modes of transfer: + (++) Blocking mode: The communication is performed in polling mode. + The HAL status of all data processing is returned by the same function + after finishing transfer. + (++) No-Blocking mode: The communication is performed using Interrupts + or DMA, These APIs return the HAL status. + The end of the data processing will be indicated through the + dedicated SPI IRQ when using Interrupt mode or the DMA IRQ when + using DMA mode. 
+ The HAL_SPI_TxCpltCallback(), HAL_SPI_RxCpltCallback() and HAL_SPI_TxRxCpltCallback() user callbacks + will be executed respectively at the end of the transmit or Receive process + The HAL_SPI_ErrorCallback()user callback will be executed when a communication error is detected + + (#) APIs provided for these 2 transfer modes (Blocking mode or Non blocking mode using either Interrupt or DMA) + exist for 1Line (simplex) and 2Lines (full duplex) modes. + +@endverbatim + * @{ + */ + +/** + * @brief Transmit an amount of data in blocking mode. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @param pData pointer to data buffer + * @param Size amount of data to be sent + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_Transmit(SPI_HandleTypeDef *hspi, const uint8_t *pData, uint16_t Size, uint32_t Timeout) +{ + uint32_t tickstart; + uint16_t initial_TxXferCount; + + /* Check Direction parameter */ + assert_param(IS_SPI_DIRECTION_2LINES_OR_1LINE(hspi->Init.Direction)); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + initial_TxXferCount = Size; + + if (hspi->State != HAL_SPI_STATE_READY) + { + return HAL_BUSY; + } + + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + /* Process Locked */ + __HAL_LOCK(hspi); + + /* Set the transaction information */ + hspi->State = HAL_SPI_STATE_BUSY_TX; + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + hspi->pTxBuffPtr = (const uint8_t *)pData; + hspi->TxXferSize = Size; + hspi->TxXferCount = Size; + + /*Init field not used in handle to zero */ + hspi->pRxBuffPtr = (uint8_t *)NULL; + hspi->RxXferSize = 0U; + hspi->RxXferCount = 0U; + hspi->TxISR = NULL; + hspi->RxISR = NULL; + + /* Configure communication direction : 1Line */ + if (hspi->Init.Direction == SPI_DIRECTION_1LINE) + { + /* Disable SPI Peripheral before set 1Line direction (BIDIOE bit) */ + __HAL_SPI_DISABLE(hspi); + SPI_1LINE_TX(hspi); + } + +#if (USE_SPI_CRC != 0U) + /* Reset CRC Calculation */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + SPI_RESET_CRC(hspi); + } +#endif /* USE_SPI_CRC */ + + /* Check if the SPI is already enabled */ + if ((hspi->Instance->CR1 & SPI_CR1_SPE) != SPI_CR1_SPE) + { + /* Enable SPI peripheral */ + __HAL_SPI_ENABLE(hspi); + } + + /* Transmit data in 16 Bit mode */ + if (hspi->Init.DataSize == SPI_DATASIZE_16BIT) + { + if ((hspi->Init.Mode == SPI_MODE_SLAVE) || (initial_TxXferCount == 0x01U)) + { + hspi->Instance->DR = *((const uint16_t *)hspi->pTxBuffPtr); + hspi->pTxBuffPtr += sizeof(uint16_t); + hspi->TxXferCount--; + } + /* Transmit data in 16 Bit mode */ + while (hspi->TxXferCount > 0U) + { + /* Wait until TXE flag is set to send data */ + if (__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_TXE)) + { + hspi->Instance->DR = *((const uint16_t *)hspi->pTxBuffPtr); + hspi->pTxBuffPtr += sizeof(uint16_t); + hspi->TxXferCount--; + } + else + { + /* Timeout management */ + if ((((HAL_GetTick() - tickstart) >= Timeout) && (Timeout != HAL_MAX_DELAY)) || (Timeout == 0U)) + { + hspi->State = HAL_SPI_STATE_READY; + __HAL_UNLOCK(hspi); + return HAL_TIMEOUT; + } + } + } + } + /* Transmit data in 8 Bit mode */ + else + { + if ((hspi->Init.Mode == SPI_MODE_SLAVE) || (initial_TxXferCount == 0x01U)) + { + *((__IO uint8_t *)&hspi->Instance->DR) = *((const uint8_t *)hspi->pTxBuffPtr); + hspi->pTxBuffPtr += sizeof(uint8_t); + hspi->TxXferCount--; + } + while (hspi->TxXferCount > 0U) + { + /* Wait until TXE flag is set to send 
data */ + if (__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_TXE)) + { + *((__IO uint8_t *)&hspi->Instance->DR) = *((const uint8_t *)hspi->pTxBuffPtr); + hspi->pTxBuffPtr += sizeof(uint8_t); + hspi->TxXferCount--; + } + else + { + /* Timeout management */ + if ((((HAL_GetTick() - tickstart) >= Timeout) && (Timeout != HAL_MAX_DELAY)) || (Timeout == 0U)) + { + hspi->State = HAL_SPI_STATE_READY; + __HAL_UNLOCK(hspi); + return HAL_TIMEOUT; + } + } + } + } +#if (USE_SPI_CRC != 0U) + /* Enable CRC Transmission */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + SET_BIT(hspi->Instance->CR1, SPI_CR1_CRCNEXT); + } +#endif /* USE_SPI_CRC */ + + /* Check the end of the transaction */ + if (SPI_EndRxTxTransaction(hspi, Timeout, tickstart) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_FLAG; + } + + /* Clear overrun flag in 2 Lines communication mode because received is not read */ + if (hspi->Init.Direction == SPI_DIRECTION_2LINES) + { + __HAL_SPI_CLEAR_OVRFLAG(hspi); + } + + hspi->State = HAL_SPI_STATE_READY; + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + + if (hspi->ErrorCode != HAL_SPI_ERROR_NONE) + { + return HAL_ERROR; + } + else + { + return HAL_OK; + } +} + +/** + * @brief Receive an amount of data in blocking mode. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @param pData pointer to data buffer + * @param Size amount of data to be received + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_Receive(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size, uint32_t Timeout) +{ +#if (USE_SPI_CRC != 0U) + __IO uint32_t tmpreg = 0U; +#endif /* USE_SPI_CRC */ + uint32_t tickstart; + + if (hspi->State != HAL_SPI_STATE_READY) + { + return HAL_BUSY; + } + + if ((hspi->Init.Mode == SPI_MODE_MASTER) && (hspi->Init.Direction == SPI_DIRECTION_2LINES)) + { + hspi->State = HAL_SPI_STATE_BUSY_RX; + /* Call transmit-receive function to send Dummy data on Tx line and generate clock on CLK line */ + return HAL_SPI_TransmitReceive(hspi, pData, pData, Size, Timeout); + } + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + /* Process Locked */ + __HAL_LOCK(hspi); + + /* Set the transaction information */ + hspi->State = HAL_SPI_STATE_BUSY_RX; + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + hspi->pRxBuffPtr = (uint8_t *)pData; + hspi->RxXferSize = Size; + hspi->RxXferCount = Size; + + /*Init field not used in handle to zero */ + hspi->pTxBuffPtr = (uint8_t *)NULL; + hspi->TxXferSize = 0U; + hspi->TxXferCount = 0U; + hspi->RxISR = NULL; + hspi->TxISR = NULL; + +#if (USE_SPI_CRC != 0U) + /* Reset CRC Calculation */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + SPI_RESET_CRC(hspi); + /* this is done to handle the CRCNEXT before the latest data */ + hspi->RxXferCount--; + } +#endif /* USE_SPI_CRC */ + + /* Configure communication direction: 1Line */ + if (hspi->Init.Direction == SPI_DIRECTION_1LINE) + { + /* Disable SPI Peripheral before set 1Line direction (BIDIOE bit) */ + __HAL_SPI_DISABLE(hspi); + SPI_1LINE_RX(hspi); + } + + /* Check if the SPI is already enabled */ + if ((hspi->Instance->CR1 & SPI_CR1_SPE) != SPI_CR1_SPE) + { + /* Enable SPI peripheral */ + __HAL_SPI_ENABLE(hspi); + } + + /* Receive data in 8 Bit mode */ + if (hspi->Init.DataSize == SPI_DATASIZE_8BIT) + { + /* Transfer loop */ + while (hspi->RxXferCount > 0U) + { + /* Check the RXNE flag */ + if 
(__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_RXNE)) + { + /* read the received data */ + (* (uint8_t *)hspi->pRxBuffPtr) = *(__IO uint8_t *)&hspi->Instance->DR; + hspi->pRxBuffPtr += sizeof(uint8_t); + hspi->RxXferCount--; + } + else + { + /* Timeout management */ + if ((((HAL_GetTick() - tickstart) >= Timeout) && (Timeout != HAL_MAX_DELAY)) || (Timeout == 0U)) + { + hspi->State = HAL_SPI_STATE_READY; + __HAL_UNLOCK(hspi); + return HAL_TIMEOUT; + } + } + } + } + else + { + /* Transfer loop */ + while (hspi->RxXferCount > 0U) + { + /* Check the RXNE flag */ + if (__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_RXNE)) + { + *((uint16_t *)hspi->pRxBuffPtr) = (uint16_t)hspi->Instance->DR; + hspi->pRxBuffPtr += sizeof(uint16_t); + hspi->RxXferCount--; + } + else + { + /* Timeout management */ + if ((((HAL_GetTick() - tickstart) >= Timeout) && (Timeout != HAL_MAX_DELAY)) || (Timeout == 0U)) + { + hspi->State = HAL_SPI_STATE_READY; + __HAL_UNLOCK(hspi); + return HAL_TIMEOUT; + } + } + } + } + +#if (USE_SPI_CRC != 0U) + /* Handle the CRC Transmission */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + /* freeze the CRC before the latest data */ + SET_BIT(hspi->Instance->CR1, SPI_CR1_CRCNEXT); + + /* Read the latest data */ + if (SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_RXNE, SET, Timeout, tickstart) != HAL_OK) + { + /* the latest data has not been received */ + __HAL_UNLOCK(hspi); + return HAL_TIMEOUT; + } + + /* Receive last data in 16 Bit mode */ + if (hspi->Init.DataSize == SPI_DATASIZE_16BIT) + { + *((uint16_t *)hspi->pRxBuffPtr) = (uint16_t)hspi->Instance->DR; + } + /* Receive last data in 8 Bit mode */ + else + { + (*(uint8_t *)hspi->pRxBuffPtr) = *(__IO uint8_t *)&hspi->Instance->DR; + } + + /* Wait the CRC data */ + if (SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_RXNE, SET, Timeout, tickstart) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); + hspi->State = HAL_SPI_STATE_READY; + __HAL_UNLOCK(hspi); + return HAL_TIMEOUT; + } + + /* Read CRC to Flush DR and RXNE flag */ + tmpreg = READ_REG(hspi->Instance->DR); + /* To avoid GCC warning */ + UNUSED(tmpreg); + } +#endif /* USE_SPI_CRC */ + + /* Check the end of the transaction */ + if (SPI_EndRxTransaction(hspi, Timeout, tickstart) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_FLAG; + } + +#if (USE_SPI_CRC != 0U) + /* Check if CRC error occurred */ + if (__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_CRCERR)) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); + __HAL_SPI_CLEAR_CRCERRFLAG(hspi); + } +#endif /* USE_SPI_CRC */ + + hspi->State = HAL_SPI_STATE_READY; + /* Unlock the process */ + __HAL_UNLOCK(hspi); + if (hspi->ErrorCode != HAL_SPI_ERROR_NONE) + { + return HAL_ERROR; + } + else + { + return HAL_OK; + } +} + +/** + * @brief Transmit and Receive an amount of data in blocking mode. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. 
+ * @param pTxData pointer to transmission data buffer + * @param pRxData pointer to reception data buffer + * @param Size amount of data to be sent and received + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_TransmitReceive(SPI_HandleTypeDef *hspi, const uint8_t *pTxData, uint8_t *pRxData, + uint16_t Size, uint32_t Timeout) +{ + uint16_t initial_TxXferCount; + uint32_t tmp_mode; + HAL_SPI_StateTypeDef tmp_state; + uint32_t tickstart; +#if (USE_SPI_CRC != 0U) + __IO uint32_t tmpreg = 0U; +#endif /* USE_SPI_CRC */ + + /* Variable used to alternate Rx and Tx during transfer */ + uint32_t txallowed = 1U; + + /* Check Direction parameter */ + assert_param(IS_SPI_DIRECTION_2LINES(hspi->Init.Direction)); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + /* Init temporary variables */ + tmp_state = hspi->State; + tmp_mode = hspi->Init.Mode; + initial_TxXferCount = Size; + + if (!((tmp_state == HAL_SPI_STATE_READY) || \ + ((tmp_mode == SPI_MODE_MASTER) && (hspi->Init.Direction == SPI_DIRECTION_2LINES) && (tmp_state == HAL_SPI_STATE_BUSY_RX)))) + { + return HAL_BUSY; + } + + if ((pTxData == NULL) || (pRxData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + /* Process Locked */ + __HAL_LOCK(hspi); + + /* Don't overwrite in case of HAL_SPI_STATE_BUSY_RX */ + if (hspi->State != HAL_SPI_STATE_BUSY_RX) + { + hspi->State = HAL_SPI_STATE_BUSY_TX_RX; + } + + /* Set the transaction information */ + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + hspi->pRxBuffPtr = (uint8_t *)pRxData; + hspi->RxXferCount = Size; + hspi->RxXferSize = Size; + hspi->pTxBuffPtr = (const uint8_t *)pTxData; + hspi->TxXferCount = Size; + hspi->TxXferSize = Size; + + /*Init field not used in handle to zero */ + hspi->RxISR = NULL; + hspi->TxISR = NULL; + +#if (USE_SPI_CRC != 0U) + /* Reset CRC Calculation */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + SPI_RESET_CRC(hspi); + } +#endif /* USE_SPI_CRC */ + + /* Check if the SPI is already enabled */ + if ((hspi->Instance->CR1 & SPI_CR1_SPE) != SPI_CR1_SPE) + { + /* Enable SPI peripheral */ + __HAL_SPI_ENABLE(hspi); + } + + /* Transmit and Receive data in 16 Bit mode */ + if (hspi->Init.DataSize == SPI_DATASIZE_16BIT) + { + if ((hspi->Init.Mode == SPI_MODE_SLAVE) || (initial_TxXferCount == 0x01U)) + { + hspi->Instance->DR = *((const uint16_t *)hspi->pTxBuffPtr); + hspi->pTxBuffPtr += sizeof(uint16_t); + hspi->TxXferCount--; + +#if (USE_SPI_CRC != 0U) + /* Enable CRC Transmission */ + if ((hspi->TxXferCount == 0U) && (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE)) + { + SET_BIT(hspi->Instance->CR1, SPI_CR1_CRCNEXT); + } +#endif /* USE_SPI_CRC */ + + } + while ((hspi->TxXferCount > 0U) || (hspi->RxXferCount > 0U)) + { + /* Check TXE flag */ + if ((__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_TXE)) && (hspi->TxXferCount > 0U) && (txallowed == 1U)) + { + hspi->Instance->DR = *((const uint16_t *)hspi->pTxBuffPtr); + hspi->pTxBuffPtr += sizeof(uint16_t); + hspi->TxXferCount--; + /* Next Data is a reception (Rx). 
Tx not allowed */ + txallowed = 0U; + +#if (USE_SPI_CRC != 0U) + /* Enable CRC Transmission */ + if ((hspi->TxXferCount == 0U) && (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE)) + { + SET_BIT(hspi->Instance->CR1, SPI_CR1_CRCNEXT); + } +#endif /* USE_SPI_CRC */ + } + + /* Check RXNE flag */ + if ((__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_RXNE)) && (hspi->RxXferCount > 0U)) + { + *((uint16_t *)hspi->pRxBuffPtr) = (uint16_t)hspi->Instance->DR; + hspi->pRxBuffPtr += sizeof(uint16_t); + hspi->RxXferCount--; + /* Next Data is a Transmission (Tx). Tx is allowed */ + txallowed = 1U; + } + if (((HAL_GetTick() - tickstart) >= Timeout) && (Timeout != HAL_MAX_DELAY)) + { + hspi->State = HAL_SPI_STATE_READY; + __HAL_UNLOCK(hspi); + return HAL_TIMEOUT; + } + } + } + /* Transmit and Receive data in 8 Bit mode */ + else + { + if ((hspi->Init.Mode == SPI_MODE_SLAVE) || (initial_TxXferCount == 0x01U)) + { + *((__IO uint8_t *)&hspi->Instance->DR) = *((const uint8_t *)hspi->pTxBuffPtr); + hspi->pTxBuffPtr += sizeof(uint8_t); + hspi->TxXferCount--; + +#if (USE_SPI_CRC != 0U) + /* Enable CRC Transmission */ + if ((hspi->TxXferCount == 0U) && (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE)) + { + SET_BIT(hspi->Instance->CR1, SPI_CR1_CRCNEXT); + } +#endif /* USE_SPI_CRC */ + } + while ((hspi->TxXferCount > 0U) || (hspi->RxXferCount > 0U)) + { + /* Check TXE flag */ + if ((__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_TXE)) && (hspi->TxXferCount > 0U) && (txallowed == 1U)) + { + *(__IO uint8_t *)&hspi->Instance->DR = *((const uint8_t *)hspi->pTxBuffPtr); + hspi->pTxBuffPtr++; + hspi->TxXferCount--; + /* Next Data is a reception (Rx). Tx not allowed */ + txallowed = 0U; + +#if (USE_SPI_CRC != 0U) + /* Enable CRC Transmission */ + if ((hspi->TxXferCount == 0U) && (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE)) + { + SET_BIT(hspi->Instance->CR1, SPI_CR1_CRCNEXT); + } +#endif /* USE_SPI_CRC */ + } + + /* Wait until RXNE flag is reset */ + if ((__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_RXNE)) && (hspi->RxXferCount > 0U)) + { + (*(uint8_t *)hspi->pRxBuffPtr) = hspi->Instance->DR; + hspi->pRxBuffPtr++; + hspi->RxXferCount--; + /* Next Data is a Transmission (Tx). 
Tx is allowed */ + txallowed = 1U; + } + if ((((HAL_GetTick() - tickstart) >= Timeout) && ((Timeout != HAL_MAX_DELAY))) || (Timeout == 0U)) + { + hspi->State = HAL_SPI_STATE_READY; + __HAL_UNLOCK(hspi); + return HAL_TIMEOUT; + } + } + } + +#if (USE_SPI_CRC != 0U) + /* Read CRC from DR to close CRC calculation process */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + /* Wait until TXE flag */ + if (SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_RXNE, SET, Timeout, tickstart) != HAL_OK) + { + /* Error on the CRC reception */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); + hspi->State = HAL_SPI_STATE_READY; + __HAL_UNLOCK(hspi); + return HAL_TIMEOUT; + } + /* Read CRC */ + tmpreg = READ_REG(hspi->Instance->DR); + /* To avoid GCC warning */ + UNUSED(tmpreg); + } + + /* Check if CRC error occurred */ + if (__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_CRCERR)) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); + /* Clear CRC Flag */ + __HAL_SPI_CLEAR_CRCERRFLAG(hspi); + __HAL_UNLOCK(hspi); + return HAL_ERROR; + } +#endif /* USE_SPI_CRC */ + + /* Check the end of the transaction */ + if (SPI_EndRxTxTransaction(hspi, Timeout, tickstart) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_FLAG; + __HAL_UNLOCK(hspi); + return HAL_ERROR; + } + + /* Clear overrun flag in 2 Lines communication mode because received is not read */ + if (hspi->Init.Direction == SPI_DIRECTION_2LINES) + { + __HAL_SPI_CLEAR_OVRFLAG(hspi); + } + + + hspi->State = HAL_SPI_STATE_READY; + /* Unlock the process */ + __HAL_UNLOCK(hspi); + + if (hspi->ErrorCode != HAL_SPI_ERROR_NONE) + { + return HAL_ERROR; + } + else + { + return HAL_OK; + } +} + +/** + * @brief Transmit an amount of data in non-blocking mode with Interrupt. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. 
+ * @param pData pointer to data buffer + * @param Size amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_Transmit_IT(SPI_HandleTypeDef *hspi, const uint8_t *pData, uint16_t Size) +{ + + /* Check Direction parameter */ + assert_param(IS_SPI_DIRECTION_2LINES_OR_1LINE(hspi->Init.Direction)); + + + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + if (hspi->State != HAL_SPI_STATE_READY) + { + return HAL_BUSY; + } + + /* Process Locked */ + __HAL_LOCK(hspi); + + /* Set the transaction information */ + hspi->State = HAL_SPI_STATE_BUSY_TX; + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + hspi->pTxBuffPtr = (const uint8_t *)pData; + hspi->TxXferSize = Size; + hspi->TxXferCount = Size; + + /* Init field not used in handle to zero */ + hspi->pRxBuffPtr = (uint8_t *)NULL; + hspi->RxXferSize = 0U; + hspi->RxXferCount = 0U; + hspi->RxISR = NULL; + + /* Set the function for IT treatment */ + if (hspi->Init.DataSize > SPI_DATASIZE_8BIT) + { + hspi->TxISR = SPI_TxISR_16BIT; + } + else + { + hspi->TxISR = SPI_TxISR_8BIT; + } + + /* Configure communication direction : 1Line */ + if (hspi->Init.Direction == SPI_DIRECTION_1LINE) + { + /* Disable SPI Peripheral before set 1Line direction (BIDIOE bit) */ + __HAL_SPI_DISABLE(hspi); + SPI_1LINE_TX(hspi); + } + +#if (USE_SPI_CRC != 0U) + /* Reset CRC Calculation */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + SPI_RESET_CRC(hspi); + } +#endif /* USE_SPI_CRC */ + + /* Check if the SPI is already enabled */ + if ((hspi->Instance->CR1 & SPI_CR1_SPE) != SPI_CR1_SPE) + { + /* Enable SPI peripheral */ + __HAL_SPI_ENABLE(hspi); + } + + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + /* Enable TXE and ERR interrupt */ + __HAL_SPI_ENABLE_IT(hspi, (SPI_IT_TXE | SPI_IT_ERR)); + + return HAL_OK; +} + +/** + * @brief Receive an amount of data in non-blocking mode with Interrupt. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. 
+ * @param pData pointer to data buffer + * @param Size amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_Receive_IT(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size) +{ + + if (hspi->State != HAL_SPI_STATE_READY) + { + return HAL_BUSY; + } + + if ((hspi->Init.Direction == SPI_DIRECTION_2LINES) && (hspi->Init.Mode == SPI_MODE_MASTER)) + { + hspi->State = HAL_SPI_STATE_BUSY_RX; + /* Call transmit-receive function to send Dummy data on Tx line and generate clock on CLK line */ + return HAL_SPI_TransmitReceive_IT(hspi, pData, pData, Size); + } + + + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + /* Process Locked */ + __HAL_LOCK(hspi); + + /* Set the transaction information */ + hspi->State = HAL_SPI_STATE_BUSY_RX; + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + hspi->pRxBuffPtr = (uint8_t *)pData; + hspi->RxXferSize = Size; + hspi->RxXferCount = Size; + + /* Init field not used in handle to zero */ + hspi->pTxBuffPtr = (uint8_t *)NULL; + hspi->TxXferSize = 0U; + hspi->TxXferCount = 0U; + hspi->TxISR = NULL; + + /* Set the function for IT treatment */ + if (hspi->Init.DataSize > SPI_DATASIZE_8BIT) + { + hspi->RxISR = SPI_RxISR_16BIT; + } + else + { + hspi->RxISR = SPI_RxISR_8BIT; + } + + /* Configure communication direction : 1Line */ + if (hspi->Init.Direction == SPI_DIRECTION_1LINE) + { + /* Disable SPI Peripheral before set 1Line direction (BIDIOE bit) */ + __HAL_SPI_DISABLE(hspi); + SPI_1LINE_RX(hspi); + } + +#if (USE_SPI_CRC != 0U) + /* Reset CRC Calculation */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + SPI_RESET_CRC(hspi); + } +#endif /* USE_SPI_CRC */ + + /* Note : The SPI must be enabled after unlocking current process + to avoid the risk of SPI interrupt handle execution before current + process unlock */ + + /* Check if the SPI is already enabled */ + if ((hspi->Instance->CR1 & SPI_CR1_SPE) != SPI_CR1_SPE) + { + /* Enable SPI peripheral */ + __HAL_SPI_ENABLE(hspi); + } + + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + /* Enable RXNE and ERR interrupt */ + __HAL_SPI_ENABLE_IT(hspi, (SPI_IT_RXNE | SPI_IT_ERR)); + + return HAL_OK; +} + +/** + * @brief Transmit and Receive an amount of data in non-blocking mode with Interrupt. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. 
+ * @param pTxData pointer to transmission data buffer + * @param pRxData pointer to reception data buffer + * @param Size amount of data to be sent and received + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_TransmitReceive_IT(SPI_HandleTypeDef *hspi, const uint8_t *pTxData, uint8_t *pRxData, + uint16_t Size) +{ + uint32_t tmp_mode; + HAL_SPI_StateTypeDef tmp_state; + + /* Check Direction parameter */ + assert_param(IS_SPI_DIRECTION_2LINES(hspi->Init.Direction)); + + /* Init temporary variables */ + tmp_state = hspi->State; + tmp_mode = hspi->Init.Mode; + + if (!((tmp_state == HAL_SPI_STATE_READY) || \ + ((tmp_mode == SPI_MODE_MASTER) && (hspi->Init.Direction == SPI_DIRECTION_2LINES) && (tmp_state == HAL_SPI_STATE_BUSY_RX)))) + { + return HAL_BUSY; + } + + if ((pTxData == NULL) || (pRxData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + /* Process locked */ + __HAL_LOCK(hspi); + + /* Don't overwrite in case of HAL_SPI_STATE_BUSY_RX */ + if (hspi->State != HAL_SPI_STATE_BUSY_RX) + { + hspi->State = HAL_SPI_STATE_BUSY_TX_RX; + } + + /* Set the transaction information */ + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + hspi->pTxBuffPtr = (const uint8_t *)pTxData; + hspi->TxXferSize = Size; + hspi->TxXferCount = Size; + hspi->pRxBuffPtr = (uint8_t *)pRxData; + hspi->RxXferSize = Size; + hspi->RxXferCount = Size; + + /* Set the function for IT treatment */ + if (hspi->Init.DataSize > SPI_DATASIZE_8BIT) + { + hspi->RxISR = SPI_2linesRxISR_16BIT; + hspi->TxISR = SPI_2linesTxISR_16BIT; + } + else + { + hspi->RxISR = SPI_2linesRxISR_8BIT; + hspi->TxISR = SPI_2linesTxISR_8BIT; + } + +#if (USE_SPI_CRC != 0U) + /* Reset CRC Calculation */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + SPI_RESET_CRC(hspi); + } +#endif /* USE_SPI_CRC */ + + + /* Check if the SPI is already enabled */ + if ((hspi->Instance->CR1 & SPI_CR1_SPE) != SPI_CR1_SPE) + { + /* Enable SPI peripheral */ + __HAL_SPI_ENABLE(hspi); + } + + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + /* Enable TXE, RXNE and ERR interrupt */ + __HAL_SPI_ENABLE_IT(hspi, (SPI_IT_TXE | SPI_IT_RXNE | SPI_IT_ERR)); + + return HAL_OK; +} + +/** + * @brief Transmit an amount of data in non-blocking mode with DMA. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. 
+ * @param pData pointer to data buffer + * @param Size amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_Transmit_DMA(SPI_HandleTypeDef *hspi, const uint8_t *pData, uint16_t Size) +{ + + /* Check tx dma handle */ + assert_param(IS_SPI_DMA_HANDLE(hspi->hdmatx)); + + /* Check Direction parameter */ + assert_param(IS_SPI_DIRECTION_2LINES_OR_1LINE(hspi->Init.Direction)); + + if (hspi->State != HAL_SPI_STATE_READY) + { + return HAL_BUSY; + } + + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + /* Process Locked */ + __HAL_LOCK(hspi); + + /* Set the transaction information */ + hspi->State = HAL_SPI_STATE_BUSY_TX; + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + hspi->pTxBuffPtr = (const uint8_t *)pData; + hspi->TxXferSize = Size; + hspi->TxXferCount = Size; + + /* Init field not used in handle to zero */ + hspi->pRxBuffPtr = (uint8_t *)NULL; + hspi->TxISR = NULL; + hspi->RxISR = NULL; + hspi->RxXferSize = 0U; + hspi->RxXferCount = 0U; + + /* Configure communication direction : 1Line */ + if (hspi->Init.Direction == SPI_DIRECTION_1LINE) + { + /* Disable SPI Peripheral before set 1Line direction (BIDIOE bit) */ + __HAL_SPI_DISABLE(hspi); + SPI_1LINE_TX(hspi); + } + +#if (USE_SPI_CRC != 0U) + /* Reset CRC Calculation */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + SPI_RESET_CRC(hspi); + } +#endif /* USE_SPI_CRC */ + + /* Set the SPI TxDMA Half transfer complete callback */ + hspi->hdmatx->XferHalfCpltCallback = SPI_DMAHalfTransmitCplt; + + /* Set the SPI TxDMA transfer complete callback */ + hspi->hdmatx->XferCpltCallback = SPI_DMATransmitCplt; + + /* Set the DMA error callback */ + hspi->hdmatx->XferErrorCallback = SPI_DMAError; + + /* Set the DMA AbortCpltCallback */ + hspi->hdmatx->XferAbortCallback = NULL; + + /* Enable the Tx DMA Stream/Channel */ + if (HAL_OK != HAL_DMA_Start_IT(hspi->hdmatx, (uint32_t)hspi->pTxBuffPtr, (uint32_t)&hspi->Instance->DR, + hspi->TxXferCount)) + { + /* Update SPI error code */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_DMA); + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + return HAL_ERROR; + } + + /* Check if the SPI is already enabled */ + if ((hspi->Instance->CR1 & SPI_CR1_SPE) != SPI_CR1_SPE) + { + /* Enable SPI peripheral */ + __HAL_SPI_ENABLE(hspi); + } + + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + + /* Enable the SPI Error Interrupt Bit */ + __HAL_SPI_ENABLE_IT(hspi, (SPI_IT_ERR)); + + /* Enable Tx DMA Request */ + SET_BIT(hspi->Instance->CR2, SPI_CR2_TXDMAEN); + + return HAL_OK; +} + +/** + * @brief Receive an amount of data in non-blocking mode with DMA. + * @note In case of MASTER mode and SPI_DIRECTION_2LINES direction, hdmatx shall be defined. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @param pData pointer to data buffer + * @note When the CRC feature is enabled the pData Length must be Size + 1. 
+ * @param Size amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_Receive_DMA(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size) +{ + /* Check rx dma handle */ + assert_param(IS_SPI_DMA_HANDLE(hspi->hdmarx)); + + if (hspi->State != HAL_SPI_STATE_READY) + { + return HAL_BUSY; + } + + if ((hspi->Init.Direction == SPI_DIRECTION_2LINES) && (hspi->Init.Mode == SPI_MODE_MASTER)) + { + hspi->State = HAL_SPI_STATE_BUSY_RX; + + /* Check tx dma handle */ + assert_param(IS_SPI_DMA_HANDLE(hspi->hdmatx)); + + /* Call transmit-receive function to send Dummy data on Tx line and generate clock on CLK line */ + return HAL_SPI_TransmitReceive_DMA(hspi, pData, pData, Size); + } + + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + /* Process Locked */ + __HAL_LOCK(hspi); + + /* Set the transaction information */ + hspi->State = HAL_SPI_STATE_BUSY_RX; + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + hspi->pRxBuffPtr = (uint8_t *)pData; + hspi->RxXferSize = Size; + hspi->RxXferCount = Size; + + /*Init field not used in handle to zero */ + hspi->RxISR = NULL; + hspi->TxISR = NULL; + hspi->TxXferSize = 0U; + hspi->TxXferCount = 0U; + + /* Configure communication direction : 1Line */ + if (hspi->Init.Direction == SPI_DIRECTION_1LINE) + { + /* Disable SPI Peripheral before set 1Line direction (BIDIOE bit) */ + __HAL_SPI_DISABLE(hspi); + SPI_1LINE_RX(hspi); + } + +#if (USE_SPI_CRC != 0U) + /* Reset CRC Calculation */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + SPI_RESET_CRC(hspi); + } +#endif /* USE_SPI_CRC */ + + /* Set the SPI RxDMA Half transfer complete callback */ + hspi->hdmarx->XferHalfCpltCallback = SPI_DMAHalfReceiveCplt; + + /* Set the SPI Rx DMA transfer complete callback */ + hspi->hdmarx->XferCpltCallback = SPI_DMAReceiveCplt; + + /* Set the DMA error callback */ + hspi->hdmarx->XferErrorCallback = SPI_DMAError; + + /* Set the DMA AbortCpltCallback */ + hspi->hdmarx->XferAbortCallback = NULL; + + /* Enable the Rx DMA Stream/Channel */ + if (HAL_OK != HAL_DMA_Start_IT(hspi->hdmarx, (uint32_t)&hspi->Instance->DR, (uint32_t)hspi->pRxBuffPtr, + hspi->RxXferCount)) + { + /* Update SPI error code */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_DMA); + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + return HAL_ERROR; + } + + /* Check if the SPI is already enabled */ + if ((hspi->Instance->CR1 & SPI_CR1_SPE) != SPI_CR1_SPE) + { + /* Enable SPI peripheral */ + __HAL_SPI_ENABLE(hspi); + } + + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + + /* Enable the SPI Error Interrupt Bit */ + __HAL_SPI_ENABLE_IT(hspi, (SPI_IT_ERR)); + + /* Enable Rx DMA Request */ + SET_BIT(hspi->Instance->CR2, SPI_CR2_RXDMAEN); + + return HAL_OK; +} + +/** + * @brief Transmit and Receive an amount of data in non-blocking mode with DMA. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. 
+ * @param pTxData pointer to transmission data buffer + * @param pRxData pointer to reception data buffer + * @note When the CRC feature is enabled the pRxData Length must be Size + 1 + * @param Size amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_TransmitReceive_DMA(SPI_HandleTypeDef *hspi, const uint8_t *pTxData, uint8_t *pRxData, + uint16_t Size) +{ + uint32_t tmp_mode; + HAL_SPI_StateTypeDef tmp_state; + + /* Check rx & tx dma handles */ + assert_param(IS_SPI_DMA_HANDLE(hspi->hdmarx)); + assert_param(IS_SPI_DMA_HANDLE(hspi->hdmatx)); + + /* Check Direction parameter */ + assert_param(IS_SPI_DIRECTION_2LINES(hspi->Init.Direction)); + + /* Init temporary variables */ + tmp_state = hspi->State; + tmp_mode = hspi->Init.Mode; + + if (!((tmp_state == HAL_SPI_STATE_READY) || + ((tmp_mode == SPI_MODE_MASTER) && (hspi->Init.Direction == SPI_DIRECTION_2LINES) && (tmp_state == HAL_SPI_STATE_BUSY_RX)))) + { + return HAL_BUSY; + } + + if ((pTxData == NULL) || (pRxData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + /* Process locked */ + __HAL_LOCK(hspi); + + /* Don't overwrite in case of HAL_SPI_STATE_BUSY_RX */ + if (hspi->State != HAL_SPI_STATE_BUSY_RX) + { + hspi->State = HAL_SPI_STATE_BUSY_TX_RX; + } + + /* Set the transaction information */ + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + hspi->pTxBuffPtr = (const uint8_t *)pTxData; + hspi->TxXferSize = Size; + hspi->TxXferCount = Size; + hspi->pRxBuffPtr = (uint8_t *)pRxData; + hspi->RxXferSize = Size; + hspi->RxXferCount = Size; + + /* Init field not used in handle to zero */ + hspi->RxISR = NULL; + hspi->TxISR = NULL; + +#if (USE_SPI_CRC != 0U) + /* Reset CRC Calculation */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + SPI_RESET_CRC(hspi); + } +#endif /* USE_SPI_CRC */ + + /* Check if we are in Rx only or in Rx/Tx Mode and configure the DMA transfer complete callback */ + if (hspi->State == HAL_SPI_STATE_BUSY_RX) + { + /* Set the SPI Rx DMA Half transfer complete callback */ + hspi->hdmarx->XferHalfCpltCallback = SPI_DMAHalfReceiveCplt; + hspi->hdmarx->XferCpltCallback = SPI_DMAReceiveCplt; + } + else + { + /* Set the SPI Tx/Rx DMA Half transfer complete callback */ + hspi->hdmarx->XferHalfCpltCallback = SPI_DMAHalfTransmitReceiveCplt; + hspi->hdmarx->XferCpltCallback = SPI_DMATransmitReceiveCplt; + } + + /* Set the DMA error callback */ + hspi->hdmarx->XferErrorCallback = SPI_DMAError; + + /* Set the DMA AbortCpltCallback */ + hspi->hdmarx->XferAbortCallback = NULL; + + /* Enable the Rx DMA Stream/Channel */ + if (HAL_OK != HAL_DMA_Start_IT(hspi->hdmarx, (uint32_t)&hspi->Instance->DR, (uint32_t)hspi->pRxBuffPtr, + hspi->RxXferCount)) + { + /* Update SPI error code */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_DMA); + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + return HAL_ERROR; + } + + /* Enable Rx DMA Request */ + SET_BIT(hspi->Instance->CR2, SPI_CR2_RXDMAEN); + + /* Set the SPI Tx DMA transfer complete callback as NULL because the communication closing + is performed in DMA reception complete callback */ + hspi->hdmatx->XferHalfCpltCallback = NULL; + hspi->hdmatx->XferCpltCallback = NULL; + hspi->hdmatx->XferErrorCallback = NULL; + hspi->hdmatx->XferAbortCallback = NULL; + + /* Enable the Tx DMA Stream/Channel */ + if (HAL_OK != HAL_DMA_Start_IT(hspi->hdmatx, (uint32_t)hspi->pTxBuffPtr, (uint32_t)&hspi->Instance->DR, + hspi->TxXferCount)) + { + /* Update SPI error code */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_DMA); + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + return 
HAL_ERROR; + } + + /* Check if the SPI is already enabled */ + if ((hspi->Instance->CR1 & SPI_CR1_SPE) != SPI_CR1_SPE) + { + /* Enable SPI peripheral */ + __HAL_SPI_ENABLE(hspi); + } + + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + + /* Enable the SPI Error Interrupt Bit */ + __HAL_SPI_ENABLE_IT(hspi, (SPI_IT_ERR)); + + /* Enable Tx DMA Request */ + SET_BIT(hspi->Instance->CR2, SPI_CR2_TXDMAEN); + + return HAL_OK; +} + +/** + * @brief Abort ongoing transfer (blocking mode). + * @param hspi SPI handle. + * @note This procedure could be used for aborting any ongoing transfer (Tx and Rx), + * started in Interrupt or DMA mode. + * This procedure performs following operations : + * - Disable SPI Interrupts (depending of transfer direction) + * - Disable the DMA transfer in the peripheral register (if enabled) + * - Abort DMA transfer by calling HAL_DMA_Abort (in case of transfer in DMA mode) + * - Set handle State to READY + * @note This procedure is executed in blocking mode : when exiting function, Abort is considered as completed. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_Abort(SPI_HandleTypeDef *hspi) +{ + HAL_StatusTypeDef errorcode; + __IO uint32_t count; + __IO uint32_t resetcount; + + /* Initialized local variable */ + errorcode = HAL_OK; + resetcount = SPI_DEFAULT_TIMEOUT * (SystemCoreClock / 24U / 1000U); + count = resetcount; + + /* Clear ERRIE interrupt to avoid error interrupts generation during Abort procedure */ + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_ERRIE); + + /* Disable TXEIE, RXNEIE and ERRIE(mode fault event, overrun error, TI frame error) interrupts */ + if (HAL_IS_BIT_SET(hspi->Instance->CR2, SPI_CR2_TXEIE)) + { + hspi->TxISR = SPI_AbortTx_ISR; + /* Wait HAL_SPI_STATE_ABORT state */ + do + { + if (count == 0U) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_ABORT); + break; + } + count--; + } while (hspi->State != HAL_SPI_STATE_ABORT); + /* Reset Timeout Counter */ + count = resetcount; + } + + if (HAL_IS_BIT_SET(hspi->Instance->CR2, SPI_CR2_RXNEIE)) + { + hspi->RxISR = SPI_AbortRx_ISR; + /* Wait HAL_SPI_STATE_ABORT state */ + do + { + if (count == 0U) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_ABORT); + break; + } + count--; + } while (hspi->State != HAL_SPI_STATE_ABORT); + /* Reset Timeout Counter */ + count = resetcount; + } + + /* Disable the SPI DMA Tx request if enabled */ + if (HAL_IS_BIT_SET(hspi->Instance->CR2, SPI_CR2_TXDMAEN)) + { + /* Abort the SPI DMA Tx Stream/Channel : use blocking DMA Abort API (no callback) */ + if (hspi->hdmatx != NULL) + { + /* Set the SPI DMA Abort callback : + will lead to call HAL_SPI_AbortCpltCallback() at end of DMA abort procedure */ + hspi->hdmatx->XferAbortCallback = NULL; + + /* Abort DMA Tx Handle linked to SPI Peripheral */ + if (HAL_DMA_Abort(hspi->hdmatx) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_ABORT; + } + + /* Disable Tx DMA Request */ + CLEAR_BIT(hspi->Instance->CR2, (SPI_CR2_TXDMAEN)); + + /* Wait until TXE flag is set */ + do + { + if (count == 0U) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_ABORT); + break; + } + count--; + } while ((hspi->Instance->SR & SPI_FLAG_TXE) == RESET); + } + } + + /* Disable the SPI DMA Rx request if enabled */ + if (HAL_IS_BIT_SET(hspi->Instance->CR2, SPI_CR2_RXDMAEN)) + { + /* Abort the SPI DMA Rx Stream/Channel : use blocking DMA Abort API (no callback) */ + if (hspi->hdmarx != NULL) + { + /* Set the SPI DMA Abort callback : + will lead to call HAL_SPI_AbortCpltCallback() at end of DMA abort procedure */ + hspi->hdmarx->XferAbortCallback = NULL; + + /* Abort DMA 
Rx Handle linked to SPI Peripheral */ + if (HAL_DMA_Abort(hspi->hdmarx) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_ABORT; + } + + /* Disable peripheral */ + __HAL_SPI_DISABLE(hspi); + + /* Disable Rx DMA Request */ + CLEAR_BIT(hspi->Instance->CR2, (SPI_CR2_RXDMAEN)); + } + } + /* Reset Tx and Rx transfer counters */ + hspi->RxXferCount = 0U; + hspi->TxXferCount = 0U; + + /* Check error during Abort procedure */ + if (hspi->ErrorCode == HAL_SPI_ERROR_ABORT) + { + /* return HAL_Error in case of error during Abort procedure */ + errorcode = HAL_ERROR; + } + else + { + /* Reset errorCode */ + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + } + + /* Clear the Error flags in the SR register */ + __HAL_SPI_CLEAR_OVRFLAG(hspi); + __HAL_SPI_CLEAR_FREFLAG(hspi); + + /* Restore hspi->state to ready */ + hspi->State = HAL_SPI_STATE_READY; + + return errorcode; +} + +/** + * @brief Abort ongoing transfer (Interrupt mode). + * @param hspi SPI handle. + * @note This procedure could be used for aborting any ongoing transfer (Tx and Rx), + * started in Interrupt or DMA mode. + * This procedure performs following operations : + * - Disable SPI Interrupts (depending of transfer direction) + * - Disable the DMA transfer in the peripheral register (if enabled) + * - Abort DMA transfer by calling HAL_DMA_Abort_IT (in case of transfer in DMA mode) + * - Set handle State to READY + * - At abort completion, call user abort complete callback + * @note This procedure is executed in Interrupt mode, meaning that abort procedure could be + * considered as completed only when user abort complete callback is executed (not when exiting function). + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_Abort_IT(SPI_HandleTypeDef *hspi) +{ + HAL_StatusTypeDef errorcode; + uint32_t abortcplt ; + __IO uint32_t count; + __IO uint32_t resetcount; + + /* Initialized local variable */ + errorcode = HAL_OK; + abortcplt = 1U; + resetcount = SPI_DEFAULT_TIMEOUT * (SystemCoreClock / 24U / 1000U); + count = resetcount; + + /* Clear ERRIE interrupt to avoid error interrupts generation during Abort procedure */ + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_ERRIE); + + /* Change Rx and Tx Irq Handler to Disable TXEIE, RXNEIE and ERRIE interrupts */ + if (HAL_IS_BIT_SET(hspi->Instance->CR2, SPI_CR2_TXEIE)) + { + hspi->TxISR = SPI_AbortTx_ISR; + /* Wait HAL_SPI_STATE_ABORT state */ + do + { + if (count == 0U) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_ABORT); + break; + } + count--; + } while (hspi->State != HAL_SPI_STATE_ABORT); + /* Reset Timeout Counter */ + count = resetcount; + } + + if (HAL_IS_BIT_SET(hspi->Instance->CR2, SPI_CR2_RXNEIE)) + { + hspi->RxISR = SPI_AbortRx_ISR; + /* Wait HAL_SPI_STATE_ABORT state */ + do + { + if (count == 0U) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_ABORT); + break; + } + count--; + } while (hspi->State != HAL_SPI_STATE_ABORT); + /* Reset Timeout Counter */ + count = resetcount; + } + + /* If DMA Tx and/or DMA Rx Handles are associated to SPI Handle, DMA Abort complete callbacks should be initialised + before any call to DMA Abort functions */ + /* DMA Tx Handle is valid */ + if (hspi->hdmatx != NULL) + { + /* Set DMA Abort Complete callback if UART DMA Tx request if enabled. 
+ Otherwise, set it to NULL */ + if (HAL_IS_BIT_SET(hspi->Instance->CR2, SPI_CR2_TXDMAEN)) + { + hspi->hdmatx->XferAbortCallback = SPI_DMATxAbortCallback; + } + else + { + hspi->hdmatx->XferAbortCallback = NULL; + } + } + /* DMA Rx Handle is valid */ + if (hspi->hdmarx != NULL) + { + /* Set DMA Abort Complete callback if UART DMA Rx request if enabled. + Otherwise, set it to NULL */ + if (HAL_IS_BIT_SET(hspi->Instance->CR2, SPI_CR2_RXDMAEN)) + { + hspi->hdmarx->XferAbortCallback = SPI_DMARxAbortCallback; + } + else + { + hspi->hdmarx->XferAbortCallback = NULL; + } + } + + /* Disable the SPI DMA Tx request if enabled */ + if (HAL_IS_BIT_SET(hspi->Instance->CR2, SPI_CR2_TXDMAEN)) + { + /* Abort the SPI DMA Tx Stream/Channel */ + if (hspi->hdmatx != NULL) + { + /* Abort DMA Tx Handle linked to SPI Peripheral */ + if (HAL_DMA_Abort_IT(hspi->hdmatx) != HAL_OK) + { + hspi->hdmatx->XferAbortCallback = NULL; + hspi->ErrorCode = HAL_SPI_ERROR_ABORT; + } + else + { + abortcplt = 0U; + } + } + } + /* Disable the SPI DMA Rx request if enabled */ + if (HAL_IS_BIT_SET(hspi->Instance->CR2, SPI_CR2_RXDMAEN)) + { + /* Abort the SPI DMA Rx Stream/Channel */ + if (hspi->hdmarx != NULL) + { + /* Abort DMA Rx Handle linked to SPI Peripheral */ + if (HAL_DMA_Abort_IT(hspi->hdmarx) != HAL_OK) + { + hspi->hdmarx->XferAbortCallback = NULL; + hspi->ErrorCode = HAL_SPI_ERROR_ABORT; + } + else + { + abortcplt = 0U; + } + } + } + + if (abortcplt == 1U) + { + /* Reset Tx and Rx transfer counters */ + hspi->RxXferCount = 0U; + hspi->TxXferCount = 0U; + + /* Check error during Abort procedure */ + if (hspi->ErrorCode == HAL_SPI_ERROR_ABORT) + { + /* return HAL_Error in case of error during Abort procedure */ + errorcode = HAL_ERROR; + } + else + { + /* Reset errorCode */ + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + } + + /* Clear the Error flags in the SR register */ + __HAL_SPI_CLEAR_OVRFLAG(hspi); + __HAL_SPI_CLEAR_FREFLAG(hspi); + + /* Restore hspi->State to Ready */ + hspi->State = HAL_SPI_STATE_READY; + + /* As no DMA to be aborted, call directly user Abort complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->AbortCpltCallback(hspi); +#else + HAL_SPI_AbortCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + } + + return errorcode; +} + +/** + * @brief Pause the DMA Transfer. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for the specified SPI module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_DMAPause(SPI_HandleTypeDef *hspi) +{ + /* Process Locked */ + __HAL_LOCK(hspi); + + /* Disable the SPI DMA Tx & Rx requests */ + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_TXDMAEN | SPI_CR2_RXDMAEN); + + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + + return HAL_OK; +} + +/** + * @brief Resume the DMA Transfer. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for the specified SPI module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_DMAResume(SPI_HandleTypeDef *hspi) +{ + /* Process Locked */ + __HAL_LOCK(hspi); + + /* Enable the SPI DMA Tx & Rx requests */ + SET_BIT(hspi->Instance->CR2, SPI_CR2_TXDMAEN | SPI_CR2_RXDMAEN); + + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + + return HAL_OK; +} + +/** + * @brief Stop the DMA Transfer. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for the specified SPI module. 
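+  * @note  Illustrative usage sketch (not part of the driver): assuming an
+  *        application-defined handle hspi1 (CubeMX-style name) for which a
+  *        transfer was started with HAL_SPI_TransmitReceive_DMA(), the ongoing
+  *        DMA transfer can be stopped and the outcome checked as follows.
+  * @code
+  *        extern SPI_HandleTypeDef hspi1;   // assumed user-defined handle
+  *
+  *        if (HAL_SPI_DMAStop(&hspi1) != HAL_OK)
+  *        {
+  *          // DMA abort reported an error: query the bitmap error code
+  *          uint32_t err = HAL_SPI_GetError(&hspi1);
+  *          (void)err;                      // handle or log the error as needed
+  *        }
+  *        // The handle is back in HAL_SPI_STATE_READY and can be reused
+  * @endcode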
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_DMAStop(SPI_HandleTypeDef *hspi) +{ + HAL_StatusTypeDef errorcode = HAL_OK; + /* The Lock is not implemented on this API to allow the user application + to call the HAL SPI API under callbacks HAL_SPI_TxCpltCallback() or HAL_SPI_RxCpltCallback() or HAL_SPI_TxRxCpltCallback(): + when calling HAL_DMA_Abort() API the DMA TX/RX Transfer complete interrupt is generated + and the correspond call back is executed HAL_SPI_TxCpltCallback() or HAL_SPI_RxCpltCallback() or HAL_SPI_TxRxCpltCallback() + */ + + /* Abort the SPI DMA tx Stream/Channel */ + if (hspi->hdmatx != NULL) + { + if (HAL_OK != HAL_DMA_Abort(hspi->hdmatx)) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_DMA); + errorcode = HAL_ERROR; + } + } + /* Abort the SPI DMA rx Stream/Channel */ + if (hspi->hdmarx != NULL) + { + if (HAL_OK != HAL_DMA_Abort(hspi->hdmarx)) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_DMA); + errorcode = HAL_ERROR; + } + } + + /* Disable the SPI DMA Tx & Rx requests */ + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_TXDMAEN | SPI_CR2_RXDMAEN); + hspi->State = HAL_SPI_STATE_READY; + return errorcode; +} + +/** + * @brief Handle SPI interrupt request. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for the specified SPI module. + * @retval None + */ +void HAL_SPI_IRQHandler(SPI_HandleTypeDef *hspi) +{ + uint32_t itsource = hspi->Instance->CR2; + uint32_t itflag = hspi->Instance->SR; + + /* SPI in mode Receiver ----------------------------------------------------*/ + if ((SPI_CHECK_FLAG(itflag, SPI_FLAG_OVR) == RESET) && + (SPI_CHECK_FLAG(itflag, SPI_FLAG_RXNE) != RESET) && (SPI_CHECK_IT_SOURCE(itsource, SPI_IT_RXNE) != RESET)) + { + hspi->RxISR(hspi); + return; + } + + /* SPI in mode Transmitter -------------------------------------------------*/ + if ((SPI_CHECK_FLAG(itflag, SPI_FLAG_TXE) != RESET) && (SPI_CHECK_IT_SOURCE(itsource, SPI_IT_TXE) != RESET)) + { + hspi->TxISR(hspi); + return; + } + + /* SPI in Error Treatment --------------------------------------------------*/ + if (((SPI_CHECK_FLAG(itflag, SPI_FLAG_MODF) != RESET) || (SPI_CHECK_FLAG(itflag, SPI_FLAG_OVR) != RESET) + || (SPI_CHECK_FLAG(itflag, SPI_FLAG_FRE) != RESET)) && (SPI_CHECK_IT_SOURCE(itsource, SPI_IT_ERR) != RESET)) + { + /* SPI Overrun error interrupt occurred ----------------------------------*/ + if (SPI_CHECK_FLAG(itflag, SPI_FLAG_OVR) != RESET) + { + if (hspi->State != HAL_SPI_STATE_BUSY_TX) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_OVR); + __HAL_SPI_CLEAR_OVRFLAG(hspi); + } + else + { + __HAL_SPI_CLEAR_OVRFLAG(hspi); + return; + } + } + + /* SPI Mode Fault error interrupt occurred -------------------------------*/ + if (SPI_CHECK_FLAG(itflag, SPI_FLAG_MODF) != RESET) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_MODF); + __HAL_SPI_CLEAR_MODFFLAG(hspi); + } + + /* SPI Frame error interrupt occurred ------------------------------------*/ + if (SPI_CHECK_FLAG(itflag, SPI_FLAG_FRE) != RESET) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FRE); + __HAL_SPI_CLEAR_FREFLAG(hspi); + } + + if (hspi->ErrorCode != HAL_SPI_ERROR_NONE) + { + /* Disable all interrupts */ + __HAL_SPI_DISABLE_IT(hspi, SPI_IT_RXNE | SPI_IT_TXE | SPI_IT_ERR); + + hspi->State = HAL_SPI_STATE_READY; + /* Disable the SPI DMA requests if enabled */ + if ((HAL_IS_BIT_SET(itsource, SPI_CR2_TXDMAEN)) || (HAL_IS_BIT_SET(itsource, SPI_CR2_RXDMAEN))) + { + CLEAR_BIT(hspi->Instance->CR2, (SPI_CR2_TXDMAEN | SPI_CR2_RXDMAEN)); + + /* Abort the SPI DMA Rx channel */ + if (hspi->hdmarx 
!= NULL) + { + /* Set the SPI DMA Abort callback : + will lead to call HAL_SPI_ErrorCallback() at end of DMA abort procedure */ + hspi->hdmarx->XferAbortCallback = SPI_DMAAbortOnError; + if (HAL_OK != HAL_DMA_Abort_IT(hspi->hdmarx)) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_ABORT); + } + } + /* Abort the SPI DMA Tx channel */ + if (hspi->hdmatx != NULL) + { + /* Set the SPI DMA Abort callback : + will lead to call HAL_SPI_ErrorCallback() at end of DMA abort procedure */ + hspi->hdmatx->XferAbortCallback = SPI_DMAAbortOnError; + if (HAL_OK != HAL_DMA_Abort_IT(hspi->hdmatx)) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_ABORT); + } + } + } + else + { + /* Call user error callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->ErrorCallback(hspi); +#else + HAL_SPI_ErrorCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + } + } + return; + } +} + +/** + * @brief Tx Transfer completed callback. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +__weak void HAL_SPI_TxCpltCallback(SPI_HandleTypeDef *hspi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hspi); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SPI_TxCpltCallback should be implemented in the user file + */ +} + +/** + * @brief Rx Transfer completed callback. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +__weak void HAL_SPI_RxCpltCallback(SPI_HandleTypeDef *hspi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hspi); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SPI_RxCpltCallback should be implemented in the user file + */ +} + +/** + * @brief Tx and Rx Transfer completed callback. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +__weak void HAL_SPI_TxRxCpltCallback(SPI_HandleTypeDef *hspi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hspi); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SPI_TxRxCpltCallback should be implemented in the user file + */ +} + +/** + * @brief Tx Half Transfer completed callback. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +__weak void HAL_SPI_TxHalfCpltCallback(SPI_HandleTypeDef *hspi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hspi); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SPI_TxHalfCpltCallback should be implemented in the user file + */ +} + +/** + * @brief Rx Half Transfer completed callback. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +__weak void HAL_SPI_RxHalfCpltCallback(SPI_HandleTypeDef *hspi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hspi); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SPI_RxHalfCpltCallback() should be implemented in the user file + */ +} + +/** + * @brief Tx and Rx Half Transfer callback. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. 
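+  * @note  Minimal override sketch, assuming the application wants to post-process
+  *        the first half of a circular DMA buffer while the second half is still
+  *        being transferred. The same pattern applies to the other weak callbacks
+  *        (HAL_SPI_TxRxCpltCallback(), HAL_SPI_ErrorCallback(), ...): define a
+  *        function with the same signature in a user file and it replaces this
+  *        weak implementation at link time.
+  * @code
+  *        void HAL_SPI_TxRxHalfCpltCallback(SPI_HandleTypeDef *hspi)
+  *        {
+  *          if (hspi->Instance == SPI1)
+  *          {
+  *            process_first_half();   // hypothetical user function
+  *          }
+  *        }
+  * @endcode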
+ * @retval None + */ +__weak void HAL_SPI_TxRxHalfCpltCallback(SPI_HandleTypeDef *hspi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hspi); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SPI_TxRxHalfCpltCallback() should be implemented in the user file + */ +} + +/** + * @brief SPI error callback. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +__weak void HAL_SPI_ErrorCallback(SPI_HandleTypeDef *hspi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hspi); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SPI_ErrorCallback should be implemented in the user file + */ + /* NOTE : The ErrorCode parameter in the hspi handle is updated by the SPI processes + and user can use HAL_SPI_GetError() API to check the latest error occurred + */ +} + +/** + * @brief SPI Abort Complete callback. + * @param hspi SPI handle. + * @retval None + */ +__weak void HAL_SPI_AbortCpltCallback(SPI_HandleTypeDef *hspi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hspi); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SPI_AbortCpltCallback can be implemented in the user file. + */ +} + +/** + * @} + */ + +/** @defgroup SPI_Exported_Functions_Group3 Peripheral State and Errors functions + * @brief SPI control functions + * +@verbatim + =============================================================================== + ##### Peripheral State and Errors functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to control the SPI. + (+) HAL_SPI_GetState() API can be helpful to check in run-time the state of the SPI peripheral + (+) HAL_SPI_GetError() check in run-time Errors occurring during communication +@endverbatim + * @{ + */ + +/** + * @brief Return the SPI handle state. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval SPI state + */ +HAL_SPI_StateTypeDef HAL_SPI_GetState(const SPI_HandleTypeDef *hspi) +{ + /* Return SPI handle state */ + return hspi->State; +} + +/** + * @brief Return the SPI error code. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval SPI error code in bitmap format + */ +uint32_t HAL_SPI_GetError(const SPI_HandleTypeDef *hspi) +{ + /* Return SPI ErrorCode */ + return hspi->ErrorCode; +} + +/** + * @} + */ + +/** + * @} + */ + +/** @addtogroup SPI_Private_Functions + * @brief Private functions + * @{ + */ + +/** + * @brief DMA SPI transmit process complete callback. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. 
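+  * @note  In DMA normal mode this callback disables the ERR interrupt and the Tx DMA
+  *        request, waits for the end of the transaction and only then reports
+  *        completion (or an error); in DMA circular mode it simply invokes the user
+  *        Tx complete callback and leaves the transfer running.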
+ * @retval None + */ +static void SPI_DMATransmitCplt(DMA_HandleTypeDef *hdma) +{ + SPI_HandleTypeDef *hspi = (SPI_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + uint32_t tickstart; + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + /* DMA Normal Mode */ + if ((hdma->Instance->CR & DMA_SxCR_CIRC) != DMA_SxCR_CIRC) + { + /* Disable ERR interrupt */ + __HAL_SPI_DISABLE_IT(hspi, SPI_IT_ERR); + + /* Disable Tx DMA Request */ + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_TXDMAEN); + + /* Check the end of the transaction */ + if (SPI_EndRxTxTransaction(hspi, SPI_DEFAULT_TIMEOUT, tickstart) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FLAG); + } + + /* Clear overrun flag in 2 Lines communication mode because received data is not read */ + if (hspi->Init.Direction == SPI_DIRECTION_2LINES) + { + __HAL_SPI_CLEAR_OVRFLAG(hspi); + } + + hspi->TxXferCount = 0U; + hspi->State = HAL_SPI_STATE_READY; + + if (hspi->ErrorCode != HAL_SPI_ERROR_NONE) + { + /* Call user error callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->ErrorCallback(hspi); +#else + HAL_SPI_ErrorCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + return; + } + } + /* Call user Tx complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->TxCpltCallback(hspi); +#else + HAL_SPI_TxCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA SPI receive process complete callback. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +static void SPI_DMAReceiveCplt(DMA_HandleTypeDef *hdma) +{ + SPI_HandleTypeDef *hspi = (SPI_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + uint32_t tickstart; +#if (USE_SPI_CRC != 0U) + __IO uint32_t tmpreg = 0U; +#endif /* USE_SPI_CRC */ + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + /* DMA Normal Mode */ + if ((hdma->Instance->CR & DMA_SxCR_CIRC) != DMA_SxCR_CIRC) + { + /* Disable ERR interrupt */ + __HAL_SPI_DISABLE_IT(hspi, SPI_IT_ERR); + +#if (USE_SPI_CRC != 0U) + /* CRC handling */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + /* Wait until RXNE flag */ + if (SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_RXNE, SET, SPI_DEFAULT_TIMEOUT, tickstart) != HAL_OK) + { + /* Error on the CRC reception */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); + } + /* Read CRC */ + tmpreg = READ_REG(hspi->Instance->DR); + /* To avoid GCC warning */ + UNUSED(tmpreg); + } +#endif /* USE_SPI_CRC */ + + /* Check if we are in Master RX 2 line mode */ + if ((hspi->Init.Direction == SPI_DIRECTION_2LINES) && (hspi->Init.Mode == SPI_MODE_MASTER)) + { + /* Disable Rx/Tx DMA Request (done by default to handle the case master rx direction 2 lines) */ + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_TXDMAEN | SPI_CR2_RXDMAEN); + } + else + { + /* Normal case */ + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_RXDMAEN); + } + + /* Check the end of the transaction */ + if (SPI_EndRxTransaction(hspi, SPI_DEFAULT_TIMEOUT, tickstart) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_FLAG; + } + + hspi->RxXferCount = 0U; + hspi->State = HAL_SPI_STATE_READY; + +#if (USE_SPI_CRC != 0U) + /* Check if CRC error occurred */ + if (__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_CRCERR)) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); + __HAL_SPI_CLEAR_CRCERRFLAG(hspi); + } +#endif /* USE_SPI_CRC */ + + if (hspi->ErrorCode != 
HAL_SPI_ERROR_NONE) + { + /* Call user error callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->ErrorCallback(hspi); +#else + HAL_SPI_ErrorCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + return; + } + } + /* Call user Rx complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->RxCpltCallback(hspi); +#else + HAL_SPI_RxCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA SPI transmit receive process complete callback. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +static void SPI_DMATransmitReceiveCplt(DMA_HandleTypeDef *hdma) +{ + SPI_HandleTypeDef *hspi = (SPI_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + uint32_t tickstart; +#if (USE_SPI_CRC != 0U) + __IO uint32_t tmpreg = 0U; +#endif /* USE_SPI_CRC */ + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + /* DMA Normal Mode */ + if ((hdma->Instance->CR & DMA_SxCR_CIRC) != DMA_SxCR_CIRC) + { + /* Disable ERR interrupt */ + __HAL_SPI_DISABLE_IT(hspi, SPI_IT_ERR); + +#if (USE_SPI_CRC != 0U) + /* CRC handling */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + /* Wait the CRC data */ + if (SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_RXNE, SET, SPI_DEFAULT_TIMEOUT, tickstart) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); + } + /* Read CRC to Flush DR and RXNE flag */ + tmpreg = READ_REG(hspi->Instance->DR); + /* To avoid GCC warning */ + UNUSED(tmpreg); + } +#endif /* USE_SPI_CRC */ + + /* Check the end of the transaction */ + if (SPI_EndRxTxTransaction(hspi, SPI_DEFAULT_TIMEOUT, tickstart) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FLAG); + } + + /* Disable Rx/Tx DMA Request */ + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_TXDMAEN | SPI_CR2_RXDMAEN); + + hspi->TxXferCount = 0U; + hspi->RxXferCount = 0U; + hspi->State = HAL_SPI_STATE_READY; + +#if (USE_SPI_CRC != 0U) + /* Check if CRC error occurred */ + if (__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_CRCERR)) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); + __HAL_SPI_CLEAR_CRCERRFLAG(hspi); + } +#endif /* USE_SPI_CRC */ + + if (hspi->ErrorCode != HAL_SPI_ERROR_NONE) + { + /* Call user error callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->ErrorCallback(hspi); +#else + HAL_SPI_ErrorCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + return; + } + } + /* Call user TxRx complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->TxRxCpltCallback(hspi); +#else + HAL_SPI_TxRxCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA SPI half transmit process complete callback. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +static void SPI_DMAHalfTransmitCplt(DMA_HandleTypeDef *hdma) +{ + SPI_HandleTypeDef *hspi = (SPI_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + + /* Call user Tx half complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->TxHalfCpltCallback(hspi); +#else + HAL_SPI_TxHalfCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA SPI half receive process complete callback + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. 
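+  * @note  Half-transfer callbacks are mainly useful with circular DMA: they let the
+  *        application process the first half of the reception buffer while the DMA
+  *        keeps filling the second half (a common double-buffering scheme).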
+ * @retval None + */ +static void SPI_DMAHalfReceiveCplt(DMA_HandleTypeDef *hdma) +{ + SPI_HandleTypeDef *hspi = (SPI_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + + /* Call user Rx half complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->RxHalfCpltCallback(hspi); +#else + HAL_SPI_RxHalfCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA SPI half transmit receive process complete callback. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +static void SPI_DMAHalfTransmitReceiveCplt(DMA_HandleTypeDef *hdma) +{ + SPI_HandleTypeDef *hspi = (SPI_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + + /* Call user TxRx half complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->TxRxHalfCpltCallback(hspi); +#else + HAL_SPI_TxRxHalfCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA SPI communication error callback. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +static void SPI_DMAError(DMA_HandleTypeDef *hdma) +{ + SPI_HandleTypeDef *hspi = (SPI_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + + /* Stop the disable DMA transfer on SPI side */ + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_TXDMAEN | SPI_CR2_RXDMAEN); + + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_DMA); + hspi->State = HAL_SPI_STATE_READY; + /* Call user error callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->ErrorCallback(hspi); +#else + HAL_SPI_ErrorCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA SPI communication abort callback, when initiated by HAL services on Error + * (To be called at end of DMA Abort procedure following error occurrence). + * @param hdma DMA handle. + * @retval None + */ +static void SPI_DMAAbortOnError(DMA_HandleTypeDef *hdma) +{ + SPI_HandleTypeDef *hspi = (SPI_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + hspi->RxXferCount = 0U; + hspi->TxXferCount = 0U; + + /* Call user error callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->ErrorCallback(hspi); +#else + HAL_SPI_ErrorCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA SPI Tx communication abort callback, when initiated by user + * (To be called at end of DMA Tx Abort procedure following user abort request). + * @note When this callback is executed, User Abort complete call back is called only if no + * Abort still ongoing for Rx DMA Handle. + * @param hdma DMA handle. 
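+  * @note  This callback disables the Tx DMA request, busy-waits for TXE using a
+  *        software loop scaled from SystemCoreClock, and defers the user abort
+  *        complete callback if an Rx DMA abort is still in progress; in that case
+  *        the Rx abort callback finishes the sequence.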
+ * @retval None + */ +static void SPI_DMATxAbortCallback(DMA_HandleTypeDef *hdma) +{ + SPI_HandleTypeDef *hspi = (SPI_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + __IO uint32_t count; + + hspi->hdmatx->XferAbortCallback = NULL; + count = SPI_DEFAULT_TIMEOUT * (SystemCoreClock / 24U / 1000U); + + /* Disable Tx DMA Request */ + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_TXDMAEN); + + /* Wait until TXE flag is set */ + do + { + if (count == 0U) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_ABORT); + break; + } + count--; + } while ((hspi->Instance->SR & SPI_FLAG_TXE) == RESET); + + /* Check if an Abort process is still ongoing */ + if (hspi->hdmarx != NULL) + { + if (hspi->hdmarx->XferAbortCallback != NULL) + { + return; + } + } + + /* No Abort process still ongoing : All DMA Stream/Channel are aborted, call user Abort Complete callback */ + hspi->RxXferCount = 0U; + hspi->TxXferCount = 0U; + + /* Check no error during Abort procedure */ + if (hspi->ErrorCode != HAL_SPI_ERROR_ABORT) + { + /* Reset errorCode */ + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + } + + /* Clear the Error flags in the SR register */ + __HAL_SPI_CLEAR_OVRFLAG(hspi); + __HAL_SPI_CLEAR_FREFLAG(hspi); + + /* Restore hspi->State to Ready */ + hspi->State = HAL_SPI_STATE_READY; + + /* Call user Abort complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->AbortCpltCallback(hspi); +#else + HAL_SPI_AbortCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA SPI Rx communication abort callback, when initiated by user + * (To be called at end of DMA Rx Abort procedure following user abort request). + * @note When this callback is executed, User Abort complete call back is called only if no + * Abort still ongoing for Tx DMA Handle. + * @param hdma DMA handle. + * @retval None + */ +static void SPI_DMARxAbortCallback(DMA_HandleTypeDef *hdma) +{ + SPI_HandleTypeDef *hspi = (SPI_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + + /* Disable SPI Peripheral */ + __HAL_SPI_DISABLE(hspi); + + hspi->hdmarx->XferAbortCallback = NULL; + + /* Disable Rx DMA Request */ + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_RXDMAEN); + + /* Check Busy flag */ + if (SPI_EndRxTxTransaction(hspi, SPI_DEFAULT_TIMEOUT, HAL_GetTick()) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_ABORT); + } + + /* Check if an Abort process is still ongoing */ + if (hspi->hdmatx != NULL) + { + if (hspi->hdmatx->XferAbortCallback != NULL) + { + return; + } + } + + /* No Abort process still ongoing : All DMA Stream/Channel are aborted, call user Abort Complete callback */ + hspi->RxXferCount = 0U; + hspi->TxXferCount = 0U; + + /* Check no error during Abort procedure */ + if (hspi->ErrorCode != HAL_SPI_ERROR_ABORT) + { + /* Reset errorCode */ + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + } + + /* Clear the Error flags in the SR register */ + __HAL_SPI_CLEAR_OVRFLAG(hspi); + __HAL_SPI_CLEAR_FREFLAG(hspi); + + /* Restore hspi->State to Ready */ + hspi->State = HAL_SPI_STATE_READY; + + /* Call user Abort complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->AbortCpltCallback(hspi); +#else + HAL_SPI_AbortCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +} + +/** + * @brief Rx 8-bit handler for Transmit and Receive in Interrupt mode. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. 
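+  * @note  Reads one byte from the data register per RXNE event; once the last byte
+  *        has been received it either redirects RxISR to the CRC flush handler
+  *        (when CRC is enabled) or disables the RXNE/ERR interrupts and closes the
+  *        transfer as soon as the transmit side has also finished.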
+ * @retval None + */ +static void SPI_2linesRxISR_8BIT(struct __SPI_HandleTypeDef *hspi) +{ + /* Receive data in 8bit mode */ + *hspi->pRxBuffPtr = *((__IO uint8_t *)&hspi->Instance->DR); + hspi->pRxBuffPtr++; + hspi->RxXferCount--; + + /* Check end of the reception */ + if (hspi->RxXferCount == 0U) + { +#if (USE_SPI_CRC != 0U) + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + hspi->RxISR = SPI_2linesRxISR_8BITCRC; + return; + } +#endif /* USE_SPI_CRC */ + + /* Disable RXNE and ERR interrupt */ + __HAL_SPI_DISABLE_IT(hspi, (SPI_IT_RXNE | SPI_IT_ERR)); + + if (hspi->TxXferCount == 0U) + { + SPI_CloseRxTx_ISR(hspi); + } + } +} + +#if (USE_SPI_CRC != 0U) +/** + * @brief Rx 8-bit handler for Transmit and Receive in Interrupt mode. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_2linesRxISR_8BITCRC(struct __SPI_HandleTypeDef *hspi) +{ + __IO uint8_t *ptmpreg8; + __IO uint8_t tmpreg8 = 0; + + /* Initialize the 8bit temporary pointer */ + ptmpreg8 = (__IO uint8_t *)&hspi->Instance->DR; + /* Read 8bit CRC to flush Data Register */ + tmpreg8 = *ptmpreg8; + /* To avoid GCC warning */ + UNUSED(tmpreg8); + + /* Disable RXNE and ERR interrupt */ + __HAL_SPI_DISABLE_IT(hspi, (SPI_IT_RXNE | SPI_IT_ERR)); + + if (hspi->TxXferCount == 0U) + { + SPI_CloseRxTx_ISR(hspi); + } +} +#endif /* USE_SPI_CRC */ + +/** + * @brief Tx 8-bit handler for Transmit and Receive in Interrupt mode. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_2linesTxISR_8BIT(struct __SPI_HandleTypeDef *hspi) +{ + *(__IO uint8_t *)&hspi->Instance->DR = *((const uint8_t *)hspi->pTxBuffPtr); + hspi->pTxBuffPtr++; + hspi->TxXferCount--; + + /* Check the end of the transmission */ + if (hspi->TxXferCount == 0U) + { +#if (USE_SPI_CRC != 0U) + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + /* Set CRC Next Bit to send CRC */ + SET_BIT(hspi->Instance->CR1, SPI_CR1_CRCNEXT); + /* Disable TXE interrupt */ + __HAL_SPI_DISABLE_IT(hspi, SPI_IT_TXE); + return; + } +#endif /* USE_SPI_CRC */ + + /* Disable TXE interrupt */ + __HAL_SPI_DISABLE_IT(hspi, SPI_IT_TXE); + + if (hspi->RxXferCount == 0U) + { + SPI_CloseRxTx_ISR(hspi); + } + } +} + +/** + * @brief Rx 16-bit handler for Transmit and Receive in Interrupt mode. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_2linesRxISR_16BIT(struct __SPI_HandleTypeDef *hspi) +{ + /* Receive data in 16 Bit mode */ + *((uint16_t *)hspi->pRxBuffPtr) = (uint16_t)(hspi->Instance->DR); + hspi->pRxBuffPtr += sizeof(uint16_t); + hspi->RxXferCount--; + + if (hspi->RxXferCount == 0U) + { +#if (USE_SPI_CRC != 0U) + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + hspi->RxISR = SPI_2linesRxISR_16BITCRC; + return; + } +#endif /* USE_SPI_CRC */ + + /* Disable RXNE interrupt */ + __HAL_SPI_DISABLE_IT(hspi, SPI_IT_RXNE); + + if (hspi->TxXferCount == 0U) + { + SPI_CloseRxTx_ISR(hspi); + } + } +} + +#if (USE_SPI_CRC != 0U) +/** + * @brief Manage the CRC 16-bit receive for Transmit and Receive in Interrupt mode. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. 
+ * @retval None + */ +static void SPI_2linesRxISR_16BITCRC(struct __SPI_HandleTypeDef *hspi) +{ + __IO uint32_t tmpreg = 0U; + + /* Read 16bit CRC to flush Data Register */ + tmpreg = READ_REG(hspi->Instance->DR); + /* To avoid GCC warning */ + UNUSED(tmpreg); + + /* Disable RXNE interrupt */ + __HAL_SPI_DISABLE_IT(hspi, SPI_IT_RXNE); + + SPI_CloseRxTx_ISR(hspi); +} +#endif /* USE_SPI_CRC */ + +/** + * @brief Tx 16-bit handler for Transmit and Receive in Interrupt mode. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_2linesTxISR_16BIT(struct __SPI_HandleTypeDef *hspi) +{ + /* Transmit data in 16 Bit mode */ + hspi->Instance->DR = *((const uint16_t *)hspi->pTxBuffPtr); + hspi->pTxBuffPtr += sizeof(uint16_t); + hspi->TxXferCount--; + + /* Enable CRC Transmission */ + if (hspi->TxXferCount == 0U) + { +#if (USE_SPI_CRC != 0U) + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + /* Set CRC Next Bit to send CRC */ + SET_BIT(hspi->Instance->CR1, SPI_CR1_CRCNEXT); + /* Disable TXE interrupt */ + __HAL_SPI_DISABLE_IT(hspi, SPI_IT_TXE); + return; + } +#endif /* USE_SPI_CRC */ + + /* Disable TXE interrupt */ + __HAL_SPI_DISABLE_IT(hspi, SPI_IT_TXE); + + if (hspi->RxXferCount == 0U) + { + SPI_CloseRxTx_ISR(hspi); + } + } +} + +#if (USE_SPI_CRC != 0U) +/** + * @brief Manage the CRC 8-bit receive in Interrupt context. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_RxISR_8BITCRC(struct __SPI_HandleTypeDef *hspi) +{ + __IO uint8_t *ptmpreg8; + __IO uint8_t tmpreg8 = 0; + + /* Initialize the 8bit temporary pointer */ + ptmpreg8 = (__IO uint8_t *)&hspi->Instance->DR; + /* Read 8bit CRC to flush Data Register */ + tmpreg8 = *ptmpreg8; + /* To avoid GCC warning */ + UNUSED(tmpreg8); + + SPI_CloseRx_ISR(hspi); +} +#endif /* USE_SPI_CRC */ + +/** + * @brief Manage the receive 8-bit in Interrupt context. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_RxISR_8BIT(struct __SPI_HandleTypeDef *hspi) +{ + *hspi->pRxBuffPtr = (*(__IO uint8_t *)&hspi->Instance->DR); + hspi->pRxBuffPtr++; + hspi->RxXferCount--; + +#if (USE_SPI_CRC != 0U) + /* Enable CRC Transmission */ + if ((hspi->RxXferCount == 1U) && (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE)) + { + SET_BIT(hspi->Instance->CR1, SPI_CR1_CRCNEXT); + } +#endif /* USE_SPI_CRC */ + + if (hspi->RxXferCount == 0U) + { +#if (USE_SPI_CRC != 0U) + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + hspi->RxISR = SPI_RxISR_8BITCRC; + return; + } +#endif /* USE_SPI_CRC */ + SPI_CloseRx_ISR(hspi); + } +} + +#if (USE_SPI_CRC != 0U) +/** + * @brief Manage the CRC 16-bit receive in Interrupt context. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_RxISR_16BITCRC(struct __SPI_HandleTypeDef *hspi) +{ + __IO uint32_t tmpreg = 0U; + + /* Read 16bit CRC to flush Data Register */ + tmpreg = READ_REG(hspi->Instance->DR); + /* To avoid GCC warning */ + UNUSED(tmpreg); + + /* Disable RXNE and ERR interrupt */ + __HAL_SPI_DISABLE_IT(hspi, (SPI_IT_RXNE | SPI_IT_ERR)); + + SPI_CloseRx_ISR(hspi); +} +#endif /* USE_SPI_CRC */ + +/** + * @brief Manage the 16-bit receive in Interrupt context. 
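+  * @note  16-bit variant of the receive-only interrupt handler: each RXNE event
+  *        stores one 16-bit frame and advances the buffer pointer by
+  *        sizeof(uint16_t); when CRC is enabled the handler chains to the 16-bit
+  *        CRC flush routine after the last frame.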
+ * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_RxISR_16BIT(struct __SPI_HandleTypeDef *hspi) +{ + *((uint16_t *)hspi->pRxBuffPtr) = (uint16_t)(hspi->Instance->DR); + hspi->pRxBuffPtr += sizeof(uint16_t); + hspi->RxXferCount--; + +#if (USE_SPI_CRC != 0U) + /* Enable CRC Transmission */ + if ((hspi->RxXferCount == 1U) && (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE)) + { + SET_BIT(hspi->Instance->CR1, SPI_CR1_CRCNEXT); + } +#endif /* USE_SPI_CRC */ + + if (hspi->RxXferCount == 0U) + { +#if (USE_SPI_CRC != 0U) + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + hspi->RxISR = SPI_RxISR_16BITCRC; + return; + } +#endif /* USE_SPI_CRC */ + SPI_CloseRx_ISR(hspi); + } +} + +/** + * @brief Handle the data 8-bit transmit in Interrupt mode. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_TxISR_8BIT(struct __SPI_HandleTypeDef *hspi) +{ + *(__IO uint8_t *)&hspi->Instance->DR = *((const uint8_t *)hspi->pTxBuffPtr); + hspi->pTxBuffPtr++; + hspi->TxXferCount--; + + if (hspi->TxXferCount == 0U) + { +#if (USE_SPI_CRC != 0U) + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + /* Enable CRC Transmission */ + SET_BIT(hspi->Instance->CR1, SPI_CR1_CRCNEXT); + } +#endif /* USE_SPI_CRC */ + SPI_CloseTx_ISR(hspi); + } +} + +/** + * @brief Handle the data 16-bit transmit in Interrupt mode. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_TxISR_16BIT(struct __SPI_HandleTypeDef *hspi) +{ + /* Transmit data in 16 Bit mode */ + hspi->Instance->DR = *((const uint16_t *)hspi->pTxBuffPtr); + hspi->pTxBuffPtr += sizeof(uint16_t); + hspi->TxXferCount--; + + if (hspi->TxXferCount == 0U) + { +#if (USE_SPI_CRC != 0U) + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + /* Enable CRC Transmission */ + SET_BIT(hspi->Instance->CR1, SPI_CR1_CRCNEXT); + } +#endif /* USE_SPI_CRC */ + SPI_CloseTx_ISR(hspi); + } +} + +/** + * @brief Handle SPI Communication Timeout. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @param Flag SPI flag to check + * @param State flag state to check + * @param Timeout Timeout duration + * @param Tickstart tick start value + * @retval HAL status + */ +static HAL_StatusTypeDef SPI_WaitFlagStateUntilTimeout(SPI_HandleTypeDef *hspi, uint32_t Flag, FlagStatus State, + uint32_t Timeout, uint32_t Tickstart) +{ + __IO uint32_t count; + uint32_t tmp_timeout; + uint32_t tmp_tickstart; + + /* Adjust Timeout value in case of end of transfer */ + tmp_timeout = Timeout - (HAL_GetTick() - Tickstart); + tmp_tickstart = HAL_GetTick(); + + /* Calculate Timeout based on a software loop to avoid blocking issue if Systick is disabled */ + count = tmp_timeout * ((SystemCoreClock * 32U) >> 20U); + + while ((__HAL_SPI_GET_FLAG(hspi, Flag) ? 
SET : RESET) != State) + { + if (Timeout != HAL_MAX_DELAY) + { + if (((HAL_GetTick() - tmp_tickstart) >= tmp_timeout) || (tmp_timeout == 0U)) + { + /* Disable the SPI and reset the CRC: the CRC value should be cleared + on both master and slave sides in order to resynchronize the master + and slave for their respective CRC calculation */ + + /* Disable TXE, RXNE and ERR interrupts for the interrupt process */ + __HAL_SPI_DISABLE_IT(hspi, (SPI_IT_TXE | SPI_IT_RXNE | SPI_IT_ERR)); + + if ((hspi->Init.Mode == SPI_MODE_MASTER) && ((hspi->Init.Direction == SPI_DIRECTION_1LINE) + || (hspi->Init.Direction == SPI_DIRECTION_2LINES_RXONLY))) + { + /* Disable SPI peripheral */ + __HAL_SPI_DISABLE(hspi); + } + + /* Reset CRC Calculation */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + SPI_RESET_CRC(hspi); + } + + hspi->State = HAL_SPI_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + + return HAL_TIMEOUT; + } + /* If Systick is disabled or not incremented, deactivate timeout to go in disable loop procedure */ + if (count == 0U) + { + tmp_timeout = 0U; + } + count--; + } + } + + return HAL_OK; +} + +/** + * @brief Handle the check of the RX transaction complete. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @param Timeout Timeout duration + * @param Tickstart tick start value + * @retval HAL status + */ +static HAL_StatusTypeDef SPI_EndRxTransaction(SPI_HandleTypeDef *hspi, uint32_t Timeout, uint32_t Tickstart) +{ + if ((hspi->Init.Mode == SPI_MODE_MASTER) && ((hspi->Init.Direction == SPI_DIRECTION_1LINE) + || (hspi->Init.Direction == SPI_DIRECTION_2LINES_RXONLY))) + { + /* Disable SPI peripheral */ + __HAL_SPI_DISABLE(hspi); + } + + /* Erratasheet: BSY bit may stay high at the end of a data transfer in Slave mode */ + if (hspi->Init.Mode == SPI_MODE_MASTER) + { + if (hspi->Init.Direction != SPI_DIRECTION_2LINES_RXONLY) + { + /* Control the BSY flag */ + if (SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_BSY, RESET, Timeout, Tickstart) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FLAG); + return HAL_TIMEOUT; + } + } + else + { + /* Wait the RXNE reset */ + if (SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_RXNE, RESET, Timeout, Tickstart) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FLAG); + return HAL_TIMEOUT; + } + } + } + else + { + /* Wait the RXNE reset */ + if (SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_RXNE, RESET, Timeout, Tickstart) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FLAG); + return HAL_TIMEOUT; + } + } + return HAL_OK; +} + +/** + * @brief Handle the check of the RXTX or TX transaction complete. 
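+  * @note  Waits for TXE, then checks BSY. In master mode BSY is polled until it
+  *        resets; in slave mode (erratasheet: BSY may stay high at the end of a
+  *        transfer) it is only polled for roughly one byte time, using a software
+  *        loop whose iteration count is derived from SystemCoreClock, after which
+  *        the transfer is considered finished.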
+ * @param hspi SPI handle + * @param Timeout Timeout duration + * @param Tickstart tick start value + * @retval HAL status + */ +static HAL_StatusTypeDef SPI_EndRxTxTransaction(SPI_HandleTypeDef *hspi, uint32_t Timeout, uint32_t Tickstart) +{ + /* Wait until TXE flag */ + if(SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_TXE, SET, Timeout, Tickstart) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FLAG); + return HAL_TIMEOUT; + } + + /* Timeout in µs */ + __IO uint32_t count = SPI_BSY_FLAG_WORKAROUND_TIMEOUT * (SystemCoreClock / 24U / 1000000U); + /* Erratasheet: BSY bit may stay high at the end of a data transfer in Slave mode */ + if (hspi->Init.Mode == SPI_MODE_MASTER) + { + /* Control the BSY flag */ + if (SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_BSY, RESET, Timeout, Tickstart) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FLAG); + return HAL_TIMEOUT; + } + } + else + { + /* Wait BSY flag during 1 Byte time transfer in case of Full-Duplex and Tx transfer + * If Timeout is reached, the transfer is considered as finish. + * User have to calculate the timeout value to fit with the time of 1 byte transfer. + * This time is directly link with the SPI clock from Master device. + */ + do + { + if (count == 0U) + { + break; + } + count--; + } while (__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_BSY) != RESET); + } + + return HAL_OK; +} + +/** + * @brief Handle the end of the RXTX transaction. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_CloseRxTx_ISR(SPI_HandleTypeDef *hspi) +{ + uint32_t tickstart; + __IO uint32_t count = SPI_DEFAULT_TIMEOUT * (SystemCoreClock / 24U / 1000U); + + /* Init tickstart for timeout management */ + tickstart = HAL_GetTick(); + + /* Disable ERR interrupt */ + __HAL_SPI_DISABLE_IT(hspi, SPI_IT_ERR); + + /* Wait until TXE flag is set */ + do + { + if (count == 0U) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FLAG); + break; + } + count--; + } while ((hspi->Instance->SR & SPI_FLAG_TXE) == RESET); + + /* Check the end of the transaction */ + if (SPI_EndRxTxTransaction(hspi, SPI_DEFAULT_TIMEOUT, tickstart) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FLAG); + } + + /* Clear overrun flag in 2 Lines communication mode because received is not read */ + if (hspi->Init.Direction == SPI_DIRECTION_2LINES) + { + __HAL_SPI_CLEAR_OVRFLAG(hspi); + } + +#if (USE_SPI_CRC != 0U) + /* Check if CRC error occurred */ + if (__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_CRCERR) != RESET) + { + hspi->State = HAL_SPI_STATE_READY; + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); + __HAL_SPI_CLEAR_CRCERRFLAG(hspi); + /* Call user error callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->ErrorCallback(hspi); +#else + HAL_SPI_ErrorCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + } + else + { +#endif /* USE_SPI_CRC */ + if (hspi->ErrorCode == HAL_SPI_ERROR_NONE) + { + if (hspi->State == HAL_SPI_STATE_BUSY_RX) + { + hspi->State = HAL_SPI_STATE_READY; + /* Call user Rx complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->RxCpltCallback(hspi); +#else + HAL_SPI_RxCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + } + else + { + hspi->State = HAL_SPI_STATE_READY; + /* Call user TxRx complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->TxRxCpltCallback(hspi); +#else + HAL_SPI_TxRxCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + } + } + else + { + hspi->State = HAL_SPI_STATE_READY; + /* 
Call user error callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->ErrorCallback(hspi); +#else + HAL_SPI_ErrorCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + } +#if (USE_SPI_CRC != 0U) + } +#endif /* USE_SPI_CRC */ +} + +/** + * @brief Handle the end of the RX transaction. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_CloseRx_ISR(SPI_HandleTypeDef *hspi) +{ + /* Disable RXNE and ERR interrupt */ + __HAL_SPI_DISABLE_IT(hspi, (SPI_IT_RXNE | SPI_IT_ERR)); + + /* Check the end of the transaction */ + if (SPI_EndRxTransaction(hspi, SPI_DEFAULT_TIMEOUT, HAL_GetTick()) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FLAG); + } + + /* Clear overrun flag in 2 Lines communication mode because received is not read */ + if (hspi->Init.Direction == SPI_DIRECTION_2LINES) + { + __HAL_SPI_CLEAR_OVRFLAG(hspi); + } + hspi->State = HAL_SPI_STATE_READY; + +#if (USE_SPI_CRC != 0U) + /* Check if CRC error occurred */ + if (__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_CRCERR) != RESET) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); + __HAL_SPI_CLEAR_CRCERRFLAG(hspi); + /* Call user error callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->ErrorCallback(hspi); +#else + HAL_SPI_ErrorCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + } + else + { +#endif /* USE_SPI_CRC */ + if (hspi->ErrorCode == HAL_SPI_ERROR_NONE) + { + /* Call user Rx complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->RxCpltCallback(hspi); +#else + HAL_SPI_RxCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + } + else + { + /* Call user error callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->ErrorCallback(hspi); +#else + HAL_SPI_ErrorCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + } +#if (USE_SPI_CRC != 0U) + } +#endif /* USE_SPI_CRC */ +} + +/** + * @brief Handle the end of the TX transaction. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. 
+ * @retval None + */ +static void SPI_CloseTx_ISR(SPI_HandleTypeDef *hspi) +{ + uint32_t tickstart; + __IO uint32_t count = SPI_DEFAULT_TIMEOUT * (SystemCoreClock / 24U / 1000U); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + /* Wait until TXE flag is set */ + do + { + if (count == 0U) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FLAG); + break; + } + count--; + } while ((hspi->Instance->SR & SPI_FLAG_TXE) == RESET); + + /* Disable TXE and ERR interrupt */ + __HAL_SPI_DISABLE_IT(hspi, (SPI_IT_TXE | SPI_IT_ERR)); + + /* Check the end of the transaction */ + if (SPI_EndRxTxTransaction(hspi, SPI_DEFAULT_TIMEOUT, tickstart) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FLAG); + } + + /* Clear overrun flag in 2 Lines communication mode because received is not read */ + if (hspi->Init.Direction == SPI_DIRECTION_2LINES) + { + __HAL_SPI_CLEAR_OVRFLAG(hspi); + } + + hspi->State = HAL_SPI_STATE_READY; + if (hspi->ErrorCode != HAL_SPI_ERROR_NONE) + { + /* Call user error callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->ErrorCallback(hspi); +#else + HAL_SPI_ErrorCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + } + else + { + /* Call user Rx complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->TxCpltCallback(hspi); +#else + HAL_SPI_TxCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + } +} + +/** + * @brief Handle abort a Rx transaction. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_AbortRx_ISR(SPI_HandleTypeDef *hspi) +{ + __IO uint32_t tmpreg = 0U; + __IO uint32_t count = SPI_DEFAULT_TIMEOUT * (SystemCoreClock / 24U / 1000U); + + /* Wait until TXE flag is set */ + do + { + if (count == 0U) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_ABORT); + break; + } + count--; + } while ((hspi->Instance->SR & SPI_FLAG_TXE) == RESET); + + /* Disable SPI Peripheral */ + __HAL_SPI_DISABLE(hspi); + + /* Disable TXEIE, RXNEIE and ERRIE(mode fault event, overrun error, TI frame error) interrupts */ + CLEAR_BIT(hspi->Instance->CR2, (SPI_CR2_TXEIE | SPI_CR2_RXNEIE | SPI_CR2_ERRIE)); + + /* Flush Data Register by a blank read */ + tmpreg = READ_REG(hspi->Instance->DR); + /* To avoid GCC warning */ + UNUSED(tmpreg); + + hspi->State = HAL_SPI_STATE_ABORT; +} + +/** + * @brief Handle abort a Tx or Rx/Tx transaction. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_AbortTx_ISR(SPI_HandleTypeDef *hspi) +{ + /* Disable TXEIE interrupt */ + CLEAR_BIT(hspi->Instance->CR2, (SPI_CR2_TXEIE)); + + /* Disable SPI Peripheral */ + __HAL_SPI_DISABLE(hspi); + + hspi->State = HAL_SPI_STATE_ABORT; +} + +/** + * @} + */ + +#endif /* HAL_SPI_MODULE_ENABLED */ + +/** + * @} + */ + +/** + * @} + */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_tim.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_tim.c new file mode 100644 index 0000000..d5978cc --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_tim.c @@ -0,0 +1,7632 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_tim.c + * @author MCD Application Team + * @brief TIM HAL module driver. 
+ * This file provides firmware functions to manage the following + * functionalities of the Timer (TIM) peripheral: + * + TIM Time Base Initialization + * + TIM Time Base Start + * + TIM Time Base Start Interruption + * + TIM Time Base Start DMA + * + TIM Output Compare/PWM Initialization + * + TIM Output Compare/PWM Channel Configuration + * + TIM Output Compare/PWM Start + * + TIM Output Compare/PWM Start Interruption + * + TIM Output Compare/PWM Start DMA + * + TIM Input Capture Initialization + * + TIM Input Capture Channel Configuration + * + TIM Input Capture Start + * + TIM Input Capture Start Interruption + * + TIM Input Capture Start DMA + * + TIM One Pulse Initialization + * + TIM One Pulse Channel Configuration + * + TIM One Pulse Start + * + TIM Encoder Interface Initialization + * + TIM Encoder Interface Start + * + TIM Encoder Interface Start Interruption + * + TIM Encoder Interface Start DMA + * + Commutation Event configuration with Interruption and DMA + * + TIM OCRef clear configuration + * + TIM External Clock configuration + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + @verbatim + ============================================================================== + ##### TIMER Generic features ##### + ============================================================================== + [..] The Timer features include: + (#) 16-bit up, down, up/down auto-reload counter. + (#) 16-bit programmable prescaler allowing dividing (also on the fly) the + counter clock frequency either by any factor between 1 and 65536. + (#) Up to 4 independent channels for: + (++) Input Capture + (++) Output Compare + (++) PWM generation (Edge and Center-aligned Mode) + (++) One-pulse mode output + (#) Synchronization circuit to control the timer with external signals and to interconnect + several timers together. + (#) Supports incremental encoder for positioning purposes + + ##### How to use this driver ##### + ============================================================================== + [..] + (#) Initialize the TIM low level resources by implementing the following functions + depending on the selected feature: + (++) Time Base : HAL_TIM_Base_MspInit() + (++) Input Capture : HAL_TIM_IC_MspInit() + (++) Output Compare : HAL_TIM_OC_MspInit() + (++) PWM generation : HAL_TIM_PWM_MspInit() + (++) One-pulse mode output : HAL_TIM_OnePulse_MspInit() + (++) Encoder mode output : HAL_TIM_Encoder_MspInit() + + (#) Initialize the TIM low level resources : + (##) Enable the TIM interface clock using __HAL_RCC_TIMx_CLK_ENABLE(); + (##) TIM pins configuration + (+++) Enable the clock for the TIM GPIOs using the following function: + __HAL_RCC_GPIOx_CLK_ENABLE(); + (+++) Configure these TIM pins in Alternate function mode using HAL_GPIO_Init(); + + (#) The external Clock can be configured, if needed (the default clock is the + internal clock from the APBx), using the following function: + HAL_TIM_ConfigClockSource, the clock configuration should be done before + any start function. 
+ + (#) Configure the TIM in the desired functioning mode using one of the + Initialization function of this driver: + (++) HAL_TIM_Base_Init: to use the Timer to generate a simple time base + (++) HAL_TIM_OC_Init and HAL_TIM_OC_ConfigChannel: to use the Timer to generate an + Output Compare signal. + (++) HAL_TIM_PWM_Init and HAL_TIM_PWM_ConfigChannel: to use the Timer to generate a + PWM signal. + (++) HAL_TIM_IC_Init and HAL_TIM_IC_ConfigChannel: to use the Timer to measure an + external signal. + (++) HAL_TIM_OnePulse_Init and HAL_TIM_OnePulse_ConfigChannel: to use the Timer + in One Pulse Mode. + (++) HAL_TIM_Encoder_Init: to use the Timer Encoder Interface. + + (#) Activate the TIM peripheral using one of the start functions depending from the feature used: + (++) Time Base : HAL_TIM_Base_Start(), HAL_TIM_Base_Start_DMA(), HAL_TIM_Base_Start_IT() + (++) Input Capture : HAL_TIM_IC_Start(), HAL_TIM_IC_Start_DMA(), HAL_TIM_IC_Start_IT() + (++) Output Compare : HAL_TIM_OC_Start(), HAL_TIM_OC_Start_DMA(), HAL_TIM_OC_Start_IT() + (++) PWM generation : HAL_TIM_PWM_Start(), HAL_TIM_PWM_Start_DMA(), HAL_TIM_PWM_Start_IT() + (++) One-pulse mode output : HAL_TIM_OnePulse_Start(), HAL_TIM_OnePulse_Start_IT() + (++) Encoder mode output : HAL_TIM_Encoder_Start(), HAL_TIM_Encoder_Start_DMA(), HAL_TIM_Encoder_Start_IT(). + + (#) The DMA Burst is managed with the two following functions: + HAL_TIM_DMABurst_WriteStart() + HAL_TIM_DMABurst_ReadStart() + + *** Callback registration *** + ============================================= + + [..] + The compilation define USE_HAL_TIM_REGISTER_CALLBACKS when set to 1 + allows the user to configure dynamically the driver callbacks. + + [..] + Use Function HAL_TIM_RegisterCallback() to register a callback. + HAL_TIM_RegisterCallback() takes as parameters the HAL peripheral handle, + the Callback ID and a pointer to the user callback function. + + [..] + Use function HAL_TIM_UnRegisterCallback() to reset a callback to the default + weak function. + HAL_TIM_UnRegisterCallback takes as parameters the HAL peripheral handle, + and the Callback ID. + + [..] + These functions allow to register/unregister following callbacks: + (+) Base_MspInitCallback : TIM Base Msp Init Callback. + (+) Base_MspDeInitCallback : TIM Base Msp DeInit Callback. + (+) IC_MspInitCallback : TIM IC Msp Init Callback. + (+) IC_MspDeInitCallback : TIM IC Msp DeInit Callback. + (+) OC_MspInitCallback : TIM OC Msp Init Callback. + (+) OC_MspDeInitCallback : TIM OC Msp DeInit Callback. + (+) PWM_MspInitCallback : TIM PWM Msp Init Callback. + (+) PWM_MspDeInitCallback : TIM PWM Msp DeInit Callback. + (+) OnePulse_MspInitCallback : TIM One Pulse Msp Init Callback. + (+) OnePulse_MspDeInitCallback : TIM One Pulse Msp DeInit Callback. + (+) Encoder_MspInitCallback : TIM Encoder Msp Init Callback. + (+) Encoder_MspDeInitCallback : TIM Encoder Msp DeInit Callback. + (+) HallSensor_MspInitCallback : TIM Hall Sensor Msp Init Callback. + (+) HallSensor_MspDeInitCallback : TIM Hall Sensor Msp DeInit Callback. + (+) PeriodElapsedCallback : TIM Period Elapsed Callback. + (+) PeriodElapsedHalfCpltCallback : TIM Period Elapsed half complete Callback. + (+) TriggerCallback : TIM Trigger Callback. + (+) TriggerHalfCpltCallback : TIM Trigger half complete Callback. + (+) IC_CaptureCallback : TIM Input Capture Callback. + (+) IC_CaptureHalfCpltCallback : TIM Input Capture half complete Callback. + (+) OC_DelayElapsedCallback : TIM Output Compare Delay Elapsed Callback. 
+ (+) PWM_PulseFinishedCallback : TIM PWM Pulse Finished Callback. + (+) PWM_PulseFinishedHalfCpltCallback : TIM PWM Pulse Finished half complete Callback. + (+) ErrorCallback : TIM Error Callback. + (+) CommutationCallback : TIM Commutation Callback. + (+) CommutationHalfCpltCallback : TIM Commutation half complete Callback. + (+) BreakCallback : TIM Break Callback. + + [..] +By default, after the Init and when the state is HAL_TIM_STATE_RESET +all interrupt callbacks are set to the corresponding weak functions: + examples HAL_TIM_TriggerCallback(), HAL_TIM_ErrorCallback(). + + [..] + Exception done for MspInit and MspDeInit functions that are reset to the legacy weak + functionalities in the Init / DeInit only when these callbacks are null + (not registered beforehand). If not, MspInit or MspDeInit are not null, the Init / DeInit + keep and use the user MspInit / MspDeInit callbacks(registered beforehand) + + [..] + Callbacks can be registered / unregistered in HAL_TIM_STATE_READY state only. + Exception done MspInit / MspDeInit that can be registered / unregistered + in HAL_TIM_STATE_READY or HAL_TIM_STATE_RESET state, + thus registered(user) MspInit / DeInit callbacks can be used during the Init / DeInit. + In that case first register the MspInit/MspDeInit user callbacks + using HAL_TIM_RegisterCallback() before calling DeInit or Init function. + + [..] + When The compilation define USE_HAL_TIM_REGISTER_CALLBACKS is set to 0 or + not defined, the callback registration feature is not available and all callbacks + are set to the corresponding weak functions. + + @endverbatim + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @defgroup TIM TIM + * @brief TIM HAL module driver + * @{ + */ + +#ifdef HAL_TIM_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/** @addtogroup TIM_Private_Functions + * @{ + */ +static void TIM_OC1_SetConfig(TIM_TypeDef *TIMx, const TIM_OC_InitTypeDef *OC_Config); +static void TIM_OC3_SetConfig(TIM_TypeDef *TIMx, const TIM_OC_InitTypeDef *OC_Config); +static void TIM_OC4_SetConfig(TIM_TypeDef *TIMx, const TIM_OC_InitTypeDef *OC_Config); +static void TIM_TI1_ConfigInputStage(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICFilter); +static void TIM_TI2_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, + uint32_t TIM_ICFilter); +static void TIM_TI2_ConfigInputStage(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICFilter); +static void TIM_TI3_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, + uint32_t TIM_ICFilter); +static void TIM_TI4_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, + uint32_t TIM_ICFilter); +static void TIM_ITRx_SetConfig(TIM_TypeDef *TIMx, uint32_t InputTriggerSource); +static void TIM_DMAPeriodElapsedCplt(DMA_HandleTypeDef *hdma); +static void TIM_DMAPeriodElapsedHalfCplt(DMA_HandleTypeDef *hdma); +static void TIM_DMADelayPulseCplt(DMA_HandleTypeDef *hdma); 
+static void TIM_DMATriggerCplt(DMA_HandleTypeDef *hdma); +static void TIM_DMATriggerHalfCplt(DMA_HandleTypeDef *hdma); +static HAL_StatusTypeDef TIM_SlaveTimer_SetConfig(TIM_HandleTypeDef *htim, + const TIM_SlaveConfigTypeDef *sSlaveConfig); +/** + * @} + */ +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup TIM_Exported_Functions TIM Exported Functions + * @{ + */ + +/** @defgroup TIM_Exported_Functions_Group1 TIM Time Base functions + * @brief Time Base functions + * +@verbatim + ============================================================================== + ##### Time Base functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Initialize and configure the TIM base. + (+) De-initialize the TIM base. + (+) Start the Time Base. + (+) Stop the Time Base. + (+) Start the Time Base and enable interrupt. + (+) Stop the Time Base and disable interrupt. + (+) Start the Time Base and enable DMA transfer. + (+) Stop the Time Base and disable DMA transfer. + +@endverbatim + * @{ + */ +/** + * @brief Initializes the TIM Time base Unit according to the specified + * parameters in the TIM_HandleTypeDef and initialize the associated handle. + * @note Switching from Center Aligned counter mode to Edge counter mode (or reverse) + * requires a timer reset to avoid unexpected direction + * due to DIR bit readonly in center aligned mode. + * Ex: call @ref HAL_TIM_Base_DeInit() before HAL_TIM_Base_Init() + * @param htim TIM Base handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Base_Init(TIM_HandleTypeDef *htim) +{ + /* Check the TIM handle allocation */ + if (htim == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); + assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_PERIOD(htim, htim->Init.Period)); + assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); + + if (htim->State == HAL_TIM_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + htim->Lock = HAL_UNLOCKED; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + /* Reset interrupt callbacks to legacy weak callbacks */ + TIM_ResetCallback(htim); + + if (htim->Base_MspInitCallback == NULL) + { + htim->Base_MspInitCallback = HAL_TIM_Base_MspInit; + } + /* Init the low level hardware : GPIO, CLOCK, NVIC */ + htim->Base_MspInitCallback(htim); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC */ + HAL_TIM_Base_MspInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Set the Time Base configuration */ + TIM_Base_SetConfig(htim->Instance, &htim->Init); + + /* Initialize the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + + /* Initialize the TIM channels state */ + TIM_CHANNEL_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_READY); + + /* Initialize the TIM state*/ + htim->State = HAL_TIM_STATE_READY; + + return HAL_OK; +} + +/** + * @brief DeInitializes the TIM Base peripheral + * @param htim TIM Base handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Base_DeInit(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Disable the 
TIM Peripheral Clock */ + __HAL_TIM_DISABLE(htim); + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + if (htim->Base_MspDeInitCallback == NULL) + { + htim->Base_MspDeInitCallback = HAL_TIM_Base_MspDeInit; + } + /* DeInit the low level hardware */ + htim->Base_MspDeInitCallback(htim); +#else + /* DeInit the low level hardware: GPIO, CLOCK, NVIC */ + HAL_TIM_Base_MspDeInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_RESET; + + /* Change the TIM channels state */ + TIM_CHANNEL_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_RESET); + + /* Change TIM state */ + htim->State = HAL_TIM_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Initializes the TIM Base MSP. + * @param htim TIM Base handle + * @retval None + */ +__weak void HAL_TIM_Base_MspInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_Base_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes TIM Base MSP. + * @param htim TIM Base handle + * @retval None + */ +__weak void HAL_TIM_Base_MspDeInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_Base_MspDeInit could be implemented in the user file + */ +} + + +/** + * @brief Starts the TIM Base generation. + * @param htim TIM Base handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Base_Start(TIM_HandleTypeDef *htim) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + /* Check the TIM state */ + if (htim->State != HAL_TIM_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Base generation. + * @param htim TIM Base handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Base_Stop(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Base generation in interrupt mode. 
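+  * @note   Usage sketch (illustrative only: the htim2 handle name and the prior calls to
+  *         HAL_TIM_Base_Init() and HAL_NVIC_EnableIRQ() for the timer interrupt are
+  *         assumptions, not taken from this driver):
+  *           HAL_TIM_Base_Start_IT(&htim2);
+  *         The timer ISR should then call HAL_TIM_IRQHandler(), which invokes
+  *         HAL_TIM_PeriodElapsedCallback() on each update event.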
+ * @param htim TIM Base handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Base_Start_IT(TIM_HandleTypeDef *htim) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + /* Check the TIM state */ + if (htim->State != HAL_TIM_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Enable the TIM Update interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_UPDATE); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Base generation in interrupt mode. + * @param htim TIM Base handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Base_Stop_IT(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + /* Disable the TIM Update interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_UPDATE); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Base generation in DMA mode. + * @param htim TIM Base handle + * @param pData The source Buffer address. + * @param Length The length of data to be transferred from memory to peripheral. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Base_Start_DMA(TIM_HandleTypeDef *htim, const uint32_t *pData, uint16_t Length) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_DMA_INSTANCE(htim->Instance)); + + /* Set the TIM state */ + if (htim->State == HAL_TIM_STATE_BUSY) + { + return HAL_BUSY; + } + else if (htim->State == HAL_TIM_STATE_READY) + { + if ((pData == NULL) || (Length == 0U)) + { + return HAL_ERROR; + } + else + { + htim->State = HAL_TIM_STATE_BUSY; + } + } + else + { + return HAL_ERROR; + } + + /* Set the DMA Period elapsed callbacks */ + htim->hdma[TIM_DMA_ID_UPDATE]->XferCpltCallback = TIM_DMAPeriodElapsedCplt; + htim->hdma[TIM_DMA_ID_UPDATE]->XferHalfCpltCallback = TIM_DMAPeriodElapsedHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_UPDATE]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_UPDATE], (uint32_t)pData, (uint32_t)&htim->Instance->ARR, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + + /* Enable the TIM Update DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_UPDATE); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Base generation in DMA mode. 
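+  * @note   Counterpart of HAL_TIM_Base_Start_DMA(): the TIM update DMA request is disabled
+  *         and the transfer on the update DMA stream is aborted before the counter is
+  *         disabled and the state returns to HAL_TIM_STATE_READY.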
+ * @param htim TIM Base handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Base_Stop_DMA(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_DMA_INSTANCE(htim->Instance)); + + /* Disable the TIM Update DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_UPDATE); + + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_UPDATE]); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group2 TIM Output Compare functions + * @brief TIM Output Compare functions + * +@verbatim + ============================================================================== + ##### TIM Output Compare functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Initialize and configure the TIM Output Compare. + (+) De-initialize the TIM Output Compare. + (+) Start the TIM Output Compare. + (+) Stop the TIM Output Compare. + (+) Start the TIM Output Compare and enable interrupt. + (+) Stop the TIM Output Compare and disable interrupt. + (+) Start the TIM Output Compare and enable DMA transfer. + (+) Stop the TIM Output Compare and disable DMA transfer. + +@endverbatim + * @{ + */ +/** + * @brief Initializes the TIM Output Compare according to the specified + * parameters in the TIM_HandleTypeDef and initializes the associated handle. + * @note Switching from Center Aligned counter mode to Edge counter mode (or reverse) + * requires a timer reset to avoid unexpected direction + * due to DIR bit readonly in center aligned mode. + * Ex: call @ref HAL_TIM_OC_DeInit() before HAL_TIM_OC_Init() + * @param htim TIM Output Compare handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_Init(TIM_HandleTypeDef *htim) +{ + /* Check the TIM handle allocation */ + if (htim == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); + assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_PERIOD(htim, htim->Init.Period)); + assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); + + if (htim->State == HAL_TIM_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + htim->Lock = HAL_UNLOCKED; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + /* Reset interrupt callbacks to legacy weak callbacks */ + TIM_ResetCallback(htim); + + if (htim->OC_MspInitCallback == NULL) + { + htim->OC_MspInitCallback = HAL_TIM_OC_MspInit; + } + /* Init the low level hardware : GPIO, CLOCK, NVIC */ + htim->OC_MspInitCallback(htim); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC and DMA */ + HAL_TIM_OC_MspInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Init the base time for the Output Compare */ + TIM_Base_SetConfig(htim->Instance, &htim->Init); + + /* Initialize the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + + /* Initialize the TIM channels state */ + TIM_CHANNEL_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_READY); + + /* Initialize the TIM state*/ + htim->State = HAL_TIM_STATE_READY; + + return HAL_OK; +} + +/** + * @brief DeInitializes the TIM peripheral + * @param htim 
TIM Output Compare handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_DeInit(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Disable the TIM Peripheral Clock */ + __HAL_TIM_DISABLE(htim); + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + if (htim->OC_MspDeInitCallback == NULL) + { + htim->OC_MspDeInitCallback = HAL_TIM_OC_MspDeInit; + } + /* DeInit the low level hardware */ + htim->OC_MspDeInitCallback(htim); +#else + /* DeInit the low level hardware: GPIO, CLOCK, NVIC and DMA */ + HAL_TIM_OC_MspDeInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_RESET; + + /* Change the TIM channels state */ + TIM_CHANNEL_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_RESET); + + /* Change TIM state */ + htim->State = HAL_TIM_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Initializes the TIM Output Compare MSP. + * @param htim TIM Output Compare handle + * @retval None + */ +__weak void HAL_TIM_OC_MspInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_OC_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes TIM Output Compare MSP. + * @param htim TIM Output Compare handle + * @retval None + */ +__weak void HAL_TIM_OC_MspDeInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_OC_MspDeInit could be implemented in the user file + */ +} + +/** + * @brief Starts the TIM Output Compare signal generation. + * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_Start(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Check the TIM channel state */ + if (TIM_CHANNEL_STATE_GET(htim, Channel) != HAL_TIM_CHANNEL_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + + /* Enable the Output compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Output Compare signal generation. 
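+  * @note   The selected output compare channel is disabled, the main output is disabled on
+  *         break-capable instances and the counter is stopped; the channel state returns to
+  *         HAL_TIM_CHANNEL_STATE_READY so the channel can be reconfigured or restarted.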
+ * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Disable the Output compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Output Compare signal generation in interrupt mode. + * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Check the TIM channel state */ + if (TIM_CHANNEL_STATE_GET(htim, Channel) != HAL_TIM_CHANNEL_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Enable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Enable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Enable the TIM Capture/Compare 3 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Enable the TIM Capture/Compare 4 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC4); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Enable the Output compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + } + + /* Return function status */ + return status; +} + +/** + * @brief Stops the TIM Output Compare signal generation in interrupt mode. 
+ * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Capture/Compare 3 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Disable the TIM Capture/Compare 4 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC4); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Disable the Output compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return status; +} + +/** + * @brief Starts the TIM Output Compare signal generation in DMA mode. + * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @param pData The source Buffer address. 
+ * @param Length The length of data to be transferred from memory to TIM peripheral + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, const uint32_t *pData, + uint16_t Length) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Set the TIM channel state */ + if (TIM_CHANNEL_STATE_GET(htim, Channel) == HAL_TIM_CHANNEL_STATE_BUSY) + { + return HAL_BUSY; + } + else if (TIM_CHANNEL_STATE_GET(htim, Channel) == HAL_TIM_CHANNEL_STATE_READY) + { + if ((pData == NULL) || (Length == 0U)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + return HAL_ERROR; + } + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)pData, (uint32_t)&htim->Instance->CCR1, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + + /* Enable the TIM Capture/Compare 1 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)pData, (uint32_t)&htim->Instance->CCR2, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + + /* Enable the TIM Capture/Compare 2 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC3]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)pData, (uint32_t)&htim->Instance->CCR3, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 3 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC4]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC4]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC4]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC4], (uint32_t)pData, (uint32_t)&htim->Instance->CCR4, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 4 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC4); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Enable the Output compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, 
TIM_CCx_ENABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + } + + /* Return function status */ + return status; +} + +/** + * @brief Stops the TIM Output Compare signal generation in DMA mode. + * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Capture/Compare 1 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Capture/Compare 2 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC2); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Capture/Compare 3 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC3); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); + break; + } + + case TIM_CHANNEL_4: + { + /* Disable the TIM Capture/Compare 4 interrupt */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC4); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC4]); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Disable the Output compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return status; +} + +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group3 TIM PWM functions + * @brief TIM PWM functions + * +@verbatim + ============================================================================== + ##### TIM PWM functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Initialize and configure the TIM PWM. + (+) De-initialize the TIM PWM. + (+) Start the TIM PWM. + (+) Stop the TIM PWM. + (+) Start the TIM PWM and enable interrupt. + (+) Stop the TIM PWM and disable interrupt. + (+) Start the TIM PWM and enable DMA transfer. + (+) Stop the TIM PWM and disable DMA transfer. + +@endverbatim + * @{ + */ +/** + * @brief Initializes the TIM PWM Time Base according to the specified + * parameters in the TIM_HandleTypeDef and initializes the associated handle. 
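+  * @note   Typical bring-up sequence (illustrative only: the htim3 handle name, the TIM3
+  *         instance, the 84 MHz timer kernel clock and the numeric values are assumptions,
+  *         not taken from this driver):
+  *           htim3.Instance               = TIM3;
+  *           htim3.Init.Prescaler         = 83;      (84 MHz / 84 = 1 MHz counter clock)
+  *           htim3.Init.CounterMode       = TIM_COUNTERMODE_UP;
+  *           htim3.Init.Period            = 999;     (1 MHz / 1000 = 1 kHz PWM frequency)
+  *           htim3.Init.ClockDivision     = TIM_CLOCKDIVISION_DIV1;
+  *           htim3.Init.AutoReloadPreload = TIM_AUTORELOAD_PRELOAD_ENABLE;
+  *           HAL_TIM_PWM_Init(&htim3);
+  *         followed by HAL_TIM_PWM_ConfigChannel() and HAL_TIM_PWM_Start() for each channel
+  *         that is used.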
+ * @note Switching from Center Aligned counter mode to Edge counter mode (or reverse) + * requires a timer reset to avoid unexpected direction + * due to DIR bit readonly in center aligned mode. + * Ex: call @ref HAL_TIM_PWM_DeInit() before HAL_TIM_PWM_Init() + * @param htim TIM PWM handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_Init(TIM_HandleTypeDef *htim) +{ + /* Check the TIM handle allocation */ + if (htim == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); + assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_PERIOD(htim, htim->Init.Period)); + assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); + + if (htim->State == HAL_TIM_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + htim->Lock = HAL_UNLOCKED; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + /* Reset interrupt callbacks to legacy weak callbacks */ + TIM_ResetCallback(htim); + + if (htim->PWM_MspInitCallback == NULL) + { + htim->PWM_MspInitCallback = HAL_TIM_PWM_MspInit; + } + /* Init the low level hardware : GPIO, CLOCK, NVIC */ + htim->PWM_MspInitCallback(htim); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC and DMA */ + HAL_TIM_PWM_MspInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Init the base time for the PWM */ + TIM_Base_SetConfig(htim->Instance, &htim->Init); + + /* Initialize the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + + /* Initialize the TIM channels state */ + TIM_CHANNEL_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_READY); + + /* Initialize the TIM state*/ + htim->State = HAL_TIM_STATE_READY; + + return HAL_OK; +} + +/** + * @brief DeInitializes the TIM peripheral + * @param htim TIM PWM handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_DeInit(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Disable the TIM Peripheral Clock */ + __HAL_TIM_DISABLE(htim); + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + if (htim->PWM_MspDeInitCallback == NULL) + { + htim->PWM_MspDeInitCallback = HAL_TIM_PWM_MspDeInit; + } + /* DeInit the low level hardware */ + htim->PWM_MspDeInitCallback(htim); +#else + /* DeInit the low level hardware: GPIO, CLOCK, NVIC and DMA */ + HAL_TIM_PWM_MspDeInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_RESET; + + /* Change the TIM channels state */ + TIM_CHANNEL_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_RESET); + + /* Change TIM state */ + htim->State = HAL_TIM_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Initializes the TIM PWM MSP. + * @param htim TIM PWM handle + * @retval None + */ +__weak void HAL_TIM_PWM_MspInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_PWM_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes TIM PWM MSP. 
+ * @param htim TIM PWM handle + * @retval None + */ +__weak void HAL_TIM_PWM_MspDeInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_PWM_MspDeInit could be implemented in the user file + */ +} + +/** + * @brief Starts the PWM signal generation. + * @param htim TIM handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_Start(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Check the TIM channel state */ + if (TIM_CHANNEL_STATE_GET(htim, Channel) != HAL_TIM_CHANNEL_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + + /* Enable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the PWM signal generation. + * @param htim TIM PWM handle + * @param Channel TIM Channels to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Disable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the PWM signal generation in interrupt mode. 
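+  * @note   The channel must have been configured beforehand with HAL_TIM_PWM_ConfigChannel().
+  *         The corresponding capture/compare interrupt is enabled; the timer ISR should call
+  *         HAL_TIM_IRQHandler(), which invokes HAL_TIM_PWM_PulseFinishedCallback() on each
+  *         compare match.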
+ * @param htim TIM PWM handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Check the TIM channel state */ + if (TIM_CHANNEL_STATE_GET(htim, Channel) != HAL_TIM_CHANNEL_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Enable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Enable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Enable the TIM Capture/Compare 3 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Enable the TIM Capture/Compare 4 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC4); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Enable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + } + + /* Return function status */ + return status; +} + +/** + * @brief Stops the PWM signal generation in interrupt mode. 
+ * @param htim TIM PWM handle + * @param Channel TIM Channels to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Capture/Compare 3 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Disable the TIM Capture/Compare 4 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC4); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Disable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return status; +} + +/** + * @brief Starts the TIM PWM signal generation in DMA mode. + * @param htim TIM PWM handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @param pData The source Buffer address. 
+ * @param Length The length of data to be transferred from memory to TIM peripheral + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, const uint32_t *pData, + uint16_t Length) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Set the TIM channel state */ + if (TIM_CHANNEL_STATE_GET(htim, Channel) == HAL_TIM_CHANNEL_STATE_BUSY) + { + return HAL_BUSY; + } + else if (TIM_CHANNEL_STATE_GET(htim, Channel) == HAL_TIM_CHANNEL_STATE_READY) + { + if ((pData == NULL) || (Length == 0U)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + return HAL_ERROR; + } + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)pData, (uint32_t)&htim->Instance->CCR1, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + + /* Enable the TIM Capture/Compare 1 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)pData, (uint32_t)&htim->Instance->CCR2, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 2 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC3]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)pData, (uint32_t)&htim->Instance->CCR3, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Output Capture/Compare 3 request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC4]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC4]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC4]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC4], (uint32_t)pData, (uint32_t)&htim->Instance->CCR4, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 4 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC4); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Enable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, 
TIM_CCx_ENABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + } + + /* Return function status */ + return status; +} + +/** + * @brief Stops the TIM PWM signal generation in DMA mode. + * @param htim TIM PWM handle + * @param Channel TIM Channels to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Capture/Compare 1 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Capture/Compare 2 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC2); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Capture/Compare 3 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC3); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); + break; + } + + case TIM_CHANNEL_4: + { + /* Disable the TIM Capture/Compare 4 interrupt */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC4); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC4]); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Disable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return status; +} + +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group4 TIM Input Capture functions + * @brief TIM Input Capture functions + * +@verbatim + ============================================================================== + ##### TIM Input Capture functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Initialize and configure the TIM Input Capture. + (+) De-initialize the TIM Input Capture. + (+) Start the TIM Input Capture. + (+) Stop the TIM Input Capture. + (+) Start the TIM Input Capture and enable interrupt. + (+) Stop the TIM Input Capture and disable interrupt. + (+) Start the TIM Input Capture and enable DMA transfer. + (+) Stop the TIM Input Capture and disable DMA transfer. + +@endverbatim + * @{ + */ +/** + * @brief Initializes the TIM Input Capture Time base according to the specified + * parameters in the TIM_HandleTypeDef and initializes the associated handle. 
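+  * @note   Illustrative follow-up (the htim2 handle name and the channel settings below are
+  *         assumptions, not taken from this driver): after HAL_TIM_IC_Init(), each measurement
+  *         channel is configured with HAL_TIM_IC_ConfigChannel(), e.g.
+  *           TIM_IC_InitTypeDef sConfigIC = {0};
+  *           sConfigIC.ICPolarity  = TIM_ICPOLARITY_RISING;
+  *           sConfigIC.ICSelection = TIM_ICSELECTION_DIRECTTI;
+  *           sConfigIC.ICPrescaler = TIM_ICPSC_DIV1;
+  *           sConfigIC.ICFilter    = 0;
+  *           HAL_TIM_IC_ConfigChannel(&htim2, &sConfigIC, TIM_CHANNEL_1);
+  *         before one of the HAL_TIM_IC_Start functions is called.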
+ * @note Switching from Center Aligned counter mode to Edge counter mode (or reverse) + * requires a timer reset to avoid unexpected direction + * due to DIR bit readonly in center aligned mode. + * Ex: call @ref HAL_TIM_IC_DeInit() before HAL_TIM_IC_Init() + * @param htim TIM Input Capture handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_Init(TIM_HandleTypeDef *htim) +{ + /* Check the TIM handle allocation */ + if (htim == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); + assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_PERIOD(htim, htim->Init.Period)); + assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); + + if (htim->State == HAL_TIM_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + htim->Lock = HAL_UNLOCKED; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + /* Reset interrupt callbacks to legacy weak callbacks */ + TIM_ResetCallback(htim); + + if (htim->IC_MspInitCallback == NULL) + { + htim->IC_MspInitCallback = HAL_TIM_IC_MspInit; + } + /* Init the low level hardware : GPIO, CLOCK, NVIC */ + htim->IC_MspInitCallback(htim); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC and DMA */ + HAL_TIM_IC_MspInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Init the base time for the input capture */ + TIM_Base_SetConfig(htim->Instance, &htim->Init); + + /* Initialize the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + + /* Initialize the TIM channels state */ + TIM_CHANNEL_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_READY); + + /* Initialize the TIM state*/ + htim->State = HAL_TIM_STATE_READY; + + return HAL_OK; +} + +/** + * @brief DeInitializes the TIM peripheral + * @param htim TIM Input Capture handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_DeInit(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Disable the TIM Peripheral Clock */ + __HAL_TIM_DISABLE(htim); + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + if (htim->IC_MspDeInitCallback == NULL) + { + htim->IC_MspDeInitCallback = HAL_TIM_IC_MspDeInit; + } + /* DeInit the low level hardware */ + htim->IC_MspDeInitCallback(htim); +#else + /* DeInit the low level hardware: GPIO, CLOCK, NVIC and DMA */ + HAL_TIM_IC_MspDeInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_RESET; + + /* Change the TIM channels state */ + TIM_CHANNEL_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_RESET); + + /* Change TIM state */ + htim->State = HAL_TIM_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Initializes the TIM Input Capture MSP. + * @param htim TIM Input Capture handle + * @retval None + */ +__weak void HAL_TIM_IC_MspInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_IC_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes TIM Input Capture MSP. 
+ * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIM_IC_MspDeInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_IC_MspDeInit could be implemented in the user file + */ +} + +/** + * @brief Starts the TIM Input Capture measurement. + * @param htim TIM Input Capture handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_Start(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpsmcr; + HAL_TIM_ChannelStateTypeDef channel_state = TIM_CHANNEL_STATE_GET(htim, Channel); + HAL_TIM_ChannelStateTypeDef complementary_channel_state = TIM_CHANNEL_N_STATE_GET(htim, Channel); + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Check the TIM channel state */ + if ((channel_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + + /* Enable the Input Capture channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Input Capture measurement. + * @param htim TIM Input Capture handle + * @param Channel TIM Channels to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Disable the Input Capture channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Input Capture measurement in interrupt mode. 
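+  * @note   Each capture event raises the channel interrupt; the timer ISR should call
+  *         HAL_TIM_IRQHandler(), which invokes HAL_TIM_IC_CaptureCallback(), where the
+  *         captured counter value can be read with HAL_TIM_ReadCapturedValue().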
+ * @param htim TIM Input Capture handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpsmcr; + + HAL_TIM_ChannelStateTypeDef channel_state = TIM_CHANNEL_STATE_GET(htim, Channel); + HAL_TIM_ChannelStateTypeDef complementary_channel_state = TIM_CHANNEL_N_STATE_GET(htim, Channel); + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Check the TIM channel state */ + if ((channel_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Enable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Enable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Enable the TIM Capture/Compare 3 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Enable the TIM Capture/Compare 4 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC4); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Enable the Input Capture channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + } + + /* Return function status */ + return status; +} + +/** + * @brief Stops the TIM Input Capture measurement in interrupt mode. 
+ * @param htim TIM Input Capture handle + * @param Channel TIM Channels to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Capture/Compare 3 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Disable the TIM Capture/Compare 4 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC4); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Disable the Input Capture channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return status; +} + +/** + * @brief Starts the TIM Input Capture measurement in DMA mode. + * @param htim TIM Input Capture handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @param pData The destination Buffer address. + * @param Length The length of data to be transferred from TIM peripheral to memory. 
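+  * @note   Illustrative usage sketch (not part of the driver): assuming a user-defined
+  *         handle htim2 whose hdma[TIM_DMA_ID_CC1] link has already been set up by the MSP
+  *         init code, successive CCR1 captures could be collected to memory as follows:
+  * @code
+  *         static uint32_t ic_buffer[32];    // destination buffer (assumed)
+  *
+  *         if (HAL_TIM_IC_Start_DMA(&htim2, TIM_CHANNEL_1, ic_buffer, 32) != HAL_OK)
+  *         {
+  *           // handle the error in application code
+  *         }
+  *         // HAL_TIM_IC_CaptureCallback() is invoked once the 32 transfers complete
+  * @endcode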
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpsmcr; + + HAL_TIM_ChannelStateTypeDef channel_state = TIM_CHANNEL_STATE_GET(htim, Channel); + HAL_TIM_ChannelStateTypeDef complementary_channel_state = TIM_CHANNEL_N_STATE_GET(htim, Channel); + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + assert_param(IS_TIM_DMA_CC_INSTANCE(htim->Instance)); + + /* Set the TIM channel state */ + if ((channel_state == HAL_TIM_CHANNEL_STATE_BUSY) + || (complementary_channel_state == HAL_TIM_CHANNEL_STATE_BUSY)) + { + return HAL_BUSY; + } + else if ((channel_state == HAL_TIM_CHANNEL_STATE_READY) + && (complementary_channel_state == HAL_TIM_CHANNEL_STATE_READY)) + { + if ((pData == NULL) || (Length == 0U)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + return HAL_ERROR; + } + + /* Enable the Input Capture channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)&htim->Instance->CCR1, (uint32_t)pData, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 1 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)&htim->Instance->CCR2, (uint32_t)pData, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 2 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC3]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)&htim->Instance->CCR3, (uint32_t)pData, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 3 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC4]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC4]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC4]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC4], 
(uint32_t)&htim->Instance->CCR4, (uint32_t)pData, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 4 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC4); + break; + } + + default: + status = HAL_ERROR; + break; + } + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return status; +} + +/** + * @brief Stops the TIM Input Capture measurement in DMA mode. + * @param htim TIM Input Capture handle + * @param Channel TIM Channels to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + assert_param(IS_TIM_DMA_CC_INSTANCE(htim->Instance)); + + /* Disable the Input Capture channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Capture/Compare 1 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Capture/Compare 2 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC2); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Capture/Compare 3 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC3); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); + break; + } + + case TIM_CHANNEL_4: + { + /* Disable the TIM Capture/Compare 4 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC4); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC4]); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return status; +} +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group5 TIM One Pulse functions + * @brief TIM One Pulse functions + * +@verbatim + ============================================================================== + ##### TIM One Pulse functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Initialize and configure the TIM One Pulse. + (+) De-initialize the TIM One Pulse. + (+) Start the TIM One Pulse. + (+) Stop the TIM One Pulse. + (+) Start the TIM One Pulse and enable interrupt. + (+) Stop the TIM One Pulse and disable interrupt. + (+) Start the TIM One Pulse and enable DMA transfer. + (+) Stop the TIM One Pulse and disable DMA transfer. 
+ +@endverbatim + * @{ + */ +/** + * @brief Initializes the TIM One Pulse Time Base according to the specified + * parameters in the TIM_HandleTypeDef and initializes the associated handle. + * @note Switching from Center Aligned counter mode to Edge counter mode (or reverse) + * requires a timer reset to avoid unexpected direction + * due to DIR bit readonly in center aligned mode. + * Ex: call @ref HAL_TIM_OnePulse_DeInit() before HAL_TIM_OnePulse_Init() + * @note When the timer instance is initialized in One Pulse mode, timer + * channels 1 and channel 2 are reserved and cannot be used for other + * purpose. + * @param htim TIM One Pulse handle + * @param OnePulseMode Select the One pulse mode. + * This parameter can be one of the following values: + * @arg TIM_OPMODE_SINGLE: Only one pulse will be generated. + * @arg TIM_OPMODE_REPETITIVE: Repetitive pulses will be generated. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OnePulse_Init(TIM_HandleTypeDef *htim, uint32_t OnePulseMode) +{ + /* Check the TIM handle allocation */ + if (htim == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); + assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_OPM_MODE(OnePulseMode)); + assert_param(IS_TIM_PERIOD(htim, htim->Init.Period)); + assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); + + if (htim->State == HAL_TIM_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + htim->Lock = HAL_UNLOCKED; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + /* Reset interrupt callbacks to legacy weak callbacks */ + TIM_ResetCallback(htim); + + if (htim->OnePulse_MspInitCallback == NULL) + { + htim->OnePulse_MspInitCallback = HAL_TIM_OnePulse_MspInit; + } + /* Init the low level hardware : GPIO, CLOCK, NVIC */ + htim->OnePulse_MspInitCallback(htim); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC and DMA */ + HAL_TIM_OnePulse_MspInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Configure the Time base in the One Pulse Mode */ + TIM_Base_SetConfig(htim->Instance, &htim->Init); + + /* Reset the OPM Bit */ + htim->Instance->CR1 &= ~TIM_CR1_OPM; + + /* Configure the OPM Mode */ + htim->Instance->CR1 |= OnePulseMode; + + /* Initialize the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + + /* Initialize the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + + /* Initialize the TIM state*/ + htim->State = HAL_TIM_STATE_READY; + + return HAL_OK; +} + +/** + * @brief DeInitializes the TIM One Pulse + * @param htim TIM One Pulse handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OnePulse_DeInit(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Disable the TIM Peripheral Clock */ + __HAL_TIM_DISABLE(htim); + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + if (htim->OnePulse_MspDeInitCallback == NULL) + { + htim->OnePulse_MspDeInitCallback = HAL_TIM_OnePulse_MspDeInit; + } + /* DeInit the low level hardware */ + 
htim->OnePulse_MspDeInitCallback(htim); +#else + /* DeInit the low level hardware: GPIO, CLOCK, NVIC */ + HAL_TIM_OnePulse_MspDeInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_RESET; + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_RESET); + + /* Change TIM state */ + htim->State = HAL_TIM_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Initializes the TIM One Pulse MSP. + * @param htim TIM One Pulse handle + * @retval None + */ +__weak void HAL_TIM_OnePulse_MspInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_OnePulse_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes TIM One Pulse MSP. + * @param htim TIM One Pulse handle + * @retval None + */ +__weak void HAL_TIM_OnePulse_MspDeInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_OnePulse_MspDeInit could be implemented in the user file + */ +} + +/** + * @brief Starts the TIM One Pulse signal generation. + * @note Though OutputChannel parameter is deprecated and ignored by the function + * it has been kept to avoid HAL_TIM API compatibility break. + * @note The pulse output channel is determined when calling + * @ref HAL_TIM_OnePulse_ConfigChannel(). 
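+  * @note   Illustrative usage sketch (not part of the driver): assuming a user-defined
+  *         handle htim1 initialized with HAL_TIM_OnePulse_Init() in TIM_OPMODE_SINGLE and
+  *         channels configured with HAL_TIM_OnePulse_ConfigChannel() (channel 1 as output,
+  *         channel 2 as trigger input), the pulse generation could be armed as follows:
+  * @code
+  *         if (HAL_TIM_OnePulse_Start(&htim1, TIM_CHANNEL_1) != HAL_OK)
+  *         {
+  *           // handle the error in application code
+  *         }
+  *         // the counter starts on the next active edge on the channel 2 input and a
+  *         // single pulse is produced on the channel 1 output
+  * @endcode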
+ * @param htim TIM One Pulse handle + * @param OutputChannel See note above + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OnePulse_Start(TIM_HandleTypeDef *htim, uint32_t OutputChannel) +{ + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + + /* Prevent unused argument(s) compilation warning */ + UNUSED(OutputChannel); + + /* Check the TIM channels state */ + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + + /* Enable the Capture compare and the Input Capture channels + (in the OPM Mode the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) + if TIM_CHANNEL_1 is used as output, the TIM_CHANNEL_2 will be used as input and + if TIM_CHANNEL_1 is used as input, the TIM_CHANNEL_2 will be used as output + whatever the combination, the TIM_CHANNEL_1 and TIM_CHANNEL_2 should be enabled together + + No need to enable the counter, it's enabled automatically by hardware + (the counter starts in response to a stimulus and generate a pulse */ + + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM One Pulse signal generation. + * @note Though OutputChannel parameter is deprecated and ignored by the function + * it has been kept to avoid HAL_TIM API compatibility break. + * @note The pulse output channel is determined when calling + * @ref HAL_TIM_OnePulse_ConfigChannel(). 
+ * @param htim TIM One Pulse handle + * @param OutputChannel See note above + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OnePulse_Stop(TIM_HandleTypeDef *htim, uint32_t OutputChannel) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(OutputChannel); + + /* Disable the Capture compare and the Input Capture channels + (in the OPM Mode the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) + if TIM_CHANNEL_1 is used as output, the TIM_CHANNEL_2 will be used as input and + if TIM_CHANNEL_1 is used as input, the TIM_CHANNEL_2 will be used as output + whatever the combination, the TIM_CHANNEL_1 and TIM_CHANNEL_2 should be disabled together */ + + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM One Pulse signal generation in interrupt mode. + * @note Though OutputChannel parameter is deprecated and ignored by the function + * it has been kept to avoid HAL_TIM API compatibility break. + * @note The pulse output channel is determined when calling + * @ref HAL_TIM_OnePulse_ConfigChannel(). + * @param htim TIM One Pulse handle + * @param OutputChannel See note above + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OnePulse_Start_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel) +{ + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + + /* Prevent unused argument(s) compilation warning */ + UNUSED(OutputChannel); + + /* Check the TIM channels state */ + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + + /* Enable the Capture compare and the Input Capture channels + (in the OPM Mode the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) + if TIM_CHANNEL_1 is used as output, the TIM_CHANNEL_2 will be used as input and + if TIM_CHANNEL_1 is used as input, the TIM_CHANNEL_2 will be used as output + whatever the combination, the TIM_CHANNEL_1 and TIM_CHANNEL_2 should be enabled together + + No need to enable the counter, it's enabled 
automatically by hardware + (the counter starts in response to a stimulus and generate a pulse */ + + /* Enable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + + /* Enable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM One Pulse signal generation in interrupt mode. + * @note Though OutputChannel parameter is deprecated and ignored by the function + * it has been kept to avoid HAL_TIM API compatibility break. + * @note The pulse output channel is determined when calling + * @ref HAL_TIM_OnePulse_ConfigChannel(). + * @param htim TIM One Pulse handle + * @param OutputChannel See note above + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OnePulse_Stop_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(OutputChannel); + + /* Disable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + + /* Disable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + + /* Disable the Capture compare and the Input Capture channels + (in the OPM Mode the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) + if TIM_CHANNEL_1 is used as output, the TIM_CHANNEL_2 will be used as input and + if TIM_CHANNEL_1 is used as input, the TIM_CHANNEL_2 will be used as output + whatever the combination, the TIM_CHANNEL_1 and TIM_CHANNEL_2 should be disabled together */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group6 TIM Encoder functions + * @brief TIM Encoder functions + * +@verbatim + ============================================================================== + ##### TIM Encoder functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Initialize and configure the TIM Encoder. + (+) De-initialize the TIM Encoder. + (+) Start the TIM Encoder. + (+) Stop the TIM Encoder. + (+) Start the TIM Encoder and enable interrupt. + (+) Stop the TIM Encoder and disable interrupt. + (+) Start the TIM Encoder and enable DMA transfer. + (+) Stop the TIM Encoder and disable DMA transfer. + +@endverbatim + * @{ + */ +/** + * @brief Initializes the TIM Encoder Interface and initialize the associated handle. 
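+  * @note   Illustrative configuration sketch (not part of the driver): assuming a
+  *         user-defined handle htim3 bound to a timer instance whose two encoder inputs
+  *         are routed by the MSP init code, a typical quadrature setup could look as
+  *         follows (field values are an assumption, not a requirement):
+  * @code
+  *         // htim3.Init (Prescaler, Period, ...) is assumed to be filled in beforehand
+  *         TIM_Encoder_InitTypeDef sEncoder = {0};
+  *
+  *         sEncoder.EncoderMode  = TIM_ENCODERMODE_TI12;   // count on both TI1 and TI2
+  *         sEncoder.IC1Selection = TIM_ICSELECTION_DIRECTTI;
+  *         sEncoder.IC2Selection = TIM_ICSELECTION_DIRECTTI;
+  *         sEncoder.IC1Polarity  = TIM_ICPOLARITY_RISING;
+  *         sEncoder.IC2Polarity  = TIM_ICPOLARITY_RISING;
+  *         sEncoder.IC1Prescaler = TIM_ICPSC_DIV1;
+  *         sEncoder.IC2Prescaler = TIM_ICPSC_DIV1;
+  *         sEncoder.IC1Filter    = 0;
+  *         sEncoder.IC2Filter    = 0;
+  *
+  *         if (HAL_TIM_Encoder_Init(&htim3, &sEncoder) != HAL_OK)
+  *         {
+  *           // handle the error in application code
+  *         }
+  * @endcode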
+ * @note Switching from Center Aligned counter mode to Edge counter mode (or reverse) + * requires a timer reset to avoid unexpected direction + * due to DIR bit readonly in center aligned mode. + * Ex: call @ref HAL_TIM_Encoder_DeInit() before HAL_TIM_Encoder_Init() + * @note Encoder mode and External clock mode 2 are not compatible and must not be selected together + * Ex: A call for @ref HAL_TIM_Encoder_Init will erase the settings of @ref HAL_TIM_ConfigClockSource + * using TIM_CLOCKSOURCE_ETRMODE2 and vice versa + * @note When the timer instance is initialized in Encoder mode, timer + * channels 1 and channel 2 are reserved and cannot be used for other + * purpose. + * @param htim TIM Encoder Interface handle + * @param sConfig TIM Encoder Interface configuration structure + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Encoder_Init(TIM_HandleTypeDef *htim, const TIM_Encoder_InitTypeDef *sConfig) +{ + uint32_t tmpsmcr; + uint32_t tmpccmr1; + uint32_t tmpccer; + + /* Check the TIM handle allocation */ + if (htim == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); + assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); + assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); + assert_param(IS_TIM_ENCODER_MODE(sConfig->EncoderMode)); + assert_param(IS_TIM_IC_SELECTION(sConfig->IC1Selection)); + assert_param(IS_TIM_IC_SELECTION(sConfig->IC2Selection)); + assert_param(IS_TIM_ENCODERINPUT_POLARITY(sConfig->IC1Polarity)); + assert_param(IS_TIM_ENCODERINPUT_POLARITY(sConfig->IC2Polarity)); + assert_param(IS_TIM_IC_PRESCALER(sConfig->IC1Prescaler)); + assert_param(IS_TIM_IC_PRESCALER(sConfig->IC2Prescaler)); + assert_param(IS_TIM_IC_FILTER(sConfig->IC1Filter)); + assert_param(IS_TIM_IC_FILTER(sConfig->IC2Filter)); + assert_param(IS_TIM_PERIOD(htim, htim->Init.Period)); + + if (htim->State == HAL_TIM_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + htim->Lock = HAL_UNLOCKED; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + /* Reset interrupt callbacks to legacy weak callbacks */ + TIM_ResetCallback(htim); + + if (htim->Encoder_MspInitCallback == NULL) + { + htim->Encoder_MspInitCallback = HAL_TIM_Encoder_MspInit; + } + /* Init the low level hardware : GPIO, CLOCK, NVIC */ + htim->Encoder_MspInitCallback(htim); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC and DMA */ + HAL_TIM_Encoder_MspInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Reset the SMS and ECE bits */ + htim->Instance->SMCR &= ~(TIM_SMCR_SMS | TIM_SMCR_ECE); + + /* Configure the Time base in the Encoder Mode */ + TIM_Base_SetConfig(htim->Instance, &htim->Init); + + /* Get the TIMx SMCR register value */ + tmpsmcr = htim->Instance->SMCR; + + /* Get the TIMx CCMR1 register value */ + tmpccmr1 = htim->Instance->CCMR1; + + /* Get the TIMx CCER register value */ + tmpccer = htim->Instance->CCER; + + /* Set the encoder Mode */ + tmpsmcr |= sConfig->EncoderMode; + + /* Select the Capture Compare 1 and the Capture Compare 2 as input */ + tmpccmr1 &= ~(TIM_CCMR1_CC1S | TIM_CCMR1_CC2S); + tmpccmr1 |= (sConfig->IC1Selection | (sConfig->IC2Selection << 8U)); + + /* Set the Capture Compare 1 and the Capture Compare 2 prescalers and filters */ + tmpccmr1 &= ~(TIM_CCMR1_IC1PSC | TIM_CCMR1_IC2PSC); + tmpccmr1 &= ~(TIM_CCMR1_IC1F | TIM_CCMR1_IC2F); + tmpccmr1 
|= sConfig->IC1Prescaler | (sConfig->IC2Prescaler << 8U); + tmpccmr1 |= (sConfig->IC1Filter << 4U) | (sConfig->IC2Filter << 12U); + + /* Set the TI1 and the TI2 Polarities */ + tmpccer &= ~(TIM_CCER_CC1P | TIM_CCER_CC2P); + tmpccer &= ~(TIM_CCER_CC1NP | TIM_CCER_CC2NP); + tmpccer |= sConfig->IC1Polarity | (sConfig->IC2Polarity << 4U); + + /* Write to TIMx SMCR */ + htim->Instance->SMCR = tmpsmcr; + + /* Write to TIMx CCMR1 */ + htim->Instance->CCMR1 = tmpccmr1; + + /* Write to TIMx CCER */ + htim->Instance->CCER = tmpccer; + + /* Initialize the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + + /* Initialize the TIM state*/ + htim->State = HAL_TIM_STATE_READY; + + return HAL_OK; +} + + +/** + * @brief DeInitializes the TIM Encoder interface + * @param htim TIM Encoder Interface handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Encoder_DeInit(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Disable the TIM Peripheral Clock */ + __HAL_TIM_DISABLE(htim); + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + if (htim->Encoder_MspDeInitCallback == NULL) + { + htim->Encoder_MspDeInitCallback = HAL_TIM_Encoder_MspDeInit; + } + /* DeInit the low level hardware */ + htim->Encoder_MspDeInitCallback(htim); +#else + /* DeInit the low level hardware: GPIO, CLOCK, NVIC */ + HAL_TIM_Encoder_MspDeInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_RESET; + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_RESET); + + /* Change TIM state */ + htim->State = HAL_TIM_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Initializes the TIM Encoder Interface MSP. + * @param htim TIM Encoder Interface handle + * @retval None + */ +__weak void HAL_TIM_Encoder_MspInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_Encoder_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes TIM Encoder Interface MSP. + * @param htim TIM Encoder Interface handle + * @retval None + */ +__weak void HAL_TIM_Encoder_MspDeInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_Encoder_MspDeInit could be implemented in the user file + */ +} + +/** + * @brief Starts the TIM Encoder Interface. 
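+  * @note   Illustrative usage sketch (not part of the driver): assuming a user-defined
+  *         handle htim3 already configured with HAL_TIM_Encoder_Init(), the position count
+  *         could be started and read back as follows:
+  * @code
+  *         if (HAL_TIM_Encoder_Start(&htim3, TIM_CHANNEL_ALL) != HAL_OK)
+  *         {
+  *           // handle the error in application code
+  *         }
+  *
+  *         uint32_t position = __HAL_TIM_GET_COUNTER(&htim3);   // current encoder count
+  * @endcode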
+ * @param htim TIM Encoder Interface handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_ALL: TIM Channel 1 and TIM Channel 2 are selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Encoder_Start(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + + /* Check the parameters */ + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); + + /* Set the TIM channel(s) state */ + if (Channel == TIM_CHANNEL_1) + { + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else if (Channel == TIM_CHANNEL_2) + { + if ((channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + + /* Enable the encoder interface channels */ + switch (Channel) + { + case TIM_CHANNEL_1: + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + break; + } + + case TIM_CHANNEL_2: + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + break; + } + + default : + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + break; + } + } + /* Enable the Peripheral */ + __HAL_TIM_ENABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Encoder Interface. 
+ * @param htim TIM Encoder Interface handle + * @param Channel TIM Channels to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_ALL: TIM Channel 1 and TIM Channel 2 are selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Encoder_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); + + /* Disable the Input Capture channels 1 and 2 + (in the EncoderInterface the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) */ + switch (Channel) + { + case TIM_CHANNEL_1: + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + break; + } + + case TIM_CHANNEL_2: + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); + break; + } + + default : + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); + break; + } + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel(s) state */ + if ((Channel == TIM_CHANNEL_1) || (Channel == TIM_CHANNEL_2)) + { + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Encoder Interface in interrupt mode. 
+ * @param htim TIM Encoder Interface handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_ALL: TIM Channel 1 and TIM Channel 2 are selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Encoder_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + + /* Check the parameters */ + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); + + /* Set the TIM channel(s) state */ + if (Channel == TIM_CHANNEL_1) + { + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else if (Channel == TIM_CHANNEL_2) + { + if ((channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + + /* Enable the encoder interface channels */ + /* Enable the capture compare Interrupts 1 and/or 2 */ + switch (Channel) + { + case TIM_CHANNEL_1: + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + break; + } + + default : + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + break; + } + } + + /* Enable the Peripheral */ + __HAL_TIM_ENABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Encoder Interface in interrupt mode. 
+ * @param htim TIM Encoder Interface handle + * @param Channel TIM Channels to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_ALL: TIM Channel 1 and TIM Channel 2 are selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Encoder_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); + + /* Disable the Input Capture channels 1 and 2 + (in the EncoderInterface the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) */ + if (Channel == TIM_CHANNEL_1) + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + + /* Disable the capture compare Interrupts 1 */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + } + else if (Channel == TIM_CHANNEL_2) + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); + + /* Disable the capture compare Interrupts 2 */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + } + else + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); + + /* Disable the capture compare Interrupts 1 and 2 */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel(s) state */ + if ((Channel == TIM_CHANNEL_1) || (Channel == TIM_CHANNEL_2)) + { + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Encoder Interface in DMA mode. + * @param htim TIM Encoder Interface handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_ALL: TIM Channel 1 and TIM Channel 2 are selected + * @param pData1 The destination Buffer address for IC1. + * @param pData2 The destination Buffer address for IC2. + * @param Length The length of data to be transferred from TIM peripheral to memory. 
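+  * @note   Illustrative usage sketch (not part of the driver): assuming a user-defined
+  *         handle htim3 whose hdma[TIM_DMA_ID_CC1] and hdma[TIM_DMA_ID_CC2] links have
+  *         already been set up by the MSP init code, both capture registers could be
+  *         sampled to memory as follows:
+  * @code
+  *         static uint32_t enc_ic1[16];      // destination buffers (assumed)
+  *         static uint32_t enc_ic2[16];
+  *
+  *         if (HAL_TIM_Encoder_Start_DMA(&htim3, TIM_CHANNEL_ALL, enc_ic1, enc_ic2, 16) != HAL_OK)
+  *         {
+  *           // handle the error in application code
+  *         }
+  * @endcode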
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Encoder_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData1, + uint32_t *pData2, uint16_t Length) +{ + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + + /* Check the parameters */ + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); + + /* Set the TIM channel(s) state */ + if (Channel == TIM_CHANNEL_1) + { + if ((channel_1_state == HAL_TIM_CHANNEL_STATE_BUSY) + || (complementary_channel_1_state == HAL_TIM_CHANNEL_STATE_BUSY)) + { + return HAL_BUSY; + } + else if ((channel_1_state == HAL_TIM_CHANNEL_STATE_READY) + && (complementary_channel_1_state == HAL_TIM_CHANNEL_STATE_READY)) + { + if ((pData1 == NULL) || (Length == 0U)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + return HAL_ERROR; + } + } + else if (Channel == TIM_CHANNEL_2) + { + if ((channel_2_state == HAL_TIM_CHANNEL_STATE_BUSY) + || (complementary_channel_2_state == HAL_TIM_CHANNEL_STATE_BUSY)) + { + return HAL_BUSY; + } + else if ((channel_2_state == HAL_TIM_CHANNEL_STATE_READY) + && (complementary_channel_2_state == HAL_TIM_CHANNEL_STATE_READY)) + { + if ((pData2 == NULL) || (Length == 0U)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + return HAL_ERROR; + } + } + else + { + if ((channel_1_state == HAL_TIM_CHANNEL_STATE_BUSY) + || (channel_2_state == HAL_TIM_CHANNEL_STATE_BUSY) + || (complementary_channel_1_state == HAL_TIM_CHANNEL_STATE_BUSY) + || (complementary_channel_2_state == HAL_TIM_CHANNEL_STATE_BUSY)) + { + return HAL_BUSY; + } + else if ((channel_1_state == HAL_TIM_CHANNEL_STATE_READY) + && (channel_2_state == HAL_TIM_CHANNEL_STATE_READY) + && (complementary_channel_1_state == HAL_TIM_CHANNEL_STATE_READY) + && (complementary_channel_2_state == HAL_TIM_CHANNEL_STATE_READY)) + { + if ((((pData1 == NULL) || (pData2 == NULL))) || (Length == 0U)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + return HAL_ERROR; + } + } + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)&htim->Instance->CCR1, (uint32_t)pData1, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Input Capture DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + + /* Enable the 
Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + + /* Enable the Peripheral */ + __HAL_TIM_ENABLE(htim); + + break; + } + + case TIM_CHANNEL_2: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError; + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)&htim->Instance->CCR2, (uint32_t)pData2, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Input Capture DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC2); + + /* Enable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + + /* Enable the Peripheral */ + __HAL_TIM_ENABLE(htim); + + break; + } + + default: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)&htim->Instance->CCR1, (uint32_t)pData1, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)&htim->Instance->CCR2, (uint32_t)pData2, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + + /* Enable the TIM Input Capture DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + /* Enable the TIM Input Capture DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC2); + + /* Enable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + + /* Enable the Peripheral */ + __HAL_TIM_ENABLE(htim); + + break; + } + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Encoder Interface in DMA mode. 
+ * @param htim TIM Encoder Interface handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_ALL: TIM Channel 1 and TIM Channel 2 are selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Encoder_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); + + /* Disable the Input Capture channels 1 and 2 + (in the EncoderInterface the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) */ + if (Channel == TIM_CHANNEL_1) + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + + /* Disable the capture compare DMA Request 1 */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + } + else if (Channel == TIM_CHANNEL_2) + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); + + /* Disable the capture compare DMA Request 2 */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC2); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + } + else + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); + + /* Disable the capture compare DMA Request 1 and 2 */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC2); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel(s) state */ + if ((Channel == TIM_CHANNEL_1) || (Channel == TIM_CHANNEL_2)) + { + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ +/** @defgroup TIM_Exported_Functions_Group7 TIM IRQ handler management + * @brief TIM IRQ handler management + * +@verbatim + ============================================================================== + ##### IRQ handler management ##### + ============================================================================== + [..] + This section provides Timer IRQ handler function. + +@endverbatim + * @{ + */ +/** + * @brief This function handles TIM interrupts requests. 
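+  * @note   Illustrative wiring sketch (not part of the driver): this handler is normally
+  *         called from the vector-table ISR in the user's stm32f4xx_it.c, and events are
+  *         then delivered through the weak callbacks; TIM2 and the htim2 handle below are
+  *         assumptions:
+  * @code
+  *         void TIM2_IRQHandler(void)
+  *         {
+  *           HAL_TIM_IRQHandler(&htim2);
+  *         }
+  *
+  *         // user override of the weak update-event callback
+  *         void HAL_TIM_PeriodElapsedCallback(TIM_HandleTypeDef *htim)
+  *         {
+  *           if (htim->Instance == TIM2)
+  *           {
+  *             // periodic work on each update event
+  *           }
+  *         }
+  * @endcode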
+ * @param htim TIM handle + * @retval None + */ +void HAL_TIM_IRQHandler(TIM_HandleTypeDef *htim) +{ + uint32_t itsource = htim->Instance->DIER; + uint32_t itflag = htim->Instance->SR; + + /* Capture compare 1 event */ + if ((itflag & (TIM_FLAG_CC1)) == (TIM_FLAG_CC1)) + { + if ((itsource & (TIM_IT_CC1)) == (TIM_IT_CC1)) + { + { + __HAL_TIM_CLEAR_FLAG(htim, TIM_FLAG_CC1); + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + + /* Input capture event */ + if ((htim->Instance->CCMR1 & TIM_CCMR1_CC1S) != 0x00U) + { +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->IC_CaptureCallback(htim); +#else + HAL_TIM_IC_CaptureCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + /* Output compare event */ + else + { +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->OC_DelayElapsedCallback(htim); + htim->PWM_PulseFinishedCallback(htim); +#else + HAL_TIM_OC_DelayElapsedCallback(htim); + HAL_TIM_PWM_PulseFinishedCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; + } + } + } + /* Capture compare 2 event */ + if ((itflag & (TIM_FLAG_CC2)) == (TIM_FLAG_CC2)) + { + if ((itsource & (TIM_IT_CC2)) == (TIM_IT_CC2)) + { + __HAL_TIM_CLEAR_FLAG(htim, TIM_FLAG_CC2); + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + /* Input capture event */ + if ((htim->Instance->CCMR1 & TIM_CCMR1_CC2S) != 0x00U) + { +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->IC_CaptureCallback(htim); +#else + HAL_TIM_IC_CaptureCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + /* Output compare event */ + else + { +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->OC_DelayElapsedCallback(htim); + htim->PWM_PulseFinishedCallback(htim); +#else + HAL_TIM_OC_DelayElapsedCallback(htim); + HAL_TIM_PWM_PulseFinishedCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; + } + } + /* Capture compare 3 event */ + if ((itflag & (TIM_FLAG_CC3)) == (TIM_FLAG_CC3)) + { + if ((itsource & (TIM_IT_CC3)) == (TIM_IT_CC3)) + { + __HAL_TIM_CLEAR_FLAG(htim, TIM_FLAG_CC3); + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + /* Input capture event */ + if ((htim->Instance->CCMR2 & TIM_CCMR2_CC3S) != 0x00U) + { +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->IC_CaptureCallback(htim); +#else + HAL_TIM_IC_CaptureCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + /* Output compare event */ + else + { +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->OC_DelayElapsedCallback(htim); + htim->PWM_PulseFinishedCallback(htim); +#else + HAL_TIM_OC_DelayElapsedCallback(htim); + HAL_TIM_PWM_PulseFinishedCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; + } + } + /* Capture compare 4 event */ + if ((itflag & (TIM_FLAG_CC4)) == (TIM_FLAG_CC4)) + { + if ((itsource & (TIM_IT_CC4)) == (TIM_IT_CC4)) + { + __HAL_TIM_CLEAR_FLAG(htim, TIM_FLAG_CC4); + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_4; + /* Input capture event */ + if ((htim->Instance->CCMR2 & TIM_CCMR2_CC4S) != 0x00U) + { +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->IC_CaptureCallback(htim); +#else + HAL_TIM_IC_CaptureCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + /* Output compare event */ + else + { +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->OC_DelayElapsedCallback(htim); + htim->PWM_PulseFinishedCallback(htim); +#else + HAL_TIM_OC_DelayElapsedCallback(htim); + HAL_TIM_PWM_PulseFinishedCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + htim->Channel = 
HAL_TIM_ACTIVE_CHANNEL_CLEARED; + } + } + /* TIM Update event */ + if ((itflag & (TIM_FLAG_UPDATE)) == (TIM_FLAG_UPDATE)) + { + if ((itsource & (TIM_IT_UPDATE)) == (TIM_IT_UPDATE)) + { + __HAL_TIM_CLEAR_FLAG(htim, TIM_FLAG_UPDATE); +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->PeriodElapsedCallback(htim); +#else + HAL_TIM_PeriodElapsedCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + } + /* TIM Break input event */ + if ((itflag & (TIM_FLAG_BREAK)) == (TIM_FLAG_BREAK)) + { + if ((itsource & (TIM_IT_BREAK)) == (TIM_IT_BREAK)) + { + __HAL_TIM_CLEAR_FLAG(htim, TIM_FLAG_BREAK); +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->BreakCallback(htim); +#else + HAL_TIMEx_BreakCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + } + /* TIM Trigger detection event */ + if ((itflag & (TIM_FLAG_TRIGGER)) == (TIM_FLAG_TRIGGER)) + { + if ((itsource & (TIM_IT_TRIGGER)) == (TIM_IT_TRIGGER)) + { + __HAL_TIM_CLEAR_FLAG(htim, TIM_FLAG_TRIGGER); +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->TriggerCallback(htim); +#else + HAL_TIM_TriggerCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + } + /* TIM commutation event */ + if ((itflag & (TIM_FLAG_COM)) == (TIM_FLAG_COM)) + { + if ((itsource & (TIM_IT_COM)) == (TIM_IT_COM)) + { + __HAL_TIM_CLEAR_FLAG(htim, TIM_FLAG_COM); +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->CommutationCallback(htim); +#else + HAL_TIMEx_CommutCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + } +} + +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group8 TIM Peripheral Control functions + * @brief TIM Peripheral Control functions + * +@verbatim + ============================================================================== + ##### Peripheral Control functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Configure The Input Output channels for OC, PWM, IC or One Pulse mode. + (+) Configure External Clock source. + (+) Configure Complementary channels, break features and dead time. + (+) Configure Master and the Slave synchronization. + (+) Configure the DMA Burst Mode. + +@endverbatim + * @{ + */ + +/** + * @brief Initializes the TIM Output Compare Channels according to the specified + * parameters in the TIM_OC_InitTypeDef. 
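+  * @note   Illustrative configuration sketch (not part of the driver): assuming a
+  *         user-defined handle htim4 already initialized with HAL_TIM_OC_Init(), channel 1
+  *         could be set up as a toggled output as follows (values are an assumption):
+  * @code
+  *         TIM_OC_InitTypeDef sConfigOC = {0};
+  *
+  *         sConfigOC.OCMode     = TIM_OCMODE_TOGGLE;
+  *         sConfigOC.Pulse      = 1000;                    // compare match value
+  *         sConfigOC.OCPolarity = TIM_OCPOLARITY_HIGH;
+  *
+  *         if (HAL_TIM_OC_ConfigChannel(&htim4, &sConfigOC, TIM_CHANNEL_1) != HAL_OK)
+  *         {
+  *           // handle the error in application code
+  *         }
+  *         (void)HAL_TIM_OC_Start(&htim4, TIM_CHANNEL_1);  // start the output
+  * @endcode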
+ * @param htim TIM Output Compare handle + * @param sConfig TIM Output Compare configuration structure + * @param Channel TIM Channels to configure + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_ConfigChannel(TIM_HandleTypeDef *htim, + const TIM_OC_InitTypeDef *sConfig, + uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_CHANNELS(Channel)); + assert_param(IS_TIM_OC_MODE(sConfig->OCMode)); + assert_param(IS_TIM_OC_POLARITY(sConfig->OCPolarity)); + + /* Process Locked */ + __HAL_LOCK(htim); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Check the parameters */ + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + + /* Configure the TIM Channel 1 in Output Compare */ + TIM_OC1_SetConfig(htim->Instance, sConfig); + break; + } + + case TIM_CHANNEL_2: + { + /* Check the parameters */ + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + + /* Configure the TIM Channel 2 in Output Compare */ + TIM_OC2_SetConfig(htim->Instance, sConfig); + break; + } + + case TIM_CHANNEL_3: + { + /* Check the parameters */ + assert_param(IS_TIM_CC3_INSTANCE(htim->Instance)); + + /* Configure the TIM Channel 3 in Output Compare */ + TIM_OC3_SetConfig(htim->Instance, sConfig); + break; + } + + case TIM_CHANNEL_4: + { + /* Check the parameters */ + assert_param(IS_TIM_CC4_INSTANCE(htim->Instance)); + + /* Configure the TIM Channel 4 in Output Compare */ + TIM_OC4_SetConfig(htim->Instance, sConfig); + break; + } + + default: + status = HAL_ERROR; + break; + } + + __HAL_UNLOCK(htim); + + return status; +} + +/** + * @brief Initializes the TIM Input Capture Channels according to the specified + * parameters in the TIM_IC_InitTypeDef. 
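+  * @note   Illustrative configuration sketch (not part of the driver): assuming a
+  *         user-defined handle htim2 already initialized with HAL_TIM_IC_Init(), channel 1
+  *         could be configured to capture rising edges on TI1 as follows (values are an
+  *         assumption):
+  * @code
+  *         TIM_IC_InitTypeDef sConfigIC = {0};
+  *
+  *         sConfigIC.ICPolarity  = TIM_ICPOLARITY_RISING;
+  *         sConfigIC.ICSelection = TIM_ICSELECTION_DIRECTTI;   // channel mapped on TI1
+  *         sConfigIC.ICPrescaler = TIM_ICPSC_DIV1;             // capture every edge
+  *         sConfigIC.ICFilter    = 0;
+  *
+  *         if (HAL_TIM_IC_ConfigChannel(&htim2, &sConfigIC, TIM_CHANNEL_1) != HAL_OK)
+  *         {
+  *           // handle the error in application code
+  *         }
+  * @endcode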
+ * @param htim TIM IC handle + * @param sConfig TIM Input Capture configuration structure + * @param Channel TIM Channel to configure + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_ConfigChannel(TIM_HandleTypeDef *htim, const TIM_IC_InitTypeDef *sConfig, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + assert_param(IS_TIM_IC_POLARITY(sConfig->ICPolarity)); + assert_param(IS_TIM_IC_SELECTION(sConfig->ICSelection)); + assert_param(IS_TIM_IC_PRESCALER(sConfig->ICPrescaler)); + assert_param(IS_TIM_IC_FILTER(sConfig->ICFilter)); + + /* Process Locked */ + __HAL_LOCK(htim); + + if (Channel == TIM_CHANNEL_1) + { + /* TI1 Configuration */ + TIM_TI1_SetConfig(htim->Instance, + sConfig->ICPolarity, + sConfig->ICSelection, + sConfig->ICFilter); + + /* Reset the IC1PSC Bits */ + htim->Instance->CCMR1 &= ~TIM_CCMR1_IC1PSC; + + /* Set the IC1PSC value */ + htim->Instance->CCMR1 |= sConfig->ICPrescaler; + } + else if (Channel == TIM_CHANNEL_2) + { + /* TI2 Configuration */ + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + + TIM_TI2_SetConfig(htim->Instance, + sConfig->ICPolarity, + sConfig->ICSelection, + sConfig->ICFilter); + + /* Reset the IC2PSC Bits */ + htim->Instance->CCMR1 &= ~TIM_CCMR1_IC2PSC; + + /* Set the IC2PSC value */ + htim->Instance->CCMR1 |= (sConfig->ICPrescaler << 8U); + } + else if (Channel == TIM_CHANNEL_3) + { + /* TI3 Configuration */ + assert_param(IS_TIM_CC3_INSTANCE(htim->Instance)); + + TIM_TI3_SetConfig(htim->Instance, + sConfig->ICPolarity, + sConfig->ICSelection, + sConfig->ICFilter); + + /* Reset the IC3PSC Bits */ + htim->Instance->CCMR2 &= ~TIM_CCMR2_IC3PSC; + + /* Set the IC3PSC value */ + htim->Instance->CCMR2 |= sConfig->ICPrescaler; + } + else if (Channel == TIM_CHANNEL_4) + { + /* TI4 Configuration */ + assert_param(IS_TIM_CC4_INSTANCE(htim->Instance)); + + TIM_TI4_SetConfig(htim->Instance, + sConfig->ICPolarity, + sConfig->ICSelection, + sConfig->ICFilter); + + /* Reset the IC4PSC Bits */ + htim->Instance->CCMR2 &= ~TIM_CCMR2_IC4PSC; + + /* Set the IC4PSC value */ + htim->Instance->CCMR2 |= (sConfig->ICPrescaler << 8U); + } + else + { + status = HAL_ERROR; + } + + __HAL_UNLOCK(htim); + + return status; +} + +/** + * @brief Initializes the TIM PWM channels according to the specified + * parameters in the TIM_OC_InitTypeDef. 
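+ * @note A minimal usage sketch for roughly a 50 % duty cycle on channel 2;
+ *       htim4, its prior HAL_TIM_PWM_Init() call and Error_Handler() are
+ *       application-side assumptions:
+ * @code
+ *   TIM_OC_InitTypeDef sConfigOC = {0};
+ *   sConfigOC.OCMode     = TIM_OCMODE_PWM1;
+ *   sConfigOC.Pulse      = (htim4.Init.Period + 1U) / 2U;
+ *   sConfigOC.OCPolarity = TIM_OCPOLARITY_HIGH;
+ *   sConfigOC.OCFastMode = TIM_OCFAST_DISABLE;
+ *   if (HAL_TIM_PWM_ConfigChannel(&htim4, &sConfigOC, TIM_CHANNEL_2) != HAL_OK)
+ *   {
+ *     Error_Handler();
+ *   }
+ *   (void)HAL_TIM_PWM_Start(&htim4, TIM_CHANNEL_2);
+ * @endcode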
+ * @param htim TIM PWM handle + * @param sConfig TIM PWM configuration structure + * @param Channel TIM Channels to be configured + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_ConfigChannel(TIM_HandleTypeDef *htim, + const TIM_OC_InitTypeDef *sConfig, + uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_CHANNELS(Channel)); + assert_param(IS_TIM_PWM_MODE(sConfig->OCMode)); + assert_param(IS_TIM_OC_POLARITY(sConfig->OCPolarity)); + assert_param(IS_TIM_FAST_STATE(sConfig->OCFastMode)); + + /* Process Locked */ + __HAL_LOCK(htim); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Check the parameters */ + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + + /* Configure the Channel 1 in PWM mode */ + TIM_OC1_SetConfig(htim->Instance, sConfig); + + /* Set the Preload enable bit for channel1 */ + htim->Instance->CCMR1 |= TIM_CCMR1_OC1PE; + + /* Configure the Output Fast mode */ + htim->Instance->CCMR1 &= ~TIM_CCMR1_OC1FE; + htim->Instance->CCMR1 |= sConfig->OCFastMode; + break; + } + + case TIM_CHANNEL_2: + { + /* Check the parameters */ + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + + /* Configure the Channel 2 in PWM mode */ + TIM_OC2_SetConfig(htim->Instance, sConfig); + + /* Set the Preload enable bit for channel2 */ + htim->Instance->CCMR1 |= TIM_CCMR1_OC2PE; + + /* Configure the Output Fast mode */ + htim->Instance->CCMR1 &= ~TIM_CCMR1_OC2FE; + htim->Instance->CCMR1 |= sConfig->OCFastMode << 8U; + break; + } + + case TIM_CHANNEL_3: + { + /* Check the parameters */ + assert_param(IS_TIM_CC3_INSTANCE(htim->Instance)); + + /* Configure the Channel 3 in PWM mode */ + TIM_OC3_SetConfig(htim->Instance, sConfig); + + /* Set the Preload enable bit for channel3 */ + htim->Instance->CCMR2 |= TIM_CCMR2_OC3PE; + + /* Configure the Output Fast mode */ + htim->Instance->CCMR2 &= ~TIM_CCMR2_OC3FE; + htim->Instance->CCMR2 |= sConfig->OCFastMode; + break; + } + + case TIM_CHANNEL_4: + { + /* Check the parameters */ + assert_param(IS_TIM_CC4_INSTANCE(htim->Instance)); + + /* Configure the Channel 4 in PWM mode */ + TIM_OC4_SetConfig(htim->Instance, sConfig); + + /* Set the Preload enable bit for channel4 */ + htim->Instance->CCMR2 |= TIM_CCMR2_OC4PE; + + /* Configure the Output Fast mode */ + htim->Instance->CCMR2 &= ~TIM_CCMR2_OC4FE; + htim->Instance->CCMR2 |= sConfig->OCFastMode << 8U; + break; + } + + default: + status = HAL_ERROR; + break; + } + + __HAL_UNLOCK(htim); + + return status; +} + +/** + * @brief Initializes the TIM One Pulse Channels according to the specified + * parameters in the TIM_OnePulse_InitTypeDef. + * @param htim TIM One Pulse handle + * @param sConfig TIM One Pulse configuration structure + * @param OutputChannel TIM output channel to configure + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @param InputChannel TIM input Channel to configure + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @note To output a waveform with a minimum delay user can enable the fast + * mode by calling the @ref __HAL_TIM_ENABLE_OCxFAST macro. 
Then CCx + * output is forced in response to the edge detection on TIx input, + * without taking in account the comparison. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OnePulse_ConfigChannel(TIM_HandleTypeDef *htim, TIM_OnePulse_InitTypeDef *sConfig, + uint32_t OutputChannel, uint32_t InputChannel) +{ + HAL_StatusTypeDef status = HAL_OK; + TIM_OC_InitTypeDef temp1; + + /* Check the parameters */ + assert_param(IS_TIM_OPM_CHANNELS(OutputChannel)); + assert_param(IS_TIM_OPM_CHANNELS(InputChannel)); + + if (OutputChannel != InputChannel) + { + /* Process Locked */ + __HAL_LOCK(htim); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Extract the Output compare configuration from sConfig structure */ + temp1.OCMode = sConfig->OCMode; + temp1.Pulse = sConfig->Pulse; + temp1.OCPolarity = sConfig->OCPolarity; + temp1.OCNPolarity = sConfig->OCNPolarity; + temp1.OCIdleState = sConfig->OCIdleState; + temp1.OCNIdleState = sConfig->OCNIdleState; + + switch (OutputChannel) + { + case TIM_CHANNEL_1: + { + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + + TIM_OC1_SetConfig(htim->Instance, &temp1); + break; + } + + case TIM_CHANNEL_2: + { + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + + TIM_OC2_SetConfig(htim->Instance, &temp1); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + switch (InputChannel) + { + case TIM_CHANNEL_1: + { + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + + TIM_TI1_SetConfig(htim->Instance, sConfig->ICPolarity, + sConfig->ICSelection, sConfig->ICFilter); + + /* Reset the IC1PSC Bits */ + htim->Instance->CCMR1 &= ~TIM_CCMR1_IC1PSC; + + /* Select the Trigger source */ + htim->Instance->SMCR &= ~TIM_SMCR_TS; + htim->Instance->SMCR |= TIM_TS_TI1FP1; + + /* Select the Slave Mode */ + htim->Instance->SMCR &= ~TIM_SMCR_SMS; + htim->Instance->SMCR |= TIM_SLAVEMODE_TRIGGER; + break; + } + + case TIM_CHANNEL_2: + { + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + + TIM_TI2_SetConfig(htim->Instance, sConfig->ICPolarity, + sConfig->ICSelection, sConfig->ICFilter); + + /* Reset the IC2PSC Bits */ + htim->Instance->CCMR1 &= ~TIM_CCMR1_IC2PSC; + + /* Select the Trigger source */ + htim->Instance->SMCR &= ~TIM_SMCR_TS; + htim->Instance->SMCR |= TIM_TS_TI2FP2; + + /* Select the Slave Mode */ + htim->Instance->SMCR &= ~TIM_SMCR_SMS; + htim->Instance->SMCR |= TIM_SLAVEMODE_TRIGGER; + break; + } + + default: + status = HAL_ERROR; + break; + } + } + + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return status; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Configure the DMA Burst to transfer Data from the memory to the TIM peripheral + * @param htim TIM handle + * @param BurstBaseAddress TIM Base address from where the DMA will start the Data write + * This parameter can be one of the following values: + * @arg TIM_DMABASE_CR1 + * @arg TIM_DMABASE_CR2 + * @arg TIM_DMABASE_SMCR + * @arg TIM_DMABASE_DIER + * @arg TIM_DMABASE_SR + * @arg TIM_DMABASE_EGR + * @arg TIM_DMABASE_CCMR1 + * @arg TIM_DMABASE_CCMR2 + * @arg TIM_DMABASE_CCER + * @arg TIM_DMABASE_CNT + * @arg TIM_DMABASE_PSC + * @arg TIM_DMABASE_ARR + * @arg TIM_DMABASE_RCR + * @arg TIM_DMABASE_CCR1 + * @arg TIM_DMABASE_CCR2 + * @arg TIM_DMABASE_CCR3 + * @arg TIM_DMABASE_CCR4 + * @arg TIM_DMABASE_BDTR + * @param BurstRequestSrc TIM DMA Request sources + * This parameter can be one of the following values: + * @arg TIM_DMA_UPDATE: TIM update Interrupt source + * @arg TIM_DMA_CC1: TIM Capture Compare 1 DMA source + * @arg TIM_DMA_CC2: TIM Capture 
Compare 2 DMA source + * @arg TIM_DMA_CC3: TIM Capture Compare 3 DMA source + * @arg TIM_DMA_CC4: TIM Capture Compare 4 DMA source + * @arg TIM_DMA_COM: TIM Commutation DMA source + * @arg TIM_DMA_TRIGGER: TIM Trigger DMA source + * @param BurstBuffer The Buffer address. + * @param BurstLength DMA Burst length. This parameter can be one value + * between: TIM_DMABURSTLENGTH_1TRANSFER and TIM_DMABURSTLENGTH_18TRANSFERS. + * @note This function should be used only when BurstLength is equal to DMA data transfer length. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, const uint32_t *BurstBuffer, + uint32_t BurstLength) +{ + HAL_StatusTypeDef status; + + status = HAL_TIM_DMABurst_MultiWriteStart(htim, BurstBaseAddress, BurstRequestSrc, BurstBuffer, BurstLength, + ((BurstLength) >> 8U) + 1U); + + + + return status; +} + +/** + * @brief Configure the DMA Burst to transfer multiple Data from the memory to the TIM peripheral + * @param htim TIM handle + * @param BurstBaseAddress TIM Base address from where the DMA will start the Data write + * This parameter can be one of the following values: + * @arg TIM_DMABASE_CR1 + * @arg TIM_DMABASE_CR2 + * @arg TIM_DMABASE_SMCR + * @arg TIM_DMABASE_DIER + * @arg TIM_DMABASE_SR + * @arg TIM_DMABASE_EGR + * @arg TIM_DMABASE_CCMR1 + * @arg TIM_DMABASE_CCMR2 + * @arg TIM_DMABASE_CCER + * @arg TIM_DMABASE_CNT + * @arg TIM_DMABASE_PSC + * @arg TIM_DMABASE_ARR + * @arg TIM_DMABASE_RCR + * @arg TIM_DMABASE_CCR1 + * @arg TIM_DMABASE_CCR2 + * @arg TIM_DMABASE_CCR3 + * @arg TIM_DMABASE_CCR4 + * @arg TIM_DMABASE_BDTR + * @param BurstRequestSrc TIM DMA Request sources + * This parameter can be one of the following values: + * @arg TIM_DMA_UPDATE: TIM update Interrupt source + * @arg TIM_DMA_CC1: TIM Capture Compare 1 DMA source + * @arg TIM_DMA_CC2: TIM Capture Compare 2 DMA source + * @arg TIM_DMA_CC3: TIM Capture Compare 3 DMA source + * @arg TIM_DMA_CC4: TIM Capture Compare 4 DMA source + * @arg TIM_DMA_COM: TIM Commutation DMA source + * @arg TIM_DMA_TRIGGER: TIM Trigger DMA source + * @param BurstBuffer The Buffer address. + * @param BurstLength DMA Burst length. This parameter can be one value + * between: TIM_DMABURSTLENGTH_1TRANSFER and TIM_DMABURSTLENGTH_18TRANSFERS. + * @param DataLength Data length. This parameter can be one value + * between 1 and 0xFFFF. 
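+ * @note A minimal usage sketch: on every update event the DMA writes three words
+ *       (ARR, RCR, CCR1) through TIMx_DMAR, so a 6-word buffer is consumed in two
+ *       bursts. The handle htim1, Error_Handler() and the link of the update DMA
+ *       stream to htim1.hdma[TIM_DMA_ID_UPDATE] (usually done in
+ *       HAL_TIM_Base_MspInit()) are application-side assumptions:
+ * @code
+ *   static const uint32_t aBurstBuffer[6] = {799U, 0U, 400U, 599U, 0U, 300U};
+ *   if (HAL_TIM_DMABurst_MultiWriteStart(&htim1, TIM_DMABASE_ARR, TIM_DMA_UPDATE,
+ *                                        aBurstBuffer, TIM_DMABURSTLENGTH_3TRANSFERS,
+ *                                        6U) != HAL_OK)
+ *   {
+ *     Error_Handler();
+ *   }
+ *   (void)HAL_TIM_Base_Start(&htim1);
+ * @endcode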
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_DMABurst_MultiWriteStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, const uint32_t *BurstBuffer, + uint32_t BurstLength, uint32_t DataLength) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_DMABURST_INSTANCE(htim->Instance)); + assert_param(IS_TIM_DMA_BASE(BurstBaseAddress)); + assert_param(IS_TIM_DMA_SOURCE(BurstRequestSrc)); + assert_param(IS_TIM_DMA_LENGTH(BurstLength)); + assert_param(IS_TIM_DMA_DATA_LENGTH(DataLength)); + + if (htim->DMABurstState == HAL_DMA_BURST_STATE_BUSY) + { + return HAL_BUSY; + } + else if (htim->DMABurstState == HAL_DMA_BURST_STATE_READY) + { + if ((BurstBuffer == NULL) && (BurstLength > 0U)) + { + return HAL_ERROR; + } + else + { + htim->DMABurstState = HAL_DMA_BURST_STATE_BUSY; + } + } + else + { + /* nothing to do */ + } + + switch (BurstRequestSrc) + { + case TIM_DMA_UPDATE: + { + /* Set the DMA Period elapsed callbacks */ + htim->hdma[TIM_DMA_ID_UPDATE]->XferCpltCallback = TIM_DMAPeriodElapsedCplt; + htim->hdma[TIM_DMA_ID_UPDATE]->XferHalfCpltCallback = TIM_DMAPeriodElapsedHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_UPDATE]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_UPDATE], (uint32_t)BurstBuffer, + (uint32_t)&htim->Instance->DMAR, DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + case TIM_DMA_CC1: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)BurstBuffer, + (uint32_t)&htim->Instance->DMAR, DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + case TIM_DMA_CC2: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)BurstBuffer, + (uint32_t)&htim->Instance->DMAR, DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + case TIM_DMA_CC3: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC3]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)BurstBuffer, + (uint32_t)&htim->Instance->DMAR, DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + case TIM_DMA_CC4: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC4]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC4]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC4]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if 
(HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC4], (uint32_t)BurstBuffer, + (uint32_t)&htim->Instance->DMAR, DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + case TIM_DMA_COM: + { + /* Set the DMA commutation callbacks */ + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferCpltCallback = TIMEx_DMACommutationCplt; + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferHalfCpltCallback = TIMEx_DMACommutationHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_COMMUTATION], (uint32_t)BurstBuffer, + (uint32_t)&htim->Instance->DMAR, DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + case TIM_DMA_TRIGGER: + { + /* Set the DMA trigger callbacks */ + htim->hdma[TIM_DMA_ID_TRIGGER]->XferCpltCallback = TIM_DMATriggerCplt; + htim->hdma[TIM_DMA_ID_TRIGGER]->XferHalfCpltCallback = TIM_DMATriggerHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_TRIGGER]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_TRIGGER], (uint32_t)BurstBuffer, + (uint32_t)&htim->Instance->DMAR, DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Configure the DMA Burst Mode */ + htim->Instance->DCR = (BurstBaseAddress | BurstLength); + /* Enable the TIM DMA Request */ + __HAL_TIM_ENABLE_DMA(htim, BurstRequestSrc); + } + + /* Return function status */ + return status; +} + +/** + * @brief Stops the TIM DMA Burst mode + * @param htim TIM handle + * @param BurstRequestSrc TIM DMA Request sources to disable + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStop(TIM_HandleTypeDef *htim, uint32_t BurstRequestSrc) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_DMA_SOURCE(BurstRequestSrc)); + + /* Abort the DMA transfer (at least disable the DMA stream) */ + switch (BurstRequestSrc) + { + case TIM_DMA_UPDATE: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_UPDATE]); + break; + } + case TIM_DMA_CC1: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + break; + } + case TIM_DMA_CC2: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + break; + } + case TIM_DMA_CC3: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); + break; + } + case TIM_DMA_CC4: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC4]); + break; + } + case TIM_DMA_COM: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_COMMUTATION]); + break; + } + case TIM_DMA_TRIGGER: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_TRIGGER]); + break; + } + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Disable the TIM Update DMA request */ + __HAL_TIM_DISABLE_DMA(htim, BurstRequestSrc); + + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + } + + /* Return function status */ + return status; +} + +/** + * @brief Configure the DMA Burst to transfer Data from the TIM peripheral to the memory + * @param htim TIM handle + * @param BurstBaseAddress TIM Base address from where the DMA will start the Data read + * This parameter can be one of the following values: + * @arg TIM_DMABASE_CR1 + * @arg TIM_DMABASE_CR2 + * @arg TIM_DMABASE_SMCR + * @arg TIM_DMABASE_DIER + * @arg TIM_DMABASE_SR + * @arg TIM_DMABASE_EGR + * @arg 
TIM_DMABASE_CCMR1 + * @arg TIM_DMABASE_CCMR2 + * @arg TIM_DMABASE_CCER + * @arg TIM_DMABASE_CNT + * @arg TIM_DMABASE_PSC + * @arg TIM_DMABASE_ARR + * @arg TIM_DMABASE_RCR + * @arg TIM_DMABASE_CCR1 + * @arg TIM_DMABASE_CCR2 + * @arg TIM_DMABASE_CCR3 + * @arg TIM_DMABASE_CCR4 + * @arg TIM_DMABASE_BDTR + * @param BurstRequestSrc TIM DMA Request sources + * This parameter can be one of the following values: + * @arg TIM_DMA_UPDATE: TIM update Interrupt source + * @arg TIM_DMA_CC1: TIM Capture Compare 1 DMA source + * @arg TIM_DMA_CC2: TIM Capture Compare 2 DMA source + * @arg TIM_DMA_CC3: TIM Capture Compare 3 DMA source + * @arg TIM_DMA_CC4: TIM Capture Compare 4 DMA source + * @arg TIM_DMA_COM: TIM Commutation DMA source + * @arg TIM_DMA_TRIGGER: TIM Trigger DMA source + * @param BurstBuffer The Buffer address. + * @param BurstLength DMA Burst length. This parameter can be one value + * between: TIM_DMABURSTLENGTH_1TRANSFER and TIM_DMABURSTLENGTH_18TRANSFERS. + * @note This function should be used only when BurstLength is equal to DMA data transfer length. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, uint32_t *BurstBuffer, uint32_t BurstLength) +{ + HAL_StatusTypeDef status; + + status = HAL_TIM_DMABurst_MultiReadStart(htim, BurstBaseAddress, BurstRequestSrc, BurstBuffer, BurstLength, + ((BurstLength) >> 8U) + 1U); + + + return status; +} + +/** + * @brief Configure the DMA Burst to transfer Data from the TIM peripheral to the memory + * @param htim TIM handle + * @param BurstBaseAddress TIM Base address from where the DMA will start the Data read + * This parameter can be one of the following values: + * @arg TIM_DMABASE_CR1 + * @arg TIM_DMABASE_CR2 + * @arg TIM_DMABASE_SMCR + * @arg TIM_DMABASE_DIER + * @arg TIM_DMABASE_SR + * @arg TIM_DMABASE_EGR + * @arg TIM_DMABASE_CCMR1 + * @arg TIM_DMABASE_CCMR2 + * @arg TIM_DMABASE_CCER + * @arg TIM_DMABASE_CNT + * @arg TIM_DMABASE_PSC + * @arg TIM_DMABASE_ARR + * @arg TIM_DMABASE_RCR + * @arg TIM_DMABASE_CCR1 + * @arg TIM_DMABASE_CCR2 + * @arg TIM_DMABASE_CCR3 + * @arg TIM_DMABASE_CCR4 + * @arg TIM_DMABASE_BDTR + * @param BurstRequestSrc TIM DMA Request sources + * This parameter can be one of the following values: + * @arg TIM_DMA_UPDATE: TIM update Interrupt source + * @arg TIM_DMA_CC1: TIM Capture Compare 1 DMA source + * @arg TIM_DMA_CC2: TIM Capture Compare 2 DMA source + * @arg TIM_DMA_CC3: TIM Capture Compare 3 DMA source + * @arg TIM_DMA_CC4: TIM Capture Compare 4 DMA source + * @arg TIM_DMA_COM: TIM Commutation DMA source + * @arg TIM_DMA_TRIGGER: TIM Trigger DMA source + * @param BurstBuffer The Buffer address. + * @param BurstLength DMA Burst length. This parameter can be one value + * between: TIM_DMABURSTLENGTH_1TRANSFER and TIM_DMABURSTLENGTH_18TRANSFERS. + * @param DataLength Data length. This parameter can be one value + * between 1 and 0xFFFF. 
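+ * @note A minimal usage sketch: on each capture/compare 1 event the DMA reads two
+ *       words (CCR1, CCR2) through TIMx_DMAR into RAM, so four events fill the
+ *       buffer. The handle htim2, Error_Handler(), a prior channel-1 input-capture
+ *       configuration and the link of the CC1 DMA stream to
+ *       htim2.hdma[TIM_DMA_ID_CC1] are application-side assumptions:
+ * @code
+ *   static uint32_t aCaptureBuffer[8];
+ *   if (HAL_TIM_DMABurst_MultiReadStart(&htim2, TIM_DMABASE_CCR1, TIM_DMA_CC1,
+ *                                       aCaptureBuffer, TIM_DMABURSTLENGTH_2TRANSFERS,
+ *                                       8U) != HAL_OK)
+ *   {
+ *     Error_Handler();
+ *   }
+ *   (void)HAL_TIM_IC_Start(&htim2, TIM_CHANNEL_1);
+ * @endcode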
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_DMABurst_MultiReadStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, uint32_t *BurstBuffer, + uint32_t BurstLength, uint32_t DataLength) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_DMABURST_INSTANCE(htim->Instance)); + assert_param(IS_TIM_DMA_BASE(BurstBaseAddress)); + assert_param(IS_TIM_DMA_SOURCE(BurstRequestSrc)); + assert_param(IS_TIM_DMA_LENGTH(BurstLength)); + assert_param(IS_TIM_DMA_DATA_LENGTH(DataLength)); + + if (htim->DMABurstState == HAL_DMA_BURST_STATE_BUSY) + { + return HAL_BUSY; + } + else if (htim->DMABurstState == HAL_DMA_BURST_STATE_READY) + { + if ((BurstBuffer == NULL) && (BurstLength > 0U)) + { + return HAL_ERROR; + } + else + { + htim->DMABurstState = HAL_DMA_BURST_STATE_BUSY; + } + } + else + { + /* nothing to do */ + } + switch (BurstRequestSrc) + { + case TIM_DMA_UPDATE: + { + /* Set the DMA Period elapsed callbacks */ + htim->hdma[TIM_DMA_ID_UPDATE]->XferCpltCallback = TIM_DMAPeriodElapsedCplt; + htim->hdma[TIM_DMA_ID_UPDATE]->XferHalfCpltCallback = TIM_DMAPeriodElapsedHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_UPDATE]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_UPDATE], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, + DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + case TIM_DMA_CC1: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, + DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + case TIM_DMA_CC2: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, + DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + case TIM_DMA_CC3: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC3]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, + DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + case TIM_DMA_CC4: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC4]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC4]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC4]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC4], 
(uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, + DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + case TIM_DMA_COM: + { + /* Set the DMA commutation callbacks */ + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferCpltCallback = TIMEx_DMACommutationCplt; + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferHalfCpltCallback = TIMEx_DMACommutationHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_COMMUTATION], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, + DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + case TIM_DMA_TRIGGER: + { + /* Set the DMA trigger callbacks */ + htim->hdma[TIM_DMA_ID_TRIGGER]->XferCpltCallback = TIM_DMATriggerCplt; + htim->hdma[TIM_DMA_ID_TRIGGER]->XferHalfCpltCallback = TIM_DMATriggerHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_TRIGGER]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_TRIGGER], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, + DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Configure the DMA Burst Mode */ + htim->Instance->DCR = (BurstBaseAddress | BurstLength); + + /* Enable the TIM DMA Request */ + __HAL_TIM_ENABLE_DMA(htim, BurstRequestSrc); + } + + /* Return function status */ + return status; +} + +/** + * @brief Stop the DMA burst reading + * @param htim TIM handle + * @param BurstRequestSrc TIM DMA Request sources to disable. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStop(TIM_HandleTypeDef *htim, uint32_t BurstRequestSrc) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_DMA_SOURCE(BurstRequestSrc)); + + /* Abort the DMA transfer (at least disable the DMA stream) */ + switch (BurstRequestSrc) + { + case TIM_DMA_UPDATE: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_UPDATE]); + break; + } + case TIM_DMA_CC1: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + break; + } + case TIM_DMA_CC2: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + break; + } + case TIM_DMA_CC3: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); + break; + } + case TIM_DMA_CC4: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC4]); + break; + } + case TIM_DMA_COM: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_COMMUTATION]); + break; + } + case TIM_DMA_TRIGGER: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_TRIGGER]); + break; + } + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Disable the TIM Update DMA request */ + __HAL_TIM_DISABLE_DMA(htim, BurstRequestSrc); + + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + } + + /* Return function status */ + return status; +} + +/** + * @brief Generate a software event + * @param htim TIM handle + * @param EventSource specifies the event source. 
+ * This parameter can be one of the following values: + * @arg TIM_EVENTSOURCE_UPDATE: Timer update Event source + * @arg TIM_EVENTSOURCE_CC1: Timer Capture Compare 1 Event source + * @arg TIM_EVENTSOURCE_CC2: Timer Capture Compare 2 Event source + * @arg TIM_EVENTSOURCE_CC3: Timer Capture Compare 3 Event source + * @arg TIM_EVENTSOURCE_CC4: Timer Capture Compare 4 Event source + * @arg TIM_EVENTSOURCE_COM: Timer COM event source + * @arg TIM_EVENTSOURCE_TRIGGER: Timer Trigger Event source + * @arg TIM_EVENTSOURCE_BREAK: Timer Break event source + * @note Basic timers can only generate an update event. + * @note TIM_EVENTSOURCE_COM is relevant only with advanced timer instances. + * @note TIM_EVENTSOURCE_BREAK are relevant only for timer instances + * supporting a break input. + * @retval HAL status + */ + +HAL_StatusTypeDef HAL_TIM_GenerateEvent(TIM_HandleTypeDef *htim, uint32_t EventSource) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + assert_param(IS_TIM_EVENT_SOURCE(EventSource)); + + /* Process Locked */ + __HAL_LOCK(htim); + + /* Change the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Set the event sources */ + htim->Instance->EGR = EventSource; + + /* Change the TIM state */ + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Configures the OCRef clear feature + * @param htim TIM handle + * @param sClearInputConfig pointer to a TIM_ClearInputConfigTypeDef structure that + * contains the OCREF clear feature and parameters for the TIM peripheral. + * @param Channel specifies the TIM Channel + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 + * @arg TIM_CHANNEL_2: TIM Channel 2 + * @arg TIM_CHANNEL_3: TIM Channel 3 + * @arg TIM_CHANNEL_4: TIM Channel 4 + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_ConfigOCrefClear(TIM_HandleTypeDef *htim, + const TIM_ClearInputConfigTypeDef *sClearInputConfig, + uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_OCXREF_CLEAR_INSTANCE(htim->Instance)); + assert_param(IS_TIM_CLEARINPUT_SOURCE(sClearInputConfig->ClearInputSource)); + + /* Process Locked */ + __HAL_LOCK(htim); + + htim->State = HAL_TIM_STATE_BUSY; + + switch (sClearInputConfig->ClearInputSource) + { + case TIM_CLEARINPUTSOURCE_NONE: + { + /* Clear the OCREF clear selection bit and the the ETR Bits */ + CLEAR_BIT(htim->Instance->SMCR, (TIM_SMCR_ETF | TIM_SMCR_ETPS | TIM_SMCR_ECE | TIM_SMCR_ETP)); + break; + } + + case TIM_CLEARINPUTSOURCE_ETR: + { + /* Check the parameters */ + assert_param(IS_TIM_CLEARINPUT_POLARITY(sClearInputConfig->ClearInputPolarity)); + assert_param(IS_TIM_CLEARINPUT_PRESCALER(sClearInputConfig->ClearInputPrescaler)); + assert_param(IS_TIM_CLEARINPUT_FILTER(sClearInputConfig->ClearInputFilter)); + + /* When OCRef clear feature is used with ETR source, ETR prescaler must be off */ + if (sClearInputConfig->ClearInputPrescaler != TIM_CLEARINPUTPRESCALER_DIV1) + { + htim->State = HAL_TIM_STATE_READY; + __HAL_UNLOCK(htim); + return HAL_ERROR; + } + + TIM_ETR_SetConfig(htim->Instance, + sClearInputConfig->ClearInputPrescaler, + sClearInputConfig->ClearInputPolarity, + sClearInputConfig->ClearInputFilter); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + switch (Channel) + { + case TIM_CHANNEL_1: + { + if (sClearInputConfig->ClearInputState != (uint32_t)DISABLE) + { + /* Enable the OCREF clear 
feature for Channel 1 */ + SET_BIT(htim->Instance->CCMR1, TIM_CCMR1_OC1CE); + } + else + { + /* Disable the OCREF clear feature for Channel 1 */ + CLEAR_BIT(htim->Instance->CCMR1, TIM_CCMR1_OC1CE); + } + break; + } + case TIM_CHANNEL_2: + { + if (sClearInputConfig->ClearInputState != (uint32_t)DISABLE) + { + /* Enable the OCREF clear feature for Channel 2 */ + SET_BIT(htim->Instance->CCMR1, TIM_CCMR1_OC2CE); + } + else + { + /* Disable the OCREF clear feature for Channel 2 */ + CLEAR_BIT(htim->Instance->CCMR1, TIM_CCMR1_OC2CE); + } + break; + } + case TIM_CHANNEL_3: + { + if (sClearInputConfig->ClearInputState != (uint32_t)DISABLE) + { + /* Enable the OCREF clear feature for Channel 3 */ + SET_BIT(htim->Instance->CCMR2, TIM_CCMR2_OC3CE); + } + else + { + /* Disable the OCREF clear feature for Channel 3 */ + CLEAR_BIT(htim->Instance->CCMR2, TIM_CCMR2_OC3CE); + } + break; + } + case TIM_CHANNEL_4: + { + if (sClearInputConfig->ClearInputState != (uint32_t)DISABLE) + { + /* Enable the OCREF clear feature for Channel 4 */ + SET_BIT(htim->Instance->CCMR2, TIM_CCMR2_OC4CE); + } + else + { + /* Disable the OCREF clear feature for Channel 4 */ + CLEAR_BIT(htim->Instance->CCMR2, TIM_CCMR2_OC4CE); + } + break; + } + default: + break; + } + } + + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return status; +} + +/** + * @brief Configures the clock source to be used + * @param htim TIM handle + * @param sClockSourceConfig pointer to a TIM_ClockConfigTypeDef structure that + * contains the clock source information for the TIM peripheral. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_ConfigClockSource(TIM_HandleTypeDef *htim, const TIM_ClockConfigTypeDef *sClockSourceConfig) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpsmcr; + + /* Process Locked */ + __HAL_LOCK(htim); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Check the parameters */ + assert_param(IS_TIM_CLOCKSOURCE(sClockSourceConfig->ClockSource)); + + /* Reset the SMS, TS, ECE, ETPS and ETRF bits */ + tmpsmcr = htim->Instance->SMCR; + tmpsmcr &= ~(TIM_SMCR_SMS | TIM_SMCR_TS); + tmpsmcr &= ~(TIM_SMCR_ETF | TIM_SMCR_ETPS | TIM_SMCR_ECE | TIM_SMCR_ETP); + htim->Instance->SMCR = tmpsmcr; + + switch (sClockSourceConfig->ClockSource) + { + case TIM_CLOCKSOURCE_INTERNAL: + { + assert_param(IS_TIM_INSTANCE(htim->Instance)); + break; + } + + case TIM_CLOCKSOURCE_ETRMODE1: + { + /* Check whether or not the timer instance supports external trigger input mode 1 (ETRF)*/ + assert_param(IS_TIM_CLOCKSOURCE_ETRMODE1_INSTANCE(htim->Instance)); + + /* Check ETR input conditioning related parameters */ + assert_param(IS_TIM_CLOCKPRESCALER(sClockSourceConfig->ClockPrescaler)); + assert_param(IS_TIM_CLOCKPOLARITY(sClockSourceConfig->ClockPolarity)); + assert_param(IS_TIM_CLOCKFILTER(sClockSourceConfig->ClockFilter)); + + /* Configure the ETR Clock source */ + TIM_ETR_SetConfig(htim->Instance, + sClockSourceConfig->ClockPrescaler, + sClockSourceConfig->ClockPolarity, + sClockSourceConfig->ClockFilter); + + /* Select the External clock mode1 and the ETRF trigger */ + tmpsmcr = htim->Instance->SMCR; + tmpsmcr |= (TIM_SLAVEMODE_EXTERNAL1 | TIM_CLOCKSOURCE_ETRMODE1); + /* Write to TIMx SMCR */ + htim->Instance->SMCR = tmpsmcr; + break; + } + + case TIM_CLOCKSOURCE_ETRMODE2: + { + /* Check whether or not the timer instance supports external trigger input mode 2 (ETRF)*/ + assert_param(IS_TIM_CLOCKSOURCE_ETRMODE2_INSTANCE(htim->Instance)); + + /* Check ETR input conditioning related parameters */ + 
assert_param(IS_TIM_CLOCKPRESCALER(sClockSourceConfig->ClockPrescaler)); + assert_param(IS_TIM_CLOCKPOLARITY(sClockSourceConfig->ClockPolarity)); + assert_param(IS_TIM_CLOCKFILTER(sClockSourceConfig->ClockFilter)); + + /* Configure the ETR Clock source */ + TIM_ETR_SetConfig(htim->Instance, + sClockSourceConfig->ClockPrescaler, + sClockSourceConfig->ClockPolarity, + sClockSourceConfig->ClockFilter); + /* Enable the External clock mode2 */ + htim->Instance->SMCR |= TIM_SMCR_ECE; + break; + } + + case TIM_CLOCKSOURCE_TI1: + { + /* Check whether or not the timer instance supports external clock mode 1 */ + assert_param(IS_TIM_CLOCKSOURCE_TIX_INSTANCE(htim->Instance)); + + /* Check TI1 input conditioning related parameters */ + assert_param(IS_TIM_CLOCKPOLARITY(sClockSourceConfig->ClockPolarity)); + assert_param(IS_TIM_CLOCKFILTER(sClockSourceConfig->ClockFilter)); + + TIM_TI1_ConfigInputStage(htim->Instance, + sClockSourceConfig->ClockPolarity, + sClockSourceConfig->ClockFilter); + TIM_ITRx_SetConfig(htim->Instance, TIM_CLOCKSOURCE_TI1); + break; + } + + case TIM_CLOCKSOURCE_TI2: + { + /* Check whether or not the timer instance supports external clock mode 1 (ETRF)*/ + assert_param(IS_TIM_CLOCKSOURCE_TIX_INSTANCE(htim->Instance)); + + /* Check TI2 input conditioning related parameters */ + assert_param(IS_TIM_CLOCKPOLARITY(sClockSourceConfig->ClockPolarity)); + assert_param(IS_TIM_CLOCKFILTER(sClockSourceConfig->ClockFilter)); + + TIM_TI2_ConfigInputStage(htim->Instance, + sClockSourceConfig->ClockPolarity, + sClockSourceConfig->ClockFilter); + TIM_ITRx_SetConfig(htim->Instance, TIM_CLOCKSOURCE_TI2); + break; + } + + case TIM_CLOCKSOURCE_TI1ED: + { + /* Check whether or not the timer instance supports external clock mode 1 */ + assert_param(IS_TIM_CLOCKSOURCE_TIX_INSTANCE(htim->Instance)); + + /* Check TI1 input conditioning related parameters */ + assert_param(IS_TIM_CLOCKPOLARITY(sClockSourceConfig->ClockPolarity)); + assert_param(IS_TIM_CLOCKFILTER(sClockSourceConfig->ClockFilter)); + + TIM_TI1_ConfigInputStage(htim->Instance, + sClockSourceConfig->ClockPolarity, + sClockSourceConfig->ClockFilter); + TIM_ITRx_SetConfig(htim->Instance, TIM_CLOCKSOURCE_TI1ED); + break; + } + + case TIM_CLOCKSOURCE_ITR0: + case TIM_CLOCKSOURCE_ITR1: + case TIM_CLOCKSOURCE_ITR2: + case TIM_CLOCKSOURCE_ITR3: + { + /* Check whether or not the timer instance supports internal trigger input */ + assert_param(IS_TIM_CLOCKSOURCE_ITRX_INSTANCE(htim->Instance)); + + TIM_ITRx_SetConfig(htim->Instance, sClockSourceConfig->ClockSource); + break; + } + + default: + status = HAL_ERROR; + break; + } + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return status; +} + +/** + * @brief Selects the signal connected to the TI1 input: direct from CH1_input + * or a XOR combination between CH1_input, CH2_input & CH3_input + * @param htim TIM handle. + * @param TI1_Selection Indicate whether or not channel 1 is connected to the + * output of a XOR gate. 
+ * This parameter can be one of the following values: + * @arg TIM_TI1SELECTION_CH1: The TIMx_CH1 pin is connected to TI1 input + * @arg TIM_TI1SELECTION_XORCOMBINATION: The TIMx_CH1, CH2 and CH3 + * pins are connected to the TI1 input (XOR combination) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_ConfigTI1Input(TIM_HandleTypeDef *htim, uint32_t TI1_Selection) +{ + uint32_t tmpcr2; + + /* Check the parameters */ + assert_param(IS_TIM_XOR_INSTANCE(htim->Instance)); + assert_param(IS_TIM_TI1SELECTION(TI1_Selection)); + + /* Get the TIMx CR2 register value */ + tmpcr2 = htim->Instance->CR2; + + /* Reset the TI1 selection */ + tmpcr2 &= ~TIM_CR2_TI1S; + + /* Set the TI1 selection */ + tmpcr2 |= TI1_Selection; + + /* Write to TIMxCR2 */ + htim->Instance->CR2 = tmpcr2; + + return HAL_OK; +} + +/** + * @brief Configures the TIM in Slave mode + * @param htim TIM handle. + * @param sSlaveConfig pointer to a TIM_SlaveConfigTypeDef structure that + * contains the selected trigger (internal trigger input, filtered + * timer input or external trigger input) and the Slave mode + * (Disable, Reset, Gated, Trigger, External clock mode 1). + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_SlaveConfigSynchro(TIM_HandleTypeDef *htim, const TIM_SlaveConfigTypeDef *sSlaveConfig) +{ + /* Check the parameters */ + assert_param(IS_TIM_SLAVE_INSTANCE(htim->Instance)); + assert_param(IS_TIM_SLAVE_MODE(sSlaveConfig->SlaveMode)); + assert_param(IS_TIM_TRIGGER_SELECTION(sSlaveConfig->InputTrigger)); + + __HAL_LOCK(htim); + + htim->State = HAL_TIM_STATE_BUSY; + + if (TIM_SlaveTimer_SetConfig(htim, sSlaveConfig) != HAL_OK) + { + htim->State = HAL_TIM_STATE_READY; + __HAL_UNLOCK(htim); + return HAL_ERROR; + } + + /* Disable Trigger Interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_TRIGGER); + + /* Disable Trigger DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_TRIGGER); + + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Configures the TIM in Slave mode in interrupt mode + * @param htim TIM handle. + * @param sSlaveConfig pointer to a TIM_SlaveConfigTypeDef structure that + * contains the selected trigger (internal trigger input, filtered + * timer input or external trigger input) and the Slave mode + * (Disable, Reset, Gated, Trigger, External clock mode 1). + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_SlaveConfigSynchro_IT(TIM_HandleTypeDef *htim, + const TIM_SlaveConfigTypeDef *sSlaveConfig) +{ + /* Check the parameters */ + assert_param(IS_TIM_SLAVE_INSTANCE(htim->Instance)); + assert_param(IS_TIM_SLAVE_MODE(sSlaveConfig->SlaveMode)); + assert_param(IS_TIM_TRIGGER_SELECTION(sSlaveConfig->InputTrigger)); + + __HAL_LOCK(htim); + + htim->State = HAL_TIM_STATE_BUSY; + + if (TIM_SlaveTimer_SetConfig(htim, sSlaveConfig) != HAL_OK) + { + htim->State = HAL_TIM_STATE_READY; + __HAL_UNLOCK(htim); + return HAL_ERROR; + } + + /* Enable Trigger Interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_TRIGGER); + + /* Disable Trigger DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_TRIGGER); + + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Read the captured value from Capture Compare unit + * @param htim TIM handle. 
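+ * @note A minimal usage sketch, reading the latched channel-1 value from the
+ *       user-overridden capture callback; what the application does with the
+ *       captured value (period or pulse-width computation) is left open:
+ * @code
+ *   void HAL_TIM_IC_CaptureCallback(TIM_HandleTypeDef *htim)
+ *   {
+ *     if (htim->Channel == HAL_TIM_ACTIVE_CHANNEL_1)
+ *     {
+ *       uint32_t capture = HAL_TIM_ReadCapturedValue(htim, TIM_CHANNEL_1);
+ *       (void)capture;
+ *     }
+ *   }
+ * @endcode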
+ * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval Captured value + */ +uint32_t HAL_TIM_ReadCapturedValue(const TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpreg = 0U; + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Check the parameters */ + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + + /* Return the capture 1 value */ + tmpreg = htim->Instance->CCR1; + + break; + } + case TIM_CHANNEL_2: + { + /* Check the parameters */ + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + + /* Return the capture 2 value */ + tmpreg = htim->Instance->CCR2; + + break; + } + + case TIM_CHANNEL_3: + { + /* Check the parameters */ + assert_param(IS_TIM_CC3_INSTANCE(htim->Instance)); + + /* Return the capture 3 value */ + tmpreg = htim->Instance->CCR3; + + break; + } + + case TIM_CHANNEL_4: + { + /* Check the parameters */ + assert_param(IS_TIM_CC4_INSTANCE(htim->Instance)); + + /* Return the capture 4 value */ + tmpreg = htim->Instance->CCR4; + + break; + } + + default: + break; + } + + return tmpreg; +} + +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group9 TIM Callbacks functions + * @brief TIM Callbacks functions + * +@verbatim + ============================================================================== + ##### TIM Callbacks functions ##### + ============================================================================== + [..] + This section provides TIM callback functions: + (+) TIM Period elapsed callback + (+) TIM Output Compare callback + (+) TIM Input capture callback + (+) TIM Trigger callback + (+) TIM Error callback + +@endverbatim + * @{ + */ + +/** + * @brief Period elapsed callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIM_PeriodElapsedCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_PeriodElapsedCallback could be implemented in the user file + */ +} + +/** + * @brief Period elapsed half complete callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIM_PeriodElapsedHalfCpltCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_PeriodElapsedHalfCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Output Compare callback in non-blocking mode + * @param htim TIM OC handle + * @retval None + */ +__weak void HAL_TIM_OC_DelayElapsedCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_OC_DelayElapsedCallback could be implemented in the user file + */ +} + +/** + * @brief Input Capture callback in non-blocking mode + * @param htim TIM IC handle + * @retval None + */ +__weak void HAL_TIM_IC_CaptureCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_IC_CaptureCallback could be implemented in the user file + 
*/ +} + +/** + * @brief Input Capture half complete callback in non-blocking mode + * @param htim TIM IC handle + * @retval None + */ +__weak void HAL_TIM_IC_CaptureHalfCpltCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_IC_CaptureHalfCpltCallback could be implemented in the user file + */ +} + +/** + * @brief PWM Pulse finished callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIM_PWM_PulseFinishedCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_PWM_PulseFinishedCallback could be implemented in the user file + */ +} + +/** + * @brief PWM Pulse finished half complete callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIM_PWM_PulseFinishedHalfCpltCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_PWM_PulseFinishedHalfCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Hall Trigger detection callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIM_TriggerCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_TriggerCallback could be implemented in the user file + */ +} + +/** + * @brief Hall Trigger detection half complete callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIM_TriggerHalfCpltCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_TriggerHalfCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Timer error callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIM_ErrorCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_ErrorCallback could be implemented in the user file + */ +} + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) +/** + * @brief Register a User TIM callback to be used instead of the weak predefined callback + * @param htim tim handle + * @param CallbackID ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_TIM_BASE_MSPINIT_CB_ID Base MspInit Callback ID + * @arg @ref HAL_TIM_BASE_MSPDEINIT_CB_ID Base MspDeInit Callback ID + * @arg @ref HAL_TIM_IC_MSPINIT_CB_ID IC MspInit Callback ID + * @arg @ref HAL_TIM_IC_MSPDEINIT_CB_ID IC MspDeInit Callback ID + * @arg @ref HAL_TIM_OC_MSPINIT_CB_ID OC MspInit Callback ID + * @arg @ref HAL_TIM_OC_MSPDEINIT_CB_ID OC MspDeInit Callback ID + * @arg @ref HAL_TIM_PWM_MSPINIT_CB_ID PWM MspInit Callback ID + * @arg @ref HAL_TIM_PWM_MSPDEINIT_CB_ID PWM MspDeInit Callback ID + * @arg @ref HAL_TIM_ONE_PULSE_MSPINIT_CB_ID One Pulse MspInit Callback ID + * @arg @ref HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID One Pulse MspDeInit Callback ID + 
* @arg @ref HAL_TIM_ENCODER_MSPINIT_CB_ID Encoder MspInit Callback ID + * @arg @ref HAL_TIM_ENCODER_MSPDEINIT_CB_ID Encoder MspDeInit Callback ID + * @arg @ref HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID Hall Sensor MspInit Callback ID + * @arg @ref HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID Hall Sensor MspDeInit Callback ID + * @arg @ref HAL_TIM_PERIOD_ELAPSED_CB_ID Period Elapsed Callback ID + * @arg @ref HAL_TIM_PERIOD_ELAPSED_HALF_CB_ID Period Elapsed half complete Callback ID + * @arg @ref HAL_TIM_TRIGGER_CB_ID Trigger Callback ID + * @arg @ref HAL_TIM_TRIGGER_HALF_CB_ID Trigger half complete Callback ID + * @arg @ref HAL_TIM_IC_CAPTURE_CB_ID Input Capture Callback ID + * @arg @ref HAL_TIM_IC_CAPTURE_HALF_CB_ID Input Capture half complete Callback ID + * @arg @ref HAL_TIM_OC_DELAY_ELAPSED_CB_ID Output Compare Delay Elapsed Callback ID + * @arg @ref HAL_TIM_PWM_PULSE_FINISHED_CB_ID PWM Pulse Finished Callback ID + * @arg @ref HAL_TIM_PWM_PULSE_FINISHED_HALF_CB_ID PWM Pulse Finished half complete Callback ID + * @arg @ref HAL_TIM_ERROR_CB_ID Error Callback ID + * @arg @ref HAL_TIM_COMMUTATION_CB_ID Commutation Callback ID + * @arg @ref HAL_TIM_COMMUTATION_HALF_CB_ID Commutation half complete Callback ID + * @arg @ref HAL_TIM_BREAK_CB_ID Break Callback ID + * @param pCallback pointer to the callback function + * @retval status + */ +HAL_StatusTypeDef HAL_TIM_RegisterCallback(TIM_HandleTypeDef *htim, HAL_TIM_CallbackIDTypeDef CallbackID, + pTIM_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + return HAL_ERROR; + } + + if (htim->State == HAL_TIM_STATE_READY) + { + switch (CallbackID) + { + case HAL_TIM_BASE_MSPINIT_CB_ID : + htim->Base_MspInitCallback = pCallback; + break; + + case HAL_TIM_BASE_MSPDEINIT_CB_ID : + htim->Base_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_IC_MSPINIT_CB_ID : + htim->IC_MspInitCallback = pCallback; + break; + + case HAL_TIM_IC_MSPDEINIT_CB_ID : + htim->IC_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_OC_MSPINIT_CB_ID : + htim->OC_MspInitCallback = pCallback; + break; + + case HAL_TIM_OC_MSPDEINIT_CB_ID : + htim->OC_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_PWM_MSPINIT_CB_ID : + htim->PWM_MspInitCallback = pCallback; + break; + + case HAL_TIM_PWM_MSPDEINIT_CB_ID : + htim->PWM_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_ONE_PULSE_MSPINIT_CB_ID : + htim->OnePulse_MspInitCallback = pCallback; + break; + + case HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID : + htim->OnePulse_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_ENCODER_MSPINIT_CB_ID : + htim->Encoder_MspInitCallback = pCallback; + break; + + case HAL_TIM_ENCODER_MSPDEINIT_CB_ID : + htim->Encoder_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID : + htim->HallSensor_MspInitCallback = pCallback; + break; + + case HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID : + htim->HallSensor_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_PERIOD_ELAPSED_CB_ID : + htim->PeriodElapsedCallback = pCallback; + break; + + case HAL_TIM_PERIOD_ELAPSED_HALF_CB_ID : + htim->PeriodElapsedHalfCpltCallback = pCallback; + break; + + case HAL_TIM_TRIGGER_CB_ID : + htim->TriggerCallback = pCallback; + break; + + case HAL_TIM_TRIGGER_HALF_CB_ID : + htim->TriggerHalfCpltCallback = pCallback; + break; + + case HAL_TIM_IC_CAPTURE_CB_ID : + htim->IC_CaptureCallback = pCallback; + break; + + case HAL_TIM_IC_CAPTURE_HALF_CB_ID : + htim->IC_CaptureHalfCpltCallback = pCallback; + break; + + case HAL_TIM_OC_DELAY_ELAPSED_CB_ID : + 
htim->OC_DelayElapsedCallback = pCallback; + break; + + case HAL_TIM_PWM_PULSE_FINISHED_CB_ID : + htim->PWM_PulseFinishedCallback = pCallback; + break; + + case HAL_TIM_PWM_PULSE_FINISHED_HALF_CB_ID : + htim->PWM_PulseFinishedHalfCpltCallback = pCallback; + break; + + case HAL_TIM_ERROR_CB_ID : + htim->ErrorCallback = pCallback; + break; + + case HAL_TIM_COMMUTATION_CB_ID : + htim->CommutationCallback = pCallback; + break; + + case HAL_TIM_COMMUTATION_HALF_CB_ID : + htim->CommutationHalfCpltCallback = pCallback; + break; + + case HAL_TIM_BREAK_CB_ID : + htim->BreakCallback = pCallback; + break; + + default : + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (htim->State == HAL_TIM_STATE_RESET) + { + switch (CallbackID) + { + case HAL_TIM_BASE_MSPINIT_CB_ID : + htim->Base_MspInitCallback = pCallback; + break; + + case HAL_TIM_BASE_MSPDEINIT_CB_ID : + htim->Base_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_IC_MSPINIT_CB_ID : + htim->IC_MspInitCallback = pCallback; + break; + + case HAL_TIM_IC_MSPDEINIT_CB_ID : + htim->IC_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_OC_MSPINIT_CB_ID : + htim->OC_MspInitCallback = pCallback; + break; + + case HAL_TIM_OC_MSPDEINIT_CB_ID : + htim->OC_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_PWM_MSPINIT_CB_ID : + htim->PWM_MspInitCallback = pCallback; + break; + + case HAL_TIM_PWM_MSPDEINIT_CB_ID : + htim->PWM_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_ONE_PULSE_MSPINIT_CB_ID : + htim->OnePulse_MspInitCallback = pCallback; + break; + + case HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID : + htim->OnePulse_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_ENCODER_MSPINIT_CB_ID : + htim->Encoder_MspInitCallback = pCallback; + break; + + case HAL_TIM_ENCODER_MSPDEINIT_CB_ID : + htim->Encoder_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID : + htim->HallSensor_MspInitCallback = pCallback; + break; + + case HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID : + htim->HallSensor_MspDeInitCallback = pCallback; + break; + + default : + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Return error status */ + status = HAL_ERROR; + } + + return status; +} + +/** + * @brief Unregister a TIM callback + * TIM callback is redirected to the weak predefined callback + * @param htim tim handle + * @param CallbackID ID of the callback to be unregistered + * This parameter can be one of the following values: + * @arg @ref HAL_TIM_BASE_MSPINIT_CB_ID Base MspInit Callback ID + * @arg @ref HAL_TIM_BASE_MSPDEINIT_CB_ID Base MspDeInit Callback ID + * @arg @ref HAL_TIM_IC_MSPINIT_CB_ID IC MspInit Callback ID + * @arg @ref HAL_TIM_IC_MSPDEINIT_CB_ID IC MspDeInit Callback ID + * @arg @ref HAL_TIM_OC_MSPINIT_CB_ID OC MspInit Callback ID + * @arg @ref HAL_TIM_OC_MSPDEINIT_CB_ID OC MspDeInit Callback ID + * @arg @ref HAL_TIM_PWM_MSPINIT_CB_ID PWM MspInit Callback ID + * @arg @ref HAL_TIM_PWM_MSPDEINIT_CB_ID PWM MspDeInit Callback ID + * @arg @ref HAL_TIM_ONE_PULSE_MSPINIT_CB_ID One Pulse MspInit Callback ID + * @arg @ref HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID One Pulse MspDeInit Callback ID + * @arg @ref HAL_TIM_ENCODER_MSPINIT_CB_ID Encoder MspInit Callback ID + * @arg @ref HAL_TIM_ENCODER_MSPDEINIT_CB_ID Encoder MspDeInit Callback ID + * @arg @ref HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID Hall Sensor MspInit Callback ID + * @arg @ref HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID Hall Sensor MspDeInit Callback ID + * @arg @ref HAL_TIM_PERIOD_ELAPSED_CB_ID Period Elapsed Callback ID + * 
@arg @ref HAL_TIM_PERIOD_ELAPSED_HALF_CB_ID Period Elapsed half complete Callback ID + * @arg @ref HAL_TIM_TRIGGER_CB_ID Trigger Callback ID + * @arg @ref HAL_TIM_TRIGGER_HALF_CB_ID Trigger half complete Callback ID + * @arg @ref HAL_TIM_IC_CAPTURE_CB_ID Input Capture Callback ID + * @arg @ref HAL_TIM_IC_CAPTURE_HALF_CB_ID Input Capture half complete Callback ID + * @arg @ref HAL_TIM_OC_DELAY_ELAPSED_CB_ID Output Compare Delay Elapsed Callback ID + * @arg @ref HAL_TIM_PWM_PULSE_FINISHED_CB_ID PWM Pulse Finished Callback ID + * @arg @ref HAL_TIM_PWM_PULSE_FINISHED_HALF_CB_ID PWM Pulse Finished half complete Callback ID + * @arg @ref HAL_TIM_ERROR_CB_ID Error Callback ID + * @arg @ref HAL_TIM_COMMUTATION_CB_ID Commutation Callback ID + * @arg @ref HAL_TIM_COMMUTATION_HALF_CB_ID Commutation half complete Callback ID + * @arg @ref HAL_TIM_BREAK_CB_ID Break Callback ID + * @retval status + */ +HAL_StatusTypeDef HAL_TIM_UnRegisterCallback(TIM_HandleTypeDef *htim, HAL_TIM_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (htim->State == HAL_TIM_STATE_READY) + { + switch (CallbackID) + { + case HAL_TIM_BASE_MSPINIT_CB_ID : + /* Legacy weak Base MspInit Callback */ + htim->Base_MspInitCallback = HAL_TIM_Base_MspInit; + break; + + case HAL_TIM_BASE_MSPDEINIT_CB_ID : + /* Legacy weak Base Msp DeInit Callback */ + htim->Base_MspDeInitCallback = HAL_TIM_Base_MspDeInit; + break; + + case HAL_TIM_IC_MSPINIT_CB_ID : + /* Legacy weak IC Msp Init Callback */ + htim->IC_MspInitCallback = HAL_TIM_IC_MspInit; + break; + + case HAL_TIM_IC_MSPDEINIT_CB_ID : + /* Legacy weak IC Msp DeInit Callback */ + htim->IC_MspDeInitCallback = HAL_TIM_IC_MspDeInit; + break; + + case HAL_TIM_OC_MSPINIT_CB_ID : + /* Legacy weak OC Msp Init Callback */ + htim->OC_MspInitCallback = HAL_TIM_OC_MspInit; + break; + + case HAL_TIM_OC_MSPDEINIT_CB_ID : + /* Legacy weak OC Msp DeInit Callback */ + htim->OC_MspDeInitCallback = HAL_TIM_OC_MspDeInit; + break; + + case HAL_TIM_PWM_MSPINIT_CB_ID : + /* Legacy weak PWM Msp Init Callback */ + htim->PWM_MspInitCallback = HAL_TIM_PWM_MspInit; + break; + + case HAL_TIM_PWM_MSPDEINIT_CB_ID : + /* Legacy weak PWM Msp DeInit Callback */ + htim->PWM_MspDeInitCallback = HAL_TIM_PWM_MspDeInit; + break; + + case HAL_TIM_ONE_PULSE_MSPINIT_CB_ID : + /* Legacy weak One Pulse Msp Init Callback */ + htim->OnePulse_MspInitCallback = HAL_TIM_OnePulse_MspInit; + break; + + case HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID : + /* Legacy weak One Pulse Msp DeInit Callback */ + htim->OnePulse_MspDeInitCallback = HAL_TIM_OnePulse_MspDeInit; + break; + + case HAL_TIM_ENCODER_MSPINIT_CB_ID : + /* Legacy weak Encoder Msp Init Callback */ + htim->Encoder_MspInitCallback = HAL_TIM_Encoder_MspInit; + break; + + case HAL_TIM_ENCODER_MSPDEINIT_CB_ID : + /* Legacy weak Encoder Msp DeInit Callback */ + htim->Encoder_MspDeInitCallback = HAL_TIM_Encoder_MspDeInit; + break; + + case HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID : + /* Legacy weak Hall Sensor Msp Init Callback */ + htim->HallSensor_MspInitCallback = HAL_TIMEx_HallSensor_MspInit; + break; + + case HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID : + /* Legacy weak Hall Sensor Msp DeInit Callback */ + htim->HallSensor_MspDeInitCallback = HAL_TIMEx_HallSensor_MspDeInit; + break; + + case HAL_TIM_PERIOD_ELAPSED_CB_ID : + /* Legacy weak Period Elapsed Callback */ + htim->PeriodElapsedCallback = HAL_TIM_PeriodElapsedCallback; + break; + + case HAL_TIM_PERIOD_ELAPSED_HALF_CB_ID : + /* Legacy weak Period Elapsed half complete Callback */ + htim->PeriodElapsedHalfCpltCallback = 
HAL_TIM_PeriodElapsedHalfCpltCallback; + break; + + case HAL_TIM_TRIGGER_CB_ID : + /* Legacy weak Trigger Callback */ + htim->TriggerCallback = HAL_TIM_TriggerCallback; + break; + + case HAL_TIM_TRIGGER_HALF_CB_ID : + /* Legacy weak Trigger half complete Callback */ + htim->TriggerHalfCpltCallback = HAL_TIM_TriggerHalfCpltCallback; + break; + + case HAL_TIM_IC_CAPTURE_CB_ID : + /* Legacy weak IC Capture Callback */ + htim->IC_CaptureCallback = HAL_TIM_IC_CaptureCallback; + break; + + case HAL_TIM_IC_CAPTURE_HALF_CB_ID : + /* Legacy weak IC Capture half complete Callback */ + htim->IC_CaptureHalfCpltCallback = HAL_TIM_IC_CaptureHalfCpltCallback; + break; + + case HAL_TIM_OC_DELAY_ELAPSED_CB_ID : + /* Legacy weak OC Delay Elapsed Callback */ + htim->OC_DelayElapsedCallback = HAL_TIM_OC_DelayElapsedCallback; + break; + + case HAL_TIM_PWM_PULSE_FINISHED_CB_ID : + /* Legacy weak PWM Pulse Finished Callback */ + htim->PWM_PulseFinishedCallback = HAL_TIM_PWM_PulseFinishedCallback; + break; + + case HAL_TIM_PWM_PULSE_FINISHED_HALF_CB_ID : + /* Legacy weak PWM Pulse Finished half complete Callback */ + htim->PWM_PulseFinishedHalfCpltCallback = HAL_TIM_PWM_PulseFinishedHalfCpltCallback; + break; + + case HAL_TIM_ERROR_CB_ID : + /* Legacy weak Error Callback */ + htim->ErrorCallback = HAL_TIM_ErrorCallback; + break; + + case HAL_TIM_COMMUTATION_CB_ID : + /* Legacy weak Commutation Callback */ + htim->CommutationCallback = HAL_TIMEx_CommutCallback; + break; + + case HAL_TIM_COMMUTATION_HALF_CB_ID : + /* Legacy weak Commutation half complete Callback */ + htim->CommutationHalfCpltCallback = HAL_TIMEx_CommutHalfCpltCallback; + break; + + case HAL_TIM_BREAK_CB_ID : + /* Legacy weak Break Callback */ + htim->BreakCallback = HAL_TIMEx_BreakCallback; + break; + + default : + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (htim->State == HAL_TIM_STATE_RESET) + { + switch (CallbackID) + { + case HAL_TIM_BASE_MSPINIT_CB_ID : + /* Legacy weak Base MspInit Callback */ + htim->Base_MspInitCallback = HAL_TIM_Base_MspInit; + break; + + case HAL_TIM_BASE_MSPDEINIT_CB_ID : + /* Legacy weak Base Msp DeInit Callback */ + htim->Base_MspDeInitCallback = HAL_TIM_Base_MspDeInit; + break; + + case HAL_TIM_IC_MSPINIT_CB_ID : + /* Legacy weak IC Msp Init Callback */ + htim->IC_MspInitCallback = HAL_TIM_IC_MspInit; + break; + + case HAL_TIM_IC_MSPDEINIT_CB_ID : + /* Legacy weak IC Msp DeInit Callback */ + htim->IC_MspDeInitCallback = HAL_TIM_IC_MspDeInit; + break; + + case HAL_TIM_OC_MSPINIT_CB_ID : + /* Legacy weak OC Msp Init Callback */ + htim->OC_MspInitCallback = HAL_TIM_OC_MspInit; + break; + + case HAL_TIM_OC_MSPDEINIT_CB_ID : + /* Legacy weak OC Msp DeInit Callback */ + htim->OC_MspDeInitCallback = HAL_TIM_OC_MspDeInit; + break; + + case HAL_TIM_PWM_MSPINIT_CB_ID : + /* Legacy weak PWM Msp Init Callback */ + htim->PWM_MspInitCallback = HAL_TIM_PWM_MspInit; + break; + + case HAL_TIM_PWM_MSPDEINIT_CB_ID : + /* Legacy weak PWM Msp DeInit Callback */ + htim->PWM_MspDeInitCallback = HAL_TIM_PWM_MspDeInit; + break; + + case HAL_TIM_ONE_PULSE_MSPINIT_CB_ID : + /* Legacy weak One Pulse Msp Init Callback */ + htim->OnePulse_MspInitCallback = HAL_TIM_OnePulse_MspInit; + break; + + case HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID : + /* Legacy weak One Pulse Msp DeInit Callback */ + htim->OnePulse_MspDeInitCallback = HAL_TIM_OnePulse_MspDeInit; + break; + + case HAL_TIM_ENCODER_MSPINIT_CB_ID : + /* Legacy weak Encoder Msp Init Callback */ + htim->Encoder_MspInitCallback = HAL_TIM_Encoder_MspInit; + break; + + 
case HAL_TIM_ENCODER_MSPDEINIT_CB_ID : + /* Legacy weak Encoder Msp DeInit Callback */ + htim->Encoder_MspDeInitCallback = HAL_TIM_Encoder_MspDeInit; + break; + + case HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID : + /* Legacy weak Hall Sensor Msp Init Callback */ + htim->HallSensor_MspInitCallback = HAL_TIMEx_HallSensor_MspInit; + break; + + case HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID : + /* Legacy weak Hall Sensor Msp DeInit Callback */ + htim->HallSensor_MspDeInitCallback = HAL_TIMEx_HallSensor_MspDeInit; + break; + + default : + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Return error status */ + status = HAL_ERROR; + } + + return status; +} +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group10 TIM Peripheral State functions + * @brief TIM Peripheral State functions + * +@verbatim + ============================================================================== + ##### Peripheral State functions ##### + ============================================================================== + [..] + This subsection permits to get in run-time the status of the peripheral + and the data flow. + +@endverbatim + * @{ + */ + +/** + * @brief Return the TIM Base handle state. + * @param htim TIM Base handle + * @retval HAL state + */ +HAL_TIM_StateTypeDef HAL_TIM_Base_GetState(const TIM_HandleTypeDef *htim) +{ + return htim->State; +} + +/** + * @brief Return the TIM OC handle state. + * @param htim TIM Output Compare handle + * @retval HAL state + */ +HAL_TIM_StateTypeDef HAL_TIM_OC_GetState(const TIM_HandleTypeDef *htim) +{ + return htim->State; +} + +/** + * @brief Return the TIM PWM handle state. + * @param htim TIM handle + * @retval HAL state + */ +HAL_TIM_StateTypeDef HAL_TIM_PWM_GetState(const TIM_HandleTypeDef *htim) +{ + return htim->State; +} + +/** + * @brief Return the TIM Input Capture handle state. + * @param htim TIM IC handle + * @retval HAL state + */ +HAL_TIM_StateTypeDef HAL_TIM_IC_GetState(const TIM_HandleTypeDef *htim) +{ + return htim->State; +} + +/** + * @brief Return the TIM One Pulse Mode handle state. + * @param htim TIM OPM handle + * @retval HAL state + */ +HAL_TIM_StateTypeDef HAL_TIM_OnePulse_GetState(const TIM_HandleTypeDef *htim) +{ + return htim->State; +} + +/** + * @brief Return the TIM Encoder Mode handle state. + * @param htim TIM Encoder Interface handle + * @retval HAL state + */ +HAL_TIM_StateTypeDef HAL_TIM_Encoder_GetState(const TIM_HandleTypeDef *htim) +{ + return htim->State; +} + +/** + * @brief Return the TIM Encoder Mode handle state. + * @param htim TIM handle + * @retval Active channel + */ +HAL_TIM_ActiveChannel HAL_TIM_GetActiveChannel(const TIM_HandleTypeDef *htim) +{ + return htim->Channel; +} + +/** + * @brief Return actual state of the TIM channel. 
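+  * @note   Minimal polling sketch (htim3 and pwmBuffer are application-side assumptions,
+  *         configured elsewhere; channel 1 is assumed to be set up for PWM):
+  *           if (HAL_TIM_GetChannelState(&htim3, TIM_CHANNEL_1) == HAL_TIM_CHANNEL_STATE_READY)
+  *           {
+  *             (void)HAL_TIM_PWM_Start_DMA(&htim3, TIM_CHANNEL_1, pwmBuffer, 8U);
+  *           }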
+ * @param htim TIM handle + * @param Channel TIM Channel + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 + * @arg TIM_CHANNEL_2: TIM Channel 2 + * @arg TIM_CHANNEL_3: TIM Channel 3 + * @arg TIM_CHANNEL_4: TIM Channel 4 + * @arg TIM_CHANNEL_5: TIM Channel 5 + * @arg TIM_CHANNEL_6: TIM Channel 6 + * @retval TIM Channel state + */ +HAL_TIM_ChannelStateTypeDef HAL_TIM_GetChannelState(const TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_TIM_ChannelStateTypeDef channel_state; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + channel_state = TIM_CHANNEL_STATE_GET(htim, Channel); + + return channel_state; +} + +/** + * @brief Return actual state of a DMA burst operation. + * @param htim TIM handle + * @retval DMA burst state + */ +HAL_TIM_DMABurstStateTypeDef HAL_TIM_DMABurstState(const TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_DMABURST_INSTANCE(htim->Instance)); + + return htim->DMABurstState; +} + +/** + * @} + */ + +/** + * @} + */ + +/** @defgroup TIM_Private_Functions TIM Private Functions + * @{ + */ + +/** + * @brief TIM DMA error callback + * @param hdma pointer to DMA handle. + * @retval None + */ +void TIM_DMAError(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + if (hdma == htim->hdma[TIM_DMA_ID_CC1]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC2]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC3]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_3, HAL_TIM_CHANNEL_STATE_READY); + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC4]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_4; + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_4, HAL_TIM_CHANNEL_STATE_READY); + } + else + { + htim->State = HAL_TIM_STATE_READY; + } + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->ErrorCallback(htim); +#else + HAL_TIM_ErrorCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; +} + +/** + * @brief TIM DMA Delay Pulse complete callback. + * @param hdma pointer to DMA handle. 
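+  * @note   The channel state is moved back to HAL_TIM_CHANNEL_STATE_READY only when the DMA
+  *         stream was started in DMA_NORMAL mode; in circular mode the channel remains BUSY
+  *         and this callback is invoked again at the end of every buffer pass.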
+ * @retval None + */ +static void TIM_DMADelayPulseCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + if (hdma == htim->hdma[TIM_DMA_ID_CC1]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + } + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC2]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + } + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC3]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_3, HAL_TIM_CHANNEL_STATE_READY); + } + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC4]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_4; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_4, HAL_TIM_CHANNEL_STATE_READY); + } + } + else + { + /* nothing to do */ + } + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->PWM_PulseFinishedCallback(htim); +#else + HAL_TIM_PWM_PulseFinishedCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; +} + +/** + * @brief TIM DMA Delay Pulse half complete callback. + * @param hdma pointer to DMA handle. + * @retval None + */ +void TIM_DMADelayPulseHalfCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + if (hdma == htim->hdma[TIM_DMA_ID_CC1]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC2]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC3]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC4]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_4; + } + else + { + /* nothing to do */ + } + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->PWM_PulseFinishedHalfCpltCallback(htim); +#else + HAL_TIM_PWM_PulseFinishedHalfCpltCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; +} + +/** + * @brief TIM DMA Capture complete callback. + * @param hdma pointer to DMA handle. 
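+  * @note   Before the user callback runs, htim->Channel is set to the channel whose transfer
+  *         completed, so HAL_TIM_GetActiveChannel() can be used inside HAL_TIM_IC_CaptureCallback()
+  *         to identify the source; the field is cleared again when this function returns.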
+ * @retval None + */ +void TIM_DMACaptureCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + if (hdma == htim->hdma[TIM_DMA_ID_CC1]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + } + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC2]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + } + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC3]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_3, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_3, HAL_TIM_CHANNEL_STATE_READY); + } + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC4]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_4; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_4, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_4, HAL_TIM_CHANNEL_STATE_READY); + } + } + else + { + /* nothing to do */ + } + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->IC_CaptureCallback(htim); +#else + HAL_TIM_IC_CaptureCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; +} + +/** + * @brief TIM DMA Capture half complete callback. + * @param hdma pointer to DMA handle. + * @retval None + */ +void TIM_DMACaptureHalfCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + if (hdma == htim->hdma[TIM_DMA_ID_CC1]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC2]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC3]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC4]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_4; + } + else + { + /* nothing to do */ + } + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->IC_CaptureHalfCpltCallback(htim); +#else + HAL_TIM_IC_CaptureHalfCpltCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; +} + +/** + * @brief TIM DMA Period Elapse complete callback. + * @param hdma pointer to DMA handle. + * @retval None + */ +static void TIM_DMAPeriodElapsedCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + if (htim->hdma[TIM_DMA_ID_UPDATE]->Init.Mode == DMA_NORMAL) + { + htim->State = HAL_TIM_STATE_READY; + } + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->PeriodElapsedCallback(htim); +#else + HAL_TIM_PeriodElapsedCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ +} + +/** + * @brief TIM DMA Period Elapse half complete callback. + * @param hdma pointer to DMA handle. 
+ * @retval None + */ +static void TIM_DMAPeriodElapsedHalfCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->PeriodElapsedHalfCpltCallback(htim); +#else + HAL_TIM_PeriodElapsedHalfCpltCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ +} + +/** + * @brief TIM DMA Trigger callback. + * @param hdma pointer to DMA handle. + * @retval None + */ +static void TIM_DMATriggerCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + if (htim->hdma[TIM_DMA_ID_TRIGGER]->Init.Mode == DMA_NORMAL) + { + htim->State = HAL_TIM_STATE_READY; + } + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->TriggerCallback(htim); +#else + HAL_TIM_TriggerCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ +} + +/** + * @brief TIM DMA Trigger half complete callback. + * @param hdma pointer to DMA handle. + * @retval None + */ +static void TIM_DMATriggerHalfCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->TriggerHalfCpltCallback(htim); +#else + HAL_TIM_TriggerHalfCpltCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ +} + +/** + * @brief Time Base configuration + * @param TIMx TIM peripheral + * @param Structure TIM Base configuration structure + * @retval None + */ +void TIM_Base_SetConfig(TIM_TypeDef *TIMx, const TIM_Base_InitTypeDef *Structure) +{ + uint32_t tmpcr1; + tmpcr1 = TIMx->CR1; + + /* Set TIM Time Base Unit parameters ---------------------------------------*/ + if (IS_TIM_COUNTER_MODE_SELECT_INSTANCE(TIMx)) + { + /* Select the Counter Mode */ + tmpcr1 &= ~(TIM_CR1_DIR | TIM_CR1_CMS); + tmpcr1 |= Structure->CounterMode; + } + + if (IS_TIM_CLOCK_DIVISION_INSTANCE(TIMx)) + { + /* Set the clock division */ + tmpcr1 &= ~TIM_CR1_CKD; + tmpcr1 |= (uint32_t)Structure->ClockDivision; + } + + /* Set the auto-reload preload */ + MODIFY_REG(tmpcr1, TIM_CR1_ARPE, Structure->AutoReloadPreload); + + TIMx->CR1 = tmpcr1; + + /* Set the Autoreload value */ + TIMx->ARR = (uint32_t)Structure->Period ; + + /* Set the Prescaler value */ + TIMx->PSC = Structure->Prescaler; + + if (IS_TIM_REPETITION_COUNTER_INSTANCE(TIMx)) + { + /* Set the Repetition Counter value */ + TIMx->RCR = Structure->RepetitionCounter; + } + + /* Generate an update event to reload the Prescaler + and the repetition counter (only for advanced timer) value immediately */ + TIMx->EGR = TIM_EGR_UG; + + /* Check if the update flag is set after the Update Generation, if so clear the UIF flag */ + if (HAL_IS_BIT_SET(TIMx->SR, TIM_FLAG_UPDATE)) + { + /* Clear the update flag */ + CLEAR_BIT(TIMx->SR, TIM_FLAG_UPDATE); + } +} + +/** + * @brief Timer Output Compare 1 configuration + * @param TIMx to select the TIM peripheral + * @param OC_Config The output configuration structure + * @retval None + */ +static void TIM_OC1_SetConfig(TIM_TypeDef *TIMx, const TIM_OC_InitTypeDef *OC_Config) +{ + uint32_t tmpccmrx; + uint32_t tmpccer; + uint32_t tmpcr2; + + /* Get the TIMx CCER register value */ + tmpccer = TIMx->CCER; + + /* Disable the Channel 1: Reset the CC1E Bit */ + TIMx->CCER &= ~TIM_CCER_CC1E; + + /* Get the TIMx CR2 register value */ + tmpcr2 = TIMx->CR2; + + /* Get the TIMx CCMR1 register value */ + tmpccmrx = TIMx->CCMR1; + + /* Reset the Output Compare Mode Bits */ + tmpccmrx &= ~TIM_CCMR1_OC1M; + tmpccmrx &= ~TIM_CCMR1_CC1S; + 
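 /* CC1S = 00 keeps channel 1 configured as an output; the requested mode is OR-ed into OC1M just below */ +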
/* Select the Output Compare Mode */ + tmpccmrx |= OC_Config->OCMode; + + /* Reset the Output Polarity level */ + tmpccer &= ~TIM_CCER_CC1P; + /* Set the Output Compare Polarity */ + tmpccer |= OC_Config->OCPolarity; + + if (IS_TIM_CCXN_INSTANCE(TIMx, TIM_CHANNEL_1)) + { + /* Check parameters */ + assert_param(IS_TIM_OCN_POLARITY(OC_Config->OCNPolarity)); + + /* Reset the Output N Polarity level */ + tmpccer &= ~TIM_CCER_CC1NP; + /* Set the Output N Polarity */ + tmpccer |= OC_Config->OCNPolarity; + /* Reset the Output N State */ + tmpccer &= ~TIM_CCER_CC1NE; + } + + if (IS_TIM_BREAK_INSTANCE(TIMx)) + { + /* Check parameters */ + assert_param(IS_TIM_OCNIDLE_STATE(OC_Config->OCNIdleState)); + assert_param(IS_TIM_OCIDLE_STATE(OC_Config->OCIdleState)); + + /* Reset the Output Compare and Output Compare N IDLE State */ + tmpcr2 &= ~TIM_CR2_OIS1; + tmpcr2 &= ~TIM_CR2_OIS1N; + /* Set the Output Idle state */ + tmpcr2 |= OC_Config->OCIdleState; + /* Set the Output N Idle state */ + tmpcr2 |= OC_Config->OCNIdleState; + } + + /* Write to TIMx CR2 */ + TIMx->CR2 = tmpcr2; + + /* Write to TIMx CCMR1 */ + TIMx->CCMR1 = tmpccmrx; + + /* Set the Capture Compare Register value */ + TIMx->CCR1 = OC_Config->Pulse; + + /* Write to TIMx CCER */ + TIMx->CCER = tmpccer; +} + +/** + * @brief Timer Output Compare 2 configuration + * @param TIMx to select the TIM peripheral + * @param OC_Config The output configuration structure + * @retval None + */ +void TIM_OC2_SetConfig(TIM_TypeDef *TIMx, const TIM_OC_InitTypeDef *OC_Config) +{ + uint32_t tmpccmrx; + uint32_t tmpccer; + uint32_t tmpcr2; + + /* Get the TIMx CCER register value */ + tmpccer = TIMx->CCER; + + /* Disable the Channel 2: Reset the CC2E Bit */ + TIMx->CCER &= ~TIM_CCER_CC2E; + + /* Get the TIMx CR2 register value */ + tmpcr2 = TIMx->CR2; + + /* Get the TIMx CCMR1 register value */ + tmpccmrx = TIMx->CCMR1; + + /* Reset the Output Compare mode and Capture/Compare selection Bits */ + tmpccmrx &= ~TIM_CCMR1_OC2M; + tmpccmrx &= ~TIM_CCMR1_CC2S; + + /* Select the Output Compare Mode */ + tmpccmrx |= (OC_Config->OCMode << 8U); + + /* Reset the Output Polarity level */ + tmpccer &= ~TIM_CCER_CC2P; + /* Set the Output Compare Polarity */ + tmpccer |= (OC_Config->OCPolarity << 4U); + + if (IS_TIM_CCXN_INSTANCE(TIMx, TIM_CHANNEL_2)) + { + assert_param(IS_TIM_OCN_POLARITY(OC_Config->OCNPolarity)); + + /* Reset the Output N Polarity level */ + tmpccer &= ~TIM_CCER_CC2NP; + /* Set the Output N Polarity */ + tmpccer |= (OC_Config->OCNPolarity << 4U); + /* Reset the Output N State */ + tmpccer &= ~TIM_CCER_CC2NE; + } + + if (IS_TIM_BREAK_INSTANCE(TIMx)) + { + /* Check parameters */ + assert_param(IS_TIM_OCNIDLE_STATE(OC_Config->OCNIdleState)); + assert_param(IS_TIM_OCIDLE_STATE(OC_Config->OCIdleState)); + + /* Reset the Output Compare and Output Compare N IDLE State */ + tmpcr2 &= ~TIM_CR2_OIS2; + tmpcr2 &= ~TIM_CR2_OIS2N; + /* Set the Output Idle state */ + tmpcr2 |= (OC_Config->OCIdleState << 2U); + /* Set the Output N Idle state */ + tmpcr2 |= (OC_Config->OCNIdleState << 2U); + } + + /* Write to TIMx CR2 */ + TIMx->CR2 = tmpcr2; + + /* Write to TIMx CCMR1 */ + TIMx->CCMR1 = tmpccmrx; + + /* Set the Capture Compare Register value */ + TIMx->CCR2 = OC_Config->Pulse; + + /* Write to TIMx CCER */ + TIMx->CCER = tmpccer; +} + +/** + * @brief Timer Output Compare 3 configuration + * @param TIMx to select the TIM peripheral + * @param OC_Config The output configuration structure + * @retval None + */ +static void TIM_OC3_SetConfig(TIM_TypeDef *TIMx, const 
TIM_OC_InitTypeDef *OC_Config) +{ + uint32_t tmpccmrx; + uint32_t tmpccer; + uint32_t tmpcr2; + + /* Get the TIMx CCER register value */ + tmpccer = TIMx->CCER; + + /* Disable the Channel 3: Reset the CC2E Bit */ + TIMx->CCER &= ~TIM_CCER_CC3E; + + /* Get the TIMx CR2 register value */ + tmpcr2 = TIMx->CR2; + + /* Get the TIMx CCMR2 register value */ + tmpccmrx = TIMx->CCMR2; + + /* Reset the Output Compare mode and Capture/Compare selection Bits */ + tmpccmrx &= ~TIM_CCMR2_OC3M; + tmpccmrx &= ~TIM_CCMR2_CC3S; + /* Select the Output Compare Mode */ + tmpccmrx |= OC_Config->OCMode; + + /* Reset the Output Polarity level */ + tmpccer &= ~TIM_CCER_CC3P; + /* Set the Output Compare Polarity */ + tmpccer |= (OC_Config->OCPolarity << 8U); + + if (IS_TIM_CCXN_INSTANCE(TIMx, TIM_CHANNEL_3)) + { + assert_param(IS_TIM_OCN_POLARITY(OC_Config->OCNPolarity)); + + /* Reset the Output N Polarity level */ + tmpccer &= ~TIM_CCER_CC3NP; + /* Set the Output N Polarity */ + tmpccer |= (OC_Config->OCNPolarity << 8U); + /* Reset the Output N State */ + tmpccer &= ~TIM_CCER_CC3NE; + } + + if (IS_TIM_BREAK_INSTANCE(TIMx)) + { + /* Check parameters */ + assert_param(IS_TIM_OCNIDLE_STATE(OC_Config->OCNIdleState)); + assert_param(IS_TIM_OCIDLE_STATE(OC_Config->OCIdleState)); + + /* Reset the Output Compare and Output Compare N IDLE State */ + tmpcr2 &= ~TIM_CR2_OIS3; + tmpcr2 &= ~TIM_CR2_OIS3N; + /* Set the Output Idle state */ + tmpcr2 |= (OC_Config->OCIdleState << 4U); + /* Set the Output N Idle state */ + tmpcr2 |= (OC_Config->OCNIdleState << 4U); + } + + /* Write to TIMx CR2 */ + TIMx->CR2 = tmpcr2; + + /* Write to TIMx CCMR2 */ + TIMx->CCMR2 = tmpccmrx; + + /* Set the Capture Compare Register value */ + TIMx->CCR3 = OC_Config->Pulse; + + /* Write to TIMx CCER */ + TIMx->CCER = tmpccer; +} + +/** + * @brief Timer Output Compare 4 configuration + * @param TIMx to select the TIM peripheral + * @param OC_Config The output configuration structure + * @retval None + */ +static void TIM_OC4_SetConfig(TIM_TypeDef *TIMx, const TIM_OC_InitTypeDef *OC_Config) +{ + uint32_t tmpccmrx; + uint32_t tmpccer; + uint32_t tmpcr2; + + /* Get the TIMx CCER register value */ + tmpccer = TIMx->CCER; + + /* Disable the Channel 4: Reset the CC4E Bit */ + TIMx->CCER &= ~TIM_CCER_CC4E; + + /* Get the TIMx CR2 register value */ + tmpcr2 = TIMx->CR2; + + /* Get the TIMx CCMR2 register value */ + tmpccmrx = TIMx->CCMR2; + + /* Reset the Output Compare mode and Capture/Compare selection Bits */ + tmpccmrx &= ~TIM_CCMR2_OC4M; + tmpccmrx &= ~TIM_CCMR2_CC4S; + + /* Select the Output Compare Mode */ + tmpccmrx |= (OC_Config->OCMode << 8U); + + /* Reset the Output Polarity level */ + tmpccer &= ~TIM_CCER_CC4P; + /* Set the Output Compare Polarity */ + tmpccer |= (OC_Config->OCPolarity << 12U); + + if (IS_TIM_BREAK_INSTANCE(TIMx)) + { + /* Check parameters */ + assert_param(IS_TIM_OCIDLE_STATE(OC_Config->OCIdleState)); + + /* Reset the Output Compare IDLE State */ + tmpcr2 &= ~TIM_CR2_OIS4; + + /* Set the Output Idle state */ + tmpcr2 |= (OC_Config->OCIdleState << 6U); + } + + /* Write to TIMx CR2 */ + TIMx->CR2 = tmpcr2; + + /* Write to TIMx CCMR2 */ + TIMx->CCMR2 = tmpccmrx; + + /* Set the Capture Compare Register value */ + TIMx->CCR4 = OC_Config->Pulse; + + /* Write to TIMx CCER */ + TIMx->CCER = tmpccer; +} + +/** + * @brief Slave Timer configuration function + * @param htim TIM handle + * @param sSlaveConfig Slave timer configuration + * @retval None + */ +static HAL_StatusTypeDef TIM_SlaveTimer_SetConfig(TIM_HandleTypeDef *htim, + const 
TIM_SlaveConfigTypeDef *sSlaveConfig) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpsmcr; + uint32_t tmpccmr1; + uint32_t tmpccer; + + /* Get the TIMx SMCR register value */ + tmpsmcr = htim->Instance->SMCR; + + /* Reset the Trigger Selection Bits */ + tmpsmcr &= ~TIM_SMCR_TS; + /* Set the Input Trigger source */ + tmpsmcr |= sSlaveConfig->InputTrigger; + + /* Reset the slave mode Bits */ + tmpsmcr &= ~TIM_SMCR_SMS; + /* Set the slave mode */ + tmpsmcr |= sSlaveConfig->SlaveMode; + + /* Write to TIMx SMCR */ + htim->Instance->SMCR = tmpsmcr; + + /* Configure the trigger prescaler, filter, and polarity */ + switch (sSlaveConfig->InputTrigger) + { + case TIM_TS_ETRF: + { + /* Check the parameters */ + assert_param(IS_TIM_CLOCKSOURCE_ETRMODE1_INSTANCE(htim->Instance)); + assert_param(IS_TIM_TRIGGERPRESCALER(sSlaveConfig->TriggerPrescaler)); + assert_param(IS_TIM_TRIGGERPOLARITY(sSlaveConfig->TriggerPolarity)); + assert_param(IS_TIM_TRIGGERFILTER(sSlaveConfig->TriggerFilter)); + /* Configure the ETR Trigger source */ + TIM_ETR_SetConfig(htim->Instance, + sSlaveConfig->TriggerPrescaler, + sSlaveConfig->TriggerPolarity, + sSlaveConfig->TriggerFilter); + break; + } + + case TIM_TS_TI1F_ED: + { + /* Check the parameters */ + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + assert_param(IS_TIM_TRIGGERFILTER(sSlaveConfig->TriggerFilter)); + + if (sSlaveConfig->SlaveMode == TIM_SLAVEMODE_GATED) + { + return HAL_ERROR; + } + + /* Disable the Channel 1: Reset the CC1E Bit */ + tmpccer = htim->Instance->CCER; + htim->Instance->CCER &= ~TIM_CCER_CC1E; + tmpccmr1 = htim->Instance->CCMR1; + + /* Set the filter */ + tmpccmr1 &= ~TIM_CCMR1_IC1F; + tmpccmr1 |= ((sSlaveConfig->TriggerFilter) << 4U); + + /* Write to TIMx CCMR1 and CCER registers */ + htim->Instance->CCMR1 = tmpccmr1; + htim->Instance->CCER = tmpccer; + break; + } + + case TIM_TS_TI1FP1: + { + /* Check the parameters */ + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + assert_param(IS_TIM_TRIGGERPOLARITY(sSlaveConfig->TriggerPolarity)); + assert_param(IS_TIM_TRIGGERFILTER(sSlaveConfig->TriggerFilter)); + + /* Configure TI1 Filter and Polarity */ + TIM_TI1_ConfigInputStage(htim->Instance, + sSlaveConfig->TriggerPolarity, + sSlaveConfig->TriggerFilter); + break; + } + + case TIM_TS_TI2FP2: + { + /* Check the parameters */ + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + assert_param(IS_TIM_TRIGGERPOLARITY(sSlaveConfig->TriggerPolarity)); + assert_param(IS_TIM_TRIGGERFILTER(sSlaveConfig->TriggerFilter)); + + /* Configure TI2 Filter and Polarity */ + TIM_TI2_ConfigInputStage(htim->Instance, + sSlaveConfig->TriggerPolarity, + sSlaveConfig->TriggerFilter); + break; + } + + case TIM_TS_ITR0: + case TIM_TS_ITR1: + case TIM_TS_ITR2: + case TIM_TS_ITR3: + { + /* Check the parameter */ + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + break; + } + + default: + status = HAL_ERROR; + break; + } + + return status; +} + +/** + * @brief Configure the TI1 as Input. + * @param TIMx to select the TIM peripheral. + * @param TIM_ICPolarity The Input Polarity. + * This parameter can be one of the following values: + * @arg TIM_ICPOLARITY_RISING + * @arg TIM_ICPOLARITY_FALLING + * @arg TIM_ICPOLARITY_BOTHEDGE + * @param TIM_ICSelection specifies the input to be used. + * This parameter can be one of the following values: + * @arg TIM_ICSELECTION_DIRECTTI: TIM Input 1 is selected to be connected to IC1. + * @arg TIM_ICSELECTION_INDIRECTTI: TIM Input 1 is selected to be connected to IC2. 
+ * @arg TIM_ICSELECTION_TRC: TIM Input 1 is selected to be connected to TRC. + * @param TIM_ICFilter Specifies the Input Capture Filter. + * This parameter must be a value between 0x00 and 0x0F. + * @retval None + * @note TIM_ICFilter and TIM_ICPolarity are not used in INDIRECT mode as TI2FP1 + * (on channel2 path) is used as the input signal. Therefore CCMR1 must be + * protected against un-initialized filter and polarity values. + */ +void TIM_TI1_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, + uint32_t TIM_ICFilter) +{ + uint32_t tmpccmr1; + uint32_t tmpccer; + + /* Disable the Channel 1: Reset the CC1E Bit */ + tmpccer = TIMx->CCER; + TIMx->CCER &= ~TIM_CCER_CC1E; + tmpccmr1 = TIMx->CCMR1; + + /* Select the Input */ + if (IS_TIM_CC2_INSTANCE(TIMx) != RESET) + { + tmpccmr1 &= ~TIM_CCMR1_CC1S; + tmpccmr1 |= TIM_ICSelection; + } + else + { + tmpccmr1 |= TIM_CCMR1_CC1S_0; + } + + /* Set the filter */ + tmpccmr1 &= ~TIM_CCMR1_IC1F; + tmpccmr1 |= ((TIM_ICFilter << 4U) & TIM_CCMR1_IC1F); + + /* Select the Polarity and set the CC1E Bit */ + tmpccer &= ~(TIM_CCER_CC1P | TIM_CCER_CC1NP); + tmpccer |= (TIM_ICPolarity & (TIM_CCER_CC1P | TIM_CCER_CC1NP)); + + /* Write to TIMx CCMR1 and CCER registers */ + TIMx->CCMR1 = tmpccmr1; + TIMx->CCER = tmpccer; +} + +/** + * @brief Configure the Polarity and Filter for TI1. + * @param TIMx to select the TIM peripheral. + * @param TIM_ICPolarity The Input Polarity. + * This parameter can be one of the following values: + * @arg TIM_ICPOLARITY_RISING + * @arg TIM_ICPOLARITY_FALLING + * @arg TIM_ICPOLARITY_BOTHEDGE + * @param TIM_ICFilter Specifies the Input Capture Filter. + * This parameter must be a value between 0x00 and 0x0F. + * @retval None + */ +static void TIM_TI1_ConfigInputStage(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICFilter) +{ + uint32_t tmpccmr1; + uint32_t tmpccer; + + /* Disable the Channel 1: Reset the CC1E Bit */ + tmpccer = TIMx->CCER; + TIMx->CCER &= ~TIM_CCER_CC1E; + tmpccmr1 = TIMx->CCMR1; + + /* Set the filter */ + tmpccmr1 &= ~TIM_CCMR1_IC1F; + tmpccmr1 |= (TIM_ICFilter << 4U); + + /* Select the Polarity and set the CC1E Bit */ + tmpccer &= ~(TIM_CCER_CC1P | TIM_CCER_CC1NP); + tmpccer |= TIM_ICPolarity; + + /* Write to TIMx CCMR1 and CCER registers */ + TIMx->CCMR1 = tmpccmr1; + TIMx->CCER = tmpccer; +} + +/** + * @brief Configure the TI2 as Input. + * @param TIMx to select the TIM peripheral + * @param TIM_ICPolarity The Input Polarity. + * This parameter can be one of the following values: + * @arg TIM_ICPOLARITY_RISING + * @arg TIM_ICPOLARITY_FALLING + * @arg TIM_ICPOLARITY_BOTHEDGE + * @param TIM_ICSelection specifies the input to be used. + * This parameter can be one of the following values: + * @arg TIM_ICSELECTION_DIRECTTI: TIM Input 2 is selected to be connected to IC2. + * @arg TIM_ICSELECTION_INDIRECTTI: TIM Input 2 is selected to be connected to IC1. + * @arg TIM_ICSELECTION_TRC: TIM Input 2 is selected to be connected to TRC. + * @param TIM_ICFilter Specifies the Input Capture Filter. + * This parameter must be a value between 0x00 and 0x0F. + * @retval None + * @note TIM_ICFilter and TIM_ICPolarity are not used in INDIRECT mode as TI1FP2 + * (on channel1 path) is used as the input signal. Therefore CCMR1 must be + * protected against un-initialized filter and polarity values. 
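+  *         For channel 2 the CC2S, IC2F and CC2P/CC2NP fields occupy the upper halves of
+  *         CCMR1 and CCER, which is why the selection, filter and polarity values are
+  *         shifted by 8, 12 and 4 bits respectively in the body of this function.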
+ */ +static void TIM_TI2_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, + uint32_t TIM_ICFilter) +{ + uint32_t tmpccmr1; + uint32_t tmpccer; + + /* Disable the Channel 2: Reset the CC2E Bit */ + tmpccer = TIMx->CCER; + TIMx->CCER &= ~TIM_CCER_CC2E; + tmpccmr1 = TIMx->CCMR1; + + /* Select the Input */ + tmpccmr1 &= ~TIM_CCMR1_CC2S; + tmpccmr1 |= (TIM_ICSelection << 8U); + + /* Set the filter */ + tmpccmr1 &= ~TIM_CCMR1_IC2F; + tmpccmr1 |= ((TIM_ICFilter << 12U) & TIM_CCMR1_IC2F); + + /* Select the Polarity and set the CC2E Bit */ + tmpccer &= ~(TIM_CCER_CC2P | TIM_CCER_CC2NP); + tmpccer |= ((TIM_ICPolarity << 4U) & (TIM_CCER_CC2P | TIM_CCER_CC2NP)); + + /* Write to TIMx CCMR1 and CCER registers */ + TIMx->CCMR1 = tmpccmr1 ; + TIMx->CCER = tmpccer; +} + +/** + * @brief Configure the Polarity and Filter for TI2. + * @param TIMx to select the TIM peripheral. + * @param TIM_ICPolarity The Input Polarity. + * This parameter can be one of the following values: + * @arg TIM_ICPOLARITY_RISING + * @arg TIM_ICPOLARITY_FALLING + * @arg TIM_ICPOLARITY_BOTHEDGE + * @param TIM_ICFilter Specifies the Input Capture Filter. + * This parameter must be a value between 0x00 and 0x0F. + * @retval None + */ +static void TIM_TI2_ConfigInputStage(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICFilter) +{ + uint32_t tmpccmr1; + uint32_t tmpccer; + + /* Disable the Channel 2: Reset the CC2E Bit */ + tmpccer = TIMx->CCER; + TIMx->CCER &= ~TIM_CCER_CC2E; + tmpccmr1 = TIMx->CCMR1; + + /* Set the filter */ + tmpccmr1 &= ~TIM_CCMR1_IC2F; + tmpccmr1 |= (TIM_ICFilter << 12U); + + /* Select the Polarity and set the CC2E Bit */ + tmpccer &= ~(TIM_CCER_CC2P | TIM_CCER_CC2NP); + tmpccer |= (TIM_ICPolarity << 4U); + + /* Write to TIMx CCMR1 and CCER registers */ + TIMx->CCMR1 = tmpccmr1 ; + TIMx->CCER = tmpccer; +} + +/** + * @brief Configure the TI3 as Input. + * @param TIMx to select the TIM peripheral + * @param TIM_ICPolarity The Input Polarity. + * This parameter can be one of the following values: + * @arg TIM_ICPOLARITY_RISING + * @arg TIM_ICPOLARITY_FALLING + * @arg TIM_ICPOLARITY_BOTHEDGE + * @param TIM_ICSelection specifies the input to be used. + * This parameter can be one of the following values: + * @arg TIM_ICSELECTION_DIRECTTI: TIM Input 3 is selected to be connected to IC3. + * @arg TIM_ICSELECTION_INDIRECTTI: TIM Input 3 is selected to be connected to IC4. + * @arg TIM_ICSELECTION_TRC: TIM Input 3 is selected to be connected to TRC. + * @param TIM_ICFilter Specifies the Input Capture Filter. + * This parameter must be a value between 0x00 and 0x0F. + * @retval None + * @note TIM_ICFilter and TIM_ICPolarity are not used in INDIRECT mode as TI3FP4 + * (on channel1 path) is used as the input signal. Therefore CCMR2 must be + * protected against un-initialized filter and polarity values. 
+ */ +static void TIM_TI3_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, + uint32_t TIM_ICFilter) +{ + uint32_t tmpccmr2; + uint32_t tmpccer; + + /* Disable the Channel 3: Reset the CC3E Bit */ + tmpccer = TIMx->CCER; + TIMx->CCER &= ~TIM_CCER_CC3E; + tmpccmr2 = TIMx->CCMR2; + + /* Select the Input */ + tmpccmr2 &= ~TIM_CCMR2_CC3S; + tmpccmr2 |= TIM_ICSelection; + + /* Set the filter */ + tmpccmr2 &= ~TIM_CCMR2_IC3F; + tmpccmr2 |= ((TIM_ICFilter << 4U) & TIM_CCMR2_IC3F); + + /* Select the Polarity and set the CC3E Bit */ + tmpccer &= ~(TIM_CCER_CC3P | TIM_CCER_CC3NP); + tmpccer |= ((TIM_ICPolarity << 8U) & (TIM_CCER_CC3P | TIM_CCER_CC3NP)); + + /* Write to TIMx CCMR2 and CCER registers */ + TIMx->CCMR2 = tmpccmr2; + TIMx->CCER = tmpccer; +} + +/** + * @brief Configure the TI4 as Input. + * @param TIMx to select the TIM peripheral + * @param TIM_ICPolarity The Input Polarity. + * This parameter can be one of the following values: + * @arg TIM_ICPOLARITY_RISING + * @arg TIM_ICPOLARITY_FALLING + * @arg TIM_ICPOLARITY_BOTHEDGE + * @param TIM_ICSelection specifies the input to be used. + * This parameter can be one of the following values: + * @arg TIM_ICSELECTION_DIRECTTI: TIM Input 4 is selected to be connected to IC4. + * @arg TIM_ICSELECTION_INDIRECTTI: TIM Input 4 is selected to be connected to IC3. + * @arg TIM_ICSELECTION_TRC: TIM Input 4 is selected to be connected to TRC. + * @param TIM_ICFilter Specifies the Input Capture Filter. + * This parameter must be a value between 0x00 and 0x0F. + * @note TIM_ICFilter and TIM_ICPolarity are not used in INDIRECT mode as TI4FP3 + * (on channel1 path) is used as the input signal. Therefore CCMR2 must be + * protected against un-initialized filter and polarity values. + * @retval None + */ +static void TIM_TI4_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, + uint32_t TIM_ICFilter) +{ + uint32_t tmpccmr2; + uint32_t tmpccer; + + /* Disable the Channel 4: Reset the CC4E Bit */ + tmpccer = TIMx->CCER; + TIMx->CCER &= ~TIM_CCER_CC4E; + tmpccmr2 = TIMx->CCMR2; + + /* Select the Input */ + tmpccmr2 &= ~TIM_CCMR2_CC4S; + tmpccmr2 |= (TIM_ICSelection << 8U); + + /* Set the filter */ + tmpccmr2 &= ~TIM_CCMR2_IC4F; + tmpccmr2 |= ((TIM_ICFilter << 12U) & TIM_CCMR2_IC4F); + + /* Select the Polarity and set the CC4E Bit */ + tmpccer &= ~(TIM_CCER_CC4P | TIM_CCER_CC4NP); + tmpccer |= ((TIM_ICPolarity << 12U) & (TIM_CCER_CC4P | TIM_CCER_CC4NP)); + + /* Write to TIMx CCMR2 and CCER registers */ + TIMx->CCMR2 = tmpccmr2; + TIMx->CCER = tmpccer ; +} + +/** + * @brief Selects the Input Trigger source + * @param TIMx to select the TIM peripheral + * @param InputTriggerSource The Input Trigger source. 
+ * This parameter can be one of the following values: + * @arg TIM_TS_ITR0: Internal Trigger 0 + * @arg TIM_TS_ITR1: Internal Trigger 1 + * @arg TIM_TS_ITR2: Internal Trigger 2 + * @arg TIM_TS_ITR3: Internal Trigger 3 + * @arg TIM_TS_TI1F_ED: TI1 Edge Detector + * @arg TIM_TS_TI1FP1: Filtered Timer Input 1 + * @arg TIM_TS_TI2FP2: Filtered Timer Input 2 + * @arg TIM_TS_ETRF: External Trigger input + * @retval None + */ +static void TIM_ITRx_SetConfig(TIM_TypeDef *TIMx, uint32_t InputTriggerSource) +{ + uint32_t tmpsmcr; + + /* Get the TIMx SMCR register value */ + tmpsmcr = TIMx->SMCR; + /* Reset the TS Bits */ + tmpsmcr &= ~TIM_SMCR_TS; + /* Set the Input Trigger source and the slave mode*/ + tmpsmcr |= (InputTriggerSource | TIM_SLAVEMODE_EXTERNAL1); + /* Write to TIMx SMCR */ + TIMx->SMCR = tmpsmcr; +} +/** + * @brief Configures the TIMx External Trigger (ETR). + * @param TIMx to select the TIM peripheral + * @param TIM_ExtTRGPrescaler The external Trigger Prescaler. + * This parameter can be one of the following values: + * @arg TIM_ETRPRESCALER_DIV1: ETRP Prescaler OFF. + * @arg TIM_ETRPRESCALER_DIV2: ETRP frequency divided by 2. + * @arg TIM_ETRPRESCALER_DIV4: ETRP frequency divided by 4. + * @arg TIM_ETRPRESCALER_DIV8: ETRP frequency divided by 8. + * @param TIM_ExtTRGPolarity The external Trigger Polarity. + * This parameter can be one of the following values: + * @arg TIM_ETRPOLARITY_INVERTED: active low or falling edge active. + * @arg TIM_ETRPOLARITY_NONINVERTED: active high or rising edge active. + * @param ExtTRGFilter External Trigger Filter. + * This parameter must be a value between 0x00 and 0x0F + * @retval None + */ +void TIM_ETR_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ExtTRGPrescaler, + uint32_t TIM_ExtTRGPolarity, uint32_t ExtTRGFilter) +{ + uint32_t tmpsmcr; + + tmpsmcr = TIMx->SMCR; + + /* Reset the ETR Bits */ + tmpsmcr &= ~(TIM_SMCR_ETF | TIM_SMCR_ETPS | TIM_SMCR_ECE | TIM_SMCR_ETP); + + /* Set the Prescaler, the Filter value and the Polarity */ + tmpsmcr |= (uint32_t)(TIM_ExtTRGPrescaler | (TIM_ExtTRGPolarity | (ExtTRGFilter << 8U))); + + /* Write to TIMx SMCR */ + TIMx->SMCR = tmpsmcr; +} + +/** + * @brief Enables or disables the TIM Capture Compare Channel x. + * @param TIMx to select the TIM peripheral + * @param Channel specifies the TIM Channel + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 + * @arg TIM_CHANNEL_2: TIM Channel 2 + * @arg TIM_CHANNEL_3: TIM Channel 3 + * @arg TIM_CHANNEL_4: TIM Channel 4 + * @param ChannelState specifies the TIM Channel CCxE bit new state. + * This parameter can be: TIM_CCx_ENABLE or TIM_CCx_DISABLE. + * @retval None + */ +void TIM_CCxChannelCmd(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t ChannelState) +{ + uint32_t tmp; + + /* Check the parameters */ + assert_param(IS_TIM_CC1_INSTANCE(TIMx)); + assert_param(IS_TIM_CHANNELS(Channel)); + + tmp = TIM_CCER_CC1E << (Channel & 0x1FU); /* 0x1FU = 31 bits max shift */ + + /* Reset the CCxE Bit */ + TIMx->CCER &= ~tmp; + + /* Set or reset the CCxE Bit */ + TIMx->CCER |= (uint32_t)(ChannelState << (Channel & 0x1FU)); /* 0x1FU = 31 bits max shift */ +} + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) +/** + * @brief Reset interrupt callbacks to the legacy weak callbacks. + * @param htim pointer to a TIM_HandleTypeDef structure that contains + * the configuration information for TIM module. 
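+  * @note   Only the interrupt and DMA transfer callbacks are restored here; the MspInit and
+  *         MspDeInit callbacks are handled individually through HAL_TIM_RegisterCallback()
+  *         and HAL_TIM_UnRegisterCallback().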
+ * @retval None + */ +void TIM_ResetCallback(TIM_HandleTypeDef *htim) +{ + /* Reset the TIM callback to the legacy weak callbacks */ + htim->PeriodElapsedCallback = HAL_TIM_PeriodElapsedCallback; + htim->PeriodElapsedHalfCpltCallback = HAL_TIM_PeriodElapsedHalfCpltCallback; + htim->TriggerCallback = HAL_TIM_TriggerCallback; + htim->TriggerHalfCpltCallback = HAL_TIM_TriggerHalfCpltCallback; + htim->IC_CaptureCallback = HAL_TIM_IC_CaptureCallback; + htim->IC_CaptureHalfCpltCallback = HAL_TIM_IC_CaptureHalfCpltCallback; + htim->OC_DelayElapsedCallback = HAL_TIM_OC_DelayElapsedCallback; + htim->PWM_PulseFinishedCallback = HAL_TIM_PWM_PulseFinishedCallback; + htim->PWM_PulseFinishedHalfCpltCallback = HAL_TIM_PWM_PulseFinishedHalfCpltCallback; + htim->ErrorCallback = HAL_TIM_ErrorCallback; + htim->CommutationCallback = HAL_TIMEx_CommutCallback; + htim->CommutationHalfCpltCallback = HAL_TIMEx_CommutHalfCpltCallback; + htim->BreakCallback = HAL_TIMEx_BreakCallback; +} +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + +/** + * @} + */ + +#endif /* HAL_TIM_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_tim_ex.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_tim_ex.c new file mode 100644 index 0000000..889f8fb --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_tim_ex.c @@ -0,0 +1,2410 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_tim_ex.c + * @author MCD Application Team + * @brief TIM HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Timer Extended peripheral: + * + Time Hall Sensor Interface Initialization + * + Time Hall Sensor Interface Start + * + Time Complementary signal break and dead time configuration + * + Time Master and Slave synchronization configuration + * + Timer remapping capabilities configuration + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + @verbatim + ============================================================================== + ##### TIMER Extended features ##### + ============================================================================== + [..] + The Timer Extended features include: + (#) Complementary outputs with programmable dead-time for : + (++) Output Compare + (++) PWM generation (Edge and Center-aligned Mode) + (++) One-pulse mode output + (#) Synchronization circuit to control the timer with external signals and to + interconnect several timers together. + (#) Break input to put the timer output signals in reset state or in a known state. + (#) Supports incremental (quadrature) encoder and hall-sensor circuitry for + positioning purposes + + ##### How to use this driver ##### + ============================================================================== + [..] 
+ (#) Initialize the TIM low level resources by implementing the following functions + depending on the selected feature: + (++) Hall Sensor output : HAL_TIMEx_HallSensor_MspInit() + + (#) Initialize the TIM low level resources : + (##) Enable the TIM interface clock using __HAL_RCC_TIMx_CLK_ENABLE(); + (##) TIM pins configuration + (+++) Enable the clock for the TIM GPIOs using the following function: + __HAL_RCC_GPIOx_CLK_ENABLE(); + (+++) Configure these TIM pins in Alternate function mode using HAL_GPIO_Init(); + + (#) The external Clock can be configured, if needed (the default clock is the + internal clock from the APBx), using the following function: + HAL_TIM_ConfigClockSource, the clock configuration should be done before + any start function. + + (#) Configure the TIM in the desired functioning mode using one of the + initialization function of this driver: + (++) HAL_TIMEx_HallSensor_Init() and HAL_TIMEx_ConfigCommutEvent(): to use the + Timer Hall Sensor Interface and the commutation event with the corresponding + Interrupt and DMA request if needed (Note that One Timer is used to interface + with the Hall sensor Interface and another Timer should be used to use + the commutation event). + + (#) Activate the TIM peripheral using one of the start functions: + (++) Complementary Output Compare : HAL_TIMEx_OCN_Start(), HAL_TIMEx_OCN_Start_DMA(), + HAL_TIMEx_OCN_Start_IT() + (++) Complementary PWM generation : HAL_TIMEx_PWMN_Start(), HAL_TIMEx_PWMN_Start_DMA(), + HAL_TIMEx_PWMN_Start_IT() + (++) Complementary One-pulse mode output : HAL_TIMEx_OnePulseN_Start(), HAL_TIMEx_OnePulseN_Start_IT() + (++) Hall Sensor output : HAL_TIMEx_HallSensor_Start(), HAL_TIMEx_HallSensor_Start_DMA(), + HAL_TIMEx_HallSensor_Start_IT(). + + @endverbatim + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @defgroup TIMEx TIMEx + * @brief TIM Extended HAL module driver + * @{ + */ + +#ifdef HAL_TIM_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +static void TIM_DMADelayPulseNCplt(DMA_HandleTypeDef *hdma); +static void TIM_DMAErrorCCxN(DMA_HandleTypeDef *hdma); +static void TIM_CCxNChannelCmd(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t ChannelNState); + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup TIMEx_Exported_Functions TIM Extended Exported Functions + * @{ + */ + +/** @defgroup TIMEx_Exported_Functions_Group1 Extended Timer Hall Sensor functions + * @brief Timer Hall Sensor functions + * +@verbatim + ============================================================================== + ##### Timer Hall Sensor functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Initialize and configure TIM HAL Sensor. + (+) De-initialize TIM HAL Sensor. + (+) Start the Hall Sensor Interface. + (+) Stop the Hall Sensor Interface. + (+) Start the Hall Sensor Interface and enable interrupts. 
+ (+) Stop the Hall Sensor Interface and disable interrupts. + (+) Start the Hall Sensor Interface and enable DMA transfers. + (+) Stop the Hall Sensor Interface and disable DMA transfers. + +@endverbatim + * @{ + */ +/** + * @brief Initializes the TIM Hall Sensor Interface and initialize the associated handle. + * @note When the timer instance is initialized in Hall Sensor Interface mode, + * timer channels 1 and channel 2 are reserved and cannot be used for + * other purpose. + * @param htim TIM Hall Sensor Interface handle + * @param sConfig TIM Hall Sensor configuration structure + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Init(TIM_HandleTypeDef *htim, const TIM_HallSensor_InitTypeDef *sConfig) +{ + TIM_OC_InitTypeDef OC_Config; + + /* Check the TIM handle allocation */ + if (htim == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); + assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); + assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); + assert_param(IS_TIM_IC_POLARITY(sConfig->IC1Polarity)); + assert_param(IS_TIM_PERIOD(htim, htim->Init.Period)); + assert_param(IS_TIM_IC_PRESCALER(sConfig->IC1Prescaler)); + assert_param(IS_TIM_IC_FILTER(sConfig->IC1Filter)); + + if (htim->State == HAL_TIM_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + htim->Lock = HAL_UNLOCKED; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + /* Reset interrupt callbacks to legacy week callbacks */ + TIM_ResetCallback(htim); + + if (htim->HallSensor_MspInitCallback == NULL) + { + htim->HallSensor_MspInitCallback = HAL_TIMEx_HallSensor_MspInit; + } + /* Init the low level hardware : GPIO, CLOCK, NVIC */ + htim->HallSensor_MspInitCallback(htim); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC and DMA */ + HAL_TIMEx_HallSensor_MspInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Configure the Time base in the Encoder Mode */ + TIM_Base_SetConfig(htim->Instance, &htim->Init); + + /* Configure the Channel 1 as Input Channel to interface with the three Outputs of the Hall sensor */ + TIM_TI1_SetConfig(htim->Instance, sConfig->IC1Polarity, TIM_ICSELECTION_TRC, sConfig->IC1Filter); + + /* Reset the IC1PSC Bits */ + htim->Instance->CCMR1 &= ~TIM_CCMR1_IC1PSC; + /* Set the IC1PSC value */ + htim->Instance->CCMR1 |= sConfig->IC1Prescaler; + + /* Enable the Hall sensor interface (XOR function of the three inputs) */ + htim->Instance->CR2 |= TIM_CR2_TI1S; + + /* Select the TIM_TS_TI1F_ED signal as Input trigger for the TIM */ + htim->Instance->SMCR &= ~TIM_SMCR_TS; + htim->Instance->SMCR |= TIM_TS_TI1F_ED; + + /* Use the TIM_TS_TI1F_ED signal to reset the TIM counter each edge detection */ + htim->Instance->SMCR &= ~TIM_SMCR_SMS; + htim->Instance->SMCR |= TIM_SLAVEMODE_RESET; + + /* Program channel 2 in PWM 2 mode with the desired Commutation_Delay*/ + OC_Config.OCFastMode = TIM_OCFAST_DISABLE; + OC_Config.OCIdleState = TIM_OCIDLESTATE_RESET; + OC_Config.OCMode = TIM_OCMODE_PWM2; + OC_Config.OCNIdleState = TIM_OCNIDLESTATE_RESET; + OC_Config.OCNPolarity = TIM_OCNPOLARITY_HIGH; + OC_Config.OCPolarity = TIM_OCPOLARITY_HIGH; + OC_Config.Pulse = sConfig->Commutation_Delay; + + TIM_OC2_SetConfig(htim->Instance, &OC_Config); + + /* Select OC2REF as trigger output on TRGO: write the MMS bits in the TIMx_CR2 + register to 
101 */ + htim->Instance->CR2 &= ~TIM_CR2_MMS; + htim->Instance->CR2 |= TIM_TRGO_OC2REF; + + /* Initialize the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + + /* Initialize the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + + /* Initialize the TIM state*/ + htim->State = HAL_TIM_STATE_READY; + + return HAL_OK; +} + +/** + * @brief DeInitializes the TIM Hall Sensor interface + * @param htim TIM Hall Sensor Interface handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_DeInit(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Disable the TIM Peripheral Clock */ + __HAL_TIM_DISABLE(htim); + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + if (htim->HallSensor_MspDeInitCallback == NULL) + { + htim->HallSensor_MspDeInitCallback = HAL_TIMEx_HallSensor_MspDeInit; + } + /* DeInit the low level hardware */ + htim->HallSensor_MspDeInitCallback(htim); +#else + /* DeInit the low level hardware: GPIO, CLOCK, NVIC */ + HAL_TIMEx_HallSensor_MspDeInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_RESET; + + /* Change the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_RESET); + + /* Change TIM state */ + htim->State = HAL_TIM_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Initializes the TIM Hall Sensor MSP. + * @param htim TIM Hall Sensor Interface handle + * @retval None + */ +__weak void HAL_TIMEx_HallSensor_MspInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIMEx_HallSensor_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes TIM Hall Sensor MSP. + * @param htim TIM Hall Sensor Interface handle + * @retval None + */ +__weak void HAL_TIMEx_HallSensor_MspDeInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIMEx_HallSensor_MspDeInit could be implemented in the user file + */ +} + +/** + * @brief Starts the TIM Hall Sensor Interface. 
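+  * @note   Minimal usage sketch (htim1 and sHallConfig are application-side assumptions,
+  *         with htim1.Instance and htim1.Init filled beforehand):
+  *           sHallConfig.IC1Polarity       = TIM_ICPOLARITY_RISING;
+  *           sHallConfig.IC1Prescaler      = TIM_ICPSC_DIV1;
+  *           sHallConfig.IC1Filter         = 0U;
+  *           sHallConfig.Commutation_Delay = 0U;
+  *           if (HAL_TIMEx_HallSensor_Init(&htim1, &sHallConfig) == HAL_OK)
+  *           {
+  *             (void)HAL_TIMEx_HallSensor_Start(&htim1);
+  *           }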
+ * @param htim TIM Hall Sensor Interface handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Start(TIM_HandleTypeDef *htim) +{ + uint32_t tmpsmcr; + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + + /* Check the parameters */ + assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); + + /* Check the TIM channels state */ + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + + /* Enable the Input Capture channel 1 + (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, + TIM_CHANNEL_2 and TIM_CHANNEL_3) */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Hall sensor Interface. + * @param htim TIM Hall Sensor Interface handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); + + /* Disable the Input Capture channels 1, 2 and 3 + (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, + TIM_CHANNEL_2 and TIM_CHANNEL_3) */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Hall Sensor Interface in interrupt mode. 
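+  * @note   Each edge detected on the XORed Hall inputs raises a capture/compare 1 event;
+  *         HAL_TIM_IRQHandler() then calls HAL_TIM_IC_CaptureCallback() (or the callback
+  *         registered with HAL_TIM_RegisterCallback()) so the application can process the
+  *         new rotor position.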
+ * @param htim TIM Hall Sensor Interface handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Start_IT(TIM_HandleTypeDef *htim) +{ + uint32_t tmpsmcr; + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + + /* Check the parameters */ + assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); + + /* Check the TIM channels state */ + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + + /* Enable the capture compare Interrupts 1 event */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + + /* Enable the Input Capture channel 1 + (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, + TIM_CHANNEL_2 and TIM_CHANNEL_3) */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Hall Sensor Interface in interrupt mode. + * @param htim TIM Hall Sensor Interface handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop_IT(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); + + /* Disable the Input Capture channel 1 + (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, + TIM_CHANNEL_2 and TIM_CHANNEL_3) */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + + /* Disable the capture compare Interrupts event */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Hall Sensor Interface in DMA mode. + * @param htim TIM Hall Sensor Interface handle + * @param pData The destination Buffer address. + * @param Length The length of data to be transferred from TIM peripheral to memory. 
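+  * @note  A minimal DMA-mode sketch; the handle htim1, the buffer hall_capture and
+  *        the DMA stream already linked to the CC1 request are illustrative assumptions:
+  * @code
+  *   extern TIM_HandleTypeDef htim1;    // Hall sensor interface, CC1 DMA request mapped to a stream
+  *   static uint32_t hall_capture[16];  // destination buffer for the CC1 captures
+  *   if (HAL_TIMEx_HallSensor_Start_DMA(&htim1, hall_capture, 16U) == HAL_OK)
+  *   {
+  *     // HAL_TIM_IC_CaptureHalfCpltCallback() / HAL_TIM_IC_CaptureCallback()
+  *     // report buffer progress from the DMA transfer-complete interrupts.
+  *   }
+  * @endcode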
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Start_DMA(TIM_HandleTypeDef *htim, uint32_t *pData, uint16_t Length) +{ + uint32_t tmpsmcr; + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + + /* Check the parameters */ + assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); + + /* Set the TIM channel state */ + if ((channel_1_state == HAL_TIM_CHANNEL_STATE_BUSY) + || (complementary_channel_1_state == HAL_TIM_CHANNEL_STATE_BUSY)) + { + return HAL_BUSY; + } + else if ((channel_1_state == HAL_TIM_CHANNEL_STATE_READY) + && (complementary_channel_1_state == HAL_TIM_CHANNEL_STATE_READY)) + { + if ((pData == NULL) || (Length == 0U)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + return HAL_ERROR; + } + + /* Enable the Input Capture channel 1 + (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, + TIM_CHANNEL_2 and TIM_CHANNEL_3) */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + + /* Set the DMA Input Capture 1 Callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream for Capture 1*/ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)&htim->Instance->CCR1, (uint32_t)pData, Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the capture compare 1 Interrupt */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Hall Sensor Interface in DMA mode. 
+ * @param htim TIM Hall Sensor Interface handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop_DMA(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); + + /* Disable the Input Capture channel 1 + (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, + TIM_CHANNEL_2 and TIM_CHANNEL_3) */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + + + /* Disable the capture compare Interrupts 1 event */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); + + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup TIMEx_Exported_Functions_Group2 Extended Timer Complementary Output Compare functions + * @brief Timer Complementary Output Compare functions + * +@verbatim + ============================================================================== + ##### Timer Complementary Output Compare functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Start the Complementary Output Compare/PWM. + (+) Stop the Complementary Output Compare/PWM. + (+) Start the Complementary Output Compare/PWM and enable interrupts. + (+) Stop the Complementary Output Compare/PWM and disable interrupts. + (+) Start the Complementary Output Compare/PWM and enable DMA transfers. + (+) Stop the Complementary Output Compare/PWM and disable DMA transfers. + +@endverbatim + * @{ + */ + +/** + * @brief Starts the TIM Output Compare signal generation on the complementary + * output. + * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Start(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + /* Check the TIM complementary channel state */ + if (TIM_CHANNEL_N_STATE_GET(htim, Channel) != HAL_TIM_CHANNEL_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + + /* Enable the Capture compare channel N */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); + + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Output Compare signal generation on the complementary + * output. 
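+  * @note  Typically paired with HAL_TIMEx_OCN_Start(); a minimal sketch (the handle name
+  *        htim1 and its prior HAL_TIM_OC_Init() configuration are assumptions for illustration):
+  * @code
+  *   extern TIM_HandleTypeDef htim1;                    // advanced-control timer handle
+  *   TIM_OC_InitTypeDef sConfigOC = {0};
+  *   sConfigOC.OCMode       = TIM_OCMODE_TOGGLE;
+  *   sConfigOC.Pulse        = 1000U;
+  *   sConfigOC.OCNPolarity  = TIM_OCNPOLARITY_HIGH;
+  *   sConfigOC.OCNIdleState = TIM_OCNIDLESTATE_RESET;
+  *   (void)HAL_TIM_OC_ConfigChannel(&htim1, &sConfigOC, TIM_CHANNEL_1);
+  *   (void)HAL_TIMEx_OCN_Start(&htim1, TIM_CHANNEL_1);  // drives CH1N and sets MOE
+  *   // ...
+  *   (void)HAL_TIMEx_OCN_Stop(&htim1, TIM_CHANNEL_1);   // clears CC1NE and MOE
+  * @endcode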
+ * @param htim TIM handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + /* Disable the Capture compare channel N */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); + + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Output Compare signal generation in interrupt mode + * on the complementary output. + * @param htim TIM OC handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + /* Check the TIM complementary channel state */ + if (TIM_CHANNEL_N_STATE_GET(htim, Channel) != HAL_TIM_CHANNEL_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Enable the TIM Output Compare interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Enable the TIM Output Compare interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Enable the TIM Output Compare interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC3); + break; + } + + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Enable the TIM Break interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_BREAK); + + /* Enable the Capture compare channel N */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); + + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + } + + /* Return function status */ + return status; +} + +/** + * @brief Stops the TIM Output Compare signal generation in interrupt mode + * on the complementary output. 
+ * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpccer; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Output Compare interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Output Compare interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Output Compare interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC3); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Disable the Capture compare channel N */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); + + /* Disable the TIM Break interrupt (only if no more channel is active) */ + tmpccer = htim->Instance->CCER; + if ((tmpccer & TIM_CCER_CCxNE_MASK) == (uint32_t)RESET) + { + __HAL_TIM_DISABLE_IT(htim, TIM_IT_BREAK); + } + + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return status; +} + +/** + * @brief Starts the TIM Output Compare signal generation in DMA mode + * on the complementary output. + * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @param pData The source Buffer address. 
+ * @param Length The length of data to be transferred from memory to TIM peripheral + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, const uint32_t *pData, + uint16_t Length) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + /* Set the TIM complementary channel state */ + if (TIM_CHANNEL_N_STATE_GET(htim, Channel) == HAL_TIM_CHANNEL_STATE_BUSY) + { + return HAL_BUSY; + } + else if (TIM_CHANNEL_N_STATE_GET(htim, Channel) == HAL_TIM_CHANNEL_STATE_READY) + { + if ((pData == NULL) || (Length == 0U)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + return HAL_ERROR; + } + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMADelayPulseNCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAErrorCCxN ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)pData, (uint32_t)&htim->Instance->CCR1, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Output Compare DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMADelayPulseNCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAErrorCCxN ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)pData, (uint32_t)&htim->Instance->CCR2, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Output Compare DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMADelayPulseNCplt; + htim->hdma[TIM_DMA_ID_CC3]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAErrorCCxN ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)pData, (uint32_t)&htim->Instance->CCR3, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Output Compare DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC3); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Enable the Capture compare channel N */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); + + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + } + + /* Return function status */ + return status; +} + +/** + * @brief Stops the TIM Output Compare signal generation in DMA mode + * on the complementary output. 
+ * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Output Compare DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Output Compare DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC2); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Output Compare DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC3); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Disable the Capture compare channel N */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); + + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return status; +} + +/** + * @} + */ + +/** @defgroup TIMEx_Exported_Functions_Group3 Extended Timer Complementary PWM functions + * @brief Timer Complementary PWM functions + * +@verbatim + ============================================================================== + ##### Timer Complementary PWM functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Start the Complementary PWM. + (+) Stop the Complementary PWM. + (+) Start the Complementary PWM and enable interrupts. + (+) Stop the Complementary PWM and disable interrupts. + (+) Start the Complementary PWM and enable DMA transfers. + (+) Stop the Complementary PWM and disable DMA transfers. +@endverbatim + * @{ + */ + +/** + * @brief Starts the PWM signal generation on the complementary output. 
+ * @param htim TIM handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Start(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + /* Check the TIM complementary channel state */ + if (TIM_CHANNEL_N_STATE_GET(htim, Channel) != HAL_TIM_CHANNEL_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + + /* Enable the complementary PWM output */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); + + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the PWM signal generation on the complementary output. + * @param htim TIM handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + /* Disable the complementary PWM output */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); + + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the PWM signal generation in interrupt mode on the + * complementary output. 
+ * @param htim TIM handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + /* Check the TIM complementary channel state */ + if (TIM_CHANNEL_N_STATE_GET(htim, Channel) != HAL_TIM_CHANNEL_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Enable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Enable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Enable the TIM Capture/Compare 3 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC3); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Enable the TIM Break interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_BREAK); + + /* Enable the complementary PWM output */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); + + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + } + + /* Return function status */ + return status; +} + +/** + * @brief Stops the PWM signal generation in interrupt mode on the + * complementary output. 
+ * @param htim TIM handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpccer; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Capture/Compare 3 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC3); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Disable the complementary PWM output */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); + + /* Disable the TIM Break interrupt (only if no more channel is active) */ + tmpccer = htim->Instance->CCER; + if ((tmpccer & TIM_CCER_CCxNE_MASK) == (uint32_t)RESET) + { + __HAL_TIM_DISABLE_IT(htim, TIM_IT_BREAK); + } + + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return status; +} + +/** + * @brief Starts the TIM PWM signal generation in DMA mode on the + * complementary output + * @param htim TIM handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @param pData The source Buffer address. 
+ * @param Length The length of data to be transferred from memory to TIM peripheral + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, const uint32_t *pData, + uint16_t Length) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + /* Set the TIM complementary channel state */ + if (TIM_CHANNEL_N_STATE_GET(htim, Channel) == HAL_TIM_CHANNEL_STATE_BUSY) + { + return HAL_BUSY; + } + else if (TIM_CHANNEL_N_STATE_GET(htim, Channel) == HAL_TIM_CHANNEL_STATE_READY) + { + if ((pData == NULL) || (Length == 0U)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + return HAL_ERROR; + } + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMADelayPulseNCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAErrorCCxN ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)pData, (uint32_t)&htim->Instance->CCR1, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 1 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMADelayPulseNCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAErrorCCxN ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)pData, (uint32_t)&htim->Instance->CCR2, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 2 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMADelayPulseNCplt; + htim->hdma[TIM_DMA_ID_CC3]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAErrorCCxN ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)pData, (uint32_t)&htim->Instance->CCR3, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 3 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC3); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Enable the complementary PWM output */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); + + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + } + + /* Return function status */ + return status; +} + +/** + * @brief Stops the TIM PWM signal generation in DMA mode on the complementary + * output + * @param htim TIM handle + * 
@param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Capture/Compare 1 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Capture/Compare 2 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC2); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Capture/Compare 3 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC3); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Disable the complementary PWM output */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); + + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return status; +} + +/** + * @} + */ + +/** @defgroup TIMEx_Exported_Functions_Group4 Extended Timer Complementary One Pulse functions + * @brief Timer Complementary One Pulse functions + * +@verbatim + ============================================================================== + ##### Timer Complementary One Pulse functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Start the Complementary One Pulse generation. + (+) Stop the Complementary One Pulse. + (+) Start the Complementary One Pulse and enable interrupts. + (+) Stop the Complementary One Pulse and disable interrupts. + +@endverbatim + * @{ + */ + +/** + * @brief Starts the TIM One Pulse signal generation on the complementary + * output. + * @note OutputChannel must match the pulse output channel chosen when calling + * @ref HAL_TIM_OnePulse_ConfigChannel(). + * @param htim TIM One Pulse handle + * @param OutputChannel pulse output channel to enable + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Start(TIM_HandleTypeDef *htim, uint32_t OutputChannel) +{ + uint32_t input_channel = (OutputChannel == TIM_CHANNEL_1) ? 
TIM_CHANNEL_2 : TIM_CHANNEL_1; + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, OutputChannel)); + + /* Check the TIM channels state */ + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + + /* Enable the complementary One Pulse output channel and the Input Capture channel */ + TIM_CCxNChannelCmd(htim->Instance, OutputChannel, TIM_CCxN_ENABLE); + TIM_CCxChannelCmd(htim->Instance, input_channel, TIM_CCx_ENABLE); + + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM One Pulse signal generation on the complementary + * output. + * @note OutputChannel must match the pulse output channel chosen when calling + * @ref HAL_TIM_OnePulse_ConfigChannel(). + * @param htim TIM One Pulse handle + * @param OutputChannel pulse output channel to disable + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Stop(TIM_HandleTypeDef *htim, uint32_t OutputChannel) +{ + uint32_t input_channel = (OutputChannel == TIM_CHANNEL_1) ? TIM_CHANNEL_2 : TIM_CHANNEL_1; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, OutputChannel)); + + /* Disable the complementary One Pulse output channel and the Input Capture channel */ + TIM_CCxNChannelCmd(htim->Instance, OutputChannel, TIM_CCxN_DISABLE); + TIM_CCxChannelCmd(htim->Instance, input_channel, TIM_CCx_DISABLE); + + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM One Pulse signal generation in interrupt mode on the + * complementary channel. + * @note OutputChannel must match the pulse output channel chosen when calling + * @ref HAL_TIM_OnePulse_ConfigChannel(). 
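+  * @note  A minimal sketch; the handle htim1 and its one-pulse configuration
+  *        (HAL_TIM_OnePulse_Init()/HAL_TIM_OnePulse_ConfigChannel() with TIM_CHANNEL_1 as
+  *        pulse output and TIM_CHANNEL_2 as trigger input) are assumptions made for illustration:
+  * @code
+  *   extern TIM_HandleTypeDef htim1;   // configured for one-pulse mode as described above
+  *   // Arms CH1/CH1N and the CC1/CC2 interrupts; the pulse is emitted on the
+  *   // next trigger edge captured on TI2.
+  *   (void)HAL_TIMEx_OnePulseN_Start_IT(&htim1, TIM_CHANNEL_1);
+  * @endcode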
+ * @param htim TIM One Pulse handle + * @param OutputChannel pulse output channel to enable + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Start_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel) +{ + uint32_t input_channel = (OutputChannel == TIM_CHANNEL_1) ? TIM_CHANNEL_2 : TIM_CHANNEL_1; + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, OutputChannel)); + + /* Check the TIM channels state */ + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + + /* Enable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + + /* Enable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + + /* Enable the complementary One Pulse output channel and the Input Capture channel */ + TIM_CCxNChannelCmd(htim->Instance, OutputChannel, TIM_CCxN_ENABLE); + TIM_CCxChannelCmd(htim->Instance, input_channel, TIM_CCx_ENABLE); + + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM One Pulse signal generation in interrupt mode on the + * complementary channel. + * @note OutputChannel must match the pulse output channel chosen when calling + * @ref HAL_TIM_OnePulse_ConfigChannel(). + * @param htim TIM One Pulse handle + * @param OutputChannel pulse output channel to disable + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel) +{ + uint32_t input_channel = (OutputChannel == TIM_CHANNEL_1) ? 
TIM_CHANNEL_2 : TIM_CHANNEL_1; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, OutputChannel)); + + /* Disable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + + /* Disable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + + /* Disable the complementary One Pulse output channel and the Input Capture channel */ + TIM_CCxNChannelCmd(htim->Instance, OutputChannel, TIM_CCxN_DISABLE); + TIM_CCxChannelCmd(htim->Instance, input_channel, TIM_CCx_DISABLE); + + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup TIMEx_Exported_Functions_Group5 Extended Peripheral Control functions + * @brief Peripheral Control functions + * +@verbatim + ============================================================================== + ##### Peripheral Control functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Configure the commutation event in case of use of the Hall sensor interface. + (+) Configure Output channels for OC and PWM mode. + + (+) Configure Complementary channels, break features and dead time. + (+) Configure Master synchronization. + (+) Configure timer remapping capabilities. + +@endverbatim + * @{ + */ + +/** + * @brief Configure the TIM commutation event sequence. + * @note This function is mandatory to use the commutation event in order to + * update the configuration at each commutation detection on the TRGI input of the Timer, + * the typical use of this feature is with the use of another Timer(interface Timer) + * configured in Hall sensor interface, this interface Timer will generate the + * commutation at its TRGO output (connected to Timer used in this function) each time + * the TI1 of the Interface Timer detect a commutation at its input TI1. 
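+  * @note  A minimal sketch of the typical pairing; the handle names htim_hall (interface
+  *        timer in Hall sensor mode) and htim_motor (PWM timer) and the ITR0 wiring between
+  *        them are assumptions made for illustration, the actual ITRx index depends on the
+  *        timer instances used:
+  * @code
+  *   extern TIM_HandleTypeDef htim_hall;   // Hall sensor interface timer, TRGO = OC2REF
+  *   extern TIM_HandleTypeDef htim_motor;  // advanced-control timer driving the bridge
+  *   // Preload the CCxE/CCxNE/OCxM bits and reload them on each TRGI commutation event
+  *   (void)HAL_TIMEx_ConfigCommutEvent(&htim_motor, TIM_TS_ITR0, TIM_COMMUTATION_TRGI);
+  * @endcode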
+ * @param htim TIM handle + * @param InputTrigger the Internal trigger corresponding to the Timer Interfacing with the Hall sensor + * This parameter can be one of the following values: + * @arg TIM_TS_ITR0: Internal trigger 0 selected + * @arg TIM_TS_ITR1: Internal trigger 1 selected + * @arg TIM_TS_ITR2: Internal trigger 2 selected + * @arg TIM_TS_ITR3: Internal trigger 3 selected + * @arg TIM_TS_NONE: No trigger is needed + * @param CommutationSource the Commutation Event source + * This parameter can be one of the following values: + * @arg TIM_COMMUTATION_TRGI: Commutation source is the TRGI of the Interface Timer + * @arg TIM_COMMUTATION_SOFTWARE: Commutation source is set by software using the COMG bit + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_ConfigCommutEvent(TIM_HandleTypeDef *htim, uint32_t InputTrigger, + uint32_t CommutationSource) +{ + /* Check the parameters */ + assert_param(IS_TIM_COMMUTATION_EVENT_INSTANCE(htim->Instance)); + assert_param(IS_TIM_INTERNAL_TRIGGEREVENT_SELECTION(InputTrigger)); + + __HAL_LOCK(htim); + + if ((InputTrigger == TIM_TS_ITR0) || (InputTrigger == TIM_TS_ITR1) || + (InputTrigger == TIM_TS_ITR2) || (InputTrigger == TIM_TS_ITR3)) + { + /* Select the Input trigger */ + htim->Instance->SMCR &= ~TIM_SMCR_TS; + htim->Instance->SMCR |= InputTrigger; + } + + /* Select the Capture Compare preload feature */ + htim->Instance->CR2 |= TIM_CR2_CCPC; + /* Select the Commutation event source */ + htim->Instance->CR2 &= ~TIM_CR2_CCUS; + htim->Instance->CR2 |= CommutationSource; + + /* Disable Commutation Interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_COM); + + /* Disable Commutation DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_COM); + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Configure the TIM commutation event sequence with interrupt. + * @note This function is mandatory to use the commutation event in order to + * update the configuration at each commutation detection on the TRGI input of the Timer, + * the typical use of this feature is with the use of another Timer(interface Timer) + * configured in Hall sensor interface, this interface Timer will generate the + * commutation at its TRGO output (connected to Timer used in this function) each time + * the TI1 of the Interface Timer detect a commutation at its input TI1. 
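+  * @note  Same usage as HAL_TIMEx_ConfigCommutEvent(), with the COM interrupt enabled so that
+  *        HAL_TIMEx_CommutCallback() runs on every commutation. A minimal sketch (the handle
+  *        name htim_motor is an illustrative assumption):
+  * @code
+  *   // at initialization time:
+  *   (void)HAL_TIMEx_ConfigCommutEvent_IT(&htim_motor, TIM_TS_ITR0, TIM_COMMUTATION_TRGI);
+  *
+  *   // user override of the weak callback, invoked from the timer IRQ handler:
+  *   void HAL_TIMEx_CommutCallback(TIM_HandleTypeDef *htim)
+  *   {
+  *     // update CCRx / OCxM here for the next motor step
+  *   }
+  * @endcode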
+ * @param htim TIM handle + * @param InputTrigger the Internal trigger corresponding to the Timer Interfacing with the Hall sensor + * This parameter can be one of the following values: + * @arg TIM_TS_ITR0: Internal trigger 0 selected + * @arg TIM_TS_ITR1: Internal trigger 1 selected + * @arg TIM_TS_ITR2: Internal trigger 2 selected + * @arg TIM_TS_ITR3: Internal trigger 3 selected + * @arg TIM_TS_NONE: No trigger is needed + * @param CommutationSource the Commutation Event source + * This parameter can be one of the following values: + * @arg TIM_COMMUTATION_TRGI: Commutation source is the TRGI of the Interface Timer + * @arg TIM_COMMUTATION_SOFTWARE: Commutation source is set by software using the COMG bit + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_ConfigCommutEvent_IT(TIM_HandleTypeDef *htim, uint32_t InputTrigger, + uint32_t CommutationSource) +{ + /* Check the parameters */ + assert_param(IS_TIM_COMMUTATION_EVENT_INSTANCE(htim->Instance)); + assert_param(IS_TIM_INTERNAL_TRIGGEREVENT_SELECTION(InputTrigger)); + + __HAL_LOCK(htim); + + if ((InputTrigger == TIM_TS_ITR0) || (InputTrigger == TIM_TS_ITR1) || + (InputTrigger == TIM_TS_ITR2) || (InputTrigger == TIM_TS_ITR3)) + { + /* Select the Input trigger */ + htim->Instance->SMCR &= ~TIM_SMCR_TS; + htim->Instance->SMCR |= InputTrigger; + } + + /* Select the Capture Compare preload feature */ + htim->Instance->CR2 |= TIM_CR2_CCPC; + /* Select the Commutation event source */ + htim->Instance->CR2 &= ~TIM_CR2_CCUS; + htim->Instance->CR2 |= CommutationSource; + + /* Disable Commutation DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_COM); + + /* Enable the Commutation Interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_COM); + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Configure the TIM commutation event sequence with DMA. + * @note This function is mandatory to use the commutation event in order to + * update the configuration at each commutation detection on the TRGI input of the Timer, + * the typical use of this feature is with the use of another Timer(interface Timer) + * configured in Hall sensor interface, this interface Timer will generate the + * commutation at its TRGO output (connected to Timer used in this function) each time + * the TI1 of the Interface Timer detect a commutation at its input TI1. 
+ * @note The user should configure the DMA in his own software, in This function only the COMDE bit is set + * @param htim TIM handle + * @param InputTrigger the Internal trigger corresponding to the Timer Interfacing with the Hall sensor + * This parameter can be one of the following values: + * @arg TIM_TS_ITR0: Internal trigger 0 selected + * @arg TIM_TS_ITR1: Internal trigger 1 selected + * @arg TIM_TS_ITR2: Internal trigger 2 selected + * @arg TIM_TS_ITR3: Internal trigger 3 selected + * @arg TIM_TS_NONE: No trigger is needed + * @param CommutationSource the Commutation Event source + * This parameter can be one of the following values: + * @arg TIM_COMMUTATION_TRGI: Commutation source is the TRGI of the Interface Timer + * @arg TIM_COMMUTATION_SOFTWARE: Commutation source is set by software using the COMG bit + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_ConfigCommutEvent_DMA(TIM_HandleTypeDef *htim, uint32_t InputTrigger, + uint32_t CommutationSource) +{ + /* Check the parameters */ + assert_param(IS_TIM_COMMUTATION_EVENT_INSTANCE(htim->Instance)); + assert_param(IS_TIM_INTERNAL_TRIGGEREVENT_SELECTION(InputTrigger)); + + __HAL_LOCK(htim); + + if ((InputTrigger == TIM_TS_ITR0) || (InputTrigger == TIM_TS_ITR1) || + (InputTrigger == TIM_TS_ITR2) || (InputTrigger == TIM_TS_ITR3)) + { + /* Select the Input trigger */ + htim->Instance->SMCR &= ~TIM_SMCR_TS; + htim->Instance->SMCR |= InputTrigger; + } + + /* Select the Capture Compare preload feature */ + htim->Instance->CR2 |= TIM_CR2_CCPC; + /* Select the Commutation event source */ + htim->Instance->CR2 &= ~TIM_CR2_CCUS; + htim->Instance->CR2 |= CommutationSource; + + /* Enable the Commutation DMA Request */ + /* Set the DMA Commutation Callback */ + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferCpltCallback = TIMEx_DMACommutationCplt; + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferHalfCpltCallback = TIMEx_DMACommutationHalfCplt; + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferErrorCallback = TIM_DMAError; + + /* Disable Commutation Interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_COM); + + /* Enable the Commutation DMA Request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_COM); + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Configures the TIM in master mode. + * @param htim TIM handle. + * @param sMasterConfig pointer to a TIM_MasterConfigTypeDef structure that + * contains the selected trigger output (TRGO) and the Master/Slave + * mode. 
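+  * @note  A minimal sketch routing the update event to TRGO (for instance to trigger an ADC
+  *        or a slave timer); the handle name htim2 is an illustrative assumption:
+  * @code
+  *   extern TIM_HandleTypeDef htim2;
+  *   TIM_MasterConfigTypeDef sMasterConfig = {0};
+  *   sMasterConfig.MasterOutputTrigger = TIM_TRGO_UPDATE;
+  *   sMasterConfig.MasterSlaveMode     = TIM_MASTERSLAVEMODE_DISABLE;
+  *   (void)HAL_TIMEx_MasterConfigSynchronization(&htim2, &sMasterConfig);
+  * @endcode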
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_MasterConfigSynchronization(TIM_HandleTypeDef *htim, + const TIM_MasterConfigTypeDef *sMasterConfig) +{ + uint32_t tmpcr2; + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_MASTER_INSTANCE(htim->Instance)); + assert_param(IS_TIM_TRGO_SOURCE(sMasterConfig->MasterOutputTrigger)); + assert_param(IS_TIM_MSM_STATE(sMasterConfig->MasterSlaveMode)); + + /* Check input state */ + __HAL_LOCK(htim); + + /* Change the handler state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Get the TIMx CR2 register value */ + tmpcr2 = htim->Instance->CR2; + + /* Get the TIMx SMCR register value */ + tmpsmcr = htim->Instance->SMCR; + + /* Reset the MMS Bits */ + tmpcr2 &= ~TIM_CR2_MMS; + /* Select the TRGO source */ + tmpcr2 |= sMasterConfig->MasterOutputTrigger; + + /* Update TIMx CR2 */ + htim->Instance->CR2 = tmpcr2; + + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + /* Reset the MSM Bit */ + tmpsmcr &= ~TIM_SMCR_MSM; + /* Set master mode */ + tmpsmcr |= sMasterConfig->MasterSlaveMode; + + /* Update TIMx SMCR */ + htim->Instance->SMCR = tmpsmcr; + } + + /* Change the htim state */ + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Configures the Break feature, dead time, Lock level, OSSI/OSSR State + * and the AOE(automatic output enable). + * @param htim TIM handle + * @param sBreakDeadTimeConfig pointer to a TIM_ConfigBreakDeadConfigTypeDef structure that + * contains the BDTR Register configuration information for the TIM peripheral. + * @note Interrupts can be generated when an active level is detected on the + * break input, the break 2 input or the system break input. Break + * interrupt can be enabled by calling the @ref __HAL_TIM_ENABLE_IT macro. 
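+  * @note  A minimal sketch; the handle name htim1 and the raw DTG value of 100 are illustrative
+  *        assumptions (the dead-time encoding is described in the reference manual):
+  * @code
+  *   extern TIM_HandleTypeDef htim1;
+  *   TIM_BreakDeadTimeConfigTypeDef sBreakDeadTimeConfig = {0};
+  *   sBreakDeadTimeConfig.OffStateRunMode  = TIM_OSSR_ENABLE;
+  *   sBreakDeadTimeConfig.OffStateIDLEMode = TIM_OSSI_ENABLE;
+  *   sBreakDeadTimeConfig.LockLevel        = TIM_LOCKLEVEL_OFF;
+  *   sBreakDeadTimeConfig.DeadTime         = 100U;
+  *   sBreakDeadTimeConfig.BreakState       = TIM_BREAK_ENABLE;
+  *   sBreakDeadTimeConfig.BreakPolarity    = TIM_BREAKPOLARITY_HIGH;
+  *   sBreakDeadTimeConfig.AutomaticOutput  = TIM_AUTOMATICOUTPUT_DISABLE;
+  *   (void)HAL_TIMEx_ConfigBreakDeadTime(&htim1, &sBreakDeadTimeConfig);
+  * @endcode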
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_ConfigBreakDeadTime(TIM_HandleTypeDef *htim, + const TIM_BreakDeadTimeConfigTypeDef *sBreakDeadTimeConfig) +{ + /* Keep this variable initialized to 0 as it is used to configure BDTR register */ + uint32_t tmpbdtr = 0U; + + /* Check the parameters */ + assert_param(IS_TIM_BREAK_INSTANCE(htim->Instance)); + assert_param(IS_TIM_OSSR_STATE(sBreakDeadTimeConfig->OffStateRunMode)); + assert_param(IS_TIM_OSSI_STATE(sBreakDeadTimeConfig->OffStateIDLEMode)); + assert_param(IS_TIM_LOCK_LEVEL(sBreakDeadTimeConfig->LockLevel)); + assert_param(IS_TIM_DEADTIME(sBreakDeadTimeConfig->DeadTime)); + assert_param(IS_TIM_BREAK_STATE(sBreakDeadTimeConfig->BreakState)); + assert_param(IS_TIM_BREAK_POLARITY(sBreakDeadTimeConfig->BreakPolarity)); + assert_param(IS_TIM_AUTOMATIC_OUTPUT_STATE(sBreakDeadTimeConfig->AutomaticOutput)); + + /* Check input state */ + __HAL_LOCK(htim); + + /* Set the Lock level, the Break enable Bit and the Polarity, the OSSR State, + the OSSI State, the dead time value and the Automatic Output Enable Bit */ + + /* Set the BDTR bits */ + MODIFY_REG(tmpbdtr, TIM_BDTR_DTG, sBreakDeadTimeConfig->DeadTime); + MODIFY_REG(tmpbdtr, TIM_BDTR_LOCK, sBreakDeadTimeConfig->LockLevel); + MODIFY_REG(tmpbdtr, TIM_BDTR_OSSI, sBreakDeadTimeConfig->OffStateIDLEMode); + MODIFY_REG(tmpbdtr, TIM_BDTR_OSSR, sBreakDeadTimeConfig->OffStateRunMode); + MODIFY_REG(tmpbdtr, TIM_BDTR_BKE, sBreakDeadTimeConfig->BreakState); + MODIFY_REG(tmpbdtr, TIM_BDTR_BKP, sBreakDeadTimeConfig->BreakPolarity); + MODIFY_REG(tmpbdtr, TIM_BDTR_AOE, sBreakDeadTimeConfig->AutomaticOutput); + + + /* Set TIMx_BDTR */ + htim->Instance->BDTR = tmpbdtr; + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Configures the TIMx Remapping input capabilities. + * @param htim TIM handle. + * @param Remap specifies the TIM remapping source. + * For TIM1, the parameter can have the following values: (**) + * @arg TIM_TIM1_TIM3_TRGO: TIM1 ITR2 is connected to TIM3 TRGO + * @arg TIM_TIM1_LPTIM: TIM1 ITR2 is connected to LPTIM1 output + * + * For TIM2, the parameter can have the following values: (**) + * @arg TIM_TIM2_TIM8_TRGO: TIM2 ITR1 is connected to TIM8 TRGO (*) + * @arg TIM_TIM2_ETH_PTP: TIM2 ITR1 is connected to PTP trigger output (*) + * @arg TIM_TIM2_USBFS_SOF: TIM2 ITR1 is connected to OTG FS SOF + * @arg TIM_TIM2_USBHS_SOF: TIM2 ITR1 is connected to OTG FS SOF + * + * For TIM5, the parameter can have the following values: + * @arg TIM_TIM5_GPIO: TIM5 TI4 is connected to GPIO + * @arg TIM_TIM5_LSI: TIM5 TI4 is connected to LSI + * @arg TIM_TIM5_LSE: TIM5 TI4 is connected to LSE + * @arg TIM_TIM5_RTC: TIM5 TI4 is connected to the RTC wakeup interrupt + * @arg TIM_TIM5_TIM3_TRGO: TIM5 ITR1 is connected to TIM3 TRGO (*) + * @arg TIM_TIM5_LPTIM: TIM5 ITR1 is connected to LPTIM1 output (*) + * + * For TIM9, the parameter can have the following values: (**) + * @arg TIM_TIM9_TIM3_TRGO: TIM9 ITR1 is connected to TIM3 TRGO + * @arg TIM_TIM9_LPTIM: TIM9 ITR1 is connected to LPTIM1 output + * + * For TIM11, the parameter can have the following values: + * @arg TIM_TIM11_GPIO: TIM11 TI1 is connected to GPIO + * @arg TIM_TIM11_HSE: TIM11 TI1 is connected to HSE_RTC clock + * @arg TIM_TIM11_SPDIFRX: TIM11 TI1 is connected to SPDIFRX_FRAME_SYNC (*) + * + * (*) Value not defined in all devices. \n + * (**) Register not available in all devices. 
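+  * @note  A minimal sketch connecting TIM5 TI4 to the LSI clock (for example to measure the
+  *        LSI frequency); the handle name htim5 is an illustrative assumption:
+  * @code
+  *   extern TIM_HandleTypeDef htim5;
+  *   (void)HAL_TIMEx_RemapConfig(&htim5, TIM_TIM5_LSI);
+  * @endcode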
+ * + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_RemapConfig(TIM_HandleTypeDef *htim, uint32_t Remap) +{ + /* Check parameters */ + assert_param(IS_TIM_REMAP(htim->Instance, Remap)); + + __HAL_LOCK(htim); + +#if defined(LPTIM_OR_TIM1_ITR2_RMP) && defined(LPTIM_OR_TIM5_ITR1_RMP) && defined(LPTIM_OR_TIM9_ITR1_RMP) + if ((Remap & LPTIM_REMAP_MASK) == LPTIM_REMAP_MASK) + { + /* Connect TIMx internal trigger to LPTIM1 output */ + __HAL_RCC_LPTIM1_CLK_ENABLE(); + MODIFY_REG(LPTIM1->OR, + (LPTIM_OR_TIM1_ITR2_RMP | LPTIM_OR_TIM5_ITR1_RMP | LPTIM_OR_TIM9_ITR1_RMP), + Remap & ~(LPTIM_REMAP_MASK)); + } + else + { + /* Set the Timer remapping configuration */ + WRITE_REG(htim->Instance->OR, Remap); + } +#else + /* Set the Timer remapping configuration */ + WRITE_REG(htim->Instance->OR, Remap); +#endif /* LPTIM_OR_TIM1_ITR2_RMP && LPTIM_OR_TIM5_ITR1_RMP && LPTIM_OR_TIM9_ITR1_RMP */ + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup TIMEx_Exported_Functions_Group6 Extended Callbacks functions + * @brief Extended Callbacks functions + * +@verbatim + ============================================================================== + ##### Extended Callbacks functions ##### + ============================================================================== + [..] + This section provides Extended TIM callback functions: + (+) Timer Commutation callback + (+) Timer Break callback + +@endverbatim + * @{ + */ + +/** + * @brief Commutation callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIMEx_CommutCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIMEx_CommutCallback could be implemented in the user file + */ +} +/** + * @brief Commutation half complete callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIMEx_CommutHalfCpltCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIMEx_CommutHalfCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Break detection callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIMEx_BreakCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIMEx_BreakCallback could be implemented in the user file + */ +} +/** + * @} + */ + +/** @defgroup TIMEx_Exported_Functions_Group7 Extended Peripheral State functions + * @brief Extended Peripheral State functions + * +@verbatim + ============================================================================== + ##### Extended Peripheral State functions ##### + ============================================================================== + [..] + This subsection permits to get in run-time the status of the peripheral + and the data flow. + +@endverbatim + * @{ + */ + +/** + * @brief Return the TIM Hall Sensor interface handle state. + * @param htim TIM Hall Sensor handle + * @retval HAL state + */ +HAL_TIM_StateTypeDef HAL_TIMEx_HallSensor_GetState(const TIM_HandleTypeDef *htim) +{ + return htim->State; +} + +/** + * @brief Return actual state of the TIM complementary channel. 
+ * @param htim TIM handle + * @param ChannelN TIM Complementary channel + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 + * @arg TIM_CHANNEL_2: TIM Channel 2 + * @arg TIM_CHANNEL_3: TIM Channel 3 + * @retval TIM Complementary channel state + */ +HAL_TIM_ChannelStateTypeDef HAL_TIMEx_GetChannelNState(const TIM_HandleTypeDef *htim, uint32_t ChannelN) +{ + HAL_TIM_ChannelStateTypeDef channel_state; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, ChannelN)); + + channel_state = TIM_CHANNEL_N_STATE_GET(htim, ChannelN); + + return channel_state; +} +/** + * @} + */ + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup TIMEx_Private_Functions TIM Extended Private Functions + * @{ + */ + +/** + * @brief TIM DMA Commutation callback. + * @param hdma pointer to DMA handle. + * @retval None + */ +void TIMEx_DMACommutationCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + /* Change the htim state */ + htim->State = HAL_TIM_STATE_READY; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->CommutationCallback(htim); +#else + HAL_TIMEx_CommutCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ +} + +/** + * @brief TIM DMA Commutation half complete callback. + * @param hdma pointer to DMA handle. + * @retval None + */ +void TIMEx_DMACommutationHalfCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + /* Change the htim state */ + htim->State = HAL_TIM_STATE_READY; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->CommutationHalfCpltCallback(htim); +#else + HAL_TIMEx_CommutHalfCpltCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ +} + + +/** + * @brief TIM DMA Delay Pulse complete callback (complementary channel). + * @param hdma pointer to DMA handle. + * @retval None + */ +static void TIM_DMADelayPulseNCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + if (hdma == htim->hdma[TIM_DMA_ID_CC1]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + } + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC2]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + } + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC3]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_3, HAL_TIM_CHANNEL_STATE_READY); + } + } + else + { + /* nothing to do */ + } + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->PWM_PulseFinishedCallback(htim); +#else + HAL_TIM_PWM_PulseFinishedCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; +} + +/** + * @brief TIM DMA error callback (complementary channel) + * @param hdma pointer to DMA handle. 
+ * @retval None + */ +static void TIM_DMAErrorCCxN(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + if (hdma == htim->hdma[TIM_DMA_ID_CC1]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC2]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC3]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_3, HAL_TIM_CHANNEL_STATE_READY); + } + else + { + /* nothing to do */ + } + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->ErrorCallback(htim); +#else + HAL_TIM_ErrorCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; +} + +/** + * @brief Enables or disables the TIM Capture Compare Channel xN. + * @param TIMx to select the TIM peripheral + * @param Channel specifies the TIM Channel + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 + * @arg TIM_CHANNEL_2: TIM Channel 2 + * @arg TIM_CHANNEL_3: TIM Channel 3 + * @param ChannelNState specifies the TIM Channel CCxNE bit new state. + * This parameter can be: TIM_CCxN_ENABLE or TIM_CCxN_Disable. + * @retval None + */ +static void TIM_CCxNChannelCmd(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t ChannelNState) +{ + uint32_t tmp; + + tmp = TIM_CCER_CC1NE << (Channel & 0xFU); /* 0xFU = 15 bits max shift */ + + /* Reset the CCxNE Bit */ + TIMx->CCER &= ~tmp; + + /* Set or reset the CCxNE Bit */ + TIMx->CCER |= (uint32_t)(ChannelNState << (Channel & 0xFU)); /* 0xFU = 15 bits max shift */ +} +/** + * @} + */ + +#endif /* HAL_TIM_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_uart.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_uart.c new file mode 100644 index 0000000..33a5f00 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_uart.c @@ -0,0 +1,3771 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_uart.c + * @author MCD Application Team + * @brief UART HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Universal Asynchronous Receiver Transmitter Peripheral (UART). + * + Initialization and de-initialization functions + * + IO operation functions + * + Peripheral Control functions + * + Peripheral State and Errors functions + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + The UART HAL driver can be used as follows: + + (#) Declare a UART_HandleTypeDef handle structure (eg. UART_HandleTypeDef huart). 
+ (#) Initialize the UART low level resources by implementing the HAL_UART_MspInit() API: + (##) Enable the USARTx interface clock. + (##) UART pins configuration: + (+++) Enable the clock for the UART GPIOs. + (+++) Configure the UART TX/RX pins as alternate function pull-up. + (##) NVIC configuration if you need to use interrupt process (HAL_UART_Transmit_IT() + and HAL_UART_Receive_IT() APIs): + (+++) Configure the USARTx interrupt priority. + (+++) Enable the NVIC USART IRQ handle. + (##) DMA Configuration if you need to use DMA process (HAL_UART_Transmit_DMA() + and HAL_UART_Receive_DMA() APIs): + (+++) Declare a DMA handle structure for the Tx/Rx stream. + (+++) Enable the DMAx interface clock. + (+++) Configure the declared DMA handle structure with the required + Tx/Rx parameters. + (+++) Configure the DMA Tx/Rx stream. + (+++) Associate the initialized DMA handle to the UART DMA Tx/Rx handle. + (+++) Configure the priority and enable the NVIC for the transfer complete + interrupt on the DMA Tx/Rx stream. + (+++) Configure the USARTx interrupt priority and enable the NVIC USART IRQ handle + (used for last byte sending completion detection in DMA non circular mode) + + (#) Program the Baud Rate, Word Length, Stop Bit, Parity, Hardware + flow control and Mode(Receiver/Transmitter) in the huart Init structure. + + (#) For the UART asynchronous mode, initialize the UART registers by calling + the HAL_UART_Init() API. + + (#) For the UART Half duplex mode, initialize the UART registers by calling + the HAL_HalfDuplex_Init() API. + + (#) For the LIN mode, initialize the UART registers by calling the HAL_LIN_Init() API. + + (#) For the Multi-Processor mode, initialize the UART registers by calling + the HAL_MultiProcessor_Init() API. + + [..] + (@) The specific UART interrupts (Transmission complete interrupt, + RXNE interrupt and Error Interrupts) will be managed using the macros + __HAL_UART_ENABLE_IT() and __HAL_UART_DISABLE_IT() inside the transmit + and receive process. + + [..] + (@) These APIs (HAL_UART_Init() and HAL_HalfDuplex_Init()) configure also the + low level Hardware GPIO, CLOCK, CORTEX...etc) by calling the customized + HAL_UART_MspInit() API. + + ##### Callback registration ##### + ================================== + + [..] + The compilation define USE_HAL_UART_REGISTER_CALLBACKS when set to 1 + allows the user to configure dynamically the driver callbacks. + + [..] + Use Function HAL_UART_RegisterCallback() to register a user callback. + Function HAL_UART_RegisterCallback() allows to register following callbacks: + (+) TxHalfCpltCallback : Tx Half Complete Callback. + (+) TxCpltCallback : Tx Complete Callback. + (+) RxHalfCpltCallback : Rx Half Complete Callback. + (+) RxCpltCallback : Rx Complete Callback. + (+) ErrorCallback : Error Callback. + (+) AbortCpltCallback : Abort Complete Callback. + (+) AbortTransmitCpltCallback : Abort Transmit Complete Callback. + (+) AbortReceiveCpltCallback : Abort Receive Complete Callback. + (+) MspInitCallback : UART MspInit. + (+) MspDeInitCallback : UART MspDeInit. + This function takes as parameters the HAL peripheral handle, the Callback ID + and a pointer to the user callback function. + + [..] + Use function HAL_UART_UnRegisterCallback() to reset a callback to the default + weak (surcharged) function. + HAL_UART_UnRegisterCallback() takes as parameters the HAL peripheral handle, + and the Callback ID. + This function allows to reset following callbacks: + (+) TxHalfCpltCallback : Tx Half Complete Callback. 
+ (+) TxCpltCallback : Tx Complete Callback. + (+) RxHalfCpltCallback : Rx Half Complete Callback. + (+) RxCpltCallback : Rx Complete Callback. + (+) ErrorCallback : Error Callback. + (+) AbortCpltCallback : Abort Complete Callback. + (+) AbortTransmitCpltCallback : Abort Transmit Complete Callback. + (+) AbortReceiveCpltCallback : Abort Receive Complete Callback. + (+) MspInitCallback : UART MspInit. + (+) MspDeInitCallback : UART MspDeInit. + + [..] + For specific callback RxEventCallback, use dedicated registration/reset functions: + respectively HAL_UART_RegisterRxEventCallback() , HAL_UART_UnRegisterRxEventCallback(). + + [..] + By default, after the HAL_UART_Init() and when the state is HAL_UART_STATE_RESET + all callbacks are set to the corresponding weak (surcharged) functions: + examples HAL_UART_TxCpltCallback(), HAL_UART_RxHalfCpltCallback(). + Exception done for MspInit and MspDeInit functions that are respectively + reset to the legacy weak (surcharged) functions in the HAL_UART_Init() + and HAL_UART_DeInit() only when these callbacks are null (not registered beforehand). + If not, MspInit or MspDeInit are not null, the HAL_UART_Init() and HAL_UART_DeInit() + keep and use the user MspInit/MspDeInit callbacks (registered beforehand). + + [..] + Callbacks can be registered/unregistered in HAL_UART_STATE_READY state only. + Exception done MspInit/MspDeInit that can be registered/unregistered + in HAL_UART_STATE_READY or HAL_UART_STATE_RESET state, thus registered (user) + MspInit/DeInit callbacks can be used during the Init/DeInit. + In that case first register the MspInit/MspDeInit user callbacks + using HAL_UART_RegisterCallback() before calling HAL_UART_DeInit() + or HAL_UART_Init() function. + + [..] + When The compilation define USE_HAL_UART_REGISTER_CALLBACKS is set to 0 or + not defined, the callback registration feature is not available + and weak (surcharged) callbacks are used. + + [..] + Three operation modes are available within this driver : + + *** Polling mode IO operation *** + ================================= + [..] + (+) Send an amount of data in blocking mode using HAL_UART_Transmit() + (+) Receive an amount of data in blocking mode using HAL_UART_Receive() + + *** Interrupt mode IO operation *** + =================================== + [..] + (+) Send an amount of data in non blocking mode using HAL_UART_Transmit_IT() + (+) At transmission end of transfer HAL_UART_TxCpltCallback is executed and user can + add his own code by customization of function pointer HAL_UART_TxCpltCallback + (+) Receive an amount of data in non blocking mode using HAL_UART_Receive_IT() + (+) At reception end of transfer HAL_UART_RxCpltCallback is executed and user can + add his own code by customization of function pointer HAL_UART_RxCpltCallback + (+) In case of transfer Error, HAL_UART_ErrorCallback() function is executed and user can + add his own code by customization of function pointer HAL_UART_ErrorCallback + + *** DMA mode IO operation *** + ============================== + [..] 
+ (+) Send an amount of data in non blocking mode (DMA) using HAL_UART_Transmit_DMA() + (+) At transmission end of half transfer HAL_UART_TxHalfCpltCallback is executed and user can + add his own code by customization of function pointer HAL_UART_TxHalfCpltCallback + (+) At transmission end of transfer HAL_UART_TxCpltCallback is executed and user can + add his own code by customization of function pointer HAL_UART_TxCpltCallback + (+) Receive an amount of data in non blocking mode (DMA) using HAL_UART_Receive_DMA() + (+) At reception end of half transfer HAL_UART_RxHalfCpltCallback is executed and user can + add his own code by customization of function pointer HAL_UART_RxHalfCpltCallback + (+) At reception end of transfer HAL_UART_RxCpltCallback is executed and user can + add his own code by customization of function pointer HAL_UART_RxCpltCallback + (+) In case of transfer Error, HAL_UART_ErrorCallback() function is executed and user can + add his own code by customization of function pointer HAL_UART_ErrorCallback + (+) Pause the DMA Transfer using HAL_UART_DMAPause() + (+) Resume the DMA Transfer using HAL_UART_DMAResume() + (+) Stop the DMA Transfer using HAL_UART_DMAStop() + + + [..] This subsection also provides a set of additional functions providing enhanced reception + services to user. (For example, these functions allow application to handle use cases + where number of data to be received is unknown). + + (#) Compared to standard reception services which only consider number of received + data elements as reception completion criteria, these functions also consider additional events + as triggers for updating reception status to caller : + (+) Detection of inactivity period (RX line has not been active for a given period). + (++) RX inactivity detected by IDLE event, i.e. RX line has been in idle state (normally high state) + for 1 frame time, after last received byte. + + (#) There are two mode of transfer: + (+) Blocking mode: The reception is performed in polling mode, until either expected number of data is received, + or till IDLE event occurs. Reception is handled only during function execution. + When function exits, no data reception could occur. HAL status and number of actually received data elements, + are returned by function after finishing transfer. + (+) Non-Blocking mode: The reception is performed using Interrupts or DMA. + These API's return the HAL status. + The end of the data processing will be indicated through the + dedicated UART IRQ when using Interrupt mode or the DMA IRQ when using DMA mode. + The HAL_UARTEx_RxEventCallback() user callback will be executed during Receive process + The HAL_UART_ErrorCallback()user callback will be executed when a reception error is detected. + + (#) Blocking mode API: + (+) HAL_UARTEx_ReceiveToIdle() + + (#) Non-Blocking mode API with Interrupt: + (+) HAL_UARTEx_ReceiveToIdle_IT() + + (#) Non-Blocking mode API with DMA: + (+) HAL_UARTEx_ReceiveToIdle_DMA() + + + *** UART HAL driver macros list *** + ============================================= + [..] + Below the list of most used macros in UART HAL driver. 
+ + (+) __HAL_UART_ENABLE: Enable the UART peripheral + (+) __HAL_UART_DISABLE: Disable the UART peripheral + (+) __HAL_UART_GET_FLAG : Check whether the specified UART flag is set or not + (+) __HAL_UART_CLEAR_FLAG : Clear the specified UART pending flag + (+) __HAL_UART_ENABLE_IT: Enable the specified UART interrupt + (+) __HAL_UART_DISABLE_IT: Disable the specified UART interrupt + (+) __HAL_UART_GET_IT_SOURCE: Check whether the specified UART interrupt has occurred or not + + [..] + (@) You can refer to the UART HAL driver header file for more useful macros + + @endverbatim + [..] + (@) Additional remark: If the parity is enabled, then the MSB bit of the data written + in the data register is transmitted but is changed by the parity bit. + Depending on the frame length defined by the M bit (8-bits or 9-bits), + the possible UART frame formats are as listed in the following table: + +-------------------------------------------------------------+ + | M bit | PCE bit | UART frame | + |---------------------|---------------------------------------| + | 0 | 0 | | SB | 8 bit data | STB | | + |---------|-----------|---------------------------------------| + | 0 | 1 | | SB | 7 bit data | PB | STB | | + |---------|-----------|---------------------------------------| + | 1 | 0 | | SB | 9 bit data | STB | | + |---------|-----------|---------------------------------------| + | 1 | 1 | | SB | 8 bit data | PB | STB | | + +-------------------------------------------------------------+ + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @defgroup UART UART + * @brief HAL UART module driver + * @{ + */ +#ifdef HAL_UART_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @addtogroup UART_Private_Constants + * @{ + */ +/** + * @} + */ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/** @addtogroup UART_Private_Functions UART Private Functions + * @{ + */ + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) +void UART_InitCallbacksToDefault(UART_HandleTypeDef *huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +static void UART_EndTxTransfer(UART_HandleTypeDef *huart); +static void UART_EndRxTransfer(UART_HandleTypeDef *huart); +static void UART_DMATransmitCplt(DMA_HandleTypeDef *hdma); +static void UART_DMAReceiveCplt(DMA_HandleTypeDef *hdma); +static void UART_DMATxHalfCplt(DMA_HandleTypeDef *hdma); +static void UART_DMARxHalfCplt(DMA_HandleTypeDef *hdma); +static void UART_DMAError(DMA_HandleTypeDef *hdma); +static void UART_DMAAbortOnError(DMA_HandleTypeDef *hdma); +static void UART_DMATxAbortCallback(DMA_HandleTypeDef *hdma); +static void UART_DMARxAbortCallback(DMA_HandleTypeDef *hdma); +static void UART_DMATxOnlyAbortCallback(DMA_HandleTypeDef *hdma); +static void UART_DMARxOnlyAbortCallback(DMA_HandleTypeDef *hdma); +static HAL_StatusTypeDef UART_Transmit_IT(UART_HandleTypeDef *huart); +static HAL_StatusTypeDef UART_EndTransmit_IT(UART_HandleTypeDef *huart); +static HAL_StatusTypeDef UART_Receive_IT(UART_HandleTypeDef *huart); +static HAL_StatusTypeDef 
UART_WaitOnFlagUntilTimeout(UART_HandleTypeDef *huart, uint32_t Flag, FlagStatus Status, + uint32_t Tickstart, uint32_t Timeout); +static void UART_SetConfig(UART_HandleTypeDef *huart); + +/** + * @} + */ + +/* Exported functions ---------------------------------------------------------*/ +/** @defgroup UART_Exported_Functions UART Exported Functions + * @{ + */ + +/** @defgroup UART_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * +@verbatim + =============================================================================== + ##### Initialization and Configuration functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to initialize the USARTx or the UARTy + in asynchronous mode. + (+) For the asynchronous mode only these parameters can be configured: + (++) Baud Rate + (++) Word Length + (++) Stop Bit + (++) Parity: If the parity is enabled, then the MSB bit of the data written + in the data register is transmitted but is changed by the parity bit. + Depending on the frame length defined by the M bit (8-bits or 9-bits), + please refer to Reference manual for possible UART frame formats. + (++) Hardware flow control + (++) Receiver/transmitter modes + (++) Over Sampling Method + [..] + The HAL_UART_Init(), HAL_HalfDuplex_Init(), HAL_LIN_Init() and HAL_MultiProcessor_Init() APIs + follow respectively the UART asynchronous, UART Half duplex, LIN and Multi-Processor configuration + procedures (details for the procedures are available in reference manual + (RM0430 for STM32F4X3xx MCUs and RM0402 for STM32F412xx MCUs + RM0383 for STM32F411xC/E MCUs and RM0401 for STM32F410xx MCUs + RM0090 for STM32F4X5xx/STM32F4X7xx/STM32F429xx/STM32F439xx MCUs + RM0390 for STM32F446xx MCUs and RM0386 for STM32F469xx/STM32F479xx MCUs)). + +@endverbatim + * @{ + */ + +/** + * @brief Initializes the UART mode according to the specified parameters in + * the UART_InitTypeDef and create the associated handle. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Init(UART_HandleTypeDef *huart) +{ + /* Check the UART handle allocation */ + if (huart == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + if (huart->Init.HwFlowCtl != UART_HWCONTROL_NONE) + { + /* The hardware flow control is available only for USART1, USART2, USART3 and USART6. + Except for STM32F446xx devices, that is available for USART1, USART2, USART3, USART6, UART4 and UART5. 
+ */ + assert_param(IS_UART_HWFLOW_INSTANCE(huart->Instance)); + assert_param(IS_UART_HARDWARE_FLOW_CONTROL(huart->Init.HwFlowCtl)); + } + else + { + assert_param(IS_UART_INSTANCE(huart->Instance)); + } + assert_param(IS_UART_WORD_LENGTH(huart->Init.WordLength)); + assert_param(IS_UART_OVERSAMPLING(huart->Init.OverSampling)); + + if (huart->gState == HAL_UART_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + huart->Lock = HAL_UNLOCKED; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + UART_InitCallbacksToDefault(huart); + + if (huart->MspInitCallback == NULL) + { + huart->MspInitCallback = HAL_UART_MspInit; + } + + /* Init the low level hardware */ + huart->MspInitCallback(huart); +#else + /* Init the low level hardware : GPIO, CLOCK */ + HAL_UART_MspInit(huart); +#endif /* (USE_HAL_UART_REGISTER_CALLBACKS) */ + } + + huart->gState = HAL_UART_STATE_BUSY; + + /* Disable the peripheral */ + __HAL_UART_DISABLE(huart); + + /* Set the UART Communication parameters */ + UART_SetConfig(huart); + + /* In asynchronous mode, the following bits must be kept cleared: + - LINEN and CLKEN bits in the USART_CR2 register, + - SCEN, HDSEL and IREN bits in the USART_CR3 register.*/ + CLEAR_BIT(huart->Instance->CR2, (USART_CR2_LINEN | USART_CR2_CLKEN)); + CLEAR_BIT(huart->Instance->CR3, (USART_CR3_SCEN | USART_CR3_HDSEL | USART_CR3_IREN)); + + /* Enable the peripheral */ + __HAL_UART_ENABLE(huart); + + /* Initialize the UART state */ + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->gState = HAL_UART_STATE_READY; + huart->RxState = HAL_UART_STATE_READY; + huart->RxEventType = HAL_UART_RXEVENT_TC; + + return HAL_OK; +} + +/** + * @brief Initializes the half-duplex mode according to the specified + * parameters in the UART_InitTypeDef and create the associated handle. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. 
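+ * @note  Illustrative configuration (a sketch; huart1, the USART1 pin/clock setup done
+ *        in HAL_UART_MspInit() and Error_Handler() are application-defined):
+ *          huart1.Instance          = USART1;
+ *          huart1.Init.BaudRate     = 115200;
+ *          huart1.Init.WordLength   = UART_WORDLENGTH_8B;
+ *          huart1.Init.StopBits     = UART_STOPBITS_1;
+ *          huart1.Init.Parity       = UART_PARITY_NONE;
+ *          huart1.Init.Mode         = UART_MODE_TX_RX;
+ *          huart1.Init.HwFlowCtl    = UART_HWCONTROL_NONE;
+ *          huart1.Init.OverSampling = UART_OVERSAMPLING_16;
+ *          if (HAL_HalfDuplex_Init(&huart1) != HAL_OK)
+ *          {
+ *            Error_Handler();
+ *          }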
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_HalfDuplex_Init(UART_HandleTypeDef *huart) +{ + /* Check the UART handle allocation */ + if (huart == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_UART_HALFDUPLEX_INSTANCE(huart->Instance)); + assert_param(IS_UART_WORD_LENGTH(huart->Init.WordLength)); + assert_param(IS_UART_OVERSAMPLING(huart->Init.OverSampling)); + + if (huart->gState == HAL_UART_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + huart->Lock = HAL_UNLOCKED; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + UART_InitCallbacksToDefault(huart); + + if (huart->MspInitCallback == NULL) + { + huart->MspInitCallback = HAL_UART_MspInit; + } + + /* Init the low level hardware */ + huart->MspInitCallback(huart); +#else + /* Init the low level hardware : GPIO, CLOCK */ + HAL_UART_MspInit(huart); +#endif /* (USE_HAL_UART_REGISTER_CALLBACKS) */ + } + + huart->gState = HAL_UART_STATE_BUSY; + + /* Disable the peripheral */ + __HAL_UART_DISABLE(huart); + + /* Set the UART Communication parameters */ + UART_SetConfig(huart); + + /* In half-duplex mode, the following bits must be kept cleared: + - LINEN and CLKEN bits in the USART_CR2 register, + - SCEN and IREN bits in the USART_CR3 register.*/ + CLEAR_BIT(huart->Instance->CR2, (USART_CR2_LINEN | USART_CR2_CLKEN)); + CLEAR_BIT(huart->Instance->CR3, (USART_CR3_IREN | USART_CR3_SCEN)); + + /* Enable the Half-Duplex mode by setting the HDSEL bit in the CR3 register */ + SET_BIT(huart->Instance->CR3, USART_CR3_HDSEL); + + /* Enable the peripheral */ + __HAL_UART_ENABLE(huart); + + /* Initialize the UART state*/ + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->gState = HAL_UART_STATE_READY; + huart->RxState = HAL_UART_STATE_READY; + huart->RxEventType = HAL_UART_RXEVENT_TC; + + return HAL_OK; +} + +/** + * @brief Initializes the LIN mode according to the specified + * parameters in the UART_InitTypeDef and create the associated handle. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @param BreakDetectLength Specifies the LIN break detection length. 
+ * This parameter can be one of the following values: + * @arg UART_LINBREAKDETECTLENGTH_10B: 10-bit break detection + * @arg UART_LINBREAKDETECTLENGTH_11B: 11-bit break detection + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LIN_Init(UART_HandleTypeDef *huart, uint32_t BreakDetectLength) +{ + /* Check the UART handle allocation */ + if (huart == NULL) + { + return HAL_ERROR; + } + + /* Check the LIN UART instance */ + assert_param(IS_UART_LIN_INSTANCE(huart->Instance)); + + /* Check the Break detection length parameter */ + assert_param(IS_UART_LIN_BREAK_DETECT_LENGTH(BreakDetectLength)); + assert_param(IS_UART_LIN_WORD_LENGTH(huart->Init.WordLength)); + assert_param(IS_UART_LIN_OVERSAMPLING(huart->Init.OverSampling)); + + if (huart->gState == HAL_UART_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + huart->Lock = HAL_UNLOCKED; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + UART_InitCallbacksToDefault(huart); + + if (huart->MspInitCallback == NULL) + { + huart->MspInitCallback = HAL_UART_MspInit; + } + + /* Init the low level hardware */ + huart->MspInitCallback(huart); +#else + /* Init the low level hardware : GPIO, CLOCK */ + HAL_UART_MspInit(huart); +#endif /* (USE_HAL_UART_REGISTER_CALLBACKS) */ + } + + huart->gState = HAL_UART_STATE_BUSY; + + /* Disable the peripheral */ + __HAL_UART_DISABLE(huart); + + /* Set the UART Communication parameters */ + UART_SetConfig(huart); + + /* In LIN mode, the following bits must be kept cleared: + - CLKEN bits in the USART_CR2 register, + - SCEN, HDSEL and IREN bits in the USART_CR3 register.*/ + CLEAR_BIT(huart->Instance->CR2, (USART_CR2_CLKEN)); + CLEAR_BIT(huart->Instance->CR3, (USART_CR3_HDSEL | USART_CR3_IREN | USART_CR3_SCEN)); + + /* Enable the LIN mode by setting the LINEN bit in the CR2 register */ + SET_BIT(huart->Instance->CR2, USART_CR2_LINEN); + + /* Set the USART LIN Break detection length. */ + CLEAR_BIT(huart->Instance->CR2, USART_CR2_LBDL); + SET_BIT(huart->Instance->CR2, BreakDetectLength); + + /* Enable the peripheral */ + __HAL_UART_ENABLE(huart); + + /* Initialize the UART state*/ + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->gState = HAL_UART_STATE_READY; + huart->RxState = HAL_UART_STATE_READY; + huart->RxEventType = HAL_UART_RXEVENT_TC; + + return HAL_OK; +} + +/** + * @brief Initializes the Multi-Processor mode according to the specified + * parameters in the UART_InitTypeDef and create the associated handle. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @param Address USART address + * @param WakeUpMethod specifies the USART wake-up method. 
+ * This parameter can be one of the following values: + * @arg UART_WAKEUPMETHOD_IDLELINE: Wake-up by an idle line detection + * @arg UART_WAKEUPMETHOD_ADDRESSMARK: Wake-up by an address mark + * @retval HAL status + */ +HAL_StatusTypeDef HAL_MultiProcessor_Init(UART_HandleTypeDef *huart, uint8_t Address, uint32_t WakeUpMethod) +{ + /* Check the UART handle allocation */ + if (huart == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_UART_INSTANCE(huart->Instance)); + + /* Check the Address & wake up method parameters */ + assert_param(IS_UART_WAKEUPMETHOD(WakeUpMethod)); + assert_param(IS_UART_ADDRESS(Address)); + assert_param(IS_UART_WORD_LENGTH(huart->Init.WordLength)); + assert_param(IS_UART_OVERSAMPLING(huart->Init.OverSampling)); + + if (huart->gState == HAL_UART_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + huart->Lock = HAL_UNLOCKED; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + UART_InitCallbacksToDefault(huart); + + if (huart->MspInitCallback == NULL) + { + huart->MspInitCallback = HAL_UART_MspInit; + } + + /* Init the low level hardware */ + huart->MspInitCallback(huart); +#else + /* Init the low level hardware : GPIO, CLOCK */ + HAL_UART_MspInit(huart); +#endif /* (USE_HAL_UART_REGISTER_CALLBACKS) */ + } + + huart->gState = HAL_UART_STATE_BUSY; + + /* Disable the peripheral */ + __HAL_UART_DISABLE(huart); + + /* Set the UART Communication parameters */ + UART_SetConfig(huart); + + /* In Multi-Processor mode, the following bits must be kept cleared: + - LINEN and CLKEN bits in the USART_CR2 register, + - SCEN, HDSEL and IREN bits in the USART_CR3 register */ + CLEAR_BIT(huart->Instance->CR2, (USART_CR2_LINEN | USART_CR2_CLKEN)); + CLEAR_BIT(huart->Instance->CR3, (USART_CR3_SCEN | USART_CR3_HDSEL | USART_CR3_IREN)); + + /* Set the USART address node */ + CLEAR_BIT(huart->Instance->CR2, USART_CR2_ADD); + SET_BIT(huart->Instance->CR2, Address); + + /* Set the wake up method by setting the WAKE bit in the CR1 register */ + CLEAR_BIT(huart->Instance->CR1, USART_CR1_WAKE); + SET_BIT(huart->Instance->CR1, WakeUpMethod); + + /* Enable the peripheral */ + __HAL_UART_ENABLE(huart); + + /* Initialize the UART state */ + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->gState = HAL_UART_STATE_READY; + huart->RxState = HAL_UART_STATE_READY; + huart->RxEventType = HAL_UART_RXEVENT_TC; + + return HAL_OK; +} + +/** + * @brief DeInitializes the UART peripheral. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. 
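+ * @note  Disables the peripheral, calls HAL_UART_MspDeInit() (or the registered MspDeInit
+ *        callback) and puts the handle back in HAL_UART_STATE_RESET, so a subsequent
+ *        HAL_UART_Init() re-runs the MSP initialization. Illustrative call (a sketch;
+ *        huart1 is an application-defined handle):
+ *          (void)HAL_UART_DeInit(&huart1);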
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_DeInit(UART_HandleTypeDef *huart) +{ + /* Check the UART handle allocation */ + if (huart == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_UART_INSTANCE(huart->Instance)); + + huart->gState = HAL_UART_STATE_BUSY; + + /* Disable the Peripheral */ + __HAL_UART_DISABLE(huart); + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + if (huart->MspDeInitCallback == NULL) + { + huart->MspDeInitCallback = HAL_UART_MspDeInit; + } + /* DeInit the low level hardware */ + huart->MspDeInitCallback(huart); +#else + /* DeInit the low level hardware */ + HAL_UART_MspDeInit(huart); +#endif /* (USE_HAL_UART_REGISTER_CALLBACKS) */ + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->gState = HAL_UART_STATE_RESET; + huart->RxState = HAL_UART_STATE_RESET; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + huart->RxEventType = HAL_UART_RXEVENT_TC; + + /* Process Unlock */ + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +/** + * @brief UART MSP Init. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @retval None + */ +__weak void HAL_UART_MspInit(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + /* NOTE: This function should not be modified, when the callback is needed, + the HAL_UART_MspInit could be implemented in the user file + */ +} + +/** + * @brief UART MSP DeInit. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @retval None + */ +__weak void HAL_UART_MspDeInit(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + /* NOTE: This function should not be modified, when the callback is needed, + the HAL_UART_MspDeInit could be implemented in the user file + */ +} + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) +/** + * @brief Register a User UART Callback + * To be used instead of the weak predefined callback + * @note The HAL_UART_RegisterCallback() may be called before HAL_UART_Init(), HAL_HalfDuplex_Init(), HAL_LIN_Init(), + * HAL_MultiProcessor_Init() to register callbacks for HAL_UART_MSPINIT_CB_ID and HAL_UART_MSPDEINIT_CB_ID + * @param huart uart handle + * @param CallbackID ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_UART_TX_HALFCOMPLETE_CB_ID Tx Half Complete Callback ID + * @arg @ref HAL_UART_TX_COMPLETE_CB_ID Tx Complete Callback ID + * @arg @ref HAL_UART_RX_HALFCOMPLETE_CB_ID Rx Half Complete Callback ID + * @arg @ref HAL_UART_RX_COMPLETE_CB_ID Rx Complete Callback ID + * @arg @ref HAL_UART_ERROR_CB_ID Error Callback ID + * @arg @ref HAL_UART_ABORT_COMPLETE_CB_ID Abort Complete Callback ID + * @arg @ref HAL_UART_ABORT_TRANSMIT_COMPLETE_CB_ID Abort Transmit Complete Callback ID + * @arg @ref HAL_UART_ABORT_RECEIVE_COMPLETE_CB_ID Abort Receive Complete Callback ID + * @arg @ref HAL_UART_MSPINIT_CB_ID MspInit Callback ID + * @arg @ref HAL_UART_MSPDEINIT_CB_ID MspDeInit Callback ID + * @param pCallback pointer to the Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_RegisterCallback(UART_HandleTypeDef *huart, HAL_UART_CallbackIDTypeDef CallbackID, + pUART_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + 
return HAL_ERROR; + } + + if (huart->gState == HAL_UART_STATE_READY) + { + switch (CallbackID) + { + case HAL_UART_TX_HALFCOMPLETE_CB_ID : + huart->TxHalfCpltCallback = pCallback; + break; + + case HAL_UART_TX_COMPLETE_CB_ID : + huart->TxCpltCallback = pCallback; + break; + + case HAL_UART_RX_HALFCOMPLETE_CB_ID : + huart->RxHalfCpltCallback = pCallback; + break; + + case HAL_UART_RX_COMPLETE_CB_ID : + huart->RxCpltCallback = pCallback; + break; + + case HAL_UART_ERROR_CB_ID : + huart->ErrorCallback = pCallback; + break; + + case HAL_UART_ABORT_COMPLETE_CB_ID : + huart->AbortCpltCallback = pCallback; + break; + + case HAL_UART_ABORT_TRANSMIT_COMPLETE_CB_ID : + huart->AbortTransmitCpltCallback = pCallback; + break; + + case HAL_UART_ABORT_RECEIVE_COMPLETE_CB_ID : + huart->AbortReceiveCpltCallback = pCallback; + break; + + case HAL_UART_MSPINIT_CB_ID : + huart->MspInitCallback = pCallback; + break; + + case HAL_UART_MSPDEINIT_CB_ID : + huart->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (huart->gState == HAL_UART_STATE_RESET) + { + switch (CallbackID) + { + case HAL_UART_MSPINIT_CB_ID : + huart->MspInitCallback = pCallback; + break; + + case HAL_UART_MSPDEINIT_CB_ID : + huart->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + return status; +} + +/** + * @brief Unregister an UART Callback + * UART callaback is redirected to the weak predefined callback + * @note The HAL_UART_UnRegisterCallback() may be called before HAL_UART_Init(), HAL_HalfDuplex_Init(), + * HAL_LIN_Init(), HAL_MultiProcessor_Init() to un-register callbacks for HAL_UART_MSPINIT_CB_ID + * and HAL_UART_MSPDEINIT_CB_ID + * @param huart uart handle + * @param CallbackID ID of the callback to be unregistered + * This parameter can be one of the following values: + * @arg @ref HAL_UART_TX_HALFCOMPLETE_CB_ID Tx Half Complete Callback ID + * @arg @ref HAL_UART_TX_COMPLETE_CB_ID Tx Complete Callback ID + * @arg @ref HAL_UART_RX_HALFCOMPLETE_CB_ID Rx Half Complete Callback ID + * @arg @ref HAL_UART_RX_COMPLETE_CB_ID Rx Complete Callback ID + * @arg @ref HAL_UART_ERROR_CB_ID Error Callback ID + * @arg @ref HAL_UART_ABORT_COMPLETE_CB_ID Abort Complete Callback ID + * @arg @ref HAL_UART_ABORT_TRANSMIT_COMPLETE_CB_ID Abort Transmit Complete Callback ID + * @arg @ref HAL_UART_ABORT_RECEIVE_COMPLETE_CB_ID Abort Receive Complete Callback ID + * @arg @ref HAL_UART_MSPINIT_CB_ID MspInit Callback ID + * @arg @ref HAL_UART_MSPDEINIT_CB_ID MspDeInit Callback ID + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_UnRegisterCallback(UART_HandleTypeDef *huart, HAL_UART_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (HAL_UART_STATE_READY == huart->gState) + { + switch (CallbackID) + { + case HAL_UART_TX_HALFCOMPLETE_CB_ID : + huart->TxHalfCpltCallback = HAL_UART_TxHalfCpltCallback; /* Legacy weak TxHalfCpltCallback */ + break; + + case HAL_UART_TX_COMPLETE_CB_ID : + huart->TxCpltCallback = HAL_UART_TxCpltCallback; /* Legacy weak TxCpltCallback */ + break; + + case HAL_UART_RX_HALFCOMPLETE_CB_ID : + huart->RxHalfCpltCallback = 
HAL_UART_RxHalfCpltCallback; /* Legacy weak RxHalfCpltCallback */ + break; + + case HAL_UART_RX_COMPLETE_CB_ID : + huart->RxCpltCallback = HAL_UART_RxCpltCallback; /* Legacy weak RxCpltCallback */ + break; + + case HAL_UART_ERROR_CB_ID : + huart->ErrorCallback = HAL_UART_ErrorCallback; /* Legacy weak ErrorCallback */ + break; + + case HAL_UART_ABORT_COMPLETE_CB_ID : + huart->AbortCpltCallback = HAL_UART_AbortCpltCallback; /* Legacy weak AbortCpltCallback */ + break; + + case HAL_UART_ABORT_TRANSMIT_COMPLETE_CB_ID : + huart->AbortTransmitCpltCallback = HAL_UART_AbortTransmitCpltCallback; /* Legacy weak AbortTransmitCpltCallback */ + break; + + case HAL_UART_ABORT_RECEIVE_COMPLETE_CB_ID : + huart->AbortReceiveCpltCallback = HAL_UART_AbortReceiveCpltCallback; /* Legacy weak AbortReceiveCpltCallback */ + break; + + case HAL_UART_MSPINIT_CB_ID : + huart->MspInitCallback = HAL_UART_MspInit; /* Legacy weak MspInitCallback */ + break; + + case HAL_UART_MSPDEINIT_CB_ID : + huart->MspDeInitCallback = HAL_UART_MspDeInit; /* Legacy weak MspDeInitCallback */ + break; + + default : + /* Update the error code */ + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (HAL_UART_STATE_RESET == huart->gState) + { + switch (CallbackID) + { + case HAL_UART_MSPINIT_CB_ID : + huart->MspInitCallback = HAL_UART_MspInit; + break; + + case HAL_UART_MSPDEINIT_CB_ID : + huart->MspDeInitCallback = HAL_UART_MspDeInit; + break; + + default : + /* Update the error code */ + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + return status; +} + +/** + * @brief Register a User UART Rx Event Callback + * To be used instead of the weak predefined callback + * @param huart Uart handle + * @param pCallback Pointer to the Rx Event Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_RegisterRxEventCallback(UART_HandleTypeDef *huart, pUART_RxEventCallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + return HAL_ERROR; + } + + /* Process locked */ + __HAL_LOCK(huart); + + if (huart->gState == HAL_UART_STATE_READY) + { + huart->RxEventCallback = pCallback; + } + else + { + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(huart); + + return status; +} + +/** + * @brief UnRegister the UART Rx Event Callback + * UART Rx Event Callback is redirected to the weak HAL_UARTEx_RxEventCallback() predefined callback + * @param huart Uart handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_UnRegisterRxEventCallback(UART_HandleTypeDef *huart) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(huart); + + if (huart->gState == HAL_UART_STATE_READY) + { + huart->RxEventCallback = HAL_UARTEx_RxEventCallback; /* Legacy weak UART Rx Event Callback */ + } + else + { + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(huart); + return status; +} +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @defgroup UART_Exported_Functions_Group2 IO operation functions + * @brief UART Transmit and Receive functions + * +@verbatim + 
=============================================================================== + ##### IO operation functions ##### + =============================================================================== + This subsection provides a set of functions allowing to manage the UART asynchronous + and Half duplex data transfers. + + (#) There are two modes of transfer: + (+) Blocking mode: The communication is performed in polling mode. + The HAL status of all data processing is returned by the same function + after finishing transfer. + (+) Non-Blocking mode: The communication is performed using Interrupts + or DMA, these API's return the HAL status. + The end of the data processing will be indicated through the + dedicated UART IRQ when using Interrupt mode or the DMA IRQ when + using DMA mode. + The HAL_UART_TxCpltCallback(), HAL_UART_RxCpltCallback() user callbacks + will be executed respectively at the end of the transmit or receive process + The HAL_UART_ErrorCallback()user callback will be executed when a communication error is detected. + + (#) Blocking mode API's are : + (+) HAL_UART_Transmit() + (+) HAL_UART_Receive() + + (#) Non-Blocking mode API's with Interrupt are : + (+) HAL_UART_Transmit_IT() + (+) HAL_UART_Receive_IT() + (+) HAL_UART_IRQHandler() + + (#) Non-Blocking mode API's with DMA are : + (+) HAL_UART_Transmit_DMA() + (+) HAL_UART_Receive_DMA() + (+) HAL_UART_DMAPause() + (+) HAL_UART_DMAResume() + (+) HAL_UART_DMAStop() + + (#) A set of Transfer Complete Callbacks are provided in Non_Blocking mode: + (+) HAL_UART_TxHalfCpltCallback() + (+) HAL_UART_TxCpltCallback() + (+) HAL_UART_RxHalfCpltCallback() + (+) HAL_UART_RxCpltCallback() + (+) HAL_UART_ErrorCallback() + + (#) Non-Blocking mode transfers could be aborted using Abort API's : + (+) HAL_UART_Abort() + (+) HAL_UART_AbortTransmit() + (+) HAL_UART_AbortReceive() + (+) HAL_UART_Abort_IT() + (+) HAL_UART_AbortTransmit_IT() + (+) HAL_UART_AbortReceive_IT() + + (#) For Abort services based on interrupts (HAL_UART_Abortxxx_IT), a set of Abort Complete Callbacks are provided: + (+) HAL_UART_AbortCpltCallback() + (+) HAL_UART_AbortTransmitCpltCallback() + (+) HAL_UART_AbortReceiveCpltCallback() + + (#) A Rx Event Reception Callback (Rx event notification) is available for Non_Blocking modes of enhanced reception services: + (+) HAL_UARTEx_RxEventCallback() + + (#) In Non-Blocking mode transfers, possible errors are split into 2 categories. + Errors are handled as follows : + (+) Error is considered as Recoverable and non blocking : Transfer could go till end, but error severity is + to be evaluated by user : this concerns Frame Error, Parity Error or Noise Error in Interrupt mode reception . + Received character is then retrieved and stored in Rx buffer, Error code is set to allow user to identify error type, + and HAL_UART_ErrorCallback() user callback is executed. Transfer is kept ongoing on UART side. + If user wants to abort it, Abort services should be called by user. + (+) Error is considered as Blocking : Transfer could not be completed properly and is aborted. + This concerns Overrun Error In Interrupt mode reception and all errors in DMA mode. + Error code is set to allow user to identify error type, and HAL_UART_ErrorCallback() user callback is executed. + + -@- In the Half duplex communication, it is forbidden to run the transmit + and receive process in parallel, the UART state HAL_UART_STATE_BUSY_TX_RX can't be useful. + +@endverbatim + * @{ + */ + +/** + * @brief Sends an amount of data in blocking mode. 
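+ * @note  Illustrative blocking transmit with a 100 ms timeout (a sketch; huart2 and
+ *        Error_Handler() are application-defined):
+ *          static const uint8_t msg[] = "hello";
+ *          if (HAL_UART_Transmit(&huart2, msg, (uint16_t)(sizeof(msg) - 1U), 100U) != HAL_OK)
+ *          {
+ *            Error_Handler();
+ *          }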
+ * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M1-M0 = 01), + * the sent data is handled as a set of u16. In this case, Size must indicate the number + * of u16 provided through pData. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @param pData Pointer to data buffer (u8 or u16 data elements). + * @param Size Amount of data elements (u8 or u16) to be sent + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Transmit(UART_HandleTypeDef *huart, const uint8_t *pData, uint16_t Size, uint32_t Timeout) +{ + const uint8_t *pdata8bits; + const uint16_t *pdata16bits; + uint32_t tickstart = 0U; + + /* Check that a Tx process is not already ongoing */ + if (huart->gState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->gState = HAL_UART_STATE_BUSY_TX; + + /* Init tickstart for timeout management */ + tickstart = HAL_GetTick(); + + huart->TxXferSize = Size; + huart->TxXferCount = Size; + + /* In case of 9bits/No Parity transfer, pData needs to be handled as a uint16_t pointer */ + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) && (huart->Init.Parity == UART_PARITY_NONE)) + { + pdata8bits = NULL; + pdata16bits = (const uint16_t *) pData; + } + else + { + pdata8bits = pData; + pdata16bits = NULL; + } + + while (huart->TxXferCount > 0U) + { + if (UART_WaitOnFlagUntilTimeout(huart, UART_FLAG_TXE, RESET, tickstart, Timeout) != HAL_OK) + { + huart->gState = HAL_UART_STATE_READY; + + return HAL_TIMEOUT; + } + if (pdata8bits == NULL) + { + huart->Instance->DR = (uint16_t)(*pdata16bits & 0x01FFU); + pdata16bits++; + } + else + { + huart->Instance->DR = (uint8_t)(*pdata8bits & 0xFFU); + pdata8bits++; + } + huart->TxXferCount--; + } + + if (UART_WaitOnFlagUntilTimeout(huart, UART_FLAG_TC, RESET, tickstart, Timeout) != HAL_OK) + { + huart->gState = HAL_UART_STATE_READY; + + return HAL_TIMEOUT; + } + + /* At end of Tx process, restore huart->gState to Ready */ + huart->gState = HAL_UART_STATE_READY; + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receives an amount of data in blocking mode. + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M1-M0 = 01), + * the received data is handled as a set of u16. In this case, Size must indicate the number + * of u16 available through pData. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @param pData Pointer to data buffer (u8 or u16 data elements). + * @param Size Amount of data elements (u8 or u16) to be received. 
+ * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Receive(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size, uint32_t Timeout) +{ + uint8_t *pdata8bits; + uint16_t *pdata16bits; + uint32_t tickstart = 0U; + + /* Check that a Rx process is not already ongoing */ + if (huart->RxState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->RxState = HAL_UART_STATE_BUSY_RX; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + /* Init tickstart for timeout management */ + tickstart = HAL_GetTick(); + + huart->RxXferSize = Size; + huart->RxXferCount = Size; + + /* In case of 9bits/No Parity transfer, pRxData needs to be handled as a uint16_t pointer */ + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) && (huart->Init.Parity == UART_PARITY_NONE)) + { + pdata8bits = NULL; + pdata16bits = (uint16_t *) pData; + } + else + { + pdata8bits = pData; + pdata16bits = NULL; + } + + /* Check the remain data to be received */ + while (huart->RxXferCount > 0U) + { + if (UART_WaitOnFlagUntilTimeout(huart, UART_FLAG_RXNE, RESET, tickstart, Timeout) != HAL_OK) + { + huart->RxState = HAL_UART_STATE_READY; + + return HAL_TIMEOUT; + } + if (pdata8bits == NULL) + { + *pdata16bits = (uint16_t)(huart->Instance->DR & 0x01FF); + pdata16bits++; + } + else + { + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) || ((huart->Init.WordLength == UART_WORDLENGTH_8B) && (huart->Init.Parity == UART_PARITY_NONE))) + { + *pdata8bits = (uint8_t)(huart->Instance->DR & (uint8_t)0x00FF); + } + else + { + *pdata8bits = (uint8_t)(huart->Instance->DR & (uint8_t)0x007F); + } + pdata8bits++; + } + huart->RxXferCount--; + } + + /* At end of Rx process, restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Sends an amount of data in non blocking mode. + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M1-M0 = 01), + * the sent data is handled as a set of u16. In this case, Size must indicate the number + * of u16 provided through pData. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @param pData Pointer to data buffer (u8 or u16 data elements). + * @param Size Amount of data elements (u8 or u16) to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Transmit_IT(UART_HandleTypeDef *huart, const uint8_t *pData, uint16_t Size) +{ + /* Check that a Tx process is not already ongoing */ + if (huart->gState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + huart->pTxBuffPtr = pData; + huart->TxXferSize = Size; + huart->TxXferCount = Size; + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->gState = HAL_UART_STATE_BUSY_TX; + + /* Enable the UART Transmit data register empty Interrupt */ + __HAL_UART_ENABLE_IT(huart, UART_IT_TXE); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receives an amount of data in non blocking mode. + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M1-M0 = 01), + * the received data is handled as a set of u16. In this case, Size must indicate the number + * of u16 available through pData. 
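+ * @note  Illustrative interrupt-driven reception (a sketch; huart2, aRxBuffer and
+ *        Error_Handler() are application-defined; completion is signalled through
+ *        HAL_UART_RxCpltCallback()):
+ *          if (HAL_UART_Receive_IT(&huart2, aRxBuffer, 10U) != HAL_OK)
+ *          {
+ *            Error_Handler();
+ *          }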
+ * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @param pData Pointer to data buffer (u8 or u16 data elements). + * @param Size Amount of data elements (u8 or u16) to be received. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Receive_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) +{ + /* Check that a Rx process is not already ongoing */ + if (huart->RxState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + /* Set Reception type to Standard reception */ + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + return (UART_Start_Receive_IT(huart, pData, Size)); + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Sends an amount of data in DMA mode. + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M1-M0 = 01), + * the sent data is handled as a set of u16. In this case, Size must indicate the number + * of u16 provided through pData. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @param pData Pointer to data buffer (u8 or u16 data elements). + * @param Size Amount of data elements (u8 or u16) to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Transmit_DMA(UART_HandleTypeDef *huart, const uint8_t *pData, uint16_t Size) +{ + const uint32_t *tmp; + + /* Check that a Tx process is not already ongoing */ + if (huart->gState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + huart->pTxBuffPtr = pData; + huart->TxXferSize = Size; + huart->TxXferCount = Size; + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->gState = HAL_UART_STATE_BUSY_TX; + + /* Set the UART DMA transfer complete callback */ + huart->hdmatx->XferCpltCallback = UART_DMATransmitCplt; + + /* Set the UART DMA Half transfer complete callback */ + huart->hdmatx->XferHalfCpltCallback = UART_DMATxHalfCplt; + + /* Set the DMA error callback */ + huart->hdmatx->XferErrorCallback = UART_DMAError; + + /* Set the DMA abort callback */ + huart->hdmatx->XferAbortCallback = NULL; + + /* Enable the UART transmit DMA stream */ + tmp = (const uint32_t *)&pData; + HAL_DMA_Start_IT(huart->hdmatx, *(const uint32_t *)tmp, (uint32_t)&huart->Instance->DR, Size); + + /* Clear the TC flag in the SR register by writing 0 to it */ + __HAL_UART_CLEAR_FLAG(huart, UART_FLAG_TC); + + /* Enable the DMA transfer for transmit request by setting the DMAT bit + in the UART CR3 register */ + ATOMIC_SET_BIT(huart->Instance->CR3, USART_CR3_DMAT); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receives an amount of data in DMA mode. + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M1-M0 = 01), + * the received data is handled as a set of u16. In this case, Size must indicate the number + * of u16 available through pData. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @param pData Pointer to data buffer (u8 or u16 data elements). + * @param Size Amount of data elements (u8 or u16) to be received. + * @note When the UART parity is enabled (PCE = 1) the received data contains the parity bit. 
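+ * @note  Illustrative DMA reception (a sketch; huart2 must have its hdmarx handle linked
+ *        in HAL_UART_MspInit(), and aRxBuffer / Error_Handler() are application-defined):
+ *          if (HAL_UART_Receive_DMA(&huart2, aRxBuffer, 128U) != HAL_OK)
+ *          {
+ *            Error_Handler();
+ *          }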
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Receive_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) +{ + /* Check that a Rx process is not already ongoing */ + if (huart->RxState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + /* Set Reception type to Standard reception */ + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + return (UART_Start_Receive_DMA(huart, pData, Size)); + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Pauses the DMA Transfer. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_DMAPause(UART_HandleTypeDef *huart) +{ + uint32_t dmarequest = 0x00U; + + dmarequest = HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT); + if ((huart->gState == HAL_UART_STATE_BUSY_TX) && dmarequest) + { + /* Disable the UART DMA Tx request */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + } + + dmarequest = HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR); + if ((huart->RxState == HAL_UART_STATE_BUSY_RX) && dmarequest) + { + /* Disable RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_PEIE); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Disable the UART DMA Rx request */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + } + + return HAL_OK; +} + +/** + * @brief Resumes the DMA Transfer. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_DMAResume(UART_HandleTypeDef *huart) +{ + + if (huart->gState == HAL_UART_STATE_BUSY_TX) + { + /* Enable the UART DMA Tx request */ + ATOMIC_SET_BIT(huart->Instance->CR3, USART_CR3_DMAT); + } + + if (huart->RxState == HAL_UART_STATE_BUSY_RX) + { + /* Clear the Overrun flag before resuming the Rx transfer*/ + __HAL_UART_CLEAR_OREFLAG(huart); + + /* Re-enable PE and ERR (Frame error, noise error, overrun error) interrupts */ + if (huart->Init.Parity != UART_PARITY_NONE) + { + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_PEIE); + } + ATOMIC_SET_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Enable the UART DMA Rx request */ + ATOMIC_SET_BIT(huart->Instance->CR3, USART_CR3_DMAR); + } + + return HAL_OK; +} + +/** + * @brief Stops the DMA Transfer. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. 
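+ * @note  Aborts any ongoing Tx/Rx DMA stream and clears the DMAT/DMAR requests.
+ *        Illustrative call, e.g. before reconfiguring the peripheral (a sketch;
+ *        huart2 is an application-defined handle):
+ *          (void)HAL_UART_DMAStop(&huart2);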
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_DMAStop(UART_HandleTypeDef *huart) +{ + uint32_t dmarequest = 0x00U; + /* The Lock is not implemented on this API to allow the user application + to call the HAL UART API under callbacks HAL_UART_TxCpltCallback() / HAL_UART_RxCpltCallback(): + when calling HAL_DMA_Abort() API the DMA TX/RX Transfer complete interrupt is generated + and the correspond call back is executed HAL_UART_TxCpltCallback() / HAL_UART_RxCpltCallback() + */ + + /* Stop UART DMA Tx request if ongoing */ + dmarequest = HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT); + if ((huart->gState == HAL_UART_STATE_BUSY_TX) && dmarequest) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + + /* Abort the UART DMA Tx stream */ + if (huart->hdmatx != NULL) + { + HAL_DMA_Abort(huart->hdmatx); + } + UART_EndTxTransfer(huart); + } + + /* Stop UART DMA Rx request if ongoing */ + dmarequest = HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR); + if ((huart->RxState == HAL_UART_STATE_BUSY_RX) && dmarequest) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + /* Abort the UART DMA Rx stream */ + if (huart->hdmarx != NULL) + { + HAL_DMA_Abort(huart->hdmarx); + } + UART_EndRxTransfer(huart); + } + + return HAL_OK; +} + +/** + * @brief Receive an amount of data in blocking mode till either the expected number of data is received or an IDLE event occurs. + * @note HAL_OK is returned if reception is completed (expected number of data has been received) + * or if reception is stopped after IDLE event (less than the expected number of data has been received) + * In this case, RxLen output parameter indicates number of data available in reception buffer. + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M = 01), + * the received data is handled as a set of uint16_t. In this case, Size must indicate the number + * of uint16_t available through pData. + * @param huart UART handle. + * @param pData Pointer to data buffer (uint8_t or uint16_t data elements). + * @param Size Amount of data elements (uint8_t or uint16_t) to be received. + * @param RxLen Number of data elements finally received (could be lower than Size, in case reception ends on IDLE event) + * @param Timeout Timeout duration expressed in ms (covers the whole reception sequence). 
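+  * @note   Illustrative usage sketch (not part of the original driver); huart2, rx_buf and rx_len are
+  *         hypothetical application-side names:
+  * @code
+  *   uint8_t  rx_buf[64];
+  *   uint16_t rx_len = 0U;
+  *   // Wait up to 100 ms, or until the line goes idle after at least one byte has been received
+  *   if (HAL_UARTEx_ReceiveToIdle(&huart2, rx_buf, sizeof(rx_buf), &rx_len, 100U) == HAL_OK)
+  *   {
+  *     // rx_len now holds the number of data elements actually stored in rx_buf
+  *   }
+  * @endcode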
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_UARTEx_ReceiveToIdle(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size, uint16_t *RxLen, + uint32_t Timeout) +{ + uint8_t *pdata8bits; + uint16_t *pdata16bits; + uint32_t tickstart; + + /* Check that a Rx process is not already ongoing */ + if (huart->RxState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->RxState = HAL_UART_STATE_BUSY_RX; + huart->ReceptionType = HAL_UART_RECEPTION_TOIDLE; + huart->RxEventType = HAL_UART_RXEVENT_TC; + + /* Init tickstart for timeout management */ + tickstart = HAL_GetTick(); + + huart->RxXferSize = Size; + huart->RxXferCount = Size; + + /* In case of 9bits/No Parity transfer, pRxData needs to be handled as a uint16_t pointer */ + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) && (huart->Init.Parity == UART_PARITY_NONE)) + { + pdata8bits = NULL; + pdata16bits = (uint16_t *) pData; + } + else + { + pdata8bits = pData; + pdata16bits = NULL; + } + + /* Initialize output number of received elements */ + *RxLen = 0U; + + /* as long as data have to be received */ + while (huart->RxXferCount > 0U) + { + /* Check if IDLE flag is set */ + if (__HAL_UART_GET_FLAG(huart, UART_FLAG_IDLE)) + { + /* Clear IDLE flag in ISR */ + __HAL_UART_CLEAR_IDLEFLAG(huart); + + /* If Set, but no data ever received, clear flag without exiting loop */ + /* If Set, and data has already been received, this means Idle Event is valid : End reception */ + if (*RxLen > 0U) + { + huart->RxEventType = HAL_UART_RXEVENT_IDLE; + huart->RxState = HAL_UART_STATE_READY; + + return HAL_OK; + } + } + + /* Check if RXNE flag is set */ + if (__HAL_UART_GET_FLAG(huart, UART_FLAG_RXNE)) + { + if (pdata8bits == NULL) + { + *pdata16bits = (uint16_t)(huart->Instance->DR & (uint16_t)0x01FF); + pdata16bits++; + } + else + { + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) || ((huart->Init.WordLength == UART_WORDLENGTH_8B) && (huart->Init.Parity == UART_PARITY_NONE))) + { + *pdata8bits = (uint8_t)(huart->Instance->DR & (uint8_t)0x00FF); + } + else + { + *pdata8bits = (uint8_t)(huart->Instance->DR & (uint8_t)0x007F); + } + + pdata8bits++; + } + /* Increment number of received elements */ + *RxLen += 1U; + huart->RxXferCount--; + } + + /* Check for the Timeout */ + if (Timeout != HAL_MAX_DELAY) + { + if (((HAL_GetTick() - tickstart) > Timeout) || (Timeout == 0U)) + { + huart->RxState = HAL_UART_STATE_READY; + + return HAL_TIMEOUT; + } + } + } + + /* Set number of received elements in output parameter : RxLen */ + *RxLen = huart->RxXferSize - huart->RxXferCount; + /* At end of Rx process, restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receive an amount of data in interrupt mode till either the expected number of data is received or an IDLE event occurs. + * @note Reception is initiated by this function call. Further progress of reception is achieved thanks + * to UART interrupts raised by RXNE and IDLE events. Callback is called at end of reception indicating + * number of received data elements. + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M = 01), + * the received data is handled as a set of uint16_t. In this case, Size must indicate the number + * of uint16_t available through pData. + * @param huart UART handle. + * @param pData Pointer to data buffer (uint8_t or uint16_t data elements). 
+ * @param Size Amount of data elements (uint8_t or uint16_t) to be received. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UARTEx_ReceiveToIdle_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) +{ + HAL_StatusTypeDef status; + + /* Check that a Rx process is not already ongoing */ + if (huart->RxState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + /* Set Reception type to reception till IDLE Event*/ + huart->ReceptionType = HAL_UART_RECEPTION_TOIDLE; + huart->RxEventType = HAL_UART_RXEVENT_TC; + + status = UART_Start_Receive_IT(huart, pData, Size); + + /* Check Rx process has been successfully started */ + if (status == HAL_OK) + { + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + __HAL_UART_CLEAR_IDLEFLAG(huart); + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_IDLEIE); + } + else + { + /* In case of errors already pending when reception is started, + Interrupts may have already been raised and lead to reception abortion. + (Overrun error for instance). + In such case Reception Type has been reset to HAL_UART_RECEPTION_STANDARD. */ + status = HAL_ERROR; + } + } + + return status; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receive an amount of data in DMA mode till either the expected number of data is received or an IDLE event occurs. + * @note Reception is initiated by this function call. Further progress of reception is achieved thanks + * to DMA services, transferring automatically received data elements in user reception buffer and + * calling registered callbacks at half/end of reception. UART IDLE events are also used to consider + * reception phase as ended. In all cases, callback execution will indicate number of received data elements. + * @note When the UART parity is enabled (PCE = 1), the received data contain + * the parity bit (MSB position). + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M = 01), + * the received data is handled as a set of uint16_t. In this case, Size must indicate the number + * of uint16_t available through pData. + * @param huart UART handle. + * @param pData Pointer to data buffer (uint8_t or uint16_t data elements). + * @param Size Amount of data elements (uint8_t or uint16_t) to be received. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UARTEx_ReceiveToIdle_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) +{ + HAL_StatusTypeDef status; + + /* Check that a Rx process is not already ongoing */ + if (huart->RxState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + /* Set Reception type to reception till IDLE Event*/ + huart->ReceptionType = HAL_UART_RECEPTION_TOIDLE; + huart->RxEventType = HAL_UART_RXEVENT_TC; + + status = UART_Start_Receive_DMA(huart, pData, Size); + + /* Check Rx process has been successfully started */ + if (status == HAL_OK) + { + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + __HAL_UART_CLEAR_IDLEFLAG(huart); + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_IDLEIE); + } + else + { + /* In case of errors already pending when reception is started, + Interrupts may have already been raised and lead to reception abortion. + (Overrun error for instance). + In such case Reception Type has been reset to HAL_UART_RECEPTION_STANDARD. */ + status = HAL_ERROR; + } + } + + return status; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Provide Rx Event type that has lead to RxEvent callback execution. 
+ * @note When HAL_UARTEx_ReceiveToIdle_IT() or HAL_UARTEx_ReceiveToIdle_DMA() API are called, progress + * of reception process is provided to application through calls of Rx Event callback (either default one + * HAL_UARTEx_RxEventCallback() or user registered one). As several types of events could occur (IDLE event, + * Half Transfer, or Transfer Complete), this function allows to retrieve the Rx Event type that has lead + * to Rx Event callback execution. + * @note This function is expected to be called within the user implementation of Rx Event Callback, + * in order to provide the accurate value : + * In Interrupt Mode : + * - HAL_UART_RXEVENT_TC : when Reception has been completed (expected nb of data has been received) + * - HAL_UART_RXEVENT_IDLE : when Idle event occurred prior reception has been completed (nb of + * received data is lower than expected one) + * In DMA Mode : + * - HAL_UART_RXEVENT_TC : when Reception has been completed (expected nb of data has been received) + * - HAL_UART_RXEVENT_HT : when half of expected nb of data has been received + * - HAL_UART_RXEVENT_IDLE : when Idle event occurred prior reception has been completed (nb of + * received data is lower than expected one). + * In DMA mode, RxEvent callback could be called several times; + * When DMA is configured in Normal Mode, HT event does not stop Reception process; + * When DMA is configured in Circular Mode, HT, TC or IDLE events don't stop Reception process; + * @param huart UART handle. + * @retval Rx Event Type (returned value will be a value of @ref UART_RxEvent_Type_Values) + */ +HAL_UART_RxEventTypeTypeDef HAL_UARTEx_GetRxEventType(UART_HandleTypeDef *huart) +{ + /* Return Rx Event type value, as stored in UART handle */ + return(huart->RxEventType); +} + +/** + * @brief Abort ongoing transfers (blocking mode). + * @param huart UART handle. + * @note This procedure could be used for aborting any ongoing transfer started in Interrupt or DMA mode. + * This procedure performs following operations : + * - Disable UART Interrupts (Tx and Rx) + * - Disable the DMA transfer in the peripheral register (if enabled) + * - Abort DMA transfer by calling HAL_DMA_Abort (in case of transfer in DMA mode) + * - Set handle State to READY + * @note This procedure is executed in blocking mode : when exiting function, Abort is considered as completed. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Abort(UART_HandleTypeDef *huart) +{ + /* Disable TXEIE, TCIE, RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE | USART_CR1_TXEIE | USART_CR1_TCIE)); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* If Reception till IDLE event was ongoing, disable IDLEIE interrupt */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_IDLEIE)); + } + + /* Disable the UART DMA Tx request if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + + /* Abort the UART DMA Tx stream: use blocking DMA Abort API (no callback) */ + if (huart->hdmatx != NULL) + { + /* Set the UART DMA Abort callback to Null. 
+ No call back execution at end of DMA abort procedure */ + huart->hdmatx->XferAbortCallback = NULL; + + if (HAL_DMA_Abort(huart->hdmatx) != HAL_OK) + { + if (HAL_DMA_GetError(huart->hdmatx) == HAL_DMA_ERROR_TIMEOUT) + { + /* Set error code to DMA */ + huart->ErrorCode = HAL_UART_ERROR_DMA; + + return HAL_TIMEOUT; + } + } + } + } + + /* Disable the UART DMA Rx request if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + /* Abort the UART DMA Rx stream: use blocking DMA Abort API (no callback) */ + if (huart->hdmarx != NULL) + { + /* Set the UART DMA Abort callback to Null. + No call back execution at end of DMA abort procedure */ + huart->hdmarx->XferAbortCallback = NULL; + + if (HAL_DMA_Abort(huart->hdmarx) != HAL_OK) + { + if (HAL_DMA_GetError(huart->hdmarx) == HAL_DMA_ERROR_TIMEOUT) + { + /* Set error code to DMA */ + huart->ErrorCode = HAL_UART_ERROR_DMA; + + return HAL_TIMEOUT; + } + } + } + } + + /* Reset Tx and Rx transfer counters */ + huart->TxXferCount = 0x00U; + huart->RxXferCount = 0x00U; + + /* Reset ErrorCode */ + huart->ErrorCode = HAL_UART_ERROR_NONE; + + /* Restore huart->RxState and huart->gState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + huart->gState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + return HAL_OK; +} + +/** + * @brief Abort ongoing Transmit transfer (blocking mode). + * @param huart UART handle. + * @note This procedure could be used for aborting any ongoing Tx transfer started in Interrupt or DMA mode. + * This procedure performs following operations : + * - Disable UART Interrupts (Tx) + * - Disable the DMA transfer in the peripheral register (if enabled) + * - Abort DMA transfer by calling HAL_DMA_Abort (in case of transfer in DMA mode) + * - Set handle State to READY + * @note This procedure is executed in blocking mode : when exiting function, Abort is considered as completed. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_AbortTransmit(UART_HandleTypeDef *huart) +{ + /* Disable TXEIE and TCIE interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_TXEIE | USART_CR1_TCIE)); + + /* Disable the UART DMA Tx request if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + + /* Abort the UART DMA Tx stream : use blocking DMA Abort API (no callback) */ + if (huart->hdmatx != NULL) + { + /* Set the UART DMA Abort callback to Null. + No call back execution at end of DMA abort procedure */ + huart->hdmatx->XferAbortCallback = NULL; + + if (HAL_DMA_Abort(huart->hdmatx) != HAL_OK) + { + if (HAL_DMA_GetError(huart->hdmatx) == HAL_DMA_ERROR_TIMEOUT) + { + /* Set error code to DMA */ + huart->ErrorCode = HAL_UART_ERROR_DMA; + + return HAL_TIMEOUT; + } + } + } + } + + /* Reset Tx transfer counter */ + huart->TxXferCount = 0x00U; + + /* Restore huart->gState to Ready */ + huart->gState = HAL_UART_STATE_READY; + + return HAL_OK; +} + +/** + * @brief Abort ongoing Receive transfer (blocking mode). + * @param huart UART handle. + * @note This procedure could be used for aborting any ongoing Rx transfer started in Interrupt or DMA mode. 
+ * This procedure performs following operations : + * - Disable UART Interrupts (Rx) + * - Disable the DMA transfer in the peripheral register (if enabled) + * - Abort DMA transfer by calling HAL_DMA_Abort (in case of transfer in DMA mode) + * - Set handle State to READY + * @note This procedure is executed in blocking mode : when exiting function, Abort is considered as completed. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_AbortReceive(UART_HandleTypeDef *huart) +{ + /* Disable RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* If Reception till IDLE event was ongoing, disable IDLEIE interrupt */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_IDLEIE)); + } + + /* Disable the UART DMA Rx request if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + /* Abort the UART DMA Rx stream : use blocking DMA Abort API (no callback) */ + if (huart->hdmarx != NULL) + { + /* Set the UART DMA Abort callback to Null. + No call back execution at end of DMA abort procedure */ + huart->hdmarx->XferAbortCallback = NULL; + + if (HAL_DMA_Abort(huart->hdmarx) != HAL_OK) + { + if (HAL_DMA_GetError(huart->hdmarx) == HAL_DMA_ERROR_TIMEOUT) + { + /* Set error code to DMA */ + huart->ErrorCode = HAL_UART_ERROR_DMA; + + return HAL_TIMEOUT; + } + } + } + } + + /* Reset Rx transfer counter */ + huart->RxXferCount = 0x00U; + + /* Restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + return HAL_OK; +} + +/** + * @brief Abort ongoing transfers (Interrupt mode). + * @param huart UART handle. + * @note This procedure could be used for aborting any ongoing transfer started in Interrupt or DMA mode. + * This procedure performs following operations : + * - Disable UART Interrupts (Tx and Rx) + * - Disable the DMA transfer in the peripheral register (if enabled) + * - Abort DMA transfer by calling HAL_DMA_Abort_IT (in case of transfer in DMA mode) + * - Set handle State to READY + * - At abort completion, call user abort complete callback + * @note This procedure is executed in Interrupt mode, meaning that abort procedure could be + * considered as completed only when user abort complete callback is executed (not when exiting function). + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Abort_IT(UART_HandleTypeDef *huart) +{ + uint32_t AbortCplt = 0x01U; + + /* Disable TXEIE, TCIE, RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE | USART_CR1_TXEIE | USART_CR1_TCIE)); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* If Reception till IDLE event was ongoing, disable IDLEIE interrupt */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_IDLEIE)); + } + + /* If DMA Tx and/or DMA Rx Handles are associated to UART Handle, DMA Abort complete callbacks should be initialised + before any call to DMA Abort functions */ + /* DMA Tx Handle is valid */ + if (huart->hdmatx != NULL) + { + /* Set DMA Abort Complete callback if UART DMA Tx request if enabled. 
+ Otherwise, set it to NULL */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) + { + huart->hdmatx->XferAbortCallback = UART_DMATxAbortCallback; + } + else + { + huart->hdmatx->XferAbortCallback = NULL; + } + } + /* DMA Rx Handle is valid */ + if (huart->hdmarx != NULL) + { + /* Set DMA Abort Complete callback if UART DMA Rx request if enabled. + Otherwise, set it to NULL */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) + { + huart->hdmarx->XferAbortCallback = UART_DMARxAbortCallback; + } + else + { + huart->hdmarx->XferAbortCallback = NULL; + } + } + + /* Disable the UART DMA Tx request if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) + { + /* Disable DMA Tx at UART level */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + + /* Abort the UART DMA Tx stream : use non blocking DMA Abort API (callback) */ + if (huart->hdmatx != NULL) + { + /* UART Tx DMA Abort callback has already been initialised : + will lead to call HAL_UART_AbortCpltCallback() at end of DMA abort procedure */ + + /* Abort DMA TX */ + if (HAL_DMA_Abort_IT(huart->hdmatx) != HAL_OK) + { + huart->hdmatx->XferAbortCallback = NULL; + } + else + { + AbortCplt = 0x00U; + } + } + } + + /* Disable the UART DMA Rx request if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + /* Abort the UART DMA Rx stream : use non blocking DMA Abort API (callback) */ + if (huart->hdmarx != NULL) + { + /* UART Rx DMA Abort callback has already been initialised : + will lead to call HAL_UART_AbortCpltCallback() at end of DMA abort procedure */ + + /* Abort DMA RX */ + if (HAL_DMA_Abort_IT(huart->hdmarx) != HAL_OK) + { + huart->hdmarx->XferAbortCallback = NULL; + AbortCplt = 0x01U; + } + else + { + AbortCplt = 0x00U; + } + } + } + + /* if no DMA abort complete callback execution is required => call user Abort Complete callback */ + if (AbortCplt == 0x01U) + { + /* Reset Tx and Rx transfer counters */ + huart->TxXferCount = 0x00U; + huart->RxXferCount = 0x00U; + + /* Reset ErrorCode */ + huart->ErrorCode = HAL_UART_ERROR_NONE; + + /* Restore huart->gState and huart->RxState to Ready */ + huart->gState = HAL_UART_STATE_READY; + huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + /* As no DMA to be aborted, call directly user Abort complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Abort complete callback */ + huart->AbortCpltCallback(huart); +#else + /* Call legacy weak Abort complete callback */ + HAL_UART_AbortCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + + return HAL_OK; +} + +/** + * @brief Abort ongoing Transmit transfer (Interrupt mode). + * @param huart UART handle. + * @note This procedure could be used for aborting any ongoing Tx transfer started in Interrupt or DMA mode. + * This procedure performs following operations : + * - Disable UART Interrupts (Tx) + * - Disable the DMA transfer in the peripheral register (if enabled) + * - Abort DMA transfer by calling HAL_DMA_Abort_IT (in case of transfer in DMA mode) + * - Set handle State to READY + * - At abort completion, call user abort complete callback + * @note This procedure is executed in Interrupt mode, meaning that abort procedure could be + * considered as completed only when user abort complete callback is executed (not when exiting function). 
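+  * @note   Illustrative usage sketch (not part of the original driver); huart2 is a hypothetical handle:
+  * @code
+  *   // In the application code: request the abort (the call returns immediately)
+  *   HAL_UART_AbortTransmit_IT(&huart2);
+  *
+  *   // In a user file: the abort is only complete once this callback has executed
+  *   void HAL_UART_AbortTransmitCpltCallback(UART_HandleTypeDef *huart)
+  *   {
+  *     // a new transmission can safely be started from here
+  *   }
+  * @endcode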
+  * @retval HAL status
+  */
+HAL_StatusTypeDef HAL_UART_AbortTransmit_IT(UART_HandleTypeDef *huart)
+{
+  /* Disable TXEIE and TCIE interrupts */
+  ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_TXEIE | USART_CR1_TCIE));
+
+  /* Disable the UART DMA Tx request if enabled */
+  if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT))
+  {
+    ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT);
+
+    /* Abort the UART DMA Tx stream : use non blocking DMA Abort API (callback) */
+    if (huart->hdmatx != NULL)
+    {
+      /* Set the UART DMA Abort callback :
+         will lead to call HAL_UART_AbortTransmitCpltCallback() at end of DMA abort procedure */
+      huart->hdmatx->XferAbortCallback = UART_DMATxOnlyAbortCallback;
+
+      /* Abort DMA TX */
+      if (HAL_DMA_Abort_IT(huart->hdmatx) != HAL_OK)
+      {
+        /* Call Directly huart->hdmatx->XferAbortCallback function in case of error */
+        huart->hdmatx->XferAbortCallback(huart->hdmatx);
+      }
+    }
+    else
+    {
+      /* Reset Tx transfer counter */
+      huart->TxXferCount = 0x00U;
+
+      /* Restore huart->gState to Ready */
+      huart->gState = HAL_UART_STATE_READY;
+
+      /* As no DMA to be aborted, call directly user Abort complete callback */
+#if (USE_HAL_UART_REGISTER_CALLBACKS == 1)
+      /* Call registered Abort Transmit Complete Callback */
+      huart->AbortTransmitCpltCallback(huart);
+#else
+      /* Call legacy weak Abort Transmit Complete Callback */
+      HAL_UART_AbortTransmitCpltCallback(huart);
+#endif /* USE_HAL_UART_REGISTER_CALLBACKS */
+    }
+  }
+  else
+  {
+    /* Reset Tx transfer counter */
+    huart->TxXferCount = 0x00U;
+
+    /* Restore huart->gState to Ready */
+    huart->gState = HAL_UART_STATE_READY;
+
+    /* As no DMA to be aborted, call directly user Abort complete callback */
+#if (USE_HAL_UART_REGISTER_CALLBACKS == 1)
+    /* Call registered Abort Transmit Complete Callback */
+    huart->AbortTransmitCpltCallback(huart);
+#else
+    /* Call legacy weak Abort Transmit Complete Callback */
+    HAL_UART_AbortTransmitCpltCallback(huart);
+#endif /* USE_HAL_UART_REGISTER_CALLBACKS */
+  }
+
+  return HAL_OK;
+}
+
+/**
+  * @brief  Abort ongoing Receive transfer (Interrupt mode).
+  * @param  huart UART handle.
+  * @note   This procedure could be used for aborting any ongoing Rx transfer started in Interrupt or DMA mode.
+  *         This procedure performs following operations :
+  *           - Disable UART Interrupts (Rx)
+  *           - Disable the DMA transfer in the peripheral register (if enabled)
+  *           - Abort DMA transfer by calling HAL_DMA_Abort_IT (in case of transfer in DMA mode)
+  *           - Set handle State to READY
+  *           - At abort completion, call user abort complete callback
+  * @note   This procedure is executed in Interrupt mode, meaning that abort procedure could be
+  *         considered as completed only when user abort complete callback is executed (not when exiting function).
+  * @retval HAL status
+  */
+HAL_StatusTypeDef HAL_UART_AbortReceive_IT(UART_HandleTypeDef *huart)
+{
+  /* Disable RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */
+  ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE));
+  ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE);
+
+  /* If Reception till IDLE event was ongoing, disable IDLEIE interrupt */
+  if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE)
+  {
+    ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_IDLEIE));
+  }
+
+  /* Disable the UART DMA Rx request if enabled */
+  if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR))
+  {
+    ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR);
+
+    /* Abort the UART DMA Rx stream : use non blocking DMA Abort API (callback) */
+    if (huart->hdmarx != NULL)
+    {
+      /* Set the UART DMA Abort callback :
+         will lead to call HAL_UART_AbortReceiveCpltCallback() at end of DMA abort procedure */
+      huart->hdmarx->XferAbortCallback = UART_DMARxOnlyAbortCallback;
+
+      /* Abort DMA RX */
+      if (HAL_DMA_Abort_IT(huart->hdmarx) != HAL_OK)
+      {
+        /* Call Directly huart->hdmarx->XferAbortCallback function in case of error */
+        huart->hdmarx->XferAbortCallback(huart->hdmarx);
+      }
+    }
+    else
+    {
+      /* Reset Rx transfer counter */
+      huart->RxXferCount = 0x00U;
+
+      /* Restore huart->RxState to Ready */
+      huart->RxState = HAL_UART_STATE_READY;
+      huart->ReceptionType = HAL_UART_RECEPTION_STANDARD;
+
+      /* As no DMA to be aborted, call directly user Abort complete callback */
+#if (USE_HAL_UART_REGISTER_CALLBACKS == 1)
+      /* Call registered Abort Receive Complete Callback */
+      huart->AbortReceiveCpltCallback(huart);
+#else
+      /* Call legacy weak Abort Receive Complete Callback */
+      HAL_UART_AbortReceiveCpltCallback(huart);
+#endif /* USE_HAL_UART_REGISTER_CALLBACKS */
+    }
+  }
+  else
+  {
+    /* Reset Rx transfer counter */
+    huart->RxXferCount = 0x00U;
+
+    /* Restore huart->RxState to Ready */
+    huart->RxState = HAL_UART_STATE_READY;
+    huart->ReceptionType = HAL_UART_RECEPTION_STANDARD;
+
+    /* As no DMA to be aborted, call directly user Abort complete callback */
+#if (USE_HAL_UART_REGISTER_CALLBACKS == 1)
+    /* Call registered Abort Receive Complete Callback */
+    huart->AbortReceiveCpltCallback(huart);
+#else
+    /* Call legacy weak Abort Receive Complete Callback */
+    HAL_UART_AbortReceiveCpltCallback(huart);
+#endif /* USE_HAL_UART_REGISTER_CALLBACKS */
+  }
+
+  return HAL_OK;
+}
+
+/**
+  * @brief  This function handles UART interrupt request.
+  * @param  huart  Pointer to a UART_HandleTypeDef structure that contains
+  *                the configuration information for the specified UART module.
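+  * @note   Illustrative usage sketch (not part of the original driver): this handler is normally called from
+  *         the corresponding vector in the user interrupt file; USART3 and huart3 are hypothetical names:
+  * @code
+  *   void USART3_IRQHandler(void)
+  *   {
+  *     HAL_UART_IRQHandler(&huart3);
+  *   }
+  * @endcode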
+ * @retval None + */ +void HAL_UART_IRQHandler(UART_HandleTypeDef *huart) +{ + uint32_t isrflags = READ_REG(huart->Instance->SR); + uint32_t cr1its = READ_REG(huart->Instance->CR1); + uint32_t cr3its = READ_REG(huart->Instance->CR3); + uint32_t errorflags = 0x00U; + uint32_t dmarequest = 0x00U; + + /* If no error occurs */ + errorflags = (isrflags & (uint32_t)(USART_SR_PE | USART_SR_FE | USART_SR_ORE | USART_SR_NE)); + if (errorflags == RESET) + { + /* UART in mode Receiver -------------------------------------------------*/ + if (((isrflags & USART_SR_RXNE) != RESET) && ((cr1its & USART_CR1_RXNEIE) != RESET)) + { + UART_Receive_IT(huart); + return; + } + } + + /* If some errors occur */ + if ((errorflags != RESET) && (((cr3its & USART_CR3_EIE) != RESET) + || ((cr1its & (USART_CR1_RXNEIE | USART_CR1_PEIE)) != RESET))) + { + /* UART parity error interrupt occurred ----------------------------------*/ + if (((isrflags & USART_SR_PE) != RESET) && ((cr1its & USART_CR1_PEIE) != RESET)) + { + huart->ErrorCode |= HAL_UART_ERROR_PE; + } + + /* UART noise error interrupt occurred -----------------------------------*/ + if (((isrflags & USART_SR_NE) != RESET) && ((cr3its & USART_CR3_EIE) != RESET)) + { + huart->ErrorCode |= HAL_UART_ERROR_NE; + } + + /* UART frame error interrupt occurred -----------------------------------*/ + if (((isrflags & USART_SR_FE) != RESET) && ((cr3its & USART_CR3_EIE) != RESET)) + { + huart->ErrorCode |= HAL_UART_ERROR_FE; + } + + /* UART Over-Run interrupt occurred --------------------------------------*/ + if (((isrflags & USART_SR_ORE) != RESET) && (((cr1its & USART_CR1_RXNEIE) != RESET) + || ((cr3its & USART_CR3_EIE) != RESET))) + { + huart->ErrorCode |= HAL_UART_ERROR_ORE; + } + + /* Call UART Error Call back function if need be --------------------------*/ + if (huart->ErrorCode != HAL_UART_ERROR_NONE) + { + /* UART in mode Receiver -----------------------------------------------*/ + if (((isrflags & USART_SR_RXNE) != RESET) && ((cr1its & USART_CR1_RXNEIE) != RESET)) + { + UART_Receive_IT(huart); + } + + /* If Overrun error occurs, or if any error occurs in DMA mode reception, + consider error as blocking */ + dmarequest = HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR); + if (((huart->ErrorCode & HAL_UART_ERROR_ORE) != RESET) || dmarequest) + { + /* Blocking error : transfer is aborted + Set the UART state ready to be able to start again the process, + Disable Rx Interrupts, and disable Rx DMA request, if ongoing */ + UART_EndRxTransfer(huart); + + /* Disable the UART DMA Rx request if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + /* Abort the UART DMA Rx stream */ + if (huart->hdmarx != NULL) + { + /* Set the UART DMA Abort callback : + will lead to call HAL_UART_ErrorCallback() at end of DMA abort procedure */ + huart->hdmarx->XferAbortCallback = UART_DMAAbortOnError; + if (HAL_DMA_Abort_IT(huart->hdmarx) != HAL_OK) + { + /* Call Directly XferAbortCallback function in case of error */ + huart->hdmarx->XferAbortCallback(huart->hdmarx); + } + } + else + { + /* Call user error callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered error callback*/ + huart->ErrorCallback(huart); +#else + /*Call legacy weak error callback*/ + HAL_UART_ErrorCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + } + else + { + /* Call user error callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered error callback*/ + 
huart->ErrorCallback(huart); +#else + /*Call legacy weak error callback*/ + HAL_UART_ErrorCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + } + else + { + /* Non Blocking error : transfer could go on. + Error is notified to user through user error callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered error callback*/ + huart->ErrorCallback(huart); +#else + /*Call legacy weak error callback*/ + HAL_UART_ErrorCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + + huart->ErrorCode = HAL_UART_ERROR_NONE; + } + } + return; + } /* End if some error occurs */ + + /* Check current reception Mode : + If Reception till IDLE event has been selected : */ + if ((huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + && ((isrflags & USART_SR_IDLE) != 0U) + && ((cr1its & USART_SR_IDLE) != 0U)) + { + __HAL_UART_CLEAR_IDLEFLAG(huart); + + /* Check if DMA mode is enabled in UART */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) + { + /* DMA mode enabled */ + /* Check received length : If all expected data are received, do nothing, + (DMA cplt callback will be called). + Otherwise, if at least one data has already been received, IDLE event is to be notified to user */ + uint16_t nb_remaining_rx_data = (uint16_t) __HAL_DMA_GET_COUNTER(huart->hdmarx); + if ((nb_remaining_rx_data > 0U) + && (nb_remaining_rx_data < huart->RxXferSize)) + { + /* Reception is not complete */ + huart->RxXferCount = nb_remaining_rx_data; + + /* In Normal mode, end DMA xfer and HAL UART Rx process*/ + if (huart->hdmarx->Init.Mode != DMA_CIRCULAR) + { + /* Disable PE and ERR (Frame error, noise error, overrun error) interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_PEIE); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Disable the DMA transfer for the receiver request by resetting the DMAR bit + in the UART CR3 register */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + /* At end of Rx process, restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_IDLEIE); + + /* Last bytes received, so no need as the abort is immediate */ + (void)HAL_DMA_Abort(huart->hdmarx); + } + + /* Initialize type of RxEvent that correspond to RxEvent callback execution; + In this case, Rx Event type is Idle Event */ + huart->RxEventType = HAL_UART_RXEVENT_IDLE; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx Event callback*/ + huart->RxEventCallback(huart, (huart->RxXferSize - huart->RxXferCount)); +#else + /*Call legacy weak Rx Event callback*/ + HAL_UARTEx_RxEventCallback(huart, (huart->RxXferSize - huart->RxXferCount)); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + return; + } + else + { + /* DMA mode not enabled */ + /* Check received length : If all expected data are received, do nothing. 
+ Otherwise, if at least one data has already been received, IDLE event is to be notified to user */ + uint16_t nb_rx_data = huart->RxXferSize - huart->RxXferCount; + if ((huart->RxXferCount > 0U) + && (nb_rx_data > 0U)) + { + /* Disable the UART Parity Error Interrupt and RXNE interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); + + /* Disable the UART Error Interrupt: (Frame error, noise error, overrun error) */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Rx process is completed, restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_IDLEIE); + + /* Initialize type of RxEvent that correspond to RxEvent callback execution; + In this case, Rx Event type is Idle Event */ + huart->RxEventType = HAL_UART_RXEVENT_IDLE; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx complete callback*/ + huart->RxEventCallback(huart, nb_rx_data); +#else + /*Call legacy weak Rx Event callback*/ + HAL_UARTEx_RxEventCallback(huart, nb_rx_data); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + return; + } + } + + /* UART in mode Transmitter ------------------------------------------------*/ + if (((isrflags & USART_SR_TXE) != RESET) && ((cr1its & USART_CR1_TXEIE) != RESET)) + { + UART_Transmit_IT(huart); + return; + } + + /* UART in mode Transmitter end --------------------------------------------*/ + if (((isrflags & USART_SR_TC) != RESET) && ((cr1its & USART_CR1_TCIE) != RESET)) + { + UART_EndTransmit_IT(huart); + return; + } +} + +/** + * @brief Tx Transfer completed callbacks. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @retval None + */ +__weak void HAL_UART_TxCpltCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + /* NOTE: This function should not be modified, when the callback is needed, + the HAL_UART_TxCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Tx Half Transfer completed callbacks. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @retval None + */ +__weak void HAL_UART_TxHalfCpltCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + /* NOTE: This function should not be modified, when the callback is needed, + the HAL_UART_TxHalfCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Rx Transfer completed callbacks. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @retval None + */ +__weak void HAL_UART_RxCpltCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + /* NOTE: This function should not be modified, when the callback is needed, + the HAL_UART_RxCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Rx Half Transfer completed callbacks. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. 
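+  * @note   Illustrative usage sketch (not part of the original driver): the weak callback can be overridden
+  *         in a user file; USART3 and process_first_half() are hypothetical application-side names:
+  * @code
+  *   void HAL_UART_RxHalfCpltCallback(UART_HandleTypeDef *huart)
+  *   {
+  *     if (huart->Instance == USART3)
+  *     {
+  *       process_first_half();   // first half of the reception buffer is ready
+  *     }
+  *   }
+  * @endcode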
+ * @retval None + */ +__weak void HAL_UART_RxHalfCpltCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + /* NOTE: This function should not be modified, when the callback is needed, + the HAL_UART_RxHalfCpltCallback could be implemented in the user file + */ +} + +/** + * @brief UART error callbacks. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @retval None + */ +__weak void HAL_UART_ErrorCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + /* NOTE: This function should not be modified, when the callback is needed, + the HAL_UART_ErrorCallback could be implemented in the user file + */ +} + +/** + * @brief UART Abort Complete callback. + * @param huart UART handle. + * @retval None + */ +__weak void HAL_UART_AbortCpltCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_UART_AbortCpltCallback can be implemented in the user file. + */ +} + +/** + * @brief UART Abort Complete callback. + * @param huart UART handle. + * @retval None + */ +__weak void HAL_UART_AbortTransmitCpltCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_UART_AbortTransmitCpltCallback can be implemented in the user file. + */ +} + +/** + * @brief UART Abort Receive Complete callback. + * @param huart UART handle. + * @retval None + */ +__weak void HAL_UART_AbortReceiveCpltCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_UART_AbortReceiveCpltCallback can be implemented in the user file. + */ +} + +/** + * @brief Reception Event Callback (Rx event notification called after use of advanced reception service). + * @param huart UART handle + * @param Size Number of data available in application reception buffer (indicates a position in + * reception buffer until which, data are available) + * @retval None + */ +__weak void HAL_UARTEx_RxEventCallback(UART_HandleTypeDef *huart, uint16_t Size) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + UNUSED(Size); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_UARTEx_RxEventCallback can be implemented in the user file. + */ +} + +/** + * @} + */ + +/** @defgroup UART_Exported_Functions_Group3 Peripheral Control functions + * @brief UART control functions + * +@verbatim + ============================================================================== + ##### Peripheral Control functions ##### + ============================================================================== + [..] + This subsection provides a set of functions allowing to control the UART: + (+) HAL_LIN_SendBreak() API can be helpful to transmit the break character. + (+) HAL_MultiProcessor_EnterMuteMode() API can be helpful to enter the UART in mute mode. + (+) HAL_MultiProcessor_ExitMuteMode() API can be helpful to exit the UART mute mode by software. 
+ (+) HAL_HalfDuplex_EnableTransmitter() API to enable the UART transmitter and disables the UART receiver in Half Duplex mode + (+) HAL_HalfDuplex_EnableReceiver() API to enable the UART receiver and disables the UART transmitter in Half Duplex mode + +@endverbatim + * @{ + */ + +/** + * @brief Transmits break characters. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LIN_SendBreak(UART_HandleTypeDef *huart) +{ + /* Check the parameters */ + assert_param(IS_UART_INSTANCE(huart->Instance)); + + /* Process Locked */ + __HAL_LOCK(huart); + + huart->gState = HAL_UART_STATE_BUSY; + + /* Send break characters */ + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_SBK); + + huart->gState = HAL_UART_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +/** + * @brief Enters the UART in mute mode. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_MultiProcessor_EnterMuteMode(UART_HandleTypeDef *huart) +{ + /* Check the parameters */ + assert_param(IS_UART_INSTANCE(huart->Instance)); + + /* Process Locked */ + __HAL_LOCK(huart); + + huart->gState = HAL_UART_STATE_BUSY; + + /* Enable the USART mute mode by setting the RWU bit in the CR1 register */ + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_RWU); + + huart->gState = HAL_UART_STATE_READY; + huart->RxEventType = HAL_UART_RXEVENT_TC; + + /* Process Unlocked */ + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +/** + * @brief Exits the UART mute mode: wake up software. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_MultiProcessor_ExitMuteMode(UART_HandleTypeDef *huart) +{ + /* Check the parameters */ + assert_param(IS_UART_INSTANCE(huart->Instance)); + + /* Process Locked */ + __HAL_LOCK(huart); + + huart->gState = HAL_UART_STATE_BUSY; + + /* Disable the USART mute mode by clearing the RWU bit in the CR1 register */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_RWU); + + huart->gState = HAL_UART_STATE_READY; + huart->RxEventType = HAL_UART_RXEVENT_TC; + + /* Process Unlocked */ + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +/** + * @brief Enables the UART transmitter and disables the UART receiver. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_HalfDuplex_EnableTransmitter(UART_HandleTypeDef *huart) +{ + uint32_t tmpreg = 0x00U; + + /* Process Locked */ + __HAL_LOCK(huart); + + huart->gState = HAL_UART_STATE_BUSY; + + /*-------------------------- USART CR1 Configuration -----------------------*/ + tmpreg = huart->Instance->CR1; + + /* Clear TE and RE bits */ + tmpreg &= (uint32_t)~((uint32_t)(USART_CR1_TE | USART_CR1_RE)); + + /* Enable the USART's transmit interface by setting the TE bit in the USART CR1 register */ + tmpreg |= (uint32_t)USART_CR1_TE; + + /* Write to USART CR1 */ + WRITE_REG(huart->Instance->CR1, (uint32_t)tmpreg); + + huart->gState = HAL_UART_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +/** + * @brief Enables the UART receiver and disables the UART transmitter. 
+ * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_HalfDuplex_EnableReceiver(UART_HandleTypeDef *huart) +{ + uint32_t tmpreg = 0x00U; + + /* Process Locked */ + __HAL_LOCK(huart); + + huart->gState = HAL_UART_STATE_BUSY; + + /*-------------------------- USART CR1 Configuration -----------------------*/ + tmpreg = huart->Instance->CR1; + + /* Clear TE and RE bits */ + tmpreg &= (uint32_t)~((uint32_t)(USART_CR1_TE | USART_CR1_RE)); + + /* Enable the USART's receive interface by setting the RE bit in the USART CR1 register */ + tmpreg |= (uint32_t)USART_CR1_RE; + + /* Write to USART CR1 */ + WRITE_REG(huart->Instance->CR1, (uint32_t)tmpreg); + + huart->gState = HAL_UART_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup UART_Exported_Functions_Group4 Peripheral State and Errors functions + * @brief UART State and Errors functions + * +@verbatim + ============================================================================== + ##### Peripheral State and Errors functions ##### + ============================================================================== + [..] + This subsection provides a set of functions allowing to return the State of + UART communication process, return Peripheral Errors occurred during communication + process + (+) HAL_UART_GetState() API can be helpful to check in run-time the state of the UART peripheral. + (+) HAL_UART_GetError() check in run-time errors that could be occurred during communication. + +@endverbatim + * @{ + */ + +/** + * @brief Returns the UART state. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @retval HAL state + */ +HAL_UART_StateTypeDef HAL_UART_GetState(const UART_HandleTypeDef *huart) +{ + uint32_t temp1 = 0x00U, temp2 = 0x00U; + temp1 = huart->gState; + temp2 = huart->RxState; + + return (HAL_UART_StateTypeDef)(temp1 | temp2); +} + +/** + * @brief Return the UART error code + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART. + * @retval UART Error Code + */ +uint32_t HAL_UART_GetError(const UART_HandleTypeDef *huart) +{ + return huart->ErrorCode; +} + +/** + * @} + */ + +/** + * @} + */ + +/** @defgroup UART_Private_Functions UART Private Functions + * @{ + */ + +/** + * @brief Initialize the callbacks to their default values. + * @param huart UART handle. 
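+  * @note   Illustrative sketch (not part of the original driver), assuming USE_HAL_UART_REGISTER_CALLBACKS == 1:
+  *         each of these defaults can later be replaced through HAL_UART_RegisterCallback(); huart2 and
+  *         my_tx_done() are hypothetical application-side names:
+  * @code
+  *   void my_tx_done(UART_HandleTypeDef *huart)
+  *   {
+  *     // user handling of end of transmission
+  *   }
+  *
+  *   // After the UART has been initialized:
+  *   HAL_UART_RegisterCallback(&huart2, HAL_UART_TX_COMPLETE_CB_ID, my_tx_done);
+  * @endcode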
+ * @retval none + */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) +void UART_InitCallbacksToDefault(UART_HandleTypeDef *huart) +{ + /* Init the UART Callback settings */ + huart->TxHalfCpltCallback = HAL_UART_TxHalfCpltCallback; /* Legacy weak TxHalfCpltCallback */ + huart->TxCpltCallback = HAL_UART_TxCpltCallback; /* Legacy weak TxCpltCallback */ + huart->RxHalfCpltCallback = HAL_UART_RxHalfCpltCallback; /* Legacy weak RxHalfCpltCallback */ + huart->RxCpltCallback = HAL_UART_RxCpltCallback; /* Legacy weak RxCpltCallback */ + huart->ErrorCallback = HAL_UART_ErrorCallback; /* Legacy weak ErrorCallback */ + huart->AbortCpltCallback = HAL_UART_AbortCpltCallback; /* Legacy weak AbortCpltCallback */ + huart->AbortTransmitCpltCallback = HAL_UART_AbortTransmitCpltCallback; /* Legacy weak AbortTransmitCpltCallback */ + huart->AbortReceiveCpltCallback = HAL_UART_AbortReceiveCpltCallback; /* Legacy weak AbortReceiveCpltCallback */ + huart->RxEventCallback = HAL_UARTEx_RxEventCallback; /* Legacy weak RxEventCallback */ + +} +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + +/** + * @brief DMA UART transmit process complete callback. + * @param hdma Pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +static void UART_DMATransmitCplt(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + /* DMA Normal mode*/ + if ((hdma->Instance->CR & DMA_SxCR_CIRC) == 0U) + { + huart->TxXferCount = 0x00U; + + /* Disable the DMA transfer for transmit request by setting the DMAT bit + in the UART CR3 register */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + + /* Enable the UART Transmit Complete Interrupt */ + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_TCIE); + + } + /* DMA Circular mode */ + else + { +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Tx complete callback*/ + huart->TxCpltCallback(huart); +#else + /*Call legacy weak Tx complete callback*/ + HAL_UART_TxCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } +} + +/** + * @brief DMA UART transmit process half complete callback + * @param hdma Pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +static void UART_DMATxHalfCplt(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Tx complete callback*/ + huart->TxHalfCpltCallback(huart); +#else + /*Call legacy weak Tx complete callback*/ + HAL_UART_TxHalfCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA UART receive process complete callback. + * @param hdma Pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. 
+ * @retval None + */ +static void UART_DMAReceiveCplt(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + /* DMA Normal mode*/ + if ((hdma->Instance->CR & DMA_SxCR_CIRC) == 0U) + { + huart->RxXferCount = 0U; + + /* Disable RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_PEIE); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Disable the DMA transfer for the receiver request by setting the DMAR bit + in the UART CR3 register */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + /* At end of Rx process, restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + + /* If Reception till IDLE event has been selected, Disable IDLE Interrupt */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_IDLEIE); + } + } + + /* Initialize type of RxEvent that correspond to RxEvent callback execution; + In this case, Rx Event type is Transfer Complete */ + huart->RxEventType = HAL_UART_RXEVENT_TC; + + /* Check current reception Mode : + If Reception till IDLE event has been selected : use Rx Event callback */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx Event callback*/ + huart->RxEventCallback(huart, huart->RxXferSize); +#else + /*Call legacy weak Rx Event callback*/ + HAL_UARTEx_RxEventCallback(huart, huart->RxXferSize); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + else + { + /* In other cases : use Rx Complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx complete callback*/ + huart->RxCpltCallback(huart); +#else + /*Call legacy weak Rx complete callback*/ + HAL_UART_RxCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } +} + +/** + * @brief DMA UART receive process half complete callback + * @param hdma Pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +static void UART_DMARxHalfCplt(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + /* Initialize type of RxEvent that correspond to RxEvent callback execution; + In this case, Rx Event type is Half Transfer */ + huart->RxEventType = HAL_UART_RXEVENT_HT; + + /* Check current reception Mode : + If Reception till IDLE event has been selected : use Rx Event callback */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx Event callback*/ + huart->RxEventCallback(huart, huart->RxXferSize / 2U); +#else + /*Call legacy weak Rx Event callback*/ + HAL_UARTEx_RxEventCallback(huart, huart->RxXferSize / 2U); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + else + { + /* In other cases : use Rx Half Complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx Half complete callback*/ + huart->RxHalfCpltCallback(huart); +#else + /*Call legacy weak Rx Half complete callback*/ + HAL_UART_RxHalfCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } +} + +/** + * @brief DMA UART communication error callback. + * @param hdma Pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. 
+ * @retval None + */ +static void UART_DMAError(DMA_HandleTypeDef *hdma) +{ + uint32_t dmarequest = 0x00U; + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + /* Stop UART DMA Tx request if ongoing */ + dmarequest = HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT); + if ((huart->gState == HAL_UART_STATE_BUSY_TX) && dmarequest) + { + huart->TxXferCount = 0x00U; + UART_EndTxTransfer(huart); + } + + /* Stop UART DMA Rx request if ongoing */ + dmarequest = HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR); + if ((huart->RxState == HAL_UART_STATE_BUSY_RX) && dmarequest) + { + huart->RxXferCount = 0x00U; + UART_EndRxTransfer(huart); + } + + huart->ErrorCode |= HAL_UART_ERROR_DMA; +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered error callback*/ + huart->ErrorCallback(huart); +#else + /*Call legacy weak error callback*/ + HAL_UART_ErrorCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + +/** + * @brief This function handles UART Communication Timeout. It waits + * until a flag is no longer in the specified status. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @param Flag specifies the UART flag to check. + * @param Status The actual Flag status (SET or RESET). + * @param Tickstart Tick start value + * @param Timeout Timeout duration + * @retval HAL status + */ +static HAL_StatusTypeDef UART_WaitOnFlagUntilTimeout(UART_HandleTypeDef *huart, uint32_t Flag, FlagStatus Status, + uint32_t Tickstart, uint32_t Timeout) +{ + /* Wait until flag is set */ + while ((__HAL_UART_GET_FLAG(huart, Flag) ? SET : RESET) == Status) + { + /* Check for the Timeout */ + if (Timeout != HAL_MAX_DELAY) + { + if (((HAL_GetTick() - Tickstart) > Timeout) || (Timeout == 0U)) + { + + return HAL_TIMEOUT; + } + + if ((READ_BIT(huart->Instance->CR1, USART_CR1_RE) != 0U) && (Flag != UART_FLAG_TXE) && (Flag != UART_FLAG_TC)) + { + if (__HAL_UART_GET_FLAG(huart, UART_FLAG_ORE) == SET) + { + /* Clear Overrun Error flag*/ + __HAL_UART_CLEAR_OREFLAG(huart); + + /* Blocking error : transfer is aborted + Set the UART state ready to be able to start again the process, + Disable Rx Interrupts if ongoing */ + UART_EndRxTransfer(huart); + + huart->ErrorCode = HAL_UART_ERROR_ORE; + + /* Process Unlocked */ + __HAL_UNLOCK(huart); + + return HAL_ERROR; + } + } + } + } + return HAL_OK; +} + +/** + * @brief Start Receive operation in interrupt mode. + * @note This function could be called by all HAL UART API providing reception in Interrupt mode. + * @note When calling this function, parameters validity is considered as already checked, + * i.e. Rx State, buffer address, ... + * UART Handle is assumed as Locked. + * @param huart UART handle. + * @param pData Pointer to data buffer (u8 or u16 data elements). + * @param Size Amount of data elements (u8 or u16) to be received. 
+ * @retval HAL status + */ +HAL_StatusTypeDef UART_Start_Receive_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) +{ + huart->pRxBuffPtr = pData; + huart->RxXferSize = Size; + huart->RxXferCount = Size; + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->RxState = HAL_UART_STATE_BUSY_RX; + + if (huart->Init.Parity != UART_PARITY_NONE) + { + /* Enable the UART Parity Error Interrupt */ + __HAL_UART_ENABLE_IT(huart, UART_IT_PE); + } + + /* Enable the UART Error Interrupt: (Frame error, noise error, overrun error) */ + __HAL_UART_ENABLE_IT(huart, UART_IT_ERR); + + /* Enable the UART Data Register not empty Interrupt */ + __HAL_UART_ENABLE_IT(huart, UART_IT_RXNE); + + return HAL_OK; +} + +/** + * @brief Start Receive operation in DMA mode. + * @note This function could be called by all HAL UART API providing reception in DMA mode. + * @note When calling this function, parameters validity is considered as already checked, + * i.e. Rx State, buffer address, ... + * UART Handle is assumed as Locked. + * @param huart UART handle. + * @param pData Pointer to data buffer (u8 or u16 data elements). + * @param Size Amount of data elements (u8 or u16) to be received. + * @retval HAL status + */ +HAL_StatusTypeDef UART_Start_Receive_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) +{ + uint32_t *tmp; + + huart->pRxBuffPtr = pData; + huart->RxXferSize = Size; + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->RxState = HAL_UART_STATE_BUSY_RX; + + /* Set the UART DMA transfer complete callback */ + huart->hdmarx->XferCpltCallback = UART_DMAReceiveCplt; + + /* Set the UART DMA Half transfer complete callback */ + huart->hdmarx->XferHalfCpltCallback = UART_DMARxHalfCplt; + + /* Set the DMA error callback */ + huart->hdmarx->XferErrorCallback = UART_DMAError; + + /* Set the DMA abort callback */ + huart->hdmarx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + tmp = (uint32_t *)&pData; + HAL_DMA_Start_IT(huart->hdmarx, (uint32_t)&huart->Instance->DR, *(uint32_t *)tmp, Size); + + /* Clear the Overrun flag just before enabling the DMA Rx request: can be mandatory for the second transfer */ + __HAL_UART_CLEAR_OREFLAG(huart); + + if (huart->Init.Parity != UART_PARITY_NONE) + { + /* Enable the UART Parity Error Interrupt */ + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_PEIE); + } + + /* Enable the UART Error Interrupt: (Frame error, noise error, overrun error) */ + ATOMIC_SET_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Enable the DMA transfer for the receiver request by setting the DMAR bit + in the UART CR3 register */ + ATOMIC_SET_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + return HAL_OK; +} + +/** + * @brief End ongoing Tx transfer on UART peripheral (following error detection or Transmit completion). + * @param huart UART handle. + * @retval None + */ +static void UART_EndTxTransfer(UART_HandleTypeDef *huart) +{ + /* Disable TXEIE and TCIE interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_TXEIE | USART_CR1_TCIE)); + + /* At end of Tx process, restore huart->gState to Ready */ + huart->gState = HAL_UART_STATE_READY; +} + +/** + * @brief End ongoing Rx transfer on UART peripheral (following error detection or Reception completion). + * @param huart UART handle. 
+ * @retval None + */ +static void UART_EndRxTransfer(UART_HandleTypeDef *huart) +{ + /* Disable RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* In case of reception waiting for IDLE event, disable also the IDLE IE interrupt source */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_IDLEIE); + } + + /* At end of Rx process, restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; +} + +/** + * @brief DMA UART communication abort callback, when initiated by HAL services on Error + * (To be called at end of DMA Abort procedure following error occurrence). + * @param hdma Pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +static void UART_DMAAbortOnError(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + huart->RxXferCount = 0x00U; + huart->TxXferCount = 0x00U; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered error callback*/ + huart->ErrorCallback(huart); +#else + /*Call legacy weak error callback*/ + HAL_UART_ErrorCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA UART Tx communication abort callback, when initiated by user + * (To be called at end of DMA Tx Abort procedure following user abort request). + * @note When this callback is executed, User Abort complete call back is called only if no + * Abort still ongoing for Rx DMA Handle. + * @param hdma Pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +static void UART_DMATxAbortCallback(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + huart->hdmatx->XferAbortCallback = NULL; + + /* Check if an Abort process is still ongoing */ + if (huart->hdmarx != NULL) + { + if (huart->hdmarx->XferAbortCallback != NULL) + { + return; + } + } + + /* No Abort process still ongoing : All DMA channels are aborted, call user Abort Complete callback */ + huart->TxXferCount = 0x00U; + huart->RxXferCount = 0x00U; + + /* Reset ErrorCode */ + huart->ErrorCode = HAL_UART_ERROR_NONE; + + /* Restore huart->gState and huart->RxState to Ready */ + huart->gState = HAL_UART_STATE_READY; + huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + /* Call user Abort complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Abort complete callback */ + huart->AbortCpltCallback(huart); +#else + /* Call legacy weak Abort complete callback */ + HAL_UART_AbortCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA UART Rx communication abort callback, when initiated by user + * (To be called at end of DMA Rx Abort procedure following user abort request). + * @note When this callback is executed, User Abort complete call back is called only if no + * Abort still ongoing for Tx DMA Handle. + * @param hdma Pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. 
+ * @retval None + */ +static void UART_DMARxAbortCallback(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + huart->hdmarx->XferAbortCallback = NULL; + + /* Check if an Abort process is still ongoing */ + if (huart->hdmatx != NULL) + { + if (huart->hdmatx->XferAbortCallback != NULL) + { + return; + } + } + + /* No Abort process still ongoing : All DMA channels are aborted, call user Abort Complete callback */ + huart->TxXferCount = 0x00U; + huart->RxXferCount = 0x00U; + + /* Reset ErrorCode */ + huart->ErrorCode = HAL_UART_ERROR_NONE; + + /* Restore huart->gState and huart->RxState to Ready */ + huart->gState = HAL_UART_STATE_READY; + huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + /* Call user Abort complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Abort complete callback */ + huart->AbortCpltCallback(huart); +#else + /* Call legacy weak Abort complete callback */ + HAL_UART_AbortCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA UART Tx communication abort callback, when initiated by user by a call to + * HAL_UART_AbortTransmit_IT API (Abort only Tx transfer) + * (This callback is executed at end of DMA Tx Abort procedure following user abort request, + * and leads to user Tx Abort Complete callback execution). + * @param hdma Pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +static void UART_DMATxOnlyAbortCallback(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + huart->TxXferCount = 0x00U; + + /* Restore huart->gState to Ready */ + huart->gState = HAL_UART_STATE_READY; + + /* Call user Abort complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Abort Transmit Complete Callback */ + huart->AbortTransmitCpltCallback(huart); +#else + /* Call legacy weak Abort Transmit Complete Callback */ + HAL_UART_AbortTransmitCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA UART Rx communication abort callback, when initiated by user by a call to + * HAL_UART_AbortReceive_IT API (Abort only Rx transfer) + * (This callback is executed at end of DMA Rx Abort procedure following user abort request, + * and leads to user Rx Abort Complete callback execution). + * @param hdma Pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +static void UART_DMARxOnlyAbortCallback(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + huart->RxXferCount = 0x00U; + + /* Restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + /* Call user Abort complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Abort Receive Complete Callback */ + huart->AbortReceiveCpltCallback(huart); +#else + /* Call legacy weak Abort Receive Complete Callback */ + HAL_UART_AbortReceiveCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + +/** + * @brief Sends an amount of data in non blocking mode. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. 
+ * @retval HAL status + */ +static HAL_StatusTypeDef UART_Transmit_IT(UART_HandleTypeDef *huart) +{ + const uint16_t *tmp; + + /* Check that a Tx process is ongoing */ + if (huart->gState == HAL_UART_STATE_BUSY_TX) + { + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) && (huart->Init.Parity == UART_PARITY_NONE)) + { + tmp = (const uint16_t *) huart->pTxBuffPtr; + huart->Instance->DR = (uint16_t)(*tmp & (uint16_t)0x01FF); + huart->pTxBuffPtr += 2U; + } + else + { + huart->Instance->DR = (uint8_t)(*huart->pTxBuffPtr++ & (uint8_t)0x00FF); + } + + if (--huart->TxXferCount == 0U) + { + /* Disable the UART Transmit Data Register Empty Interrupt */ + __HAL_UART_DISABLE_IT(huart, UART_IT_TXE); + + /* Enable the UART Transmit Complete Interrupt */ + __HAL_UART_ENABLE_IT(huart, UART_IT_TC); + } + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Wraps up transmission in non blocking mode. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @retval HAL status + */ +static HAL_StatusTypeDef UART_EndTransmit_IT(UART_HandleTypeDef *huart) +{ + /* Disable the UART Transmit Complete Interrupt */ + __HAL_UART_DISABLE_IT(huart, UART_IT_TC); + + /* Tx process is ended, restore huart->gState to Ready */ + huart->gState = HAL_UART_STATE_READY; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Tx complete callback*/ + huart->TxCpltCallback(huart); +#else + /*Call legacy weak Tx complete callback*/ + HAL_UART_TxCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + + return HAL_OK; +} + +/** + * @brief Receives an amount of data in non blocking mode + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. 
+ * @retval HAL status + */ +static HAL_StatusTypeDef UART_Receive_IT(UART_HandleTypeDef *huart) +{ + uint8_t *pdata8bits; + uint16_t *pdata16bits; + + /* Check that a Rx process is ongoing */ + if (huart->RxState == HAL_UART_STATE_BUSY_RX) + { + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) && (huart->Init.Parity == UART_PARITY_NONE)) + { + pdata8bits = NULL; + pdata16bits = (uint16_t *) huart->pRxBuffPtr; + *pdata16bits = (uint16_t)(huart->Instance->DR & (uint16_t)0x01FF); + huart->pRxBuffPtr += 2U; + } + else + { + pdata8bits = (uint8_t *) huart->pRxBuffPtr; + pdata16bits = NULL; + + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) || ((huart->Init.WordLength == UART_WORDLENGTH_8B) && (huart->Init.Parity == UART_PARITY_NONE))) + { + *pdata8bits = (uint8_t)(huart->Instance->DR & (uint8_t)0x00FF); + } + else + { + *pdata8bits = (uint8_t)(huart->Instance->DR & (uint8_t)0x007F); + } + huart->pRxBuffPtr += 1U; + } + + if (--huart->RxXferCount == 0U) + { + /* Disable the UART Data Register not empty Interrupt */ + __HAL_UART_DISABLE_IT(huart, UART_IT_RXNE); + + /* Disable the UART Parity Error Interrupt */ + __HAL_UART_DISABLE_IT(huart, UART_IT_PE); + + /* Disable the UART Error Interrupt: (Frame error, noise error, overrun error) */ + __HAL_UART_DISABLE_IT(huart, UART_IT_ERR); + + /* Rx process is completed, restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + + /* Initialize type of RxEvent to Transfer Complete */ + huart->RxEventType = HAL_UART_RXEVENT_TC; + + /* Check current reception Mode : + If Reception till IDLE event has been selected : */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + /* Set reception type to Standard */ + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + /* Disable IDLE interrupt */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_IDLEIE); + + /* Check if IDLE flag is set */ + if (__HAL_UART_GET_FLAG(huart, UART_FLAG_IDLE)) + { + /* Clear IDLE flag in ISR */ + __HAL_UART_CLEAR_IDLEFLAG(huart); + } + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx Event callback*/ + huart->RxEventCallback(huart, huart->RxXferSize); +#else + /*Call legacy weak Rx Event callback*/ + HAL_UARTEx_RxEventCallback(huart, huart->RxXferSize); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + else + { + /* Standard reception API called */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx complete callback*/ + huart->RxCpltCallback(huart); +#else + /*Call legacy weak Rx complete callback*/ + HAL_UART_RxCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + + return HAL_OK; + } + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Configures the UART peripheral. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. 
+ * @retval None + */ +static void UART_SetConfig(UART_HandleTypeDef *huart) +{ + uint32_t tmpreg; + uint32_t pclk; + + /* Check the parameters */ + assert_param(IS_UART_BAUDRATE(huart->Init.BaudRate)); + assert_param(IS_UART_STOPBITS(huart->Init.StopBits)); + assert_param(IS_UART_PARITY(huart->Init.Parity)); + assert_param(IS_UART_MODE(huart->Init.Mode)); + + /*-------------------------- USART CR2 Configuration -----------------------*/ + /* Configure the UART Stop Bits: Set STOP[13:12] bits + according to huart->Init.StopBits value */ + MODIFY_REG(huart->Instance->CR2, USART_CR2_STOP, huart->Init.StopBits); + + /*-------------------------- USART CR1 Configuration -----------------------*/ + /* Configure the UART Word Length, Parity and mode: + Set the M bits according to huart->Init.WordLength value + Set PCE and PS bits according to huart->Init.Parity value + Set TE and RE bits according to huart->Init.Mode value + Set OVER8 bit according to huart->Init.OverSampling value */ + + tmpreg = (uint32_t)huart->Init.WordLength | huart->Init.Parity | huart->Init.Mode | huart->Init.OverSampling; + MODIFY_REG(huart->Instance->CR1, + (uint32_t)(USART_CR1_M | USART_CR1_PCE | USART_CR1_PS | USART_CR1_TE | USART_CR1_RE | USART_CR1_OVER8), + tmpreg); + + /*-------------------------- USART CR3 Configuration -----------------------*/ + /* Configure the UART HFC: Set CTSE and RTSE bits according to huart->Init.HwFlowCtl value */ + MODIFY_REG(huart->Instance->CR3, (USART_CR3_RTSE | USART_CR3_CTSE), huart->Init.HwFlowCtl); + + +#if defined(USART6) && defined(UART9) && defined(UART10) + if ((huart->Instance == USART1) || (huart->Instance == USART6) || (huart->Instance == UART9) || (huart->Instance == UART10)) + { + pclk = HAL_RCC_GetPCLK2Freq(); + } +#elif defined(USART6) + if ((huart->Instance == USART1) || (huart->Instance == USART6)) + { + pclk = HAL_RCC_GetPCLK2Freq(); + } +#else + if (huart->Instance == USART1) + { + pclk = HAL_RCC_GetPCLK2Freq(); + } +#endif /* USART6 */ + else + { + pclk = HAL_RCC_GetPCLK1Freq(); + } + /*-------------------------- USART BRR Configuration ---------------------*/ + if (huart->Init.OverSampling == UART_OVERSAMPLING_8) + { + huart->Instance->BRR = UART_BRR_SAMPLING8(pclk, huart->Init.BaudRate); + } + else + { + huart->Instance->BRR = UART_BRR_SAMPLING16(pclk, huart->Init.BaudRate); + } +} + +/** + * @} + */ + +#endif /* HAL_UART_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + diff --git a/LWIP/App/lwip.c b/LWIP/App/lwip.c new file mode 100644 index 0000000..7d4e583 --- /dev/null +++ b/LWIP/App/lwip.c @@ -0,0 +1,250 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * File Name : LWIP.c + * Description : This file provides initialization code for LWIP + * middleWare. + ****************************************************************************** + * @attention + * + * Copyright (c) 2024 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ *
+ ******************************************************************************
+ */
+/* USER CODE END Header */
+
+/* Includes ------------------------------------------------------------------*/
+#include "lwip.h"
+#include "lwip/init.h"
+#include "lwip/netif.h"
+#if defined ( __CC_ARM ) /* MDK ARM Compiler */
+#include "lwip/sio.h"
+#endif /* MDK ARM Compiler */
+#include "ethernetif.h"
+
+/* USER CODE BEGIN 0 */
+
+/* USER CODE END 0 */
+/* Private function prototypes -----------------------------------------------*/
+static void ethernet_link_status_updated(struct netif *netif);
+static void Ethernet_Link_Periodic_Handle(struct netif *netif);
+/* ETH Variables initialization ----------------------------------------------*/
+void Error_Handler(void);
+
+/* USER CODE BEGIN 1 */
+
+/* USER CODE END 1 */
+uint32_t EthernetLinkTimer;
+
+/* Variables Initialization */
+struct netif gnetif;
+ip4_addr_t ipaddr;
+ip4_addr_t netmask;
+ip4_addr_t gw;
+uint8_t IP_ADDRESS[4];
+uint8_t NETMASK_ADDRESS[4];
+uint8_t GATEWAY_ADDRESS[4];
+
+/* USER CODE BEGIN 2 */
+
+/* USER CODE END 2 */
+
+/**
+  * LwIP initialization function
+  */
+void MX_LWIP_Init(void)
+{
+  /* IP addresses initialization */
+  IP_ADDRESS[0] = 192;
+  IP_ADDRESS[1] = 168;
+  IP_ADDRESS[2] = 1;
+  IP_ADDRESS[3] = 100;
+  NETMASK_ADDRESS[0] = 255;
+  NETMASK_ADDRESS[1] = 255;
+  NETMASK_ADDRESS[2] = 255;
+  NETMASK_ADDRESS[3] = 0;
+  GATEWAY_ADDRESS[0] = 192;
+  GATEWAY_ADDRESS[1] = 168;
+  GATEWAY_ADDRESS[2] = 1;
+  GATEWAY_ADDRESS[3] = 29;
+
+/* USER CODE BEGIN IP_ADDRESSES */
+/* USER CODE END IP_ADDRESSES */
+
+  /* Initialize the LwIP stack without RTOS */
+  lwip_init();
+
+  /* IP addresses initialization without DHCP (IPv4) */
+  IP4_ADDR(&ipaddr, IP_ADDRESS[0], IP_ADDRESS[1], IP_ADDRESS[2], IP_ADDRESS[3]);
+  IP4_ADDR(&netmask, NETMASK_ADDRESS[0], NETMASK_ADDRESS[1] , NETMASK_ADDRESS[2], NETMASK_ADDRESS[3]);
+  IP4_ADDR(&gw, GATEWAY_ADDRESS[0], GATEWAY_ADDRESS[1], GATEWAY_ADDRESS[2], GATEWAY_ADDRESS[3]);
+
+  /* add the network interface (IPv4/IPv6) without RTOS */
+  netif_add(&gnetif, &ipaddr, &netmask, &gw, NULL, &ethernetif_init, &ethernet_input);
+
+  /* Registers the default network interface */
+  netif_set_default(&gnetif);
+
+  /* We must always bring the network interface up connection or not... */
+  netif_set_up(&gnetif);
+
+  /* Set the link callback function, this function is called on change of link status*/
+  netif_set_link_callback(&gnetif, ethernet_link_status_updated);
+
+/* USER CODE BEGIN 3 */
+
+/* USER CODE END 3 */
+}
+
+#ifdef USE_OBSOLETE_USER_CODE_SECTION_4
+/* Kept to help code migration. (See new 4_1, 4_2... sections) */
+/* Avoid to use this user section which will become obsolete. */
+/* USER CODE BEGIN 4 */
+/* USER CODE END 4 */
+#endif
+
+/**
+  * @brief  Ethernet Link periodic check
+  * @param  netif
+  * @retval None
+  */
+static void Ethernet_Link_Periodic_Handle(struct netif *netif)
+{
+/* USER CODE BEGIN 4_4_1 */
+/* USER CODE END 4_4_1 */
+
+  /* Ethernet Link every 100ms */
+  if (HAL_GetTick() - EthernetLinkTimer >= 100)
+  {
+    EthernetLinkTimer = HAL_GetTick();
+    ethernet_link_check_state(netif);
+  }
+/* USER CODE BEGIN 4_4 */
+/* USER CODE END 4_4 */
+}
+
+/**
+  * ----------------------------------------------------------------------
+  * Function given to help user to continue LwIP Initialization
+  * Up to user to complete or change this function ...
+ * Up to user to call this function in main.c in while (1) of main(void) + *----------------------------------------------------------------------- + * Read a received packet from the Ethernet buffers + * Send it to the lwIP stack for handling + * Handle timeouts if LWIP_TIMERS is set and without RTOS + * Handle the llink status if LWIP_NETIF_LINK_CALLBACK is set and without RTOS + */ +void MX_LWIP_Process(void) +{ +/* USER CODE BEGIN 4_1 */ +/* USER CODE END 4_1 */ + ethernetif_input(&gnetif); + +/* USER CODE BEGIN 4_2 */ +/* USER CODE END 4_2 */ + /* Handle timeouts */ + sys_check_timeouts(); + + Ethernet_Link_Periodic_Handle(&gnetif); + +/* USER CODE BEGIN 4_3 */ +/* USER CODE END 4_3 */ +} + +/** + * @brief Notify the User about the network interface config status + * @param netif: the network interface + * @retval None + */ +static void ethernet_link_status_updated(struct netif *netif) +{ + if (netif_is_up(netif)) + { +/* USER CODE BEGIN 5 */ +/* USER CODE END 5 */ + } + else /* netif is down */ + { +/* USER CODE BEGIN 6 */ +/* USER CODE END 6 */ + } +} + +#if defined ( __CC_ARM ) /* MDK ARM Compiler */ +/** + * Opens a serial device for communication. + * + * @param devnum device number + * @return handle to serial device if successful, NULL otherwise + */ +sio_fd_t sio_open(u8_t devnum) +{ + sio_fd_t sd; + +/* USER CODE BEGIN 7 */ + sd = 0; // dummy code +/* USER CODE END 7 */ + + return sd; +} + +/** + * Sends a single character to the serial device. + * + * @param c character to send + * @param fd serial device handle + * + * @note This function will block until the character can be sent. + */ +void sio_send(u8_t c, sio_fd_t fd) +{ +/* USER CODE BEGIN 8 */ +/* USER CODE END 8 */ +} + +/** + * Reads from the serial device. + * + * @param fd serial device handle + * @param data pointer to data buffer for receiving + * @param len maximum length (in bytes) of data to receive + * @return number of bytes actually received - may be 0 if aborted by sio_read_abort + * + * @note This function will block until data can be received. The blocking + * can be cancelled by calling sio_read_abort(). + */ +u32_t sio_read(sio_fd_t fd, u8_t *data, u32_t len) +{ + u32_t recved_bytes; + +/* USER CODE BEGIN 9 */ + recved_bytes = 0; // dummy code +/* USER CODE END 9 */ + return recved_bytes; +} + +/** + * Tries to read from the serial device. Same as sio_read but returns + * immediately if no data is available and never blocks. + * + * @param fd serial device handle + * @param data pointer to data buffer for receiving + * @param len maximum length (in bytes) of data to receive + * @return number of bytes actually received + */ +u32_t sio_tryread(sio_fd_t fd, u8_t *data, u32_t len) +{ + u32_t recved_bytes; + +/* USER CODE BEGIN 10 */ + recved_bytes = 0; // dummy code +/* USER CODE END 10 */ + return recved_bytes; +} +#endif /* MDK ARM Compiler */ + diff --git a/LWIP/App/lwip.h b/LWIP/App/lwip.h new file mode 100644 index 0000000..b14adc0 --- /dev/null +++ b/LWIP/App/lwip.h @@ -0,0 +1,76 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * File Name : LWIP.h + * Description : This file provides code for the configuration + * of the LWIP. + ****************************************************************************** + * @attention + * + * Copyright (c) 2024 STMicroelectronics. + * All rights reserved. 
+ *
+ * This software is licensed under terms that can be found in the LICENSE file
+ * in the root directory of this software component.
+ * If no LICENSE file comes with this software, it is provided AS-IS.
+ *
+ *************************************************************************
+
+ */
+/* USER CODE END Header */
+/* Define to prevent recursive inclusion -------------------------------------*/
+#ifndef __mx_lwip_H
+#define __mx_lwip_H
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/* Includes ------------------------------------------------------------------*/
+#include "lwip/opt.h"
+#include "lwip/mem.h"
+#include "lwip/memp.h"
+#include "netif/etharp.h"
+#include "lwip/dhcp.h"
+#include "lwip/netif.h"
+#include "lwip/timeouts.h"
+#include "ethernetif.h"
+
+/* Includes for RTOS ---------------------------------------------------------*/
+#if WITH_RTOS
+#include "lwip/tcpip.h"
+#endif /* WITH_RTOS */
+
+/* USER CODE BEGIN 0 */
+
+/* USER CODE END 0 */
+
+/* Global Variables ----------------------------------------------------------*/
+extern ETH_HandleTypeDef heth;
+
+/* LWIP init function */
+void MX_LWIP_Init(void);
+
+#if !WITH_RTOS
+/* USER CODE BEGIN 1 */
+/* Function defined in lwip.c to:
+ * - Read a received packet from the Ethernet buffers
+ * - Send it to the lwIP stack for handling
+ * - Handle timeouts if NO_SYS_NO_TIMERS not set
+ */
+void MX_LWIP_Process(void);
+
+/* USER CODE END 1 */
+#endif /* WITH_RTOS */
+
+#ifdef __cplusplus
+}
+#endif
+#endif /*__ mx_lwip_H */
+
+/**
+  * @}
+  */
+
+/**
+  * @}
+  */
diff --git a/LWIP/Target/ethernetif.c b/LWIP/Target/ethernetif.c
new file mode 100644
index 0000000..703d70c
--- /dev/null
+++ b/LWIP/Target/ethernetif.c
@@ -0,0 +1,747 @@
+/* USER CODE BEGIN Header */
+/**
+ ******************************************************************************
+ * File Name : ethernetif.c
+ * Description : This file provides code for the configuration
+ * of the ethernetif.c MiddleWare.
+ ******************************************************************************
+ * @attention
+ *
+ * Copyright (c) 2024 STMicroelectronics.
+ * All rights reserved.
+ *
+ * This software is licensed under terms that can be found in the LICENSE file
+ * in the root directory of this software component.
+ * If no LICENSE file comes with this software, it is provided AS-IS.
+ *
+ ******************************************************************************
+ */
+/* USER CODE END Header */
+
+/* Includes ------------------------------------------------------------------*/
+#include "main.h"
+#include "lwip/opt.h"
+#include "lwip/mem.h"
+#include "lwip/memp.h"
+#include "lwip/timeouts.h"
+#include "netif/ethernet.h"
+#include "netif/etharp.h"
+#include "lwip/ethip6.h"
+#include "ethernetif.h"
+#include "lan8742.h"
+#include <string.h>
+
+/* Within 'USER CODE' section, code will be kept by default at each generation */
+/* USER CODE BEGIN 0 */
+
+/* USER CODE END 0 */
+
+/* Private define ------------------------------------------------------------*/
+
+/* Network interface name */
+#define IFNAME0 's'
+#define IFNAME1 't'
+
+/* ETH Setting */
+#define ETH_DMA_TRANSMIT_TIMEOUT ( 20U )
+#define ETH_TX_BUFFER_MAX ((ETH_TX_DESC_CNT) * 2U)
+
+/* USER CODE BEGIN 1 */
+
+/* USER CODE END 1 */
+
+/* Private variables ---------------------------------------------------------*/
+/*
+@Note: This interface is implemented to operate in zero-copy mode only:
+ - Rx Buffers will be allocated from LwIP stack Rx memory pool,
+ then passed to ETH HAL driver.
+ - Tx Buffers will be allocated from LwIP stack memory heap, + then passed to ETH HAL driver. + +@Notes: + 1.a. ETH DMA Rx descriptors must be contiguous, the default count is 4, + to customize it please redefine ETH_RX_DESC_CNT in ETH GUI (Rx Descriptor Length) + so that updated value will be generated in stm32xxxx_hal_conf.h + 1.b. ETH DMA Tx descriptors must be contiguous, the default count is 4, + to customize it please redefine ETH_TX_DESC_CNT in ETH GUI (Tx Descriptor Length) + so that updated value will be generated in stm32xxxx_hal_conf.h + + 2.a. Rx Buffers number must be between ETH_RX_DESC_CNT and 2*ETH_RX_DESC_CNT + 2.b. Rx Buffers must have the same size: ETH_RX_BUF_SIZE, this value must + passed to ETH DMA in the init field (heth.Init.RxBuffLen) + 2.c The RX Ruffers addresses and sizes must be properly defined to be aligned + to L1-CACHE line size (32 bytes). +*/ + +/* Data Type Definitions */ +typedef enum +{ + RX_ALLOC_OK = 0x00, + RX_ALLOC_ERROR = 0x01 +} RxAllocStatusTypeDef; + +typedef struct +{ + struct pbuf_custom pbuf_custom; + uint8_t buff[(ETH_RX_BUF_SIZE + 31) & ~31] __ALIGNED(32); +} RxBuff_t; + +/* Memory Pool Declaration */ +#define ETH_RX_BUFFER_CNT 12U +LWIP_MEMPOOL_DECLARE(RX_POOL, ETH_RX_BUFFER_CNT, sizeof(RxBuff_t), "Zero-copy RX PBUF pool"); + +/* Variable Definitions */ +static uint8_t RxAllocStatus; + +ETH_DMADescTypeDef DMARxDscrTab[ETH_RX_DESC_CNT]; /* Ethernet Rx DMA Descriptors */ +ETH_DMADescTypeDef DMATxDscrTab[ETH_TX_DESC_CNT]; /* Ethernet Tx DMA Descriptors */ + +/* USER CODE BEGIN 2 */ + +/* USER CODE END 2 */ + +/* Global Ethernet handle */ +ETH_HandleTypeDef heth; +ETH_TxPacketConfig TxConfig; + +/* Private function prototypes -----------------------------------------------*/ +int32_t ETH_PHY_IO_Init(void); +int32_t ETH_PHY_IO_DeInit (void); +int32_t ETH_PHY_IO_ReadReg(uint32_t DevAddr, uint32_t RegAddr, uint32_t *pRegVal); +int32_t ETH_PHY_IO_WriteReg(uint32_t DevAddr, uint32_t RegAddr, uint32_t RegVal); +int32_t ETH_PHY_IO_GetTick(void); + +lan8742_Object_t LAN8742; +lan8742_IOCtx_t LAN8742_IOCtx = {ETH_PHY_IO_Init, + ETH_PHY_IO_DeInit, + ETH_PHY_IO_WriteReg, + ETH_PHY_IO_ReadReg, + ETH_PHY_IO_GetTick}; + +/* USER CODE BEGIN 3 */ + +/* USER CODE END 3 */ + +/* Private functions ---------------------------------------------------------*/ +void pbuf_free_custom(struct pbuf *p); + +/* USER CODE BEGIN 4 */ + +/* USER CODE END 4 */ + +/******************************************************************************* + LL Driver Interface ( LwIP stack --> ETH) +*******************************************************************************/ +/** + * @brief In this function, the hardware should be initialized. + * Called from ethernetif_init(). 
+ * + * @param netif the already initialized lwip network interface structure + * for this ethernetif + */ +static void low_level_init(struct netif *netif) +{ + HAL_StatusTypeDef hal_eth_init_status = HAL_OK; + /* Start ETH HAL Init */ + + uint8_t MACAddr[6] ; + heth.Instance = ETH; + MACAddr[0] = 0x00; + MACAddr[1] = 0x80; + MACAddr[2] = 0xE1; + MACAddr[3] = 0x00; + MACAddr[4] = 0x00; + MACAddr[5] = 0x00; + heth.Init.MACAddr = &MACAddr[0]; + heth.Init.MediaInterface = HAL_ETH_RMII_MODE; + heth.Init.TxDesc = DMATxDscrTab; + heth.Init.RxDesc = DMARxDscrTab; + heth.Init.RxBuffLen = 1536; + + /* USER CODE BEGIN MACADDRESS */ + + /* USER CODE END MACADDRESS */ + + hal_eth_init_status = HAL_ETH_Init(&heth); + + memset(&TxConfig, 0 , sizeof(ETH_TxPacketConfig)); + TxConfig.Attributes = ETH_TX_PACKETS_FEATURES_CSUM | ETH_TX_PACKETS_FEATURES_CRCPAD; + TxConfig.ChecksumCtrl = ETH_CHECKSUM_IPHDR_PAYLOAD_INSERT_PHDR_CALC; + TxConfig.CRCPadCtrl = ETH_CRC_PAD_INSERT; + + /* End ETH HAL Init */ + + /* Initialize the RX POOL */ + LWIP_MEMPOOL_INIT(RX_POOL); + +#if LWIP_ARP || LWIP_ETHERNET + + /* set MAC hardware address length */ + netif->hwaddr_len = ETH_HWADDR_LEN; + + /* set MAC hardware address */ + netif->hwaddr[0] = heth.Init.MACAddr[0]; + netif->hwaddr[1] = heth.Init.MACAddr[1]; + netif->hwaddr[2] = heth.Init.MACAddr[2]; + netif->hwaddr[3] = heth.Init.MACAddr[3]; + netif->hwaddr[4] = heth.Init.MACAddr[4]; + netif->hwaddr[5] = heth.Init.MACAddr[5]; + + /* maximum transfer unit */ + netif->mtu = ETH_MAX_PAYLOAD; + + /* Accept broadcast address and ARP traffic */ + /* don't set NETIF_FLAG_ETHARP if this device is not an ethernet one */ + #if LWIP_ARP + netif->flags |= NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP; + #else + netif->flags |= NETIF_FLAG_BROADCAST; + #endif /* LWIP_ARP */ + +/* USER CODE BEGIN PHY_PRE_CONFIG */ + +/* USER CODE END PHY_PRE_CONFIG */ + /* Set PHY IO functions */ + LAN8742_RegisterBusIO(&LAN8742, &LAN8742_IOCtx); + + /* Initialize the LAN8742 ETH PHY */ + if(LAN8742_Init(&LAN8742) != LAN8742_STATUS_OK) + { + netif_set_link_down(netif); + netif_set_down(netif); + return; + } + + if (hal_eth_init_status == HAL_OK) + { + /* Get link state */ + ethernet_link_check_state(netif); + } + else + { + Error_Handler(); + } +#endif /* LWIP_ARP || LWIP_ETHERNET */ + +/* USER CODE BEGIN LOW_LEVEL_INIT */ + +/* USER CODE END LOW_LEVEL_INIT */ +} + +/** + * @brief This function should do the actual transmission of the packet. The packet is + * contained in the pbuf that is passed to the function. This pbuf + * might be chained. + * + * @param netif the lwip network interface structure for this ethernetif + * @param p the MAC packet to send (e.g. IP packet including MAC addresses and type) + * @return ERR_OK if the packet could be sent + * an err_t value if the packet couldn't be sent + * + * @note Returning ERR_MEM here if a DMA queue of your MAC is full can lead to + * strange results. You might consider waiting for space in the DMA queue + * to become available since the stack doesn't retry to send a packet + * dropped because of memory failure (except for the TCP timers). 
+ */ + +static err_t low_level_output(struct netif *netif, struct pbuf *p) +{ + uint32_t i = 0U; + struct pbuf *q = NULL; + err_t errval = ERR_OK; + ETH_BufferTypeDef Txbuffer[ETH_TX_DESC_CNT] = {0}; + + memset(Txbuffer, 0 , ETH_TX_DESC_CNT*sizeof(ETH_BufferTypeDef)); + + for(q = p; q != NULL; q = q->next) + { + if(i >= ETH_TX_DESC_CNT) + return ERR_IF; + + Txbuffer[i].buffer = q->payload; + Txbuffer[i].len = q->len; + + if(i>0) + { + Txbuffer[i-1].next = &Txbuffer[i]; + } + + if(q->next == NULL) + { + Txbuffer[i].next = NULL; + } + + i++; + } + + TxConfig.Length = p->tot_len; + TxConfig.TxBuffer = Txbuffer; + TxConfig.pData = p; + + HAL_ETH_Transmit(&heth, &TxConfig, ETH_DMA_TRANSMIT_TIMEOUT); + + return errval; +} + +/** + * @brief Should allocate a pbuf and transfer the bytes of the incoming + * packet from the interface into the pbuf. + * + * @param netif the lwip network interface structure for this ethernetif + * @return a pbuf filled with the received packet (including MAC header) + * NULL on memory error + */ +static struct pbuf * low_level_input(struct netif *netif) +{ + struct pbuf *p = NULL; + + if(RxAllocStatus == RX_ALLOC_OK) + { + HAL_ETH_ReadData(&heth, (void **)&p); + } + + return p; +} + +/** + * @brief This function should be called when a packet is ready to be read + * from the interface. It uses the function low_level_input() that + * should handle the actual reception of bytes from the network + * interface. Then the type of the received packet is determined and + * the appropriate input function is called. + * + * @param netif the lwip network interface structure for this ethernetif + */ +void ethernetif_input(struct netif *netif) +{ + struct pbuf *p = NULL; + + do + { + p = low_level_input( netif ); + if (p != NULL) + { + if (netif->input( p, netif) != ERR_OK ) + { + pbuf_free(p); + } + } + } while(p!=NULL); +} + +#if !LWIP_ARP +/** + * This function has to be completed by user in case of ARP OFF. + * + * @param netif the lwip network interface structure for this ethernetif + * @return ERR_OK if ... + */ +static err_t low_level_output_arp_off(struct netif *netif, struct pbuf *q, const ip4_addr_t *ipaddr) +{ + err_t errval; + errval = ERR_OK; + +/* USER CODE BEGIN 5 */ + +/* USER CODE END 5 */ + + return errval; + +} +#endif /* LWIP_ARP */ + +/** + * @brief Should be called at the beginning of the program to set up the + * network interface. It calls the function low_level_init() to do the + * actual setup of the hardware. + * + * This function should be passed as a parameter to netif_add(). + * + * @param netif the lwip network interface structure for this ethernetif + * @return ERR_OK if the loopif is initialized + * ERR_MEM if private data couldn't be allocated + * any other err_t on error + */ +err_t ethernetif_init(struct netif *netif) +{ + LWIP_ASSERT("netif != NULL", (netif != NULL)); + +#if LWIP_NETIF_HOSTNAME + /* Initialize interface hostname */ + netif->hostname = "lwip"; +#endif /* LWIP_NETIF_HOSTNAME */ + + /* + * Initialize the snmp variables and counters inside the struct netif. + * The last argument should be replaced with your link speed, in units + * of bits per second. + */ + // MIB2_INIT_NETIF(netif, snmp_ifType_ethernet_csmacd, LINK_SPEED_OF_YOUR_NETIF_IN_BPS); + + netif->name[0] = IFNAME0; + netif->name[1] = IFNAME1; + /* We directly use etharp_output() here to save a function call. + * You can instead declare your own function an call etharp_output() + * from it if you have to do some checks before sending (e.g. if link + * is available...) 
*/ + +#if LWIP_IPV4 +#if LWIP_ARP || LWIP_ETHERNET +#if LWIP_ARP + netif->output = etharp_output; +#else + /* The user should write its own code in low_level_output_arp_off function */ + netif->output = low_level_output_arp_off; +#endif /* LWIP_ARP */ +#endif /* LWIP_ARP || LWIP_ETHERNET */ +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 + netif->output_ip6 = ethip6_output; +#endif /* LWIP_IPV6 */ + + netif->linkoutput = low_level_output; + + /* initialize the hardware */ + low_level_init(netif); + + return ERR_OK; +} + +/** + * @brief Custom Rx pbuf free callback + * @param pbuf: pbuf to be freed + * @retval None + */ +void pbuf_free_custom(struct pbuf *p) +{ + struct pbuf_custom* custom_pbuf = (struct pbuf_custom*)p; + LWIP_MEMPOOL_FREE(RX_POOL, custom_pbuf); + + /* If the Rx Buffer Pool was exhausted, signal the ethernetif_input task to + * call HAL_ETH_GetRxDataBuffer to rebuild the Rx descriptors. */ + + if (RxAllocStatus == RX_ALLOC_ERROR) + { + RxAllocStatus = RX_ALLOC_OK; + } +} + +/* USER CODE BEGIN 6 */ + +/** +* @brief Returns the current time in milliseconds +* when LWIP_TIMERS == 1 and NO_SYS == 1 +* @param None +* @retval Current Time value +*/ +u32_t sys_now(void) +{ + return HAL_GetTick(); +} + +/* USER CODE END 6 */ + +/** + * @brief Initializes the ETH MSP. + * @param ethHandle: ETH handle + * @retval None + */ + +void HAL_ETH_MspInit(ETH_HandleTypeDef* ethHandle) +{ + GPIO_InitTypeDef GPIO_InitStruct = {0}; + if(ethHandle->Instance==ETH) + { + /* USER CODE BEGIN ETH_MspInit 0 */ + + /* USER CODE END ETH_MspInit 0 */ + /* Enable Peripheral clock */ + __HAL_RCC_ETH_CLK_ENABLE(); + + __HAL_RCC_GPIOC_CLK_ENABLE(); + __HAL_RCC_GPIOA_CLK_ENABLE(); + __HAL_RCC_GPIOB_CLK_ENABLE(); + /**ETH GPIO Configuration + PC1 ------> ETH_MDC + PA1 ------> ETH_REF_CLK + PA2 ------> ETH_MDIO + PA7 ------> ETH_CRS_DV + PC4 ------> ETH_RXD0 + PC5 ------> ETH_RXD1 + PB11 ------> ETH_TX_EN + PB12 ------> ETH_TXD0 + PB13 ------> ETH_TXD1 + */ + GPIO_InitStruct.Pin = GPIO_PIN_1|GPIO_PIN_4|GPIO_PIN_5; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF11_ETH; + HAL_GPIO_Init(GPIOC, &GPIO_InitStruct); + + GPIO_InitStruct.Pin = GPIO_PIN_1|GPIO_PIN_2|GPIO_PIN_7; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF11_ETH; + HAL_GPIO_Init(GPIOA, &GPIO_InitStruct); + + GPIO_InitStruct.Pin = GPIO_PIN_11|GPIO_PIN_12|GPIO_PIN_13; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF11_ETH; + HAL_GPIO_Init(GPIOB, &GPIO_InitStruct); + + /* Peripheral interrupt init */ + HAL_NVIC_SetPriority(ETH_IRQn, 0, 0); + HAL_NVIC_EnableIRQ(ETH_IRQn); + /* USER CODE BEGIN ETH_MspInit 1 */ + + /* USER CODE END ETH_MspInit 1 */ + } +} + +void HAL_ETH_MspDeInit(ETH_HandleTypeDef* ethHandle) +{ + if(ethHandle->Instance==ETH) + { + /* USER CODE BEGIN ETH_MspDeInit 0 */ + + /* USER CODE END ETH_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_ETH_CLK_DISABLE(); + + /**ETH GPIO Configuration + PC1 ------> ETH_MDC + PA1 ------> ETH_REF_CLK + PA2 ------> ETH_MDIO + PA7 ------> ETH_CRS_DV + PC4 ------> ETH_RXD0 + PC5 ------> ETH_RXD1 + PB11 ------> ETH_TX_EN + PB12 ------> ETH_TXD0 + PB13 ------> ETH_TXD1 + */ + HAL_GPIO_DeInit(GPIOC, GPIO_PIN_1|GPIO_PIN_4|GPIO_PIN_5); + + 
HAL_GPIO_DeInit(GPIOA, GPIO_PIN_1|GPIO_PIN_2|GPIO_PIN_7); + + HAL_GPIO_DeInit(GPIOB, GPIO_PIN_11|GPIO_PIN_12|GPIO_PIN_13); + + /* Peripheral interrupt Deinit*/ + HAL_NVIC_DisableIRQ(ETH_IRQn); + + /* USER CODE BEGIN ETH_MspDeInit 1 */ + + /* USER CODE END ETH_MspDeInit 1 */ + } +} + +/******************************************************************************* + PHI IO Functions +*******************************************************************************/ +/** + * @brief Initializes the MDIO interface GPIO and clocks. + * @param None + * @retval 0 if OK, -1 if ERROR + */ +int32_t ETH_PHY_IO_Init(void) +{ + /* We assume that MDIO GPIO configuration is already done + in the ETH_MspInit() else it should be done here + */ + + /* Configure the MDIO Clock */ + HAL_ETH_SetMDIOClockRange(&heth); + + return 0; +} + +/** + * @brief De-Initializes the MDIO interface . + * @param None + * @retval 0 if OK, -1 if ERROR + */ +int32_t ETH_PHY_IO_DeInit (void) +{ + return 0; +} + +/** + * @brief Read a PHY register through the MDIO interface. + * @param DevAddr: PHY port address + * @param RegAddr: PHY register address + * @param pRegVal: pointer to hold the register value + * @retval 0 if OK -1 if Error + */ +int32_t ETH_PHY_IO_ReadReg(uint32_t DevAddr, uint32_t RegAddr, uint32_t *pRegVal) +{ + if(HAL_ETH_ReadPHYRegister(&heth, DevAddr, RegAddr, pRegVal) != HAL_OK) + { + return -1; + } + + return 0; +} + +/** + * @brief Write a value to a PHY register through the MDIO interface. + * @param DevAddr: PHY port address + * @param RegAddr: PHY register address + * @param RegVal: Value to be written + * @retval 0 if OK -1 if Error + */ +int32_t ETH_PHY_IO_WriteReg(uint32_t DevAddr, uint32_t RegAddr, uint32_t RegVal) +{ + if(HAL_ETH_WritePHYRegister(&heth, DevAddr, RegAddr, RegVal) != HAL_OK) + { + return -1; + } + + return 0; +} + +/** + * @brief Get the time in millisecons used for internal PHY driver process. + * @retval Time value + */ +int32_t ETH_PHY_IO_GetTick(void) +{ + return HAL_GetTick(); +} + +/** + * @brief Check the ETH link state then update ETH driver and netif link accordingly. 
+ * @retval None + */ +void ethernet_link_check_state(struct netif *netif) +{ + ETH_MACConfigTypeDef MACConf = {0}; + int32_t PHYLinkState = 0; + uint32_t linkchanged = 0U, speed = 0U, duplex = 0U; + + PHYLinkState = LAN8742_GetLinkState(&LAN8742); + + if(netif_is_link_up(netif) && (PHYLinkState <= LAN8742_STATUS_LINK_DOWN)) + { + HAL_ETH_Stop_IT(&heth); + netif_set_down(netif); + netif_set_link_down(netif); + } + else if(!netif_is_link_up(netif) && (PHYLinkState > LAN8742_STATUS_LINK_DOWN)) + { + switch (PHYLinkState) + { + case LAN8742_STATUS_100MBITS_FULLDUPLEX: + duplex = ETH_FULLDUPLEX_MODE; + speed = ETH_SPEED_100M; + linkchanged = 1; + break; + case LAN8742_STATUS_100MBITS_HALFDUPLEX: + duplex = ETH_HALFDUPLEX_MODE; + speed = ETH_SPEED_100M; + linkchanged = 1; + break; + case LAN8742_STATUS_10MBITS_FULLDUPLEX: + duplex = ETH_FULLDUPLEX_MODE; + speed = ETH_SPEED_10M; + linkchanged = 1; + break; + case LAN8742_STATUS_10MBITS_HALFDUPLEX: + duplex = ETH_HALFDUPLEX_MODE; + speed = ETH_SPEED_10M; + linkchanged = 1; + break; + default: + break; + } + + if(linkchanged) + { + /* Get MAC Config MAC */ + HAL_ETH_GetMACConfig(&heth, &MACConf); + MACConf.DuplexMode = duplex; + MACConf.Speed = speed; + HAL_ETH_SetMACConfig(&heth, &MACConf); + HAL_ETH_Start_IT(&heth); + netif_set_up(netif); + netif_set_link_up(netif); + } + } + +} + +void HAL_ETH_RxAllocateCallback(uint8_t **buff) +{ +/* USER CODE BEGIN HAL ETH RxAllocateCallback */ + struct pbuf_custom *p = LWIP_MEMPOOL_ALLOC(RX_POOL); + if (p) + { + /* Get the buff from the struct pbuf address. */ + *buff = (uint8_t *)p + offsetof(RxBuff_t, buff); + p->custom_free_function = pbuf_free_custom; + /* Initialize the struct pbuf. + * This must be performed whenever a buffer's allocated because it may be + * changed by lwIP or the app, e.g., pbuf_free decrements ref. */ + pbuf_alloced_custom(PBUF_RAW, 0, PBUF_REF, p, *buff, ETH_RX_BUF_SIZE); + } + else + { + RxAllocStatus = RX_ALLOC_ERROR; + *buff = NULL; + } +/* USER CODE END HAL ETH RxAllocateCallback */ +} + +void HAL_ETH_RxLinkCallback(void **pStart, void **pEnd, uint8_t *buff, uint16_t Length) +{ +/* USER CODE BEGIN HAL ETH RxLinkCallback */ + + struct pbuf **ppStart = (struct pbuf **)pStart; + struct pbuf **ppEnd = (struct pbuf **)pEnd; + struct pbuf *p = NULL; + + /* Get the struct pbuf from the buff address. */ + p = (struct pbuf *)(buff - offsetof(RxBuff_t, buff)); + p->next = NULL; + p->tot_len = 0; + p->len = Length; + + /* Chain the buffer. */ + if (!*ppStart) + { + /* The first buffer of the packet. */ + *ppStart = p; + } + else + { + /* Chain the buffer to the end of the packet. */ + (*ppEnd)->next = p; + } + *ppEnd = p; + + /* Update the total length of all the buffers of the chain. Each pbuf in the chain should have its tot_len + * set to its own length, plus the length of all the following pbufs in the chain. 
*/ + for (p = *ppStart; p != NULL; p = p->next) + { + p->tot_len += Length; + } + +/* USER CODE END HAL ETH RxLinkCallback */ +} + +void HAL_ETH_TxFreeCallback(uint32_t * buff) +{ +/* USER CODE BEGIN HAL ETH TxFreeCallback */ + + pbuf_free((struct pbuf *)buff); + +/* USER CODE END HAL ETH TxFreeCallback */ +} + +/* USER CODE BEGIN 8 */ + +/* USER CODE END 8 */ + diff --git a/LWIP/Target/ethernetif.h b/LWIP/Target/ethernetif.h new file mode 100644 index 0000000..230fd62 --- /dev/null +++ b/LWIP/Target/ethernetif.h @@ -0,0 +1,45 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * File Name : ethernetif.h + * Description : This file provides initialization code for LWIP + * middleWare. + ****************************************************************************** + * @attention + * + * Copyright (c) 2024 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +#ifndef __ETHERNETIF_H__ +#define __ETHERNETIF_H__ + +#include "lwip/err.h" +#include "lwip/netif.h" + +/* Within 'USER CODE' section, code will be kept by default at each generation */ +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +/* Exported functions ------------------------------------------------------- */ +err_t ethernetif_init(struct netif *netif); + +void ethernetif_input(struct netif *netif); +void ethernet_link_check_state(struct netif *netif); + +void Error_Handler(void); +u32_t sys_jiffies(void); +u32_t sys_now(void); + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ +#endif diff --git a/LWIP/Target/lwipopts.h b/LWIP/Target/lwipopts.h new file mode 100644 index 0000000..a421c37 --- /dev/null +++ b/LWIP/Target/lwipopts.h @@ -0,0 +1,108 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * File Name : Target/lwipopts.h + * Description : This file overrides LwIP stack default configuration + * done in opt.h file. + ****************************************************************************** + * @attention + * + * Copyright (c) 2024 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Define to prevent recursive inclusion --------------------------------------*/ +#ifndef __LWIPOPTS__H__ +#define __LWIPOPTS__H__ + +#include "main.h" + +/*-----------------------------------------------------------------------------*/ +/* Current version of LwIP supported by CubeMx: 2.1.2 -*/ +/*-----------------------------------------------------------------------------*/ + +/* Within 'USER CODE' section, code will be kept by default at each generation */ +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +#ifdef __cplusplus + extern "C" { +#endif + +/* STM32CubeMX Specific Parameters (not defined in opt.h) ---------------------*/ +/* Parameters set in STM32CubeMX LwIP Configuration GUI -*/ +/*----- WITH_RTOS disabled (Since FREERTOS is not set) -----*/ +#define WITH_RTOS 0 +/*----- CHECKSUM_BY_HARDWARE enabled -----*/ +#define CHECKSUM_BY_HARDWARE 1 +/*-----------------------------------------------------------------------------*/ + +/* LwIP Stack Parameters (modified compared to initialization value in opt.h) -*/ +/* Parameters set in STM32CubeMX LwIP Configuration GUI -*/ +/*----- Default Value for LWIP_UDP: 1 ---*/ +#define LWIP_UDP 0 +/*----- Value in opt.h for NO_SYS: 0 -----*/ +#define NO_SYS 1 +/*----- Value in opt.h for SYS_LIGHTWEIGHT_PROT: 1 -----*/ +#define SYS_LIGHTWEIGHT_PROT 0 +/*----- Value in opt.h for MEM_ALIGNMENT: 1 -----*/ +#define MEM_ALIGNMENT 4 +/*----- Value in opt.h for LWIP_ETHERNET: LWIP_ARP || PPPOE_SUPPORT -*/ +#define LWIP_ETHERNET 1 +/*----- Value in opt.h for LWIP_DNS_SECURE: (LWIP_DNS_SECURE_RAND_XID | LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING | LWIP_DNS_SECURE_RAND_SRC_PORT) -*/ +#define LWIP_DNS_SECURE 7 +/*----- Value in opt.h for TCP_SND_QUEUELEN: (4*TCP_SND_BUF + (TCP_MSS - 1))/TCP_MSS -----*/ +#define TCP_SND_QUEUELEN 9 +/*----- Value in opt.h for TCP_SNDLOWAT: LWIP_MIN(LWIP_MAX(((TCP_SND_BUF)/2), (2 * TCP_MSS) + 1), (TCP_SND_BUF) - 1) -*/ +#define TCP_SNDLOWAT 1071 +/*----- Value in opt.h for TCP_SNDQUEUELOWAT: LWIP_MAX(TCP_SND_QUEUELEN)/2, 5) -*/ +#define TCP_SNDQUEUELOWAT 5 +/*----- Value in opt.h for TCP_WND_UPDATE_THRESHOLD: LWIP_MIN(TCP_WND/4, TCP_MSS*4) -----*/ +#define TCP_WND_UPDATE_THRESHOLD 536 +/*----- Value in opt.h for LWIP_NETIF_LINK_CALLBACK: 0 -----*/ +#define LWIP_NETIF_LINK_CALLBACK 1 +/*----- Value in opt.h for LWIP_NETCONN: 1 -----*/ +#define LWIP_NETCONN 0 +/*----- Value in opt.h for LWIP_SOCKET: 1 -----*/ +#define LWIP_SOCKET 0 +/*----- Value in opt.h for RECV_BUFSIZE_DEFAULT: INT_MAX -----*/ +#define RECV_BUFSIZE_DEFAULT 2000000000 +/*----- Value in opt.h for LWIP_STATS: 1 -----*/ +#define LWIP_STATS 0 +/*----- Value in opt.h for CHECKSUM_GEN_IP: 1 -----*/ +#define CHECKSUM_GEN_IP 0 +/*----- Value in opt.h for CHECKSUM_GEN_UDP: 1 -----*/ +#define CHECKSUM_GEN_UDP 0 +/*----- Value in opt.h for CHECKSUM_GEN_TCP: 1 -----*/ +#define CHECKSUM_GEN_TCP 0 +/*----- Value in opt.h for CHECKSUM_GEN_ICMP: 1 -----*/ +#define CHECKSUM_GEN_ICMP 0 +/*----- Value in opt.h for CHECKSUM_GEN_ICMP6: 1 -----*/ +#define CHECKSUM_GEN_ICMP6 0 +/*----- Value in opt.h for CHECKSUM_CHECK_IP: 1 -----*/ +#define CHECKSUM_CHECK_IP 0 +/*----- Value in opt.h for CHECKSUM_CHECK_UDP: 1 -----*/ +#define CHECKSUM_CHECK_UDP 0 +/*----- Value in opt.h for CHECKSUM_CHECK_TCP: 1 -----*/ +#define CHECKSUM_CHECK_TCP 0 +/*----- Value in opt.h for CHECKSUM_CHECK_ICMP: 1 -----*/ +#define CHECKSUM_CHECK_ICMP 0 +/*----- Value in opt.h for 
CHECKSUM_CHECK_ICMP6: 1 -----*/ +#define CHECKSUM_CHECK_ICMP6 0 +/*-----------------------------------------------------------------------------*/ +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +#ifdef __cplusplus +} +#endif +#endif /*__LWIPOPTS__H__ */ diff --git a/MDK-ARM/EventRecorderStub.scvd b/MDK-ARM/EventRecorderStub.scvd new file mode 100644 index 0000000..2956b29 --- /dev/null +++ b/MDK-ARM/EventRecorderStub.scvd @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/MDK-ARM/RTE/_TEST2/RTE_Components.h b/MDK-ARM/RTE/_TEST2/RTE_Components.h new file mode 100644 index 0000000..5d1cac6 --- /dev/null +++ b/MDK-ARM/RTE/_TEST2/RTE_Components.h @@ -0,0 +1,21 @@ + +/* + * Auto generated Run-Time-Environment Configuration File + * *** Do not modify ! *** + * + * Project: 'TEST2' + * Target: 'TEST2' + */ + +#ifndef RTE_COMPONENTS_H +#define RTE_COMPONENTS_H + + +/* + * Define the Device Header File: + */ +#define CMSIS_device_header "stm32f4xx.h" + + + +#endif /* RTE_COMPONENTS_H */ diff --git a/MDK-ARM/TEST2.uvoptx b/MDK-ARM/TEST2.uvoptx new file mode 100644 index 0000000..afece39 --- /dev/null +++ b/MDK-ARM/TEST2.uvoptx @@ -0,0 +1,1811 @@ + + + + 1.0 + +
### uVision Project, (C) Keil Software
+ + + *.c + *.s*; *.src; *.a* + *.obj; *.o + *.lib + *.txt; *.h; *.inc; *.md + *.plm + *.cpp + 0 + + + + 0 + 0 + + + + TEST2 + 0x4 + ARM-ADS + + 11059000 + + 1 + 1 + 0 + 1 + 0 + + + 1 + 65535 + 0 + 0 + 0 + + + 79 + 66 + 8 + + + + 1 + 1 + 1 + 0 + 1 + 1 + 0 + 1 + 0 + 0 + 0 + 0 + + + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 0 + 0 + + + 1 + 0 + 1 + + 18 + + 0 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 0 + 0 + 1 + 0 + 0 + 6 + + + + + + + + + + + STLink\ST-LINKIII-KEIL_SWO.dll + + + + 0 + ARMRTXEVENTFLAGS + -L70 -Z18 -C0 -M0 -T1 + + + 0 + DLGTARM + (1010=75,104,525,661,0)(1007=105,137,292,412,0)(1008=90,121,466,357,0)(1009=-1,-1,-1,-1,0)(1012=-1,-1,-1,-1,0) + + + 0 + ARMDBGFLAGS + + + + 0 + DLGUARM + (105=-1,-1,-1,-1,0) + + + 0 + UL2CM3 + UL2CM3(-S0 -C0 -P0 -FD20000000 -FC1000 -FN1 -FF0STM32F4xx_1024 -FS08000000 -FL0100000 -FP0($$Device:STM32F407VGTx$CMSIS\Flash\STM32F4xx_1024.FLM)) + + + 0 + ST-LINKIII-KEIL_SWO + -U420014000D0000504A51544E -O2254 -SF1800 -C0 -A0 -I0 -HNlocalhost -HP7184 -P1 -N00("ARM CoreSight SW-DP (ARM Core") -D00(2BA01477) -L00(0) -TO131090 -TC10000000 -TT10000000 -TP21 -TDS8000 -TDT0 -TDC1F -TIEFFFFFFFF -TIP8 -FO7 -FD20000000 -FC800 -FN1 -FF0STM32F4xx_1024.FLM -FS08000000 -FL0100000 -FP0($$Device:STM32F407VGTx$CMSIS\Flash\STM32F4xx_1024.FLM) + + + + + 0 + 0 + 198 + 1 +
134230312
+ 0 + 0 + 0 + 0 + 0 + 1 + ../Core/Src/main.c + + \\TEST2\../Core/Src/main.c\198 +
+
+ + + 0 + 1 + hart_flag + + + 1 + 1 + server_pcb,0x0A + + + 2 + 1 + tpcb + + + 3 + 1 + tpcb + + + 4 + 1 + server_pcb1 + + + 5 + 1 + send_buf + + + 6 + 1 + hart1_uart5,0x10 + + + 7 + 1 + huart5,0x10 + + + 8 + 1 + huart2 + + + 9 + 1 + huart4,0x0A + + + 10 + 1 + hdma_uart5_tx + + + 11 + 1 + hart2_uart2 + + + + + 1 + 0 + \\TEST2\../Core/Src/usart.c\huart4.pTxBuffPtr + 0 + + + + 0 + + + 0 + 1 + 1 + 0 + 0 + 0 + 0 + 1 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 1 + 0 + 0 + 0 + + + + 0 + 0 + 0 + + + + + + + + + + 1 + 0 + 0 + 2 + 1800000 + +
+
+ + + Application/MDK-ARM + 1 + 0 + 0 + 0 + + 1 + 1 + 2 + 0 + 0 + 0 + startup_stm32f407xx.s + startup_stm32f407xx.s + 0 + 0 + + + + + Application/User/Core + 1 + 0 + 0 + 0 + + 2 + 2 + 1 + 0 + 0 + 0 + ../Core/Src/main.c + main.c + 0 + 0 + + + 2 + 3 + 1 + 0 + 0 + 0 + ../Core/Src/gpio.c + gpio.c + 0 + 0 + + + 2 + 4 + 1 + 0 + 0 + 0 + ../Core/Src/dma.c + dma.c + 0 + 0 + + + 2 + 5 + 1 + 0 + 0 + 0 + ../Core/Src/spi.c + spi.c + 0 + 0 + + + 2 + 6 + 1 + 0 + 0 + 0 + ../Core/Src/tim.c + tim.c + 0 + 0 + + + 2 + 7 + 1 + 0 + 0 + 0 + ../Core/Src/usart.c + usart.c + 0 + 0 + + + 2 + 8 + 1 + 0 + 0 + 0 + ../Core/Src/stm32f4xx_it.c + stm32f4xx_it.c + 0 + 0 + + + 2 + 9 + 1 + 0 + 0 + 0 + ../Core/Src/stm32f4xx_hal_msp.c + stm32f4xx_hal_msp.c + 0 + 0 + + + + + Application/User/LWIP/Target + 0 + 0 + 0 + 0 + + 3 + 10 + 1 + 0 + 0 + 0 + ../LWIP/Target/ethernetif.c + ethernetif.c + 0 + 0 + + + + + Application/User/LWIP/App + 0 + 0 + 0 + 0 + + 4 + 11 + 1 + 0 + 0 + 0 + ../LWIP/App/lwip.c + lwip.c + 0 + 0 + + + + + Drivers/BSP/Components + 0 + 0 + 0 + 0 + + 5 + 12 + 1 + 0 + 0 + 0 + ../Drivers/BSP/Components/lan8742/lan8742.c + lan8742.c + 0 + 0 + + + + + Drivers/STM32F4xx_HAL_Driver + 0 + 0 + 0 + 0 + + 6 + 13 + 1 + 0 + 0 + 0 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rcc.c + stm32f4xx_hal_rcc.c + 0 + 0 + + + 6 + 14 + 1 + 0 + 0 + 0 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rcc_ex.c + stm32f4xx_hal_rcc_ex.c + 0 + 0 + + + 6 + 15 + 1 + 0 + 0 + 0 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash.c + stm32f4xx_hal_flash.c + 0 + 0 + + + 6 + 16 + 1 + 0 + 0 + 0 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash_ex.c + stm32f4xx_hal_flash_ex.c + 0 + 0 + + + 6 + 17 + 1 + 0 + 0 + 0 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash_ramfunc.c + stm32f4xx_hal_flash_ramfunc.c + 0 + 0 + + + 6 + 18 + 1 + 0 + 0 + 0 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_gpio.c + stm32f4xx_hal_gpio.c + 0 + 0 + + + 6 + 19 + 1 + 0 + 0 + 0 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dma_ex.c + stm32f4xx_hal_dma_ex.c + 0 + 0 + + + 6 + 20 + 1 + 0 + 0 + 0 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dma.c + stm32f4xx_hal_dma.c + 0 + 0 + + + 6 + 21 + 1 + 0 + 0 + 0 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pwr.c + stm32f4xx_hal_pwr.c + 0 + 0 + + + 6 + 22 + 1 + 0 + 0 + 0 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pwr_ex.c + stm32f4xx_hal_pwr_ex.c + 0 + 0 + + + 6 + 23 + 1 + 0 + 0 + 0 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_cortex.c + stm32f4xx_hal_cortex.c + 0 + 0 + + + 6 + 24 + 1 + 0 + 0 + 0 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal.c + stm32f4xx_hal.c + 0 + 0 + + + 6 + 25 + 1 + 0 + 0 + 0 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_exti.c + stm32f4xx_hal_exti.c + 0 + 0 + + + 6 + 26 + 1 + 0 + 0 + 0 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_eth.c + stm32f4xx_hal_eth.c + 0 + 0 + + + 6 + 27 + 1 + 0 + 0 + 0 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_spi.c + stm32f4xx_hal_spi.c + 0 + 0 + + + 6 + 28 + 1 + 0 + 0 + 0 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_tim.c + stm32f4xx_hal_tim.c + 0 + 0 + + + 6 + 29 + 1 + 0 + 0 + 0 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_tim_ex.c + stm32f4xx_hal_tim_ex.c + 0 + 0 + + + 6 + 30 + 1 + 0 + 0 + 0 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_uart.c + stm32f4xx_hal_uart.c + 0 + 0 + + + + + Drivers/CMSIS + 0 + 0 + 0 + 0 + + 7 + 31 + 1 + 0 + 0 + 0 + ../Core/Src/system_stm32f4xx.c + system_stm32f4xx.c + 0 + 0 + + + + + Middlewares/LwIP + 0 + 0 + 0 + 0 + + 8 + 32 + 1 + 0 + 0 + 0 + 
../Middlewares/Third_Party/LwIP/src/netif/ppp/auth.c + auth.c + 0 + 0 + + + 8 + 33 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/ccp.c + ccp.c + 0 + 0 + + + 8 + 34 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/chap_ms.c + chap_ms.c + 0 + 0 + + + 8 + 35 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/chap-md5.c + chap-md5.c + 0 + 0 + + + 8 + 36 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/chap-new.c + chap-new.c + 0 + 0 + + + 8 + 37 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/demand.c + demand.c + 0 + 0 + + + 8 + 38 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/eap.c + eap.c + 0 + 0 + + + 8 + 39 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/eui64.c + eui64.c + 0 + 0 + + + 8 + 40 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/fsm.c + fsm.c + 0 + 0 + + + 8 + 41 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/ipcp.c + ipcp.c + 0 + 0 + + + 8 + 42 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/ipv6cp.c + ipv6cp.c + 0 + 0 + + + 8 + 43 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/lcp.c + lcp.c + 0 + 0 + + + 8 + 44 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/magic.c + magic.c + 0 + 0 + + + 8 + 45 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/mppe.c + mppe.c + 0 + 0 + + + 8 + 46 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/multilink.c + multilink.c + 0 + 0 + + + 8 + 47 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/ppp.c + ppp.c + 0 + 0 + + + 8 + 48 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/pppapi.c + pppapi.c + 0 + 0 + + + 8 + 49 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/pppcrypt.c + pppcrypt.c + 0 + 0 + + + 8 + 50 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/pppoe.c + pppoe.c + 0 + 0 + + + 8 + 51 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/pppol2tp.c + pppol2tp.c + 0 + 0 + + + 8 + 52 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/pppos.c + pppos.c + 0 + 0 + + + 8 + 53 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/upap.c + upap.c + 0 + 0 + + + 8 + 54 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/utils.c + utils.c + 0 + 0 + + + 8 + 55 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/vj.c + vj.c + 0 + 0 + + + 8 + 56 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/bridgeif.c + bridgeif.c + 0 + 0 + + + 8 + 57 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/bridgeif_fdb.c + bridgeif_fdb.c + 0 + 0 + + + 8 + 58 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ethernet.c + ethernet.c + 0 + 0 + + + 8 + 59 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/lowpan6.c + lowpan6.c + 0 + 0 + + + 8 + 60 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/lowpan6_ble.c + lowpan6_ble.c + 0 + 0 + + + 8 + 61 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/lowpan6_common.c + lowpan6_common.c + 0 + 0 + + + 8 + 62 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/slipif.c + slipif.c + 0 + 0 + + + 8 + 63 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/zepif.c + zepif.c + 0 + 0 + + + 8 + 64 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/ecp.c + ecp.c + 0 + 0 + + + 8 + 65 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/api/api_lib.c + api_lib.c + 0 + 0 + + + 8 + 66 + 1 + 0 + 0 + 0 + 
../Middlewares/Third_Party/LwIP/src/api/api_msg.c + api_msg.c + 0 + 0 + + + 8 + 67 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/api/err.c + err.c + 0 + 0 + + + 8 + 68 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/api/if_api.c + if_api.c + 0 + 0 + + + 8 + 69 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/api/netbuf.c + netbuf.c + 0 + 0 + + + 8 + 70 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/api/netdb.c + netdb.c + 0 + 0 + + + 8 + 71 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/api/netifapi.c + netifapi.c + 0 + 0 + + + 8 + 72 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/api/sockets.c + sockets.c + 0 + 0 + + + 8 + 73 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/api/tcpip.c + tcpip.c + 0 + 0 + + + 8 + 74 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/altcp.c + altcp.c + 0 + 0 + + + 8 + 75 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c + altcp_alloc.c + 0 + 0 + + + 8 + 76 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c + altcp_tcp.c + 0 + 0 + + + 8 + 77 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/def.c + def.c + 0 + 0 + + + 8 + 78 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/dns.c + dns.c + 0 + 0 + + + 8 + 79 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/inet_chksum.c + inet_chksum.c + 0 + 0 + + + 8 + 80 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/init.c + init.c + 0 + 0 + + + 8 + 81 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/ip.c + ip.c + 0 + 0 + + + 8 + 82 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/mem.c + mem.c + 0 + 0 + + + 8 + 83 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/memp.c + memp.c + 0 + 0 + + + 8 + 84 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/netif.c + netif.c + 0 + 0 + + + 8 + 85 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/pbuf.c + pbuf.c + 0 + 0 + + + 8 + 86 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/raw.c + raw.c + 0 + 0 + + + 8 + 87 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/stats.c + stats.c + 0 + 0 + + + 8 + 88 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/sys.c + sys.c + 0 + 0 + + + 8 + 89 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/tcp.c + tcp.c + 0 + 0 + + + 8 + 90 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/tcp_in.c + tcp_in.c + 0 + 0 + + + 8 + 91 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/tcp_out.c + tcp_out.c + 0 + 0 + + + 8 + 92 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/timeouts.c + timeouts.c + 0 + 0 + + + 8 + 93 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/udp.c + udp.c + 0 + 0 + + + 8 + 94 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c + autoip.c + 0 + 0 + + + 8 + 95 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c + dhcp.c + 0 + 0 + + + 8 + 96 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c + etharp.c + 0 + 0 + + + 8 + 97 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c + icmp.c + 0 + 0 + + + 8 + 98 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c + igmp.c + 0 + 0 + + + 8 + 99 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c + ip4.c + 0 + 0 + + + 8 + 100 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c + ip4_addr.c + 0 + 0 + + + 8 + 101 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c + ip4_frag.c + 0 + 0 
+ + + 8 + 102 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c + dhcp6.c + 0 + 0 + + + 8 + 103 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c + ethip6.c + 0 + 0 + + + 8 + 104 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c + icmp6.c + 0 + 0 + + + 8 + 105 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c + inet6.c + 0 + 0 + + + 8 + 106 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c + ip6.c + 0 + 0 + + + 8 + 107 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c + ip6_addr.c + 0 + 0 + + + 8 + 108 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c + ip6_frag.c + 0 + 0 + + + 8 + 109 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c + mld6.c + 0 + 0 + + + 8 + 110 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c + nd6.c + 0 + 0 + + + 8 + 111 + 1 + 0 + 0 + 0 + ../Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c + mqtt.c + 0 + 0 + + + + + User/board + 1 + 0 + 0 + 0 + + 9 + 112 + 1 + 0 + 0 + 0 + ..\User\board\src\leds.c + leds.c + 0 + 0 + + + + + User/application + 1 + 0 + 0 + 0 + + 10 + 113 + 1 + 0 + 0 + 0 + ..\User\application\src\tcpclient.c + tcpclient.c + 0 + 0 + + + 10 + 114 + 1 + 0 + 0 + 0 + ..\User\application\src\tcpserverc.c + tcpserverc.c + 0 + 0 + + + + + User/driver + 1 + 0 + 0 + 0 + + 11 + 115 + 1 + 0 + 0 + 0 + ..\User\driver\dac161s997.c + dac161s997.c + 0 + 0 + + + 11 + 116 + 1 + 0 + 0 + 0 + ..\User\driver\ad7124.c + ad7124.c + 0 + 0 + + + 11 + 117 + 1 + 0 + 0 + 0 + ..\User\driver\ht1200m.c + ht1200m.c + 0 + 0 + + + + + User/system + 0 + 0 + 0 + 0 + + 12 + 118 + 1 + 0 + 0 + 0 + ..\User\system\user_spi.c + user_spi.c + 0 + 0 + + + + + ::CMSIS + 0 + 0 + 0 + 1 + + +
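The User/application group above names tcpclient.c and tcpserverc.c, built alongside the lwIP sources collected in the Middlewares/LwIP group. Those two application files are not shown in this part of the commit, so their contents are unknown; purely as orientation, a bare-metal (NO_SYS) lwIP TCP client built on top of these sources typically follows the raw callback API as in the sketch below, in which the function names, server address and port are invented for illustration and are not taken from the repository:

#include "lwip/tcp.h"
#include "lwip/ip_addr.h"

/* Sketch only: this is not the tcpclient.c from this commit. */
static err_t client_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
{
  (void)arg; (void)err;
  if (p == NULL) {               /* NULL pbuf means the peer closed the connection */
    tcp_close(tpcb);
    return ERR_OK;
  }
  tcp_recved(tpcb, p->tot_len);  /* acknowledge the data so the receive window reopens */
  pbuf_free(p);
  return ERR_OK;
}

static err_t client_connected(void *arg, struct tcp_pcb *tpcb, err_t err)
{
  static const char msg[] = "hello";
  (void)arg; (void)err;
  tcp_recv(tpcb, client_recv);                                /* register the RX callback */
  tcp_write(tpcb, msg, sizeof(msg) - 1, TCP_WRITE_FLAG_COPY); /* queue one segment */
  return tcp_output(tpcb);                                    /* push it out now */
}

void tcp_client_start(void)                                   /* hypothetical entry point */
{
  ip_addr_t server;
  struct tcp_pcb *pcb = tcp_new();
  if (pcb == NULL) {
    return;
  }
  IP_ADDR4(&server, 192, 168, 1, 100);                        /* assumed server address */
  tcp_connect(pcb, &server, 8080, client_connected);          /* assumed port */
}

Whether the real tcpclient.c uses this raw API or the sequential netconn API (whose implementation, api_lib.c, appears later in this commit) cannot be determined from the project files alone.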
diff --git a/MDK-ARM/TEST2.uvprojx b/MDK-ARM/TEST2.uvprojx new file mode 100644 index 0000000..20d0eb4 --- /dev/null +++ b/MDK-ARM/TEST2.uvprojx @@ -0,0 +1,1419 @@ + + + + 2.1 + +
+  <Header>### uVision Project, (C) Keil Software</Header>
+ + + + TEST2 + 0x4 + ARM-ADS + 5060960::V5.06 update 7 (build 960)::.\ARMCC + 0 + + + STM32F407VGTx + STMicroelectronics + Keil.STM32F4xx_DFP.2.17.1 + https://www.keil.com/pack/ + IRAM(0x20000000-0x2001BFFF) IRAM2(0x2001C000-0x2001FFFF) IROM(0x8000000-0x80FFFFF) CLOCK(25000000) FPU2 CPUTYPE("Cortex-M4") TZ + + + + 0 + + + + + + + + + + + $$Device:STM32F407VGTx$CMSIS\SVD\STM32F407.svd + 0 + 0 + + + + + + + 0 + 0 + 0 + 0 + 1 + + TEST2\ + TEST2 + 1 + 0 + 0 + 1 + 0 + + 1 + 0 + 0 + + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 1 + + + 0 + 0 + 0 + 0 + + 1 + + + + 0 + 0 + 0 + 0 + 0 + 1 + 0 + 0 + 0 + 0 + 3 + + + 0 + + + SARMCM3.DLL + -REMAP -MPU + DCM.DLL + -pCM4 + SARMCM3.DLL + -MPU + TCM.DLL + -pCM4 + + + + 1 + 0 + 0 + 0 + 16 + + + + + 1 + 0 + 0 + 1 + 1 + 4101 + + 1 + BIN\UL2V8M.DLL + "" () + + + + + 0 + + + + 0 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 0 + 1 + 1 + 0 + 1 + 1 + 0 + 0 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 0 + 0 + "Cortex-M4" + + 0 + 0 + 0 + 1 + 1 + 0 + 0 + 2 + 0 + 0 + 1 + 0 + 8 + 1 + 0 + 0 + 0 + 3 + 4 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 1 + 0 + 0 + 0 + 0 + 1 + 1 + + + 0 + 0x0 + 0x0 + + + 0 + 0x0 + 0x0 + + + 0 + 0x0 + 0x0 + + + 0 + 0x0 + 0x0 + + + 0 + 0x0 + 0x0 + + + 0 + 0x0 + 0x0 + + + 0 + 0x20000000 + 0x1c000 + + + 1 + 0x8000000 + 0x100000 + + + 0 + 0x0 + 0x0 + + + 1 + 0x0 + 0x0 + + + 1 + 0x0 + 0x0 + + + 1 + 0x0 + 0x0 + + + 1 + 0x8000000 + 0x100000 + + + 1 + 0x0 + 0x0 + + + 0 + 0x0 + 0x0 + + + 0 + 0x0 + 0x0 + + + 0 + 0x0 + 0x0 + + + 0 + 0x20000000 + 0x1c000 + + + 0 + 0x2001c000 + 0x4000 + + + + + + 1 + 1 + 0 + 0 + 1 + 0 + 0 + 0 + 0 + 0 + 2 + 0 + 0 + 1 + 1 + 0 + 5 + 3 + 1 + 1 + 0 + 0 + 0 + + + USE_HAL_DRIVER,STM32F407xx + + ../Core/Inc;../LWIP/App;../LWIP/Target;../Middlewares/Third_Party/LwIP/src/include;../Middlewares/Third_Party/LwIP/system;../Drivers/STM32F4xx_HAL_Driver/Inc;../Drivers/STM32F4xx_HAL_Driver/Inc/Legacy;../Drivers/BSP/Components/lan8742;../Middlewares/Third_Party/LwIP/src/include/netif/ppp;../Drivers/CMSIS/Device/ST/STM32F4xx/Include;../Middlewares/Third_Party/LwIP/src/include/lwip;../Middlewares/Third_Party/LwIP/src/include/lwip/apps;../Middlewares/Third_Party/LwIP/src/include/lwip/priv;../Middlewares/Third_Party/LwIP/src/include/lwip/prot;../Middlewares/Third_Party/LwIP/src/include/netif;../Middlewares/Third_Party/LwIP/src/include/compat/posix;../Middlewares/Third_Party/LwIP/src/include/compat/posix/arpa;../Middlewares/Third_Party/LwIP/src/include/compat/posix/net;../Middlewares/Third_Party/LwIP/src/include/compat/posix/sys;../Middlewares/Third_Party/LwIP/src/include/compat/stdc;../Middlewares/Third_Party/LwIP/system/arch;../Drivers/CMSIS/Include;../User/board/inc;../User/application/inc;../User/driver;../User/system + + + + 1 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 1 + + + + + + + + + 1 + 0 + 0 + 0 + 1 + 0 + + + + + + + + + + + + + + + Application/MDK-ARM + + + startup_stm32f407xx.s + 2 + startup_stm32f407xx.s + + + + + Application/User/Core + + + main.c + 1 + ../Core/Src/main.c + + + gpio.c + 1 + ../Core/Src/gpio.c + + + dma.c + 1 + ../Core/Src/dma.c + + + 2 + 0 + 0 + 0 + 0 + 1 + 2 + 2 + 2 + 2 + 11 + + + 1 + + + + 2 + 0 + 2 + 2 + 2 + 2 + 2 + 2 + 2 + 2 + 0 + 2 + 2 + 2 + 2 + 2 + 0 + 0 + 2 + 2 + 2 + 2 + 2 + + + + + + + + + + + + spi.c + 1 + ../Core/Src/spi.c + + + 2 + 0 + 0 + 0 + 0 + 1 + 2 + 2 + 2 + 2 + 11 + + + 1 + + + + 2 + 0 + 2 + 2 + 2 + 2 + 2 + 2 + 2 + 2 + 0 + 2 + 2 + 2 + 2 + 2 + 0 + 0 + 2 + 2 + 2 + 2 + 2 + + + + + + + + + + + + tim.c + 1 + ../Core/Src/tim.c + + + usart.c + 1 + ../Core/Src/usart.c + + + 2 + 0 + 0 + 0 + 0 + 1 + 2 + 2 + 2 + 2 + 
11 + + + 1 + + + + 2 + 0 + 2 + 2 + 2 + 2 + 2 + 2 + 2 + 2 + 0 + 2 + 2 + 2 + 2 + 2 + 0 + 0 + 2 + 2 + 2 + 2 + 2 + + + + + + + + + + + + stm32f4xx_it.c + 1 + ../Core/Src/stm32f4xx_it.c + + + stm32f4xx_hal_msp.c + 1 + ../Core/Src/stm32f4xx_hal_msp.c + + + + + Application/User/LWIP/Target + + + ethernetif.c + 1 + ../LWIP/Target/ethernetif.c + + + + + Application/User/LWIP/App + + + lwip.c + 1 + ../LWIP/App/lwip.c + + + + + Drivers/BSP/Components + + + lan8742.c + 1 + ../Drivers/BSP/Components/lan8742/lan8742.c + + + + + Drivers/STM32F4xx_HAL_Driver + + + stm32f4xx_hal_rcc.c + 1 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rcc.c + + + stm32f4xx_hal_rcc_ex.c + 1 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rcc_ex.c + + + stm32f4xx_hal_flash.c + 1 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash.c + + + stm32f4xx_hal_flash_ex.c + 1 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash_ex.c + + + stm32f4xx_hal_flash_ramfunc.c + 1 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash_ramfunc.c + + + stm32f4xx_hal_gpio.c + 1 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_gpio.c + + + stm32f4xx_hal_dma_ex.c + 1 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dma_ex.c + + + stm32f4xx_hal_dma.c + 1 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dma.c + + + stm32f4xx_hal_pwr.c + 1 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pwr.c + + + stm32f4xx_hal_pwr_ex.c + 1 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pwr_ex.c + + + stm32f4xx_hal_cortex.c + 1 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_cortex.c + + + stm32f4xx_hal.c + 1 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal.c + + + stm32f4xx_hal_exti.c + 1 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_exti.c + + + stm32f4xx_hal_eth.c + 1 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_eth.c + + + stm32f4xx_hal_spi.c + 1 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_spi.c + + + 2 + 0 + 0 + 0 + 0 + 1 + 2 + 2 + 2 + 2 + 11 + + + 1 + + + + 2 + 0 + 2 + 2 + 2 + 2 + 2 + 2 + 2 + 2 + 0 + 2 + 2 + 2 + 2 + 2 + 0 + 0 + 2 + 2 + 2 + 2 + 2 + + + + + + + + + + + + stm32f4xx_hal_tim.c + 1 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_tim.c + + + 2 + 0 + 0 + 0 + 0 + 1 + 2 + 2 + 2 + 2 + 11 + + + 1 + + + + 2 + 0 + 2 + 2 + 2 + 2 + 2 + 2 + 2 + 2 + 0 + 2 + 2 + 2 + 2 + 2 + 0 + 0 + 2 + 2 + 2 + 2 + 2 + + + + + + + + + + + + stm32f4xx_hal_tim_ex.c + 1 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_tim_ex.c + + + 2 + 0 + 0 + 0 + 0 + 1 + 2 + 2 + 2 + 2 + 11 + + + 1 + + + + 2 + 0 + 2 + 2 + 2 + 2 + 2 + 2 + 2 + 2 + 0 + 2 + 2 + 2 + 2 + 2 + 0 + 0 + 2 + 2 + 2 + 2 + 2 + + + + + + + + + + + + stm32f4xx_hal_uart.c + 1 + ../Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_uart.c + + + 2 + 0 + 0 + 0 + 0 + 1 + 2 + 2 + 2 + 2 + 11 + + + 1 + + + + 2 + 0 + 2 + 2 + 2 + 2 + 2 + 2 + 2 + 2 + 0 + 2 + 2 + 2 + 2 + 2 + 0 + 0 + 2 + 2 + 2 + 2 + 2 + + + + + + + + + + + + + + Drivers/CMSIS + + + system_stm32f4xx.c + 1 + ../Core/Src/system_stm32f4xx.c + + + + + Middlewares/LwIP + + + auth.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/auth.c + + + ccp.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/ccp.c + + + chap_ms.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/chap_ms.c + + + chap-md5.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/chap-md5.c + + + chap-new.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/chap-new.c + + + demand.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/demand.c + + + eap.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/eap.c + + + eui64.c + 1 + 
../Middlewares/Third_Party/LwIP/src/netif/ppp/eui64.c + + + fsm.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/fsm.c + + + ipcp.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/ipcp.c + + + ipv6cp.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/ipv6cp.c + + + lcp.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/lcp.c + + + magic.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/magic.c + + + mppe.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/mppe.c + + + multilink.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/multilink.c + + + ppp.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/ppp.c + + + pppapi.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/pppapi.c + + + pppcrypt.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/pppcrypt.c + + + pppoe.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/pppoe.c + + + pppol2tp.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/pppol2tp.c + + + pppos.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/pppos.c + + + upap.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/upap.c + + + utils.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/utils.c + + + vj.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/vj.c + + + bridgeif.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/bridgeif.c + + + bridgeif_fdb.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/bridgeif_fdb.c + + + ethernet.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ethernet.c + + + lowpan6.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/lowpan6.c + + + lowpan6_ble.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/lowpan6_ble.c + + + lowpan6_common.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/lowpan6_common.c + + + slipif.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/slipif.c + + + zepif.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/zepif.c + + + ecp.c + 1 + ../Middlewares/Third_Party/LwIP/src/netif/ppp/ecp.c + + + api_lib.c + 1 + ../Middlewares/Third_Party/LwIP/src/api/api_lib.c + + + api_msg.c + 1 + ../Middlewares/Third_Party/LwIP/src/api/api_msg.c + + + err.c + 1 + ../Middlewares/Third_Party/LwIP/src/api/err.c + + + if_api.c + 1 + ../Middlewares/Third_Party/LwIP/src/api/if_api.c + + + netbuf.c + 1 + ../Middlewares/Third_Party/LwIP/src/api/netbuf.c + + + netdb.c + 1 + ../Middlewares/Third_Party/LwIP/src/api/netdb.c + + + netifapi.c + 1 + ../Middlewares/Third_Party/LwIP/src/api/netifapi.c + + + sockets.c + 1 + ../Middlewares/Third_Party/LwIP/src/api/sockets.c + + + tcpip.c + 1 + ../Middlewares/Third_Party/LwIP/src/api/tcpip.c + + + altcp.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/altcp.c + + + altcp_alloc.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c + + + altcp_tcp.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c + + + def.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/def.c + + + dns.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/dns.c + + + inet_chksum.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/inet_chksum.c + + + init.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/init.c + + + ip.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/ip.c + + + mem.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/mem.c + + + memp.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/memp.c + + + netif.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/netif.c + + + pbuf.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/pbuf.c + + + raw.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/raw.c + + + stats.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/stats.c + + + sys.c + 1 + 
../Middlewares/Third_Party/LwIP/src/core/sys.c + + + tcp.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/tcp.c + + + tcp_in.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/tcp_in.c + + + tcp_out.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/tcp_out.c + + + timeouts.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/timeouts.c + + + udp.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/udp.c + + + autoip.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c + + + dhcp.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c + + + etharp.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c + + + icmp.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c + + + igmp.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c + + + ip4.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c + + + ip4_addr.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c + + + ip4_frag.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c + + + dhcp6.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c + + + ethip6.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c + + + icmp6.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c + + + inet6.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c + + + ip6.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c + + + ip6_addr.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c + + + ip6_frag.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c + + + mld6.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c + + + nd6.c + 1 + ../Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c + + + mqtt.c + 1 + ../Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c + + + + + User/board + + + leds.c + 1 + ..\User\board\src\leds.c + + + + + User/application + + + tcpclient.c + 1 + ..\User\application\src\tcpclient.c + + + tcpserverc.c + 1 + ..\User\application\src\tcpserverc.c + + + + + User/driver + + + dac161s997.c + 1 + ..\User\driver\dac161s997.c + + + ad7124.c + 1 + ..\User\driver\ad7124.c + + + ht1200m.c + 1 + ..\User\driver\ht1200m.c + + + + + User/system + + + user_spi.c + 1 + ..\User\system\user_spi.c + + + + + ::CMSIS + + + + + + + + + + + + + + + + + + + + + + TEST2 + 1 + + + + +
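The uvprojx above passes USE_HAL_DRIVER and STM32F407xx as project-wide C defines and adds the HAL, CMSIS device, and lwIP directories to the include path. Those two symbols are what let a single include work across the whole tree: the ST device header selects the part-specific register file and then pulls in the HAL. Roughly, paraphrasing the stock stm32f4xx.h (which is not part of the text shown here):

/* Paraphrase of the selection logic in stm32f4xx.h; shown for orientation only. */
#if defined(STM32F407xx)
  #include "stm32f407xx.h"     /* register and IRQ definitions for the STM32F407 */
#endif

#if defined(USE_HAL_DRIVER)
  #include "stm32f4xx_hal.h"   /* HAL API, configured via stm32f4xx_hal_conf.h */
#endif

Application sources such as main.c therefore only need to include "main.h" or "stm32f4xx_hal.h" and inherit the correct device support from these two project defines.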
diff --git a/MDK-ARM/TEST2/TEST2.sct b/MDK-ARM/TEST2/TEST2.sct new file mode 100644 index 0000000..a335d3e --- /dev/null +++ b/MDK-ARM/TEST2/TEST2.sct @@ -0,0 +1,19 @@ +; ************************************************************* +; *** Scatter-Loading Description File generated by uVision *** +; ************************************************************* + +LR_IROM1 0x08000000 0x00100000 { ; load region size_region + ER_IROM1 0x08000000 0x00100000 { ; load address = execution address + *.o (RESET, +First) + *(InRoot$$Sections) + .ANY (+RO) + .ANY (+XO) + } + RW_IRAM1 0x20000000 0x0001C000 { ; RW data + .ANY (+RW +ZI) + } + RW_IRAM2 0x2001C000 0x00004000 { + .ANY (+RW +ZI) + } +} + diff --git a/MDK-ARM/startup_stm32f407xx.s b/MDK-ARM/startup_stm32f407xx.s new file mode 100644 index 0000000..b0722a1 --- /dev/null +++ b/MDK-ARM/startup_stm32f407xx.s @@ -0,0 +1,422 @@ +;******************************************************************************* +;* File Name : startup_stm32f407xx.s +;* Author : MCD Application Team +;* Description : STM32F407xx devices vector table for MDK-ARM toolchain. +;* This module performs: +;* - Set the initial SP +;* - Set the initial PC == Reset_Handler +;* - Set the vector table entries with the exceptions ISR address +;* - Branches to __main in the C library (which eventually +;* calls main()). +;* After Reset the CortexM4 processor is in Thread mode, +;* priority is Privileged, and the Stack is set to Main. +;******************************************************************************* +;* @attention +;* +;* Copyright (c) 2017 STMicroelectronics. +;* All rights reserved. +;* +;* This software is licensed under terms that can be found in the LICENSE file +;* in the root directory of this software component. +;* If no LICENSE file comes with this software, it is provided AS-IS. 
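The scatter file above (TEST2.sct) describes one 1 MB load region in flash and splits on-chip RAM into RW_IRAM1 (112 KB at 0x20000000) and RW_IRAM2 (16 KB at 0x2001C000), both of which collect leftover RW/ZI data via .ANY. If a specific object ever needs to be pinned to the second bank, the usual armcc/MDK pattern is a named input section plus a matching selector inside RW_IRAM2; the buffer and section names below are made up and nothing like this appears in the commit:

/* Sketch (not in this commit): place one buffer in the 16 KB bank at 0x2001C000.
 * Scatter side, RW_IRAM2 would gain an explicit selector:
 *   RW_IRAM2 0x2001C000 0x00004000 { *(.sram2) .ANY (+RW +ZI) }
 * C side (armcc / MDK): tag the object with that section name. */
static unsigned char rx_ring[2048] __attribute__((section(".sram2"), zero_init));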
+;* +;******************************************************************************* +;* <<< Use Configuration Wizard in Context Menu >>> +; +; Amount of memory (in bytes) allocated for Stack +; Tailor this value to your application needs +; Stack Configuration +; Stack Size (in Bytes) <0x0-0xFFFFFFFF:8> +; + +Stack_Size EQU 0x400 + + AREA STACK, NOINIT, READWRITE, ALIGN=3 +Stack_Mem SPACE Stack_Size +__initial_sp + + +; Heap Configuration +; Heap Size (in Bytes) <0x0-0xFFFFFFFF:8> +; + +Heap_Size EQU 0x200 + + AREA HEAP, NOINIT, READWRITE, ALIGN=3 +__heap_base +Heap_Mem SPACE Heap_Size +__heap_limit + + PRESERVE8 + THUMB + + +; Vector Table Mapped to Address 0 at Reset + AREA RESET, DATA, READONLY + EXPORT __Vectors + EXPORT __Vectors_End + EXPORT __Vectors_Size + +__Vectors DCD __initial_sp ; Top of Stack + DCD Reset_Handler ; Reset Handler + DCD NMI_Handler ; NMI Handler + DCD HardFault_Handler ; Hard Fault Handler + DCD MemManage_Handler ; MPU Fault Handler + DCD BusFault_Handler ; Bus Fault Handler + DCD UsageFault_Handler ; Usage Fault Handler + DCD 0 ; Reserved + DCD 0 ; Reserved + DCD 0 ; Reserved + DCD 0 ; Reserved + DCD SVC_Handler ; SVCall Handler + DCD DebugMon_Handler ; Debug Monitor Handler + DCD 0 ; Reserved + DCD PendSV_Handler ; PendSV Handler + DCD SysTick_Handler ; SysTick Handler + + ; External Interrupts + DCD WWDG_IRQHandler ; Window WatchDog + DCD PVD_IRQHandler ; PVD through EXTI Line detection + DCD TAMP_STAMP_IRQHandler ; Tamper and TimeStamps through the EXTI line + DCD RTC_WKUP_IRQHandler ; RTC Wakeup through the EXTI line + DCD FLASH_IRQHandler ; FLASH + DCD RCC_IRQHandler ; RCC + DCD EXTI0_IRQHandler ; EXTI Line0 + DCD EXTI1_IRQHandler ; EXTI Line1 + DCD EXTI2_IRQHandler ; EXTI Line2 + DCD EXTI3_IRQHandler ; EXTI Line3 + DCD EXTI4_IRQHandler ; EXTI Line4 + DCD DMA1_Stream0_IRQHandler ; DMA1 Stream 0 + DCD DMA1_Stream1_IRQHandler ; DMA1 Stream 1 + DCD DMA1_Stream2_IRQHandler ; DMA1 Stream 2 + DCD DMA1_Stream3_IRQHandler ; DMA1 Stream 3 + DCD DMA1_Stream4_IRQHandler ; DMA1 Stream 4 + DCD DMA1_Stream5_IRQHandler ; DMA1 Stream 5 + DCD DMA1_Stream6_IRQHandler ; DMA1 Stream 6 + DCD ADC_IRQHandler ; ADC1, ADC2 and ADC3s + DCD CAN1_TX_IRQHandler ; CAN1 TX + DCD CAN1_RX0_IRQHandler ; CAN1 RX0 + DCD CAN1_RX1_IRQHandler ; CAN1 RX1 + DCD CAN1_SCE_IRQHandler ; CAN1 SCE + DCD EXTI9_5_IRQHandler ; External Line[9:5]s + DCD TIM1_BRK_TIM9_IRQHandler ; TIM1 Break and TIM9 + DCD TIM1_UP_TIM10_IRQHandler ; TIM1 Update and TIM10 + DCD TIM1_TRG_COM_TIM11_IRQHandler ; TIM1 Trigger and Commutation and TIM11 + DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare + DCD TIM2_IRQHandler ; TIM2 + DCD TIM3_IRQHandler ; TIM3 + DCD TIM4_IRQHandler ; TIM4 + DCD I2C1_EV_IRQHandler ; I2C1 Event + DCD I2C1_ER_IRQHandler ; I2C1 Error + DCD I2C2_EV_IRQHandler ; I2C2 Event + DCD I2C2_ER_IRQHandler ; I2C2 Error + DCD SPI1_IRQHandler ; SPI1 + DCD SPI2_IRQHandler ; SPI2 + DCD USART1_IRQHandler ; USART1 + DCD USART2_IRQHandler ; USART2 + DCD USART3_IRQHandler ; USART3 + DCD EXTI15_10_IRQHandler ; External Line[15:10]s + DCD RTC_Alarm_IRQHandler ; RTC Alarm (A and B) through EXTI Line + DCD OTG_FS_WKUP_IRQHandler ; USB OTG FS Wakeup through EXTI line + DCD TIM8_BRK_TIM12_IRQHandler ; TIM8 Break and TIM12 + DCD TIM8_UP_TIM13_IRQHandler ; TIM8 Update and TIM13 + DCD TIM8_TRG_COM_TIM14_IRQHandler ; TIM8 Trigger and Commutation and TIM14 + DCD TIM8_CC_IRQHandler ; TIM8 Capture Compare + DCD DMA1_Stream7_IRQHandler ; DMA1 Stream7 + DCD FMC_IRQHandler ; FMC + DCD SDIO_IRQHandler ; SDIO + DCD TIM5_IRQHandler ; TIM5 
+ DCD SPI3_IRQHandler ; SPI3 + DCD UART4_IRQHandler ; UART4 + DCD UART5_IRQHandler ; UART5 + DCD TIM6_DAC_IRQHandler ; TIM6 and DAC1&2 underrun errors + DCD TIM7_IRQHandler ; TIM7 + DCD DMA2_Stream0_IRQHandler ; DMA2 Stream 0 + DCD DMA2_Stream1_IRQHandler ; DMA2 Stream 1 + DCD DMA2_Stream2_IRQHandler ; DMA2 Stream 2 + DCD DMA2_Stream3_IRQHandler ; DMA2 Stream 3 + DCD DMA2_Stream4_IRQHandler ; DMA2 Stream 4 + DCD ETH_IRQHandler ; Ethernet + DCD ETH_WKUP_IRQHandler ; Ethernet Wakeup through EXTI line + DCD CAN2_TX_IRQHandler ; CAN2 TX + DCD CAN2_RX0_IRQHandler ; CAN2 RX0 + DCD CAN2_RX1_IRQHandler ; CAN2 RX1 + DCD CAN2_SCE_IRQHandler ; CAN2 SCE + DCD OTG_FS_IRQHandler ; USB OTG FS + DCD DMA2_Stream5_IRQHandler ; DMA2 Stream 5 + DCD DMA2_Stream6_IRQHandler ; DMA2 Stream 6 + DCD DMA2_Stream7_IRQHandler ; DMA2 Stream 7 + DCD USART6_IRQHandler ; USART6 + DCD I2C3_EV_IRQHandler ; I2C3 event + DCD I2C3_ER_IRQHandler ; I2C3 error + DCD OTG_HS_EP1_OUT_IRQHandler ; USB OTG HS End Point 1 Out + DCD OTG_HS_EP1_IN_IRQHandler ; USB OTG HS End Point 1 In + DCD OTG_HS_WKUP_IRQHandler ; USB OTG HS Wakeup through EXTI + DCD OTG_HS_IRQHandler ; USB OTG HS + DCD DCMI_IRQHandler ; DCMI + DCD 0 ; Reserved + DCD HASH_RNG_IRQHandler ; Hash and Rng + DCD FPU_IRQHandler ; FPU + + +__Vectors_End + +__Vectors_Size EQU __Vectors_End - __Vectors + + AREA |.text|, CODE, READONLY + +; Reset handler +Reset_Handler PROC + EXPORT Reset_Handler [WEAK] + IMPORT SystemInit + IMPORT __main + + LDR R0, =SystemInit + BLX R0 + LDR R0, =__main + BX R0 + ENDP + +; Dummy Exception Handlers (infinite loops which can be modified) + +NMI_Handler PROC + EXPORT NMI_Handler [WEAK] + B . + ENDP +HardFault_Handler\ + PROC + EXPORT HardFault_Handler [WEAK] + B . + ENDP +MemManage_Handler\ + PROC + EXPORT MemManage_Handler [WEAK] + B . + ENDP +BusFault_Handler\ + PROC + EXPORT BusFault_Handler [WEAK] + B . + ENDP +UsageFault_Handler\ + PROC + EXPORT UsageFault_Handler [WEAK] + B . + ENDP +SVC_Handler PROC + EXPORT SVC_Handler [WEAK] + B . + ENDP +DebugMon_Handler\ + PROC + EXPORT DebugMon_Handler [WEAK] + B . + ENDP +PendSV_Handler PROC + EXPORT PendSV_Handler [WEAK] + B . + ENDP +SysTick_Handler PROC + EXPORT SysTick_Handler [WEAK] + B . 
+ ENDP + +Default_Handler PROC + + EXPORT WWDG_IRQHandler [WEAK] + EXPORT PVD_IRQHandler [WEAK] + EXPORT TAMP_STAMP_IRQHandler [WEAK] + EXPORT RTC_WKUP_IRQHandler [WEAK] + EXPORT FLASH_IRQHandler [WEAK] + EXPORT RCC_IRQHandler [WEAK] + EXPORT EXTI0_IRQHandler [WEAK] + EXPORT EXTI1_IRQHandler [WEAK] + EXPORT EXTI2_IRQHandler [WEAK] + EXPORT EXTI3_IRQHandler [WEAK] + EXPORT EXTI4_IRQHandler [WEAK] + EXPORT DMA1_Stream0_IRQHandler [WEAK] + EXPORT DMA1_Stream1_IRQHandler [WEAK] + EXPORT DMA1_Stream2_IRQHandler [WEAK] + EXPORT DMA1_Stream3_IRQHandler [WEAK] + EXPORT DMA1_Stream4_IRQHandler [WEAK] + EXPORT DMA1_Stream5_IRQHandler [WEAK] + EXPORT DMA1_Stream6_IRQHandler [WEAK] + EXPORT ADC_IRQHandler [WEAK] + EXPORT CAN1_TX_IRQHandler [WEAK] + EXPORT CAN1_RX0_IRQHandler [WEAK] + EXPORT CAN1_RX1_IRQHandler [WEAK] + EXPORT CAN1_SCE_IRQHandler [WEAK] + EXPORT EXTI9_5_IRQHandler [WEAK] + EXPORT TIM1_BRK_TIM9_IRQHandler [WEAK] + EXPORT TIM1_UP_TIM10_IRQHandler [WEAK] + EXPORT TIM1_TRG_COM_TIM11_IRQHandler [WEAK] + EXPORT TIM1_CC_IRQHandler [WEAK] + EXPORT TIM2_IRQHandler [WEAK] + EXPORT TIM3_IRQHandler [WEAK] + EXPORT TIM4_IRQHandler [WEAK] + EXPORT I2C1_EV_IRQHandler [WEAK] + EXPORT I2C1_ER_IRQHandler [WEAK] + EXPORT I2C2_EV_IRQHandler [WEAK] + EXPORT I2C2_ER_IRQHandler [WEAK] + EXPORT SPI1_IRQHandler [WEAK] + EXPORT SPI2_IRQHandler [WEAK] + EXPORT USART1_IRQHandler [WEAK] + EXPORT USART2_IRQHandler [WEAK] + EXPORT USART3_IRQHandler [WEAK] + EXPORT EXTI15_10_IRQHandler [WEAK] + EXPORT RTC_Alarm_IRQHandler [WEAK] + EXPORT OTG_FS_WKUP_IRQHandler [WEAK] + EXPORT TIM8_BRK_TIM12_IRQHandler [WEAK] + EXPORT TIM8_UP_TIM13_IRQHandler [WEAK] + EXPORT TIM8_TRG_COM_TIM14_IRQHandler [WEAK] + EXPORT TIM8_CC_IRQHandler [WEAK] + EXPORT DMA1_Stream7_IRQHandler [WEAK] + EXPORT FMC_IRQHandler [WEAK] + EXPORT SDIO_IRQHandler [WEAK] + EXPORT TIM5_IRQHandler [WEAK] + EXPORT SPI3_IRQHandler [WEAK] + EXPORT UART4_IRQHandler [WEAK] + EXPORT UART5_IRQHandler [WEAK] + EXPORT TIM6_DAC_IRQHandler [WEAK] + EXPORT TIM7_IRQHandler [WEAK] + EXPORT DMA2_Stream0_IRQHandler [WEAK] + EXPORT DMA2_Stream1_IRQHandler [WEAK] + EXPORT DMA2_Stream2_IRQHandler [WEAK] + EXPORT DMA2_Stream3_IRQHandler [WEAK] + EXPORT DMA2_Stream4_IRQHandler [WEAK] + EXPORT ETH_IRQHandler [WEAK] + EXPORT ETH_WKUP_IRQHandler [WEAK] + EXPORT CAN2_TX_IRQHandler [WEAK] + EXPORT CAN2_RX0_IRQHandler [WEAK] + EXPORT CAN2_RX1_IRQHandler [WEAK] + EXPORT CAN2_SCE_IRQHandler [WEAK] + EXPORT OTG_FS_IRQHandler [WEAK] + EXPORT DMA2_Stream5_IRQHandler [WEAK] + EXPORT DMA2_Stream6_IRQHandler [WEAK] + EXPORT DMA2_Stream7_IRQHandler [WEAK] + EXPORT USART6_IRQHandler [WEAK] + EXPORT I2C3_EV_IRQHandler [WEAK] + EXPORT I2C3_ER_IRQHandler [WEAK] + EXPORT OTG_HS_EP1_OUT_IRQHandler [WEAK] + EXPORT OTG_HS_EP1_IN_IRQHandler [WEAK] + EXPORT OTG_HS_WKUP_IRQHandler [WEAK] + EXPORT OTG_HS_IRQHandler [WEAK] + EXPORT DCMI_IRQHandler [WEAK] + EXPORT HASH_RNG_IRQHandler [WEAK] + EXPORT FPU_IRQHandler [WEAK] + +WWDG_IRQHandler +PVD_IRQHandler +TAMP_STAMP_IRQHandler +RTC_WKUP_IRQHandler +FLASH_IRQHandler +RCC_IRQHandler +EXTI0_IRQHandler +EXTI1_IRQHandler +EXTI2_IRQHandler +EXTI3_IRQHandler +EXTI4_IRQHandler +DMA1_Stream0_IRQHandler +DMA1_Stream1_IRQHandler +DMA1_Stream2_IRQHandler +DMA1_Stream3_IRQHandler +DMA1_Stream4_IRQHandler +DMA1_Stream5_IRQHandler +DMA1_Stream6_IRQHandler +ADC_IRQHandler +CAN1_TX_IRQHandler +CAN1_RX0_IRQHandler +CAN1_RX1_IRQHandler +CAN1_SCE_IRQHandler +EXTI9_5_IRQHandler +TIM1_BRK_TIM9_IRQHandler +TIM1_UP_TIM10_IRQHandler +TIM1_TRG_COM_TIM11_IRQHandler +TIM1_CC_IRQHandler 
+TIM2_IRQHandler +TIM3_IRQHandler +TIM4_IRQHandler +I2C1_EV_IRQHandler +I2C1_ER_IRQHandler +I2C2_EV_IRQHandler +I2C2_ER_IRQHandler +SPI1_IRQHandler +SPI2_IRQHandler +USART1_IRQHandler +USART2_IRQHandler +USART3_IRQHandler +EXTI15_10_IRQHandler +RTC_Alarm_IRQHandler +OTG_FS_WKUP_IRQHandler +TIM8_BRK_TIM12_IRQHandler +TIM8_UP_TIM13_IRQHandler +TIM8_TRG_COM_TIM14_IRQHandler +TIM8_CC_IRQHandler +DMA1_Stream7_IRQHandler +FMC_IRQHandler +SDIO_IRQHandler +TIM5_IRQHandler +SPI3_IRQHandler +UART4_IRQHandler +UART5_IRQHandler +TIM6_DAC_IRQHandler +TIM7_IRQHandler +DMA2_Stream0_IRQHandler +DMA2_Stream1_IRQHandler +DMA2_Stream2_IRQHandler +DMA2_Stream3_IRQHandler +DMA2_Stream4_IRQHandler +ETH_IRQHandler +ETH_WKUP_IRQHandler +CAN2_TX_IRQHandler +CAN2_RX0_IRQHandler +CAN2_RX1_IRQHandler +CAN2_SCE_IRQHandler +OTG_FS_IRQHandler +DMA2_Stream5_IRQHandler +DMA2_Stream6_IRQHandler +DMA2_Stream7_IRQHandler +USART6_IRQHandler +I2C3_EV_IRQHandler +I2C3_ER_IRQHandler +OTG_HS_EP1_OUT_IRQHandler +OTG_HS_EP1_IN_IRQHandler +OTG_HS_WKUP_IRQHandler +OTG_HS_IRQHandler +DCMI_IRQHandler +HASH_RNG_IRQHandler +FPU_IRQHandler + + B . + + ENDP + + ALIGN + +;******************************************************************************* +; User Stack and Heap initialization +;******************************************************************************* + IF :DEF:__MICROLIB + + EXPORT __initial_sp + EXPORT __heap_base + EXPORT __heap_limit + + ELSE + + IMPORT __use_two_region_memory + EXPORT __user_initial_stackheap + +__user_initial_stackheap + + LDR R0, = Heap_Mem + LDR R1, =(Stack_Mem + Stack_Size) + LDR R2, = (Heap_Mem + Heap_Size) + LDR R3, = Stack_Mem + BX LR + + ALIGN + + ENDIF + + END diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..a585c2f --- /dev/null +++ b/Makefile @@ -0,0 +1,18 @@ +current_time=$(shell date +%Y%m%d%H%M) +# 获得当前git的用户邮箱 +current_user=$(shell git config user.email) + +.PHONY:clean cc + +all: clean + +cc: + git rm -r --cached . + git add . + git commit -m "$(current_user) batch push $(current_time)" + git push origin develop + +clean: + cmd /c keilkill.bat + + diff --git a/Middlewares/Third_Party/LwIP/src/api/api_lib.c b/Middlewares/Third_Party/LwIP/src/api/api_lib.c new file mode 100644 index 0000000..e03b8b7 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/api/api_lib.c @@ -0,0 +1,1367 @@ +/** + * @file + * Sequential API External module + * + * @defgroup netconn Netconn API + * @ingroup sequential_api + * Thread-safe, to be called from non-TCPIP threads only. + * TX/RX handling based on @ref netbuf (containing @ref pbuf) + * to avoid copying data around. + * + * @defgroup netconn_common Common functions + * @ingroup netconn + * For use with TCP and UDP + * + * @defgroup netconn_tcp TCP only + * @ingroup netconn + * TCP only functions + * + * @defgroup netconn_udp UDP only + * @ingroup netconn + * UDP only functions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + */ + +/* This is the part of the API that is linked with + the application */ + +#include "lwip/opt.h" + +#if LWIP_NETCONN /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/api.h" +#include "lwip/memp.h" + +#include "lwip/ip.h" +#include "lwip/raw.h" +#include "lwip/udp.h" +#include "lwip/priv/api_msg.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/priv/tcpip_priv.h" + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +#include + +#define API_MSG_VAR_REF(name) API_VAR_REF(name) +#define API_MSG_VAR_DECLARE(name) API_VAR_DECLARE(struct api_msg, name) +#define API_MSG_VAR_ALLOC(name) API_VAR_ALLOC(struct api_msg, MEMP_API_MSG, name, ERR_MEM) +#define API_MSG_VAR_ALLOC_RETURN_NULL(name) API_VAR_ALLOC(struct api_msg, MEMP_API_MSG, name, NULL) +#define API_MSG_VAR_FREE(name) API_VAR_FREE(MEMP_API_MSG, name) + +#if TCP_LISTEN_BACKLOG +/* need to allocate API message for accept so empty message pool does not result in event loss + * see bug #47512: MPU_COMPATIBLE may fail on empty pool */ +#define API_MSG_VAR_ALLOC_ACCEPT(msg) API_MSG_VAR_ALLOC(msg) +#define API_MSG_VAR_FREE_ACCEPT(msg) API_MSG_VAR_FREE(msg) +#else /* TCP_LISTEN_BACKLOG */ +#define API_MSG_VAR_ALLOC_ACCEPT(msg) +#define API_MSG_VAR_FREE_ACCEPT(msg) +#endif /* TCP_LISTEN_BACKLOG */ + +#if LWIP_NETCONN_FULLDUPLEX +#define NETCONN_RECVMBOX_WAITABLE(conn) (sys_mbox_valid(&(conn)->recvmbox) && (((conn)->flags & NETCONN_FLAG_MBOXINVALID) == 0)) +#define NETCONN_ACCEPTMBOX_WAITABLE(conn) (sys_mbox_valid(&(conn)->acceptmbox) && (((conn)->flags & (NETCONN_FLAG_MBOXCLOSED|NETCONN_FLAG_MBOXINVALID)) == 0)) +#define NETCONN_MBOX_WAITING_INC(conn) SYS_ARCH_INC(conn->mbox_threads_waiting, 1) +#define NETCONN_MBOX_WAITING_DEC(conn) SYS_ARCH_DEC(conn->mbox_threads_waiting, 1) +#else /* LWIP_NETCONN_FULLDUPLEX */ +#define NETCONN_RECVMBOX_WAITABLE(conn) sys_mbox_valid(&(conn)->recvmbox) +#define NETCONN_ACCEPTMBOX_WAITABLE(conn) (sys_mbox_valid(&(conn)->acceptmbox) && (((conn)->flags & NETCONN_FLAG_MBOXCLOSED) == 0)) +#define NETCONN_MBOX_WAITING_INC(conn) +#define NETCONN_MBOX_WAITING_DEC(conn) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +static err_t netconn_close_shutdown(struct netconn *conn, u8_t how); + +/** + * Call the lower part of a netconn_* function + * This function is then running in the thread context + * of tcpip_thread and has exclusive access to lwIP core code. 
+ * + * @param fn function to call + * @param apimsg a struct containing the function to call and its parameters + * @return ERR_OK if the function was called, another err_t if not + */ +static err_t +netconn_apimsg(tcpip_callback_fn fn, struct api_msg *apimsg) +{ + err_t err; + +#ifdef LWIP_DEBUG + /* catch functions that don't set err */ + apimsg->err = ERR_VAL; +#endif /* LWIP_DEBUG */ + +#if LWIP_NETCONN_SEM_PER_THREAD + apimsg->op_completed_sem = LWIP_NETCONN_THREAD_SEM_GET(); +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + err = tcpip_send_msg_wait_sem(fn, apimsg, LWIP_API_MSG_SEM(apimsg)); + if (err == ERR_OK) { + return apimsg->err; + } + return err; +} + +/** + * Create a new netconn (of a specific type) that has a callback function. + * The corresponding pcb is also created. + * + * @param t the type of 'connection' to create (@see enum netconn_type) + * @param proto the IP protocol for RAW IP pcbs + * @param callback a function to call on status changes (RX available, TX'ed) + * @return a newly allocated struct netconn or + * NULL on memory error + */ +struct netconn * +netconn_new_with_proto_and_callback(enum netconn_type t, u8_t proto, netconn_callback callback) +{ + struct netconn *conn; + API_MSG_VAR_DECLARE(msg); + API_MSG_VAR_ALLOC_RETURN_NULL(msg); + + conn = netconn_alloc(t, callback); + if (conn != NULL) { + err_t err; + + API_MSG_VAR_REF(msg).msg.n.proto = proto; + API_MSG_VAR_REF(msg).conn = conn; + err = netconn_apimsg(lwip_netconn_do_newconn, &API_MSG_VAR_REF(msg)); + if (err != ERR_OK) { + LWIP_ASSERT("freeing conn without freeing pcb", conn->pcb.tcp == NULL); + LWIP_ASSERT("conn has no recvmbox", sys_mbox_valid(&conn->recvmbox)); +#if LWIP_TCP + LWIP_ASSERT("conn->acceptmbox shouldn't exist", !sys_mbox_valid(&conn->acceptmbox)); +#endif /* LWIP_TCP */ +#if !LWIP_NETCONN_SEM_PER_THREAD + LWIP_ASSERT("conn has no op_completed", sys_sem_valid(&conn->op_completed)); + sys_sem_free(&conn->op_completed); +#endif /* !LWIP_NETCONN_SEM_PER_THREAD */ + sys_mbox_free(&conn->recvmbox); + memp_free(MEMP_NETCONN, conn); + API_MSG_VAR_FREE(msg); + return NULL; + } + } + API_MSG_VAR_FREE(msg); + return conn; +} + +/** + * @ingroup netconn_common + * Close a netconn 'connection' and free all its resources but not the netconn itself. + * UDP and RAW connection are completely closed, TCP pcbs might still be in a waitstate + * after this returns. + * + * @param conn the netconn to delete + * @return ERR_OK if the connection was deleted + */ +err_t +netconn_prepare_delete(struct netconn *conn) +{ + err_t err; + API_MSG_VAR_DECLARE(msg); + + /* No ASSERT here because possible to get a (conn == NULL) if we got an accept error */ + if (conn == NULL) { + return ERR_OK; + } + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; +#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER + /* get the time we started, which is later compared to + sys_now() + conn->send_timeout */ + API_MSG_VAR_REF(msg).msg.sd.time_started = sys_now(); +#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ +#if LWIP_TCP + API_MSG_VAR_REF(msg).msg.sd.polls_left = + ((LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT + TCP_SLOW_INTERVAL - 1) / TCP_SLOW_INTERVAL) + 1; +#endif /* LWIP_TCP */ +#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + err = netconn_apimsg(lwip_netconn_do_delconn, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + if (err != ERR_OK) { + return err; + } + return ERR_OK; +} + +/** + * @ingroup netconn_common + * Close a netconn 'connection' and free its resources. 
+ * UDP and RAW connection are completely closed, TCP pcbs might still be in a waitstate + * after this returns. + * + * @param conn the netconn to delete + * @return ERR_OK if the connection was deleted + */ +err_t +netconn_delete(struct netconn *conn) +{ + err_t err; + + /* No ASSERT here because possible to get a (conn == NULL) if we got an accept error */ + if (conn == NULL) { + return ERR_OK; + } + +#if LWIP_NETCONN_FULLDUPLEX + if (conn->flags & NETCONN_FLAG_MBOXINVALID) { + /* Already called netconn_prepare_delete() before */ + err = ERR_OK; + } else +#endif /* LWIP_NETCONN_FULLDUPLEX */ + { + err = netconn_prepare_delete(conn); + } + if (err == ERR_OK) { + netconn_free(conn); + } + return err; +} + +/** + * Get the local or remote IP address and port of a netconn. + * For RAW netconns, this returns the protocol instead of a port! + * + * @param conn the netconn to query + * @param addr a pointer to which to save the IP address + * @param port a pointer to which to save the port (or protocol for RAW) + * @param local 1 to get the local IP address, 0 to get the remote one + * @return ERR_CONN for invalid connections + * ERR_OK if the information was retrieved + */ +err_t +netconn_getaddr(struct netconn *conn, ip_addr_t *addr, u16_t *port, u8_t local) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_getaddr: invalid conn", (conn != NULL), return ERR_ARG;); + LWIP_ERROR("netconn_getaddr: invalid addr", (addr != NULL), return ERR_ARG;); + LWIP_ERROR("netconn_getaddr: invalid port", (port != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.ad.local = local; +#if LWIP_MPU_COMPATIBLE + err = netconn_apimsg(lwip_netconn_do_getaddr, &API_MSG_VAR_REF(msg)); + *addr = msg->msg.ad.ipaddr; + *port = msg->msg.ad.port; +#else /* LWIP_MPU_COMPATIBLE */ + msg.msg.ad.ipaddr = addr; + msg.msg.ad.port = port; + err = netconn_apimsg(lwip_netconn_do_getaddr, &msg); +#endif /* LWIP_MPU_COMPATIBLE */ + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_common + * Bind a netconn to a specific local IP address and port. + * Binding one netconn twice might not always be checked correctly! 
+ * + * @param conn the netconn to bind + * @param addr the local IP address to bind the netconn to + * (use IP4_ADDR_ANY/IP6_ADDR_ANY to bind to all addresses) + * @param port the local port to bind the netconn to (not used for RAW) + * @return ERR_OK if bound, any other err_t on failure + */ +err_t +netconn_bind(struct netconn *conn, const ip_addr_t *addr, u16_t port) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_bind: invalid conn", (conn != NULL), return ERR_ARG;); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */ + if (addr == NULL) { + addr = IP4_ADDR_ANY; + } +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV4 && LWIP_IPV6 + /* "Socket API like" dual-stack support: If IP to bind to is IP6_ADDR_ANY, + * and NETCONN_FLAG_IPV6_V6ONLY is 0, use IP_ANY_TYPE to bind + */ + if ((netconn_get_ipv6only(conn) == 0) && + ip_addr_cmp(addr, IP6_ADDR_ANY)) { + addr = IP_ANY_TYPE; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.bc.ipaddr = API_MSG_VAR_REF(addr); + API_MSG_VAR_REF(msg).msg.bc.port = port; + err = netconn_apimsg(lwip_netconn_do_bind, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_common + * Bind a netconn to a specific interface and port. + * Binding one netconn twice might not always be checked correctly! + * + * @param conn the netconn to bind + * @param if_idx the local interface index to bind the netconn to + * @return ERR_OK if bound, any other err_t on failure + */ +err_t +netconn_bind_if(struct netconn *conn, u8_t if_idx) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_bind_if: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.bc.if_idx = if_idx; + err = netconn_apimsg(lwip_netconn_do_bind_if, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_common + * Connect a netconn to a specific remote IP address and port. + * + * @param conn the netconn to connect + * @param addr the remote IP address to connect to + * @param port the remote port to connect to (no used for RAW) + * @return ERR_OK if connected, return value of tcp_/udp_/raw_connect otherwise + */ +err_t +netconn_connect(struct netconn *conn, const ip_addr_t *addr, u16_t port) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_connect: invalid conn", (conn != NULL), return ERR_ARG;); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */ + if (addr == NULL) { + addr = IP4_ADDR_ANY; + } +#endif /* LWIP_IPV4 */ + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.bc.ipaddr = API_MSG_VAR_REF(addr); + API_MSG_VAR_REF(msg).msg.bc.port = port; + err = netconn_apimsg(lwip_netconn_do_connect, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_udp + * Disconnect a netconn from its current peer (only valid for UDP netconns). 
+ * + * @param conn the netconn to disconnect + * @return See @ref err_t + */ +err_t +netconn_disconnect(struct netconn *conn) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_disconnect: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + err = netconn_apimsg(lwip_netconn_do_disconnect, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_tcp + * Set a TCP netconn into listen mode + * + * @param conn the tcp netconn to set to listen mode + * @param backlog the listen backlog, only used if TCP_LISTEN_BACKLOG==1 + * @return ERR_OK if the netconn was set to listen (UDP and RAW netconns + * don't return any error (yet?)) + */ +err_t +netconn_listen_with_backlog(struct netconn *conn, u8_t backlog) +{ +#if LWIP_TCP + API_MSG_VAR_DECLARE(msg); + err_t err; + + /* This does no harm. If TCP_LISTEN_BACKLOG is off, backlog is unused. */ + LWIP_UNUSED_ARG(backlog); + + LWIP_ERROR("netconn_listen: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; +#if TCP_LISTEN_BACKLOG + API_MSG_VAR_REF(msg).msg.lb.backlog = backlog; +#endif /* TCP_LISTEN_BACKLOG */ + err = netconn_apimsg(lwip_netconn_do_listen, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +#else /* LWIP_TCP */ + LWIP_UNUSED_ARG(conn); + LWIP_UNUSED_ARG(backlog); + return ERR_ARG; +#endif /* LWIP_TCP */ +} + +/** + * @ingroup netconn_tcp + * Accept a new connection on a TCP listening netconn. + * + * @param conn the TCP listen netconn + * @param new_conn pointer where the new connection is stored + * @return ERR_OK if a new connection has been received or an error + * code otherwise + */ +err_t +netconn_accept(struct netconn *conn, struct netconn **new_conn) +{ +#if LWIP_TCP + err_t err; + void *accept_ptr; + struct netconn *newconn; +#if TCP_LISTEN_BACKLOG + API_MSG_VAR_DECLARE(msg); +#endif /* TCP_LISTEN_BACKLOG */ + + LWIP_ERROR("netconn_accept: invalid pointer", (new_conn != NULL), return ERR_ARG;); + *new_conn = NULL; + LWIP_ERROR("netconn_accept: invalid conn", (conn != NULL), return ERR_ARG;); + + /* NOTE: Although the opengroup spec says a pending error shall be returned to + send/recv/getsockopt(SO_ERROR) only, we return it for listening + connections also, to handle embedded-system errors */ + err = netconn_err(conn); + if (err != ERR_OK) { + /* return pending error */ + return err; + } + if (!NETCONN_ACCEPTMBOX_WAITABLE(conn)) { + /* don't accept if closed: this might block the application task + waiting on acceptmbox forever! 
*/ + return ERR_CLSD; + } + + API_MSG_VAR_ALLOC_ACCEPT(msg); + + NETCONN_MBOX_WAITING_INC(conn); + if (netconn_is_nonblocking(conn)) { + if (sys_arch_mbox_tryfetch(&conn->acceptmbox, &accept_ptr) == SYS_ARCH_TIMEOUT) { + API_MSG_VAR_FREE_ACCEPT(msg); + NETCONN_MBOX_WAITING_DEC(conn); + return ERR_WOULDBLOCK; + } + } else { +#if LWIP_SO_RCVTIMEO + if (sys_arch_mbox_fetch(&conn->acceptmbox, &accept_ptr, conn->recv_timeout) == SYS_ARCH_TIMEOUT) { + API_MSG_VAR_FREE_ACCEPT(msg); + NETCONN_MBOX_WAITING_DEC(conn); + return ERR_TIMEOUT; + } +#else + sys_arch_mbox_fetch(&conn->acceptmbox, &accept_ptr, 0); +#endif /* LWIP_SO_RCVTIMEO*/ + } + NETCONN_MBOX_WAITING_DEC(conn); +#if LWIP_NETCONN_FULLDUPLEX + if (conn->flags & NETCONN_FLAG_MBOXINVALID) { + if (lwip_netconn_is_deallocated_msg(accept_ptr)) { + /* the netconn has been closed from another thread */ + API_MSG_VAR_FREE_ACCEPT(msg); + return ERR_CONN; + } + } +#endif + + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVMINUS, 0); + + if (lwip_netconn_is_err_msg(accept_ptr, &err)) { + /* a connection has been aborted: e.g. out of pcbs or out of netconns during accept */ + API_MSG_VAR_FREE_ACCEPT(msg); + return err; + } + if (accept_ptr == NULL) { + /* connection has been aborted */ + API_MSG_VAR_FREE_ACCEPT(msg); + return ERR_CLSD; + } + newconn = (struct netconn *)accept_ptr; +#if TCP_LISTEN_BACKLOG + /* Let the stack know that we have accepted the connection. */ + API_MSG_VAR_REF(msg).conn = newconn; + /* don't care for the return value of lwip_netconn_do_recv */ + netconn_apimsg(lwip_netconn_do_accepted, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); +#endif /* TCP_LISTEN_BACKLOG */ + + *new_conn = newconn; + /* don't set conn->last_err: it's only ERR_OK, anyway */ + return ERR_OK; +#else /* LWIP_TCP */ + LWIP_UNUSED_ARG(conn); + LWIP_UNUSED_ARG(new_conn); + return ERR_ARG; +#endif /* LWIP_TCP */ +} + +/** + * @ingroup netconn_common + * Receive data: actual implementation that doesn't care whether pbuf or netbuf + * is received (this is internal, it's just here for describing common errors) + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new pbuf/netbuf is stored when received data + * @param apiflags flags that control function behaviour. 
For now only: + * - NETCONN_DONTBLOCK: only read data that is available now, don't wait for more data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error) + * ERR_CONN if not connected + * ERR_CLSD if TCP connection has been closed + * ERR_WOULDBLOCK if the netconn is nonblocking but would block to wait for data + * ERR_TIMEOUT if the netconn has a receive timeout and no data was received + */ +static err_t +netconn_recv_data(struct netconn *conn, void **new_buf, u8_t apiflags) +{ + void *buf = NULL; + u16_t len; + + LWIP_ERROR("netconn_recv: invalid pointer", (new_buf != NULL), return ERR_ARG;); + *new_buf = NULL; + LWIP_ERROR("netconn_recv: invalid conn", (conn != NULL), return ERR_ARG;); + + if (!NETCONN_RECVMBOX_WAITABLE(conn)) { + err_t err = netconn_err(conn); + if (err != ERR_OK) { + /* return pending error */ + return err; + } + return ERR_CONN; + } + + NETCONN_MBOX_WAITING_INC(conn); + if (netconn_is_nonblocking(conn) || (apiflags & NETCONN_DONTBLOCK) || + (conn->flags & NETCONN_FLAG_MBOXCLOSED) || (conn->pending_err != ERR_OK)) { + if (sys_arch_mbox_tryfetch(&conn->recvmbox, &buf) == SYS_ARCH_TIMEOUT) { + err_t err; + NETCONN_MBOX_WAITING_DEC(conn); + err = netconn_err(conn); + if (err != ERR_OK) { + /* return pending error */ + return err; + } + if (conn->flags & NETCONN_FLAG_MBOXCLOSED) { + return ERR_CONN; + } + return ERR_WOULDBLOCK; + } + } else { +#if LWIP_SO_RCVTIMEO + if (sys_arch_mbox_fetch(&conn->recvmbox, &buf, conn->recv_timeout) == SYS_ARCH_TIMEOUT) { + NETCONN_MBOX_WAITING_DEC(conn); + return ERR_TIMEOUT; + } +#else + sys_arch_mbox_fetch(&conn->recvmbox, &buf, 0); +#endif /* LWIP_SO_RCVTIMEO*/ + } + NETCONN_MBOX_WAITING_DEC(conn); +#if LWIP_NETCONN_FULLDUPLEX + if (conn->flags & NETCONN_FLAG_MBOXINVALID) { + if (lwip_netconn_is_deallocated_msg(buf)) { + /* the netconn has been closed from another thread */ + API_MSG_VAR_FREE_ACCEPT(msg); + return ERR_CONN; + } + } +#endif + +#if LWIP_TCP +#if (LWIP_UDP || LWIP_RAW) + if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) +#endif /* (LWIP_UDP || LWIP_RAW) */ + { + err_t err; + /* Check if this is an error message or a pbuf */ + if (lwip_netconn_is_err_msg(buf, &err)) { + /* new_buf has been zeroed above already */ + if (err == ERR_CLSD) { + /* connection closed translates to ERR_OK with *new_buf == NULL */ + return ERR_OK; + } + return err; + } + len = ((struct pbuf *)buf)->tot_len; + } +#endif /* LWIP_TCP */ +#if LWIP_TCP && (LWIP_UDP || LWIP_RAW) + else +#endif /* LWIP_TCP && (LWIP_UDP || LWIP_RAW) */ +#if (LWIP_UDP || LWIP_RAW) + { + LWIP_ASSERT("buf != NULL", buf != NULL); + len = netbuf_len((struct netbuf *)buf); + } +#endif /* (LWIP_UDP || LWIP_RAW) */ + +#if LWIP_SO_RCVBUF + SYS_ARCH_DEC(conn->recv_avail, len); +#endif /* LWIP_SO_RCVBUF */ + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVMINUS, len); + + LWIP_DEBUGF(API_LIB_DEBUG, ("netconn_recv_data: received %p, len=%"U16_F"\n", buf, len)); + + *new_buf = buf; + /* don't set conn->last_err: it's only ERR_OK, anyway */ + return ERR_OK; +} + +#if LWIP_TCP +static err_t +netconn_tcp_recvd_msg(struct netconn *conn, size_t len, struct api_msg *msg) +{ + LWIP_ERROR("netconn_recv_tcp_pbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) == NETCONN_TCP, return ERR_ARG;); + + msg->conn = conn; + msg->msg.r.len = len; + + return netconn_apimsg(lwip_netconn_do_recv, msg); +} + +err_t +netconn_tcp_recvd(struct netconn *conn, size_t len) +{ + err_t err; + 
API_MSG_VAR_DECLARE(msg); + LWIP_ERROR("netconn_recv_tcp_pbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) == NETCONN_TCP, return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + err = netconn_tcp_recvd_msg(conn, len, &API_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + return err; +} + +static err_t +netconn_recv_data_tcp(struct netconn *conn, struct pbuf **new_buf, u8_t apiflags) +{ + err_t err; + struct pbuf *buf; + API_MSG_VAR_DECLARE(msg); +#if LWIP_MPU_COMPATIBLE + msg = NULL; +#endif + + if (!NETCONN_RECVMBOX_WAITABLE(conn)) { + /* This only happens when calling this function more than once *after* receiving FIN */ + return ERR_CONN; + } + if (netconn_is_flag_set(conn, NETCONN_FIN_RX_PENDING)) { + netconn_clear_flags(conn, NETCONN_FIN_RX_PENDING); + goto handle_fin; + } + + if (!(apiflags & NETCONN_NOAUTORCVD)) { + /* need to allocate API message here so empty message pool does not result in event loss + * see bug #47512: MPU_COMPATIBLE may fail on empty pool */ + API_MSG_VAR_ALLOC(msg); + } + + err = netconn_recv_data(conn, (void **)new_buf, apiflags); + if (err != ERR_OK) { + if (!(apiflags & NETCONN_NOAUTORCVD)) { + API_MSG_VAR_FREE(msg); + } + return err; + } + buf = *new_buf; + if (!(apiflags & NETCONN_NOAUTORCVD)) { + /* Let the stack know that we have taken the data. */ + u16_t len = buf ? buf->tot_len : 1; + /* don't care for the return value of lwip_netconn_do_recv */ + /* @todo: this should really be fixed, e.g. by retrying in poll on error */ + netconn_tcp_recvd_msg(conn, len, &API_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + } + + /* If we are closed, we indicate that we no longer wish to use the socket */ + if (buf == NULL) { + if (apiflags & NETCONN_NOFIN) { + /* received a FIN but the caller cannot handle it right now: + re-enqueue it and return "no data" */ + netconn_set_flags(conn, NETCONN_FIN_RX_PENDING); + return ERR_WOULDBLOCK; + } else { +handle_fin: + API_EVENT(conn, NETCONN_EVT_RCVMINUS, 0); + if (conn->pcb.ip == NULL) { + /* race condition: RST during recv */ + err = netconn_err(conn); + if (err != ERR_OK) { + return err; + } + return ERR_RST; + } + /* RX side is closed, so deallocate the recvmbox */ + netconn_close_shutdown(conn, NETCONN_SHUT_RD); + /* Don' store ERR_CLSD as conn->err since we are only half-closed */ + return ERR_CLSD; + } + } + return err; +} + +/** + * @ingroup netconn_tcp + * Receive data (in form of a pbuf) from a TCP netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new pbuf is stored when received data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error, @see netconn_recv_data) + * ERR_ARG if conn is not a TCP netconn + */ +err_t +netconn_recv_tcp_pbuf(struct netconn *conn, struct pbuf **new_buf) +{ + LWIP_ERROR("netconn_recv_tcp_pbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) == NETCONN_TCP, return ERR_ARG;); + + return netconn_recv_data_tcp(conn, new_buf, 0); +} + +/** + * @ingroup netconn_tcp + * Receive data (in form of a pbuf) from a TCP netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new pbuf is stored when received data + * @param apiflags flags that control function behaviour. 
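[Editor's note] A typical consumer of netconn_recv_data_tcp() is a receive loop built on netconn_recv_tcp_pbuf() (defined just below). The sketch assumes a connected TCP netconn; in ERR_OK cases the returned pbuf is non-NULL, while a clean FIN from the peer surfaces as ERR_CLSD.

#include "lwip/api.h"
#include "lwip/pbuf.h"

static void tcp_drain(struct netconn *conn)
{
  struct pbuf *p;
  err_t err;

  for (;;) {
    err = netconn_recv_tcp_pbuf(conn, &p);
    if (err == ERR_OK) {
      /* process p->tot_len bytes, then release the pbuf chain */
      pbuf_free(p);
    } else if ((err == ERR_TIMEOUT) || (err == ERR_WOULDBLOCK)) {
      /* no data right now; a real application would wait or back off here */
      continue;
    } else {
      /* ERR_CLSD: peer closed its side; ERR_RST/others: pending error */
      break;
    }
  }
}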
For now only: + * - NETCONN_DONTBLOCK: only read data that is available now, don't wait for more data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error, @see netconn_recv_data) + * ERR_ARG if conn is not a TCP netconn + */ +err_t +netconn_recv_tcp_pbuf_flags(struct netconn *conn, struct pbuf **new_buf, u8_t apiflags) +{ + LWIP_ERROR("netconn_recv_tcp_pbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) == NETCONN_TCP, return ERR_ARG;); + + return netconn_recv_data_tcp(conn, new_buf, apiflags); +} +#endif /* LWIP_TCP */ + +/** + * Receive data (in form of a netbuf) from a UDP or RAW netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new netbuf is stored when received data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error) + * ERR_ARG if conn is not a UDP/RAW netconn + */ +err_t +netconn_recv_udp_raw_netbuf(struct netconn *conn, struct netbuf **new_buf) +{ + LWIP_ERROR("netconn_recv_udp_raw_netbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) != NETCONN_TCP, return ERR_ARG;); + + return netconn_recv_data(conn, (void **)new_buf, 0); +} + +/** + * Receive data (in form of a netbuf) from a UDP or RAW netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new netbuf is stored when received data + * @param apiflags flags that control function behaviour. For now only: + * - NETCONN_DONTBLOCK: only read data that is available now, don't wait for more data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error) + * ERR_ARG if conn is not a UDP/RAW netconn + */ +err_t +netconn_recv_udp_raw_netbuf_flags(struct netconn *conn, struct netbuf **new_buf, u8_t apiflags) +{ + LWIP_ERROR("netconn_recv_udp_raw_netbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) != NETCONN_TCP, return ERR_ARG;); + + return netconn_recv_data(conn, (void **)new_buf, apiflags); +} + +/** + * @ingroup netconn_common + * Receive data (in form of a netbuf containing a packet buffer) from a netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new netbuf is stored when received data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error) + */ +err_t +netconn_recv(struct netconn *conn, struct netbuf **new_buf) +{ +#if LWIP_TCP + struct netbuf *buf = NULL; + err_t err; +#endif /* LWIP_TCP */ + + LWIP_ERROR("netconn_recv: invalid pointer", (new_buf != NULL), return ERR_ARG;); + *new_buf = NULL; + LWIP_ERROR("netconn_recv: invalid conn", (conn != NULL), return ERR_ARG;); + +#if LWIP_TCP +#if (LWIP_UDP || LWIP_RAW) + if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) +#endif /* (LWIP_UDP || LWIP_RAW) */ + { + struct pbuf *p = NULL; + /* This is not a listening netconn, since recvmbox is set */ + + buf = (struct netbuf *)memp_malloc(MEMP_NETBUF); + if (buf == NULL) { + return ERR_MEM; + } + + err = netconn_recv_data_tcp(conn, &p, 0); + if (err != ERR_OK) { + memp_free(MEMP_NETBUF, buf); + return err; + } + LWIP_ASSERT("p != NULL", p != NULL); + + buf->p = p; + buf->ptr = p; + buf->port = 0; + ip_addr_set_zero(&buf->addr); + *new_buf = buf; + /* don't set conn->last_err: it's only ERR_OK, anyway */ + return ERR_OK; + } +#endif /* LWIP_TCP */ +#if LWIP_TCP && (LWIP_UDP || 
LWIP_RAW) + else +#endif /* LWIP_TCP && (LWIP_UDP || LWIP_RAW) */ + { +#if (LWIP_UDP || LWIP_RAW) + return netconn_recv_data(conn, (void **)new_buf, 0); +#endif /* (LWIP_UDP || LWIP_RAW) */ + } +} + +/** + * @ingroup netconn_udp + * Send data (in form of a netbuf) to a specific remote IP address and port. + * Only to be used for UDP and RAW netconns (not TCP). + * + * @param conn the netconn over which to send data + * @param buf a netbuf containing the data to send + * @param addr the remote IP address to which to send the data + * @param port the remote port to which to send the data + * @return ERR_OK if data was sent, any other err_t on error + */ +err_t +netconn_sendto(struct netconn *conn, struct netbuf *buf, const ip_addr_t *addr, u16_t port) +{ + if (buf != NULL) { + ip_addr_set(&buf->addr, addr); + buf->port = port; + return netconn_send(conn, buf); + } + return ERR_VAL; +} + +/** + * @ingroup netconn_udp + * Send data over a UDP or RAW netconn (that is already connected). + * + * @param conn the UDP or RAW netconn over which to send data + * @param buf a netbuf containing the data to send + * @return ERR_OK if data was sent, any other err_t on error + */ +err_t +netconn_send(struct netconn *conn, struct netbuf *buf) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_send: invalid conn", (conn != NULL), return ERR_ARG;); + + LWIP_DEBUGF(API_LIB_DEBUG, ("netconn_send: sending %"U16_F" bytes\n", buf->p->tot_len)); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.b = buf; + err = netconn_apimsg(lwip_netconn_do_send, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_tcp + * Send data over a TCP netconn. + * + * @param conn the TCP netconn over which to send data + * @param dataptr pointer to the application buffer that contains the data to send + * @param size size of the application data to send + * @param apiflags combination of following flags : + * - NETCONN_COPY: data will be copied into memory belonging to the stack + * - NETCONN_MORE: for TCP connection, PSH flag will be set on last segment sent + * - NETCONN_DONTBLOCK: only write the data if all data can be written at once + * @param bytes_written pointer to a location that receives the number of written bytes + * @return ERR_OK if data was sent, any other err_t on error + */ +err_t +netconn_write_partly(struct netconn *conn, const void *dataptr, size_t size, + u8_t apiflags, size_t *bytes_written) +{ + struct netvector vector; + vector.ptr = dataptr; + vector.len = size; + return netconn_write_vectors_partly(conn, &vector, 1, apiflags, bytes_written); +} + +/** + * Send vectorized data atomically over a TCP netconn. 
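[Editor's note] For UDP, netconn_recv() hands back a netbuf whose source address and port were filled in by the stack, so netconn_sendto() (shown nearby) can answer the sender directly. A minimal sketch, assuming a UDP netconn already created with netconn_new(NETCONN_UDP) and bound:

#include "lwip/api.h"

static void udp_echo_once(struct netconn *udp_conn)
{
  struct netbuf *buf;
  err_t err;

  err = netconn_recv(udp_conn, &buf);        /* blocks until a datagram arrives */
  if (err != ERR_OK) {
    return;
  }
  /* send the same netbuf straight back to where it came from */
  err = netconn_sendto(udp_conn, buf,
                       netbuf_fromaddr(buf), netbuf_fromport(buf));
  (void)err;
  netbuf_delete(buf);                        /* the application owns the netbuf */
}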
+ * + * @param conn the TCP netconn over which to send data + * @param vectors array of vectors containing data to send + * @param vectorcnt number of vectors in the array + * @param apiflags combination of following flags : + * - NETCONN_COPY: data will be copied into memory belonging to the stack + * - NETCONN_MORE: for TCP connection, PSH flag will be set on last segment sent + * - NETCONN_DONTBLOCK: only write the data if all data can be written at once + * @param bytes_written pointer to a location that receives the number of written bytes + * @return ERR_OK if data was sent, any other err_t on error + */ +err_t +netconn_write_vectors_partly(struct netconn *conn, struct netvector *vectors, u16_t vectorcnt, + u8_t apiflags, size_t *bytes_written) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + u8_t dontblock; + size_t size; + int i; + + LWIP_ERROR("netconn_write: invalid conn", (conn != NULL), return ERR_ARG;); + LWIP_ERROR("netconn_write: invalid conn->type", (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP), return ERR_VAL;); + dontblock = netconn_is_nonblocking(conn) || (apiflags & NETCONN_DONTBLOCK); +#if LWIP_SO_SNDTIMEO + if (conn->send_timeout != 0) { + dontblock = 1; + } +#endif /* LWIP_SO_SNDTIMEO */ + if (dontblock && !bytes_written) { + /* This implies netconn_write() cannot be used for non-blocking send, since + it has no way to return the number of bytes written. */ + return ERR_VAL; + } + + /* sum up the total size */ + size = 0; + for (i = 0; i < vectorcnt; i++) { + size += vectors[i].len; + if (size < vectors[i].len) { + /* overflow */ + return ERR_VAL; + } + } + if (size == 0) { + return ERR_OK; + } else if (size > SSIZE_MAX) { + ssize_t limited; + /* this is required by the socket layer (cannot send full size_t range) */ + if (!bytes_written) { + return ERR_VAL; + } + /* limit the amount of data to send */ + limited = SSIZE_MAX; + size = (size_t)limited; + } + + API_MSG_VAR_ALLOC(msg); + /* non-blocking write sends as much */ + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.w.vector = vectors; + API_MSG_VAR_REF(msg).msg.w.vector_cnt = vectorcnt; + API_MSG_VAR_REF(msg).msg.w.vector_off = 0; + API_MSG_VAR_REF(msg).msg.w.apiflags = apiflags; + API_MSG_VAR_REF(msg).msg.w.len = size; + API_MSG_VAR_REF(msg).msg.w.offset = 0; +#if LWIP_SO_SNDTIMEO + if (conn->send_timeout != 0) { + /* get the time we started, which is later compared to + sys_now() + conn->send_timeout */ + API_MSG_VAR_REF(msg).msg.w.time_started = sys_now(); + } else { + API_MSG_VAR_REF(msg).msg.w.time_started = 0; + } +#endif /* LWIP_SO_SNDTIMEO */ + + /* For locking the core: this _can_ be delayed on low memory/low send buffer, + but if it is, this is done inside api_msg.c:do_write(), so we can use the + non-blocking version here. */ + err = netconn_apimsg(lwip_netconn_do_write, &API_MSG_VAR_REF(msg)); + if (err == ERR_OK) { + if (bytes_written != NULL) { + *bytes_written = API_MSG_VAR_REF(msg).msg.w.offset; + } + /* for blocking, check all requested bytes were written, NOTE: send_timeout is + treated as dontblock (see dontblock assignment above) */ + if (!dontblock) { + LWIP_ASSERT("do_write failed to write all bytes", API_MSG_VAR_REF(msg).msg.w.offset == size); + } + } + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_tcp + * Close or shutdown a TCP netconn (doesn't delete it). + * + * @param conn the TCP netconn to close or shutdown + * @param how fully close or only shutdown one side? 
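[Editor's note] Because netconn_write_partly() may accept fewer bytes than requested (nonblocking netconns, SO_SNDTIMEO), callers usually loop over the remainder. A sketch under that assumption; names are illustrative and a real application would wait for NETCONN_EVT_SENDPLUS instead of spinning on ERR_WOULDBLOCK.

#include "lwip/api.h"

static err_t send_all(struct netconn *conn, const void *data, size_t len)
{
  const u8_t *ptr = (const u8_t *)data;
  size_t written;
  err_t err;

  while (len > 0) {
    written = 0;
    err = netconn_write_partly(conn, ptr, len, NETCONN_COPY, &written);
    if ((err == ERR_OK) || (err == ERR_WOULDBLOCK)) {
      ptr += written;          /* advance past what the stack accepted */
      len -= written;
    } else {
      return err;              /* connection error: give up */
    }
  }
  return ERR_OK;
}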
+ * @return ERR_OK if the netconn was closed, any other err_t on error + */ +static err_t +netconn_close_shutdown(struct netconn *conn, u8_t how) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + LWIP_UNUSED_ARG(how); + + LWIP_ERROR("netconn_close: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; +#if LWIP_TCP + /* shutting down both ends is the same as closing */ + API_MSG_VAR_REF(msg).msg.sd.shut = how; +#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER + /* get the time we started, which is later compared to + sys_now() + conn->send_timeout */ + API_MSG_VAR_REF(msg).msg.sd.time_started = sys_now(); +#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + API_MSG_VAR_REF(msg).msg.sd.polls_left = + ((LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT + TCP_SLOW_INTERVAL - 1) / TCP_SLOW_INTERVAL) + 1; +#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ +#endif /* LWIP_TCP */ + err = netconn_apimsg(lwip_netconn_do_close, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_tcp + * Close a TCP netconn (doesn't delete it). + * + * @param conn the TCP netconn to close + * @return ERR_OK if the netconn was closed, any other err_t on error + */ +err_t +netconn_close(struct netconn *conn) +{ + /* shutting down both ends is the same as closing */ + return netconn_close_shutdown(conn, NETCONN_SHUT_RDWR); +} + +/** + * @ingroup netconn_common + * Get and reset pending error on a netconn + * + * @param conn the netconn to get the error from + * @return and pending error or ERR_OK if no error was pending + */ +err_t +netconn_err(struct netconn *conn) +{ + err_t err; + SYS_ARCH_DECL_PROTECT(lev); + if (conn == NULL) { + return ERR_OK; + } + SYS_ARCH_PROTECT(lev); + err = conn->pending_err; + conn->pending_err = ERR_OK; + SYS_ARCH_UNPROTECT(lev); + return err; +} + +/** + * @ingroup netconn_tcp + * Shut down one or both sides of a TCP netconn (doesn't delete it). + * + * @param conn the TCP netconn to shut down + * @param shut_rx shut down the RX side (no more read possible after this) + * @param shut_tx shut down the TX side (no more write possible after this) + * @return ERR_OK if the netconn was closed, any other err_t on error + */ +err_t +netconn_shutdown(struct netconn *conn, u8_t shut_rx, u8_t shut_tx) +{ + return netconn_close_shutdown(conn, (u8_t)((shut_rx ? NETCONN_SHUT_RD : 0) | (shut_tx ? NETCONN_SHUT_WR : 0))); +} + +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +/** + * @ingroup netconn_udp + * Join multicast groups for UDP netconns. 
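[Editor's note] netconn_shutdown(), netconn_close() and netconn_err() above are usually combined for a graceful teardown: half-close the TX side, drain whatever the peer still sends, then close fully. A sketch only; the drain loop and the discarded netconn_err() result are illustrative choices.

#include "lwip/api.h"
#include "lwip/pbuf.h"

static void tcp_graceful_close(struct netconn *conn)
{
  struct pbuf *p;

  netconn_shutdown(conn, 0, 1);              /* shut TX only: FIN is sent, RX stays open */
  while (netconn_recv_tcp_pbuf(conn, &p) == ERR_OK) {
    pbuf_free(p);                            /* discard remaining peer data */
  }
  (void)netconn_err(conn);                   /* fetch (and clear) any pending error, e.g. from a RST */
  netconn_close(conn);                       /* equivalent to NETCONN_SHUT_RDWR */
  netconn_delete(conn);
}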
+ * + * @param conn the UDP netconn for which to change multicast addresses + * @param multiaddr IP address of the multicast group to join or leave + * @param netif_addr the IP address of the network interface on which to send + * the igmp message + * @param join_or_leave flag whether to send a join- or leave-message + * @return ERR_OK if the action was taken, any err_t on error + */ +err_t +netconn_join_leave_group(struct netconn *conn, + const ip_addr_t *multiaddr, + const ip_addr_t *netif_addr, + enum netconn_igmp join_or_leave) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_join_leave_group: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */ + if (multiaddr == NULL) { + multiaddr = IP4_ADDR_ANY; + } + if (netif_addr == NULL) { + netif_addr = IP4_ADDR_ANY; + } +#endif /* LWIP_IPV4 */ + + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.jl.multiaddr = API_MSG_VAR_REF(multiaddr); + API_MSG_VAR_REF(msg).msg.jl.netif_addr = API_MSG_VAR_REF(netif_addr); + API_MSG_VAR_REF(msg).msg.jl.join_or_leave = join_or_leave; + err = netconn_apimsg(lwip_netconn_do_join_leave_group, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} +/** + * @ingroup netconn_udp + * Join multicast groups for UDP netconns. + * + * @param conn the UDP netconn for which to change multicast addresses + * @param multiaddr IP address of the multicast group to join or leave + * @param if_idx the index of the netif + * @param join_or_leave flag whether to send a join- or leave-message + * @return ERR_OK if the action was taken, any err_t on error + */ +err_t +netconn_join_leave_group_netif(struct netconn *conn, + const ip_addr_t *multiaddr, + u8_t if_idx, + enum netconn_igmp join_or_leave) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_join_leave_group: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */ + if (multiaddr == NULL) { + multiaddr = IP4_ADDR_ANY; + } + if (if_idx == NETIF_NO_INDEX) { + return ERR_IF; + } +#endif /* LWIP_IPV4 */ + + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.jl.multiaddr = API_MSG_VAR_REF(multiaddr); + API_MSG_VAR_REF(msg).msg.jl.if_idx = if_idx; + API_MSG_VAR_REF(msg).msg.jl.join_or_leave = join_or_leave; + err = netconn_apimsg(lwip_netconn_do_join_leave_group_netif, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ + +#if LWIP_DNS +/** + * @ingroup netconn_common + * Execute a DNS query, only one IP address is returned + * + * @param name a string representation of the DNS host name to query + * @param addr a preallocated ip_addr_t where to store the resolved IP address + * @param dns_addrtype IP address type (IPv4 / IPv6) + * @return ERR_OK: resolving succeeded + * ERR_MEM: memory error, try again later + * ERR_ARG: dns client not initialized or invalid hostname + * ERR_VAL: dns server response was invalid + */ +#if LWIP_IPV4 && LWIP_IPV6 +err_t +netconn_gethostbyname_addrtype(const char *name, ip_addr_t *addr, u8_t dns_addrtype) +#else +err_t +netconn_gethostbyname(const char *name, ip_addr_t *addr) +#endif +{ + API_VAR_DECLARE(struct dns_api_msg, msg); +#if !LWIP_MPU_COMPATIBLE + sys_sem_t sem; +#endif /* LWIP_MPU_COMPATIBLE */ + err_t err; + err_t cberr; + + LWIP_ERROR("netconn_gethostbyname: 
invalid name", (name != NULL), return ERR_ARG;); + LWIP_ERROR("netconn_gethostbyname: invalid addr", (addr != NULL), return ERR_ARG;); +#if LWIP_MPU_COMPATIBLE + if (strlen(name) >= DNS_MAX_NAME_LENGTH) { + return ERR_ARG; + } +#endif + +#ifdef LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE +#if LWIP_IPV4 && LWIP_IPV6 + if (LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE(name, addr, dns_addrtype, &err)) { +#else + if (LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE(name, addr, NETCONN_DNS_DEFAULT, &err)) { +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + return err; + } +#endif /* LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE */ + + API_VAR_ALLOC(struct dns_api_msg, MEMP_DNS_API_MSG, msg, ERR_MEM); +#if LWIP_MPU_COMPATIBLE + strncpy(API_VAR_REF(msg).name, name, DNS_MAX_NAME_LENGTH - 1); + API_VAR_REF(msg).name[DNS_MAX_NAME_LENGTH - 1] = 0; +#else /* LWIP_MPU_COMPATIBLE */ + msg.err = &err; + msg.sem = &sem; + API_VAR_REF(msg).addr = API_VAR_REF(addr); + API_VAR_REF(msg).name = name; +#endif /* LWIP_MPU_COMPATIBLE */ +#if LWIP_IPV4 && LWIP_IPV6 + API_VAR_REF(msg).dns_addrtype = dns_addrtype; +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_NETCONN_SEM_PER_THREAD + API_VAR_REF(msg).sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else /* LWIP_NETCONN_SEM_PER_THREAD*/ + err = sys_sem_new(API_EXPR_REF(API_VAR_REF(msg).sem), 0); + if (err != ERR_OK) { + API_VAR_FREE(MEMP_DNS_API_MSG, msg); + return err; + } +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + cberr = tcpip_send_msg_wait_sem(lwip_netconn_do_gethostbyname, &API_VAR_REF(msg), API_EXPR_REF(API_VAR_REF(msg).sem)); +#if !LWIP_NETCONN_SEM_PER_THREAD + sys_sem_free(API_EXPR_REF(API_VAR_REF(msg).sem)); +#endif /* !LWIP_NETCONN_SEM_PER_THREAD */ + if (cberr != ERR_OK) { + API_VAR_FREE(MEMP_DNS_API_MSG, msg); + return cberr; + } + +#if LWIP_MPU_COMPATIBLE + *addr = msg->addr; + err = msg->err; +#endif /* LWIP_MPU_COMPATIBLE */ + + API_VAR_FREE(MEMP_DNS_API_MSG, msg); + return err; +} +#endif /* LWIP_DNS*/ + +#if LWIP_NETCONN_SEM_PER_THREAD +void +netconn_thread_init(void) +{ + sys_sem_t *sem = LWIP_NETCONN_THREAD_SEM_GET(); + if ((sem == NULL) || !sys_sem_valid(sem)) { + /* call alloc only once */ + LWIP_NETCONN_THREAD_SEM_ALLOC(); + LWIP_ASSERT("LWIP_NETCONN_THREAD_SEM_ALLOC() failed", sys_sem_valid(LWIP_NETCONN_THREAD_SEM_GET())); + } +} + +void +netconn_thread_cleanup(void) +{ + sys_sem_t *sem = LWIP_NETCONN_THREAD_SEM_GET(); + if ((sem != NULL) && sys_sem_valid(sem)) { + /* call free only once */ + LWIP_NETCONN_THREAD_SEM_FREE(); + } +} +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + +#endif /* LWIP_NETCONN */ diff --git a/Middlewares/Third_Party/LwIP/src/api/api_msg.c b/Middlewares/Third_Party/LwIP/src/api/api_msg.c new file mode 100644 index 0000000..3953102 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/api/api_msg.c @@ -0,0 +1,2173 @@ +/** + * @file + * Sequential API Internal module + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_NETCONN /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/priv/api_msg.h" + +#include "lwip/ip.h" +#include "lwip/ip_addr.h" +#include "lwip/udp.h" +#include "lwip/tcp.h" +#include "lwip/raw.h" + +#include "lwip/memp.h" +#include "lwip/igmp.h" +#include "lwip/dns.h" +#include "lwip/mld6.h" +#include "lwip/priv/tcpip_priv.h" + +#include + +/* netconns are polled once per second (e.g. continue write on memory error) */ +#define NETCONN_TCP_POLL_INTERVAL 2 + +#define SET_NONBLOCKING_CONNECT(conn, val) do { if (val) { \ + netconn_set_flags(conn, NETCONN_FLAG_IN_NONBLOCKING_CONNECT); \ +} else { \ + netconn_clear_flags(conn, NETCONN_FLAG_IN_NONBLOCKING_CONNECT); }} while(0) +#define IN_NONBLOCKING_CONNECT(conn) netconn_is_flag_set(conn, NETCONN_FLAG_IN_NONBLOCKING_CONNECT) + +#if LWIP_NETCONN_FULLDUPLEX +#define NETCONN_MBOX_VALID(conn, mbox) (sys_mbox_valid(mbox) && ((conn->flags & NETCONN_FLAG_MBOXINVALID) == 0)) +#else +#define NETCONN_MBOX_VALID(conn, mbox) sys_mbox_valid(mbox) +#endif + +/* forward declarations */ +#if LWIP_TCP +#if LWIP_TCPIP_CORE_LOCKING +#define WRITE_DELAYED , 1 +#define WRITE_DELAYED_PARAM , u8_t delayed +#else /* LWIP_TCPIP_CORE_LOCKING */ +#define WRITE_DELAYED +#define WRITE_DELAYED_PARAM +#endif /* LWIP_TCPIP_CORE_LOCKING */ +static err_t lwip_netconn_do_writemore(struct netconn *conn WRITE_DELAYED_PARAM); +static err_t lwip_netconn_do_close_internal(struct netconn *conn WRITE_DELAYED_PARAM); +#endif + +static void netconn_drain(struct netconn *conn); + +#if LWIP_TCPIP_CORE_LOCKING +#define TCPIP_APIMSG_ACK(m) +#else /* LWIP_TCPIP_CORE_LOCKING */ +#define TCPIP_APIMSG_ACK(m) do { sys_sem_signal(LWIP_API_MSG_SEM(m)); } while(0) +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +#if LWIP_NETCONN_FULLDUPLEX +const u8_t netconn_deleted = 0; + +int +lwip_netconn_is_deallocated_msg(void *msg) +{ + if (msg == &netconn_deleted) { + return 1; + } + return 0; +} +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +#if LWIP_TCP +const u8_t netconn_aborted = 0; +const u8_t netconn_reset = 0; +const u8_t netconn_closed = 0; + +/** Translate an error to a unique void* passed via an mbox */ +static void * +lwip_netconn_err_to_msg(err_t err) +{ + switch (err) { + case ERR_ABRT: + return LWIP_CONST_CAST(void *, &netconn_aborted); + case ERR_RST: + return LWIP_CONST_CAST(void *, &netconn_reset); + case ERR_CLSD: + return LWIP_CONST_CAST(void *, &netconn_closed); + default: + LWIP_ASSERT("unhandled error", err == ERR_OK); + return NULL; + } +} + +int +lwip_netconn_is_err_msg(void 
*msg, err_t *err) +{ + LWIP_ASSERT("err != NULL", err != NULL); + + if (msg == &netconn_aborted) { + *err = ERR_ABRT; + return 1; + } else if (msg == &netconn_reset) { + *err = ERR_RST; + return 1; + } else if (msg == &netconn_closed) { + *err = ERR_CLSD; + return 1; + } + return 0; +} +#endif /* LWIP_TCP */ + + +#if LWIP_RAW +/** + * Receive callback function for RAW netconns. + * Doesn't 'eat' the packet, only copies it and sends it to + * conn->recvmbox + * + * @see raw.h (struct raw_pcb.recv) for parameters and return value + */ +static u8_t +recv_raw(void *arg, struct raw_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr) +{ + struct pbuf *q; + struct netbuf *buf; + struct netconn *conn; + + LWIP_UNUSED_ARG(addr); + conn = (struct netconn *)arg; + + if ((conn != NULL) && NETCONN_MBOX_VALID(conn, &conn->recvmbox)) { +#if LWIP_SO_RCVBUF + int recv_avail; + SYS_ARCH_GET(conn->recv_avail, recv_avail); + if ((recv_avail + (int)(p->tot_len)) > conn->recv_bufsize) { + return 0; + } +#endif /* LWIP_SO_RCVBUF */ + /* copy the whole packet into new pbufs */ + q = pbuf_clone(PBUF_RAW, PBUF_RAM, p); + if (q != NULL) { + u16_t len; + buf = (struct netbuf *)memp_malloc(MEMP_NETBUF); + if (buf == NULL) { + pbuf_free(q); + return 0; + } + + buf->p = q; + buf->ptr = q; + ip_addr_copy(buf->addr, *ip_current_src_addr()); + buf->port = pcb->protocol; + + len = q->tot_len; + if (sys_mbox_trypost(&conn->recvmbox, buf) != ERR_OK) { + netbuf_delete(buf); + return 0; + } else { +#if LWIP_SO_RCVBUF + SYS_ARCH_INC(conn->recv_avail, len); +#endif /* LWIP_SO_RCVBUF */ + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, len); + } + } + } + + return 0; /* do not eat the packet */ +} +#endif /* LWIP_RAW*/ + +#if LWIP_UDP +/** + * Receive callback function for UDP netconns. + * Posts the packet to conn->recvmbox or deletes it on memory error. + * + * @see udp.h (struct udp_pcb.recv) for parameters + */ +static void +recv_udp(void *arg, struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr, u16_t port) +{ + struct netbuf *buf; + struct netconn *conn; + u16_t len; +#if LWIP_SO_RCVBUF + int recv_avail; +#endif /* LWIP_SO_RCVBUF */ + + LWIP_UNUSED_ARG(pcb); /* only used for asserts... 
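[Editor's note] lwip_netconn_err_to_msg()/lwip_netconn_is_err_msg() above rely on the address of a const object being a process-wide unique value, so an error can travel through a void* mailbox in-band with data pointers. The same pattern in isolation (names are illustrative, not lwIP's):

static const int demo_marker_closed = 0;

static void *demo_encode_closed(void)
{
  /* post this pointer into the mailbox instead of a data buffer */
  return (void *)&demo_marker_closed;
}

static int demo_is_closed_marker(void *msg)
{
  /* recognised purely by pointer identity, no dereference needed */
  return msg == (void *)&demo_marker_closed;
}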
*/ + LWIP_ASSERT("recv_udp must have a pcb argument", pcb != NULL); + LWIP_ASSERT("recv_udp must have an argument", arg != NULL); + conn = (struct netconn *)arg; + + if (conn == NULL) { + pbuf_free(p); + return; + } + + LWIP_ASSERT("recv_udp: recv for wrong pcb!", conn->pcb.udp == pcb); + +#if LWIP_SO_RCVBUF + SYS_ARCH_GET(conn->recv_avail, recv_avail); + if (!NETCONN_MBOX_VALID(conn, &conn->recvmbox) || + ((recv_avail + (int)(p->tot_len)) > conn->recv_bufsize)) { +#else /* LWIP_SO_RCVBUF */ + if (!NETCONN_MBOX_VALID(conn, &conn->recvmbox)) { +#endif /* LWIP_SO_RCVBUF */ + pbuf_free(p); + return; + } + + buf = (struct netbuf *)memp_malloc(MEMP_NETBUF); + if (buf == NULL) { + pbuf_free(p); + return; + } else { + buf->p = p; + buf->ptr = p; + ip_addr_set(&buf->addr, addr); + buf->port = port; +#if LWIP_NETBUF_RECVINFO + if (conn->flags & NETCONN_FLAG_PKTINFO) { + /* get the UDP header - always in the first pbuf, ensured by udp_input */ + const struct udp_hdr *udphdr = (const struct udp_hdr *)ip_next_header_ptr(); + buf->flags = NETBUF_FLAG_DESTADDR; + ip_addr_set(&buf->toaddr, ip_current_dest_addr()); + buf->toport_chksum = udphdr->dest; + } +#endif /* LWIP_NETBUF_RECVINFO */ + } + + len = p->tot_len; + if (sys_mbox_trypost(&conn->recvmbox, buf) != ERR_OK) { + netbuf_delete(buf); + return; + } else { +#if LWIP_SO_RCVBUF + SYS_ARCH_INC(conn->recv_avail, len); +#endif /* LWIP_SO_RCVBUF */ + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, len); + } +} +#endif /* LWIP_UDP */ + +#if LWIP_TCP +/** + * Receive callback function for TCP netconns. + * Posts the packet to conn->recvmbox, but doesn't delete it on errors. + * + * @see tcp.h (struct tcp_pcb.recv) for parameters and return value + */ +static err_t +recv_tcp(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err) +{ + struct netconn *conn; + u16_t len; + void *msg; + + LWIP_UNUSED_ARG(pcb); + LWIP_ASSERT("recv_tcp must have a pcb argument", pcb != NULL); + LWIP_ASSERT("recv_tcp must have an argument", arg != NULL); + LWIP_ASSERT("err != ERR_OK unhandled", err == ERR_OK); + LWIP_UNUSED_ARG(err); /* for LWIP_NOASSERT */ + conn = (struct netconn *)arg; + + if (conn == NULL) { + return ERR_VAL; + } + LWIP_ASSERT("recv_tcp: recv for wrong pcb!", conn->pcb.tcp == pcb); + + if (!NETCONN_MBOX_VALID(conn, &conn->recvmbox)) { + /* recvmbox already deleted */ + if (p != NULL) { + tcp_recved(pcb, p->tot_len); + pbuf_free(p); + } + return ERR_OK; + } + /* Unlike for UDP or RAW pcbs, don't check for available space + using recv_avail since that could break the connection + (data is already ACKed) */ + + if (p != NULL) { + msg = p; + len = p->tot_len; + } else { + msg = LWIP_CONST_CAST(void *, &netconn_closed); + len = 0; + } + + if (sys_mbox_trypost(&conn->recvmbox, msg) != ERR_OK) { + /* don't deallocate p: it is presented to us later again from tcp_fasttmr! */ + return ERR_MEM; + } else { +#if LWIP_SO_RCVBUF + SYS_ARCH_INC(conn->recv_avail, len); +#endif /* LWIP_SO_RCVBUF */ + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, len); + } + + return ERR_OK; +} + +/** + * Poll callback function for TCP netconns. + * Wakes up an application thread that waits for a connection to close + * or data to be sent. The application thread then takes the + * appropriate action to go on. + * + * Signals the conn->sem. + * netconn_close waits for conn->sem if closing failed. 
+ * + * @see tcp.h (struct tcp_pcb.poll) for parameters and return value + */ +static err_t +poll_tcp(void *arg, struct tcp_pcb *pcb) +{ + struct netconn *conn = (struct netconn *)arg; + + LWIP_UNUSED_ARG(pcb); + LWIP_ASSERT("conn != NULL", (conn != NULL)); + + if (conn->state == NETCONN_WRITE) { + lwip_netconn_do_writemore(conn WRITE_DELAYED); + } else if (conn->state == NETCONN_CLOSE) { +#if !LWIP_SO_SNDTIMEO && !LWIP_SO_LINGER + if (conn->current_msg && conn->current_msg->msg.sd.polls_left) { + conn->current_msg->msg.sd.polls_left--; + } +#endif /* !LWIP_SO_SNDTIMEO && !LWIP_SO_LINGER */ + lwip_netconn_do_close_internal(conn WRITE_DELAYED); + } + /* @todo: implement connect timeout here? */ + + /* Did a nonblocking write fail before? Then check available write-space. */ + if (conn->flags & NETCONN_FLAG_CHECK_WRITESPACE) { + /* If the queued byte- or pbuf-count drops below the configured low-water limit, + let select mark this pcb as writable again. */ + if ((conn->pcb.tcp != NULL) && (tcp_sndbuf(conn->pcb.tcp) > TCP_SNDLOWAT) && + (tcp_sndqueuelen(conn->pcb.tcp) < TCP_SNDQUEUELOWAT)) { + netconn_clear_flags(conn, NETCONN_FLAG_CHECK_WRITESPACE); + API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0); + } + } + + return ERR_OK; +} + +/** + * Sent callback function for TCP netconns. + * Signals the conn->sem and calls API_EVENT. + * netconn_write waits for conn->sem if send buffer is low. + * + * @see tcp.h (struct tcp_pcb.sent) for parameters and return value + */ +static err_t +sent_tcp(void *arg, struct tcp_pcb *pcb, u16_t len) +{ + struct netconn *conn = (struct netconn *)arg; + + LWIP_UNUSED_ARG(pcb); + LWIP_ASSERT("conn != NULL", (conn != NULL)); + + if (conn) { + if (conn->state == NETCONN_WRITE) { + lwip_netconn_do_writemore(conn WRITE_DELAYED); + } else if (conn->state == NETCONN_CLOSE) { + lwip_netconn_do_close_internal(conn WRITE_DELAYED); + } + + /* If the queued byte- or pbuf-count drops below the configured low-water limit, + let select mark this pcb as writable again. */ + if ((conn->pcb.tcp != NULL) && (tcp_sndbuf(conn->pcb.tcp) > TCP_SNDLOWAT) && + (tcp_sndqueuelen(conn->pcb.tcp) < TCP_SNDQUEUELOWAT)) { + netconn_clear_flags(conn, NETCONN_FLAG_CHECK_WRITESPACE); + API_EVENT(conn, NETCONN_EVT_SENDPLUS, len); + } + } + + return ERR_OK; +} + +/** + * Error callback function for TCP netconns. + * Signals conn->sem, posts to all conn mboxes and calls API_EVENT. + * The application thread has then to decide what to do. + * + * @see tcp.h (struct tcp_pcb.err) for parameters + */ +static void +err_tcp(void *arg, err_t err) +{ + struct netconn *conn; + enum netconn_state old_state; + void *mbox_msg; + SYS_ARCH_DECL_PROTECT(lev); + + conn = (struct netconn *)arg; + LWIP_ASSERT("conn != NULL", (conn != NULL)); + + SYS_ARCH_PROTECT(lev); + + /* when err is called, the pcb is deallocated, so delete the reference */ + conn->pcb.tcp = NULL; + /* store pending error */ + conn->pending_err = err; + /* prevent application threads from blocking on 'recvmbox'/'acceptmbox' */ + conn->flags |= NETCONN_FLAG_MBOXCLOSED; + + /* reset conn->state now before waking up other threads */ + old_state = conn->state; + conn->state = NETCONN_NONE; + + SYS_ARCH_UNPROTECT(lev); + + /* Notify the user layer about a connection error. Used to signal select. */ + API_EVENT(conn, NETCONN_EVT_ERROR, 0); + /* Try to release selects pending on 'read' or 'write', too. + They will get an error if they actually try to read or write. 
*/ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0); + + mbox_msg = lwip_netconn_err_to_msg(err); + /* pass error message to recvmbox to wake up pending recv */ + if (NETCONN_MBOX_VALID(conn, &conn->recvmbox)) { + /* use trypost to prevent deadlock */ + sys_mbox_trypost(&conn->recvmbox, mbox_msg); + } + /* pass error message to acceptmbox to wake up pending accept */ + if (NETCONN_MBOX_VALID(conn, &conn->acceptmbox)) { + /* use trypost to preven deadlock */ + sys_mbox_trypost(&conn->acceptmbox, mbox_msg); + } + + if ((old_state == NETCONN_WRITE) || (old_state == NETCONN_CLOSE) || + (old_state == NETCONN_CONNECT)) { + /* calling lwip_netconn_do_writemore/lwip_netconn_do_close_internal is not necessary + since the pcb has already been deleted! */ + int was_nonblocking_connect = IN_NONBLOCKING_CONNECT(conn); + SET_NONBLOCKING_CONNECT(conn, 0); + + if (!was_nonblocking_connect) { + sys_sem_t *op_completed_sem; + /* set error return code */ + LWIP_ASSERT("conn->current_msg != NULL", conn->current_msg != NULL); + if (old_state == NETCONN_CLOSE) { + /* let close succeed: the connection is closed after all... */ + conn->current_msg->err = ERR_OK; + } else { + /* Write and connect fail */ + conn->current_msg->err = err; + } + op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg); + LWIP_ASSERT("inavlid op_completed_sem", sys_sem_valid(op_completed_sem)); + conn->current_msg = NULL; + /* wake up the waiting task */ + sys_sem_signal(op_completed_sem); + } else { + /* @todo: test what happens for error on nonblocking connect */ + } + } else { + LWIP_ASSERT("conn->current_msg == NULL", conn->current_msg == NULL); + } +} + +/** + * Setup a tcp_pcb with the correct callback function pointers + * and their arguments. + * + * @param conn the TCP netconn to setup + */ +static void +setup_tcp(struct netconn *conn) +{ + struct tcp_pcb *pcb; + + pcb = conn->pcb.tcp; + tcp_arg(pcb, conn); + tcp_recv(pcb, recv_tcp); + tcp_sent(pcb, sent_tcp); + tcp_poll(pcb, poll_tcp, NETCONN_TCP_POLL_INTERVAL); + tcp_err(pcb, err_tcp); +} + +/** + * Accept callback function for TCP netconns. + * Allocates a new netconn and posts that to conn->acceptmbox. + * + * @see tcp.h (struct tcp_pcb_listen.accept) for parameters and return value + */ +static err_t +accept_function(void *arg, struct tcp_pcb *newpcb, err_t err) +{ + struct netconn *newconn; + struct netconn *conn = (struct netconn *)arg; + + if (conn == NULL) { + return ERR_VAL; + } + if (!NETCONN_MBOX_VALID(conn, &conn->acceptmbox)) { + LWIP_DEBUGF(API_MSG_DEBUG, ("accept_function: acceptmbox already deleted\n")); + return ERR_VAL; + } + + if (newpcb == NULL) { + /* out-of-pcbs during connect: pass on this error to the application */ + if (sys_mbox_trypost(&conn->acceptmbox, lwip_netconn_err_to_msg(ERR_ABRT)) == ERR_OK) { + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + } + return ERR_VAL; + } + LWIP_ASSERT("expect newpcb == NULL or err == ERR_OK", err == ERR_OK); + LWIP_UNUSED_ARG(err); /* for LWIP_NOASSERT */ + + LWIP_DEBUGF(API_MSG_DEBUG, ("accept_function: newpcb->state: %s\n", tcp_debug_state_str(newpcb->state))); + + /* We have to set the callback here even though + * the new socket is unknown. newconn->socket is marked as -1. 
*/ + newconn = netconn_alloc(conn->type, conn->callback); + if (newconn == NULL) { + /* outof netconns: pass on this error to the application */ + if (sys_mbox_trypost(&conn->acceptmbox, lwip_netconn_err_to_msg(ERR_ABRT)) == ERR_OK) { + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + } + return ERR_MEM; + } + newconn->pcb.tcp = newpcb; + setup_tcp(newconn); + + /* handle backlog counter */ + tcp_backlog_delayed(newpcb); + + if (sys_mbox_trypost(&conn->acceptmbox, newconn) != ERR_OK) { + /* When returning != ERR_OK, the pcb is aborted in tcp_process(), + so do nothing here! */ + /* remove all references to this netconn from the pcb */ + struct tcp_pcb *pcb = newconn->pcb.tcp; + tcp_arg(pcb, NULL); + tcp_recv(pcb, NULL); + tcp_sent(pcb, NULL); + tcp_poll(pcb, NULL, 0); + tcp_err(pcb, NULL); + /* remove reference from to the pcb from this netconn */ + newconn->pcb.tcp = NULL; + /* no need to drain since we know the recvmbox is empty. */ + sys_mbox_free(&newconn->recvmbox); + sys_mbox_set_invalid(&newconn->recvmbox); + netconn_free(newconn); + return ERR_MEM; + } else { + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + } + + return ERR_OK; +} +#endif /* LWIP_TCP */ + +/** + * Create a new pcb of a specific type. + * Called from lwip_netconn_do_newconn(). + * + * @param msg the api_msg describing the connection type + */ +static void +pcb_new(struct api_msg *msg) +{ + enum lwip_ip_addr_type iptype = IPADDR_TYPE_V4; + + LWIP_ASSERT("pcb_new: pcb already allocated", msg->conn->pcb.tcp == NULL); + +#if LWIP_IPV6 && LWIP_IPV4 + /* IPv6: Dual-stack by default, unless netconn_set_ipv6only() is called */ + if (NETCONNTYPE_ISIPV6(netconn_type(msg->conn))) { + iptype = IPADDR_TYPE_ANY; + } +#endif + + /* Allocate a PCB for this connection */ + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + msg->conn->pcb.raw = raw_new_ip_type(iptype, msg->msg.n.proto); + if (msg->conn->pcb.raw != NULL) { +#if LWIP_IPV6 + /* ICMPv6 packets should always have checksum calculated by the stack as per RFC 3542 chapter 3.1 */ + if (NETCONNTYPE_ISIPV6(msg->conn->type) && msg->conn->pcb.raw->protocol == IP6_NEXTH_ICMP6) { + msg->conn->pcb.raw->chksum_reqd = 1; + msg->conn->pcb.raw->chksum_offset = 2; + } +#endif /* LWIP_IPV6 */ + raw_recv(msg->conn->pcb.raw, recv_raw, msg->conn); + } + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + msg->conn->pcb.udp = udp_new_ip_type(iptype); + if (msg->conn->pcb.udp != NULL) { +#if LWIP_UDPLITE + if (NETCONNTYPE_ISUDPLITE(msg->conn->type)) { + udp_setflags(msg->conn->pcb.udp, UDP_FLAGS_UDPLITE); + } +#endif /* LWIP_UDPLITE */ + if (NETCONNTYPE_ISUDPNOCHKSUM(msg->conn->type)) { + udp_setflags(msg->conn->pcb.udp, UDP_FLAGS_NOCHKSUM); + } + udp_recv(msg->conn->pcb.udp, recv_udp, msg->conn); + } + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + msg->conn->pcb.tcp = tcp_new_ip_type(iptype); + if (msg->conn->pcb.tcp != NULL) { + setup_tcp(msg->conn); + } + break; +#endif /* LWIP_TCP */ + default: + /* Unsupported netconn type, e.g. protocol disabled */ + msg->err = ERR_VAL; + return; + } + if (msg->conn->pcb.ip == NULL) { + msg->err = ERR_MEM; + } +} + +/** + * Create a new pcb of a specific type inside a netconn. + * Called from netconn_new_with_proto_and_callback. 
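[Editor's note] netconn_alloc(conn->type, conn->callback) in accept_function() propagates the listener's event callback to every accepted netconn, and the API_EVENT() calls throughout this file invoke it. A hypothetical callback and listener setup (names and the wake-up strategy are assumptions):

#include "lwip/api.h"

static void my_netconn_event(struct netconn *conn, enum netconn_evt evt, u16_t len)
{
  LWIP_UNUSED_ARG(conn);
  LWIP_UNUSED_ARG(len);
  if (evt == NETCONN_EVT_RCVPLUS) {
    /* data or a pending connection is available: wake the application task here */
  }
}

static struct netconn *new_listener_with_events(void)
{
  /* accepted netconns inherit this callback via accept_function()/netconn_alloc() */
  return netconn_new_with_callback(NETCONN_TCP, my_netconn_event);
}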
+ * + * @param m the api_msg describing the connection type + */ +void +lwip_netconn_do_newconn(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + msg->err = ERR_OK; + if (msg->conn->pcb.tcp == NULL) { + pcb_new(msg); + } + /* Else? This "new" connection already has a PCB allocated. */ + /* Is this an error condition? Should it be deleted? */ + /* We currently just are happy and return. */ + + TCPIP_APIMSG_ACK(msg); +} + +/** + * Create a new netconn (of a specific type) that has a callback function. + * The corresponding pcb is NOT created! + * + * @param t the type of 'connection' to create (@see enum netconn_type) + * @param callback a function to call on status changes (RX available, TX'ed) + * @return a newly allocated struct netconn or + * NULL on memory error + */ +struct netconn * +netconn_alloc(enum netconn_type t, netconn_callback callback) +{ + struct netconn *conn; + int size; + u8_t init_flags = 0; + + conn = (struct netconn *)memp_malloc(MEMP_NETCONN); + if (conn == NULL) { + return NULL; + } + + conn->pending_err = ERR_OK; + conn->type = t; + conn->pcb.tcp = NULL; + + /* If all sizes are the same, every compiler should optimize this switch to nothing */ + switch (NETCONNTYPE_GROUP(t)) { +#if LWIP_RAW + case NETCONN_RAW: + size = DEFAULT_RAW_RECVMBOX_SIZE; + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + size = DEFAULT_UDP_RECVMBOX_SIZE; +#if LWIP_NETBUF_RECVINFO + init_flags |= NETCONN_FLAG_PKTINFO; +#endif /* LWIP_NETBUF_RECVINFO */ + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + size = DEFAULT_TCP_RECVMBOX_SIZE; + break; +#endif /* LWIP_TCP */ + default: + LWIP_ASSERT("netconn_alloc: undefined netconn_type", 0); + goto free_and_return; + } + + if (sys_mbox_new(&conn->recvmbox, size) != ERR_OK) { + goto free_and_return; + } +#if !LWIP_NETCONN_SEM_PER_THREAD + if (sys_sem_new(&conn->op_completed, 0) != ERR_OK) { + sys_mbox_free(&conn->recvmbox); + goto free_and_return; + } +#endif + +#if LWIP_TCP + sys_mbox_set_invalid(&conn->acceptmbox); +#endif + conn->state = NETCONN_NONE; +#if LWIP_SOCKET + /* initialize socket to -1 since 0 is a valid socket */ + conn->socket = -1; +#endif /* LWIP_SOCKET */ + conn->callback = callback; +#if LWIP_TCP + conn->current_msg = NULL; +#endif /* LWIP_TCP */ +#if LWIP_SO_SNDTIMEO + conn->send_timeout = 0; +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_RCVTIMEO + conn->recv_timeout = 0; +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF + conn->recv_bufsize = RECV_BUFSIZE_DEFAULT; + conn->recv_avail = 0; +#endif /* LWIP_SO_RCVBUF */ +#if LWIP_SO_LINGER + conn->linger = -1; +#endif /* LWIP_SO_LINGER */ + conn->flags = init_flags; + return conn; +free_and_return: + memp_free(MEMP_NETCONN, conn); + return NULL; +} + +/** + * Delete a netconn and all its resources. + * The pcb is NOT freed (since we might not be in the right thread context do this). 
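[Editor's note] The mailbox sizes selected in netconn_alloc() come from lwipopts.h/opt.h. The macro names below are the real options being overridden; the values are example figures only, not this project's settings.

/* lwipopts.h sizing knobs feeding netconn_alloc() */
#define MEMP_NUM_NETCONN           8   /* simultaneously active netconn objects */
#define DEFAULT_RAW_RECVMBOX_SIZE  4   /* queued packets per RAW netconn */
#define DEFAULT_UDP_RECVMBOX_SIZE  8   /* queued datagrams per UDP netconn */
#define DEFAULT_TCP_RECVMBOX_SIZE  8   /* queued pbufs per TCP netconn */
#define DEFAULT_ACCEPTMBOX_SIZE    4   /* pending connections per listening netconn */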
+ * + * @param conn the netconn to free + */ +void +netconn_free(struct netconn *conn) +{ + LWIP_ASSERT("PCB must be deallocated outside this function", conn->pcb.tcp == NULL); + +#if LWIP_NETCONN_FULLDUPLEX + /* in fullduplex, netconn is drained here */ + netconn_drain(conn); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + + LWIP_ASSERT("recvmbox must be deallocated before calling this function", + !sys_mbox_valid(&conn->recvmbox)); +#if LWIP_TCP + LWIP_ASSERT("acceptmbox must be deallocated before calling this function", + !sys_mbox_valid(&conn->acceptmbox)); +#endif /* LWIP_TCP */ + +#if !LWIP_NETCONN_SEM_PER_THREAD + sys_sem_free(&conn->op_completed); + sys_sem_set_invalid(&conn->op_completed); +#endif + + memp_free(MEMP_NETCONN, conn); +} + +/** + * Delete rcvmbox and acceptmbox of a netconn and free the left-over data in + * these mboxes + * + * @param conn the netconn to free + * @bytes_drained bytes drained from recvmbox + * @accepts_drained pending connections drained from acceptmbox + */ +static void +netconn_drain(struct netconn *conn) +{ + void *mem; + + /* This runs when mbox and netconn are marked as closed, + so we don't need to lock against rx packets */ +#if LWIP_NETCONN_FULLDUPLEX + LWIP_ASSERT("netconn marked closed", conn->flags & NETCONN_FLAG_MBOXINVALID); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + + /* Delete and drain the recvmbox. */ + if (sys_mbox_valid(&conn->recvmbox)) { + while (sys_mbox_tryfetch(&conn->recvmbox, &mem) != SYS_MBOX_EMPTY) { +#if LWIP_NETCONN_FULLDUPLEX + if (!lwip_netconn_is_deallocated_msg(mem)) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + { +#if LWIP_TCP + if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) { + err_t err; + if (!lwip_netconn_is_err_msg(mem, &err)) { + pbuf_free((struct pbuf *)mem); + } + } else +#endif /* LWIP_TCP */ + { + netbuf_delete((struct netbuf *)mem); + } + } + } + sys_mbox_free(&conn->recvmbox); + sys_mbox_set_invalid(&conn->recvmbox); + } + + /* Delete and drain the acceptmbox. */ +#if LWIP_TCP + if (sys_mbox_valid(&conn->acceptmbox)) { + while (sys_mbox_tryfetch(&conn->acceptmbox, &mem) != SYS_MBOX_EMPTY) { +#if LWIP_NETCONN_FULLDUPLEX + if (!lwip_netconn_is_deallocated_msg(mem)) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + { + err_t err; + if (!lwip_netconn_is_err_msg(mem, &err)) { + struct netconn *newconn = (struct netconn *)mem; + /* Only tcp pcbs have an acceptmbox, so no need to check conn->type */ + /* pcb might be set to NULL already by err_tcp() */ + /* drain recvmbox */ + netconn_drain(newconn); + if (newconn->pcb.tcp != NULL) { + tcp_abort(newconn->pcb.tcp); + newconn->pcb.tcp = NULL; + } + netconn_free(newconn); + } + } + } + sys_mbox_free(&conn->acceptmbox); + sys_mbox_set_invalid(&conn->acceptmbox); + } +#endif /* LWIP_TCP */ +} + +#if LWIP_NETCONN_FULLDUPLEX +static void +netconn_mark_mbox_invalid(struct netconn *conn) +{ + int i, num_waiting; + void *msg = LWIP_CONST_CAST(void *, &netconn_deleted); + + /* Prevent new calls/threads from reading from the mbox */ + conn->flags |= NETCONN_FLAG_MBOXINVALID; + + SYS_ARCH_LOCKED(num_waiting = conn->mbox_threads_waiting); + for (i = 0; i < num_waiting; i++) { + if (sys_mbox_valid_val(conn->recvmbox)) { + sys_mbox_trypost(&conn->recvmbox, msg); + } else { + sys_mbox_trypost(&conn->acceptmbox, msg); + } + } +} +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +#if LWIP_TCP +/** + * Internal helper function to close a TCP netconn: since this sometimes + * doesn't work at the first attempt, this function is called from multiple + * places. 
+ * + * @param conn the TCP netconn to close + */ +static err_t +lwip_netconn_do_close_internal(struct netconn *conn WRITE_DELAYED_PARAM) +{ + err_t err; + u8_t shut, shut_rx, shut_tx, shut_close; + u8_t close_finished = 0; + struct tcp_pcb *tpcb; +#if LWIP_SO_LINGER + u8_t linger_wait_required = 0; +#endif /* LWIP_SO_LINGER */ + + LWIP_ASSERT("invalid conn", (conn != NULL)); + LWIP_ASSERT("this is for tcp netconns only", (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP)); + LWIP_ASSERT("conn must be in state NETCONN_CLOSE", (conn->state == NETCONN_CLOSE)); + LWIP_ASSERT("pcb already closed", (conn->pcb.tcp != NULL)); + LWIP_ASSERT("conn->current_msg != NULL", conn->current_msg != NULL); + + tpcb = conn->pcb.tcp; + shut = conn->current_msg->msg.sd.shut; + shut_rx = shut & NETCONN_SHUT_RD; + shut_tx = shut & NETCONN_SHUT_WR; + /* shutting down both ends is the same as closing + (also if RD or WR side was shut down before already) */ + if (shut == NETCONN_SHUT_RDWR) { + shut_close = 1; + } else if (shut_rx && + ((tpcb->state == FIN_WAIT_1) || + (tpcb->state == FIN_WAIT_2) || + (tpcb->state == CLOSING))) { + shut_close = 1; + } else if (shut_tx && ((tpcb->flags & TF_RXCLOSED) != 0)) { + shut_close = 1; + } else { + shut_close = 0; + } + + /* Set back some callback pointers */ + if (shut_close) { + tcp_arg(tpcb, NULL); + } + if (tpcb->state == LISTEN) { + tcp_accept(tpcb, NULL); + } else { + /* some callbacks have to be reset if tcp_close is not successful */ + if (shut_rx) { + tcp_recv(tpcb, NULL); + tcp_accept(tpcb, NULL); + } + if (shut_tx) { + tcp_sent(tpcb, NULL); + } + if (shut_close) { + tcp_poll(tpcb, NULL, 0); + tcp_err(tpcb, NULL); + } + } + /* Try to close the connection */ + if (shut_close) { +#if LWIP_SO_LINGER + /* check linger possibilites before calling tcp_close */ + err = ERR_OK; + /* linger enabled/required at all? (i.e. is there untransmitted data left?) */ + if ((conn->linger >= 0) && (conn->pcb.tcp->unsent || conn->pcb.tcp->unacked)) { + if ((conn->linger == 0)) { + /* data left but linger prevents waiting */ + tcp_abort(tpcb); + tpcb = NULL; + } else if (conn->linger > 0) { + /* data left and linger says we should wait */ + if (netconn_is_nonblocking(conn)) { + /* data left on a nonblocking netconn -> cannot linger */ + err = ERR_WOULDBLOCK; + } else if ((s32_t)(sys_now() - conn->current_msg->msg.sd.time_started) >= + (conn->linger * 1000)) { + /* data left but linger timeout has expired (this happens on further + calls to this function through poll_tcp */ + tcp_abort(tpcb); + tpcb = NULL; + } else { + /* data left -> need to wait for ACK after successful close */ + linger_wait_required = 1; + } + } + } + if ((err == ERR_OK) && (tpcb != NULL)) +#endif /* LWIP_SO_LINGER */ + { + err = tcp_close(tpcb); + } + } else { + err = tcp_shutdown(tpcb, shut_rx, shut_tx); + } + if (err == ERR_OK) { + close_finished = 1; +#if LWIP_SO_LINGER + if (linger_wait_required) { + /* wait for ACK of all unsent/unacked data by just getting called again */ + close_finished = 0; + err = ERR_INPROGRESS; + } +#endif /* LWIP_SO_LINGER */ + } else { + if (err == ERR_MEM) { + /* Closing failed because of memory shortage, try again later. Even for + nonblocking netconns, we have to wait since no standard socket application + is prepared for close failing because of resource shortage. 
+ Check the timeout: this is kind of an lwip addition to the standard sockets: + we wait for some time when failing to allocate a segment for the FIN */ +#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER + s32_t close_timeout = LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT; +#if LWIP_SO_SNDTIMEO + if (conn->send_timeout > 0) { + close_timeout = conn->send_timeout; + } +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_LINGER + if (conn->linger >= 0) { + /* use linger timeout (seconds) */ + close_timeout = conn->linger * 1000U; + } +#endif + if ((s32_t)(sys_now() - conn->current_msg->msg.sd.time_started) >= close_timeout) { +#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + if (conn->current_msg->msg.sd.polls_left == 0) { +#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + close_finished = 1; + if (shut_close) { + /* in this case, we want to RST the connection */ + tcp_abort(tpcb); + err = ERR_OK; + } + } + } else { + /* Closing failed for a non-memory error: give up */ + close_finished = 1; + } + } + if (close_finished) { + /* Closing done (succeeded, non-memory error, nonblocking error or timeout) */ + sys_sem_t *op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg); + conn->current_msg->err = err; + conn->current_msg = NULL; + conn->state = NETCONN_NONE; + if (err == ERR_OK) { + if (shut_close) { + /* Set back some callback pointers as conn is going away */ + conn->pcb.tcp = NULL; + /* Trigger select() in socket layer. Make sure everybody notices activity + on the connection, error first! */ + API_EVENT(conn, NETCONN_EVT_ERROR, 0); + } + if (shut_rx) { + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + } + if (shut_tx) { + API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0); + } + } +#if LWIP_TCPIP_CORE_LOCKING + if (delayed) +#endif + { + /* wake up the application task */ + sys_sem_signal(op_completed_sem); + } + return ERR_OK; + } + if (!close_finished) { + /* Closing failed and we want to wait: restore some of the callbacks */ + /* Closing of listen pcb will never fail! */ + LWIP_ASSERT("Closing a listen pcb may not fail!", (tpcb->state != LISTEN)); + if (shut_tx) { + tcp_sent(tpcb, sent_tcp); + } + /* when waiting for close, set up poll interval to 500ms */ + tcp_poll(tpcb, poll_tcp, 1); + tcp_err(tpcb, err_tcp); + tcp_arg(tpcb, conn); + /* don't restore recv callback: we don't want to receive any more data */ + } + /* If closing didn't succeed, we get called again either + from poll_tcp or from sent_tcp */ + LWIP_ASSERT("err != ERR_OK", err != ERR_OK); + return err; +} +#endif /* LWIP_TCP */ + +/** + * Delete the pcb inside a netconn. + * Called from netconn_delete. 
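[Editor's note] Which of the close-timeout branches in lwip_netconn_do_close_internal() gets compiled depends on a few lwipopts.h switches. These are the real option names; the values shown are examples, not the defaults used by this project.

/* lwipopts.h options controlling close/linger behaviour */
#define LWIP_SO_SNDTIMEO                   1      /* per-netconn send/close timeout */
#define LWIP_SO_LINGER                     1      /* honour SO_LINGER semantics on close */
#define LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT  20000  /* give up and RST the pcb after 20 s */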
+ * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_delconn(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + enum netconn_state state = msg->conn->state; + LWIP_ASSERT("netconn state error", /* this only happens for TCP netconns */ + (state == NETCONN_NONE) || (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP)); +#if LWIP_NETCONN_FULLDUPLEX + /* In full duplex mode, blocking write/connect is aborted with ERR_CLSD */ + if (state != NETCONN_NONE) { + if ((state == NETCONN_WRITE) || + ((state == NETCONN_CONNECT) && !IN_NONBLOCKING_CONNECT(msg->conn))) { + /* close requested, abort running write/connect */ + sys_sem_t *op_completed_sem; + LWIP_ASSERT("msg->conn->current_msg != NULL", msg->conn->current_msg != NULL); + op_completed_sem = LWIP_API_MSG_SEM(msg->conn->current_msg); + msg->conn->current_msg->err = ERR_CLSD; + msg->conn->current_msg = NULL; + msg->conn->state = NETCONN_NONE; + sys_sem_signal(op_completed_sem); + } + } +#else /* LWIP_NETCONN_FULLDUPLEX */ + if (((state != NETCONN_NONE) && + (state != NETCONN_LISTEN) && + (state != NETCONN_CONNECT)) || + ((state == NETCONN_CONNECT) && !IN_NONBLOCKING_CONNECT(msg->conn))) { + /* This means either a blocking write or blocking connect is running + (nonblocking write returns and sets state to NONE) */ + msg->err = ERR_INPROGRESS; + } else +#endif /* LWIP_NETCONN_FULLDUPLEX */ + { + LWIP_ASSERT("blocking connect in progress", + (state != NETCONN_CONNECT) || IN_NONBLOCKING_CONNECT(msg->conn)); + msg->err = ERR_OK; +#if LWIP_NETCONN_FULLDUPLEX + /* Mark mboxes invalid */ + netconn_mark_mbox_invalid(msg->conn); +#else /* LWIP_NETCONN_FULLDUPLEX */ + netconn_drain(msg->conn); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + + if (msg->conn->pcb.tcp != NULL) { + + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + raw_remove(msg->conn->pcb.raw); + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + msg->conn->pcb.udp->recv_arg = NULL; + udp_remove(msg->conn->pcb.udp); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + LWIP_ASSERT("already writing or closing", msg->conn->current_msg == NULL); + msg->conn->state = NETCONN_CLOSE; + msg->msg.sd.shut = NETCONN_SHUT_RDWR; + msg->conn->current_msg = msg; +#if LWIP_TCPIP_CORE_LOCKING + if (lwip_netconn_do_close_internal(msg->conn, 0) != ERR_OK) { + LWIP_ASSERT("state!", msg->conn->state == NETCONN_CLOSE); + UNLOCK_TCPIP_CORE(); + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("state!", msg->conn->state == NETCONN_NONE); + } +#else /* LWIP_TCPIP_CORE_LOCKING */ + lwip_netconn_do_close_internal(msg->conn); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + /* API_EVENT is called inside lwip_netconn_do_close_internal, before releasing + the application thread, so we can return at this point! */ + return; +#endif /* LWIP_TCP */ + default: + break; + } + msg->conn->pcb.tcp = NULL; + } + /* tcp netconns don't come here! */ + + /* @todo: this lets select make the socket readable and writable, + which is wrong! errfd instead? */ + API_EVENT(msg->conn, NETCONN_EVT_RCVPLUS, 0); + API_EVENT(msg->conn, NETCONN_EVT_SENDPLUS, 0); + } + if (sys_sem_valid(LWIP_API_MSG_SEM(msg))) { + TCPIP_APIMSG_ACK(msg); + } +} + +/** + * Bind a pcb contained in a netconn + * Called from netconn_bind. 
+ * + * @param m the api_msg pointing to the connection and containing + * the IP address and port to bind to + */ +void +lwip_netconn_do_bind(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + err_t err; + + if (msg->conn->pcb.tcp != NULL) { + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + err = raw_bind(msg->conn->pcb.raw, API_EXPR_REF(msg->msg.bc.ipaddr)); + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + err = udp_bind(msg->conn->pcb.udp, API_EXPR_REF(msg->msg.bc.ipaddr), msg->msg.bc.port); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + err = tcp_bind(msg->conn->pcb.tcp, API_EXPR_REF(msg->msg.bc.ipaddr), msg->msg.bc.port); + break; +#endif /* LWIP_TCP */ + default: + err = ERR_VAL; + break; + } + } else { + err = ERR_VAL; + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} +/** + * Bind a pcb contained in a netconn to an interface + * Called from netconn_bind_if. + * + * @param m the api_msg pointing to the connection and containing + * the IP address and port to bind to + */ +void +lwip_netconn_do_bind_if(void *m) +{ + struct netif *netif; + struct api_msg *msg = (struct api_msg *)m; + err_t err; + + netif = netif_get_by_index(msg->msg.bc.if_idx); + + if ((netif != NULL) && (msg->conn->pcb.tcp != NULL)) { + err = ERR_OK; + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + raw_bind_netif(msg->conn->pcb.raw, netif); + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + udp_bind_netif(msg->conn->pcb.udp, netif); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + tcp_bind_netif(msg->conn->pcb.tcp, netif); + break; +#endif /* LWIP_TCP */ + default: + err = ERR_VAL; + break; + } + } else { + err = ERR_VAL; + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} + +#if LWIP_TCP +/** + * TCP callback function if a connection (opened by tcp_connect/lwip_netconn_do_connect) has + * been established (or reset by the remote host). + * + * @see tcp.h (struct tcp_pcb.connected) for parameters and return values + */ +static err_t +lwip_netconn_do_connected(void *arg, struct tcp_pcb *pcb, err_t err) +{ + struct netconn *conn; + int was_blocking; + sys_sem_t *op_completed_sem = NULL; + + LWIP_UNUSED_ARG(pcb); + + conn = (struct netconn *)arg; + + if (conn == NULL) { + return ERR_VAL; + } + + LWIP_ASSERT("conn->state == NETCONN_CONNECT", conn->state == NETCONN_CONNECT); + LWIP_ASSERT("(conn->current_msg != NULL) || conn->in_non_blocking_connect", + (conn->current_msg != NULL) || IN_NONBLOCKING_CONNECT(conn)); + + if (conn->current_msg != NULL) { + conn->current_msg->err = err; + op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg); + } + if ((NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) && (err == ERR_OK)) { + setup_tcp(conn); + } + was_blocking = !IN_NONBLOCKING_CONNECT(conn); + SET_NONBLOCKING_CONNECT(conn, 0); + LWIP_ASSERT("blocking connect state error", + (was_blocking && op_completed_sem != NULL) || + (!was_blocking && op_completed_sem == NULL)); + conn->current_msg = NULL; + conn->state = NETCONN_NONE; + API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0); + + if (was_blocking) { + sys_sem_signal(op_completed_sem); + } + return ERR_OK; +} +#endif /* LWIP_TCP */ + +/** + * Connect a pcb contained inside a netconn + * Called from netconn_connect. 
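+ *
+ * The application-side entry point that lands here is netconn_connect(). A
+ * minimal, illustrative sketch ("conn", the address and the port are
+ * placeholders):
+ * @code{.c}
+ * ip_addr_t remote;
+ * ipaddr_aton("192.168.0.10", &remote);
+ * err_t err = netconn_connect(conn, &remote, 80);  // blocks unless the netconn is non-blocking
+ * @endcode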
+ * + * @param m the api_msg pointing to the connection and containing + * the IP address and port to connect to + */ +void +lwip_netconn_do_connect(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + err_t err; + + if (msg->conn->pcb.tcp == NULL) { + /* This may happen when calling netconn_connect() a second time */ + err = ERR_CLSD; + } else { + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + err = raw_connect(msg->conn->pcb.raw, API_EXPR_REF(msg->msg.bc.ipaddr)); + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + err = udp_connect(msg->conn->pcb.udp, API_EXPR_REF(msg->msg.bc.ipaddr), msg->msg.bc.port); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + /* Prevent connect while doing any other action. */ + if (msg->conn->state == NETCONN_CONNECT) { + err = ERR_ALREADY; + } else if (msg->conn->state != NETCONN_NONE) { + err = ERR_ISCONN; + } else { + setup_tcp(msg->conn); + err = tcp_connect(msg->conn->pcb.tcp, API_EXPR_REF(msg->msg.bc.ipaddr), + msg->msg.bc.port, lwip_netconn_do_connected); + if (err == ERR_OK) { + u8_t non_blocking = netconn_is_nonblocking(msg->conn); + msg->conn->state = NETCONN_CONNECT; + SET_NONBLOCKING_CONNECT(msg->conn, non_blocking); + if (non_blocking) { + err = ERR_INPROGRESS; + } else { + msg->conn->current_msg = msg; + /* sys_sem_signal() is called from lwip_netconn_do_connected (or err_tcp()), + when the connection is established! */ +#if LWIP_TCPIP_CORE_LOCKING + LWIP_ASSERT("state!", msg->conn->state == NETCONN_CONNECT); + UNLOCK_TCPIP_CORE(); + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("state!", msg->conn->state != NETCONN_CONNECT); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + return; + } + } + } + break; +#endif /* LWIP_TCP */ + default: + LWIP_ERROR("Invalid netconn type", 0, do { + err = ERR_VAL; + } while (0)); + break; + } + } + msg->err = err; + /* For all other protocols, netconn_connect() calls netconn_apimsg(), + so use TCPIP_APIMSG_ACK() here. */ + TCPIP_APIMSG_ACK(msg); +} + +/** + * Disconnect a pcb contained inside a netconn + * Only used for UDP netconns. + * Called from netconn_disconnect. + * + * @param m the api_msg pointing to the connection to disconnect + */ +void +lwip_netconn_do_disconnect(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + +#if LWIP_UDP + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_UDP) { + udp_disconnect(msg->conn->pcb.udp); + msg->err = ERR_OK; + } else +#endif /* LWIP_UDP */ + { + msg->err = ERR_VAL; + } + TCPIP_APIMSG_ACK(msg); +} + +#if LWIP_TCP +/** + * Set a TCP pcb contained in a netconn into listen mode + * Called from netconn_listen. 
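+ *
+ * The corresponding sequential-API sequence on the application side is roughly
+ * as follows (illustrative sketch only; port 80 is an example, error handling
+ * omitted):
+ * @code{.c}
+ * struct netconn *conn = netconn_new(NETCONN_TCP);
+ * netconn_bind(conn, IP_ADDR_ANY, 80);
+ * netconn_listen(conn);                        // ends up in lwip_netconn_do_listen
+ * struct netconn *newconn;
+ * while (netconn_accept(conn, &newconn) == ERR_OK) {
+ *   // serve newconn, then netconn_close() and netconn_delete() it
+ * }
+ * @endcode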
+ * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_listen(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + err_t err; + + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) { + if (msg->conn->state == NETCONN_NONE) { + struct tcp_pcb *lpcb; + if (msg->conn->pcb.tcp->state != CLOSED) { + /* connection is not closed, cannot listen */ + err = ERR_VAL; + } else { + u8_t backlog; +#if TCP_LISTEN_BACKLOG + backlog = msg->msg.lb.backlog; +#else /* TCP_LISTEN_BACKLOG */ + backlog = TCP_DEFAULT_LISTEN_BACKLOG; +#endif /* TCP_LISTEN_BACKLOG */ +#if LWIP_IPV4 && LWIP_IPV6 + /* "Socket API like" dual-stack support: If IP to listen to is IP6_ADDR_ANY, + * and NETCONN_FLAG_IPV6_V6ONLY is NOT set, use IP_ANY_TYPE to listen + */ + if (ip_addr_cmp(&msg->conn->pcb.ip->local_ip, IP6_ADDR_ANY) && + (netconn_get_ipv6only(msg->conn) == 0)) { + /* change PCB type to IPADDR_TYPE_ANY */ + IP_SET_TYPE_VAL(msg->conn->pcb.tcp->local_ip, IPADDR_TYPE_ANY); + IP_SET_TYPE_VAL(msg->conn->pcb.tcp->remote_ip, IPADDR_TYPE_ANY); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + lpcb = tcp_listen_with_backlog_and_err(msg->conn->pcb.tcp, backlog, &err); + + if (lpcb == NULL) { + /* in this case, the old pcb is still allocated */ + } else { + /* delete the recvmbox and allocate the acceptmbox */ + if (sys_mbox_valid(&msg->conn->recvmbox)) { + /** @todo: should we drain the recvmbox here? */ + sys_mbox_free(&msg->conn->recvmbox); + sys_mbox_set_invalid(&msg->conn->recvmbox); + } + err = ERR_OK; + if (!sys_mbox_valid(&msg->conn->acceptmbox)) { + err = sys_mbox_new(&msg->conn->acceptmbox, DEFAULT_ACCEPTMBOX_SIZE); + } + if (err == ERR_OK) { + msg->conn->state = NETCONN_LISTEN; + msg->conn->pcb.tcp = lpcb; + tcp_arg(msg->conn->pcb.tcp, msg->conn); + tcp_accept(msg->conn->pcb.tcp, accept_function); + } else { + /* since the old pcb is already deallocated, free lpcb now */ + tcp_close(lpcb); + msg->conn->pcb.tcp = NULL; + } + } + } + } else if (msg->conn->state == NETCONN_LISTEN) { + /* already listening, allow updating of the backlog */ + err = ERR_OK; + tcp_backlog_set(msg->conn->pcb.tcp, msg->msg.lb.backlog); + } else { + err = ERR_CONN; + } + } else { + err = ERR_ARG; + } + } else { + err = ERR_CONN; + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} +#endif /* LWIP_TCP */ + +/** + * Send some data on a RAW or UDP pcb contained in a netconn + * Called from netconn_send + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_send(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + err_t err = netconn_err(msg->conn); + if (err == ERR_OK) { + if (msg->conn->pcb.tcp != NULL) { + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + if (ip_addr_isany(&msg->msg.b->addr) || IP_IS_ANY_TYPE_VAL(msg->msg.b->addr)) { + err = raw_send(msg->conn->pcb.raw, msg->msg.b->p); + } else { + err = raw_sendto(msg->conn->pcb.raw, msg->msg.b->p, &msg->msg.b->addr); + } + break; +#endif +#if LWIP_UDP + case NETCONN_UDP: +#if LWIP_CHECKSUM_ON_COPY + if (ip_addr_isany(&msg->msg.b->addr) || IP_IS_ANY_TYPE_VAL(msg->msg.b->addr)) { + err = udp_send_chksum(msg->conn->pcb.udp, msg->msg.b->p, + msg->msg.b->flags & NETBUF_FLAG_CHKSUM, msg->msg.b->toport_chksum); + } else { + err = udp_sendto_chksum(msg->conn->pcb.udp, msg->msg.b->p, + &msg->msg.b->addr, msg->msg.b->port, + msg->msg.b->flags & NETBUF_FLAG_CHKSUM, msg->msg.b->toport_chksum); + } +#else /* LWIP_CHECKSUM_ON_COPY */ + if (ip_addr_isany_val(msg->msg.b->addr) || 
IP_IS_ANY_TYPE_VAL(msg->msg.b->addr)) { + err = udp_send(msg->conn->pcb.udp, msg->msg.b->p); + } else { + err = udp_sendto(msg->conn->pcb.udp, msg->msg.b->p, &msg->msg.b->addr, msg->msg.b->port); + } +#endif /* LWIP_CHECKSUM_ON_COPY */ + break; +#endif /* LWIP_UDP */ + default: + err = ERR_CONN; + break; + } + } else { + err = ERR_CONN; + } + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} + +#if LWIP_TCP +/** + * Indicate data has been received from a TCP pcb contained in a netconn + * Called from netconn_recv + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_recv(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + msg->err = ERR_OK; + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) { + size_t remaining = msg->msg.r.len; + do { + u16_t recved = (u16_t)((remaining > 0xffff) ? 0xffff : remaining); + tcp_recved(msg->conn->pcb.tcp, recved); + remaining -= recved; + } while (remaining != 0); + } + } + TCPIP_APIMSG_ACK(msg); +} + +#if TCP_LISTEN_BACKLOG +/** Indicate that a TCP pcb has been accepted + * Called from netconn_accept + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_accepted(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + msg->err = ERR_OK; + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) { + tcp_backlog_accepted(msg->conn->pcb.tcp); + } + } + TCPIP_APIMSG_ACK(msg); +} +#endif /* TCP_LISTEN_BACKLOG */ + +/** + * See if more data needs to be written from a previous call to netconn_write. + * Called initially from lwip_netconn_do_write. If the first call can't send all data + * (because of low memory or empty send-buffer), this function is called again + * from sent_tcp() or poll_tcp() to send more data. If all data is sent, the + * blocking application thread (waiting in netconn_write) is released. 
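+ *
+ * Seen from the application side, the data drained here was queued by a call
+ * such as the following (illustrative only; "conn" and the payload are
+ * placeholders):
+ * @code{.c}
+ * const char payload[] = "hello";
+ * err_t err = netconn_write(conn, payload, sizeof(payload) - 1, NETCONN_COPY);
+ * @endcode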
+ * + * @param conn netconn (that is currently in state NETCONN_WRITE) to process + * @return ERR_OK + * ERR_MEM if LWIP_TCPIP_CORE_LOCKING=1 and sending hasn't yet finished + */ +static err_t +lwip_netconn_do_writemore(struct netconn *conn WRITE_DELAYED_PARAM) +{ + err_t err; + const void *dataptr; + u16_t len, available; + u8_t write_finished = 0; + size_t diff; + u8_t dontblock; + u8_t apiflags; + u8_t write_more; + + LWIP_ASSERT("conn != NULL", conn != NULL); + LWIP_ASSERT("conn->state == NETCONN_WRITE", (conn->state == NETCONN_WRITE)); + LWIP_ASSERT("conn->current_msg != NULL", conn->current_msg != NULL); + LWIP_ASSERT("conn->pcb.tcp != NULL", conn->pcb.tcp != NULL); + LWIP_ASSERT("conn->current_msg->msg.w.offset < conn->current_msg->msg.w.len", + conn->current_msg->msg.w.offset < conn->current_msg->msg.w.len); + LWIP_ASSERT("conn->current_msg->msg.w.vector_cnt > 0", conn->current_msg->msg.w.vector_cnt > 0); + + apiflags = conn->current_msg->msg.w.apiflags; + dontblock = netconn_is_nonblocking(conn) || (apiflags & NETCONN_DONTBLOCK); + +#if LWIP_SO_SNDTIMEO + if ((conn->send_timeout != 0) && + ((s32_t)(sys_now() - conn->current_msg->msg.w.time_started) >= conn->send_timeout)) { + write_finished = 1; + if (conn->current_msg->msg.w.offset == 0) { + /* nothing has been written */ + err = ERR_WOULDBLOCK; + } else { + /* partial write */ + err = ERR_OK; + } + } else +#endif /* LWIP_SO_SNDTIMEO */ + { + do { + dataptr = (const u8_t *)conn->current_msg->msg.w.vector->ptr + conn->current_msg->msg.w.vector_off; + diff = conn->current_msg->msg.w.vector->len - conn->current_msg->msg.w.vector_off; + if (diff > 0xffffUL) { /* max_u16_t */ + len = 0xffff; + apiflags |= TCP_WRITE_FLAG_MORE; + } else { + len = (u16_t)diff; + } + available = tcp_sndbuf(conn->pcb.tcp); + if (available < len) { + /* don't try to write more than sendbuf */ + len = available; + if (dontblock) { + if (!len) { + /* set error according to partial write or not */ + err = (conn->current_msg->msg.w.offset == 0) ? ERR_WOULDBLOCK : ERR_OK; + goto err_mem; + } + } else { + apiflags |= TCP_WRITE_FLAG_MORE; + } + } + LWIP_ASSERT("lwip_netconn_do_writemore: invalid length!", + ((conn->current_msg->msg.w.vector_off + len) <= conn->current_msg->msg.w.vector->len)); + /* we should loop around for more sending in the following cases: + 1) We couldn't finish the current vector because of 16-bit size limitations. 
+ tcp_write() and tcp_sndbuf() both are limited to 16-bit sizes + 2) We are sending the remainder of the current vector and have more */ + if ((len == 0xffff && diff > 0xffffUL) || + (len == (u16_t)diff && conn->current_msg->msg.w.vector_cnt > 1)) { + write_more = 1; + apiflags |= TCP_WRITE_FLAG_MORE; + } else { + write_more = 0; + } + err = tcp_write(conn->pcb.tcp, dataptr, len, apiflags); + if (err == ERR_OK) { + conn->current_msg->msg.w.offset += len; + conn->current_msg->msg.w.vector_off += len; + /* check if current vector is finished */ + if (conn->current_msg->msg.w.vector_off == conn->current_msg->msg.w.vector->len) { + conn->current_msg->msg.w.vector_cnt--; + /* if we have additional vectors, move on to them */ + if (conn->current_msg->msg.w.vector_cnt > 0) { + conn->current_msg->msg.w.vector++; + conn->current_msg->msg.w.vector_off = 0; + } + } + } + } while (write_more && err == ERR_OK); + /* if OK or memory error, check available space */ + if ((err == ERR_OK) || (err == ERR_MEM)) { +err_mem: + if (dontblock && (conn->current_msg->msg.w.offset < conn->current_msg->msg.w.len)) { + /* non-blocking write did not write everything: mark the pcb non-writable + and let poll_tcp check writable space to mark the pcb writable again */ + API_EVENT(conn, NETCONN_EVT_SENDMINUS, 0); + conn->flags |= NETCONN_FLAG_CHECK_WRITESPACE; + } else if ((tcp_sndbuf(conn->pcb.tcp) <= TCP_SNDLOWAT) || + (tcp_sndqueuelen(conn->pcb.tcp) >= TCP_SNDQUEUELOWAT)) { + /* The queued byte- or pbuf-count exceeds the configured low-water limit, + let select mark this pcb as non-writable. */ + API_EVENT(conn, NETCONN_EVT_SENDMINUS, 0); + } + } + + if (err == ERR_OK) { + err_t out_err; + if ((conn->current_msg->msg.w.offset == conn->current_msg->msg.w.len) || dontblock) { + /* return sent length (caller reads length from msg.w.offset) */ + write_finished = 1; + } + out_err = tcp_output(conn->pcb.tcp); + if (out_err == ERR_RTE) { + /* If tcp_output fails because no route is found, + don't try writing any more but return the error + to the application thread. */ + err = out_err; + write_finished = 1; + } + } else if (err == ERR_MEM) { + /* If ERR_MEM, we wait for sent_tcp or poll_tcp to be called. + For blocking sockets, we do NOT return to the application + thread, since ERR_MEM is only a temporary error! Non-blocking + will remain non-writable until sent_tcp/poll_tcp is called */ + + /* tcp_write returned ERR_MEM, try tcp_output anyway */ + err_t out_err = tcp_output(conn->pcb.tcp); + if (out_err == ERR_RTE) { + /* If tcp_output fails because no route is found, + don't try writing any more but return the error + to the application thread. */ + err = out_err; + write_finished = 1; + } else if (dontblock) { + /* non-blocking write is done on ERR_MEM, set error according + to partial write or not */ + err = (conn->current_msg->msg.w.offset == 0) ? ERR_WOULDBLOCK : ERR_OK; + write_finished = 1; + } + } else { + /* On errors != ERR_MEM, we don't try writing any more but return + the error to the application thread. 
*/ + write_finished = 1; + } + } + if (write_finished) { + /* everything was written: set back connection state + and back to application task */ + sys_sem_t *op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg); + conn->current_msg->err = err; + conn->current_msg = NULL; + conn->state = NETCONN_NONE; +#if LWIP_TCPIP_CORE_LOCKING + if (delayed) +#endif + { + sys_sem_signal(op_completed_sem); + } + } +#if LWIP_TCPIP_CORE_LOCKING + else { + return ERR_MEM; + } +#endif + return ERR_OK; +} +#endif /* LWIP_TCP */ + +/** + * Send some data on a TCP pcb contained in a netconn + * Called from netconn_write + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_write(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + err_t err = netconn_err(msg->conn); + if (err == ERR_OK) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) { +#if LWIP_TCP + if (msg->conn->state != NETCONN_NONE) { + /* netconn is connecting, closing or in blocking write */ + err = ERR_INPROGRESS; + } else if (msg->conn->pcb.tcp != NULL) { + msg->conn->state = NETCONN_WRITE; + /* set all the variables used by lwip_netconn_do_writemore */ + LWIP_ASSERT("already writing or closing", msg->conn->current_msg == NULL); + LWIP_ASSERT("msg->msg.w.len != 0", msg->msg.w.len != 0); + msg->conn->current_msg = msg; +#if LWIP_TCPIP_CORE_LOCKING + if (lwip_netconn_do_writemore(msg->conn, 0) != ERR_OK) { + LWIP_ASSERT("state!", msg->conn->state == NETCONN_WRITE); + UNLOCK_TCPIP_CORE(); + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("state!", msg->conn->state != NETCONN_WRITE); + } +#else /* LWIP_TCPIP_CORE_LOCKING */ + lwip_netconn_do_writemore(msg->conn); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + /* for both cases: if lwip_netconn_do_writemore was called, don't ACK the APIMSG + since lwip_netconn_do_writemore ACKs it! 
*/ + return; + } else { + err = ERR_CONN; + } +#else /* LWIP_TCP */ + err = ERR_VAL; +#endif /* LWIP_TCP */ +#if (LWIP_UDP || LWIP_RAW) + } else { + err = ERR_VAL; +#endif /* (LWIP_UDP || LWIP_RAW) */ + } + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} + +/** + * Return a connection's local or remote address + * Called from netconn_getaddr + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_getaddr(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + if (msg->conn->pcb.ip != NULL) { + if (msg->msg.ad.local) { + ip_addr_copy(API_EXPR_DEREF(msg->msg.ad.ipaddr), + msg->conn->pcb.ip->local_ip); + } else { + ip_addr_copy(API_EXPR_DEREF(msg->msg.ad.ipaddr), + msg->conn->pcb.ip->remote_ip); + } + + msg->err = ERR_OK; + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + if (msg->msg.ad.local) { + API_EXPR_DEREF(msg->msg.ad.port) = msg->conn->pcb.raw->protocol; + } else { + /* return an error as connecting is only a helper for upper layers */ + msg->err = ERR_CONN; + } + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + if (msg->msg.ad.local) { + API_EXPR_DEREF(msg->msg.ad.port) = msg->conn->pcb.udp->local_port; + } else { + if ((msg->conn->pcb.udp->flags & UDP_FLAGS_CONNECTED) == 0) { + msg->err = ERR_CONN; + } else { + API_EXPR_DEREF(msg->msg.ad.port) = msg->conn->pcb.udp->remote_port; + } + } + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + if ((msg->msg.ad.local == 0) && + ((msg->conn->pcb.tcp->state == CLOSED) || (msg->conn->pcb.tcp->state == LISTEN))) { + /* pcb is not connected and remote name is requested */ + msg->err = ERR_CONN; + } else { + API_EXPR_DEREF(msg->msg.ad.port) = (msg->msg.ad.local ? msg->conn->pcb.tcp->local_port : msg->conn->pcb.tcp->remote_port); + } + break; +#endif /* LWIP_TCP */ + default: + LWIP_ASSERT("invalid netconn_type", 0); + break; + } + } else { + msg->err = ERR_CONN; + } + TCPIP_APIMSG_ACK(msg); +} + +/** + * Close or half-shutdown a TCP pcb contained in a netconn + * Called from netconn_close + * In contrast to closing sockets, the netconn is not deallocated. + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_close(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + +#if LWIP_TCP + enum netconn_state state = msg->conn->state; + /* First check if this is a TCP netconn and if it is in a correct state + (LISTEN doesn't support half shutdown) */ + if ((msg->conn->pcb.tcp != NULL) && + (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) && + ((msg->msg.sd.shut == NETCONN_SHUT_RDWR) || (state != NETCONN_LISTEN))) { + /* Check if we are in a connected state */ + if (state == NETCONN_CONNECT) { + /* TCP connect in progress: cannot shutdown */ + msg->err = ERR_CONN; + } else if (state == NETCONN_WRITE) { +#if LWIP_NETCONN_FULLDUPLEX + if (msg->msg.sd.shut & NETCONN_SHUT_WR) { + /* close requested, abort running write */ + sys_sem_t *write_completed_sem; + LWIP_ASSERT("msg->conn->current_msg != NULL", msg->conn->current_msg != NULL); + write_completed_sem = LWIP_API_MSG_SEM(msg->conn->current_msg); + msg->conn->current_msg->err = ERR_CLSD; + msg->conn->current_msg = NULL; + msg->conn->state = NETCONN_NONE; + state = NETCONN_NONE; + sys_sem_signal(write_completed_sem); + } else { + LWIP_ASSERT("msg->msg.sd.shut == NETCONN_SHUT_RD", msg->msg.sd.shut == NETCONN_SHUT_RD); + /* In this case, let the write continue and do not interfere with + conn->current_msg or conn->state! 
*/ + msg->err = tcp_shutdown(msg->conn->pcb.tcp, 1, 0); + } + } + if (state == NETCONN_NONE) { +#else /* LWIP_NETCONN_FULLDUPLEX */ + msg->err = ERR_INPROGRESS; + } else { +#endif /* LWIP_NETCONN_FULLDUPLEX */ + if (msg->msg.sd.shut & NETCONN_SHUT_RD) { +#if LWIP_NETCONN_FULLDUPLEX + /* Mark mboxes invalid */ + netconn_mark_mbox_invalid(msg->conn); +#else /* LWIP_NETCONN_FULLDUPLEX */ + netconn_drain(msg->conn); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + } + LWIP_ASSERT("already writing or closing", msg->conn->current_msg == NULL); + msg->conn->state = NETCONN_CLOSE; + msg->conn->current_msg = msg; +#if LWIP_TCPIP_CORE_LOCKING + if (lwip_netconn_do_close_internal(msg->conn, 0) != ERR_OK) { + LWIP_ASSERT("state!", msg->conn->state == NETCONN_CLOSE); + UNLOCK_TCPIP_CORE(); + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("state!", msg->conn->state == NETCONN_NONE); + } +#else /* LWIP_TCPIP_CORE_LOCKING */ + lwip_netconn_do_close_internal(msg->conn); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + /* for tcp netconns, lwip_netconn_do_close_internal ACKs the message */ + return; + } + } else +#endif /* LWIP_TCP */ + { + msg->err = ERR_CONN; + } + TCPIP_APIMSG_ACK(msg); +} + +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +/** + * Join multicast groups for UDP netconns. + * Called from netconn_join_leave_group + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_join_leave_group(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + msg->err = ERR_CONN; + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_UDP) { +#if LWIP_UDP +#if LWIP_IPV6 && LWIP_IPV6_MLD + if (NETCONNTYPE_ISIPV6(msg->conn->type)) { + if (msg->msg.jl.join_or_leave == NETCONN_JOIN) { + msg->err = mld6_joingroup(ip_2_ip6(API_EXPR_REF(msg->msg.jl.netif_addr)), + ip_2_ip6(API_EXPR_REF(msg->msg.jl.multiaddr))); + } else { + msg->err = mld6_leavegroup(ip_2_ip6(API_EXPR_REF(msg->msg.jl.netif_addr)), + ip_2_ip6(API_EXPR_REF(msg->msg.jl.multiaddr))); + } + } else +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + { +#if LWIP_IGMP + if (msg->msg.jl.join_or_leave == NETCONN_JOIN) { + msg->err = igmp_joingroup(ip_2_ip4(API_EXPR_REF(msg->msg.jl.netif_addr)), + ip_2_ip4(API_EXPR_REF(msg->msg.jl.multiaddr))); + } else { + msg->err = igmp_leavegroup(ip_2_ip4(API_EXPR_REF(msg->msg.jl.netif_addr)), + ip_2_ip4(API_EXPR_REF(msg->msg.jl.multiaddr))); + } +#endif /* LWIP_IGMP */ + } +#endif /* LWIP_UDP */ +#if (LWIP_TCP || LWIP_RAW) + } else { + msg->err = ERR_VAL; +#endif /* (LWIP_TCP || LWIP_RAW) */ + } + } + TCPIP_APIMSG_ACK(msg); +} +/** + * Join multicast groups for UDP netconns. 
+ * Called from netconn_join_leave_group_netif + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_join_leave_group_netif(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + struct netif *netif; + + netif = netif_get_by_index(msg->msg.jl.if_idx); + if (netif == NULL) { + msg->err = ERR_IF; + goto done; + } + + msg->err = ERR_CONN; + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_UDP) { +#if LWIP_UDP +#if LWIP_IPV6 && LWIP_IPV6_MLD + if (NETCONNTYPE_ISIPV6(msg->conn->type)) { + if (msg->msg.jl.join_or_leave == NETCONN_JOIN) { + msg->err = mld6_joingroup_netif(netif, + ip_2_ip6(API_EXPR_REF(msg->msg.jl.multiaddr))); + } else { + msg->err = mld6_leavegroup_netif(netif, + ip_2_ip6(API_EXPR_REF(msg->msg.jl.multiaddr))); + } + } else +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + { +#if LWIP_IGMP + if (msg->msg.jl.join_or_leave == NETCONN_JOIN) { + msg->err = igmp_joingroup_netif(netif, + ip_2_ip4(API_EXPR_REF(msg->msg.jl.multiaddr))); + } else { + msg->err = igmp_leavegroup_netif(netif, + ip_2_ip4(API_EXPR_REF(msg->msg.jl.multiaddr))); + } +#endif /* LWIP_IGMP */ + } +#endif /* LWIP_UDP */ +#if (LWIP_TCP || LWIP_RAW) + } else { + msg->err = ERR_VAL; +#endif /* (LWIP_TCP || LWIP_RAW) */ + } + } + +done: + TCPIP_APIMSG_ACK(msg); +} +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ + +#if LWIP_DNS +/** + * Callback function that is called when DNS name is resolved + * (or on timeout). A waiting application thread is waked up by + * signaling the semaphore. + */ +static void +lwip_netconn_do_dns_found(const char *name, const ip_addr_t *ipaddr, void *arg) +{ + struct dns_api_msg *msg = (struct dns_api_msg *)arg; + + /* we trust the internal implementation to be correct :-) */ + LWIP_UNUSED_ARG(name); + + if (ipaddr == NULL) { + /* timeout or memory error */ + API_EXPR_DEREF(msg->err) = ERR_VAL; + } else { + /* address was resolved */ + API_EXPR_DEREF(msg->err) = ERR_OK; + API_EXPR_DEREF(msg->addr) = *ipaddr; + } + /* wake up the application task waiting in netconn_gethostbyname */ + sys_sem_signal(API_EXPR_REF_SEM(msg->sem)); +} + +/** + * Execute a DNS query + * Called from netconn_gethostbyname + * + * @param arg the dns_api_msg pointing to the query + */ +void +lwip_netconn_do_gethostbyname(void *arg) +{ + struct dns_api_msg *msg = (struct dns_api_msg *)arg; + u8_t addrtype = +#if LWIP_IPV4 && LWIP_IPV6 + msg->dns_addrtype; +#else + LWIP_DNS_ADDRTYPE_DEFAULT; +#endif + + API_EXPR_DEREF(msg->err) = dns_gethostbyname_addrtype(msg->name, + API_EXPR_REF(msg->addr), lwip_netconn_do_dns_found, msg, addrtype); +#if LWIP_TCPIP_CORE_LOCKING + /* For core locking, only block if we need to wait for answer/timeout */ + if (API_EXPR_DEREF(msg->err) == ERR_INPROGRESS) { + UNLOCK_TCPIP_CORE(); + sys_sem_wait(API_EXPR_REF_SEM(msg->sem)); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("do_gethostbyname still in progress!!", API_EXPR_DEREF(msg->err) != ERR_INPROGRESS); + } +#else /* LWIP_TCPIP_CORE_LOCKING */ + if (API_EXPR_DEREF(msg->err) != ERR_INPROGRESS) { + /* on error or immediate success, wake up the application + * task waiting in netconn_gethostbyname */ + sys_sem_signal(API_EXPR_REF_SEM(msg->sem)); + } +#endif /* LWIP_TCPIP_CORE_LOCKING */ +} +#endif /* LWIP_DNS */ + +#endif /* LWIP_NETCONN */ diff --git a/Middlewares/Third_Party/LwIP/src/api/err.c b/Middlewares/Third_Party/LwIP/src/api/err.c new file mode 100644 index 0000000..dd2b62d --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/api/err.c @@ -0,0 +1,115 @@ +/** + * @file + * 
Error Management module + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/err.h" +#include "lwip/def.h" +#include "lwip/sys.h" + +#include "lwip/errno.h" + +#if !NO_SYS +/** Table to quickly map an lwIP error (err_t) to a socket error + * by using -err as an index */ +static const int err_to_errno_table[] = { + 0, /* ERR_OK 0 No error, everything OK. */ + ENOMEM, /* ERR_MEM -1 Out of memory error. */ + ENOBUFS, /* ERR_BUF -2 Buffer error. */ + EWOULDBLOCK, /* ERR_TIMEOUT -3 Timeout */ + EHOSTUNREACH, /* ERR_RTE -4 Routing problem. */ + EINPROGRESS, /* ERR_INPROGRESS -5 Operation in progress */ + EINVAL, /* ERR_VAL -6 Illegal value. */ + EWOULDBLOCK, /* ERR_WOULDBLOCK -7 Operation would block. */ + EADDRINUSE, /* ERR_USE -8 Address in use. */ + EALREADY, /* ERR_ALREADY -9 Already connecting. */ + EISCONN, /* ERR_ISCONN -10 Conn already established.*/ + ENOTCONN, /* ERR_CONN -11 Not connected. */ + -1, /* ERR_IF -12 Low-level netif error */ + ECONNABORTED, /* ERR_ABRT -13 Connection aborted. */ + ECONNRESET, /* ERR_RST -14 Connection reset. */ + ENOTCONN, /* ERR_CLSD -15 Connection closed. */ + EIO /* ERR_ARG -16 Illegal argument. 
*/ +}; + +int +err_to_errno(err_t err) +{ + if ((err > 0) || (-err >= (err_t)LWIP_ARRAYSIZE(err_to_errno_table))) { + return EIO; + } + return err_to_errno_table[-err]; +} +#endif /* !NO_SYS */ + +#ifdef LWIP_DEBUG + +static const char *err_strerr[] = { + "Ok.", /* ERR_OK 0 */ + "Out of memory error.", /* ERR_MEM -1 */ + "Buffer error.", /* ERR_BUF -2 */ + "Timeout.", /* ERR_TIMEOUT -3 */ + "Routing problem.", /* ERR_RTE -4 */ + "Operation in progress.", /* ERR_INPROGRESS -5 */ + "Illegal value.", /* ERR_VAL -6 */ + "Operation would block.", /* ERR_WOULDBLOCK -7 */ + "Address in use.", /* ERR_USE -8 */ + "Already connecting.", /* ERR_ALREADY -9 */ + "Already connected.", /* ERR_ISCONN -10 */ + "Not connected.", /* ERR_CONN -11 */ + "Low-level netif error.", /* ERR_IF -12 */ + "Connection aborted.", /* ERR_ABRT -13 */ + "Connection reset.", /* ERR_RST -14 */ + "Connection closed.", /* ERR_CLSD -15 */ + "Illegal argument." /* ERR_ARG -16 */ +}; + +/** + * Convert an lwip internal error to a string representation. + * + * @param err an lwip internal err_t + * @return a string representation for err + */ +const char * +lwip_strerr(err_t err) +{ + if ((err > 0) || (-err >= (err_t)LWIP_ARRAYSIZE(err_strerr))) { + return "Unknown error."; + } + return err_strerr[-err]; +} + +#endif /* LWIP_DEBUG */ diff --git a/Middlewares/Third_Party/LwIP/src/api/if_api.c b/Middlewares/Third_Party/LwIP/src/api/if_api.c new file mode 100644 index 0000000..8e094d0 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/api/if_api.c @@ -0,0 +1,102 @@ +/** + * @file + * Interface Identification APIs from: + * RFC 3493: Basic Socket Interface Extensions for IPv6 + * Section 4: Interface Identification + * + * @defgroup if_api Interface Identification API + * @ingroup socket + */ + +/* + * Copyright (c) 2017 Joel Cunningham, Garmin International, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ *
+ * Author: Joel Cunningham
+ *
+ */
+#include "lwip/opt.h"
+
+#if LWIP_SOCKET
+
+#include "lwip/errno.h"
+#include "lwip/if_api.h"
+#include "lwip/netifapi.h"
+#include "lwip/priv/sockets_priv.h"
+
+/**
+ * @ingroup if_api
+ * Maps an interface index to its corresponding name.
+ * @param ifindex interface index
+ * @param ifname shall point to a buffer of at least {IF_NAMESIZE} bytes
+ * @return If ifindex is an interface index, then the function shall return the
+ * value supplied in ifname, which points to a buffer now containing the interface name.
+ * Otherwise, the function shall return a NULL pointer.
+ */
+char *
+lwip_if_indextoname(unsigned int ifindex, char *ifname)
+{
+#if LWIP_NETIF_API
+  if (ifindex <= 0xff) {
+    err_t err = netifapi_netif_index_to_name((u8_t)ifindex, ifname);
+    if (!err && ifname[0] != '\0') {
+      return ifname;
+    }
+  }
+#else /* LWIP_NETIF_API */
+  LWIP_UNUSED_ARG(ifindex);
+  LWIP_UNUSED_ARG(ifname);
+#endif /* LWIP_NETIF_API */
+  set_errno(ENXIO);
+  return NULL;
+}
+
+/**
+ * @ingroup if_api
+ * Returns the interface index corresponding to name ifname.
+ * @param ifname Interface name
+ * @return The corresponding index if ifname is the name of an interface;
+ *         otherwise, zero.
+ */
+unsigned int
+lwip_if_nametoindex(const char *ifname)
+{
+#if LWIP_NETIF_API
+  err_t err;
+  u8_t idx;
+
+  err = netifapi_netif_name_to_index(ifname, &idx);
+  if (!err) {
+    return idx;
+  }
+#else /* LWIP_NETIF_API */
+  LWIP_UNUSED_ARG(ifname);
+#endif /* LWIP_NETIF_API */
+  return 0; /* invalid index */
+}
+
+#endif /* LWIP_SOCKET */
diff --git a/Middlewares/Third_Party/LwIP/src/api/netbuf.c b/Middlewares/Third_Party/LwIP/src/api/netbuf.c
new file mode 100644
index 0000000..3b910de
--- /dev/null
+++ b/Middlewares/Third_Party/LwIP/src/api/netbuf.c
@@ -0,0 +1,250 @@
+/**
+ * @file
+ * Network buffer management
+ *
+ * @defgroup netbuf Network buffers
+ * @ingroup netconn
+ * Network buffer descriptor for @ref netconn. Based on @ref pbuf internally
+ * to avoid copying data around.\n
+ * Buffers must not be shared across multiple threads; all functions except
+ * netbuf_new() and netbuf_delete() are not thread-safe.
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_NETCONN /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/netbuf.h"
+#include "lwip/memp.h"
+
+#include <string.h>
+
+/**
+ * @ingroup netbuf
+ * Create (allocate) and initialize a new netbuf.
+ * The netbuf doesn't yet contain a packet buffer!
+ *
+ * @return a pointer to a new netbuf
+ *         NULL on lack of memory
+ */
+struct
+netbuf *netbuf_new(void)
+{
+  struct netbuf *buf;
+
+  buf = (struct netbuf *)memp_malloc(MEMP_NETBUF);
+  if (buf != NULL) {
+    memset(buf, 0, sizeof(struct netbuf));
+  }
+  return buf;
+}
+
+/**
+ * @ingroup netbuf
+ * Deallocate a netbuf allocated by netbuf_new().
+ *
+ * @param buf pointer to a netbuf allocated by netbuf_new()
+ */
+void
+netbuf_delete(struct netbuf *buf)
+{
+  if (buf != NULL) {
+    if (buf->p != NULL) {
+      pbuf_free(buf->p);
+      buf->p = buf->ptr = NULL;
+    }
+    memp_free(MEMP_NETBUF, buf);
+  }
+}
+
+/**
+ * @ingroup netbuf
+ * Allocate memory for a packet buffer for a given netbuf.
+ *
+ * @param buf the netbuf for which to allocate a packet buffer
+ * @param size the size of the packet buffer to allocate
+ * @return pointer to the allocated memory
+ *         NULL if no memory could be allocated
+ */
+void *
+netbuf_alloc(struct netbuf *buf, u16_t size)
+{
+  LWIP_ERROR("netbuf_alloc: invalid buf", (buf != NULL), return NULL;);
+
+  /* Deallocate any previously allocated memory. */
+  if (buf->p != NULL) {
+    pbuf_free(buf->p);
+  }
+  buf->p = pbuf_alloc(PBUF_TRANSPORT, size, PBUF_RAM);
+  if (buf->p == NULL) {
+    return NULL;
+  }
+  LWIP_ASSERT("check that first pbuf can hold size",
+              (buf->p->len >= size));
+  buf->ptr = buf->p;
+  return buf->p->payload;
+}
+
+/**
+ * @ingroup netbuf
+ * Free the packet buffer included in a netbuf
+ *
+ * @param buf pointer to the netbuf which contains the packet buffer to free
+ */
+void
+netbuf_free(struct netbuf *buf)
+{
+  LWIP_ERROR("netbuf_free: invalid buf", (buf != NULL), return;);
+  if (buf->p != NULL) {
+    pbuf_free(buf->p);
+  }
+  buf->p = buf->ptr = NULL;
+#if LWIP_CHECKSUM_ON_COPY
+  buf->flags = 0;
+  buf->toport_chksum = 0;
+#endif /* LWIP_CHECKSUM_ON_COPY */
+}
+
+/**
+ * @ingroup netbuf
+ * Let a netbuf reference existing (non-volatile) data.
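+ *
+ * Typical use is zero-copy sending on a UDP netconn (illustrative sketch only;
+ * "conn" is assumed to be a connected UDP netconn, error checks omitted):
+ * @code{.c}
+ * static const char payload[] = "ping";   // must stay valid while the send is in flight
+ * struct netbuf *nb = netbuf_new();
+ * netbuf_ref(nb, payload, sizeof(payload) - 1);
+ * netconn_send(conn, nb);
+ * netbuf_delete(nb);
+ * @endcode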
+ * + * @param buf netbuf which should reference the data + * @param dataptr pointer to the data to reference + * @param size size of the data + * @return ERR_OK if data is referenced + * ERR_MEM if data couldn't be referenced due to lack of memory + */ +err_t +netbuf_ref(struct netbuf *buf, const void *dataptr, u16_t size) +{ + LWIP_ERROR("netbuf_ref: invalid buf", (buf != NULL), return ERR_ARG;); + if (buf->p != NULL) { + pbuf_free(buf->p); + } + buf->p = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_REF); + if (buf->p == NULL) { + buf->ptr = NULL; + return ERR_MEM; + } + ((struct pbuf_rom *)buf->p)->payload = dataptr; + buf->p->len = buf->p->tot_len = size; + buf->ptr = buf->p; + return ERR_OK; +} + +/** + * @ingroup netbuf + * Chain one netbuf to another (@see pbuf_chain) + * + * @param head the first netbuf + * @param tail netbuf to chain after head, freed by this function, may not be reference after returning + */ +void +netbuf_chain(struct netbuf *head, struct netbuf *tail) +{ + LWIP_ERROR("netbuf_chain: invalid head", (head != NULL), return;); + LWIP_ERROR("netbuf_chain: invalid tail", (tail != NULL), return;); + pbuf_cat(head->p, tail->p); + head->ptr = head->p; + memp_free(MEMP_NETBUF, tail); +} + +/** + * @ingroup netbuf + * Get the data pointer and length of the data inside a netbuf. + * + * @param buf netbuf to get the data from + * @param dataptr pointer to a void pointer where to store the data pointer + * @param len pointer to an u16_t where the length of the data is stored + * @return ERR_OK if the information was retrieved, + * ERR_BUF on error. + */ +err_t +netbuf_data(struct netbuf *buf, void **dataptr, u16_t *len) +{ + LWIP_ERROR("netbuf_data: invalid buf", (buf != NULL), return ERR_ARG;); + LWIP_ERROR("netbuf_data: invalid dataptr", (dataptr != NULL), return ERR_ARG;); + LWIP_ERROR("netbuf_data: invalid len", (len != NULL), return ERR_ARG;); + + if (buf->ptr == NULL) { + return ERR_BUF; + } + *dataptr = buf->ptr->payload; + *len = buf->ptr->len; + return ERR_OK; +} + +/** + * @ingroup netbuf + * Move the current data pointer of a packet buffer contained in a netbuf + * to the next part. + * The packet buffer itself is not modified. + * + * @param buf the netbuf to modify + * @return -1 if there is no next part + * 1 if moved to the next part but now there is no next part + * 0 if moved to the next part and there are still more parts + */ +s8_t +netbuf_next(struct netbuf *buf) +{ + LWIP_ERROR("netbuf_next: invalid buf", (buf != NULL), return -1;); + if (buf->ptr->next == NULL) { + return -1; + } + buf->ptr = buf->ptr->next; + if (buf->ptr->next == NULL) { + return 1; + } + return 0; +} + +/** + * @ingroup netbuf + * Move the current data pointer of a packet buffer contained in a netbuf + * to the beginning of the packet. + * The packet buffer itself is not modified. + * + * @param buf the netbuf to modify + */ +void +netbuf_first(struct netbuf *buf) +{ + LWIP_ERROR("netbuf_first: invalid buf", (buf != NULL), return;); + buf->ptr = buf->p; +} + +#endif /* LWIP_NETCONN */ diff --git a/Middlewares/Third_Party/LwIP/src/api/netdb.c b/Middlewares/Third_Party/LwIP/src/api/netdb.c new file mode 100644 index 0000000..8771425 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/api/netdb.c @@ -0,0 +1,414 @@ +/** + * @file + * API functions for name resolving + * + * @defgroup netdbapi NETDB API + * @ingroup socket + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Simon Goldschmidt
+ *
+ */
+
+#include "lwip/netdb.h"
+
+#if LWIP_DNS && LWIP_SOCKET
+
+#include "lwip/err.h"
+#include "lwip/mem.h"
+#include "lwip/memp.h"
+#include "lwip/ip_addr.h"
+#include "lwip/api.h"
+#include "lwip/dns.h"
+
+#include <string.h> /* memset */
+#include <stdlib.h> /* atoi */
+
+/** helper struct for gethostbyname_r to access the char* buffer */
+struct gethostbyname_r_helper {
+  ip_addr_t *addr_list[2];
+  ip_addr_t addr;
+  char *aliases;
+};
+
+/** h_errno is exported in netdb.h for access by applications. */
+#if LWIP_DNS_API_DECLARE_H_ERRNO
+int h_errno;
+#endif /* LWIP_DNS_API_DECLARE_H_ERRNO */
+
+/** define "hostent" variables storage: 0 if we use a static (but unprotected)
+ * set of variables for lwip_gethostbyname, 1 if we use local storage */
+#ifndef LWIP_DNS_API_HOSTENT_STORAGE
+#define LWIP_DNS_API_HOSTENT_STORAGE 0
+#endif
+
+/** define "hostent" variables storage */
+#if LWIP_DNS_API_HOSTENT_STORAGE
+#define HOSTENT_STORAGE
+#else
+#define HOSTENT_STORAGE static
+#endif /* LWIP_DNS_API_HOSTENT_STORAGE */
+
+/**
+ * Returns an entry containing addresses of address family AF_INET
+ * for the host with name name.
+ * Due to dns_gethostbyname limitations, only one address is returned.
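+ *
+ * Minimal usage sketch (the hostname is illustrative; a DNS server must be
+ * configured and LWIP_DNS enabled):
+ * @code{.c}
+ * struct hostent *he = lwip_gethostbyname("example.com");
+ * if (he != NULL) {
+ *   ip_addr_t *resolved = (ip_addr_t *)he->h_addr_list[0];
+ *   // e.g. print it with ipaddr_ntoa(resolved)
+ * }
+ * @endcode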
+ * + * @param name the hostname to resolve + * @return an entry containing addresses of address family AF_INET + * for the host with name name + */ +struct hostent * +lwip_gethostbyname(const char *name) +{ + err_t err; + ip_addr_t addr; + + /* buffer variables for lwip_gethostbyname() */ + HOSTENT_STORAGE struct hostent s_hostent; + HOSTENT_STORAGE char *s_aliases; + HOSTENT_STORAGE ip_addr_t s_hostent_addr; + HOSTENT_STORAGE ip_addr_t *s_phostent_addr[2]; + HOSTENT_STORAGE char s_hostname[DNS_MAX_NAME_LENGTH + 1]; + + /* query host IP address */ + err = netconn_gethostbyname(name, &addr); + if (err != ERR_OK) { + LWIP_DEBUGF(DNS_DEBUG, ("lwip_gethostbyname(%s) failed, err=%d\n", name, err)); + h_errno = HOST_NOT_FOUND; + return NULL; + } + + /* fill hostent */ + s_hostent_addr = addr; + s_phostent_addr[0] = &s_hostent_addr; + s_phostent_addr[1] = NULL; + strncpy(s_hostname, name, DNS_MAX_NAME_LENGTH); + s_hostname[DNS_MAX_NAME_LENGTH] = 0; + s_hostent.h_name = s_hostname; + s_aliases = NULL; + s_hostent.h_aliases = &s_aliases; + s_hostent.h_addrtype = AF_INET; + s_hostent.h_length = sizeof(ip_addr_t); + s_hostent.h_addr_list = (char **)&s_phostent_addr; + +#if DNS_DEBUG + /* dump hostent */ + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_name == %s\n", s_hostent.h_name)); + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_aliases == %p\n", (void *)s_hostent.h_aliases)); + /* h_aliases are always empty */ + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addrtype == %d\n", s_hostent.h_addrtype)); + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_length == %d\n", s_hostent.h_length)); + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addr_list == %p\n", (void *)s_hostent.h_addr_list)); + if (s_hostent.h_addr_list != NULL) { + u8_t idx; + for (idx = 0; s_hostent.h_addr_list[idx]; idx++) { + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addr_list[%i] == %p\n", idx, s_hostent.h_addr_list[idx])); + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addr_list[%i]-> == %s\n", idx, ipaddr_ntoa((ip_addr_t *)s_hostent.h_addr_list[idx]))); + } + } +#endif /* DNS_DEBUG */ + +#if LWIP_DNS_API_HOSTENT_STORAGE + /* this function should return the "per-thread" hostent after copy from s_hostent */ + return sys_thread_hostent(&s_hostent); +#else + return &s_hostent; +#endif /* LWIP_DNS_API_HOSTENT_STORAGE */ +} + +/** + * Thread-safe variant of lwip_gethostbyname: instead of using a static + * buffer, this function takes buffer and errno pointers as arguments + * and uses these for the result. 
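+ *
+ * Usage sketch (hostname and buffer size are illustrative; the buffer must be
+ * large enough for a struct gethostbyname_r_helper plus a copy of the name):
+ * @code{.c}
+ * struct hostent h, *result;
+ * char buf[128];
+ * int local_h_errno;
+ * if (lwip_gethostbyname_r("example.com", &h, buf, sizeof(buf),
+ *                          &result, &local_h_errno) == 0) {
+ *   // result points to h; addresses are in result->h_addr_list
+ * }
+ * @endcode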
+ * + * @param name the hostname to resolve + * @param ret pre-allocated struct where to store the result + * @param buf pre-allocated buffer where to store additional data + * @param buflen the size of buf + * @param result pointer to a hostent pointer that is set to ret on success + * and set to zero on error + * @param h_errnop pointer to an int where to store errors (instead of modifying + * the global h_errno) + * @return 0 on success, non-zero on error, additional error information + * is stored in *h_errnop instead of h_errno to be thread-safe + */ +int +lwip_gethostbyname_r(const char *name, struct hostent *ret, char *buf, + size_t buflen, struct hostent **result, int *h_errnop) +{ + err_t err; + struct gethostbyname_r_helper *h; + char *hostname; + size_t namelen; + int lh_errno; + + if (h_errnop == NULL) { + /* ensure h_errnop is never NULL */ + h_errnop = &lh_errno; + } + + if (result == NULL) { + /* not all arguments given */ + *h_errnop = EINVAL; + return -1; + } + /* first thing to do: set *result to nothing */ + *result = NULL; + if ((name == NULL) || (ret == NULL) || (buf == NULL)) { + /* not all arguments given */ + *h_errnop = EINVAL; + return -1; + } + + namelen = strlen(name); + if (buflen < (sizeof(struct gethostbyname_r_helper) + LWIP_MEM_ALIGN_BUFFER(namelen + 1))) { + /* buf can't hold the data needed + a copy of name */ + *h_errnop = ERANGE; + return -1; + } + + h = (struct gethostbyname_r_helper *)LWIP_MEM_ALIGN(buf); + hostname = ((char *)h) + sizeof(struct gethostbyname_r_helper); + + /* query host IP address */ + err = netconn_gethostbyname(name, &h->addr); + if (err != ERR_OK) { + LWIP_DEBUGF(DNS_DEBUG, ("lwip_gethostbyname(%s) failed, err=%d\n", name, err)); + *h_errnop = HOST_NOT_FOUND; + return -1; + } + + /* copy the hostname into buf */ + MEMCPY(hostname, name, namelen); + hostname[namelen] = 0; + + /* fill hostent */ + h->addr_list[0] = &h->addr; + h->addr_list[1] = NULL; + h->aliases = NULL; + ret->h_name = hostname; + ret->h_aliases = &h->aliases; + ret->h_addrtype = AF_INET; + ret->h_length = sizeof(ip_addr_t); + ret->h_addr_list = (char **)&h->addr_list; + + /* set result != NULL */ + *result = ret; + + /* return success */ + return 0; +} + +/** + * Frees one or more addrinfo structures returned by getaddrinfo(), along with + * any additional storage associated with those structures. If the ai_next field + * of the structure is not null, the entire list of structures is freed. + * + * @param ai struct addrinfo to free + */ +void +lwip_freeaddrinfo(struct addrinfo *ai) +{ + struct addrinfo *next; + + while (ai != NULL) { + next = ai->ai_next; + memp_free(MEMP_NETDB, ai); + ai = next; + } +} + +/** + * Translates the name of a service location (for example, a host name) and/or + * a service name and returns a set of socket addresses and associated + * information to be used in creating a socket with which to address the + * specified service. + * Memory for the result is allocated internally and must be freed by calling + * lwip_freeaddrinfo()! + * + * Due to a limitation in dns_gethostbyname, only the first address of a + * host is returned. + * Also, service names are not supported (only port numbers)! 
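+ *
+ * Usage sketch (hostname and port are illustrative; the result must always be
+ * released with lwip_freeaddrinfo()):
+ * @code{.c}
+ * struct addrinfo hints, *res = NULL;
+ * memset(&hints, 0, sizeof(hints));
+ * hints.ai_family   = AF_INET;
+ * hints.ai_socktype = SOCK_STREAM;
+ * if (lwip_getaddrinfo("example.com", "80", &hints, &res) == 0) {
+ *   // res->ai_addr holds the resolved sockaddr, res->ai_addrlen its length
+ *   lwip_freeaddrinfo(res);
+ * }
+ * @endcode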
+ * + * @param nodename descriptive name or address string of the host + * (may be NULL -> local address) + * @param servname port number as string of NULL + * @param hints structure containing input values that set socktype and protocol + * @param res pointer to a pointer where to store the result (set to NULL on failure) + * @return 0 on success, non-zero on failure + * + * @todo: implement AI_V4MAPPED, AI_ADDRCONFIG + */ +int +lwip_getaddrinfo(const char *nodename, const char *servname, + const struct addrinfo *hints, struct addrinfo **res) +{ + err_t err; + ip_addr_t addr; + struct addrinfo *ai; + struct sockaddr_storage *sa = NULL; + int port_nr = 0; + size_t total_size; + size_t namelen = 0; + int ai_family; + + if (res == NULL) { + return EAI_FAIL; + } + *res = NULL; + if ((nodename == NULL) && (servname == NULL)) { + return EAI_NONAME; + } + + if (hints != NULL) { + ai_family = hints->ai_family; + if ((ai_family != AF_UNSPEC) +#if LWIP_IPV4 + && (ai_family != AF_INET) +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + && (ai_family != AF_INET6) +#endif /* LWIP_IPV6 */ + ) { + return EAI_FAMILY; + } + } else { + ai_family = AF_UNSPEC; + } + + if (servname != NULL) { + /* service name specified: convert to port number + * @todo?: currently, only ASCII integers (port numbers) are supported (AI_NUMERICSERV)! */ + port_nr = atoi(servname); + if ((port_nr <= 0) || (port_nr > 0xffff)) { + return EAI_SERVICE; + } + } + + if (nodename != NULL) { + /* service location specified, try to resolve */ + if ((hints != NULL) && (hints->ai_flags & AI_NUMERICHOST)) { + /* no DNS lookup, just parse for an address string */ + if (!ipaddr_aton(nodename, &addr)) { + return EAI_NONAME; + } +#if LWIP_IPV4 && LWIP_IPV6 + if ((IP_IS_V6_VAL(addr) && ai_family == AF_INET) || + (IP_IS_V4_VAL(addr) && ai_family == AF_INET6)) { + return EAI_NONAME; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + } else { +#if LWIP_IPV4 && LWIP_IPV6 + /* AF_UNSPEC: prefer IPv4 */ + u8_t type = NETCONN_DNS_IPV4_IPV6; + if (ai_family == AF_INET) { + type = NETCONN_DNS_IPV4; + } else if (ai_family == AF_INET6) { + type = NETCONN_DNS_IPV6; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + err = netconn_gethostbyname_addrtype(nodename, &addr, type); + if (err != ERR_OK) { + return EAI_FAIL; + } + } + } else { + /* service location specified, use loopback address */ + if ((hints != NULL) && (hints->ai_flags & AI_PASSIVE)) { + ip_addr_set_any_val(ai_family == AF_INET6, addr); + } else { + ip_addr_set_loopback_val(ai_family == AF_INET6, addr); + } + } + + total_size = sizeof(struct addrinfo) + sizeof(struct sockaddr_storage); + if (nodename != NULL) { + namelen = strlen(nodename); + if (namelen > DNS_MAX_NAME_LENGTH) { + /* invalid name length */ + return EAI_FAIL; + } + LWIP_ASSERT("namelen is too long", total_size + namelen + 1 > total_size); + total_size += namelen + 1; + } + /* If this fails, please report to lwip-devel! 
:-) */ + LWIP_ASSERT("total_size <= NETDB_ELEM_SIZE: please report this!", + total_size <= NETDB_ELEM_SIZE); + ai = (struct addrinfo *)memp_malloc(MEMP_NETDB); + if (ai == NULL) { + return EAI_MEMORY; + } + memset(ai, 0, total_size); + /* cast through void* to get rid of alignment warnings */ + sa = (struct sockaddr_storage *)(void *)((u8_t *)ai + sizeof(struct addrinfo)); + if (IP_IS_V6_VAL(addr)) { +#if LWIP_IPV6 + struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *)sa; + /* set up sockaddr */ + inet6_addr_from_ip6addr(&sa6->sin6_addr, ip_2_ip6(&addr)); + sa6->sin6_family = AF_INET6; + sa6->sin6_len = sizeof(struct sockaddr_in6); + sa6->sin6_port = lwip_htons((u16_t)port_nr); + sa6->sin6_scope_id = ip6_addr_zone(ip_2_ip6(&addr)); + ai->ai_family = AF_INET6; +#endif /* LWIP_IPV6 */ + } else { +#if LWIP_IPV4 + struct sockaddr_in *sa4 = (struct sockaddr_in *)sa; + /* set up sockaddr */ + inet_addr_from_ip4addr(&sa4->sin_addr, ip_2_ip4(&addr)); + sa4->sin_family = AF_INET; + sa4->sin_len = sizeof(struct sockaddr_in); + sa4->sin_port = lwip_htons((u16_t)port_nr); + ai->ai_family = AF_INET; +#endif /* LWIP_IPV4 */ + } + + /* set up addrinfo */ + if (hints != NULL) { + /* copy socktype & protocol from hints if specified */ + ai->ai_socktype = hints->ai_socktype; + ai->ai_protocol = hints->ai_protocol; + } + if (nodename != NULL) { + /* copy nodename to canonname if specified */ + ai->ai_canonname = ((char *)ai + sizeof(struct addrinfo) + sizeof(struct sockaddr_storage)); + MEMCPY(ai->ai_canonname, nodename, namelen); + ai->ai_canonname[namelen] = 0; + } + ai->ai_addrlen = sizeof(struct sockaddr_storage); + ai->ai_addr = (struct sockaddr *)sa; + + *res = ai; + + return 0; +} + +#endif /* LWIP_DNS && LWIP_SOCKET */ diff --git a/Middlewares/Third_Party/LwIP/src/api/netifapi.c b/Middlewares/Third_Party/LwIP/src/api/netifapi.c new file mode 100644 index 0000000..25957cd --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/api/netifapi.c @@ -0,0 +1,380 @@ +/** + * @file + * Network Interface Sequential API module + * + * @defgroup netifapi NETIF API + * @ingroup sequential_api + * Thread-safe functions to be called from non-TCPIP threads + * + * @defgroup netifapi_netif NETIF related + * @ingroup netifapi + * To be called from non-TCPIP threads + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/opt.h" + +#if LWIP_NETIF_API /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/etharp.h" +#include "lwip/netifapi.h" +#include "lwip/memp.h" +#include "lwip/priv/tcpip_priv.h" + +#include <string.h> /* strncpy */ + +#define NETIFAPI_VAR_REF(name) API_VAR_REF(name) +#define NETIFAPI_VAR_DECLARE(name) API_VAR_DECLARE(struct netifapi_msg, name) +#define NETIFAPI_VAR_ALLOC(name) API_VAR_ALLOC(struct netifapi_msg, MEMP_NETIFAPI_MSG, name, ERR_MEM) +#define NETIFAPI_VAR_FREE(name) API_VAR_FREE(MEMP_NETIFAPI_MSG, name) + +/** + * Call netif_add() inside the tcpip_thread context. + */ +static err_t +netifapi_do_netif_add(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct netifapi_msg */ + struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m; + + if (!netif_add( msg->netif, +#if LWIP_IPV4 + API_EXPR_REF(msg->msg.add.ipaddr), + API_EXPR_REF(msg->msg.add.netmask), + API_EXPR_REF(msg->msg.add.gw), +#endif /* LWIP_IPV4 */ + msg->msg.add.state, + msg->msg.add.init, + msg->msg.add.input)) { + return ERR_IF; + } else { + return ERR_OK; + } +} + +#if LWIP_IPV4 +/** + * Call netif_set_addr() inside the tcpip_thread context. + */ +static err_t +netifapi_do_netif_set_addr(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct netifapi_msg */ + struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m; + + netif_set_addr( msg->netif, + API_EXPR_REF(msg->msg.add.ipaddr), + API_EXPR_REF(msg->msg.add.netmask), + API_EXPR_REF(msg->msg.add.gw)); + return ERR_OK; +} +#endif /* LWIP_IPV4 */ + +/** +* Call netif_name_to_index() inside the tcpip_thread context. +*/ +static err_t +netifapi_do_name_to_index(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct netifapi_msg */ + struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m; + + msg->msg.ifs.index = netif_name_to_index(msg->msg.ifs.name); + return ERR_OK; +} + +/** +* Call netif_index_to_name() inside the tcpip_thread context. +*/ +static err_t +netifapi_do_index_to_name(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct netifapi_msg */ + struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m; + + if (!netif_index_to_name(msg->msg.ifs.index, msg->msg.ifs.name)) { + /* return failure via empty name */ + msg->msg.ifs.name[0] = '\0'; + } + return ERR_OK; +} + +/** + * Call the "errtfunc" (or the "voidfunc" if "errtfunc" is NULL) inside the + * tcpip_thread context. + */ +static err_t +netifapi_do_netif_common(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings.
+ * We know it works because the structs have been instantiated as struct netifapi_msg */ + struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m; + + if (msg->msg.common.errtfunc != NULL) { + return msg->msg.common.errtfunc(msg->netif); + } else { + msg->msg.common.voidfunc(msg->netif); + return ERR_OK; + } +} + +#if LWIP_ARP && LWIP_IPV4 +/** + * @ingroup netifapi_arp + * Add or update an entry in the ARP cache. + * For an update, ipaddr is used to find the cache entry. + * + * @param ipaddr IPv4 address of cache entry + * @param ethaddr hardware address mapped to ipaddr + * @param type type of ARP cache entry + * @return ERR_OK: entry added/updated, else error from err_t + */ +err_t +netifapi_arp_add(const ip4_addr_t *ipaddr, struct eth_addr *ethaddr, enum netifapi_arp_entry type) +{ + err_t err; + + /* We only support permanent entries currently */ + LWIP_UNUSED_ARG(type); + +#if ETHARP_SUPPORT_STATIC_ENTRIES && LWIP_TCPIP_CORE_LOCKING + LOCK_TCPIP_CORE(); + err = etharp_add_static_entry(ipaddr, ethaddr); + UNLOCK_TCPIP_CORE(); +#else + /* @todo add new vars to struct netifapi_msg and create a 'do' func */ + LWIP_UNUSED_ARG(ipaddr); + LWIP_UNUSED_ARG(ethaddr); + err = ERR_VAL; +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES && LWIP_TCPIP_CORE_LOCKING */ + + return err; +} + +/** + * @ingroup netifapi_arp + * Remove an entry in the ARP cache identified by ipaddr + * + * @param ipaddr IPv4 address of cache entry + * @param type type of ARP cache entry + * @return ERR_OK: entry removed, else error from err_t + */ +err_t +netifapi_arp_remove(const ip4_addr_t *ipaddr, enum netifapi_arp_entry type) +{ + err_t err; + + /* We only support permanent entries currently */ + LWIP_UNUSED_ARG(type); + +#if ETHARP_SUPPORT_STATIC_ENTRIES && LWIP_TCPIP_CORE_LOCKING + LOCK_TCPIP_CORE(); + err = etharp_remove_static_entry(ipaddr); + UNLOCK_TCPIP_CORE(); +#else + /* @todo add new vars to struct netifapi_msg and create a 'do' func */ + LWIP_UNUSED_ARG(ipaddr); + err = ERR_VAL; +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES && LWIP_TCPIP_CORE_LOCKING */ + + return err; +} +#endif /* LWIP_ARP && LWIP_IPV4 */ + +/** + * @ingroup netifapi_netif + * Call netif_add() in a thread-safe way by running that function inside the + * tcpip_thread context. + * + * @note for params @see netif_add() + */ +err_t +netifapi_netif_add(struct netif *netif, +#if LWIP_IPV4 + const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw, +#endif /* LWIP_IPV4 */ + void *state, netif_init_fn init, netif_input_fn input) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + +#if LWIP_IPV4 + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY4; + } + if (netmask == NULL) { + netmask = IP4_ADDR_ANY4; + } + if (gw == NULL) { + gw = IP4_ADDR_ANY4; + } +#endif /* LWIP_IPV4 */ + + NETIFAPI_VAR_REF(msg).netif = netif; +#if LWIP_IPV4 + NETIFAPI_VAR_REF(msg).msg.add.ipaddr = NETIFAPI_VAR_REF(ipaddr); + NETIFAPI_VAR_REF(msg).msg.add.netmask = NETIFAPI_VAR_REF(netmask); + NETIFAPI_VAR_REF(msg).msg.add.gw = NETIFAPI_VAR_REF(gw); +#endif /* LWIP_IPV4 */ + NETIFAPI_VAR_REF(msg).msg.add.state = state; + NETIFAPI_VAR_REF(msg).msg.add.init = init; + NETIFAPI_VAR_REF(msg).msg.add.input = input; + err = tcpip_api_call(netifapi_do_netif_add, &API_VAR_REF(msg).call); + NETIFAPI_VAR_FREE(msg); + return err; +} + +#if LWIP_IPV4 +/** + * @ingroup netifapi_netif + * Call netif_set_addr() in a thread-safe way by running that function inside the + * tcpip_thread context. 
+ * + * @note for params @see netif_set_addr() + */ +err_t +netifapi_netif_set_addr(struct netif *netif, + const ip4_addr_t *ipaddr, + const ip4_addr_t *netmask, + const ip4_addr_t *gw) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY4; + } + if (netmask == NULL) { + netmask = IP4_ADDR_ANY4; + } + if (gw == NULL) { + gw = IP4_ADDR_ANY4; + } + + NETIFAPI_VAR_REF(msg).netif = netif; + NETIFAPI_VAR_REF(msg).msg.add.ipaddr = NETIFAPI_VAR_REF(ipaddr); + NETIFAPI_VAR_REF(msg).msg.add.netmask = NETIFAPI_VAR_REF(netmask); + NETIFAPI_VAR_REF(msg).msg.add.gw = NETIFAPI_VAR_REF(gw); + err = tcpip_api_call(netifapi_do_netif_set_addr, &API_VAR_REF(msg).call); + NETIFAPI_VAR_FREE(msg); + return err; +} +#endif /* LWIP_IPV4 */ + +/** + * call the "errtfunc" (or the "voidfunc" if "errtfunc" is NULL) in a thread-safe + * way by running that function inside the tcpip_thread context. + * + * @note use only for functions where there is only "netif" parameter. + */ +err_t +netifapi_netif_common(struct netif *netif, netifapi_void_fn voidfunc, + netifapi_errt_fn errtfunc) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + + NETIFAPI_VAR_REF(msg).netif = netif; + NETIFAPI_VAR_REF(msg).msg.common.voidfunc = voidfunc; + NETIFAPI_VAR_REF(msg).msg.common.errtfunc = errtfunc; + err = tcpip_api_call(netifapi_do_netif_common, &API_VAR_REF(msg).call); + NETIFAPI_VAR_FREE(msg); + return err; +} + +/** +* @ingroup netifapi_netif +* Call netif_name_to_index() in a thread-safe way by running that function inside the +* tcpip_thread context. +* +* @param name the interface name of the netif +* @param idx output index of the found netif +*/ +err_t +netifapi_netif_name_to_index(const char *name, u8_t *idx) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + + *idx = 0; + +#if LWIP_MPU_COMPATIBLE + strncpy(NETIFAPI_VAR_REF(msg).msg.ifs.name, name, NETIF_NAMESIZE - 1); + NETIFAPI_VAR_REF(msg).msg.ifs.name[NETIF_NAMESIZE - 1] = '\0'; +#else + NETIFAPI_VAR_REF(msg).msg.ifs.name = LWIP_CONST_CAST(char *, name); +#endif /* LWIP_MPU_COMPATIBLE */ + err = tcpip_api_call(netifapi_do_name_to_index, &API_VAR_REF(msg).call); + if (!err) { + *idx = NETIFAPI_VAR_REF(msg).msg.ifs.index; + } + NETIFAPI_VAR_FREE(msg); + return err; +} + +/** +* @ingroup netifapi_netif +* Call netif_index_to_name() in a thread-safe way by running that function inside the +* tcpip_thread context. +* +* @param idx the interface index of the netif +* @param name output name of the found netif, empty '\0' string if netif not found. 
+* name should be of at least NETIF_NAMESIZE bytes +*/ +err_t +netifapi_netif_index_to_name(u8_t idx, char *name) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + + NETIFAPI_VAR_REF(msg).msg.ifs.index = idx; +#if !LWIP_MPU_COMPATIBLE + NETIFAPI_VAR_REF(msg).msg.ifs.name = name; +#endif /* LWIP_MPU_COMPATIBLE */ + err = tcpip_api_call(netifapi_do_index_to_name, &API_VAR_REF(msg).call); +#if LWIP_MPU_COMPATIBLE + if (!err) { + strncpy(name, NETIFAPI_VAR_REF(msg).msg.ifs.name, NETIF_NAMESIZE - 1); + name[NETIF_NAMESIZE - 1] = '\0'; + } +#endif /* LWIP_MPU_COMPATIBLE */ + NETIFAPI_VAR_FREE(msg); + return err; +} + +#endif /* LWIP_NETIF_API */ diff --git a/Middlewares/Third_Party/LwIP/src/api/sockets.c b/Middlewares/Third_Party/LwIP/src/api/sockets.c new file mode 100644 index 0000000..cb7df91 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/api/sockets.c @@ -0,0 +1,4160 @@ +/** + * @file + * Sockets BSD-Like API module + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
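/* Usage sketch for the thread-safe netifapi wrappers defined above in netifapi.c.
 * Illustrative only, not part of this commit; it assumes an IPv4 build, a port-provided
 * ethernetif_init() driver callback, and placeholder addresses and interface name. */
#include "lwip/netifapi.h"
#include "lwip/tcpip.h"

extern err_t ethernetif_init(struct netif *netif);  /* assumed port driver init */

static struct netif gnetif;

static void bring_interface_up(void)
{
  ip4_addr_t ip, mask, gw;
  u8_t idx = 0;
  char name[NETIF_NAMESIZE];

  IP4_ADDR(&ip,   192, 168,   1, 10);
  IP4_ADDR(&mask, 255, 255, 255,  0);
  IP4_ADDR(&gw,   192, 168,   1,  1);

  /* each call is proxied into tcpip_thread, so it is safe from any application thread */
  netifapi_netif_add(&gnetif, &ip, &mask, &gw, NULL, ethernetif_init, tcpip_input);
  netifapi_netif_common(&gnetif, netif_set_up, NULL);   /* any void(struct netif*) fits here */

  /* the name <-> index helpers work the same way ("st0" is an assumed driver name) */
  if ((netifapi_netif_name_to_index("st0", &idx) == ERR_OK) && (idx != 0)) {
    netifapi_netif_index_to_name(idx, name);            /* name must hold NETIF_NAMESIZE bytes */
  }
  (void)name;
}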
+ * + * Author: Adam Dunkels + * + * Improved by Marc Boucher and David Haas + * + */ + +#include "lwip/opt.h" + +#if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/sockets.h" +#include "lwip/priv/sockets_priv.h" +#include "lwip/api.h" +#include "lwip/igmp.h" +#include "lwip/inet.h" +#include "lwip/tcp.h" +#include "lwip/raw.h" +#include "lwip/udp.h" +#include "lwip/memp.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/priv/tcpip_priv.h" +#include "lwip/mld6.h" +#if LWIP_CHECKSUM_ON_COPY +#include "lwip/inet_chksum.h" +#endif + +#if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES +#include <stdarg.h> +#endif + +#include <string.h> + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/* If the netconn API is not required publicly, then we include the necessary + files here to get the implementation */ +#if !LWIP_NETCONN +#undef LWIP_NETCONN +#define LWIP_NETCONN 1 +#include "api_msg.c" +#include "api_lib.c" +#include "netbuf.c" +#undef LWIP_NETCONN +#define LWIP_NETCONN 0 +#endif + +#define API_SELECT_CB_VAR_REF(name) API_VAR_REF(name) +#define API_SELECT_CB_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_select_cb, name) +#define API_SELECT_CB_VAR_ALLOC(name, retblock) API_VAR_ALLOC_EXT(struct lwip_select_cb, MEMP_SELECT_CB, name, retblock) +#define API_SELECT_CB_VAR_FREE(name) API_VAR_FREE(MEMP_SELECT_CB, name) + +#if LWIP_IPV4 +#define IP4ADDR_PORT_TO_SOCKADDR(sin, ipaddr, port) do { \ + (sin)->sin_len = sizeof(struct sockaddr_in); \ + (sin)->sin_family = AF_INET; \ + (sin)->sin_port = lwip_htons((port)); \ + inet_addr_from_ip4addr(&(sin)->sin_addr, ipaddr); \ + memset((sin)->sin_zero, 0, SIN_ZERO_LEN); }while(0) +#define SOCKADDR4_TO_IP4ADDR_PORT(sin, ipaddr, port) do { \ + inet_addr_to_ip4addr(ip_2_ip4(ipaddr), &((sin)->sin_addr)); \ + (port) = lwip_ntohs((sin)->sin_port); }while(0) +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +#define IP6ADDR_PORT_TO_SOCKADDR(sin6, ipaddr, port) do { \ + (sin6)->sin6_len = sizeof(struct sockaddr_in6); \ + (sin6)->sin6_family = AF_INET6; \ + (sin6)->sin6_port = lwip_htons((port)); \ + (sin6)->sin6_flowinfo = 0; \ + inet6_addr_from_ip6addr(&(sin6)->sin6_addr, ipaddr); \ + (sin6)->sin6_scope_id = ip6_addr_zone(ipaddr); }while(0) +#define SOCKADDR6_TO_IP6ADDR_PORT(sin6, ipaddr, port) do { \ + inet6_addr_to_ip6addr(ip_2_ip6(ipaddr), &((sin6)->sin6_addr)); \ + if (ip6_addr_has_scope(ip_2_ip6(ipaddr), IP6_UNKNOWN)) { \ + ip6_addr_set_zone(ip_2_ip6(ipaddr), (u8_t)((sin6)->sin6_scope_id)); \ + } \ + (port) = lwip_ntohs((sin6)->sin6_port); }while(0) +#endif /* LWIP_IPV6 */ + +#if LWIP_IPV4 && LWIP_IPV6 +static void sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port); + +#define IS_SOCK_ADDR_LEN_VALID(namelen) (((namelen) == sizeof(struct sockaddr_in)) || \ + ((namelen) == sizeof(struct sockaddr_in6))) +#define IS_SOCK_ADDR_TYPE_VALID(name) (((name)->sa_family == AF_INET) || \ + ((name)->sa_family == AF_INET6)) +#define SOCK_ADDR_TYPE_MATCH(name, sock) \ + ((((name)->sa_family == AF_INET) && !(NETCONNTYPE_ISIPV6((sock)->conn->type))) || \ + (((name)->sa_family == AF_INET6) && (NETCONNTYPE_ISIPV6((sock)->conn->type)))) +#define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) do { \ + if (IP_IS_ANY_TYPE_VAL(*ipaddr) || IP_IS_V6_VAL(*ipaddr)) { \ + IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port); \ + } else { \ + IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port); \ + } } while(0) +#define 
SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) sockaddr_to_ipaddr_port(sockaddr, ipaddr, &(port)) +#define DOMAIN_TO_NETCONN_TYPE(domain, type) (((domain) == AF_INET) ? \ + (type) : (enum netconn_type)((type) | NETCONN_TYPE_IPV6)) +#elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */ +#define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in6)) +#define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET6) +#define SOCK_ADDR_TYPE_MATCH(name, sock) 1 +#define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \ + IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port) +#define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \ + SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6*)(const void*)(sockaddr), ipaddr, port) +#define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type) +#else /*-> LWIP_IPV4: LWIP_IPV4 && LWIP_IPV6 */ +#define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in)) +#define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET) +#define SOCK_ADDR_TYPE_MATCH(name, sock) 1 +#define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \ + IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port) +#define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \ + SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in*)(const void*)(sockaddr), ipaddr, port) +#define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type) +#endif /* LWIP_IPV6 */ + +#define IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) (((name)->sa_family == AF_UNSPEC) || \ + IS_SOCK_ADDR_TYPE_VALID(name)) +#define SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock) (((name)->sa_family == AF_UNSPEC) || \ + SOCK_ADDR_TYPE_MATCH(name, sock)) +#define IS_SOCK_ADDR_ALIGNED(name) ((((mem_ptr_t)(name)) % 4) == 0) + + +#define LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype) do { if ((optlen) < sizeof(opttype)) { done_socket(sock); return EINVAL; }}while(0) +#define LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, opttype) do { \ + LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \ + if ((sock)->conn == NULL) { done_socket(sock); return EINVAL; } }while(0) +#define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype) do { \ + LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \ + if (((sock)->conn == NULL) || ((sock)->conn->pcb.tcp == NULL)) { done_socket(sock); return EINVAL; } }while(0) +#define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, opttype, netconntype) do { \ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype); \ + if (NETCONNTYPE_GROUP(netconn_type((sock)->conn)) != netconntype) { done_socket(sock); return ENOPROTOOPT; } }while(0) + + +#define LWIP_SETGETSOCKOPT_DATA_VAR_REF(name) API_VAR_REF(name) +#define LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_setgetsockopt_data, name) +#define LWIP_SETGETSOCKOPT_DATA_VAR_FREE(name) API_VAR_FREE(MEMP_SOCKET_SETGETSOCKOPT_DATA, name) +#if LWIP_MPU_COMPATIBLE +#define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock) do { \ + name = (struct lwip_setgetsockopt_data *)memp_malloc(MEMP_SOCKET_SETGETSOCKOPT_DATA); \ + if (name == NULL) { \ + sock_set_errno(sock, ENOMEM); \ + done_socket(sock); \ + return -1; \ + } }while(0) +#else /* LWIP_MPU_COMPATIBLE */ +#define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock) +#endif /* LWIP_MPU_COMPATIBLE */ + +#if LWIP_SO_SNDRCVTIMEO_NONSTANDARD +#define LWIP_SO_SNDRCVTIMEO_OPTTYPE int +#define LWIP_SO_SNDRCVTIMEO_SET(optval, val) (*(int *)(optval) = (val)) +#define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) 
((long)*(const int*)(optval)) +#else +#define LWIP_SO_SNDRCVTIMEO_OPTTYPE struct timeval +#define LWIP_SO_SNDRCVTIMEO_SET(optval, val) do { \ + u32_t loc = (val); \ + ((struct timeval *)(optval))->tv_sec = (long)((loc) / 1000U); \ + ((struct timeval *)(optval))->tv_usec = (long)(((loc) % 1000U) * 1000U); }while(0) +#define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((((const struct timeval *)(optval))->tv_sec * 1000) + (((const struct timeval *)(optval))->tv_usec / 1000)) +#endif + + +/** A struct sockaddr replacement that has the same alignment as sockaddr_in/ + * sockaddr_in6 if instantiated. + */ +union sockaddr_aligned { + struct sockaddr sa; +#if LWIP_IPV6 + struct sockaddr_in6 sin6; +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + struct sockaddr_in sin; +#endif /* LWIP_IPV4 */ +}; + +/* Define the number of IPv4 multicast memberships, default is one per socket */ +#ifndef LWIP_SOCKET_MAX_MEMBERSHIPS +#define LWIP_SOCKET_MAX_MEMBERSHIPS NUM_SOCKETS +#endif + +#if LWIP_IGMP +/* This is to keep track of IP_ADD_MEMBERSHIP calls to drop the membership when + a socket is closed */ +struct lwip_socket_multicast_pair { + /** the socket */ + struct lwip_sock *sock; + /** the interface address */ + ip4_addr_t if_addr; + /** the group address */ + ip4_addr_t multi_addr; +}; + +static struct lwip_socket_multicast_pair socket_ipv4_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS]; + +static int lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr); +static void lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr); +static void lwip_socket_drop_registered_memberships(int s); +#endif /* LWIP_IGMP */ + +#if LWIP_IPV6_MLD +/* This is to keep track of IP_JOIN_GROUP calls to drop the membership when + a socket is closed */ +struct lwip_socket_multicast_mld6_pair { + /** the socket */ + struct lwip_sock *sock; + /** the interface index */ + u8_t if_idx; + /** the group address */ + ip6_addr_t multi_addr; +}; + +static struct lwip_socket_multicast_mld6_pair socket_ipv6_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS]; + +static int lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr); +static void lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr); +static void lwip_socket_drop_registered_mld6_memberships(int s); +#endif /* LWIP_IPV6_MLD */ + +/** The global array of available sockets */ +static struct lwip_sock sockets[NUM_SOCKETS]; + +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL +#if LWIP_TCPIP_CORE_LOCKING +/* protect the select_cb_list using core lock */ +#define LWIP_SOCKET_SELECT_DECL_PROTECT(lev) +#define LWIP_SOCKET_SELECT_PROTECT(lev) LOCK_TCPIP_CORE() +#define LWIP_SOCKET_SELECT_UNPROTECT(lev) UNLOCK_TCPIP_CORE() +#else /* LWIP_TCPIP_CORE_LOCKING */ +/* protect the select_cb_list using SYS_LIGHTWEIGHT_PROT */ +#define LWIP_SOCKET_SELECT_DECL_PROTECT(lev) SYS_ARCH_DECL_PROTECT(lev) +#define LWIP_SOCKET_SELECT_PROTECT(lev) SYS_ARCH_PROTECT(lev) +#define LWIP_SOCKET_SELECT_UNPROTECT(lev) SYS_ARCH_UNPROTECT(lev) +/** This counter is increased from lwip_select when the list is changed + and checked in select_check_waiters to see if it has changed. 
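/* The LWIP_SO_SNDRCVTIMEO_* macros above decide how send/receive timeouts are represented on
 * this port. A sketch of setting a 5 second receive timeout under both representations
 * (not part of this commit; assumes LWIP_SO_RCVTIMEO is enabled in lwipopts.h). */
static void set_recv_timeout_5s(int s)
{
#if LWIP_SO_SNDRCVTIMEO_NONSTANDARD
  /* non-standard form: plain int, in milliseconds */
  int timeout_ms = 5000;
  lwip_setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &timeout_ms, sizeof(timeout_ms));
#else
  /* default form: struct timeval, as on other BSD-style socket stacks */
  struct timeval tv;
  tv.tv_sec = 5;
  tv.tv_usec = 0;
  lwip_setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
#endif
}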
*/ +static volatile int select_cb_ctr; +#endif /* LWIP_TCPIP_CORE_LOCKING */ +/** The global list of tasks waiting for select */ +static struct lwip_select_cb *select_cb_list; +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ + +#define sock_set_errno(sk, e) do { \ + const int sockerr = (e); \ + set_errno(sockerr); \ +} while (0) + +/* Forward declaration of some functions */ +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL +static void event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len); +#define DEFAULT_SOCKET_EVENTCB event_callback +static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent); +#else +#define DEFAULT_SOCKET_EVENTCB NULL +#endif +#if !LWIP_TCPIP_CORE_LOCKING +static void lwip_getsockopt_callback(void *arg); +static void lwip_setsockopt_callback(void *arg); +#endif +static int lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen); +static int lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen); +static int free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn, + union lwip_sock_lastdata *lastdata); +static void free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata); + +#if LWIP_IPV4 && LWIP_IPV6 +static void +sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port) +{ + if ((sockaddr->sa_family) == AF_INET6) { + SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6 *)(const void *)(sockaddr), ipaddr, *port); + ipaddr->type = IPADDR_TYPE_V6; + } else { + SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in *)(const void *)(sockaddr), ipaddr, *port); + ipaddr->type = IPADDR_TYPE_V4; + } +} +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +/** LWIP_NETCONN_SEM_PER_THREAD==1: initialize thread-local semaphore */ +void +lwip_socket_thread_init(void) +{ + netconn_thread_init(); +} + +/** LWIP_NETCONN_SEM_PER_THREAD==1: destroy thread-local semaphore */ +void +lwip_socket_thread_cleanup(void) +{ + netconn_thread_cleanup(); +} + +#if LWIP_NETCONN_FULLDUPLEX +/* Thread-safe increment of sock->fd_used, with overflow check */ +static int +sock_inc_used(struct lwip_sock *sock) +{ + int ret; + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_ASSERT("sock != NULL", sock != NULL); + + SYS_ARCH_PROTECT(lev); + if (sock->fd_free_pending) { + /* prevent new usage of this socket if free is pending */ + ret = 0; + } else { + ++sock->fd_used; + ret = 1; + LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0); + } + SYS_ARCH_UNPROTECT(lev); + return ret; +} + +/* Like sock_inc_used(), but called under SYS_ARCH_PROTECT lock. */ +static int +sock_inc_used_locked(struct lwip_sock *sock) +{ + LWIP_ASSERT("sock != NULL", sock != NULL); + + if (sock->fd_free_pending) { + LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0); + return 0; + } + + ++sock->fd_used; + LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0); + return 1; +} + +/* In full-duplex mode,sock->fd_used != 0 prevents a socket descriptor from being + * released (and possibly reused) when used from more than one thread + * (e.g. read-while-write or close-while-write, etc) + * This function is called at the end of functions using (try)get_socket*(). 
+ */ +static void +done_socket(struct lwip_sock *sock) +{ + int freed = 0; + int is_tcp = 0; + struct netconn *conn = NULL; + union lwip_sock_lastdata lastdata; + SYS_ARCH_DECL_PROTECT(lev); + LWIP_ASSERT("sock != NULL", sock != NULL); + + SYS_ARCH_PROTECT(lev); + LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0); + if (--sock->fd_used == 0) { + if (sock->fd_free_pending) { + /* free the socket */ + sock->fd_used = 1; + is_tcp = sock->fd_free_pending & LWIP_SOCK_FD_FREE_TCP; + freed = free_socket_locked(sock, is_tcp, &conn, &lastdata); + } + } + SYS_ARCH_UNPROTECT(lev); + + if (freed) { + free_socket_free_elements(is_tcp, conn, &lastdata); + } +} + +#else /* LWIP_NETCONN_FULLDUPLEX */ +#define sock_inc_used(sock) 1 +#define sock_inc_used_locked(sock) 1 +#define done_socket(sock) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +/* Translate a socket 'int' into a pointer (only fails if the index is invalid) */ +static struct lwip_sock * +tryget_socket_unconn_nouse(int fd) +{ + int s = fd - LWIP_SOCKET_OFFSET; + if ((s < 0) || (s >= NUM_SOCKETS)) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("tryget_socket_unconn(%d): invalid\n", fd)); + return NULL; + } + return &sockets[s]; +} + +struct lwip_sock * +lwip_socket_dbg_get_socket(int fd) +{ + return tryget_socket_unconn_nouse(fd); +} + +/* Translate a socket 'int' into a pointer (only fails if the index is invalid) */ +static struct lwip_sock * +tryget_socket_unconn(int fd) +{ + struct lwip_sock *ret = tryget_socket_unconn_nouse(fd); + if (ret != NULL) { + if (!sock_inc_used(ret)) { + return NULL; + } + } + return ret; +} + +/* Like tryget_socket_unconn(), but called under SYS_ARCH_PROTECT lock. */ +static struct lwip_sock * +tryget_socket_unconn_locked(int fd) +{ + struct lwip_sock *ret = tryget_socket_unconn_nouse(fd); + if (ret != NULL) { + if (!sock_inc_used_locked(ret)) { + return NULL; + } + } + return ret; +} + +/** + * Same as get_socket but doesn't set errno + * + * @param fd externally used socket index + * @return struct lwip_sock for the socket or NULL if not found + */ +static struct lwip_sock * +tryget_socket(int fd) +{ + struct lwip_sock *sock = tryget_socket_unconn(fd); + if (sock != NULL) { + if (sock->conn) { + return sock; + } + done_socket(sock); + } + return NULL; +} + +/** + * Map a externally used socket index to the internal socket representation. + * + * @param fd externally used socket index + * @return struct lwip_sock for the socket or NULL if not found + */ +static struct lwip_sock * +get_socket(int fd) +{ + struct lwip_sock *sock = tryget_socket(fd); + if (!sock) { + if ((fd < LWIP_SOCKET_OFFSET) || (fd >= (LWIP_SOCKET_OFFSET + NUM_SOCKETS))) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("get_socket(%d): invalid\n", fd)); + } + set_errno(EBADF); + return NULL; + } + return sock; +} + +/** + * Allocate a new socket for a given netconn. 
+ * + * @param newconn the netconn for which to allocate a socket + * @param accepted 1 if socket has been created by accept(), + * 0 if socket has been created by socket() + * @return the index of the new socket; -1 on error + */ +static int +alloc_socket(struct netconn *newconn, int accepted) +{ + int i; + SYS_ARCH_DECL_PROTECT(lev); + LWIP_UNUSED_ARG(accepted); + + /* allocate a new socket identifier */ + for (i = 0; i < NUM_SOCKETS; ++i) { + /* Protect socket array */ + SYS_ARCH_PROTECT(lev); + if (!sockets[i].conn) { +#if LWIP_NETCONN_FULLDUPLEX + if (sockets[i].fd_used) { + SYS_ARCH_UNPROTECT(lev); + continue; + } + sockets[i].fd_used = 1; + sockets[i].fd_free_pending = 0; +#endif + sockets[i].conn = newconn; + /* The socket is not yet known to anyone, so no need to protect + after having marked it as used. */ + SYS_ARCH_UNPROTECT(lev); + sockets[i].lastdata.pbuf = NULL; +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL + LWIP_ASSERT("sockets[i].select_waiting == 0", sockets[i].select_waiting == 0); + sockets[i].rcvevent = 0; + /* TCP sendbuf is empty, but the socket is not yet writable until connected + * (unless it has been created by accept()). */ + sockets[i].sendevent = (NETCONNTYPE_GROUP(newconn->type) == NETCONN_TCP ? (accepted != 0) : 1); + sockets[i].errevent = 0; +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ + return i + LWIP_SOCKET_OFFSET; + } + SYS_ARCH_UNPROTECT(lev); + } + return -1; +} + +/** Free a socket (under lock) + * + * @param sock the socket to free + * @param is_tcp != 0 for TCP sockets, used to free lastdata + * @param conn the socekt's netconn is stored here, must be freed externally + * @param lastdata lastdata is stored here, must be freed externally + */ +static int +free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn, + union lwip_sock_lastdata *lastdata) +{ +#if LWIP_NETCONN_FULLDUPLEX + LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0); + sock->fd_used--; + if (sock->fd_used > 0) { + sock->fd_free_pending = LWIP_SOCK_FD_FREE_FREE | (is_tcp ? LWIP_SOCK_FD_FREE_TCP : 0); + return 0; + } +#else /* LWIP_NETCONN_FULLDUPLEX */ + LWIP_UNUSED_ARG(is_tcp); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + + *lastdata = sock->lastdata; + sock->lastdata.pbuf = NULL; + *conn = sock->conn; + sock->conn = NULL; + return 1; +} + +/** Free a socket's leftover members. + */ +static void +free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata) +{ + if (lastdata->pbuf != NULL) { + if (is_tcp) { + pbuf_free(lastdata->pbuf); + } else { + netbuf_delete(lastdata->netbuf); + } + } + if (conn != NULL) { + /* netconn_prepare_delete() has already been called, here we only free the conn */ + netconn_delete(conn); + } +} + +/** Free a socket. The socket's netconn must have been + * delete before! + * + * @param sock the socket to free + * @param is_tcp != 0 for TCP sockets, used to free lastdata + */ +static void +free_socket(struct lwip_sock *sock, int is_tcp) +{ + int freed; + struct netconn *conn; + union lwip_sock_lastdata lastdata; + SYS_ARCH_DECL_PROTECT(lev); + + /* Protect socket array */ + SYS_ARCH_PROTECT(lev); + + freed = free_socket_locked(sock, is_tcp, &conn, &lastdata); + SYS_ARCH_UNPROTECT(lev); + /* don't use 'sock' after this line, as another task might have allocated it */ + + if (freed) { + free_socket_free_elements(is_tcp, conn, &lastdata); + } +} + +/* Below this, the well-known socket functions are implemented. 
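/* A compact reminder of how the well-known calls below chain together for a blocking TCP
 * server. Sketch only, not part of this commit: it assumes an IPv4 build, most error checks
 * are trimmed, and the standard names socket()/bind()/listen()/accept() map onto these
 * lwip_ functions when LWIP_COMPAT_SOCKETS is enabled. */
#include <string.h>
#include "lwip/sockets.h"

static void echo_one_client(void)
{
  struct sockaddr_in local;
  struct sockaddr_storage peer;
  socklen_t peerlen = sizeof(peer);
  char buf[128];
  ssize_t n;
  int srv, cli;

  srv = lwip_socket(AF_INET, SOCK_STREAM, 0);

  memset(&local, 0, sizeof(local));
  local.sin_family = AF_INET;
  local.sin_port = lwip_htons(7);                 /* echo port, placeholder */
  local.sin_addr.s_addr = PP_HTONL(INADDR_ANY);   /* any local address */
  lwip_bind(srv, (struct sockaddr *)&local, sizeof(local));
  lwip_listen(srv, 1);

  cli = lwip_accept(srv, (struct sockaddr *)&peer, &peerlen);
  if (cli >= 0) {
    while ((n = lwip_recv(cli, buf, sizeof(buf), 0)) > 0) {
      lwip_send(cli, buf, (size_t)n, 0);          /* echo back what was received */
    }
    lwip_close(cli);
  }
  lwip_close(srv);
}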
+ * Use google.com or opengroup.org to get a good description :-) + * + * Exceptions are documented! + */ + +int +lwip_accept(int s, struct sockaddr *addr, socklen_t *addrlen) +{ + struct lwip_sock *sock, *nsock; + struct netconn *newconn; + ip_addr_t naddr; + u16_t port = 0; + int newsock; + err_t err; + int recvevent; + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d)...\n", s)); + sock = get_socket(s); + if (!sock) { + return -1; + } + + /* wait for a new connection */ + err = netconn_accept(sock->conn, &newconn); + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_acept failed, err=%d\n", s, err)); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { + sock_set_errno(sock, EOPNOTSUPP); + } else if (err == ERR_CLSD) { + sock_set_errno(sock, EINVAL); + } else { + sock_set_errno(sock, err_to_errno(err)); + } + done_socket(sock); + return -1; + } + LWIP_ASSERT("newconn != NULL", newconn != NULL); + + newsock = alloc_socket(newconn, 1); + if (newsock == -1) { + netconn_delete(newconn); + sock_set_errno(sock, ENFILE); + done_socket(sock); + return -1; + } + LWIP_ASSERT("invalid socket index", (newsock >= LWIP_SOCKET_OFFSET) && (newsock < NUM_SOCKETS + LWIP_SOCKET_OFFSET)); + nsock = &sockets[newsock - LWIP_SOCKET_OFFSET]; + + /* See event_callback: If data comes in right away after an accept, even + * though the server task might not have created a new socket yet. + * In that case, newconn->socket is counted down (newconn->socket--), + * so nsock->rcvevent is >= 1 here! + */ + SYS_ARCH_PROTECT(lev); + recvevent = (s16_t)(-1 - newconn->socket); + newconn->socket = newsock; + SYS_ARCH_UNPROTECT(lev); + + if (newconn->callback) { + LOCK_TCPIP_CORE(); + while (recvevent > 0) { + recvevent--; + newconn->callback(newconn, NETCONN_EVT_RCVPLUS, 0); + } + UNLOCK_TCPIP_CORE(); + } + + /* Note that POSIX only requires us to check addr is non-NULL. addrlen must + * not be NULL if addr is valid. 
+ */ + if ((addr != NULL) && (addrlen != NULL)) { + union sockaddr_aligned tempaddr; + /* get the IP address and port of the remote host */ + err = netconn_peer(newconn, &naddr, &port); + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_peer failed, err=%d\n", s, err)); + netconn_delete(newconn); + free_socket(nsock, 1); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + + IPADDR_PORT_TO_SOCKADDR(&tempaddr, &naddr, port); + if (*addrlen > tempaddr.sa.sa_len) { + *addrlen = tempaddr.sa.sa_len; + } + MEMCPY(addr, &tempaddr, *addrlen); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d addr=", s, newsock)); + ip_addr_debug_print_val(SOCKETS_DEBUG, naddr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", port)); + } else { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d", s, newsock)); + } + + sock_set_errno(sock, 0); + done_socket(sock); + done_socket(nsock); + return newsock; +} + +int +lwip_bind(int s, const struct sockaddr *name, socklen_t namelen) +{ + struct lwip_sock *sock; + ip_addr_t local_addr; + u16_t local_port; + err_t err; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (!SOCK_ADDR_TYPE_MATCH(name, sock)) { + /* sockaddr does not match socket type (IPv4/IPv6) */ + sock_set_errno(sock, err_to_errno(ERR_VAL)); + done_socket(sock); + return -1; + } + + /* check size, family and alignment of 'name' */ + LWIP_ERROR("lwip_bind: invalid address", (IS_SOCK_ADDR_LEN_VALID(namelen) && + IS_SOCK_ADDR_TYPE_VALID(name) && IS_SOCK_ADDR_ALIGNED(name)), + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + LWIP_UNUSED_ARG(namelen); + + SOCKADDR_TO_IPADDR_PORT(name, &local_addr, local_port); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d, addr=", s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, local_addr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", local_port)); + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */ + if (IP_IS_V6_VAL(local_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&local_addr))) { + unmap_ipv4_mapped_ipv6(ip_2_ip4(&local_addr), ip_2_ip6(&local_addr)); + IP_SET_TYPE_VAL(local_addr, IPADDR_TYPE_V4); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + err = netconn_bind(sock->conn, &local_addr, local_port); + + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) failed, err=%d\n", s, err)); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) succeeded\n", s)); + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +} + +int +lwip_close(int s) +{ + struct lwip_sock *sock; + int is_tcp = 0; + err_t err; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_close(%d)\n", s)); + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (sock->conn != NULL) { + is_tcp = NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP; + } else { + LWIP_ASSERT("sock->lastdata == NULL", sock->lastdata.pbuf == NULL); + } + +#if LWIP_IGMP + /* drop all possibly joined IGMP memberships */ + lwip_socket_drop_registered_memberships(s); +#endif /* LWIP_IGMP */ +#if LWIP_IPV6_MLD + /* drop all possibly joined MLD6 memberships */ + lwip_socket_drop_registered_mld6_memberships(s); +#endif /* LWIP_IPV6_MLD */ + + err = netconn_prepare_delete(sock->conn); + if (err != ERR_OK) { + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + + free_socket(sock, is_tcp); + set_errno(0); + return 0; +} + +int +lwip_connect(int s, const 
struct sockaddr *name, socklen_t namelen) +{ + struct lwip_sock *sock; + err_t err; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (!SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock)) { + /* sockaddr does not match socket type (IPv4/IPv6) */ + sock_set_errno(sock, err_to_errno(ERR_VAL)); + done_socket(sock); + return -1; + } + + LWIP_UNUSED_ARG(namelen); + if (name->sa_family == AF_UNSPEC) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, AF_UNSPEC)\n", s)); + err = netconn_disconnect(sock->conn); + } else { + ip_addr_t remote_addr; + u16_t remote_port; + + /* check size, family and alignment of 'name' */ + LWIP_ERROR("lwip_connect: invalid address", IS_SOCK_ADDR_LEN_VALID(namelen) && + IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) && IS_SOCK_ADDR_ALIGNED(name), + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + + SOCKADDR_TO_IPADDR_PORT(name, &remote_addr, remote_port); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, addr=", s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, remote_addr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", remote_port)); + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */ + if (IP_IS_V6_VAL(remote_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&remote_addr))) { + unmap_ipv4_mapped_ipv6(ip_2_ip4(&remote_addr), ip_2_ip6(&remote_addr)); + IP_SET_TYPE_VAL(remote_addr, IPADDR_TYPE_V4); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + err = netconn_connect(sock->conn, &remote_addr, remote_port); + } + + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) failed, err=%d\n", s, err)); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) succeeded\n", s)); + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +} + +/** + * Set a socket into listen mode. + * The socket may not have been used for another connection previously. + * + * @param s the socket to set to listening mode + * @param backlog (ATTENTION: needs TCP_LISTEN_BACKLOG=1) + * @return 0 on success, non-zero on failure + */ +int +lwip_listen(int s, int backlog) +{ + struct lwip_sock *sock; + err_t err; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d, backlog=%d)\n", s, backlog)); + + sock = get_socket(s); + if (!sock) { + return -1; + } + + /* limit the "backlog" parameter to fit in an u8_t */ + backlog = LWIP_MIN(LWIP_MAX(backlog, 0), 0xff); + + err = netconn_listen_with_backlog(sock->conn, (u8_t)backlog); + + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d) failed, err=%d\n", s, err)); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { + sock_set_errno(sock, EOPNOTSUPP); + } else { + sock_set_errno(sock, err_to_errno(err)); + } + done_socket(sock); + return -1; + } + + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +} + +#if LWIP_TCP +/* Helper function to loop over receiving pbufs from netconn + * until "len" bytes are received or we're otherwise done. + * Keeps sock->lastdata for peeking or partly copying. + */ +static ssize_t +lwip_recv_tcp(struct lwip_sock *sock, void *mem, size_t len, int flags) +{ + u8_t apiflags = NETCONN_NOAUTORCVD; + ssize_t recvd = 0; + ssize_t recv_left = (len <= SSIZE_MAX) ? 
(ssize_t)len : SSIZE_MAX; + + LWIP_ASSERT("no socket given", sock != NULL); + LWIP_ASSERT("this should be checked internally", NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP); + + if (flags & MSG_DONTWAIT) { + apiflags |= NETCONN_DONTBLOCK; + } + + do { + struct pbuf *p; + err_t err; + u16_t copylen; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: top while sock->lastdata=%p\n", (void *)sock->lastdata.pbuf)); + /* Check if there is data left from the last recv operation. */ + if (sock->lastdata.pbuf) { + p = sock->lastdata.pbuf; + } else { + /* No data was left from the previous operation, so we try to get + some from the network. */ + err = netconn_recv_tcp_pbuf_flags(sock->conn, &p, apiflags); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: netconn_recv err=%d, pbuf=%p\n", + err, (void *)p)); + + if (err != ERR_OK) { + if (recvd > 0) { + /* already received data, return that (this trusts in getting the same error from + netconn layer again next time netconn_recv is called) */ + goto lwip_recv_tcp_done; + } + /* We should really do some error checking here. */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: p == NULL, error is \"%s\"!\n", + lwip_strerr(err))); + sock_set_errno(sock, err_to_errno(err)); + if (err == ERR_CLSD) { + return 0; + } else { + return -1; + } + } + LWIP_ASSERT("p != NULL", p != NULL); + sock->lastdata.pbuf = p; + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: buflen=%"U16_F" recv_left=%d off=%d\n", + p->tot_len, (int)recv_left, (int)recvd)); + + if (recv_left > p->tot_len) { + copylen = p->tot_len; + } else { + copylen = (u16_t)recv_left; + } + if (recvd + copylen < recvd) { + /* overflow */ + copylen = (u16_t)(SSIZE_MAX - recvd); + } + + /* copy the contents of the received buffer into + the supplied memory pointer mem */ + pbuf_copy_partial(p, (u8_t *)mem + recvd, copylen, 0); + + recvd += copylen; + + /* TCP combines multiple pbufs for one recv */ + LWIP_ASSERT("invalid copylen, len would underflow", recv_left >= copylen); + recv_left -= copylen; + + /* Unless we peek the incoming message... */ + if ((flags & MSG_PEEK) == 0) { + /* ... check if there is data left in the pbuf */ + LWIP_ASSERT("invalid copylen", p->tot_len >= copylen); + if (p->tot_len - copylen > 0) { + /* If so, it should be saved in the sock structure for the next recv call. + We store the pbuf but hide/free the consumed data: */ + sock->lastdata.pbuf = pbuf_free_header(p, copylen); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: lastdata now pbuf=%p\n", (void *)sock->lastdata.pbuf)); + } else { + sock->lastdata.pbuf = NULL; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: deleting pbuf=%p\n", (void *)p)); + pbuf_free(p); + } + } + /* once we have some data to return, only add more if we don't need to wait */ + apiflags |= NETCONN_DONTBLOCK | NETCONN_NOFIN; + /* @todo: do we need to support peeking more than one pbuf? 
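/* Because lwip_recv_tcp() above parks unconsumed data in sock->lastdata, an application can
 * peek at a stream and read the same bytes later; as the @todo notes, only data already
 * gathered into that first pbuf can be peeked, so peeks should stay small. Sketch, not part
 * of this commit; s is assumed to be a connected TCP socket. */
static void peek_then_consume(int s)
{
  char hdr[4];
  ssize_t n;

  /* non-blocking peek: the data stays queued in sock->lastdata */
  n = lwip_recv(s, hdr, sizeof(hdr), MSG_PEEK | MSG_DONTWAIT);
  if (n == (ssize_t)sizeof(hdr)) {
    /* same bytes again, this time consuming them */
    n = lwip_recv(s, hdr, sizeof(hdr), 0);
  }
  (void)n;
}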
*/ + } while ((recv_left > 0) && !(flags & MSG_PEEK)); +lwip_recv_tcp_done: + if ((recvd > 0) && !(flags & MSG_PEEK)) { + /* ensure window update after copying all data */ + netconn_tcp_recvd(sock->conn, (size_t)recvd); + } + sock_set_errno(sock, 0); + return recvd; +} +#endif + +/* Convert a netbuf's address data to struct sockaddr */ +static int +lwip_sock_make_addr(struct netconn *conn, ip_addr_t *fromaddr, u16_t port, + struct sockaddr *from, socklen_t *fromlen) +{ + int truncated = 0; + union sockaddr_aligned saddr; + + LWIP_UNUSED_ARG(conn); + + LWIP_ASSERT("fromaddr != NULL", fromaddr != NULL); + LWIP_ASSERT("from != NULL", from != NULL); + LWIP_ASSERT("fromlen != NULL", fromlen != NULL); + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */ + if (NETCONNTYPE_ISIPV6(netconn_type(conn)) && IP_IS_V4(fromaddr)) { + ip4_2_ipv4_mapped_ipv6(ip_2_ip6(fromaddr), ip_2_ip4(fromaddr)); + IP_SET_TYPE(fromaddr, IPADDR_TYPE_V6); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + IPADDR_PORT_TO_SOCKADDR(&saddr, fromaddr, port); + if (*fromlen < saddr.sa.sa_len) { + truncated = 1; + } else if (*fromlen > saddr.sa.sa_len) { + *fromlen = saddr.sa.sa_len; + } + MEMCPY(from, &saddr, *fromlen); + return truncated; +} + +#if LWIP_TCP +/* Helper function to get a tcp socket's remote address info */ +static int +lwip_recv_tcp_from(struct lwip_sock *sock, struct sockaddr *from, socklen_t *fromlen, const char *dbg_fn, int dbg_s, ssize_t dbg_ret) +{ + if (sock == NULL) { + return 0; + } + LWIP_UNUSED_ARG(dbg_fn); + LWIP_UNUSED_ARG(dbg_s); + LWIP_UNUSED_ARG(dbg_ret); + +#if !SOCKETS_DEBUG + if (from && fromlen) +#endif /* !SOCKETS_DEBUG */ + { + /* get remote addr/port from tcp_pcb */ + u16_t port; + ip_addr_t tmpaddr; + netconn_getaddr(sock->conn, &tmpaddr, &port, 0); + LWIP_DEBUGF(SOCKETS_DEBUG, ("%s(%d): addr=", dbg_fn, dbg_s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, tmpaddr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", port, (int)dbg_ret)); + if (from && fromlen) { + return lwip_sock_make_addr(sock->conn, &tmpaddr, port, from, fromlen); + } + } + return 0; +} +#endif + +/* Helper function to receive a netbuf from a udp or raw netconn. + * Keeps sock->lastdata for peeking. + */ +static err_t +lwip_recvfrom_udp_raw(struct lwip_sock *sock, int flags, struct msghdr *msg, u16_t *datagram_len, int dbg_s) +{ + struct netbuf *buf; + u8_t apiflags; + err_t err; + u16_t buflen, copylen, copied; + int i; + + LWIP_UNUSED_ARG(dbg_s); + LWIP_ERROR("lwip_recvfrom_udp_raw: invalid arguments", (msg->msg_iov != NULL) || (msg->msg_iovlen <= 0), return ERR_ARG;); + + if (flags & MSG_DONTWAIT) { + apiflags = NETCONN_DONTBLOCK; + } else { + apiflags = 0; + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: top sock->lastdata=%p\n", (void *)sock->lastdata.netbuf)); + /* Check if there is data left from the last recv operation. */ + buf = sock->lastdata.netbuf; + if (buf == NULL) { + /* No data was left from the previous operation, so we try to get + some from the network. 
*/ + err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &buf, apiflags); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: netconn_recv err=%d, netbuf=%p\n", + err, (void *)buf)); + + if (err != ERR_OK) { + return err; + } + LWIP_ASSERT("buf != NULL", buf != NULL); + sock->lastdata.netbuf = buf; + } + buflen = buf->p->tot_len; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw: buflen=%"U16_F"\n", buflen)); + + copied = 0; + /* copy the pbuf payload into the iovs */ + for (i = 0; (i < msg->msg_iovlen) && (copied < buflen); i++) { + u16_t len_left = (u16_t)(buflen - copied); + if (msg->msg_iov[i].iov_len > len_left) { + copylen = len_left; + } else { + copylen = (u16_t)msg->msg_iov[i].iov_len; + } + + /* copy the contents of the received buffer into + the supplied memory buffer */ + pbuf_copy_partial(buf->p, (u8_t *)msg->msg_iov[i].iov_base, copylen, copied); + copied = (u16_t)(copied + copylen); + } + + /* Check to see from where the data was.*/ +#if !SOCKETS_DEBUG + if (msg->msg_name && msg->msg_namelen) +#endif /* !SOCKETS_DEBUG */ + { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw(%d): addr=", dbg_s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, *netbuf_fromaddr(buf)); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", netbuf_fromport(buf), copied)); + if (msg->msg_name && msg->msg_namelen) { + lwip_sock_make_addr(sock->conn, netbuf_fromaddr(buf), netbuf_fromport(buf), + (struct sockaddr *)msg->msg_name, &msg->msg_namelen); + } + } + + /* Initialize flag output */ + msg->msg_flags = 0; + + if (msg->msg_control) { + u8_t wrote_msg = 0; +#if LWIP_NETBUF_RECVINFO + /* Check if packet info was recorded */ + if (buf->flags & NETBUF_FLAG_DESTADDR) { + if (IP_IS_V4(&buf->toaddr)) { +#if LWIP_IPV4 + if (msg->msg_controllen >= CMSG_SPACE(sizeof(struct in_pktinfo))) { + struct cmsghdr *chdr = CMSG_FIRSTHDR(msg); /* This will always return a header!! 
*/ + struct in_pktinfo *pkti = (struct in_pktinfo *)CMSG_DATA(chdr); + chdr->cmsg_level = IPPROTO_IP; + chdr->cmsg_type = IP_PKTINFO; + chdr->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo)); + pkti->ipi_ifindex = buf->p->if_idx; + inet_addr_from_ip4addr(&pkti->ipi_addr, ip_2_ip4(netbuf_destaddr(buf))); + msg->msg_controllen = CMSG_SPACE(sizeof(struct in_pktinfo)); + wrote_msg = 1; + } else { + msg->msg_flags |= MSG_CTRUNC; + } +#endif /* LWIP_IPV4 */ + } + } +#endif /* LWIP_NETBUF_RECVINFO */ + + if (!wrote_msg) { + msg->msg_controllen = 0; + } + } + + /* If we don't peek the incoming message: zero lastdata pointer and free the netbuf */ + if ((flags & MSG_PEEK) == 0) { + sock->lastdata.netbuf = NULL; + netbuf_delete(buf); + } + if (datagram_len) { + *datagram_len = buflen; + } + return ERR_OK; +} + +ssize_t +lwip_recvfrom(int s, void *mem, size_t len, int flags, + struct sockaddr *from, socklen_t *fromlen) +{ + struct lwip_sock *sock; + ssize_t ret; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom(%d, %p, %"SZT_F", 0x%x, ..)\n", s, mem, len, flags)); + sock = get_socket(s); + if (!sock) { + return -1; + } +#if LWIP_TCP + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { + ret = lwip_recv_tcp(sock, mem, len, flags); + lwip_recv_tcp_from(sock, from, fromlen, "lwip_recvfrom", s, ret); + done_socket(sock); + return ret; + } else +#endif + { + u16_t datagram_len = 0; + struct iovec vec; + struct msghdr msg; + err_t err; + vec.iov_base = mem; + vec.iov_len = len; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + msg.msg_iov = &vec; + msg.msg_iovlen = 1; + msg.msg_name = from; + msg.msg_namelen = (fromlen ? *fromlen : 0); + err = lwip_recvfrom_udp_raw(sock, flags, &msg, &datagram_len, s); + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n", + s, lwip_strerr(err))); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + ret = (ssize_t)LWIP_MIN(LWIP_MIN(len, datagram_len), SSIZE_MAX); + if (fromlen) { + *fromlen = msg.msg_namelen; + } + } + + sock_set_errno(sock, 0); + done_socket(sock); + return ret; +} + +ssize_t +lwip_read(int s, void *mem, size_t len) +{ + return lwip_recvfrom(s, mem, len, 0, NULL, NULL); +} + +ssize_t +lwip_readv(int s, const struct iovec *iov, int iovcnt) +{ + struct msghdr msg; + + msg.msg_name = NULL; + msg.msg_namelen = 0; + /* Hack: we have to cast via number to cast from 'const' pointer to non-const. + Blame the opengroup standard for this inconsistency. 
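/* The LWIP_NETBUF_RECVINFO branch of lwip_recvfrom_udp_raw() above is what fills in an
 * IP_PKTINFO control message. A receiving application asks for it roughly like this
 * (sketch, not part of this commit; assumes LWIP_NETBUF_RECVINFO is enabled and s is a
 * bound UDP socket). */
#include <string.h>
#include "lwip/sockets.h"

static void recv_with_dest_info(int s)
{
  char payload[512];
  struct iovec iov;
  struct msghdr msg;
  struct cmsghdr *cmsg;
  union {                              /* keeps the control buffer suitably aligned */
    struct cmsghdr align;
    u8_t buf[CMSG_SPACE(sizeof(struct in_pktinfo))];
  } ctrl;
  int on = 1;

  lwip_setsockopt(s, IPPROTO_IP, IP_PKTINFO, &on, sizeof(on));

  iov.iov_base = payload;
  iov.iov_len = sizeof(payload);
  memset(&msg, 0, sizeof(msg));
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  msg.msg_control = ctrl.buf;
  msg.msg_controllen = sizeof(ctrl.buf);

  if (lwip_recvmsg(s, &msg, 0) >= 0) {
    for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
      if ((cmsg->cmsg_level == IPPROTO_IP) && (cmsg->cmsg_type == IP_PKTINFO)) {
        const struct in_pktinfo *pkti = (const struct in_pktinfo *)CMSG_DATA(cmsg);
        /* pkti->ipi_ifindex / pkti->ipi_addr: receiving netif index and destination address */
        (void)pkti;
      }
    }
  }
}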
*/ + msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov); + msg.msg_iovlen = iovcnt; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + return lwip_recvmsg(s, &msg, 0); +} + +ssize_t +lwip_recv(int s, void *mem, size_t len, int flags) +{ + return lwip_recvfrom(s, mem, len, flags, NULL, NULL); +} + +ssize_t +lwip_recvmsg(int s, struct msghdr *message, int flags) +{ + struct lwip_sock *sock; + int i; + ssize_t buflen; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg(%d, message=%p, flags=0x%x)\n", s, (void *)message, flags)); + LWIP_ERROR("lwip_recvmsg: invalid message pointer", message != NULL, return ERR_ARG;); + LWIP_ERROR("lwip_recvmsg: unsupported flags", (flags & ~(MSG_PEEK|MSG_DONTWAIT)) == 0, + set_errno(EOPNOTSUPP); return -1;); + + if ((message->msg_iovlen <= 0) || (message->msg_iovlen > IOV_MAX)) { + set_errno(EMSGSIZE); + return -1; + } + + sock = get_socket(s); + if (!sock) { + return -1; + } + + /* check for valid vectors */ + buflen = 0; + for (i = 0; i < message->msg_iovlen; i++) { + if ((message->msg_iov[i].iov_base == NULL) || ((ssize_t)message->msg_iov[i].iov_len <= 0) || + ((size_t)(ssize_t)message->msg_iov[i].iov_len != message->msg_iov[i].iov_len) || + ((ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len) <= 0)) { + sock_set_errno(sock, err_to_errno(ERR_VAL)); + done_socket(sock); + return -1; + } + buflen = (ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len); + } + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { +#if LWIP_TCP + int recv_flags = flags; + message->msg_flags = 0; + /* recv the data */ + buflen = 0; + for (i = 0; i < message->msg_iovlen; i++) { + /* try to receive into this vector's buffer */ + ssize_t recvd_local = lwip_recv_tcp(sock, message->msg_iov[i].iov_base, message->msg_iov[i].iov_len, recv_flags); + if (recvd_local > 0) { + /* sum up received bytes */ + buflen += recvd_local; + } + if ((recvd_local < 0) || (recvd_local < (int)message->msg_iov[i].iov_len) || + (flags & MSG_PEEK)) { + /* returned prematurely (or peeking, which might actually be limitated to the first iov) */ + if (buflen <= 0) { + /* nothing received at all, propagate the error */ + buflen = recvd_local; + } + break; + } + /* pass MSG_DONTWAIT to lwip_recv_tcp() to prevent waiting for more data */ + recv_flags |= MSG_DONTWAIT; + } + if (buflen > 0) { + /* reset socket error since we have received something */ + sock_set_errno(sock, 0); + } + /* " If the socket is connected, the msg_name and msg_namelen members shall be ignored." 
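/* The counterpart on the send side is lwip_sendmsg(), defined below, which gathers an iovec
 * array into a single datagram (one flat pbuf or a pbuf chain depending on
 * LWIP_NETIF_TX_SINGLE_PBUF). A UDP sketch, not part of this commit; dest is assumed to be a
 * filled-in IPv4 destination. */
#include <string.h>
#include "lwip/sockets.h"

static void send_header_and_body(int s, const struct sockaddr_in *dest)
{
  const char hdr[] = "HDR1";
  const char body[] = "payload bytes";
  struct iovec iov[2];
  struct msghdr msg;

  iov[0].iov_base = LWIP_CONST_CAST(void *, hdr);
  iov[0].iov_len  = sizeof(hdr) - 1;
  iov[1].iov_base = LWIP_CONST_CAST(void *, body);
  iov[1].iov_len  = sizeof(body) - 1;

  memset(&msg, 0, sizeof(msg));
  msg.msg_name    = LWIP_CONST_CAST(void *, dest);
  msg.msg_namelen = sizeof(*dest);
  msg.msg_iov     = iov;
  msg.msg_iovlen  = 2;

  /* both iovecs leave as one UDP datagram */
  lwip_sendmsg(s, &msg, 0);
}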
*/ + done_socket(sock); + return buflen; +#else /* LWIP_TCP */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_TCP */ + } + /* else, UDP and RAW NETCONNs */ +#if LWIP_UDP || LWIP_RAW + { + u16_t datagram_len = 0; + err_t err; + err = lwip_recvfrom_udp_raw(sock, flags, message, &datagram_len, s); + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n", + s, lwip_strerr(err))); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + if (datagram_len > buflen) { + message->msg_flags |= MSG_TRUNC; + } + + sock_set_errno(sock, 0); + done_socket(sock); + return (int)datagram_len; + } +#else /* LWIP_UDP || LWIP_RAW */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_UDP || LWIP_RAW */ +} + +ssize_t +lwip_send(int s, const void *data, size_t size, int flags) +{ + struct lwip_sock *sock; + err_t err; + u8_t write_flags; + size_t written; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d, data=%p, size=%"SZT_F", flags=0x%x)\n", + s, data, size, flags)); + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { +#if (LWIP_UDP || LWIP_RAW) + done_socket(sock); + return lwip_sendto(s, data, size, flags, NULL, 0); +#else /* (LWIP_UDP || LWIP_RAW) */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* (LWIP_UDP || LWIP_RAW) */ + } + + write_flags = (u8_t)(NETCONN_COPY | + ((flags & MSG_MORE) ? NETCONN_MORE : 0) | + ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0)); + written = 0; + err = netconn_write_partly(sock->conn, data, size, write_flags, &written); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d) err=%d written=%"SZT_F"\n", s, err, written)); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */ + return (err == ERR_OK ? (ssize_t)written : -1); +} + +ssize_t +lwip_sendmsg(int s, const struct msghdr *msg, int flags) +{ + struct lwip_sock *sock; +#if LWIP_TCP + u8_t write_flags; + size_t written; +#endif + err_t err = ERR_OK; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + LWIP_ERROR("lwip_sendmsg: invalid msghdr", msg != NULL, + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + LWIP_ERROR("lwip_sendmsg: invalid msghdr iov", msg->msg_iov != NULL, + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + LWIP_ERROR("lwip_sendmsg: maximum iovs exceeded", (msg->msg_iovlen > 0) && (msg->msg_iovlen <= IOV_MAX), + sock_set_errno(sock, EMSGSIZE); done_socket(sock); return -1;); + LWIP_ERROR("lwip_sendmsg: unsupported flags", (flags & ~(MSG_DONTWAIT | MSG_MORE)) == 0, + sock_set_errno(sock, EOPNOTSUPP); done_socket(sock); return -1;); + + LWIP_UNUSED_ARG(msg->msg_control); + LWIP_UNUSED_ARG(msg->msg_controllen); + LWIP_UNUSED_ARG(msg->msg_flags); + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { +#if LWIP_TCP + write_flags = (u8_t)(NETCONN_COPY | + ((flags & MSG_MORE) ? NETCONN_MORE : 0) | + ((flags & MSG_DONTWAIT) ? 
NETCONN_DONTBLOCK : 0)); + + written = 0; + err = netconn_write_vectors_partly(sock->conn, (struct netvector *)msg->msg_iov, (u16_t)msg->msg_iovlen, write_flags, &written); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */ + return (err == ERR_OK ? (ssize_t)written : -1); +#else /* LWIP_TCP */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_TCP */ + } + /* else, UDP and RAW NETCONNs */ +#if LWIP_UDP || LWIP_RAW + { + struct netbuf chain_buf; + int i; + ssize_t size = 0; + + LWIP_UNUSED_ARG(flags); + LWIP_ERROR("lwip_sendmsg: invalid msghdr name", (((msg->msg_name == NULL) && (msg->msg_namelen == 0)) || + IS_SOCK_ADDR_LEN_VALID(msg->msg_namelen)), + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + + /* initialize chain buffer with destination */ + memset(&chain_buf, 0, sizeof(struct netbuf)); + if (msg->msg_name) { + u16_t remote_port; + SOCKADDR_TO_IPADDR_PORT((const struct sockaddr *)msg->msg_name, &chain_buf.addr, remote_port); + netbuf_fromport(&chain_buf) = remote_port; + } +#if LWIP_NETIF_TX_SINGLE_PBUF + for (i = 0; i < msg->msg_iovlen; i++) { + size += msg->msg_iov[i].iov_len; + if ((msg->msg_iov[i].iov_len > INT_MAX) || (size < (int)msg->msg_iov[i].iov_len)) { + /* overflow */ + goto sendmsg_emsgsize; + } + } + if (size > 0xFFFF) { + /* overflow */ + goto sendmsg_emsgsize; + } + /* Allocate a new netbuf and copy the data into it. */ + if (netbuf_alloc(&chain_buf, (u16_t)size) == NULL) { + err = ERR_MEM; + } else { + /* flatten the IO vectors */ + size_t offset = 0; + for (i = 0; i < msg->msg_iovlen; i++) { + MEMCPY(&((u8_t *)chain_buf.p->payload)[offset], msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len); + offset += msg->msg_iov[i].iov_len; + } +#if LWIP_CHECKSUM_ON_COPY + { + /* This can be improved by using LWIP_CHKSUM_COPY() and aggregating the checksum for each IO vector */ + u16_t chksum = ~inet_chksum_pbuf(chain_buf.p); + netbuf_set_chksum(&chain_buf, chksum); + } +#endif /* LWIP_CHECKSUM_ON_COPY */ + err = ERR_OK; + } +#else /* LWIP_NETIF_TX_SINGLE_PBUF */ + /* create a chained netbuf from the IO vectors. 
NOTE: we assemble a pbuf chain + manually to avoid having to allocate, chain, and delete a netbuf for each iov */ + for (i = 0; i < msg->msg_iovlen; i++) { + struct pbuf *p; + if (msg->msg_iov[i].iov_len > 0xFFFF) { + /* overflow */ + goto sendmsg_emsgsize; + } + p = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_REF); + if (p == NULL) { + err = ERR_MEM; /* let netbuf_delete() cleanup chain_buf */ + break; + } + p->payload = msg->msg_iov[i].iov_base; + p->len = p->tot_len = (u16_t)msg->msg_iov[i].iov_len; + /* netbuf empty, add new pbuf */ + if (chain_buf.p == NULL) { + chain_buf.p = chain_buf.ptr = p; + /* add pbuf to existing pbuf chain */ + } else { + if (chain_buf.p->tot_len + p->len > 0xffff) { + /* overflow */ + pbuf_free(p); + goto sendmsg_emsgsize; + } + pbuf_cat(chain_buf.p, p); + } + } + /* save size of total chain */ + if (err == ERR_OK) { + size = netbuf_len(&chain_buf); + } +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + + if (err == ERR_OK) { +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */ + if (IP_IS_V6_VAL(chain_buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&chain_buf.addr))) { + unmap_ipv4_mapped_ipv6(ip_2_ip4(&chain_buf.addr), ip_2_ip6(&chain_buf.addr)); + IP_SET_TYPE_VAL(chain_buf.addr, IPADDR_TYPE_V4); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + /* send the data */ + err = netconn_send(sock->conn, &chain_buf); + } + + /* deallocated the buffer */ + netbuf_free(&chain_buf); + + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return (err == ERR_OK ? size : -1); +sendmsg_emsgsize: + sock_set_errno(sock, EMSGSIZE); + netbuf_free(&chain_buf); + done_socket(sock); + return -1; + } +#else /* LWIP_UDP || LWIP_RAW */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_UDP || LWIP_RAW */ +} + +ssize_t +lwip_sendto(int s, const void *data, size_t size, int flags, + const struct sockaddr *to, socklen_t tolen) +{ + struct lwip_sock *sock; + err_t err; + u16_t short_size; + u16_t remote_port; + struct netbuf buf; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { +#if LWIP_TCP + done_socket(sock); + return lwip_send(s, data, size, flags); +#else /* LWIP_TCP */ + LWIP_UNUSED_ARG(flags); + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_TCP */ + } + + if (size > LWIP_MIN(0xFFFF, SSIZE_MAX)) { + /* cannot fit into one datagram (at least for us) */ + sock_set_errno(sock, EMSGSIZE); + done_socket(sock); + return -1; + } + short_size = (u16_t)size; + LWIP_ERROR("lwip_sendto: invalid address", (((to == NULL) && (tolen == 0)) || + (IS_SOCK_ADDR_LEN_VALID(tolen) && + ((to != NULL) && (IS_SOCK_ADDR_TYPE_VALID(to) && IS_SOCK_ADDR_ALIGNED(to))))), + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + LWIP_UNUSED_ARG(tolen); + + /* initialize a buffer */ + buf.p = buf.ptr = NULL; +#if LWIP_CHECKSUM_ON_COPY + buf.flags = 0; +#endif /* LWIP_CHECKSUM_ON_COPY */ + if (to) { + SOCKADDR_TO_IPADDR_PORT(to, &buf.addr, remote_port); + } else { + remote_port = 0; + ip_addr_set_any(NETCONNTYPE_ISIPV6(netconn_type(sock->conn)), &buf.addr); + } + netbuf_fromport(&buf) = remote_port; + + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_sendto(%d, data=%p, short_size=%"U16_F", flags=0x%x to=", + s, data, short_size, flags)); + ip_addr_debug_print_val(SOCKETS_DEBUG, buf.addr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", remote_port)); + + /* make the buffer point to the data that should be 
sent */ +#if LWIP_NETIF_TX_SINGLE_PBUF + /* Allocate a new netbuf and copy the data into it. */ + if (netbuf_alloc(&buf, short_size) == NULL) { + err = ERR_MEM; + } else { +#if LWIP_CHECKSUM_ON_COPY + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_RAW) { + u16_t chksum = LWIP_CHKSUM_COPY(buf.p->payload, data, short_size); + netbuf_set_chksum(&buf, chksum); + } else +#endif /* LWIP_CHECKSUM_ON_COPY */ + { + MEMCPY(buf.p->payload, data, short_size); + } + err = ERR_OK; + } +#else /* LWIP_NETIF_TX_SINGLE_PBUF */ + err = netbuf_ref(&buf, data, short_size); +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + if (err == ERR_OK) { +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */ + if (IP_IS_V6_VAL(buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&buf.addr))) { + unmap_ipv4_mapped_ipv6(ip_2_ip4(&buf.addr), ip_2_ip6(&buf.addr)); + IP_SET_TYPE_VAL(buf.addr, IPADDR_TYPE_V4); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + /* send the data */ + err = netconn_send(sock->conn, &buf); + } + + /* deallocated the buffer */ + netbuf_free(&buf); + + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return (err == ERR_OK ? short_size : -1); +} + +int +lwip_socket(int domain, int type, int protocol) +{ + struct netconn *conn; + int i; + + LWIP_UNUSED_ARG(domain); /* @todo: check this */ + + /* create a netconn */ + switch (type) { + case SOCK_RAW: + conn = netconn_new_with_proto_and_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_RAW), + (u8_t)protocol, DEFAULT_SOCKET_EVENTCB); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_RAW, %d) = ", + domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol)); + break; + case SOCK_DGRAM: + conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain, + ((protocol == IPPROTO_UDPLITE) ? NETCONN_UDPLITE : NETCONN_UDP)), + DEFAULT_SOCKET_EVENTCB); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_DGRAM, %d) = ", + domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol)); +#if LWIP_NETBUF_RECVINFO + if (conn) { + /* netconn layer enables pktinfo by default, sockets default to off */ + conn->flags &= ~NETCONN_FLAG_PKTINFO; + } +#endif /* LWIP_NETBUF_RECVINFO */ + break; + case SOCK_STREAM: + conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_TCP), DEFAULT_SOCKET_EVENTCB); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_STREAM, %d) = ", + domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol)); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%d, %d/UNKNOWN, %d) = -1\n", + domain, type, protocol)); + set_errno(EINVAL); + return -1; + } + + if (!conn) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("-1 / ENOBUFS (could not create netconn)\n")); + set_errno(ENOBUFS); + return -1; + } + + i = alloc_socket(conn, 0); + + if (i == -1) { + netconn_delete(conn); + set_errno(ENFILE); + return -1; + } + conn->socket = i; + done_socket(&sockets[i - LWIP_SOCKET_OFFSET]); + LWIP_DEBUGF(SOCKETS_DEBUG, ("%d\n", i)); + set_errno(0); + return i; +} + +ssize_t +lwip_write(int s, const void *data, size_t size) +{ + return lwip_send(s, data, size, 0); +} + +ssize_t +lwip_writev(int s, const struct iovec *iov, int iovcnt) +{ + struct msghdr msg; + + msg.msg_name = NULL; + msg.msg_namelen = 0; + /* Hack: we have to cast via number to cast from 'const' pointer to non-const. + Blame the opengroup standard for this inconsistency. 
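Reviewer note on the lwip_socket()/lwip_sendto() hunks above: SOCK_DGRAM creates a NETCONN_UDP netconn, and lwip_sendto() either copies (LWIP_NETIF_TX_SINGLE_PBUF) or references the payload into a netbuf before handing it to netconn_send(), rejecting anything larger than one datagram with EMSGSIZE. A minimal sketch of that datagram path, assuming LWIP_UDP is enabled; the address 192.0.2.1 and port 7 are placeholders:

#include <string.h>
#include "lwip/sockets.h"

/* Send one UDP datagram through the socket layer. */
static int send_udp_probe(void)
{
  struct sockaddr_in dst;
  const char payload[] = "hello";
  int s = lwip_socket(AF_INET, SOCK_DGRAM, 0);
  if (s < 0) {
    return -1;                                  /* errno set by lwip_socket() */
  }
  memset(&dst, 0, sizeof(dst));
  dst.sin_family = AF_INET;
  dst.sin_port = htons(7);                      /* placeholder port */
  dst.sin_addr.s_addr = inet_addr("192.0.2.1"); /* placeholder address */
  if (lwip_sendto(s, payload, sizeof(payload), 0,
                  (struct sockaddr *)&dst, sizeof(dst)) < 0) {
    lwip_close(s);
    return -1;
  }
  return lwip_close(s);
}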
*/ + msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov); + msg.msg_iovlen = iovcnt; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + return lwip_sendmsg(s, &msg, 0); +} + +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL +/* Add select_cb to select_cb_list. */ +static void +lwip_link_select_cb(struct lwip_select_cb *select_cb) +{ + LWIP_SOCKET_SELECT_DECL_PROTECT(lev); + + /* Protect the select_cb_list */ + LWIP_SOCKET_SELECT_PROTECT(lev); + + /* Put this select_cb on top of list */ + select_cb->next = select_cb_list; + if (select_cb_list != NULL) { + select_cb_list->prev = select_cb; + } + select_cb_list = select_cb; +#if !LWIP_TCPIP_CORE_LOCKING + /* Increasing this counter tells select_check_waiters that the list has changed. */ + select_cb_ctr++; +#endif + + /* Now we can safely unprotect */ + LWIP_SOCKET_SELECT_UNPROTECT(lev); +} + +/* Remove select_cb from select_cb_list. */ +static void +lwip_unlink_select_cb(struct lwip_select_cb *select_cb) +{ + LWIP_SOCKET_SELECT_DECL_PROTECT(lev); + + /* Take us off the list */ + LWIP_SOCKET_SELECT_PROTECT(lev); + if (select_cb->next != NULL) { + select_cb->next->prev = select_cb->prev; + } + if (select_cb_list == select_cb) { + LWIP_ASSERT("select_cb->prev == NULL", select_cb->prev == NULL); + select_cb_list = select_cb->next; + } else { + LWIP_ASSERT("select_cb->prev != NULL", select_cb->prev != NULL); + select_cb->prev->next = select_cb->next; + } +#if !LWIP_TCPIP_CORE_LOCKING + /* Increasing this counter tells select_check_waiters that the list has changed. */ + select_cb_ctr++; +#endif + LWIP_SOCKET_SELECT_UNPROTECT(lev); +} +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ + +#if LWIP_SOCKET_SELECT +/** + * Go through the readset and writeset lists and see which socket of the sockets + * set in the sets has events. On return, readset, writeset and exceptset have + * the sockets enabled that had events. + * + * @param maxfdp1 the highest socket index in the sets + * @param readset_in set of sockets to check for read events + * @param writeset_in set of sockets to check for write events + * @param exceptset_in set of sockets to check for error events + * @param readset_out set of sockets that had read events + * @param writeset_out set of sockets that had write events + * @param exceptset_out set os sockets that had error events + * @return number of sockets that had events (read/write/exception) (>= 0) + */ +static int +lwip_selscan(int maxfdp1, fd_set *readset_in, fd_set *writeset_in, fd_set *exceptset_in, + fd_set *readset_out, fd_set *writeset_out, fd_set *exceptset_out) +{ + int i, nready = 0; + fd_set lreadset, lwriteset, lexceptset; + struct lwip_sock *sock; + SYS_ARCH_DECL_PROTECT(lev); + + FD_ZERO(&lreadset); + FD_ZERO(&lwriteset); + FD_ZERO(&lexceptset); + + /* Go through each socket in each list to count number of sockets which + currently match */ + for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) { + /* if this FD is not in the set, continue */ + if (!(readset_in && FD_ISSET(i, readset_in)) && + !(writeset_in && FD_ISSET(i, writeset_in)) && + !(exceptset_in && FD_ISSET(i, exceptset_in))) { + continue; + } + /* First get the socket's status (protected)... */ + SYS_ARCH_PROTECT(lev); + sock = tryget_socket_unconn_locked(i); + if (sock != NULL) { + void *lastdata = sock->lastdata.pbuf; + s16_t rcvevent = sock->rcvevent; + u16_t sendevent = sock->sendevent; + u16_t errevent = sock->errevent; + SYS_ARCH_UNPROTECT(lev); + + /* ... 
then examine it: */ + /* See if netconn of this socket is ready for read */ + if (readset_in && FD_ISSET(i, readset_in) && ((lastdata != NULL) || (rcvevent > 0))) { + FD_SET(i, &lreadset); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for reading\n", i)); + nready++; + } + /* See if netconn of this socket is ready for write */ + if (writeset_in && FD_ISSET(i, writeset_in) && (sendevent != 0)) { + FD_SET(i, &lwriteset); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for writing\n", i)); + nready++; + } + /* See if netconn of this socket had an error */ + if (exceptset_in && FD_ISSET(i, exceptset_in) && (errevent != 0)) { + FD_SET(i, &lexceptset); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for exception\n", i)); + nready++; + } + done_socket(sock); + } else { + SYS_ARCH_UNPROTECT(lev); + /* no a valid open socket */ + return -1; + } + } + /* copy local sets to the ones provided as arguments */ + *readset_out = lreadset; + *writeset_out = lwriteset; + *exceptset_out = lexceptset; + + LWIP_ASSERT("nready >= 0", nready >= 0); + return nready; +} + +#if LWIP_NETCONN_FULLDUPLEX +/* Mark all of the set sockets in one of the three fdsets passed to select as used. + * All sockets are marked (and later unmarked), whether they are open or not. + * This is OK as lwip_selscan aborts select when non-open sockets are found. + */ +static void +lwip_select_inc_sockets_used_set(int maxfdp, fd_set *fdset, fd_set *used_sockets) +{ + SYS_ARCH_DECL_PROTECT(lev); + if (fdset) { + int i; + for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) { + /* if this FD is in the set, lock it (unless already done) */ + if (FD_ISSET(i, fdset) && !FD_ISSET(i, used_sockets)) { + struct lwip_sock *sock; + SYS_ARCH_PROTECT(lev); + sock = tryget_socket_unconn_locked(i); + if (sock != NULL) { + /* leave the socket used until released by lwip_select_dec_sockets_used */ + FD_SET(i, used_sockets); + } + SYS_ARCH_UNPROTECT(lev); + } + } + } +} + +/* Mark all sockets passed to select as used to prevent them from being freed + * from other threads while select is running. + * Marked sockets are added to 'used_sockets' to mark them only once an be able + * to unmark them correctly. 
+ */ +static void +lwip_select_inc_sockets_used(int maxfdp, fd_set *fdset1, fd_set *fdset2, fd_set *fdset3, fd_set *used_sockets) +{ + FD_ZERO(used_sockets); + lwip_select_inc_sockets_used_set(maxfdp, fdset1, used_sockets); + lwip_select_inc_sockets_used_set(maxfdp, fdset2, used_sockets); + lwip_select_inc_sockets_used_set(maxfdp, fdset3, used_sockets); +} + +/* Let go all sockets that were marked as used when starting select */ +static void +lwip_select_dec_sockets_used(int maxfdp, fd_set *used_sockets) +{ + int i; + for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) { + /* if this FD is not in the set, continue */ + if (FD_ISSET(i, used_sockets)) { + struct lwip_sock *sock = tryget_socket_unconn_nouse(i); + LWIP_ASSERT("socket gone at the end of select", sock != NULL); + if (sock != NULL) { + done_socket(sock); + } + } + } +} +#else /* LWIP_NETCONN_FULLDUPLEX */ +#define lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, used_sockets) +#define lwip_select_dec_sockets_used(maxfdp1, used_sockets) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +int +lwip_select(int maxfdp1, fd_set *readset, fd_set *writeset, fd_set *exceptset, + struct timeval *timeout) +{ + u32_t waitres = 0; + int nready; + fd_set lreadset, lwriteset, lexceptset; + u32_t msectimeout; + int i; + int maxfdp2; +#if LWIP_NETCONN_SEM_PER_THREAD + int waited = 0; +#endif +#if LWIP_NETCONN_FULLDUPLEX + fd_set used_sockets; +#endif + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select(%d, %p, %p, %p, tvsec=%"S32_F" tvusec=%"S32_F")\n", + maxfdp1, (void *)readset, (void *) writeset, (void *) exceptset, + timeout ? (s32_t)timeout->tv_sec : (s32_t) - 1, + timeout ? (s32_t)timeout->tv_usec : (s32_t) - 1)); + + if ((maxfdp1 < 0) || (maxfdp1 > LWIP_SELECT_MAXNFDS)) { + set_errno(EINVAL); + return -1; + } + + lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, &used_sockets); + + /* Go through each socket in each list to count number of sockets which + currently match */ + nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset); + + if (nready < 0) { + /* one of the sockets in one of the fd_sets was invalid */ + set_errno(EBADF); + lwip_select_dec_sockets_used(maxfdp1, &used_sockets); + return -1; + } else if (nready > 0) { + /* one or more sockets are set, no need to wait */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready)); + } else { + /* If we don't have any current events, then suspend if we are supposed to */ + if (timeout && timeout->tv_sec == 0 && timeout->tv_usec == 0) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: no timeout, returning 0\n")); + /* This is OK as the local fdsets are empty and nready is zero, + or we would have returned earlier. */ + } else { + /* None ready: add our semaphore to list: + We don't actually need any dynamic memory. Our entry on the + list is only valid while we are in this function, so it's ok + to use local variables (unless we're running in MPU compatible + mode). 
*/ + API_SELECT_CB_VAR_DECLARE(select_cb); + API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(ENOMEM); lwip_select_dec_sockets_used(maxfdp1, &used_sockets); return -1); + memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb)); + + API_SELECT_CB_VAR_REF(select_cb).readset = readset; + API_SELECT_CB_VAR_REF(select_cb).writeset = writeset; + API_SELECT_CB_VAR_REF(select_cb).exceptset = exceptset; +#if LWIP_NETCONN_SEM_PER_THREAD + API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) { + /* failed to create semaphore */ + set_errno(ENOMEM); + lwip_select_dec_sockets_used(maxfdp1, &used_sockets); + API_SELECT_CB_VAR_FREE(select_cb); + return -1; + } +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb)); + + /* Increase select_waiting for each socket we are interested in */ + maxfdp2 = maxfdp1; + for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) { + if ((readset && FD_ISSET(i, readset)) || + (writeset && FD_ISSET(i, writeset)) || + (exceptset && FD_ISSET(i, exceptset))) { + struct lwip_sock *sock; + SYS_ARCH_PROTECT(lev); + sock = tryget_socket_unconn_locked(i); + if (sock != NULL) { + sock->select_waiting++; + if (sock->select_waiting == 0) { + /* overflow - too many threads waiting */ + sock->select_waiting--; + nready = -1; + maxfdp2 = i; + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + set_errno(EBUSY); + break; + } + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + } else { + /* Not a valid socket */ + nready = -1; + maxfdp2 = i; + SYS_ARCH_UNPROTECT(lev); + set_errno(EBADF); + break; + } + } + } + + if (nready >= 0) { + /* Call lwip_selscan again: there could have been events between + the last scan (without us on the list) and putting us on the list! */ + nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset); + if (!nready) { + /* Still none ready, just wait to be woken */ + if (timeout == 0) { + /* Wait forever */ + msectimeout = 0; + } else { + long msecs_long = ((timeout->tv_sec * 1000) + ((timeout->tv_usec + 500) / 1000)); + if (msecs_long <= 0) { + /* Wait 1ms at least (0 means wait forever) */ + msectimeout = 1; + } else { + msectimeout = (u32_t)msecs_long; + } + } + + waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout); +#if LWIP_NETCONN_SEM_PER_THREAD + waited = 1; +#endif + } + } + + /* Decrease select_waiting for each socket we are interested in */ + for (i = LWIP_SOCKET_OFFSET; i < maxfdp2; i++) { + if ((readset && FD_ISSET(i, readset)) || + (writeset && FD_ISSET(i, writeset)) || + (exceptset && FD_ISSET(i, exceptset))) { + struct lwip_sock *sock; + SYS_ARCH_PROTECT(lev); + sock = tryget_socket_unconn_locked(i); + if (sock != NULL) { + /* for now, handle select_waiting==0... 
*/ + LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0); + if (sock->select_waiting > 0) { + sock->select_waiting--; + } + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + } else { + SYS_ARCH_UNPROTECT(lev); + /* Not a valid socket */ + nready = -1; + set_errno(EBADF); + } + } + } + + lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb)); + +#if LWIP_NETCONN_SEM_PER_THREAD + if (API_SELECT_CB_VAR_REF(select_cb).sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) { + /* don't leave the thread-local semaphore signalled */ + sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1); + } +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem); +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + API_SELECT_CB_VAR_FREE(select_cb); + + if (nready < 0) { + /* This happens when a socket got closed while waiting */ + lwip_select_dec_sockets_used(maxfdp1, &used_sockets); + return -1; + } + + if (waitres == SYS_ARCH_TIMEOUT) { + /* Timeout */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: timeout expired\n")); + /* This is OK as the local fdsets are empty and nready is zero, + or we would have returned earlier. */ + } else { + /* See what's set now after waiting */ + nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready)); + } + } + } + + lwip_select_dec_sockets_used(maxfdp1, &used_sockets); + set_errno(0); + if (readset) { + *readset = lreadset; + } + if (writeset) { + *writeset = lwriteset; + } + if (exceptset) { + *exceptset = lexceptset; + } + return nready; +} +#endif /* LWIP_SOCKET_SELECT */ + +#if LWIP_SOCKET_POLL +/** Options for the lwip_pollscan function. */ +enum lwip_pollscan_opts +{ + /** Clear revents in each struct pollfd. */ + LWIP_POLLSCAN_CLEAR = 1, + + /** Increment select_waiting in each struct lwip_sock. */ + LWIP_POLLSCAN_INC_WAIT = 2, + + /** Decrement select_waiting in each struct lwip_sock. */ + LWIP_POLLSCAN_DEC_WAIT = 4 +}; + +/** + * Update revents in each struct pollfd. + * Optionally update select_waiting in struct lwip_sock. + * + * @param fds array of structures to update + * @param nfds number of structures in fds + * @param opts what to update and how + * @return number of structures that have revents != 0 + */ +static int +lwip_pollscan(struct pollfd *fds, nfds_t nfds, enum lwip_pollscan_opts opts) +{ + int nready = 0; + nfds_t fdi; + struct lwip_sock *sock; + SYS_ARCH_DECL_PROTECT(lev); + + /* Go through each struct pollfd in the array. */ + for (fdi = 0; fdi < nfds; fdi++) { + if ((opts & LWIP_POLLSCAN_CLEAR) != 0) { + fds[fdi].revents = 0; + } + + /* Negative fd means the caller wants us to ignore this struct. + POLLNVAL means we already detected that the fd is invalid; + if another thread has since opened a new socket with that fd, + we must not use that socket. */ + if (fds[fdi].fd >= 0 && (fds[fdi].revents & POLLNVAL) == 0) { + /* First get the socket's status (protected)... 
*/ + SYS_ARCH_PROTECT(lev); + sock = tryget_socket_unconn_locked(fds[fdi].fd); + if (sock != NULL) { + void* lastdata = sock->lastdata.pbuf; + s16_t rcvevent = sock->rcvevent; + u16_t sendevent = sock->sendevent; + u16_t errevent = sock->errevent; + + if ((opts & LWIP_POLLSCAN_INC_WAIT) != 0) { + sock->select_waiting++; + if (sock->select_waiting == 0) { + /* overflow - too many threads waiting */ + sock->select_waiting--; + nready = -1; + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + break; + } + } else if ((opts & LWIP_POLLSCAN_DEC_WAIT) != 0) { + /* for now, handle select_waiting==0... */ + LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0); + if (sock->select_waiting > 0) { + sock->select_waiting--; + } + } + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + + /* ... then examine it: */ + /* See if netconn of this socket is ready for read */ + if ((fds[fdi].events & POLLIN) != 0 && ((lastdata != NULL) || (rcvevent > 0))) { + fds[fdi].revents |= POLLIN; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for reading\n", fds[fdi].fd)); + } + /* See if netconn of this socket is ready for write */ + if ((fds[fdi].events & POLLOUT) != 0 && (sendevent != 0)) { + fds[fdi].revents |= POLLOUT; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for writing\n", fds[fdi].fd)); + } + /* See if netconn of this socket had an error */ + if (errevent != 0) { + /* POLLERR is output only. */ + fds[fdi].revents |= POLLERR; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for exception\n", fds[fdi].fd)); + } + } else { + /* Not a valid socket */ + SYS_ARCH_UNPROTECT(lev); + /* POLLNVAL is output only. */ + fds[fdi].revents |= POLLNVAL; + return -1; + } + } + + /* Will return the number of structures that have events, + not the number of events. */ + if (fds[fdi].revents != 0) { + nready++; + } + } + + LWIP_ASSERT("nready >= 0", nready >= 0); + return nready; +} + +#if LWIP_NETCONN_FULLDUPLEX +/* Mark all sockets as used. + * + * All sockets are marked (and later unmarked), whether they are open or not. + * This is OK as lwip_pollscan aborts select when non-open sockets are found. + */ +static void +lwip_poll_inc_sockets_used(struct pollfd *fds, nfds_t nfds) +{ + nfds_t fdi; + + if(fds) { + /* Go through each struct pollfd in the array. */ + for (fdi = 0; fdi < nfds; fdi++) { + /* Increase the reference counter */ + tryget_socket_unconn(fds[fdi].fd); + } + } +} + +/* Let go all sockets that were marked as used when starting poll */ +static void +lwip_poll_dec_sockets_used(struct pollfd *fds, nfds_t nfds) +{ + nfds_t fdi; + + if(fds) { + /* Go through each struct pollfd in the array. 
*/ + for (fdi = 0; fdi < nfds; fdi++) { + struct lwip_sock *sock = tryget_socket_unconn_nouse(fds[fdi].fd); + if (sock != NULL) { + done_socket(sock); + } + } + } +} +#else /* LWIP_NETCONN_FULLDUPLEX */ +#define lwip_poll_inc_sockets_used(fds, nfds) +#define lwip_poll_dec_sockets_used(fds, nfds) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +int +lwip_poll(struct pollfd *fds, nfds_t nfds, int timeout) +{ + u32_t waitres = 0; + int nready; + u32_t msectimeout; +#if LWIP_NETCONN_SEM_PER_THREAD + int waited = 0; +#endif + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll(%p, %d, %d)\n", + (void*)fds, (int)nfds, timeout)); + LWIP_ERROR("lwip_poll: invalid fds", ((fds != NULL && nfds > 0) || (fds == NULL && nfds == 0)), + set_errno(EINVAL); return -1;); + + lwip_poll_inc_sockets_used(fds, nfds); + + /* Go through each struct pollfd to count number of structures + which currently match */ + nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_CLEAR); + + if (nready < 0) { + lwip_poll_dec_sockets_used(fds, nfds); + return -1; + } + + /* If we don't have any current events, then suspend if we are supposed to */ + if (!nready) { + API_SELECT_CB_VAR_DECLARE(select_cb); + + if (timeout == 0) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: no timeout, returning 0\n")); + goto return_success; + } + API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(EAGAIN); lwip_poll_dec_sockets_used(fds, nfds); return -1); + memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb)); + + /* None ready: add our semaphore to list: + We don't actually need any dynamic memory. Our entry on the + list is only valid while we are in this function, so it's ok + to use local variables. */ + + API_SELECT_CB_VAR_REF(select_cb).poll_fds = fds; + API_SELECT_CB_VAR_REF(select_cb).poll_nfds = nfds; +#if LWIP_NETCONN_SEM_PER_THREAD + API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) { + /* failed to create semaphore */ + set_errno(EAGAIN); + lwip_poll_dec_sockets_used(fds, nfds); + API_SELECT_CB_VAR_FREE(select_cb); + return -1; + } +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb)); + + /* Increase select_waiting for each socket we are interested in. + Also, check for events again: there could have been events between + the last scan (without us on the list) and putting us on the list! */ + nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_INC_WAIT); + + if (!nready) { + /* Still none ready, just wait to be woken */ + if (timeout < 0) { + /* Wait forever */ + msectimeout = 0; + } else { + /* timeout == 0 would have been handled earlier. */ + LWIP_ASSERT("timeout > 0", timeout > 0); + msectimeout = timeout; + } + waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout); +#if LWIP_NETCONN_SEM_PER_THREAD + waited = 1; +#endif + } + + /* Decrease select_waiting for each socket we are interested in, + and check which events occurred while we waited. 
*/ + nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_DEC_WAIT); + + lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb)); + +#if LWIP_NETCONN_SEM_PER_THREAD + if (select_cb.sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) { + /* don't leave the thread-local semaphore signalled */ + sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1); + } +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem); +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + API_SELECT_CB_VAR_FREE(select_cb); + + if (nready < 0) { + /* This happens when a socket got closed while waiting */ + lwip_poll_dec_sockets_used(fds, nfds); + return -1; + } + + if (waitres == SYS_ARCH_TIMEOUT) { + /* Timeout */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: timeout expired\n")); + goto return_success; + } + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: nready=%d\n", nready)); +return_success: + lwip_poll_dec_sockets_used(fds, nfds); + set_errno(0); + return nready; +} + +/** + * Check whether event_callback should wake up a thread waiting in + * lwip_poll. + */ +static int +lwip_poll_should_wake(const struct lwip_select_cb *scb, int fd, int has_recvevent, int has_sendevent, int has_errevent) +{ + nfds_t fdi; + for (fdi = 0; fdi < scb->poll_nfds; fdi++) { + const struct pollfd *pollfd = &scb->poll_fds[fdi]; + if (pollfd->fd == fd) { + /* Do not update pollfd->revents right here; + that would be a data race because lwip_pollscan + accesses revents without protecting. */ + if (has_recvevent && (pollfd->events & POLLIN) != 0) { + return 1; + } + if (has_sendevent && (pollfd->events & POLLOUT) != 0) { + return 1; + } + if (has_errevent) { + /* POLLERR is output only. */ + return 1; + } + } + } + return 0; +} +#endif /* LWIP_SOCKET_POLL */ + +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL +/** + * Callback registered in the netconn layer for each socket-netconn. + * Processes recvevent (data available) and wakes up tasks waiting for select. + * + * @note for LWIP_TCPIP_CORE_LOCKING any caller of this function + * must have the core lock held when signaling the following events + * as they might cause select_list_cb to be checked: + * NETCONN_EVT_RCVPLUS + * NETCONN_EVT_SENDPLUS + * NETCONN_EVT_ERROR + * This requirement will be asserted in select_check_waiters() + */ +static void +event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len) +{ + int s, check_waiters; + struct lwip_sock *sock; + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_UNUSED_ARG(len); + + /* Get socket */ + if (conn) { + s = conn->socket; + if (s < 0) { + /* Data comes in right away after an accept, even though + * the server task might not have created a new socket yet. + * Just count down (or up) if that's the case and we + * will use the data later. Note that only receive events + * can happen before the new socket is set up. 
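Reviewer note on lwip_poll() above: it follows the same scan/register/re-scan/wait pattern as lwip_select(), but results come back in each pollfd's revents field, and POLLERR/POLLNVAL are reported even when not requested. A minimal sketch, assuming LWIP_SOCKET_POLL is enabled and s is an open descriptor:

#include "lwip/sockets.h"

/* Poll one descriptor for input with a 100 ms timeout.
 * Returns 1 if readable, 0 on timeout, -1 on error. */
static int poll_one(int s)
{
  struct pollfd pfd;
  int nready;

  pfd.fd = s;
  pfd.events = POLLIN;              /* only interested in readability */
  pfd.revents = 0;

  nready = lwip_poll(&pfd, 1, 100); /* timeout is in milliseconds */
  if (nready < 0) {
    return -1;
  }
  if (pfd.revents & (POLLERR | POLLNVAL)) {
    return -1;                      /* error or invalid fd flagged by lwip_pollscan() */
  }
  return (pfd.revents & POLLIN) ? 1 : 0;
}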
*/ + SYS_ARCH_PROTECT(lev); + if (conn->socket < 0) { + if (evt == NETCONN_EVT_RCVPLUS) { + /* conn->socket is -1 on initialization + lwip_accept adjusts sock->recvevent if conn->socket < -1 */ + conn->socket--; + } + SYS_ARCH_UNPROTECT(lev); + return; + } + s = conn->socket; + SYS_ARCH_UNPROTECT(lev); + } + + sock = get_socket(s); + if (!sock) { + return; + } + } else { + return; + } + + check_waiters = 1; + SYS_ARCH_PROTECT(lev); + /* Set event as required */ + switch (evt) { + case NETCONN_EVT_RCVPLUS: + sock->rcvevent++; + if (sock->rcvevent > 1) { + check_waiters = 0; + } + break; + case NETCONN_EVT_RCVMINUS: + sock->rcvevent--; + check_waiters = 0; + break; + case NETCONN_EVT_SENDPLUS: + if (sock->sendevent) { + check_waiters = 0; + } + sock->sendevent = 1; + break; + case NETCONN_EVT_SENDMINUS: + sock->sendevent = 0; + check_waiters = 0; + break; + case NETCONN_EVT_ERROR: + sock->errevent = 1; + break; + default: + LWIP_ASSERT("unknown event", 0); + break; + } + + if (sock->select_waiting && check_waiters) { + /* Save which events are active */ + int has_recvevent, has_sendevent, has_errevent; + has_recvevent = sock->rcvevent > 0; + has_sendevent = sock->sendevent != 0; + has_errevent = sock->errevent != 0; + SYS_ARCH_UNPROTECT(lev); + /* Check any select calls waiting on this socket */ + select_check_waiters(s, has_recvevent, has_sendevent, has_errevent); + } else { + SYS_ARCH_UNPROTECT(lev); + } + done_socket(sock); +} + +/** + * Check if any select waiters are waiting on this socket and its events + * + * @note on synchronization of select_cb_list: + * LWIP_TCPIP_CORE_LOCKING: the select_cb_list must only be accessed while holding + * the core lock. We do a single pass through the list and signal any waiters. + * Core lock should already be held when calling here!!!! + + * !LWIP_TCPIP_CORE_LOCKING: we use SYS_ARCH_PROTECT but unlock on each iteration + * of the loop, thus creating a possibility where a thread could modify the + * select_cb_list during our UNPROTECT/PROTECT. We use a generational counter to + * detect this change and restart the list walk. 
The list is expected to be small + */ +static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent) +{ + struct lwip_select_cb *scb; +#if !LWIP_TCPIP_CORE_LOCKING + int last_select_cb_ctr; + SYS_ARCH_DECL_PROTECT(lev); +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + + LWIP_ASSERT_CORE_LOCKED(); + +#if !LWIP_TCPIP_CORE_LOCKING + SYS_ARCH_PROTECT(lev); +again: + /* remember the state of select_cb_list to detect changes */ + last_select_cb_ctr = select_cb_ctr; +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + for (scb = select_cb_list; scb != NULL; scb = scb->next) { + if (scb->sem_signalled == 0) { + /* semaphore not signalled yet */ + int do_signal = 0; +#if LWIP_SOCKET_POLL + if (scb->poll_fds != NULL) { + do_signal = lwip_poll_should_wake(scb, s, has_recvevent, has_sendevent, has_errevent); + } +#endif /* LWIP_SOCKET_POLL */ +#if LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL + else +#endif /* LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL */ +#if LWIP_SOCKET_SELECT + { + /* Test this select call for our socket */ + if (has_recvevent) { + if (scb->readset && FD_ISSET(s, scb->readset)) { + do_signal = 1; + } + } + if (has_sendevent) { + if (!do_signal && scb->writeset && FD_ISSET(s, scb->writeset)) { + do_signal = 1; + } + } + if (has_errevent) { + if (!do_signal && scb->exceptset && FD_ISSET(s, scb->exceptset)) { + do_signal = 1; + } + } + } +#endif /* LWIP_SOCKET_SELECT */ + if (do_signal) { + scb->sem_signalled = 1; + /* For !LWIP_TCPIP_CORE_LOCKING, we don't call SYS_ARCH_UNPROTECT() before signaling + the semaphore, as this might lead to the select thread taking itself off the list, + invalidating the semaphore. */ + sys_sem_signal(SELECT_SEM_PTR(scb->sem)); + } + } +#if LWIP_TCPIP_CORE_LOCKING + } +#else + /* unlock interrupts with each step */ + SYS_ARCH_UNPROTECT(lev); + /* this makes sure interrupt protection time is short */ + SYS_ARCH_PROTECT(lev); + if (last_select_cb_ctr != select_cb_ctr) { + /* someone has changed select_cb_list, restart at the beginning */ + goto again; + } + /* remember the state of select_cb_list to detect changes */ + last_select_cb_ctr = select_cb_ctr; + } + SYS_ARCH_UNPROTECT(lev); +#endif +} +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ + +/** + * Close one end of a full-duplex connection. + */ +int +lwip_shutdown(int s, int how) +{ + struct lwip_sock *sock; + err_t err; + u8_t shut_rx = 0, shut_tx = 0; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_shutdown(%d, how=%d)\n", s, how)); + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (sock->conn != NULL) { + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { + sock_set_errno(sock, EOPNOTSUPP); + done_socket(sock); + return -1; + } + } else { + sock_set_errno(sock, ENOTCONN); + done_socket(sock); + return -1; + } + + if (how == SHUT_RD) { + shut_rx = 1; + } else if (how == SHUT_WR) { + shut_tx = 1; + } else if (how == SHUT_RDWR) { + shut_rx = 1; + shut_tx = 1; + } else { + sock_set_errno(sock, EINVAL); + done_socket(sock); + return -1; + } + err = netconn_shutdown(sock->conn, shut_rx, shut_tx); + + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return (err == ERR_OK ? 
0 : -1); +} + +static int +lwip_getaddrname(int s, struct sockaddr *name, socklen_t *namelen, u8_t local) +{ + struct lwip_sock *sock; + union sockaddr_aligned saddr; + ip_addr_t naddr; + u16_t port; + err_t err; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + /* get the IP address and port */ + err = netconn_getaddr(sock->conn, &naddr, &port, local); + if (err != ERR_OK) { + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */ + if (NETCONNTYPE_ISIPV6(netconn_type(sock->conn)) && + IP_IS_V4_VAL(naddr)) { + ip4_2_ipv4_mapped_ipv6(ip_2_ip6(&naddr), ip_2_ip4(&naddr)); + IP_SET_TYPE_VAL(naddr, IPADDR_TYPE_V6); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + IPADDR_PORT_TO_SOCKADDR(&saddr, &naddr, port); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getaddrname(%d, addr=", s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, naddr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", port)); + + if (*namelen > saddr.sa.sa_len) { + *namelen = saddr.sa.sa_len; + } + MEMCPY(name, &saddr, *namelen); + + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +} + +int +lwip_getpeername(int s, struct sockaddr *name, socklen_t *namelen) +{ + return lwip_getaddrname(s, name, namelen, 0); +} + +int +lwip_getsockname(int s, struct sockaddr *name, socklen_t *namelen) +{ + return lwip_getaddrname(s, name, namelen, 1); +} + +int +lwip_getsockopt(int s, int level, int optname, void *optval, socklen_t *optlen) +{ + int err; + struct lwip_sock *sock = get_socket(s); +#if !LWIP_TCPIP_CORE_LOCKING + err_t cberr; + LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data); +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + + if (!sock) { + return -1; + } + + if ((NULL == optval) || (NULL == optlen)) { + sock_set_errno(sock, EFAULT); + done_socket(sock); + return -1; + } + +#if LWIP_TCPIP_CORE_LOCKING + /* core-locking can just call the -impl function */ + LOCK_TCPIP_CORE(); + err = lwip_getsockopt_impl(s, level, optname, optval, optlen); + UNLOCK_TCPIP_CORE(); + +#else /* LWIP_TCPIP_CORE_LOCKING */ + +#if LWIP_MPU_COMPATIBLE + /* MPU_COMPATIBLE copies the optval data, so check for max size here */ + if (*optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) { + sock_set_errno(sock, ENOBUFS); + done_socket(sock); + return -1; + } +#endif /* LWIP_MPU_COMPATIBLE */ + + LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock); + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = *optlen; +#if !LWIP_MPU_COMPATIBLE + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.p = optval; +#endif /* !LWIP_MPU_COMPATIBLE */ + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0; +#if LWIP_NETCONN_SEM_PER_THREAD + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed; +#endif + cberr = tcpip_callback(lwip_getsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data)); + if (cberr != ERR_OK) { + LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data); + sock_set_errno(sock, err_to_errno(cberr)); + done_socket(sock); + return -1; + } + sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0); + + /* write back optlen and optval */ + *optlen = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen; +#if LWIP_MPU_COMPATIBLE + MEMCPY(optval, LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval, + 
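Reviewer note on the lwip_getaddrname() helper above: it queries netconn_getaddr() and is shared by lwip_getsockname() (local = 1) and lwip_getpeername() (local = 0), truncating the result to the caller's *namelen. A small usage sketch for the IPv4 case, assuming s is a bound or connected socket:

#include <string.h>
#include "lwip/sockets.h"

/* Return the locally bound port of socket 's', or -1 on failure. */
static int local_port_of(int s)
{
  struct sockaddr_storage ss;
  socklen_t len = sizeof(ss);

  memset(&ss, 0, sizeof(ss));
  if (lwip_getsockname(s, (struct sockaddr *)&ss, &len) < 0) {
    return -1;
  }
  if (ss.ss_family == AF_INET) {
    return ntohs(((struct sockaddr_in *)&ss)->sin_port);
  }
  return -1;  /* IPv6 handling omitted from this sketch */
}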
LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen); +#endif /* LWIP_MPU_COMPATIBLE */ + + /* maybe lwip_getsockopt_internal has changed err */ + err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err; + LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + + sock_set_errno(sock, err); + done_socket(sock); + return err ? -1 : 0; +} + +#if !LWIP_TCPIP_CORE_LOCKING +/** lwip_getsockopt_callback: only used without CORE_LOCKING + * to get into the tcpip_thread + */ +static void +lwip_getsockopt_callback(void *arg) +{ + struct lwip_setgetsockopt_data *data; + LWIP_ASSERT("arg != NULL", arg != NULL); + data = (struct lwip_setgetsockopt_data *)arg; + + data->err = lwip_getsockopt_impl(data->s, data->level, data->optname, +#if LWIP_MPU_COMPATIBLE + data->optval, +#else /* LWIP_MPU_COMPATIBLE */ + data->optval.p, +#endif /* LWIP_MPU_COMPATIBLE */ + &data->optlen); + + sys_sem_signal((sys_sem_t *)(data->completed_sem)); +} +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +static int +lwip_sockopt_to_ipopt(int optname) +{ + /* Map SO_* values to our internal SOF_* values + * We should not rely on #defines in socket.h + * being in sync with ip.h. + */ + switch (optname) { + case SO_BROADCAST: + return SOF_BROADCAST; + case SO_KEEPALIVE: + return SOF_KEEPALIVE; + case SO_REUSEADDR: + return SOF_REUSEADDR; + default: + LWIP_ASSERT("Unknown socket option", 0); + return 0; + } +} + +/** lwip_getsockopt_impl: the actual implementation of getsockopt: + * same argument as lwip_getsockopt, either called directly or through callback + */ +static int +lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen) +{ + int err = 0; + struct lwip_sock *sock = tryget_socket(s); + if (!sock) { + return EBADF; + } + +#ifdef LWIP_HOOK_SOCKETS_GETSOCKOPT + if (LWIP_HOOK_SOCKETS_GETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) { + return err; + } +#endif + + switch (level) { + + /* Level: SOL_SOCKET */ + case SOL_SOCKET: + switch (optname) { + +#if LWIP_TCP + case SO_ACCEPTCONN: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + if (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_TCP) { + done_socket(sock); + return ENOPROTOOPT; + } + if ((sock->conn->pcb.tcp != NULL) && (sock->conn->pcb.tcp->state == LISTEN)) { + *(int *)optval = 1; + } else { + *(int *)optval = 0; + } + break; +#endif /* LWIP_TCP */ + + /* The option flags */ + case SO_BROADCAST: + case SO_KEEPALIVE: +#if SO_REUSE + case SO_REUSEADDR: +#endif /* SO_REUSE */ + if ((optname == SO_BROADCAST) && + (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) { + done_socket(sock); + return ENOPROTOOPT; + } + + optname = lwip_sockopt_to_ipopt(optname); + + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + *(int *)optval = ip_get_option(sock->conn->pcb.ip, optname); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, optname=0x%x, ..) = %s\n", + s, optname, (*(int *)optval ? 
"on" : "off"))); + break; + + case SO_TYPE: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int); + switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) { + case NETCONN_RAW: + *(int *)optval = SOCK_RAW; + break; + case NETCONN_TCP: + *(int *)optval = SOCK_STREAM; + break; + case NETCONN_UDP: + *(int *)optval = SOCK_DGRAM; + break; + default: /* unrecognized socket type */ + *(int *)optval = netconn_type(sock->conn); + LWIP_DEBUGF(SOCKETS_DEBUG, + ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE): unrecognized socket type %d\n", + s, *(int *)optval)); + } /* switch (netconn_type(sock->conn)) */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE) = %d\n", + s, *(int *)optval)); + break; + + case SO_ERROR: + LWIP_SOCKOPT_CHECK_OPTLEN(sock, *optlen, int); + *(int *)optval = err_to_errno(netconn_err(sock->conn)); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_ERROR) = %d\n", + s, *(int *)optval)); + break; + +#if LWIP_SO_SNDTIMEO + case SO_SNDTIMEO: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE); + LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_sendtimeout(sock->conn)); + break; +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_RCVTIMEO + case SO_RCVTIMEO: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE); + LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_recvtimeout(sock->conn)); + break; +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF + case SO_RCVBUF: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int); + *(int *)optval = netconn_get_recvbufsize(sock->conn); + break; +#endif /* LWIP_SO_RCVBUF */ +#if LWIP_SO_LINGER + case SO_LINGER: { + s16_t conn_linger; + struct linger *linger = (struct linger *)optval; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, struct linger); + conn_linger = sock->conn->linger; + if (conn_linger >= 0) { + linger->l_onoff = 1; + linger->l_linger = (int)conn_linger; + } else { + linger->l_onoff = 0; + linger->l_linger = 0; + } + } + break; +#endif /* LWIP_SO_LINGER */ +#if LWIP_UDP + case SO_NO_CHECK: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_UDP); +#if LWIP_UDPLITE + if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) { + /* this flag is only available for UDP, not for UDP lite */ + done_socket(sock); + return EAFNOSUPPORT; + } +#endif /* LWIP_UDPLITE */ + *(int *)optval = udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM) ? 
1 : 0; + break; +#endif /* LWIP_UDP*/ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + + /* Level: IPPROTO_IP */ + case IPPROTO_IP: + switch (optname) { + case IP_TTL: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + *(int *)optval = sock->conn->pcb.ip->ttl; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TTL) = %d\n", + s, *(int *)optval)); + break; + case IP_TOS: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + *(int *)optval = sock->conn->pcb.ip->tos; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TOS) = %d\n", + s, *(int *)optval)); + break; +#if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP + case IP_MULTICAST_TTL: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) { + done_socket(sock); + return ENOPROTOOPT; + } + *(u8_t *)optval = udp_get_multicast_ttl(sock->conn->pcb.udp); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_TTL) = %d\n", + s, *(int *)optval)); + break; + case IP_MULTICAST_IF: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, struct in_addr); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) { + done_socket(sock); + return ENOPROTOOPT; + } + inet_addr_from_ip4addr((struct in_addr *)optval, udp_get_multicast_netif_addr(sock->conn->pcb.udp)); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_IF) = 0x%"X32_F"\n", + s, *(u32_t *)optval)); + break; + case IP_MULTICAST_LOOP: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t); + if ((sock->conn->pcb.udp->flags & UDP_FLAGS_MULTICAST_LOOP) != 0) { + *(u8_t *)optval = 1; + } else { + *(u8_t *)optval = 0; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_LOOP) = %d\n", + s, *(int *)optval)); + break; +#endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + +#if LWIP_TCP + /* Level: IPPROTO_TCP */ + case IPPROTO_TCP: + /* Special case: all IPPROTO_TCP option take an int */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_TCP); + if (sock->conn->pcb.tcp->state == LISTEN) { + done_socket(sock); + return EINVAL; + } + switch (optname) { + case TCP_NODELAY: + *(int *)optval = tcp_nagle_disabled(sock->conn->pcb.tcp); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_NODELAY) = %s\n", + s, (*(int *)optval) ? 
"on" : "off") ); + break; + case TCP_KEEPALIVE: + *(int *)optval = (int)sock->conn->pcb.tcp->keep_idle; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) = %d\n", + s, *(int *)optval)); + break; + +#if LWIP_TCP_KEEPALIVE + case TCP_KEEPIDLE: + *(int *)optval = (int)(sock->conn->pcb.tcp->keep_idle / 1000); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) = %d\n", + s, *(int *)optval)); + break; + case TCP_KEEPINTVL: + *(int *)optval = (int)(sock->conn->pcb.tcp->keep_intvl / 1000); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) = %d\n", + s, *(int *)optval)); + break; + case TCP_KEEPCNT: + *(int *)optval = (int)sock->conn->pcb.tcp->keep_cnt; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) = %d\n", + s, *(int *)optval)); + break; +#endif /* LWIP_TCP_KEEPALIVE */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_TCP */ + +#if LWIP_IPV6 + /* Level: IPPROTO_IPV6 */ + case IPPROTO_IPV6: + switch (optname) { + case IPV6_V6ONLY: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int); + *(int *)optval = (netconn_get_ipv6only(sock->conn) ? 1 : 0); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY) = %d\n", + s, *(int *)optval)); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_IPV6 */ + +#if LWIP_UDP && LWIP_UDPLITE + /* Level: IPPROTO_UDPLITE */ + case IPPROTO_UDPLITE: + /* Special case: all IPPROTO_UDPLITE option take an int */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + /* If this is no UDP lite socket, ignore any options. 
*/ + if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) { + done_socket(sock); + return ENOPROTOOPT; + } + switch (optname) { + case UDPLITE_SEND_CSCOV: + *(int *)optval = sock->conn->pcb.udp->chksum_len_tx; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) = %d\n", + s, (*(int *)optval)) ); + break; + case UDPLITE_RECV_CSCOV: + *(int *)optval = sock->conn->pcb.udp->chksum_len_rx; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) = %d\n", + s, (*(int *)optval)) ); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_UDP */ + /* Level: IPPROTO_RAW */ + case IPPROTO_RAW: + switch (optname) { +#if LWIP_IPV6 && LWIP_RAW + case IPV6_CHECKSUM: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_RAW); + if (sock->conn->pcb.raw->chksum_reqd == 0) { + *(int *)optval = -1; + } else { + *(int *)optval = sock->conn->pcb.raw->chksum_offset; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM) = %d\n", + s, (*(int *)optval)) ); + break; +#endif /* LWIP_IPV6 && LWIP_RAW */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n", + s, level, optname)); + err = ENOPROTOOPT; + break; + } /* switch (level) */ + + done_socket(sock); + return err; +} + +int +lwip_setsockopt(int s, int level, int optname, const void *optval, socklen_t optlen) +{ + int err = 0; + struct lwip_sock *sock = get_socket(s); +#if !LWIP_TCPIP_CORE_LOCKING + err_t cberr; + LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data); +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + + if (!sock) { + return -1; + } + + if (NULL == optval) { + sock_set_errno(sock, EFAULT); + done_socket(sock); + return -1; + } + +#if LWIP_TCPIP_CORE_LOCKING + /* core-locking can just call the -impl function */ + LOCK_TCPIP_CORE(); + err = lwip_setsockopt_impl(s, level, optname, optval, optlen); + UNLOCK_TCPIP_CORE(); + +#else /* LWIP_TCPIP_CORE_LOCKING */ + +#if LWIP_MPU_COMPATIBLE + /* MPU_COMPATIBLE copies the optval data, so check for max size here */ + if (optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) { + sock_set_errno(sock, ENOBUFS); + done_socket(sock); + return -1; + } +#endif /* LWIP_MPU_COMPATIBLE */ + + LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock); + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = optlen; +#if LWIP_MPU_COMPATIBLE + MEMCPY(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval, optval, optlen); +#else /* LWIP_MPU_COMPATIBLE */ + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.pc = (const void *)optval; +#endif /* LWIP_MPU_COMPATIBLE */ + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0; +#if LWIP_NETCONN_SEM_PER_THREAD + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed; +#endif + cberr = tcpip_callback(lwip_setsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data)); + if (cberr != ERR_OK) { + LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data); + sock_set_errno(sock, err_to_errno(cberr)); + 
done_socket(sock); + return -1; + } + sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0); + + /* maybe lwip_getsockopt_internal has changed err */ + err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err; + LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + + sock_set_errno(sock, err); + done_socket(sock); + return err ? -1 : 0; +} + +#if !LWIP_TCPIP_CORE_LOCKING +/** lwip_setsockopt_callback: only used without CORE_LOCKING + * to get into the tcpip_thread + */ +static void +lwip_setsockopt_callback(void *arg) +{ + struct lwip_setgetsockopt_data *data; + LWIP_ASSERT("arg != NULL", arg != NULL); + data = (struct lwip_setgetsockopt_data *)arg; + + data->err = lwip_setsockopt_impl(data->s, data->level, data->optname, +#if LWIP_MPU_COMPATIBLE + data->optval, +#else /* LWIP_MPU_COMPATIBLE */ + data->optval.pc, +#endif /* LWIP_MPU_COMPATIBLE */ + data->optlen); + + sys_sem_signal((sys_sem_t *)(data->completed_sem)); +} +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +/** lwip_setsockopt_impl: the actual implementation of setsockopt: + * same argument as lwip_setsockopt, either called directly or through callback + */ +static int +lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen) +{ + int err = 0; + struct lwip_sock *sock = tryget_socket(s); + if (!sock) { + return EBADF; + } + +#ifdef LWIP_HOOK_SOCKETS_SETSOCKOPT + if (LWIP_HOOK_SOCKETS_SETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) { + return err; + } +#endif + + switch (level) { + + /* Level: SOL_SOCKET */ + case SOL_SOCKET: + switch (optname) { + + /* SO_ACCEPTCONN is get-only */ + + /* The option flags */ + case SO_BROADCAST: + case SO_KEEPALIVE: +#if SO_REUSE + case SO_REUSEADDR: +#endif /* SO_REUSE */ + if ((optname == SO_BROADCAST) && + (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) { + done_socket(sock); + return ENOPROTOOPT; + } + + optname = lwip_sockopt_to_ipopt(optname); + + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + if (*(const int *)optval) { + ip_set_option(sock->conn->pcb.ip, optname); + } else { + ip_reset_option(sock->conn->pcb.ip, optname); + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, optname=0x%x, ..) -> %s\n", + s, optname, (*(const int *)optval ? 
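Reviewer note on lwip_getsockopt()/lwip_setsockopt() above: with LWIP_TCPIP_CORE_LOCKING they call the *_impl() functions directly under the core lock; otherwise the request is marshalled into the tcpip thread via tcpip_callback() and the caller waits on a completion semaphore. Typical application usage, assuming s is a TCP socket, LWIP_SO_RCVTIMEO is enabled and the default struct timeval option type is in use:

#include <string.h>
#include "lwip/sockets.h"

/* Set a 2 s receive timeout, then read back any pending socket error. */
static int tune_socket(int s)
{
  struct timeval tv;
  int soerr = 0;
  socklen_t len = sizeof(soerr);

  memset(&tv, 0, sizeof(tv));
  tv.tv_sec = 2;
  if (lwip_setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0) {
    return -1;
  }
  if (lwip_getsockopt(s, SOL_SOCKET, SO_ERROR, &soerr, &len) < 0) {
    return -1;
  }
  return soerr;  /* 0 means no pending error (see SO_ERROR handling above) */
}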
"on" : "off"))); + break; + + /* SO_TYPE is get-only */ + /* SO_ERROR is get-only */ + +#if LWIP_SO_SNDTIMEO + case SO_SNDTIMEO: { + long ms_long; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE); + ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval); + if (ms_long < 0) { + done_socket(sock); + return EINVAL; + } + netconn_set_sendtimeout(sock->conn, ms_long); + break; + } +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_RCVTIMEO + case SO_RCVTIMEO: { + long ms_long; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE); + ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval); + if (ms_long < 0) { + done_socket(sock); + return EINVAL; + } + netconn_set_recvtimeout(sock->conn, (u32_t)ms_long); + break; + } +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF + case SO_RCVBUF: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, int); + netconn_set_recvbufsize(sock->conn, *(const int *)optval); + break; +#endif /* LWIP_SO_RCVBUF */ +#if LWIP_SO_LINGER + case SO_LINGER: { + const struct linger *linger = (const struct linger *)optval; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct linger); + if (linger->l_onoff) { + int lingersec = linger->l_linger; + if (lingersec < 0) { + done_socket(sock); + return EINVAL; + } + if (lingersec > 0xFFFF) { + lingersec = 0xFFFF; + } + sock->conn->linger = (s16_t)lingersec; + } else { + sock->conn->linger = -1; + } + } + break; +#endif /* LWIP_SO_LINGER */ +#if LWIP_UDP + case SO_NO_CHECK: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP); +#if LWIP_UDPLITE + if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) { + /* this flag is only available for UDP, not for UDP lite */ + done_socket(sock); + return EAFNOSUPPORT; + } +#endif /* LWIP_UDPLITE */ + if (*(const int *)optval) { + udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM); + } else { + udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM); + } + break; +#endif /* LWIP_UDP */ + case SO_BINDTODEVICE: { + const struct ifreq *iface; + struct netif *n = NULL; + + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct ifreq); + + iface = (const struct ifreq *)optval; + if (iface->ifr_name[0] != 0) { + n = netif_find(iface->ifr_name); + if (n == NULL) { + done_socket(sock); + return ENODEV; + } + } + + switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) { +#if LWIP_TCP + case NETCONN_TCP: + tcp_bind_netif(sock->conn->pcb.tcp, n); + break; +#endif +#if LWIP_UDP + case NETCONN_UDP: + udp_bind_netif(sock->conn->pcb.udp, n); + break; +#endif +#if LWIP_RAW + case NETCONN_RAW: + raw_bind_netif(sock->conn->pcb.raw, n); + break; +#endif + default: + LWIP_ASSERT("Unhandled netconn type in SO_BINDTODEVICE", 0); + break; + } + } + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + + /* Level: IPPROTO_IP */ + case IPPROTO_IP: + switch (optname) { + case IP_TTL: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + sock->conn->pcb.ip->ttl = (u8_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TTL, ..) 
-> %d\n", + s, sock->conn->pcb.ip->ttl)); + break; + case IP_TOS: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + sock->conn->pcb.ip->tos = (u8_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TOS, ..)-> %d\n", + s, sock->conn->pcb.ip->tos)); + break; +#if LWIP_NETBUF_RECVINFO + case IP_PKTINFO: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP); + if (*(const int *)optval) { + sock->conn->flags |= NETCONN_FLAG_PKTINFO; + } else { + sock->conn->flags &= ~NETCONN_FLAG_PKTINFO; + } + break; +#endif /* LWIP_NETBUF_RECVINFO */ +#if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP + case IP_MULTICAST_TTL: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP); + udp_set_multicast_ttl(sock->conn->pcb.udp, (u8_t)(*(const u8_t *)optval)); + break; + case IP_MULTICAST_IF: { + ip4_addr_t if_addr; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct in_addr, NETCONN_UDP); + inet_addr_to_ip4addr(&if_addr, (const struct in_addr *)optval); + udp_set_multicast_netif_addr(sock->conn->pcb.udp, &if_addr); + } + break; + case IP_MULTICAST_LOOP: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP); + if (*(const u8_t *)optval) { + udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP); + } else { + udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP); + } + break; +#endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */ +#if LWIP_IGMP + case IP_ADD_MEMBERSHIP: + case IP_DROP_MEMBERSHIP: { + /* If this is a TCP or a RAW socket, ignore these options. */ + err_t igmp_err; + const struct ip_mreq *imr = (const struct ip_mreq *)optval; + ip4_addr_t if_addr; + ip4_addr_t multi_addr; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ip_mreq, NETCONN_UDP); + inet_addr_to_ip4addr(&if_addr, &imr->imr_interface); + inet_addr_to_ip4addr(&multi_addr, &imr->imr_multiaddr); + if (optname == IP_ADD_MEMBERSHIP) { + if (!lwip_socket_register_membership(s, &if_addr, &multi_addr)) { + /* cannot track membership (out of memory) */ + err = ENOMEM; + igmp_err = ERR_OK; + } else { + igmp_err = igmp_joingroup(&if_addr, &multi_addr); + } + } else { + igmp_err = igmp_leavegroup(&if_addr, &multi_addr); + lwip_socket_unregister_membership(s, &if_addr, &multi_addr); + } + if (igmp_err != ERR_OK) { + err = EADDRNOTAVAIL; + } + } + break; +#endif /* LWIP_IGMP */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + +#if LWIP_TCP + /* Level: IPPROTO_TCP */ + case IPPROTO_TCP: + /* Special case: all IPPROTO_TCP option take an int */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_TCP); + if (sock->conn->pcb.tcp->state == LISTEN) { + done_socket(sock); + return EINVAL; + } + switch (optname) { + case TCP_NODELAY: + if (*(const int *)optval) { + tcp_nagle_disable(sock->conn->pcb.tcp); + } else { + tcp_nagle_enable(sock->conn->pcb.tcp); + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_NODELAY) -> %s\n", + s, (*(const int *)optval) ? 
"on" : "off") ); + break; + case TCP_KEEPALIVE: + sock->conn->pcb.tcp->keep_idle = (u32_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) -> %"U32_F"\n", + s, sock->conn->pcb.tcp->keep_idle)); + break; + +#if LWIP_TCP_KEEPALIVE + case TCP_KEEPIDLE: + sock->conn->pcb.tcp->keep_idle = 1000 * (u32_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) -> %"U32_F"\n", + s, sock->conn->pcb.tcp->keep_idle)); + break; + case TCP_KEEPINTVL: + sock->conn->pcb.tcp->keep_intvl = 1000 * (u32_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) -> %"U32_F"\n", + s, sock->conn->pcb.tcp->keep_intvl)); + break; + case TCP_KEEPCNT: + sock->conn->pcb.tcp->keep_cnt = (u32_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) -> %"U32_F"\n", + s, sock->conn->pcb.tcp->keep_cnt)); + break; +#endif /* LWIP_TCP_KEEPALIVE */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_TCP*/ + +#if LWIP_IPV6 + /* Level: IPPROTO_IPV6 */ + case IPPROTO_IPV6: + switch (optname) { + case IPV6_V6ONLY: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + if (*(const int *)optval) { + netconn_set_ipv6only(sock->conn, 1); + } else { + netconn_set_ipv6only(sock->conn, 0); + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY, ..) -> %d\n", + s, (netconn_get_ipv6only(sock->conn) ? 1 : 0))); + break; +#if LWIP_IPV6_MLD + case IPV6_JOIN_GROUP: + case IPV6_LEAVE_GROUP: { + /* If this is a TCP or a RAW socket, ignore these options. */ + err_t mld6_err; + struct netif *netif; + ip6_addr_t multi_addr; + const struct ipv6_mreq *imr = (const struct ipv6_mreq *)optval; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ipv6_mreq, NETCONN_UDP); + inet6_addr_to_ip6addr(&multi_addr, &imr->ipv6mr_multiaddr); + LWIP_ASSERT("Invalid netif index", imr->ipv6mr_interface <= 0xFFu); + netif = netif_get_by_index((u8_t)imr->ipv6mr_interface); + if (netif == NULL) { + err = EADDRNOTAVAIL; + break; + } + + if (optname == IPV6_JOIN_GROUP) { + if (!lwip_socket_register_mld6_membership(s, imr->ipv6mr_interface, &multi_addr)) { + /* cannot track membership (out of memory) */ + err = ENOMEM; + mld6_err = ERR_OK; + } else { + mld6_err = mld6_joingroup_netif(netif, &multi_addr); + } + } else { + mld6_err = mld6_leavegroup_netif(netif, &multi_addr); + lwip_socket_unregister_mld6_membership(s, imr->ipv6mr_interface, &multi_addr); + } + if (mld6_err != ERR_OK) { + err = EADDRNOTAVAIL; + } + } + break; +#endif /* LWIP_IPV6_MLD */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_IPV6 */ + +#if LWIP_UDP && LWIP_UDPLITE + /* Level: IPPROTO_UDPLITE */ + case IPPROTO_UDPLITE: + /* Special case: all IPPROTO_UDPLITE option take an int */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + /* If this is no UDP lite socket, ignore any options. 
*/ + if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) { + done_socket(sock); + return ENOPROTOOPT; + } + switch (optname) { + case UDPLITE_SEND_CSCOV: + if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) { + /* don't allow illegal values! */ + sock->conn->pcb.udp->chksum_len_tx = 8; + } else { + sock->conn->pcb.udp->chksum_len_tx = (u16_t) * (const int *)optval; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) -> %d\n", + s, (*(const int *)optval)) ); + break; + case UDPLITE_RECV_CSCOV: + if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) { + /* don't allow illegal values! */ + sock->conn->pcb.udp->chksum_len_rx = 8; + } else { + sock->conn->pcb.udp->chksum_len_rx = (u16_t) * (const int *)optval; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) -> %d\n", + s, (*(const int *)optval)) ); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_UDP */ + /* Level: IPPROTO_RAW */ + case IPPROTO_RAW: + switch (optname) { +#if LWIP_IPV6 && LWIP_RAW + case IPV6_CHECKSUM: + /* It should not be possible to disable the checksum generation with ICMPv6 + * as per RFC 3542 chapter 3.1 */ + if (sock->conn->pcb.raw->protocol == IPPROTO_ICMPV6) { + done_socket(sock); + return EINVAL; + } + + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_RAW); + if (*(const int *)optval < 0) { + sock->conn->pcb.raw->chksum_reqd = 0; + } else if (*(const int *)optval & 1) { + /* Per RFC3542, odd offsets are not allowed */ + done_socket(sock); + return EINVAL; + } else { + sock->conn->pcb.raw->chksum_reqd = 1; + sock->conn->pcb.raw->chksum_offset = (u16_t) * (const int *)optval; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM, ..) 
-> %d\n", + s, sock->conn->pcb.raw->chksum_reqd)); + break; +#endif /* LWIP_IPV6 && LWIP_RAW */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n", + s, level, optname)); + err = ENOPROTOOPT; + break; + } /* switch (level) */ + + done_socket(sock); + return err; +} + +int +lwip_ioctl(int s, long cmd, void *argp) +{ + struct lwip_sock *sock = get_socket(s); + u8_t val; +#if LWIP_SO_RCVBUF + int recv_avail; +#endif /* LWIP_SO_RCVBUF */ + + if (!sock) { + return -1; + } + + switch (cmd) { +#if LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE + case FIONREAD: + if (!argp) { + sock_set_errno(sock, EINVAL); + done_socket(sock); + return -1; + } +#if LWIP_FIONREAD_LINUXMODE + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { + struct netbuf *nb; + if (sock->lastdata.netbuf) { + nb = sock->lastdata.netbuf; + *((int *)argp) = nb->p->tot_len; + } else { + struct netbuf *rxbuf; + err_t err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &rxbuf, NETCONN_DONTBLOCK); + if (err != ERR_OK) { + *((int *)argp) = 0; + } else { + sock->lastdata.netbuf = rxbuf; + *((int *)argp) = rxbuf->p->tot_len; + } + } + done_socket(sock); + return 0; + } +#endif /* LWIP_FIONREAD_LINUXMODE */ + +#if LWIP_SO_RCVBUF + /* we come here if either LWIP_FIONREAD_LINUXMODE==0 or this is a TCP socket */ + SYS_ARCH_GET(sock->conn->recv_avail, recv_avail); + if (recv_avail < 0) { + recv_avail = 0; + } + + /* Check if there is data left from the last recv operation. /maq 041215 */ + if (sock->lastdata.netbuf) { + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { + recv_avail += sock->lastdata.pbuf->tot_len; + } else { + recv_avail += sock->lastdata.netbuf->p->tot_len; + } + } + *((int *)argp) = recv_avail; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONREAD, %p) = %"U16_F"\n", s, argp, *((u16_t *)argp))); + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +#else /* LWIP_SO_RCVBUF */ + break; +#endif /* LWIP_SO_RCVBUF */ +#endif /* LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE */ + + case (long)FIONBIO: + val = 0; + if (argp && *(int *)argp) { + val = 1; + } + netconn_set_nonblocking(sock->conn, val); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONBIO, %d)\n", s, val)); + sock_set_errno(sock, 0); + done_socket(sock); + return 0; + + default: + break; + } /* switch (cmd) */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, UNIMPL: 0x%lx, %p)\n", s, cmd, argp)); + sock_set_errno(sock, ENOSYS); /* not yet implemented */ + done_socket(sock); + return -1; +} + +/** A minimal implementation of fcntl. + * Currently only the commands F_GETFL and F_SETFL are implemented. + * The flag O_NONBLOCK and access modes are supported for F_GETFL, only + * the flag O_NONBLOCK is implemented for F_SETFL. + */ +int +lwip_fcntl(int s, int cmd, int val) +{ + struct lwip_sock *sock = get_socket(s); + int ret = -1; + int op_mode = 0; + + if (!sock) { + return -1; + } + + switch (cmd) { + case F_GETFL: + ret = netconn_is_nonblocking(sock->conn) ? 
O_NONBLOCK : 0; + sock_set_errno(sock, 0); + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { +#if LWIP_TCPIP_CORE_LOCKING + LOCK_TCPIP_CORE(); +#else + SYS_ARCH_DECL_PROTECT(lev); + /* the proper thing to do here would be to get into the tcpip_thread, + but locking should be OK as well since we only *read* some flags */ + SYS_ARCH_PROTECT(lev); +#endif +#if LWIP_TCP + if (sock->conn->pcb.tcp) { + if (!(sock->conn->pcb.tcp->flags & TF_RXCLOSED)) { + op_mode |= O_RDONLY; + } + if (!(sock->conn->pcb.tcp->flags & TF_FIN)) { + op_mode |= O_WRONLY; + } + } +#endif +#if LWIP_TCPIP_CORE_LOCKING + UNLOCK_TCPIP_CORE(); +#else + SYS_ARCH_UNPROTECT(lev); +#endif + } else { + op_mode |= O_RDWR; + } + + /* ensure O_RDWR for (O_RDONLY|O_WRONLY) != O_RDWR cases */ + ret |= (op_mode == (O_RDONLY | O_WRONLY)) ? O_RDWR : op_mode; + + break; + case F_SETFL: + /* Bits corresponding to the file access mode and the file creation flags [..] that are set in arg shall be ignored */ + val &= ~(O_RDONLY | O_WRONLY | O_RDWR); + if ((val & ~O_NONBLOCK) == 0) { + /* only O_NONBLOCK, all other bits are zero */ + netconn_set_nonblocking(sock->conn, val & O_NONBLOCK); + ret = 0; + sock_set_errno(sock, 0); + } else { + sock_set_errno(sock, ENOSYS); /* not yet implemented */ + } + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_fcntl(%d, UNIMPL: %d, %d)\n", s, cmd, val)); + sock_set_errno(sock, ENOSYS); /* not yet implemented */ + break; + } + done_socket(sock); + return ret; +} + +#if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES +int +fcntl(int s, int cmd, ...) +{ + va_list ap; + int val; + + va_start(ap, cmd); + val = va_arg(ap, int); + va_end(ap); + return lwip_fcntl(s, cmd, val); +} +#endif + +const char * +lwip_inet_ntop(int af, const void *src, char *dst, socklen_t size) +{ + const char *ret = NULL; + int size_int = (int)size; + if (size_int < 0) { + set_errno(ENOSPC); + return NULL; + } + switch (af) { +#if LWIP_IPV4 + case AF_INET: + ret = ip4addr_ntoa_r((const ip4_addr_t *)src, dst, size_int); + if (ret == NULL) { + set_errno(ENOSPC); + } + break; +#endif +#if LWIP_IPV6 + case AF_INET6: + ret = ip6addr_ntoa_r((const ip6_addr_t *)src, dst, size_int); + if (ret == NULL) { + set_errno(ENOSPC); + } + break; +#endif + default: + set_errno(EAFNOSUPPORT); + break; + } + return ret; +} + +int +lwip_inet_pton(int af, const char *src, void *dst) +{ + int err; + switch (af) { +#if LWIP_IPV4 + case AF_INET: + err = ip4addr_aton(src, (ip4_addr_t *)dst); + break; +#endif +#if LWIP_IPV6 + case AF_INET6: { + /* convert into temporary variable since ip6_addr_t might be larger + than in6_addr when scopes are enabled */ + ip6_addr_t addr; + err = ip6addr_aton(src, &addr); + if (err) { + memcpy(dst, &addr.addr, sizeof(addr.addr)); + } + break; + } +#endif + default: + err = -1; + set_errno(EAFNOSUPPORT); + break; + } + return err; +} + +#if LWIP_IGMP +/** Register a new IGMP membership. On socket close, the membership is dropped automatically. + * + * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK). 
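As an aside on the fcntl and address-conversion helpers defined just above, the sketch below shows one way application code might combine them: it marks a freshly created UDP socket non-blocking through F_GETFL/F_SETFL and parses a literal peer address with lwip_inet_pton. The function name, the address 192.0.2.10 and the port 5000 are placeholders invented for this example and are not part of sockets.c.

/* Illustrative application snippet (not part of sockets.c). */
#include <string.h>
#include "lwip/sockets.h"
#include "lwip/inet.h"

static int open_nonblocking_udp(void)
{
  struct sockaddr_in peer;
  int s = lwip_socket(AF_INET, SOCK_DGRAM, 0);
  if (s < 0) {
    return -1;
  }
  /* lwip_fcntl() only understands O_NONBLOCK for F_SETFL */
  lwip_fcntl(s, F_SETFL, lwip_fcntl(s, F_GETFL, 0) | O_NONBLOCK);

  memset(&peer, 0, sizeof(peer));
  peer.sin_family = AF_INET;
  peer.sin_port = lwip_htons(5000);                       /* placeholder port */
  lwip_inet_pton(AF_INET, "192.0.2.10", &peer.sin_addr);  /* placeholder address */
  lwip_connect(s, (struct sockaddr *)&peer, sizeof(peer));
  return s;
}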
+ * + * @return 1 on success, 0 on failure + */ +static int +lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return 0; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if (socket_ipv4_multicast_memberships[i].sock == NULL) { + socket_ipv4_multicast_memberships[i].sock = sock; + ip4_addr_copy(socket_ipv4_multicast_memberships[i].if_addr, *if_addr); + ip4_addr_copy(socket_ipv4_multicast_memberships[i].multi_addr, *multi_addr); + done_socket(sock); + return 1; + } + } + done_socket(sock); + return 0; +} + +/** Unregister a previously registered membership. This prevents dropping the membership + * on socket close. + * + * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK). + */ +static void +lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if ((socket_ipv4_multicast_memberships[i].sock == sock) && + ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].if_addr, if_addr) && + ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].multi_addr, multi_addr)) { + socket_ipv4_multicast_memberships[i].sock = NULL; + ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr); + ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr); + break; + } + } + done_socket(sock); +} + +/** Drop all memberships of a socket that were not dropped explicitly via setsockopt. + * + * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK). + */ +static void +lwip_socket_drop_registered_memberships(int s) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if (socket_ipv4_multicast_memberships[i].sock == sock) { + ip_addr_t multi_addr, if_addr; + ip_addr_copy_from_ip4(multi_addr, socket_ipv4_multicast_memberships[i].multi_addr); + ip_addr_copy_from_ip4(if_addr, socket_ipv4_multicast_memberships[i].if_addr); + socket_ipv4_multicast_memberships[i].sock = NULL; + ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr); + ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr); + + netconn_join_leave_group(sock->conn, &multi_addr, &if_addr, NETCONN_LEAVE); + } + } + done_socket(sock); +} +#endif /* LWIP_IGMP */ + +#if LWIP_IPV6_MLD +/** Register a new MLD6 membership. On socket close, the membership is dropped automatically. + * + * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK). + * + * @return 1 on success, 0 on failure + */ +static int +lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return 0; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if (socket_ipv6_multicast_memberships[i].sock == NULL) { + socket_ipv6_multicast_memberships[i].sock = sock; + socket_ipv6_multicast_memberships[i].if_idx = (u8_t)if_idx; + ip6_addr_copy(socket_ipv6_multicast_memberships[i].multi_addr, *multi_addr); + done_socket(sock); + return 1; + } + } + done_socket(sock); + return 0; +} + +/** Unregister a previously registered MLD6 membership. This prevents dropping the membership + * on socket close. + * + * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK). 
+ */ +static void +lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if ((socket_ipv6_multicast_memberships[i].sock == sock) && + (socket_ipv6_multicast_memberships[i].if_idx == if_idx) && + ip6_addr_cmp(&socket_ipv6_multicast_memberships[i].multi_addr, multi_addr)) { + socket_ipv6_multicast_memberships[i].sock = NULL; + socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX; + ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr); + break; + } + } + done_socket(sock); +} + +/** Drop all MLD6 memberships of a socket that were not dropped explicitly via setsockopt. + * + * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK). + */ +static void +lwip_socket_drop_registered_mld6_memberships(int s) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if (socket_ipv6_multicast_memberships[i].sock == sock) { + ip_addr_t multi_addr; + u8_t if_idx; + + ip_addr_copy_from_ip6(multi_addr, socket_ipv6_multicast_memberships[i].multi_addr); + if_idx = socket_ipv6_multicast_memberships[i].if_idx; + + socket_ipv6_multicast_memberships[i].sock = NULL; + socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX; + ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr); + + netconn_join_leave_group_netif(sock->conn, &multi_addr, if_idx, NETCONN_LEAVE); + } + } + done_socket(sock); +} +#endif /* LWIP_IPV6_MLD */ + +#endif /* LWIP_SOCKET */ diff --git a/Middlewares/Third_Party/LwIP/src/api/tcpip.c b/Middlewares/Third_Party/LwIP/src/api/tcpip.c new file mode 100644 index 0000000..743553a --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/api/tcpip.c @@ -0,0 +1,658 @@ +/** + * @file + * Sequential API Main thread module + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if !NO_SYS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/priv/tcpip_priv.h" +#include "lwip/sys.h" +#include "lwip/memp.h" +#include "lwip/mem.h" +#include "lwip/init.h" +#include "lwip/ip.h" +#include "lwip/pbuf.h" +#include "lwip/etharp.h" +#include "netif/ethernet.h" + +#define TCPIP_MSG_VAR_REF(name) API_VAR_REF(name) +#define TCPIP_MSG_VAR_DECLARE(name) API_VAR_DECLARE(struct tcpip_msg, name) +#define TCPIP_MSG_VAR_ALLOC(name) API_VAR_ALLOC(struct tcpip_msg, MEMP_TCPIP_MSG_API, name, ERR_MEM) +#define TCPIP_MSG_VAR_FREE(name) API_VAR_FREE(MEMP_TCPIP_MSG_API, name) + +/* global variables */ +static tcpip_init_done_fn tcpip_init_done; +static void *tcpip_init_done_arg; +static sys_mbox_t tcpip_mbox; + +#if LWIP_TCPIP_CORE_LOCKING +/** The global semaphore to lock the stack. */ +sys_mutex_t lock_tcpip_core; +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +static void tcpip_thread_handle_msg(struct tcpip_msg *msg); + +#if !LWIP_TIMERS +/* wait for a message with timers disabled (e.g. pass a timer-check trigger into tcpip_thread) */ +#define TCPIP_MBOX_FETCH(mbox, msg) sys_mbox_fetch(mbox, msg) +#else /* !LWIP_TIMERS */ +/* wait for a message, timeouts are processed while waiting */ +#define TCPIP_MBOX_FETCH(mbox, msg) tcpip_timeouts_mbox_fetch(mbox, msg) +/** + * Wait (forever) for a message to arrive in an mbox. + * While waiting, timeouts are processed. + * + * @param mbox the mbox to fetch the message from + * @param msg the place to store the message + */ +static void +tcpip_timeouts_mbox_fetch(sys_mbox_t *mbox, void **msg) +{ + u32_t sleeptime, res; + +again: + LWIP_ASSERT_CORE_LOCKED(); + + sleeptime = sys_timeouts_sleeptime(); + if (sleeptime == SYS_TIMEOUTS_SLEEPTIME_INFINITE) { + UNLOCK_TCPIP_CORE(); + sys_arch_mbox_fetch(mbox, msg, 0); + LOCK_TCPIP_CORE(); + return; + } else if (sleeptime == 0) { + sys_check_timeouts(); + /* We try again to fetch a message from the mbox. */ + goto again; + } + + UNLOCK_TCPIP_CORE(); + res = sys_arch_mbox_fetch(mbox, msg, sleeptime); + LOCK_TCPIP_CORE(); + if (res == SYS_ARCH_TIMEOUT) { + /* If a SYS_ARCH_TIMEOUT value is returned, a timeout occurred + before a message could be fetched. */ + sys_check_timeouts(); + /* We try again to fetch a message from the mbox. */ + goto again; + } +} +#endif /* !LWIP_TIMERS */ + +/** + * The main lwIP thread. This thread has exclusive access to lwIP core functions + * (unless access to them is not locked). Other threads communicate with this + * thread using message boxes. + * + * It also starts all the timers to make sure they are running in the right + * thread context. + * + * @param arg unused argument + */ +static void +tcpip_thread(void *arg) +{ + struct tcpip_msg *msg; + LWIP_UNUSED_ARG(arg); + + LWIP_MARK_TCPIP_THREAD(); + + LOCK_TCPIP_CORE(); + if (tcpip_init_done != NULL) { + tcpip_init_done(tcpip_init_done_arg); + } + + while (1) { /* MAIN Loop */ + LWIP_TCPIP_THREAD_ALIVE(); + /* wait for a message, timeouts are processed while waiting */ + TCPIP_MBOX_FETCH(&tcpip_mbox, (void **)&msg); + if (msg == NULL) { + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: invalid message: NULL\n")); + LWIP_ASSERT("tcpip_thread: invalid message", 0); + continue; + } + tcpip_thread_handle_msg(msg); + } +} + +/* Handle a single tcpip_msg + * This is in its own function for access by tests only. 
+ */ +static void +tcpip_thread_handle_msg(struct tcpip_msg *msg) +{ + switch (msg->type) { +#if !LWIP_TCPIP_CORE_LOCKING + case TCPIP_MSG_API: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: API message %p\n", (void *)msg)); + msg->msg.api_msg.function(msg->msg.api_msg.msg); + break; + case TCPIP_MSG_API_CALL: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: API CALL message %p\n", (void *)msg)); + msg->msg.api_call.arg->err = msg->msg.api_call.function(msg->msg.api_call.arg); + sys_sem_signal(msg->msg.api_call.sem); + break; +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + +#if !LWIP_TCPIP_CORE_LOCKING_INPUT + case TCPIP_MSG_INPKT: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: PACKET %p\n", (void *)msg)); + if (msg->msg.inp.input_fn(msg->msg.inp.p, msg->msg.inp.netif) != ERR_OK) { + pbuf_free(msg->msg.inp.p); + } + memp_free(MEMP_TCPIP_MSG_INPKT, msg); + break; +#endif /* !LWIP_TCPIP_CORE_LOCKING_INPUT */ + +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS + case TCPIP_MSG_TIMEOUT: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: TIMEOUT %p\n", (void *)msg)); + sys_timeout(msg->msg.tmo.msecs, msg->msg.tmo.h, msg->msg.tmo.arg); + memp_free(MEMP_TCPIP_MSG_API, msg); + break; + case TCPIP_MSG_UNTIMEOUT: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: UNTIMEOUT %p\n", (void *)msg)); + sys_untimeout(msg->msg.tmo.h, msg->msg.tmo.arg); + memp_free(MEMP_TCPIP_MSG_API, msg); + break; +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + + case TCPIP_MSG_CALLBACK: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: CALLBACK %p\n", (void *)msg)); + msg->msg.cb.function(msg->msg.cb.ctx); + memp_free(MEMP_TCPIP_MSG_API, msg); + break; + + case TCPIP_MSG_CALLBACK_STATIC: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: CALLBACK_STATIC %p\n", (void *)msg)); + msg->msg.cb.function(msg->msg.cb.ctx); + break; + + default: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: invalid message: %d\n", msg->type)); + LWIP_ASSERT("tcpip_thread: invalid message", 0); + break; + } +} + +#ifdef TCPIP_THREAD_TEST +/** Work on queued items in single-threaded test mode */ +int +tcpip_thread_poll_one(void) +{ + int ret = 0; + struct tcpip_msg *msg; + + if (sys_arch_mbox_tryfetch(&tcpip_mbox, (void **)&msg) != SYS_ARCH_TIMEOUT) { + LOCK_TCPIP_CORE(); + if (msg != NULL) { + tcpip_thread_handle_msg(msg); + ret = 1; + } + UNLOCK_TCPIP_CORE(); + } + return ret; +} +#endif + +/** + * Pass a received packet to tcpip_thread for input processing + * + * @param p the received packet + * @param inp the network interface on which the packet was received + * @param input_fn input function to call + */ +err_t +tcpip_inpkt(struct pbuf *p, struct netif *inp, netif_input_fn input_fn) +{ +#if LWIP_TCPIP_CORE_LOCKING_INPUT + err_t ret; + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_inpkt: PACKET %p/%p\n", (void *)p, (void *)inp)); + LOCK_TCPIP_CORE(); + ret = input_fn(p, inp); + UNLOCK_TCPIP_CORE(); + return ret; +#else /* LWIP_TCPIP_CORE_LOCKING_INPUT */ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_INPKT); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_INPKT; + msg->msg.inp.p = p; + msg->msg.inp.netif = inp; + msg->msg.inp.input_fn = input_fn; + if (sys_mbox_trypost(&tcpip_mbox, msg) != ERR_OK) { + memp_free(MEMP_TCPIP_MSG_INPKT, msg); + return ERR_MEM; + } + return ERR_OK; +#endif /* LWIP_TCPIP_CORE_LOCKING_INPUT */ +} + +/** + * @ingroup lwip_os + * Pass a received packet to tcpip_thread for input processing with + * ethernet_input or ip_input. 
Don't call directly, pass to netif_add() + * and call netif->input(). + * + * @param p the received packet, p->payload pointing to the Ethernet header or + * to an IP header (if inp doesn't have NETIF_FLAG_ETHARP or + * NETIF_FLAG_ETHERNET flags) + * @param inp the network interface on which the packet was received + */ +err_t +tcpip_input(struct pbuf *p, struct netif *inp) +{ +#if LWIP_ETHERNET + if (inp->flags & (NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET)) { + return tcpip_inpkt(p, inp, ethernet_input); + } else +#endif /* LWIP_ETHERNET */ + return tcpip_inpkt(p, inp, ip_input); +} + +/** + * @ingroup lwip_os + * Call a specific function in the thread context of + * tcpip_thread for easy access synchronization. + * A function called in that way may access lwIP core code + * without fearing concurrent access. + * Blocks until the request is posted. + * Must not be called from interrupt context! + * + * @param function the function to call + * @param ctx parameter passed to f + * @return ERR_OK if the function was called, another err_t if not + * + * @see tcpip_try_callback + */ +err_t +tcpip_callback(tcpip_callback_fn function, void *ctx) +{ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_CALLBACK; + msg->msg.cb.function = function; + msg->msg.cb.ctx = ctx; + + sys_mbox_post(&tcpip_mbox, msg); + return ERR_OK; +} + +/** + * @ingroup lwip_os + * Call a specific function in the thread context of + * tcpip_thread for easy access synchronization. + * A function called in that way may access lwIP core code + * without fearing concurrent access. + * Does NOT block when the request cannot be posted because the + * tcpip_mbox is full, but returns ERR_MEM instead. + * Can be called from interrupt context. 
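As a rough illustration of the callback mechanism documented above, an application thread can hand a short job to tcpip_thread like this; my_poll_work() and my_request_poll() are hypothetical names used only for this sketch.

/* Illustrative use of tcpip_callback() (not part of tcpip.c): run a small job
 * in tcpip_thread context so it can touch lwIP core state without extra locking. */
#include "lwip/tcpip.h"
#include "lwip/netif.h"

static void my_poll_work(void *ctx)
{
  struct netif *nif = (struct netif *)ctx;
  /* executing inside tcpip_thread here */
  LWIP_DEBUGF(TCPIP_DEBUG, ("my_poll_work: netif flags 0x%02x\n", (unsigned)nif->flags));
}

void my_request_poll(struct netif *nif)
{
  if (tcpip_callback(my_poll_work, nif) != ERR_OK) {
    /* message allocation failed; the caller may retry later */
  }
}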
+ * + * @param function the function to call + * @param ctx parameter passed to f + * @return ERR_OK if the function was called, another err_t if not + * + * @see tcpip_callback + */ +err_t +tcpip_try_callback(tcpip_callback_fn function, void *ctx) +{ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_CALLBACK; + msg->msg.cb.function = function; + msg->msg.cb.ctx = ctx; + + if (sys_mbox_trypost(&tcpip_mbox, msg) != ERR_OK) { + memp_free(MEMP_TCPIP_MSG_API, msg); + return ERR_MEM; + } + return ERR_OK; +} + +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS +/** + * call sys_timeout in tcpip_thread + * + * @param msecs time in milliseconds for timeout + * @param h function to be called on timeout + * @param arg argument to pass to timeout function h + * @return ERR_MEM on memory error, ERR_OK otherwise + */ +err_t +tcpip_timeout(u32_t msecs, sys_timeout_handler h, void *arg) +{ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_TIMEOUT; + msg->msg.tmo.msecs = msecs; + msg->msg.tmo.h = h; + msg->msg.tmo.arg = arg; + sys_mbox_post(&tcpip_mbox, msg); + return ERR_OK; +} + +/** + * call sys_untimeout in tcpip_thread + * + * @param h function to be called on timeout + * @param arg argument to pass to timeout function h + * @return ERR_MEM on memory error, ERR_OK otherwise + */ +err_t +tcpip_untimeout(sys_timeout_handler h, void *arg) +{ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_UNTIMEOUT; + msg->msg.tmo.h = h; + msg->msg.tmo.arg = arg; + sys_mbox_post(&tcpip_mbox, msg); + return ERR_OK; +} +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + + +/** + * Sends a message to TCPIP thread to call a function. Caller thread blocks on + * on a provided semaphore, which ist NOT automatically signalled by TCPIP thread, + * this has to be done by the user. + * It is recommended to use LWIP_TCPIP_CORE_LOCKING since this is the way + * with least runtime overhead. + * + * @param fn function to be called from TCPIP thread + * @param apimsg argument to API function + * @param sem semaphore to wait on + * @return ERR_OK if the function was called, another err_t if not + */ +err_t +tcpip_send_msg_wait_sem(tcpip_callback_fn fn, void *apimsg, sys_sem_t *sem) +{ +#if LWIP_TCPIP_CORE_LOCKING + LWIP_UNUSED_ARG(sem); + LOCK_TCPIP_CORE(); + fn(apimsg); + UNLOCK_TCPIP_CORE(); + return ERR_OK; +#else /* LWIP_TCPIP_CORE_LOCKING */ + TCPIP_MSG_VAR_DECLARE(msg); + + LWIP_ASSERT("semaphore not initialized", sys_sem_valid(sem)); + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + TCPIP_MSG_VAR_ALLOC(msg); + TCPIP_MSG_VAR_REF(msg).type = TCPIP_MSG_API; + TCPIP_MSG_VAR_REF(msg).msg.api_msg.function = fn; + TCPIP_MSG_VAR_REF(msg).msg.api_msg.msg = apimsg; + sys_mbox_post(&tcpip_mbox, &TCPIP_MSG_VAR_REF(msg)); + sys_arch_sem_wait(sem, 0); + TCPIP_MSG_VAR_FREE(msg); + return ERR_OK; +#endif /* LWIP_TCPIP_CORE_LOCKING */ +} + +/** + * Synchronously calls function in TCPIP thread and waits for its completion. + * It is recommended to use LWIP_TCPIP_CORE_LOCKING (preferred) or + * LWIP_NETCONN_SEM_PER_THREAD. 
+ * If not, a semaphore is created and destroyed on every call which is usually + * an expensive/slow operation. + * @param fn Function to call + * @param call Call parameters + * @return Return value from tcpip_api_call_fn + */ +err_t +tcpip_api_call(tcpip_api_call_fn fn, struct tcpip_api_call_data *call) +{ +#if LWIP_TCPIP_CORE_LOCKING + err_t err; + LOCK_TCPIP_CORE(); + err = fn(call); + UNLOCK_TCPIP_CORE(); + return err; +#else /* LWIP_TCPIP_CORE_LOCKING */ + TCPIP_MSG_VAR_DECLARE(msg); + +#if !LWIP_NETCONN_SEM_PER_THREAD + err_t err = sys_sem_new(&call->sem, 0); + if (err != ERR_OK) { + return err; + } +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + TCPIP_MSG_VAR_ALLOC(msg); + TCPIP_MSG_VAR_REF(msg).type = TCPIP_MSG_API_CALL; + TCPIP_MSG_VAR_REF(msg).msg.api_call.arg = call; + TCPIP_MSG_VAR_REF(msg).msg.api_call.function = fn; +#if LWIP_NETCONN_SEM_PER_THREAD + TCPIP_MSG_VAR_REF(msg).msg.api_call.sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + TCPIP_MSG_VAR_REF(msg).msg.api_call.sem = &call->sem; +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + sys_mbox_post(&tcpip_mbox, &TCPIP_MSG_VAR_REF(msg)); + sys_arch_sem_wait(TCPIP_MSG_VAR_REF(msg).msg.api_call.sem, 0); + TCPIP_MSG_VAR_FREE(msg); + +#if !LWIP_NETCONN_SEM_PER_THREAD + sys_sem_free(&call->sem); +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + return call->err; +#endif /* LWIP_TCPIP_CORE_LOCKING */ +} + +/** + * @ingroup lwip_os + * Allocate a structure for a static callback message and initialize it. + * The message has a special type such that lwIP never frees it. + * This is intended to be used to send "static" messages from interrupt context, + * e.g. the message is allocated once and posted several times from an IRQ + * using tcpip_callbackmsg_trycallback(). + * Example usage: Trigger execution of an ethernet IRQ DPC routine in lwIP thread context. + * + * @param function the function to call + * @param ctx parameter passed to function + * @return a struct pointer to pass to tcpip_callbackmsg_trycallback(). + * + * @see tcpip_callbackmsg_trycallback() + * @see tcpip_callbackmsg_delete() + */ +struct tcpip_callback_msg * +tcpip_callbackmsg_new(tcpip_callback_fn function, void *ctx) +{ + struct tcpip_msg *msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return NULL; + } + msg->type = TCPIP_MSG_CALLBACK_STATIC; + msg->msg.cb.function = function; + msg->msg.cb.ctx = ctx; + return (struct tcpip_callback_msg *)msg; +} + +/** + * @ingroup lwip_os + * Free a callback message allocated by tcpip_callbackmsg_new(). + * + * @param msg the message to free + * + * @see tcpip_callbackmsg_new() + */ +void +tcpip_callbackmsg_delete(struct tcpip_callback_msg *msg) +{ + memp_free(MEMP_TCPIP_MSG_API, msg); +} + +/** + * @ingroup lwip_os + * Try to post a callback-message to the tcpip_thread tcpip_mbox. + * + * @param msg pointer to the message to post + * @return sys_mbox_trypost() return code + * + * @see tcpip_callbackmsg_new() + */ +err_t +tcpip_callbackmsg_trycallback(struct tcpip_callback_msg *msg) +{ + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + return sys_mbox_trypost(&tcpip_mbox, msg); +} + +/** + * @ingroup lwip_os + * Try to post a callback-message to the tcpip_thread mbox. + * Same as @ref tcpip_callbackmsg_trycallback but calls sys_mbox_trypost_fromisr(), + * mainly to help FreeRTOS, where calls differ between task level and ISR level. 
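A common pairing for the two trycallback variants above is to allocate the static message once at initialisation and re-post it from the interrupt handler; the sketch below assumes a hypothetical driver routine eth_dpc() that drains the MAC's receive descriptors, so the names are not part of tcpip.c.

/* Illustrative use of a static callback message (not part of tcpip.c). */
#include "lwip/tcpip.h"

static struct tcpip_callback_msg *eth_dpc_msg;

static void eth_dpc(void *ctx)
{
  LWIP_UNUSED_ARG(ctx);
  /* driver-specific: pass received frames to tcpip_input() here */
}

void eth_dpc_init(void)
{
  eth_dpc_msg = tcpip_callbackmsg_new(eth_dpc, NULL); /* allocate once */
  LWIP_ASSERT("eth_dpc_msg != NULL", eth_dpc_msg != NULL);
}

/* called from the Ethernet receive interrupt */
void eth_irq_notify(void)
{
  /* non-blocking post; if the mbox is full the event is dropped and the
     next interrupt will post it again */
  (void)tcpip_callbackmsg_trycallback_fromisr(eth_dpc_msg);
}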
+ * + * @param msg pointer to the message to post + * @return sys_mbox_trypost_fromisr() return code (without change, so this + * knowledge can be used to e.g. propagate "bool needs_scheduling") + * + * @see tcpip_callbackmsg_new() + */ +err_t +tcpip_callbackmsg_trycallback_fromisr(struct tcpip_callback_msg *msg) +{ + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + return sys_mbox_trypost_fromisr(&tcpip_mbox, msg); +} + +/** + * @ingroup lwip_os + * Initialize this module: + * - initialize all sub modules + * - start the tcpip_thread + * + * @param initfunc a function to call when tcpip_thread is running and finished initializing + * @param arg argument to pass to initfunc + */ +void +tcpip_init(tcpip_init_done_fn initfunc, void *arg) +{ + lwip_init(); + + tcpip_init_done = initfunc; + tcpip_init_done_arg = arg; + if (sys_mbox_new(&tcpip_mbox, TCPIP_MBOX_SIZE) != ERR_OK) { + LWIP_ASSERT("failed to create tcpip_thread mbox", 0); + } +#if LWIP_TCPIP_CORE_LOCKING + if (sys_mutex_new(&lock_tcpip_core) != ERR_OK) { + LWIP_ASSERT("failed to create lock_tcpip_core", 0); + } +#endif /* LWIP_TCPIP_CORE_LOCKING */ + + sys_thread_new(TCPIP_THREAD_NAME, tcpip_thread, NULL, TCPIP_THREAD_STACKSIZE, TCPIP_THREAD_PRIO); +} + +/** + * Simple callback function used with tcpip_callback to free a pbuf + * (pbuf_free has a wrong signature for tcpip_callback) + * + * @param p The pbuf (chain) to be dereferenced. + */ +static void +pbuf_free_int(void *p) +{ + struct pbuf *q = (struct pbuf *)p; + pbuf_free(q); +} + +/** + * A simple wrapper function that allows you to free a pbuf from interrupt context. + * + * @param p The pbuf (chain) to be dereferenced. + * @return ERR_OK if callback could be enqueued, an err_t if not + */ +err_t +pbuf_free_callback(struct pbuf *p) +{ + return tcpip_try_callback(pbuf_free_int, p); +} + +/** + * A simple wrapper function that allows you to free heap memory from + * interrupt context. + * + * @param m the heap memory to free + * @return ERR_OK if callback could be enqueued, an err_t if not + */ +err_t +mem_free_callback(void *m) +{ + return tcpip_try_callback(mem_free, m); +} + +#endif /* !NO_SYS */ diff --git a/Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c b/Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c new file mode 100644 index 0000000..269f4a4 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c @@ -0,0 +1,1463 @@ +/** + * @file + * MQTT client + * + * @defgroup mqtt MQTT client + * @ingroup apps + * @verbinclude mqtt_client.txt + */ + +/* + * Copyright (c) 2016 Erik Andersson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
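Tying the tcpip.c API together, a typical application starts the stack once with tcpip_init() and performs the remaining bring-up from the init-done callback, which already runs in tcpip_thread; the semaphore and function names below are assumptions of this sketch, not part of the file.

/* Illustrative startup sequence (application code, not part of tcpip.c). */
#include "lwip/tcpip.h"
#include "lwip/sys.h"

static sys_sem_t init_done_sem;

static void lwip_init_done(void *arg)
{
  LWIP_UNUSED_ARG(arg);
  /* runs in tcpip_thread: a safe place to add netifs, start DHCP, etc. */
  sys_sem_signal(&init_done_sem);
}

void network_start(void)
{
  if (sys_sem_new(&init_done_sem, 0) != ERR_OK) {
    LWIP_ASSERT("failed to create init_done_sem", 0);
  }
  tcpip_init(lwip_init_done, NULL);
  sys_arch_sem_wait(&init_done_sem, 0); /* block until tcpip_thread has initialized */
  sys_sem_free(&init_done_sem);
}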
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack + * + * Author: Erik Andersson + * + * + * @todo: + * - Handle large outgoing payloads for PUBLISH messages + * - Fix restriction of a single topic in each (UN)SUBSCRIBE message (protocol has support for multiple topics) + * - Add support for legacy MQTT protocol version + * + * Please coordinate changes and requests with Erik Andersson + * Erik Andersson + * + */ +#include "lwip/apps/mqtt.h" +#include "lwip/apps/mqtt_priv.h" +#include "lwip/timeouts.h" +#include "lwip/ip_addr.h" +#include "lwip/mem.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" +#include "lwip/altcp.h" +#include "lwip/altcp_tcp.h" +#include "lwip/altcp_tls.h" +#include + +#if LWIP_TCP && LWIP_CALLBACK_API + +/** + * MQTT_DEBUG: Default is off. + */ +#if !defined MQTT_DEBUG || defined __DOXYGEN__ +#define MQTT_DEBUG LWIP_DBG_OFF +#endif + +#define MQTT_DEBUG_TRACE (MQTT_DEBUG | LWIP_DBG_TRACE) +#define MQTT_DEBUG_STATE (MQTT_DEBUG | LWIP_DBG_STATE) +#define MQTT_DEBUG_WARN (MQTT_DEBUG | LWIP_DBG_LEVEL_WARNING) +#define MQTT_DEBUG_WARN_STATE (MQTT_DEBUG | LWIP_DBG_LEVEL_WARNING | LWIP_DBG_STATE) +#define MQTT_DEBUG_SERIOUS (MQTT_DEBUG | LWIP_DBG_LEVEL_SERIOUS) + + + +/** + * MQTT client connection states + */ +enum { + TCP_DISCONNECTED, + TCP_CONNECTING, + MQTT_CONNECTING, + MQTT_CONNECTED +}; + +/** + * MQTT control message types + */ +enum mqtt_message_type { + MQTT_MSG_TYPE_CONNECT = 1, + MQTT_MSG_TYPE_CONNACK = 2, + MQTT_MSG_TYPE_PUBLISH = 3, + MQTT_MSG_TYPE_PUBACK = 4, + MQTT_MSG_TYPE_PUBREC = 5, + MQTT_MSG_TYPE_PUBREL = 6, + MQTT_MSG_TYPE_PUBCOMP = 7, + MQTT_MSG_TYPE_SUBSCRIBE = 8, + MQTT_MSG_TYPE_SUBACK = 9, + MQTT_MSG_TYPE_UNSUBSCRIBE = 10, + MQTT_MSG_TYPE_UNSUBACK = 11, + MQTT_MSG_TYPE_PINGREQ = 12, + MQTT_MSG_TYPE_PINGRESP = 13, + MQTT_MSG_TYPE_DISCONNECT = 14 +}; + +/** Helpers to extract control packet type and qos from first byte in fixed header */ +#define MQTT_CTL_PACKET_TYPE(fixed_hdr_byte0) ((fixed_hdr_byte0 & 0xf0) >> 4) +#define MQTT_CTL_PACKET_QOS(fixed_hdr_byte0) ((fixed_hdr_byte0 & 0x6) >> 1) + +/** + * MQTT connect flags, only used in CONNECT message + */ +enum mqtt_connect_flag { + MQTT_CONNECT_FLAG_USERNAME = 1 << 7, + MQTT_CONNECT_FLAG_PASSWORD = 1 << 6, + MQTT_CONNECT_FLAG_WILL_RETAIN = 1 << 5, + MQTT_CONNECT_FLAG_WILL = 1 << 2, + MQTT_CONNECT_FLAG_CLEAN_SESSION = 1 << 1 +}; + + +static void mqtt_cyclic_timer(void *arg); + +#if defined(LWIP_DEBUG) +static const char *const mqtt_message_type_str[15] = { + "UNDEFINED", + "CONNECT", + "CONNACK", + "PUBLISH", + "PUBACK", + "PUBREC", + "PUBREL", + "PUBCOMP", + "SUBSCRIBE", + "SUBACK", + "UNSUBSCRIBE", + "UNSUBACK", + "PINGREQ", + "PINGRESP", + "DISCONNECT" +}; + +/** + * Message type value to string + * @param msg_type see enum mqtt_message_type + * + * @return Control message type text string + */ +static const char * +mqtt_msg_type_to_str(u8_t msg_type) +{ + if (msg_type >= LWIP_ARRAYSIZE(mqtt_message_type_str)) { + msg_type = 0; + } + return mqtt_message_type_str[msg_type]; +} + +#endif + 
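To make the two fixed-header macros above concrete: a first byte of 0x32 decodes to a PUBLISH packet sent with QoS 1 and the DUP and RETAIN flags clear, as this small illustrative snippet (not part of mqtt.c) spells out.

/* Illustrative decoding of an MQTT fixed-header byte (not part of mqtt.c). */
static void mqtt_fixed_hdr_decode_example(void)
{
  u8_t hdr0 = 0x32;                        /* 0011 0010: PUBLISH, DUP=0, QoS=1, RETAIN=0 */
  u8_t type = MQTT_CTL_PACKET_TYPE(hdr0);  /* (0x32 & 0xf0) >> 4 == 3 == MQTT_MSG_TYPE_PUBLISH */
  u8_t qos  = MQTT_CTL_PACKET_QOS(hdr0);   /* (0x32 & 0x06) >> 1 == 1 */
  LWIP_UNUSED_ARG(type);
  LWIP_UNUSED_ARG(qos);
}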
+ +/** + * Generate MQTT packet identifier + * @param client MQTT client + * @return New packet identifier, range 1 to 65535 + */ +static u16_t +msg_generate_packet_id(mqtt_client_t *client) +{ + client->pkt_id_seq++; + if (client->pkt_id_seq == 0) { + client->pkt_id_seq++; + } + return client->pkt_id_seq; +} + +/*--------------------------------------------------------------------------------------------------------------------- */ +/* Output ring buffer */ + +/** Add single item to ring buffer */ +static void +mqtt_ringbuf_put(struct mqtt_ringbuf_t *rb, u8_t item) +{ + rb->buf[rb->put] = item; + rb->put++; + if (rb->put >= MQTT_OUTPUT_RINGBUF_SIZE) { + rb->put = 0; + } +} + +/** Return pointer to ring buffer get position */ +static u8_t * +mqtt_ringbuf_get_ptr(struct mqtt_ringbuf_t *rb) +{ + return &rb->buf[rb->get]; +} + +static void +mqtt_ringbuf_advance_get_idx(struct mqtt_ringbuf_t *rb, u16_t len) +{ + LWIP_ASSERT("mqtt_ringbuf_advance_get_idx: len < MQTT_OUTPUT_RINGBUF_SIZE", len < MQTT_OUTPUT_RINGBUF_SIZE); + + rb->get += len; + if (rb->get >= MQTT_OUTPUT_RINGBUF_SIZE) { + rb->get = rb->get - MQTT_OUTPUT_RINGBUF_SIZE; + } +} + +/** Return number of bytes in ring buffer */ +static u16_t +mqtt_ringbuf_len(struct mqtt_ringbuf_t *rb) +{ + u32_t len = rb->put - rb->get; + if (len > 0xFFFF) { + len += MQTT_OUTPUT_RINGBUF_SIZE; + } + return (u16_t)len; +} + +/** Return number of bytes free in ring buffer */ +#define mqtt_ringbuf_free(rb) (MQTT_OUTPUT_RINGBUF_SIZE - mqtt_ringbuf_len(rb)) + +/** Return number of bytes possible to read without wrapping around */ +#define mqtt_ringbuf_linear_read_length(rb) LWIP_MIN(mqtt_ringbuf_len(rb), (MQTT_OUTPUT_RINGBUF_SIZE - (rb)->get)) + +/** + * Try send as many bytes as possible from output ring buffer + * @param rb Output ring buffer + * @param tpcb TCP connection handle + */ +static void +mqtt_output_send(struct mqtt_ringbuf_t *rb, struct altcp_pcb *tpcb) +{ + err_t err; + u8_t wrap = 0; + u16_t ringbuf_lin_len = mqtt_ringbuf_linear_read_length(rb); + u16_t send_len = altcp_sndbuf(tpcb); + LWIP_ASSERT("mqtt_output_send: tpcb != NULL", tpcb != NULL); + + if (send_len == 0 || ringbuf_lin_len == 0) { + return; + } + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_output_send: tcp_sndbuf: %d bytes, ringbuf_linear_available: %d, get %d, put %d\n", + send_len, ringbuf_lin_len, rb->get, rb->put)); + + if (send_len > ringbuf_lin_len) { + /* Space in TCP output buffer is larger than available in ring buffer linear portion */ + send_len = ringbuf_lin_len; + /* Wrap around if more data in ring buffer after linear portion */ + wrap = (mqtt_ringbuf_len(rb) > ringbuf_lin_len); + } + err = altcp_write(tpcb, mqtt_ringbuf_get_ptr(rb), send_len, TCP_WRITE_FLAG_COPY | (wrap ? 
TCP_WRITE_FLAG_MORE : 0)); + if ((err == ERR_OK) && wrap) { + mqtt_ringbuf_advance_get_idx(rb, send_len); + /* Use the lesser one of ring buffer linear length and TCP send buffer size */ + send_len = LWIP_MIN(altcp_sndbuf(tpcb), mqtt_ringbuf_linear_read_length(rb)); + err = altcp_write(tpcb, mqtt_ringbuf_get_ptr(rb), send_len, TCP_WRITE_FLAG_COPY); + } + + if (err == ERR_OK) { + mqtt_ringbuf_advance_get_idx(rb, send_len); + /* Flush */ + altcp_output(tpcb); + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_output_send: Send failed with err %d (\"%s\")\n", err, lwip_strerr(err))); + } +} + + + +/*--------------------------------------------------------------------------------------------------------------------- */ +/* Request queue */ + +/** + * Create request item + * @param r_objs Pointer to request objects + * @param r_objs_len Number of array entries + * @param pkt_id Packet identifier of request + * @param cb Packet callback to call when requests lifetime ends + * @param arg Parameter following callback + * @return Request or NULL if failed to create + */ +static struct mqtt_request_t * +mqtt_create_request(struct mqtt_request_t *r_objs, size_t r_objs_len, u16_t pkt_id, mqtt_request_cb_t cb, void *arg) +{ + struct mqtt_request_t *r = NULL; + u8_t n; + LWIP_ASSERT("mqtt_create_request: r_objs != NULL", r_objs != NULL); + for (n = 0; n < r_objs_len; n++) { + /* Item point to itself if not in use */ + if (r_objs[n].next == &r_objs[n]) { + r = &r_objs[n]; + r->next = NULL; + r->cb = cb; + r->arg = arg; + r->pkt_id = pkt_id; + break; + } + } + return r; +} + + +/** + * Append request to pending request queue + * @param tail Pointer to request queue tail pointer + * @param r Request to append + */ +static void +mqtt_append_request(struct mqtt_request_t **tail, struct mqtt_request_t *r) +{ + struct mqtt_request_t *head = NULL; + s16_t time_before = 0; + struct mqtt_request_t *iter; + + LWIP_ASSERT("mqtt_append_request: tail != NULL", tail != NULL); + + /* Iterate trough queue to find head, and count total timeout time */ + for (iter = *tail; iter != NULL; iter = iter->next) { + time_before += iter->timeout_diff; + head = iter; + } + + LWIP_ASSERT("mqtt_append_request: time_before <= MQTT_REQ_TIMEOUT", time_before <= MQTT_REQ_TIMEOUT); + r->timeout_diff = MQTT_REQ_TIMEOUT - time_before; + if (head == NULL) { + *tail = r; + } else { + head->next = r; + } +} + + +/** + * Delete request item + * @param r Request item to delete + */ +static void +mqtt_delete_request(struct mqtt_request_t *r) +{ + if (r != NULL) { + r->next = r; + } +} + +/** + * Remove a request item with a specific packet identifier from request queue + * @param tail Pointer to request queue tail pointer + * @param pkt_id Packet identifier of request to take + * @return Request item if found, NULL if not + */ +static struct mqtt_request_t * +mqtt_take_request(struct mqtt_request_t **tail, u16_t pkt_id) +{ + struct mqtt_request_t *iter = NULL, *prev = NULL; + LWIP_ASSERT("mqtt_take_request: tail != NULL", tail != NULL); + /* Search all request for pkt_id */ + for (iter = *tail; iter != NULL; iter = iter->next) { + if (iter->pkt_id == pkt_id) { + break; + } + prev = iter; + } + + /* If request was found */ + if (iter != NULL) { + /* unchain */ + if (prev == NULL) { + *tail = iter->next; + } else { + prev->next = iter->next; + } + /* If exists, add remaining timeout time for the request to next */ + if (iter->next != NULL) { + iter->next->timeout_diff += iter->timeout_diff; + } + iter->next = NULL; + } + return iter; +} + +/** + * 
Handle requests timeout + * @param tail Pointer to request queue tail pointer + * @param t Time since last call in seconds + */ +static void +mqtt_request_time_elapsed(struct mqtt_request_t **tail, u8_t t) +{ + struct mqtt_request_t *r; + LWIP_ASSERT("mqtt_request_time_elapsed: tail != NULL", tail != NULL); + r = *tail; + while (t > 0 && r != NULL) { + if (t >= r->timeout_diff) { + t -= (u8_t)r->timeout_diff; + /* Unchain */ + *tail = r->next; + /* Notify upper layer about timeout */ + if (r->cb != NULL) { + r->cb(r->arg, ERR_TIMEOUT); + } + mqtt_delete_request(r); + /* Tail might be be modified in callback, so re-read it in every iteration */ + r = *(struct mqtt_request_t *const volatile *)tail; + } else { + r->timeout_diff -= t; + t = 0; + } + } +} + +/** + * Free all request items + * @param tail Pointer to request queue tail pointer + */ +static void +mqtt_clear_requests(struct mqtt_request_t **tail) +{ + struct mqtt_request_t *iter, *next; + LWIP_ASSERT("mqtt_clear_requests: tail != NULL", tail != NULL); + for (iter = *tail; iter != NULL; iter = next) { + next = iter->next; + mqtt_delete_request(iter); + } + *tail = NULL; +} +/** + * Initialize all request items + * @param r_objs Pointer to request objects + * @param r_objs_len Number of array entries + */ +static void +mqtt_init_requests(struct mqtt_request_t *r_objs, size_t r_objs_len) +{ + u8_t n; + LWIP_ASSERT("mqtt_init_requests: r_objs != NULL", r_objs != NULL); + for (n = 0; n < r_objs_len; n++) { + /* Item pointing to itself indicates unused */ + r_objs[n].next = &r_objs[n]; + } +} + +/*--------------------------------------------------------------------------------------------------------------------- */ +/* Output message build helpers */ + + +static void +mqtt_output_append_u8(struct mqtt_ringbuf_t *rb, u8_t value) +{ + mqtt_ringbuf_put(rb, value); +} + +static +void mqtt_output_append_u16(struct mqtt_ringbuf_t *rb, u16_t value) +{ + mqtt_ringbuf_put(rb, value >> 8); + mqtt_ringbuf_put(rb, value & 0xff); +} + +static void +mqtt_output_append_buf(struct mqtt_ringbuf_t *rb, const void *data, u16_t length) +{ + u16_t n; + for (n = 0; n < length; n++) { + mqtt_ringbuf_put(rb, ((const u8_t *)data)[n]); + } +} + +static void +mqtt_output_append_string(struct mqtt_ringbuf_t *rb, const char *str, u16_t length) +{ + u16_t n; + mqtt_ringbuf_put(rb, length >> 8); + mqtt_ringbuf_put(rb, length & 0xff); + for (n = 0; n < length; n++) { + mqtt_ringbuf_put(rb, str[n]); + } +} + +/** + * Append fixed header + * @param rb Output ring buffer + * @param msg_type see enum mqtt_message_type + * @param fdup MQTT DUP flag + * @param fqos MQTT QoS field + * @param fretain MQTT retain flag + * @param r_length Remaining length after fixed header + */ + +static void +mqtt_output_append_fixed_header(struct mqtt_ringbuf_t *rb, u8_t msg_type, u8_t fdup, + u8_t fqos, u8_t fretain, u16_t r_length) +{ + /* Start with control byte */ + mqtt_output_append_u8(rb, (((msg_type & 0x0f) << 4) | ((fdup & 1) << 3) | ((fqos & 3) << 1) | (fretain & 1))); + /* Encode remaining length field */ + do { + mqtt_output_append_u8(rb, (r_length & 0x7f) | (r_length >= 128 ? 
0x80 : 0)); + r_length >>= 7; + } while (r_length > 0); +} + + +/** + * Check output buffer space + * @param rb Output ring buffer + * @param r_length Remaining length after fixed header + * @return 1 if message will fit, 0 if not enough buffer space + */ +static u8_t +mqtt_output_check_space(struct mqtt_ringbuf_t *rb, u16_t r_length) +{ + /* Start with length of type byte + remaining length */ + u16_t total_len = 1 + r_length; + + LWIP_ASSERT("mqtt_output_check_space: rb != NULL", rb != NULL); + + /* Calculate number of required bytes to contain the remaining bytes field and add to total*/ + do { + total_len++; + r_length >>= 7; + } while (r_length > 0); + + return (total_len <= mqtt_ringbuf_free(rb)); +} + + +/** + * Close connection to server + * @param client MQTT client + * @param reason Reason for disconnection + */ +static void +mqtt_close(mqtt_client_t *client, mqtt_connection_status_t reason) +{ + LWIP_ASSERT("mqtt_close: client != NULL", client != NULL); + + /* Bring down TCP connection if not already done */ + if (client->conn != NULL) { + err_t res; + altcp_recv(client->conn, NULL); + altcp_err(client->conn, NULL); + altcp_sent(client->conn, NULL); + res = altcp_close(client->conn); + if (res != ERR_OK) { + altcp_abort(client->conn); + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_close: Close err=%s\n", lwip_strerr(res))); + } + client->conn = NULL; + } + + /* Remove all pending requests */ + mqtt_clear_requests(&client->pend_req_queue); + /* Stop cyclic timer */ + sys_untimeout(mqtt_cyclic_timer, client); + + /* Notify upper layer of disconnection if changed state */ + if (client->conn_state != TCP_DISCONNECTED) { + + client->conn_state = TCP_DISCONNECTED; + if (client->connect_cb != NULL) { + client->connect_cb(client, client->connect_arg, reason); + } + } +} + + +/** + * Interval timer, called every MQTT_CYCLIC_TIMER_INTERVAL seconds in MQTT_CONNECTING and MQTT_CONNECTED states + * @param arg MQTT client + */ +static void +mqtt_cyclic_timer(void *arg) +{ + u8_t restart_timer = 1; + mqtt_client_t *client = (mqtt_client_t *)arg; + LWIP_ASSERT("mqtt_cyclic_timer: client != NULL", client != NULL); + + if (client->conn_state == MQTT_CONNECTING) { + client->cyclic_tick++; + if ((client->cyclic_tick * MQTT_CYCLIC_TIMER_INTERVAL) >= MQTT_CONNECT_TIMOUT) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_cyclic_timer: CONNECT attempt to server timed out\n")); + /* Disconnect TCP */ + mqtt_close(client, MQTT_CONNECT_TIMEOUT); + restart_timer = 0; + } + } else if (client->conn_state == MQTT_CONNECTED) { + /* Handle timeout for pending requests */ + mqtt_request_time_elapsed(&client->pend_req_queue, MQTT_CYCLIC_TIMER_INTERVAL); + + /* keep_alive > 0 means keep alive functionality shall be used */ + if (client->keep_alive > 0) { + + client->server_watchdog++; + /* If reception from server has been idle for 1.5*keep_alive time, server is considered unresponsive */ + if ((client->server_watchdog * MQTT_CYCLIC_TIMER_INTERVAL) > (client->keep_alive + client->keep_alive / 2)) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_cyclic_timer: Server incoming keep-alive timeout\n")); + mqtt_close(client, MQTT_CONNECT_TIMEOUT); + restart_timer = 0; + } + + /* If time for a keep alive message to be sent, transmission has been idle for keep_alive time */ + if ((client->cyclic_tick * MQTT_CYCLIC_TIMER_INTERVAL) >= client->keep_alive) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_cyclic_timer: Sending keep-alive message to server\n")); + if (mqtt_output_check_space(&client->output, 0) != 0) { + 
mqtt_output_append_fixed_header(&client->output, MQTT_MSG_TYPE_PINGREQ, 0, 0, 0, 0); + client->cyclic_tick = 0; + } + } else { + client->cyclic_tick++; + } + } + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_cyclic_timer: Timer should not be running in state %d\n", client->conn_state)); + restart_timer = 0; + } + if (restart_timer) { + sys_timeout(MQTT_CYCLIC_TIMER_INTERVAL * 1000, mqtt_cyclic_timer, arg); + } +} + + +/** + * Send PUBACK, PUBREC or PUBREL response message + * @param client MQTT client + * @param msg PUBACK, PUBREC or PUBREL + * @param pkt_id Packet identifier + * @param qos QoS value + * @return ERR_OK if successful, ERR_MEM if out of memory + */ +static err_t +pub_ack_rec_rel_response(mqtt_client_t *client, u8_t msg, u16_t pkt_id, u8_t qos) +{ + err_t err = ERR_OK; + if (mqtt_output_check_space(&client->output, 2)) { + mqtt_output_append_fixed_header(&client->output, msg, 0, qos, 0, 2); + mqtt_output_append_u16(&client->output, pkt_id); + mqtt_output_send(&client->output, client->conn); + } else { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("pub_ack_rec_rel_response: OOM creating response: %s with pkt_id: %d\n", + mqtt_msg_type_to_str(msg), pkt_id)); + err = ERR_MEM; + } + return err; +} + +/** + * Subscribe response from server + * @param r Matching request + * @param result Result code from server + */ +static void +mqtt_incomming_suback(struct mqtt_request_t *r, u8_t result) +{ + if (r->cb != NULL) { + r->cb(r->arg, result < 3 ? ERR_OK : ERR_ABRT); + } +} + + +/** + * Complete MQTT message received or buffer full + * @param client MQTT client + * @param fixed_hdr_idx header index + * @param length length received part + * @param remaining_length Remaining length of complete message + */ +static mqtt_connection_status_t +mqtt_message_received(mqtt_client_t *client, u8_t fixed_hdr_idx, u16_t length, u32_t remaining_length) +{ + mqtt_connection_status_t res = MQTT_CONNECT_ACCEPTED; + + u8_t *var_hdr_payload = client->rx_buffer + fixed_hdr_idx; + size_t var_hdr_payload_bufsize = sizeof(client->rx_buffer) - fixed_hdr_idx; + + /* Control packet type */ + u8_t pkt_type = MQTT_CTL_PACKET_TYPE(client->rx_buffer[0]); + u16_t pkt_id = 0; + + LWIP_ASSERT("client->msg_idx < MQTT_VAR_HEADER_BUFFER_LEN", client->msg_idx < MQTT_VAR_HEADER_BUFFER_LEN); + LWIP_ASSERT("fixed_hdr_idx <= client->msg_idx", fixed_hdr_idx <= client->msg_idx); + LWIP_ERROR("buffer length mismatch", fixed_hdr_idx + length <= MQTT_VAR_HEADER_BUFFER_LEN, + return MQTT_CONNECT_DISCONNECTED); + + if (pkt_type == MQTT_MSG_TYPE_CONNACK) { + if (client->conn_state == MQTT_CONNECTING) { + if (length < 2) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short CONNACK message\n")); + goto out_disconnect; + } + /* Get result code from CONNACK */ + res = (mqtt_connection_status_t)var_hdr_payload[1]; + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_message_received: Connect response code %d\n", res)); + if (res == MQTT_CONNECT_ACCEPTED) { + /* Reset cyclic_tick when changing to connected state */ + client->cyclic_tick = 0; + client->conn_state = MQTT_CONNECTED; + /* Notify upper layer */ + if (client->connect_cb != 0) { + client->connect_cb(client, client->connect_arg, res); + } + } + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_message_received: Received CONNACK in connected state\n")); + } + } else if (pkt_type == MQTT_MSG_TYPE_PINGRESP) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ( "mqtt_message_received: Received PINGRESP from server\n")); + + } else if (pkt_type == MQTT_MSG_TYPE_PUBLISH) { + u16_t payload_offset = 0; + u16_t 
payload_length = length; + u8_t qos = MQTT_CTL_PACKET_QOS(client->rx_buffer[0]); + + if (client->msg_idx <= MQTT_VAR_HEADER_BUFFER_LEN) { + /* Should have topic and pkt id*/ + u8_t *topic; + u16_t after_topic; + u8_t bkp; + u16_t topic_len; + u16_t qos_len = (qos ? 2U : 0U); + if (length < 2 + qos_len) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short PUBLISH packet\n")); + goto out_disconnect; + } + topic_len = var_hdr_payload[0]; + topic_len = (topic_len << 8) + (u16_t)(var_hdr_payload[1]); + if ((topic_len > length - (2 + qos_len)) || + (topic_len > var_hdr_payload_bufsize - (2 + qos_len))) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short PUBLISH packet (topic)\n")); + goto out_disconnect; + } + + topic = var_hdr_payload + 2; + after_topic = 2 + topic_len; + /* Check buffer length, add one byte even for QoS 0 so that zero termination will fit */ + if ((after_topic + (qos ? 2U : 1U)) > var_hdr_payload_bufsize) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_message_received: Receive buffer can not fit topic + pkt_id\n")); + goto out_disconnect; + } + + /* id for QoS 1 and 2 */ + if (qos > 0) { + if (length < after_topic + 2U) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short PUBLISH packet (after_topic)\n")); + goto out_disconnect; + } + client->inpub_pkt_id = ((u16_t)var_hdr_payload[after_topic] << 8) + (u16_t)var_hdr_payload[after_topic + 1]; + after_topic += 2; + } else { + client->inpub_pkt_id = 0; + } + /* Take backup of byte after topic */ + bkp = topic[topic_len]; + /* Zero terminate string */ + topic[topic_len] = 0; + /* Payload data remaining in receive buffer */ + payload_length = length - after_topic; + payload_offset = after_topic; + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_incomming_publish: Received message with QoS %d at topic: %s, payload length %"U32_F"\n", + qos, topic, remaining_length + payload_length)); + if (client->pub_cb != NULL) { + client->pub_cb(client->inpub_arg, (const char *)topic, remaining_length + payload_length); + } + /* Restore byte after topic */ + topic[topic_len] = bkp; + } + if (payload_length > 0 || remaining_length == 0) { + if (length < (size_t)(payload_offset + payload_length)) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short packet (payload)\n")); + goto out_disconnect; + } + client->data_cb(client->inpub_arg, var_hdr_payload + payload_offset, payload_length, remaining_length == 0 ? MQTT_DATA_FLAG_LAST : 0); + /* Reply if QoS > 0 */ + if (remaining_length == 0 && qos > 0) { + /* Send PUBACK for QoS 1 or PUBREC for QoS 2 */ + u8_t resp_msg = (qos == 1) ? 
MQTT_MSG_TYPE_PUBACK : MQTT_MSG_TYPE_PUBREC; + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_incomming_publish: Sending publish response: %s with pkt_id: %d\n", + mqtt_msg_type_to_str(resp_msg), client->inpub_pkt_id)); + pub_ack_rec_rel_response(client, resp_msg, client->inpub_pkt_id, 0); + } + } + } else { + /* Get packet identifier */ + pkt_id = (u16_t)var_hdr_payload[0] << 8; + pkt_id |= (u16_t)var_hdr_payload[1]; + if (pkt_id == 0) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_message_received: Got message with illegal packet identifier: 0\n")); + goto out_disconnect; + } + if (pkt_type == MQTT_MSG_TYPE_PUBREC) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_message_received: PUBREC, sending PUBREL with pkt_id: %d\n", pkt_id)); + pub_ack_rec_rel_response(client, MQTT_MSG_TYPE_PUBREL, pkt_id, 1); + + } else if (pkt_type == MQTT_MSG_TYPE_PUBREL) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_message_received: PUBREL, sending PUBCOMP response with pkt_id: %d\n", pkt_id)); + pub_ack_rec_rel_response(client, MQTT_MSG_TYPE_PUBCOMP, pkt_id, 0); + + } else if (pkt_type == MQTT_MSG_TYPE_SUBACK || pkt_type == MQTT_MSG_TYPE_UNSUBACK || + pkt_type == MQTT_MSG_TYPE_PUBCOMP || pkt_type == MQTT_MSG_TYPE_PUBACK) { + struct mqtt_request_t *r = mqtt_take_request(&client->pend_req_queue, pkt_id); + if (r != NULL) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_message_received: %s response with id %d\n", mqtt_msg_type_to_str(pkt_type), pkt_id)); + if (pkt_type == MQTT_MSG_TYPE_SUBACK) { + if (length < 3) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_message_received: To small SUBACK packet\n")); + goto out_disconnect; + } else { + mqtt_incomming_suback(r, var_hdr_payload[2]); + } + } else if (r->cb != NULL) { + r->cb(r->arg, ERR_OK); + } + mqtt_delete_request(r); + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ( "mqtt_message_received: Received %s reply, with wrong pkt_id: %d\n", mqtt_msg_type_to_str(pkt_type), pkt_id)); + } + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ( "mqtt_message_received: Received unknown message type: %d\n", pkt_type)); + goto out_disconnect; + } + } + return res; +out_disconnect: + return MQTT_CONNECT_DISCONNECTED; +} + + +/** + * MQTT incoming message parser + * @param client MQTT client + * @param p PBUF chain of received data + * @return Connection status + */ +static mqtt_connection_status_t +mqtt_parse_incoming(mqtt_client_t *client, struct pbuf *p) +{ + u16_t in_offset = 0; + u32_t msg_rem_len = 0; + u8_t fixed_hdr_idx = 0; + u8_t b = 0; + + while (p->tot_len > in_offset) { + /* We ALWAYS parse the header here first. Even if the header was not + included in this segment, we re-parse it here by buffering it in + client->rx_buffer. client->msg_idx keeps track of this. */ + if ((fixed_hdr_idx < 2) || ((b & 0x80) != 0)) { + + if (fixed_hdr_idx < client->msg_idx) { + /* parse header from old pbuf (buffered in client->rx_buffer) */ + b = client->rx_buffer[fixed_hdr_idx]; + } else { + /* parse header from this pbuf and save it in client->rx_buffer in case + it comes in segmented */ + b = pbuf_get_at(p, in_offset++); + client->rx_buffer[client->msg_idx++] = b; + } + fixed_hdr_idx++; + + if (fixed_hdr_idx >= 2) { + /* fixed header contains at least 2 bytes but can contain more, depending on + 'remaining length'. All bytes but the last of this have 0x80 set to + indicate more bytes are coming. 
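+                 For example, a remaining length of 321 is carried as the two bytes
+                 0xC1 0x02: 321 = 65 + 2*128, so the first byte is 65 with the 0x80
+                 continuation bit set and the second byte is 2. The shift below rebuilds
+                 the value from these 7-bit groups, least significant group first.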
*/ + msg_rem_len |= (u32_t)(b & 0x7f) << ((fixed_hdr_idx - 2) * 7); + if ((b & 0x80) == 0) { + /* fixed header is done */ + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_parse_incoming: Remaining length after fixed header: %"U32_F"\n", msg_rem_len)); + if (msg_rem_len == 0) { + /* Complete message with no extra headers of payload received */ + mqtt_message_received(client, fixed_hdr_idx, 0, 0); + client->msg_idx = 0; + fixed_hdr_idx = 0; + } else { + /* Bytes remaining in message (changes remaining length if this is + not the first segment of this message) */ + msg_rem_len = (msg_rem_len + fixed_hdr_idx) - client->msg_idx; + } + } + } + } else { + /* Fixed header has been parsed, parse variable header */ + u16_t cpy_len, cpy_start, buffer_space; + + cpy_start = (client->msg_idx - fixed_hdr_idx) % (MQTT_VAR_HEADER_BUFFER_LEN - fixed_hdr_idx) + fixed_hdr_idx; + + /* Allow to copy the lesser one of available length in input data or bytes remaining in message */ + cpy_len = (u16_t)LWIP_MIN((u16_t)(p->tot_len - in_offset), msg_rem_len); + + /* Limit to available space in buffer */ + buffer_space = MQTT_VAR_HEADER_BUFFER_LEN - cpy_start; + if (cpy_len > buffer_space) { + cpy_len = buffer_space; + } + pbuf_copy_partial(p, client->rx_buffer + cpy_start, cpy_len, in_offset); + + /* Advance get and put indexes */ + client->msg_idx += cpy_len; + in_offset += cpy_len; + msg_rem_len -= cpy_len; + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_parse_incoming: msg_idx: %"U32_F", cpy_len: %"U16_F", remaining %"U32_F"\n", client->msg_idx, cpy_len, msg_rem_len)); + if ((msg_rem_len == 0) || (cpy_len == buffer_space)) { + /* Whole message received or buffer is full */ + mqtt_connection_status_t res = mqtt_message_received(client, fixed_hdr_idx, (cpy_start + cpy_len) - fixed_hdr_idx, msg_rem_len); + if (res != MQTT_CONNECT_ACCEPTED) { + return res; + } + if (msg_rem_len == 0) { + /* Reset parser state */ + client->msg_idx = 0; + /* msg_tot_len = 0; */ + fixed_hdr_idx = 0; + } + } + } + } + return MQTT_CONNECT_ACCEPTED; +} + + +/** + * TCP received callback function. @see tcp_recv_fn + * @param arg MQTT client + * @param p PBUF chain of received data + * @param err Passed as return value if not ERR_OK + * @return ERR_OK or err passed into callback + */ +static err_t +mqtt_tcp_recv_cb(void *arg, struct altcp_pcb *pcb, struct pbuf *p, err_t err) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + LWIP_ASSERT("mqtt_tcp_recv_cb: client != NULL", client != NULL); + LWIP_ASSERT("mqtt_tcp_recv_cb: client->conn == pcb", client->conn == pcb); + + if (p == NULL) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_tcp_recv_cb: Recv pbuf=NULL, remote has closed connection\n")); + mqtt_close(client, MQTT_CONNECT_DISCONNECTED); + } else { + mqtt_connection_status_t res; + if (err != ERR_OK) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_tcp_recv_cb: Recv err=%d\n", err)); + pbuf_free(p); + return err; + } + + /* Tell remote that data has been received */ + altcp_recved(pcb, p->tot_len); + res = mqtt_parse_incoming(client, p); + pbuf_free(p); + + if (res != MQTT_CONNECT_ACCEPTED) { + mqtt_close(client, res); + } + /* If keep alive functionality is used */ + if (client->keep_alive != 0) { + /* Reset server alive watchdog */ + client->server_watchdog = 0; + } + + } + return ERR_OK; +} + + +/** + * TCP data sent callback function. 
@see tcp_sent_fn + * @param arg MQTT client + * @param tpcb TCP connection handle + * @param len Number of bytes sent + * @return ERR_OK + */ +static err_t +mqtt_tcp_sent_cb(void *arg, struct altcp_pcb *tpcb, u16_t len) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + + LWIP_UNUSED_ARG(tpcb); + LWIP_UNUSED_ARG(len); + + if (client->conn_state == MQTT_CONNECTED) { + struct mqtt_request_t *r; + + /* Reset keep-alive send timer and server watchdog */ + client->cyclic_tick = 0; + client->server_watchdog = 0; + /* QoS 0 publish has no response from server, so call its callbacks here */ + while ((r = mqtt_take_request(&client->pend_req_queue, 0)) != NULL) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_tcp_sent_cb: Calling QoS 0 publish complete callback\n")); + if (r->cb != NULL) { + r->cb(r->arg, ERR_OK); + } + mqtt_delete_request(r); + } + /* Try send any remaining buffers from output queue */ + mqtt_output_send(&client->output, client->conn); + } + return ERR_OK; +} + +/** + * TCP error callback function. @see tcp_err_fn + * @param arg MQTT client + * @param err Error encountered + */ +static void +mqtt_tcp_err_cb(void *arg, err_t err) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + LWIP_UNUSED_ARG(err); /* only used for debug output */ + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_tcp_err_cb: TCP error callback: error %d, arg: %p\n", err, arg)); + LWIP_ASSERT("mqtt_tcp_err_cb: client != NULL", client != NULL); + /* Set conn to null before calling close as pcb is already deallocated*/ + client->conn = 0; + mqtt_close(client, MQTT_CONNECT_DISCONNECTED); +} + +/** + * TCP poll callback function. @see tcp_poll_fn + * @param arg MQTT client + * @param tpcb TCP connection handle + * @return err ERR_OK + */ +static err_t +mqtt_tcp_poll_cb(void *arg, struct altcp_pcb *tpcb) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + if (client->conn_state == MQTT_CONNECTED) { + /* Try send any remaining buffers from output queue */ + mqtt_output_send(&client->output, tpcb); + } + return ERR_OK; +} + +/** + * TCP connect callback function. @see tcp_connected_fn + * @param arg MQTT client + * @param err Always ERR_OK, mqtt_tcp_err_cb is called in case of error + * @return ERR_OK + */ +static err_t +mqtt_tcp_connect_cb(void *arg, struct altcp_pcb *tpcb, err_t err) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + + if (err != ERR_OK) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_tcp_connect_cb: TCP connect error %d\n", err)); + return err; + } + + /* Initiate receiver state */ + client->msg_idx = 0; + + /* Setup TCP callbacks */ + altcp_recv(tpcb, mqtt_tcp_recv_cb); + altcp_sent(tpcb, mqtt_tcp_sent_cb); + altcp_poll(tpcb, mqtt_tcp_poll_cb, 2); + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_tcp_connect_cb: TCP connection established to server\n")); + /* Enter MQTT connect state */ + client->conn_state = MQTT_CONNECTING; + + /* Start cyclic timer */ + sys_timeout(MQTT_CYCLIC_TIMER_INTERVAL * 1000, mqtt_cyclic_timer, client); + client->cyclic_tick = 0; + + /* Start transmission from output queue, connect message is the first one out*/ + mqtt_output_send(&client->output, client->conn); + + return ERR_OK; +} + + + +/*---------------------------------------------------------------------------------------------------- */ +/* Public API */ + + +/** + * @ingroup mqtt + * MQTT publish function. 
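+ * A minimal usage sketch; the topic string, payload and callback name below are
+ * illustrative placeholders, not part of the API:\code{.c}
+ * static void pub_done_cb(void *arg, err_t err)
+ * {
+ *   if (err != ERR_OK) {
+ *     // publish was not acknowledged by the server (or timed out)
+ *   }
+ * }
+ *
+ * err_t err = mqtt_publish(client, "sensors/temp", "21.5", 4, 1, 0, pub_done_cb, NULL);
+ * \endcode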
+ * @param client MQTT client + * @param topic Publish topic string + * @param payload Data to publish (NULL is allowed) + * @param payload_length Length of payload (0 is allowed) + * @param qos Quality of service, 0 1 or 2 + * @param retain MQTT retain flag + * @param cb Callback to call when publish is complete or has timed out + * @param arg User supplied argument to publish callback + * @return ERR_OK if successful + * ERR_CONN if client is disconnected + * ERR_MEM if short on memory + */ +err_t +mqtt_publish(mqtt_client_t *client, const char *topic, const void *payload, u16_t payload_length, u8_t qos, u8_t retain, + mqtt_request_cb_t cb, void *arg) +{ + struct mqtt_request_t *r; + u16_t pkt_id; + size_t topic_strlen; + size_t total_len; + u16_t topic_len; + u16_t remaining_length; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_publish: client != NULL", client); + LWIP_ASSERT("mqtt_publish: topic != NULL", topic); + LWIP_ERROR("mqtt_publish: TCP disconnected", (client->conn_state != TCP_DISCONNECTED), return ERR_CONN); + + topic_strlen = strlen(topic); + LWIP_ERROR("mqtt_publish: topic length overflow", (topic_strlen <= (0xFFFF - 2)), return ERR_ARG); + topic_len = (u16_t)topic_strlen; + total_len = 2 + topic_len + payload_length; + + if (qos > 0) { + total_len += 2; + /* Generate pkt_id id for QoS1 and 2 */ + pkt_id = msg_generate_packet_id(client); + } else { + /* Use reserved value pkt_id 0 for QoS 0 in request handle */ + pkt_id = 0; + } + LWIP_ERROR("mqtt_publish: total length overflow", (total_len <= 0xFFFF), return ERR_ARG); + remaining_length = (u16_t)total_len; + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_publish: Publish with payload length %d to topic \"%s\"\n", payload_length, topic)); + + r = mqtt_create_request(client->req_list, LWIP_ARRAYSIZE(client->req_list), pkt_id, cb, arg); + if (r == NULL) { + return ERR_MEM; + } + + if (mqtt_output_check_space(&client->output, remaining_length) == 0) { + mqtt_delete_request(r); + return ERR_MEM; + } + /* Append fixed header */ + mqtt_output_append_fixed_header(&client->output, MQTT_MSG_TYPE_PUBLISH, 0, qos, retain, remaining_length); + + /* Append Topic */ + mqtt_output_append_string(&client->output, topic, topic_len); + + /* Append packet if for QoS 1 and 2*/ + if (qos > 0) { + mqtt_output_append_u16(&client->output, pkt_id); + } + + /* Append optional publish payload */ + if ((payload != NULL) && (payload_length > 0)) { + mqtt_output_append_buf(&client->output, payload, payload_length); + } + + mqtt_append_request(&client->pend_req_queue, r); + mqtt_output_send(&client->output, client->conn); + return ERR_OK; +} + + +/** + * @ingroup mqtt + * MQTT subscribe/unsubscribe function. 
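+ * A minimal subscribe sketch; the topic filter and callback name are illustrative
+ * placeholders:\code{.c}
+ * static void sub_done_cb(void *arg, err_t err)
+ * {
+ *   // err is ERR_OK when the server accepted the (un)subscribe request
+ * }
+ *
+ * err_t err = mqtt_sub_unsub(client, "sensors/#", 1, sub_done_cb, NULL, 1);
+ * \endcode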
+ * @param client MQTT client + * @param topic topic to subscribe to + * @param qos Quality of service, 0 1 or 2 (only used for subscribe) + * @param cb Callback to call when subscribe/unsubscribe reponse is received + * @param arg User supplied argument to publish callback + * @param sub 1 for subscribe, 0 for unsubscribe + * @return ERR_OK if successful, @see err_t enum for other results + */ +err_t +mqtt_sub_unsub(mqtt_client_t *client, const char *topic, u8_t qos, mqtt_request_cb_t cb, void *arg, u8_t sub) +{ + size_t topic_strlen; + size_t total_len; + u16_t topic_len; + u16_t remaining_length; + u16_t pkt_id; + struct mqtt_request_t *r; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_sub_unsub: client != NULL", client); + LWIP_ASSERT("mqtt_sub_unsub: topic != NULL", topic); + + topic_strlen = strlen(topic); + LWIP_ERROR("mqtt_sub_unsub: topic length overflow", (topic_strlen <= (0xFFFF - 2)), return ERR_ARG); + topic_len = (u16_t)topic_strlen; + /* Topic string, pkt_id, qos for subscribe */ + total_len = topic_len + 2 + 2 + (sub != 0); + LWIP_ERROR("mqtt_sub_unsub: total length overflow", (total_len <= 0xFFFF), return ERR_ARG); + remaining_length = (u16_t)total_len; + + LWIP_ASSERT("mqtt_sub_unsub: qos < 3", qos < 3); + if (client->conn_state == TCP_DISCONNECTED) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_sub_unsub: Can not (un)subscribe in disconnected state\n")); + return ERR_CONN; + } + + pkt_id = msg_generate_packet_id(client); + r = mqtt_create_request(client->req_list, LWIP_ARRAYSIZE(client->req_list), pkt_id, cb, arg); + if (r == NULL) { + return ERR_MEM; + } + + if (mqtt_output_check_space(&client->output, remaining_length) == 0) { + mqtt_delete_request(r); + return ERR_MEM; + } + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_sub_unsub: Client (un)subscribe to topic \"%s\", id: %d\n", topic, pkt_id)); + + mqtt_output_append_fixed_header(&client->output, sub ? 
MQTT_MSG_TYPE_SUBSCRIBE : MQTT_MSG_TYPE_UNSUBSCRIBE, 0, 1, 0, remaining_length); + /* Packet id */ + mqtt_output_append_u16(&client->output, pkt_id); + /* Topic */ + mqtt_output_append_string(&client->output, topic, topic_len); + /* QoS */ + if (sub != 0) { + mqtt_output_append_u8(&client->output, LWIP_MIN(qos, 2)); + } + + mqtt_append_request(&client->pend_req_queue, r); + mqtt_output_send(&client->output, client->conn); + return ERR_OK; +} + + +/** + * @ingroup mqtt + * Set callback to handle incoming publish requests from server + * @param client MQTT client + * @param pub_cb Callback invoked when publish starts, contain topic and total length of payload + * @param data_cb Callback for each fragment of payload that arrives + * @param arg User supplied argument to both callbacks + */ +void +mqtt_set_inpub_callback(mqtt_client_t *client, mqtt_incoming_publish_cb_t pub_cb, + mqtt_incoming_data_cb_t data_cb, void *arg) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_set_inpub_callback: client != NULL", client != NULL); + client->data_cb = data_cb; + client->pub_cb = pub_cb; + client->inpub_arg = arg; +} + +/** + * @ingroup mqtt + * Create a new MQTT client instance + * @return Pointer to instance on success, NULL otherwise + */ +mqtt_client_t * +mqtt_client_new(void) +{ + LWIP_ASSERT_CORE_LOCKED(); + return (mqtt_client_t *)mem_calloc(1, sizeof(mqtt_client_t)); +} + +/** + * @ingroup mqtt + * Free MQTT client instance + * @param client Pointer to instance to be freed + */ +void +mqtt_client_free(mqtt_client_t *client) +{ + mem_free(client); +} + +/** + * @ingroup mqtt + * Connect to MQTT server + * @param client MQTT client + * @param ip_addr Server IP + * @param port Server port + * @param cb Connection state change callback + * @param arg User supplied argument to connection callback + * @param client_info Client identification and connection options + * @return ERR_OK if successful, @see err_t enum for other results + */ +err_t +mqtt_client_connect(mqtt_client_t *client, const ip_addr_t *ip_addr, u16_t port, mqtt_connection_cb_t cb, void *arg, + const struct mqtt_connect_client_info_t *client_info) +{ + err_t err; + size_t len; + u16_t client_id_length; + /* Length is the sum of 2+"MQTT", protocol level, flags and keep alive */ + u16_t remaining_length = 2 + 4 + 1 + 1 + 2; + u8_t flags = 0, will_topic_len = 0, will_msg_len = 0; + u16_t client_user_len = 0, client_pass_len = 0; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_client_connect: client != NULL", client != NULL); + LWIP_ASSERT("mqtt_client_connect: ip_addr != NULL", ip_addr != NULL); + LWIP_ASSERT("mqtt_client_connect: client_info != NULL", client_info != NULL); + LWIP_ASSERT("mqtt_client_connect: client_info->client_id != NULL", client_info->client_id != NULL); + + if (client->conn_state != TCP_DISCONNECTED) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_client_connect: Already connected\n")); + return ERR_ISCONN; + } + + /* Wipe clean */ + memset(client, 0, sizeof(mqtt_client_t)); + client->connect_arg = arg; + client->connect_cb = cb; + client->keep_alive = client_info->keep_alive; + mqtt_init_requests(client->req_list, LWIP_ARRAYSIZE(client->req_list)); + + /* Build connect message */ + if (client_info->will_topic != NULL && client_info->will_msg != NULL) { + flags |= MQTT_CONNECT_FLAG_WILL; + flags |= (client_info->will_qos & 3) << 3; + if (client_info->will_retain) { + flags |= MQTT_CONNECT_FLAG_WILL_RETAIN; + } + len = strlen(client_info->will_topic); + LWIP_ERROR("mqtt_client_connect: client_info->will_topic length 
overflow", len <= 0xFF, return ERR_VAL); + LWIP_ERROR("mqtt_client_connect: client_info->will_topic length must be > 0", len > 0, return ERR_VAL); + will_topic_len = (u8_t)len; + len = strlen(client_info->will_msg); + LWIP_ERROR("mqtt_client_connect: client_info->will_msg length overflow", len <= 0xFF, return ERR_VAL); + will_msg_len = (u8_t)len; + len = remaining_length + 2 + will_topic_len + 2 + will_msg_len; + LWIP_ERROR("mqtt_client_connect: remaining_length overflow", len <= 0xFFFF, return ERR_VAL); + remaining_length = (u16_t)len; + } + if (client_info->client_user != NULL) { + flags |= MQTT_CONNECT_FLAG_USERNAME; + len = strlen(client_info->client_user); + LWIP_ERROR("mqtt_client_connect: client_info->client_user length overflow", len <= 0xFFFF, return ERR_VAL); + LWIP_ERROR("mqtt_client_connect: client_info->client_user length must be > 0", len > 0, return ERR_VAL); + client_user_len = (u16_t)len; + len = remaining_length + 2 + client_user_len; + LWIP_ERROR("mqtt_client_connect: remaining_length overflow", len <= 0xFFFF, return ERR_VAL); + remaining_length = (u16_t)len; + } + if (client_info->client_pass != NULL) { + flags |= MQTT_CONNECT_FLAG_PASSWORD; + len = strlen(client_info->client_pass); + LWIP_ERROR("mqtt_client_connect: client_info->client_pass length overflow", len <= 0xFFFF, return ERR_VAL); + LWIP_ERROR("mqtt_client_connect: client_info->client_pass length must be > 0", len > 0, return ERR_VAL); + client_pass_len = (u16_t)len; + len = remaining_length + 2 + client_pass_len; + LWIP_ERROR("mqtt_client_connect: remaining_length overflow", len <= 0xFFFF, return ERR_VAL); + remaining_length = (u16_t)len; + } + + /* Don't complicate things, always connect using clean session */ + flags |= MQTT_CONNECT_FLAG_CLEAN_SESSION; + + len = strlen(client_info->client_id); + LWIP_ERROR("mqtt_client_connect: client_info->client_id length overflow", len <= 0xFFFF, return ERR_VAL); + client_id_length = (u16_t)len; + len = remaining_length + 2 + client_id_length; + LWIP_ERROR("mqtt_client_connect: remaining_length overflow", len <= 0xFFFF, return ERR_VAL); + remaining_length = (u16_t)len; + + if (mqtt_output_check_space(&client->output, remaining_length) == 0) { + return ERR_MEM; + } + +#if LWIP_ALTCP && LWIP_ALTCP_TLS + if (client_info->tls_config) { + client->conn = altcp_tls_new(client_info->tls_config, IP_GET_TYPE(ip_addr)); + } else +#endif + { + client->conn = altcp_tcp_new_ip_type(IP_GET_TYPE(ip_addr)); + } + if (client->conn == NULL) { + return ERR_MEM; + } + + /* Set arg pointer for callbacks */ + altcp_arg(client->conn, client); + /* Any local address, pick random local port number */ + err = altcp_bind(client->conn, IP_ADDR_ANY, 0); + if (err != ERR_OK) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_client_connect: Error binding to local ip/port, %d\n", err)); + goto tcp_fail; + } + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_client_connect: Connecting to host: %s at port:%"U16_F"\n", ipaddr_ntoa(ip_addr), port)); + + /* Connect to server */ + err = altcp_connect(client->conn, ip_addr, port, mqtt_tcp_connect_cb); + if (err != ERR_OK) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_client_connect: Error connecting to remote ip/port, %d\n", err)); + goto tcp_fail; + } + /* Set error callback */ + altcp_err(client->conn, mqtt_tcp_err_cb); + client->conn_state = TCP_CONNECTING; + + /* Append fixed header */ + mqtt_output_append_fixed_header(&client->output, MQTT_MSG_TYPE_CONNECT, 0, 0, 0, remaining_length); + /* Append Protocol string */ + mqtt_output_append_string(&client->output, "MQTT", 4); + /* 
Append Protocol level */ + mqtt_output_append_u8(&client->output, 4); + /* Append connect flags */ + mqtt_output_append_u8(&client->output, flags); + /* Append keep-alive */ + mqtt_output_append_u16(&client->output, client_info->keep_alive); + /* Append client id */ + mqtt_output_append_string(&client->output, client_info->client_id, client_id_length); + /* Append will message if used */ + if ((flags & MQTT_CONNECT_FLAG_WILL) != 0) { + mqtt_output_append_string(&client->output, client_info->will_topic, will_topic_len); + mqtt_output_append_string(&client->output, client_info->will_msg, will_msg_len); + } + /* Append user name if given */ + if ((flags & MQTT_CONNECT_FLAG_USERNAME) != 0) { + mqtt_output_append_string(&client->output, client_info->client_user, client_user_len); + } + /* Append password if given */ + if ((flags & MQTT_CONNECT_FLAG_PASSWORD) != 0) { + mqtt_output_append_string(&client->output, client_info->client_pass, client_pass_len); + } + return ERR_OK; + +tcp_fail: + altcp_abort(client->conn); + client->conn = NULL; + return err; +} + + +/** + * @ingroup mqtt + * Disconnect from MQTT server + * @param client MQTT client + */ +void +mqtt_disconnect(mqtt_client_t *client) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_disconnect: client != NULL", client); + /* If connection in not already closed */ + if (client->conn_state != TCP_DISCONNECTED) { + /* Set conn_state before calling mqtt_close to prevent callback from being called */ + client->conn_state = TCP_DISCONNECTED; + mqtt_close(client, (mqtt_connection_status_t)0); + } +} + +/** + * @ingroup mqtt + * Check connection with server + * @param client MQTT client + * @return 1 if connected to server, 0 otherwise + */ +u8_t +mqtt_client_is_connected(mqtt_client_t *client) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_client_is_connected: client != NULL", client); + return client->conn_state == MQTT_CONNECTED; +} + +#endif /* LWIP_TCP && LWIP_CALLBACK_API */ diff --git a/Middlewares/Third_Party/LwIP/src/core/altcp.c b/Middlewares/Third_Party/LwIP/src/core/altcp.c new file mode 100644 index 0000000..d46d6cd --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/altcp.c @@ -0,0 +1,681 @@ +/** + * @file + * @defgroup altcp Application layered TCP Functions + * @ingroup altcp_api + * + * This file contains the common functions for altcp to work. + * For more details see @ref altcp_api. + */ + +/** + * @defgroup altcp_api Application layered TCP Introduction + * @ingroup callbackstyle_api + * + * Overview + * -------- + * altcp (application layered TCP connection API; to be used from TCPIP thread) + * is an abstraction layer that prevents applications linking hard against the + * @ref tcp.h functions while providing the same functionality. It is used to + * e.g. add SSL/TLS (see LWIP_ALTCP_TLS) or proxy-connect support to an application + * written for the tcp callback API without that application knowing the + * protocol details. + * + * * This interface mimics the tcp callback API to the application while preventing + * direct linking (much like virtual functions). + * * This way, an application can make use of other application layer protocols + * on top of TCP without knowing the details (e.g. TLS, proxy connection). + * * This is achieved by simply including "lwip/altcp.h" instead of "lwip/tcp.h", + * replacing "struct tcp_pcb" with "struct altcp_pcb" and prefixing all functions + * with "altcp_" instead of "tcp_". 
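+ * For example, application code written as:\code{.c}
+ * struct altcp_pcb *pcb = altcp_new(NULL);
+ * altcp_recv(pcb, my_recv_fn);
+ * altcp_connect(pcb, &remote_addr, 80, my_connected_fn);
+ * \endcode
+ * (my_recv_fn, my_connected_fn and remote_addr being application-defined) runs on
+ * plain TCP when a NULL allocator is passed, or on a layered protocol such as TLS
+ * when a corresponding allocator is passed to altcp_new() instead.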
+ * + * With altcp support disabled (LWIP_ALTCP==0), applications written against the + * altcp API can still be compiled but are directly linked against the tcp.h + * callback API and then cannot use layered protocols. To minimize code changes + * in this case, the use of altcp_allocators is strongly suggested. + * + * Usage + * ----- + * To make use of this API from an existing tcp raw API application: + * * Include "lwip/altcp.h" instead of "lwip/tcp.h" + * * Replace "struct tcp_pcb" with "struct altcp_pcb" + * * Prefix all called tcp API functions with "altcp_" instead of "tcp_" to link + * against the altcp functions + * * @ref altcp_new (and @ref altcp_new_ip_type/@ref altcp_new_ip6) take + * an @ref altcp_allocator_t as an argument, whereas the original tcp API + * functions take no arguments. + * * An @ref altcp_allocator_t allocator is an object that holds a pointer to an + * allocator object and a corresponding state (e.g. for TLS, the corresponding + * state may hold certificates or keys). This way, the application does not + * even need to know if it uses TLS or pure TCP, this is handled at runtime + * by passing a specific allocator. + * * An application can alternatively bind hard to the altcp_tls API by calling + * @ref altcp_tls_new or @ref altcp_tls_wrap. + * * The TLS layer is not directly implemented by lwIP, but a port to mbedTLS is + * provided. + * * Another altcp layer is proxy-connect to use TLS behind a HTTP proxy (see + * @ref altcp_proxyconnect.h) + * + * altcp_allocator_t + * ----------------- + * An altcp allocator is created by the application by combining an allocator + * callback function and a corresponding state, e.g.:\code{.c} + * static const unsigned char cert[] = {0x2D, ... (see mbedTLS doc for how to create this)}; + * struct altcp_tls_config * conf = altcp_tls_create_config_client(cert, sizeof(cert)); + * altcp_allocator_t tls_allocator = { + * altcp_tls_alloc, conf + * }; + * \endcode + * + * + * struct altcp_tls_config + * ----------------------- + * The struct altcp_tls_config holds state that is needed to create new TLS client + * or server connections (e.g. certificates and private keys). + * + * It is not defined by lwIP itself but by the TLS port (e.g. altcp_tls to mbedTLS + * adaption). However, the parameters used to create it are defined in @ref + * altcp_tls.h (see @ref altcp_tls_create_config_server_privkey_cert for servers + * and @ref altcp_tls_create_config_client/@ref altcp_tls_create_config_client_2wayauth + * for clients). + * + * For mbedTLS, ensure that certificates can be parsed by 'mbedtls_x509_crt_parse()' and + * private keys can be parsed by 'mbedtls_pk_parse_key()'. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/altcp.h" +#include "lwip/priv/altcp_priv.h" +#include "lwip/altcp_tcp.h" +#include "lwip/tcp.h" +#include "lwip/mem.h" + +#include + +extern const struct altcp_functions altcp_tcp_functions; + +/** + * For altcp layer implementations only: allocate a new struct altcp_pcb from the pool + * and zero the memory + */ +struct altcp_pcb * +altcp_alloc(void) +{ + struct altcp_pcb *ret = (struct altcp_pcb *)memp_malloc(MEMP_ALTCP_PCB); + if (ret != NULL) { + memset(ret, 0, sizeof(struct altcp_pcb)); + } + return ret; +} + +/** + * For altcp layer implementations only: return a struct altcp_pcb to the pool + */ +void +altcp_free(struct altcp_pcb *conn) +{ + if (conn) { + if (conn->fns && conn->fns->dealloc) { + conn->fns->dealloc(conn); + } + memp_free(MEMP_ALTCP_PCB, conn); + } +} + +/** + * @ingroup altcp + * altcp_new_ip6: @ref altcp_new for IPv6 + */ +struct altcp_pcb * +altcp_new_ip6(altcp_allocator_t *allocator) +{ + return altcp_new_ip_type(allocator, IPADDR_TYPE_V6); +} + +/** + * @ingroup altcp + * altcp_new: @ref altcp_new for IPv4 + */ +struct altcp_pcb * +altcp_new(altcp_allocator_t *allocator) +{ + return altcp_new_ip_type(allocator, IPADDR_TYPE_V4); +} + +/** + * @ingroup altcp + * altcp_new_ip_type: called by applications to allocate a new pcb with the help of an + * allocator function. 
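+ * For example, with a TLS allocator object set up as shown in the altcp
+ * introduction (here assumed to be named tls_allocator):\code{.c}
+ * struct altcp_pcb *pcb = altcp_new_ip_type(&tls_allocator, IPADDR_TYPE_V4);
+ * if (pcb == NULL) {
+ *   // allocation failed
+ * }
+ * \endcode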
+ * + * @param allocator allocator function and argument + * @param ip_type IP version of the pcb (@ref lwip_ip_addr_type) + * @return a new altcp_pcb or NULL on error + */ +struct altcp_pcb * +altcp_new_ip_type(altcp_allocator_t *allocator, u8_t ip_type) +{ + struct altcp_pcb *conn; + if (allocator == NULL) { + /* no allocator given, create a simple TCP connection */ + return altcp_tcp_new_ip_type(ip_type); + } + if (allocator->alloc == NULL) { + /* illegal allocator */ + return NULL; + } + conn = allocator->alloc(allocator->arg, ip_type); + if (conn == NULL) { + /* allocation failed */ + return NULL; + } + return conn; +} + +/** + * @ingroup altcp + * @see tcp_arg() + */ +void +altcp_arg(struct altcp_pcb *conn, void *arg) +{ + if (conn) { + conn->arg = arg; + } +} + +/** + * @ingroup altcp + * @see tcp_accept() + */ +void +altcp_accept(struct altcp_pcb *conn, altcp_accept_fn accept) +{ + if (conn != NULL) { + conn->accept = accept; + } +} + +/** + * @ingroup altcp + * @see tcp_recv() + */ +void +altcp_recv(struct altcp_pcb *conn, altcp_recv_fn recv) +{ + if (conn) { + conn->recv = recv; + } +} + +/** + * @ingroup altcp + * @see tcp_sent() + */ +void +altcp_sent(struct altcp_pcb *conn, altcp_sent_fn sent) +{ + if (conn) { + conn->sent = sent; + } +} + +/** + * @ingroup altcp + * @see tcp_poll() + */ +void +altcp_poll(struct altcp_pcb *conn, altcp_poll_fn poll, u8_t interval) +{ + if (conn) { + conn->poll = poll; + conn->pollinterval = interval; + if (conn->fns && conn->fns->set_poll) { + conn->fns->set_poll(conn, interval); + } + } +} + +/** + * @ingroup altcp + * @see tcp_err() + */ +void +altcp_err(struct altcp_pcb *conn, altcp_err_fn err) +{ + if (conn) { + conn->err = err; + } +} + +/* Generic functions calling the "virtual" ones */ + +/** + * @ingroup altcp + * @see tcp_recved() + */ +void +altcp_recved(struct altcp_pcb *conn, u16_t len) +{ + if (conn && conn->fns && conn->fns->recved) { + conn->fns->recved(conn, len); + } +} + +/** + * @ingroup altcp + * @see tcp_bind() + */ +err_t +altcp_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port) +{ + if (conn && conn->fns && conn->fns->bind) { + return conn->fns->bind(conn, ipaddr, port); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_connect() + */ +err_t +altcp_connect(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected) +{ + if (conn && conn->fns && conn->fns->connect) { + return conn->fns->connect(conn, ipaddr, port, connected); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_listen_with_backlog_and_err() + */ +struct altcp_pcb * +altcp_listen_with_backlog_and_err(struct altcp_pcb *conn, u8_t backlog, err_t *err) +{ + if (conn && conn->fns && conn->fns->listen) { + return conn->fns->listen(conn, backlog, err); + } + return NULL; +} + +/** + * @ingroup altcp + * @see tcp_abort() + */ +void +altcp_abort(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->abort) { + conn->fns->abort(conn); + } +} + +/** + * @ingroup altcp + * @see tcp_close() + */ +err_t +altcp_close(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->close) { + return conn->fns->close(conn); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_shutdown() + */ +err_t +altcp_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx) +{ + if (conn && conn->fns && conn->fns->shutdown) { + return conn->fns->shutdown(conn, shut_rx, shut_tx); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_write() + */ +err_t +altcp_write(struct altcp_pcb 
*conn, const void *dataptr, u16_t len, u8_t apiflags) +{ + if (conn && conn->fns && conn->fns->write) { + return conn->fns->write(conn, dataptr, len, apiflags); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_output() + */ +err_t +altcp_output(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->output) { + return conn->fns->output(conn); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_mss() + */ +u16_t +altcp_mss(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->mss) { + return conn->fns->mss(conn); + } + return 0; +} + +/** + * @ingroup altcp + * @see tcp_sndbuf() + */ +u16_t +altcp_sndbuf(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->sndbuf) { + return conn->fns->sndbuf(conn); + } + return 0; +} + +/** + * @ingroup altcp + * @see tcp_sndqueuelen() + */ +u16_t +altcp_sndqueuelen(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->sndqueuelen) { + return conn->fns->sndqueuelen(conn); + } + return 0; +} + +void +altcp_nagle_disable(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->nagle_disable) { + conn->fns->nagle_disable(conn); + } +} + +void +altcp_nagle_enable(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->nagle_enable) { + conn->fns->nagle_enable(conn); + } +} + +int +altcp_nagle_disabled(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->nagle_disabled) { + return conn->fns->nagle_disabled(conn); + } + return 0; +} + +/** + * @ingroup altcp + * @see tcp_setprio() + */ +void +altcp_setprio(struct altcp_pcb *conn, u8_t prio) +{ + if (conn && conn->fns && conn->fns->setprio) { + conn->fns->setprio(conn, prio); + } +} + +err_t +altcp_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port) +{ + if (conn && conn->fns && conn->fns->addrinfo) { + return conn->fns->addrinfo(conn, local, addr, port); + } + return ERR_VAL; +} + +ip_addr_t * +altcp_get_ip(struct altcp_pcb *conn, int local) +{ + if (conn && conn->fns && conn->fns->getip) { + return conn->fns->getip(conn, local); + } + return NULL; +} + +u16_t +altcp_get_port(struct altcp_pcb *conn, int local) +{ + if (conn && conn->fns && conn->fns->getport) { + return conn->fns->getport(conn, local); + } + return 0; +} + +#ifdef LWIP_DEBUG +enum tcp_state +altcp_dbg_get_tcp_state(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->dbg_get_tcp_state) { + return conn->fns->dbg_get_tcp_state(conn); + } + return CLOSED; +} +#endif + +/* Default implementations for the "virtual" functions */ + +void +altcp_default_set_poll(struct altcp_pcb *conn, u8_t interval) +{ + if (conn && conn->inner_conn) { + altcp_poll(conn->inner_conn, conn->poll, interval); + } +} + +void +altcp_default_recved(struct altcp_pcb *conn, u16_t len) +{ + if (conn && conn->inner_conn) { + altcp_recved(conn->inner_conn, len); + } +} + +err_t +altcp_default_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port) +{ + if (conn && conn->inner_conn) { + return altcp_bind(conn->inner_conn, ipaddr, port); + } + return ERR_VAL; +} + +err_t +altcp_default_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx) +{ + if (conn) { + if (shut_rx && shut_tx && conn->fns && conn->fns->close) { + /* default shutdown for both sides is close */ + return conn->fns->close(conn); + } + if (conn->inner_conn) { + return altcp_shutdown(conn->inner_conn, shut_rx, shut_tx); + } + } + return ERR_VAL; +} + +err_t +altcp_default_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags) +{ + if (conn && 
conn->inner_conn) { + return altcp_write(conn->inner_conn, dataptr, len, apiflags); + } + return ERR_VAL; +} + +err_t +altcp_default_output(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_output(conn->inner_conn); + } + return ERR_VAL; +} + +u16_t +altcp_default_mss(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_mss(conn->inner_conn); + } + return 0; +} + +u16_t +altcp_default_sndbuf(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_sndbuf(conn->inner_conn); + } + return 0; +} + +u16_t +altcp_default_sndqueuelen(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_sndqueuelen(conn->inner_conn); + } + return 0; +} + +void +altcp_default_nagle_disable(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + altcp_nagle_disable(conn->inner_conn); + } +} + +void +altcp_default_nagle_enable(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + altcp_nagle_enable(conn->inner_conn); + } +} + +int +altcp_default_nagle_disabled(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_nagle_disabled(conn->inner_conn); + } + return 0; +} + +void +altcp_default_setprio(struct altcp_pcb *conn, u8_t prio) +{ + if (conn && conn->inner_conn) { + altcp_setprio(conn->inner_conn, prio); + } +} + +void +altcp_default_dealloc(struct altcp_pcb *conn) +{ + LWIP_UNUSED_ARG(conn); + /* nothing to do */ +} + +err_t +altcp_default_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port) +{ + if (conn && conn->inner_conn) { + return altcp_get_tcp_addrinfo(conn->inner_conn, local, addr, port); + } + return ERR_VAL; +} + +ip_addr_t * +altcp_default_get_ip(struct altcp_pcb *conn, int local) +{ + if (conn && conn->inner_conn) { + return altcp_get_ip(conn->inner_conn, local); + } + return NULL; +} + +u16_t +altcp_default_get_port(struct altcp_pcb *conn, int local) +{ + if (conn && conn->inner_conn) { + return altcp_get_port(conn->inner_conn, local); + } + return 0; +} + +#ifdef LWIP_DEBUG +enum tcp_state +altcp_default_dbg_get_tcp_state(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_dbg_get_tcp_state(conn->inner_conn); + } + return CLOSED; +} +#endif + + +#endif /* LWIP_ALTCP */ diff --git a/Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c b/Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c new file mode 100644 index 0000000..cd619bc --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c @@ -0,0 +1,87 @@ +/** + * @file + * Application layered TCP connection API (to be used from TCPIP thread)\n + * This interface mimics the tcp callback API to the application while preventing + * direct linking (much like virtual functions). + * This way, an application can make use of other application layer protocols + * on top of TCP without knowing the details (e.g. TLS, proxy connection). + * + * This file contains allocation implementation that combine several layers. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/altcp.h" +#include "lwip/altcp_tcp.h" +#include "lwip/altcp_tls.h" +#include "lwip/priv/altcp_priv.h" +#include "lwip/mem.h" + +#include + +#if LWIP_ALTCP_TLS + +/** This standard allocator function creates an altcp pcb for + * TLS over TCP */ +struct altcp_pcb * +altcp_tls_new(struct altcp_tls_config *config, u8_t ip_type) +{ + struct altcp_pcb *inner_conn, *ret; + LWIP_UNUSED_ARG(ip_type); + + inner_conn = altcp_tcp_new_ip_type(ip_type); + if (inner_conn == NULL) { + return NULL; + } + ret = altcp_tls_wrap(config, inner_conn); + if (ret == NULL) { + altcp_close(inner_conn); + } + return ret; +} + +/** This standard allocator function creates an altcp pcb for + * TLS over TCP */ +struct altcp_pcb * +altcp_tls_alloc(void *arg, u8_t ip_type) +{ + return altcp_tls_new((struct altcp_tls_config *)arg, ip_type); +} + +#endif /* LWIP_ALTCP_TLS */ + +#endif /* LWIP_ALTCP */ diff --git a/Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c b/Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c new file mode 100644 index 0000000..b715f04 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c @@ -0,0 +1,543 @@ +/** + * @file + * Application layered TCP connection API (to be used from TCPIP thread)\n + * This interface mimics the tcp callback API to the application while preventing + * direct linking (much like virtual functions). + * This way, an application can make use of other application layer protocols + * on top of TCP without knowing the details (e.g. TLS, proxy connection). + * + * This file contains the base implementation calling into tcp. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/altcp.h" +#include "lwip/altcp_tcp.h" +#include "lwip/priv/altcp_priv.h" +#include "lwip/tcp.h" +#include "lwip/mem.h" + +#include + +#define ALTCP_TCP_ASSERT_CONN(conn) do { \ + LWIP_ASSERT("conn->inner_conn == NULL", (conn)->inner_conn == NULL); \ + LWIP_UNUSED_ARG(conn); /* for LWIP_NOASSERT */ } while(0) +#define ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb) do { \ + LWIP_ASSERT("pcb mismatch", (conn)->state == tpcb); \ + LWIP_UNUSED_ARG(tpcb); /* for LWIP_NOASSERT */ \ + ALTCP_TCP_ASSERT_CONN(conn); } while(0) + + +/* Variable prototype, the actual declaration is at the end of this file + since it contains pointers to static functions declared here */ +extern const struct altcp_functions altcp_tcp_functions; + +static void altcp_tcp_setup(struct altcp_pcb *conn, struct tcp_pcb *tpcb); + +/* callback functions for TCP */ +static err_t +altcp_tcp_accept(void *arg, struct tcp_pcb *new_tpcb, err_t err) +{ + struct altcp_pcb *listen_conn = (struct altcp_pcb *)arg; + if (listen_conn && listen_conn->accept) { + /* create a new altcp_conn to pass to the next 'accept' callback */ + struct altcp_pcb *new_conn = altcp_alloc(); + if (new_conn == NULL) { + return ERR_MEM; + } + altcp_tcp_setup(new_conn, new_tpcb); + return listen_conn->accept(listen_conn->arg, new_conn, err); + } + return ERR_ARG; +} + +static err_t +altcp_tcp_connected(void *arg, struct tcp_pcb *tpcb, err_t err) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + if (conn) { + ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb); + if (conn->connected) { + return conn->connected(conn->arg, conn, err); + } + } + return ERR_OK; +} + +static err_t +altcp_tcp_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + if (conn) { + ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb); + if (conn->recv) { + return conn->recv(conn->arg, conn, p, err); + } + } + if (p != NULL) { + /* prevent memory leaks */ + pbuf_free(p); + } + return ERR_OK; +} + +static err_t +altcp_tcp_sent(void *arg, struct tcp_pcb *tpcb, u16_t len) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + if (conn) { + ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb); + if (conn->sent) { + return conn->sent(conn->arg, conn, len); + } + } + return ERR_OK; +} + +static err_t +altcp_tcp_poll(void *arg, struct tcp_pcb *tpcb) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + if (conn) { + ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb); + if (conn->poll) { + return 
conn->poll(conn->arg, conn); + } + } + return ERR_OK; +} + +static void +altcp_tcp_err(void *arg, err_t err) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + if (conn) { + conn->state = NULL; /* already freed */ + if (conn->err) { + conn->err(conn->arg, err); + } + altcp_free(conn); + } +} + +/* setup functions */ + +static void +altcp_tcp_remove_callbacks(struct tcp_pcb *tpcb) +{ + tcp_arg(tpcb, NULL); + tcp_recv(tpcb, NULL); + tcp_sent(tpcb, NULL); + tcp_err(tpcb, NULL); + tcp_poll(tpcb, NULL, tpcb->pollinterval); +} + +static void +altcp_tcp_setup_callbacks(struct altcp_pcb *conn, struct tcp_pcb *tpcb) +{ + tcp_arg(tpcb, conn); + tcp_recv(tpcb, altcp_tcp_recv); + tcp_sent(tpcb, altcp_tcp_sent); + tcp_err(tpcb, altcp_tcp_err); + /* tcp_poll is set when interval is set by application */ + /* listen is set totally different :-) */ +} + +static void +altcp_tcp_setup(struct altcp_pcb *conn, struct tcp_pcb *tpcb) +{ + altcp_tcp_setup_callbacks(conn, tpcb); + conn->state = tpcb; + conn->fns = &altcp_tcp_functions; +} + +struct altcp_pcb * +altcp_tcp_new_ip_type(u8_t ip_type) +{ + /* Allocate the tcp pcb first to invoke the priority handling code + if we're out of pcbs */ + struct tcp_pcb *tpcb = tcp_new_ip_type(ip_type); + if (tpcb != NULL) { + struct altcp_pcb *ret = altcp_alloc(); + if (ret != NULL) { + altcp_tcp_setup(ret, tpcb); + return ret; + } else { + /* altcp_pcb allocation failed -> free the tcp_pcb too */ + tcp_close(tpcb); + } + } + return NULL; +} + +/** altcp_tcp allocator function fitting to @ref altcp_allocator_t / @ref altcp_new. +* +* arg pointer is not used for TCP. +*/ +struct altcp_pcb * +altcp_tcp_alloc(void *arg, u8_t ip_type) +{ + LWIP_UNUSED_ARG(arg); + return altcp_tcp_new_ip_type(ip_type); +} + +struct altcp_pcb * +altcp_tcp_wrap(struct tcp_pcb *tpcb) +{ + if (tpcb != NULL) { + struct altcp_pcb *ret = altcp_alloc(); + if (ret != NULL) { + altcp_tcp_setup(ret, tpcb); + return ret; + } + } + return NULL; +} + + +/* "virtual" functions calling into tcp */ +static void +altcp_tcp_set_poll(struct altcp_pcb *conn, u8_t interval) +{ + if (conn != NULL) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_poll(pcb, altcp_tcp_poll, interval); + } +} + +static void +altcp_tcp_recved(struct altcp_pcb *conn, u16_t len) +{ + if (conn != NULL) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_recved(pcb, len); + } +} + +static err_t +altcp_tcp_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_bind(pcb, ipaddr, port); +} + +static err_t +altcp_tcp_connect(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + conn->connected = connected; + pcb = (struct tcp_pcb *)conn->state; + return tcp_connect(pcb, ipaddr, port, altcp_tcp_connected); +} + +static struct altcp_pcb * +altcp_tcp_listen(struct altcp_pcb *conn, u8_t backlog, err_t *err) +{ + struct tcp_pcb *pcb; + struct tcp_pcb *lpcb; + if (conn == NULL) { + return NULL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + lpcb = tcp_listen_with_backlog_and_err(pcb, backlog, err); + if (lpcb != NULL) { + conn->state = lpcb; + tcp_accept(lpcb, altcp_tcp_accept); + return conn; + } + return NULL; +} + +static void 
+altcp_tcp_abort(struct altcp_pcb *conn) +{ + if (conn != NULL) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + if (pcb) { + tcp_abort(pcb); + } + } +} + +static err_t +altcp_tcp_close(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + if (pcb) { + err_t err; + tcp_poll_fn oldpoll = pcb->poll; + altcp_tcp_remove_callbacks(pcb); + err = tcp_close(pcb); + if (err != ERR_OK) { + /* not closed, set up all callbacks again */ + altcp_tcp_setup_callbacks(conn, pcb); + /* poll callback is not included in the above */ + tcp_poll(pcb, oldpoll, pcb->pollinterval); + return err; + } + conn->state = NULL; /* unsafe to reference pcb after tcp_close(). */ + } + altcp_free(conn); + return ERR_OK; +} + +static err_t +altcp_tcp_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_shutdown(pcb, shut_rx, shut_tx); +} + +static err_t +altcp_tcp_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_write(pcb, dataptr, len, apiflags); +} + +static err_t +altcp_tcp_output(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_output(pcb); +} + +static u16_t +altcp_tcp_mss(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return 0; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_mss(pcb); +} + +static u16_t +altcp_tcp_sndbuf(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return 0; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_sndbuf(pcb); +} + +static u16_t +altcp_tcp_sndqueuelen(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return 0; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_sndqueuelen(pcb); +} + +static void +altcp_tcp_nagle_disable(struct altcp_pcb *conn) +{ + if (conn && conn->state) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_nagle_disable(pcb); + } +} + +static void +altcp_tcp_nagle_enable(struct altcp_pcb *conn) +{ + if (conn && conn->state) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_nagle_enable(pcb); + } +} + +static int +altcp_tcp_nagle_disabled(struct altcp_pcb *conn) +{ + if (conn && conn->state) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + return tcp_nagle_disabled(pcb); + } + return 0; +} + +static void +altcp_tcp_setprio(struct altcp_pcb *conn, u8_t prio) +{ + if (conn != NULL) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_setprio(pcb, prio); + } +} + +static void +altcp_tcp_dealloc(struct altcp_pcb *conn) +{ + LWIP_UNUSED_ARG(conn); + ALTCP_TCP_ASSERT_CONN(conn); + /* no private state to clean up */ +} + +static err_t +altcp_tcp_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port) +{ + if (conn) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + 
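These thin wrappers exist so that application code only ever sees the altcp_* front-end; the same calls behave identically whether the pcb was created by altcp_tcp_alloc() or by a TLS wrapper, because each call is dispatched through the per-connection function table. A hedged application-side sketch (the message and flag choice are illustrative only):

static err_t send_line(struct altcp_pcb *pcb)
{
  const char msg[] = "hello\r\n";
  err_t err = altcp_write(pcb, msg, sizeof(msg) - 1, TCP_WRITE_FLAG_COPY);
  if (err == ERR_OK) {
    err = altcp_output(pcb);   /* forwarded to altcp_tcp_output() for plain TCP */
  }
  return err;
}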
ALTCP_TCP_ASSERT_CONN(conn); + return tcp_tcp_get_tcp_addrinfo(pcb, local, addr, port); + } + return ERR_VAL; +} + +static ip_addr_t * +altcp_tcp_get_ip(struct altcp_pcb *conn, int local) +{ + if (conn) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + if (pcb) { + if (local) { + return &pcb->local_ip; + } else { + return &pcb->remote_ip; + } + } + } + return NULL; +} + +static u16_t +altcp_tcp_get_port(struct altcp_pcb *conn, int local) +{ + if (conn) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + if (pcb) { + if (local) { + return pcb->local_port; + } else { + return pcb->remote_port; + } + } + } + return 0; +} + +#ifdef LWIP_DEBUG +static enum tcp_state +altcp_tcp_dbg_get_tcp_state(struct altcp_pcb *conn) +{ + if (conn) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + if (pcb) { + return pcb->state; + } + } + return CLOSED; +} +#endif +const struct altcp_functions altcp_tcp_functions = { + altcp_tcp_set_poll, + altcp_tcp_recved, + altcp_tcp_bind, + altcp_tcp_connect, + altcp_tcp_listen, + altcp_tcp_abort, + altcp_tcp_close, + altcp_tcp_shutdown, + altcp_tcp_write, + altcp_tcp_output, + altcp_tcp_mss, + altcp_tcp_sndbuf, + altcp_tcp_sndqueuelen, + altcp_tcp_nagle_disable, + altcp_tcp_nagle_enable, + altcp_tcp_nagle_disabled, + altcp_tcp_setprio, + altcp_tcp_dealloc, + altcp_tcp_get_tcp_addrinfo, + altcp_tcp_get_ip, + altcp_tcp_get_port +#ifdef LWIP_DEBUG + , altcp_tcp_dbg_get_tcp_state +#endif +}; + +#endif /* LWIP_ALTCP */ diff --git a/Middlewares/Third_Party/LwIP/src/core/def.c b/Middlewares/Third_Party/LwIP/src/core/def.c new file mode 100644 index 0000000..9da36fe --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/def.c @@ -0,0 +1,240 @@ +/** + * @file + * Common functions used throughout the stack. + * + * These are reference implementations of the byte swapping functions. + * Again with the aim of being simple, correct and fully portable. + * Byte swapping is the second thing you would want to optimize. You will + * need to port it to your architecture and in your cc.h: + * + * \#define lwip_htons(x) your_htons + * \#define lwip_htonl(x) your_htonl + * + * Note lwip_ntohs() and lwip_ntohl() are merely references to the htonx counterparts. + * + * If you \#define them to htons() and htonl(), you should + * \#define LWIP_DONT_PROVIDE_BYTEORDER_FUNCTIONS to prevent lwIP from + * defining htonx/ntohx compatibility macros. + + * @defgroup sys_nonstandard Non-standard functions + * @ingroup sys_layer + * lwIP provides default implementations for non-standard functions. + * These can be mapped to OS functions to reduce code footprint if desired. + * All defines related to this section must not be placed in lwipopts.h, + * but in arch/cc.h! + * These options cannot be \#defined in lwipopts.h since they are not options + * of lwIP itself, but options of the lwIP port to your system. + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#include "lwip/opt.h" +#include "lwip/def.h" + +#include + +#if BYTE_ORDER == LITTLE_ENDIAN + +#if !defined(lwip_htons) +/** + * Convert an u16_t from host- to network byte order. + * + * @param n u16_t in host byte order + * @return n in network byte order + */ +u16_t +lwip_htons(u16_t n) +{ + return PP_HTONS(n); +} +#endif /* lwip_htons */ + +#if !defined(lwip_htonl) +/** + * Convert an u32_t from host- to network byte order. + * + * @param n u32_t in host byte order + * @return n in network byte order + */ +u32_t +lwip_htonl(u32_t n) +{ + return PP_HTONL(n); +} +#endif /* lwip_htonl */ + +#endif /* BYTE_ORDER == LITTLE_ENDIAN */ + +#ifndef lwip_strnstr +/** + * @ingroup sys_nonstandard + * lwIP default implementation for strnstr() non-standard function. + * This can be \#defined to strnstr() depending on your platform port. + */ +char * +lwip_strnstr(const char *buffer, const char *token, size_t n) +{ + const char *p; + size_t tokenlen = strlen(token); + if (tokenlen == 0) { + return LWIP_CONST_CAST(char *, buffer); + } + for (p = buffer; *p && (p + tokenlen <= buffer + n); p++) { + if ((*p == *token) && (strncmp(p, token, tokenlen) == 0)) { + return LWIP_CONST_CAST(char *, p); + } + } + return NULL; +} +#endif + +#ifndef lwip_stricmp +/** + * @ingroup sys_nonstandard + * lwIP default implementation for stricmp() non-standard function. + * This can be \#defined to stricmp() depending on your platform port. + */ +int +lwip_stricmp(const char *str1, const char *str2) +{ + char c1, c2; + + do { + c1 = *str1++; + c2 = *str2++; + if (c1 != c2) { + char c1_upc = c1 | 0x20; + if ((c1_upc >= 'a') && (c1_upc <= 'z')) { + /* characters are not equal an one is in the alphabet range: + downcase both chars and check again */ + char c2_upc = c2 | 0x20; + if (c1_upc != c2_upc) { + /* still not equal */ + /* don't care for < or > */ + return 1; + } + } else { + /* characters are not equal but none is in the alphabet range */ + return 1; + } + } + } while (c1 != 0); + return 0; +} +#endif + +#ifndef lwip_strnicmp +/** + * @ingroup sys_nonstandard + * lwIP default implementation for strnicmp() non-standard function. + * This can be \#defined to strnicmp() depending on your platform port. 
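As the header comment for def.c notes, everything in this file is a portable fallback that a port may override from arch/cc.h. A hypothetical override block for a GCC-based port with a POSIX-style libc; the builtin and libc names are assumptions about the toolchain, not part of lwIP:

/* arch/cc.h -- illustrative only */
#define lwip_htons(x)           __builtin_bswap16(x)
#define lwip_htonl(x)           __builtin_bswap32(x)
#define lwip_stricmp(a, b)      strcasecmp((a), (b))
#define lwip_strnicmp(a, b, n)  strncasecmp((a), (b), (n))

Because each reference implementation here is guarded by an #ifndef/#if !defined() check, defining the macros in cc.h drops the fallback code from the build.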
+ */ +int +lwip_strnicmp(const char *str1, const char *str2, size_t len) +{ + char c1, c2; + + do { + c1 = *str1++; + c2 = *str2++; + if (c1 != c2) { + char c1_upc = c1 | 0x20; + if ((c1_upc >= 'a') && (c1_upc <= 'z')) { + /* characters are not equal an one is in the alphabet range: + downcase both chars and check again */ + char c2_upc = c2 | 0x20; + if (c1_upc != c2_upc) { + /* still not equal */ + /* don't care for < or > */ + return 1; + } + } else { + /* characters are not equal but none is in the alphabet range */ + return 1; + } + } + len--; + } while ((len != 0) && (c1 != 0)); + return 0; +} +#endif + +#ifndef lwip_itoa +/** + * @ingroup sys_nonstandard + * lwIP default implementation for itoa() non-standard function. + * This can be \#defined to itoa() or snprintf(result, bufsize, "%d", number) depending on your platform port. + */ +void +lwip_itoa(char *result, size_t bufsize, int number) +{ + char *res = result; + char *tmp = result + bufsize - 1; + int n = (number >= 0) ? number : -number; + + /* handle invalid bufsize */ + if (bufsize < 2) { + if (bufsize == 1) { + *result = 0; + } + return; + } + + /* First, add sign */ + if (number < 0) { + *res++ = '-'; + } + /* Then create the string from the end and stop if buffer full, + and ensure output string is zero terminated */ + *tmp = 0; + while ((n != 0) && (tmp > res)) { + char val = (char)('0' + (n % 10)); + tmp--; + *tmp = val; + n = n / 10; + } + if (n) { + /* buffer is too small */ + *result = 0; + return; + } + if (*tmp == 0) { + /* Nothing added? */ + *res++ = '0'; + *res++ = 0; + return; + } + /* move from temporary buffer to output buffer (sign is not moved) */ + memmove(res, tmp, (size_t)((result + bufsize) - tmp)); +} +#endif diff --git a/Middlewares/Third_Party/LwIP/src/core/dns.c b/Middlewares/Third_Party/LwIP/src/core/dns.c new file mode 100644 index 0000000..9d2f61e --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/dns.c @@ -0,0 +1,1631 @@ +/** + * @file + * DNS - host name to IP address resolver. + * + * @defgroup dns DNS + * @ingroup callbackstyle_api + * + * Implements a DNS host name to IP address resolver. + * + * The lwIP DNS resolver functions are used to lookup a host name and + * map it to a numerical IP address. It maintains a list of resolved + * hostnames that can be queried with the dns_lookup() function. + * New hostnames can be resolved using the dns_query() function. + * + * The lwIP version of the resolver also adds a non-blocking version of + * gethostbyname() that will work with a raw API application. This function + * checks for an IP address string first and converts it if it is valid. + * gethostbyname() then does a dns_lookup() to see if the name is + * already in the table. If so, the IP is returned. If not, a query is + * issued and the function returns with a ERR_INPROGRESS status. The app + * using the dns client must then go into a waiting state. + * + * Once a hostname has been resolved (or found to be non-existent), + * the resolver code calls a specified callback function (which + * must be implemented by the module that uses the resolver). + * + * Multicast DNS queries are supported for names ending on ".local". + * However, only "One-Shot Multicast DNS Queries" are supported (RFC 6762 + * chapter 5.1), this is not a fully compliant implementation of continuous + * mDNS querying! + * + * All functions must be called from TCPIP thread. + * + * @see DNS_MAX_SERVERS + * @see LWIP_DHCP_MAX_DNS_SERVERS + * @see @ref netconn_common for thread-safe access. 
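A quick usage sketch for the lwip_itoa() fallback above (the buffer size includes the terminating zero; a too-small buffer degrades to an empty string):

char buf[12];
lwip_itoa(buf, sizeof(buf), -42);   /* buf now contains "-42" */
lwip_itoa(buf, sizeof(buf), 0);     /* "0", handled by the explicit zero case */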
+ */ + +/* + * Port to lwIP from uIP + * by Jim Pettinato April 2007 + * + * security fixes and more by Simon Goldschmidt + * + * uIP version Copyright (c) 2002-2003, Adam Dunkels. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*----------------------------------------------------------------------------- + * RFC 1035 - Domain names - implementation and specification + * RFC 2181 - Clarifications to the DNS Specification + *----------------------------------------------------------------------------*/ + +/** @todo: define good default values (rfc compliance) */ +/** @todo: improve answer parsing, more checkings... */ +/** @todo: check RFC1035 - 7.3. Processing responses */ +/** @todo: one-shot mDNS: dual-stack fallback to another IP version */ + +/*----------------------------------------------------------------------------- + * Includes + *----------------------------------------------------------------------------*/ + +#include "lwip/opt.h" + +#if LWIP_DNS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/udp.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/dns.h" +#include "lwip/prot/dns.h" + +#include + +/** Random generator function to create random TXIDs and source ports for queries */ +#ifndef DNS_RAND_TXID +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_XID) != 0) +#define DNS_RAND_TXID LWIP_RAND +#else +static u16_t dns_txid; +#define DNS_RAND_TXID() (++dns_txid) +#endif +#endif + +/** Limits the source port to be >= 1024 by default */ +#ifndef DNS_PORT_ALLOWED +#define DNS_PORT_ALLOWED(port) ((port) >= 1024) +#endif + +/** DNS resource record max. TTL (one week as default) */ +#ifndef DNS_MAX_TTL +#define DNS_MAX_TTL 604800 +#elif DNS_MAX_TTL > 0x7FFFFFFF +#error DNS_MAX_TTL must be a positive 32-bit value +#endif + +#if DNS_TABLE_SIZE > 255 +#error DNS_TABLE_SIZE must fit into an u8_t +#endif +#if DNS_MAX_SERVERS > 255 +#error DNS_MAX_SERVERS must fit into an u8_t +#endif + +/* The number of parallel requests (i.e. calls to dns_gethostbyname + * that cannot be answered from the DNS table. + * This is set to the table size by default. 
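The hardening knobs referenced above (randomized TXIDs, randomized source ports, a single outstanding request per name) are selected through LWIP_DNS_SECURE in lwipopts.h. A hypothetical configuration excerpt; the values are illustrative and my_trng_read() is an assumed board-specific RNG hook declared elsewhere:

/* lwipopts.h -- illustrative only */
#define LWIP_DNS                       1
#define DNS_TABLE_SIZE                 4
#define DNS_MAX_SERVERS                2
#define LWIP_DNS_SUPPORT_MDNS_QUERIES  1
#define LWIP_DNS_SECURE  (LWIP_DNS_SECURE_RAND_XID | \
                          LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING | \
                          LWIP_DNS_SECURE_RAND_SRC_PORT)
#define LWIP_RAND()      ((u32_t)my_trng_read())  /* feeds DNS_RAND_TXID and source-port selection */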
+ */ +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) +#ifndef DNS_MAX_REQUESTS +#define DNS_MAX_REQUESTS DNS_TABLE_SIZE +#else +#if DNS_MAX_REQUESTS > 255 +#error DNS_MAX_REQUESTS must fit into an u8_t +#endif +#endif +#else +/* In this configuration, both arrays have to have the same size and are used + * like one entry (used/free) */ +#define DNS_MAX_REQUESTS DNS_TABLE_SIZE +#endif + +/* The number of UDP source ports used in parallel */ +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) +#ifndef DNS_MAX_SOURCE_PORTS +#define DNS_MAX_SOURCE_PORTS DNS_MAX_REQUESTS +#else +#if DNS_MAX_SOURCE_PORTS > 255 +#error DNS_MAX_SOURCE_PORTS must fit into an u8_t +#endif +#endif +#else +#ifdef DNS_MAX_SOURCE_PORTS +#undef DNS_MAX_SOURCE_PORTS +#endif +#define DNS_MAX_SOURCE_PORTS 1 +#endif + +#if LWIP_IPV4 && LWIP_IPV6 +#define LWIP_DNS_ADDRTYPE_IS_IPV6(t) (((t) == LWIP_DNS_ADDRTYPE_IPV6_IPV4) || ((t) == LWIP_DNS_ADDRTYPE_IPV6)) +#define LWIP_DNS_ADDRTYPE_MATCH_IP(t, ip) (IP_IS_V6_VAL(ip) ? LWIP_DNS_ADDRTYPE_IS_IPV6(t) : (!LWIP_DNS_ADDRTYPE_IS_IPV6(t))) +#define LWIP_DNS_ADDRTYPE_ARG(x) , x +#define LWIP_DNS_ADDRTYPE_ARG_OR_ZERO(x) x +#define LWIP_DNS_SET_ADDRTYPE(x, y) do { x = y; } while(0) +#else +#if LWIP_IPV6 +#define LWIP_DNS_ADDRTYPE_IS_IPV6(t) 1 +#else +#define LWIP_DNS_ADDRTYPE_IS_IPV6(t) 0 +#endif +#define LWIP_DNS_ADDRTYPE_MATCH_IP(t, ip) 1 +#define LWIP_DNS_ADDRTYPE_ARG(x) +#define LWIP_DNS_ADDRTYPE_ARG_OR_ZERO(x) 0 +#define LWIP_DNS_SET_ADDRTYPE(x, y) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#if LWIP_DNS_SUPPORT_MDNS_QUERIES +#define LWIP_DNS_ISMDNS_ARG(x) , x +#else +#define LWIP_DNS_ISMDNS_ARG(x) +#endif + +/** DNS query message structure. + No packing needed: only used locally on the stack. */ +struct dns_query { + /* DNS query record starts with either a domain name or a pointer + to a name already present somewhere in the packet. */ + u16_t type; + u16_t cls; +}; +#define SIZEOF_DNS_QUERY 4 + +/** DNS answer message structure. + No packing needed: only used locally on the stack. */ +struct dns_answer { + /* DNS answer record starts with either a domain name or a pointer + to a name already present somewhere in the packet. */ + u16_t type; + u16_t cls; + u32_t ttl; + u16_t len; +}; +#define SIZEOF_DNS_ANSWER 10 +/* maximum allowed size for the struct due to non-packed */ +#define SIZEOF_DNS_ANSWER_ASSERT 12 + +/* DNS table entry states */ +typedef enum { + DNS_STATE_UNUSED = 0, + DNS_STATE_NEW = 1, + DNS_STATE_ASKING = 2, + DNS_STATE_DONE = 3 +} dns_state_enum_t; + +/** DNS table entry */ +struct dns_table_entry { + u32_t ttl; + ip_addr_t ipaddr; + u16_t txid; + u8_t state; + u8_t server_idx; + u8_t tmr; + u8_t retries; + u8_t seqno; +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) + u8_t pcb_idx; +#endif + char name[DNS_MAX_NAME_LENGTH]; +#if LWIP_IPV4 && LWIP_IPV6 + u8_t reqaddrtype; +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + u8_t is_mdns; +#endif +}; + +/** DNS request table entry: used when dns_gehostbyname cannot answer the + * request from the DNS table */ +struct dns_req_entry { + /* pointer to callback on DNS query done */ + dns_found_callback found; + /* argument passed to the callback function */ + void *arg; +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) + u8_t dns_table_idx; +#endif +#if LWIP_IPV4 && LWIP_IPV6 + u8_t reqaddrtype; +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +}; + +#if DNS_LOCAL_HOSTLIST + +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC +/** Local host-list. 
For hostnames in this list, no + * external name resolution is performed */ +static struct local_hostlist_entry *local_hostlist_dynamic; +#else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + +/** Defining this allows the local_hostlist_static to be placed in a different + * linker section (e.g. FLASH) */ +#ifndef DNS_LOCAL_HOSTLIST_STORAGE_PRE +#define DNS_LOCAL_HOSTLIST_STORAGE_PRE static +#endif /* DNS_LOCAL_HOSTLIST_STORAGE_PRE */ +/** Defining this allows the local_hostlist_static to be placed in a different + * linker section (e.g. FLASH) */ +#ifndef DNS_LOCAL_HOSTLIST_STORAGE_POST +#define DNS_LOCAL_HOSTLIST_STORAGE_POST +#endif /* DNS_LOCAL_HOSTLIST_STORAGE_POST */ +DNS_LOCAL_HOSTLIST_STORAGE_PRE struct local_hostlist_entry local_hostlist_static[] + DNS_LOCAL_HOSTLIST_STORAGE_POST = DNS_LOCAL_HOSTLIST_INIT; + +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + +static void dns_init_local(void); +static err_t dns_lookup_local(const char *hostname, ip_addr_t *addr LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype)); +#endif /* DNS_LOCAL_HOSTLIST */ + + +/* forward declarations */ +static void dns_recv(void *s, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port); +static void dns_check_entries(void); +static void dns_call_found(u8_t idx, ip_addr_t *addr); + +/*----------------------------------------------------------------------------- + * Globals + *----------------------------------------------------------------------------*/ + +/* DNS variables */ +static struct udp_pcb *dns_pcbs[DNS_MAX_SOURCE_PORTS]; +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) +static u8_t dns_last_pcb_idx; +#endif +static u8_t dns_seqno; +static struct dns_table_entry dns_table[DNS_TABLE_SIZE]; +static struct dns_req_entry dns_requests[DNS_MAX_REQUESTS]; +static ip_addr_t dns_servers[DNS_MAX_SERVERS]; + +#if LWIP_IPV4 +const ip_addr_t dns_mquery_v4group = DNS_MQUERY_IPV4_GROUP_INIT; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 +const ip_addr_t dns_mquery_v6group = DNS_MQUERY_IPV6_GROUP_INIT; +#endif /* LWIP_IPV6 */ + +/** + * Initialize the resolver: set up the UDP pcb and configure the default server + * (if DNS_SERVER_ADDRESS is set). + */ +void +dns_init(void) +{ +#ifdef DNS_SERVER_ADDRESS + /* initialize default DNS server address */ + ip_addr_t dnsserver; + DNS_SERVER_ADDRESS(&dnsserver); + dns_setserver(0, &dnsserver); +#endif /* DNS_SERVER_ADDRESS */ + + LWIP_ASSERT("sanity check SIZEOF_DNS_QUERY", + sizeof(struct dns_query) == SIZEOF_DNS_QUERY); + LWIP_ASSERT("sanity check SIZEOF_DNS_ANSWER", + sizeof(struct dns_answer) <= SIZEOF_DNS_ANSWER_ASSERT); + + LWIP_DEBUGF(DNS_DEBUG, ("dns_init: initializing\n")); + + /* if dns client not yet initialized... */ +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) == 0) + if (dns_pcbs[0] == NULL) { + dns_pcbs[0] = udp_new_ip_type(IPADDR_TYPE_ANY); + LWIP_ASSERT("dns_pcbs[0] != NULL", dns_pcbs[0] != NULL); + + /* initialize DNS table not needed (initialized to zero since it is a + * global variable) */ + LWIP_ASSERT("For implicit initialization to work, DNS_STATE_UNUSED needs to be 0", + DNS_STATE_UNUSED == 0); + + /* initialize DNS client */ + udp_bind(dns_pcbs[0], IP_ANY_TYPE, 0); + udp_recv(dns_pcbs[0], dns_recv, NULL); + } +#endif + +#if DNS_LOCAL_HOSTLIST + dns_init_local(); +#endif +} + +/** + * @ingroup dns + * Initialize one of the DNS servers. 
+ * + * @param numdns the index of the DNS server to set must be < DNS_MAX_SERVERS + * @param dnsserver IP address of the DNS server to set + */ +void +dns_setserver(u8_t numdns, const ip_addr_t *dnsserver) +{ + if (numdns < DNS_MAX_SERVERS) { + if (dnsserver != NULL) { + dns_servers[numdns] = (*dnsserver); + } else { + dns_servers[numdns] = *IP_ADDR_ANY; + } + } +} + +/** + * @ingroup dns + * Obtain one of the currently configured DNS server. + * + * @param numdns the index of the DNS server + * @return IP address of the indexed DNS server or "ip_addr_any" if the DNS + * server has not been configured. + */ +const ip_addr_t * +dns_getserver(u8_t numdns) +{ + if (numdns < DNS_MAX_SERVERS) { + return &dns_servers[numdns]; + } else { + return IP_ADDR_ANY; + } +} + +/** + * The DNS resolver client timer - handle retries and timeouts and should + * be called every DNS_TMR_INTERVAL milliseconds (every second by default). + */ +void +dns_tmr(void) +{ + LWIP_DEBUGF(DNS_DEBUG, ("dns_tmr: dns_check_entries\n")); + dns_check_entries(); +} + +#if DNS_LOCAL_HOSTLIST +static void +dns_init_local(void) +{ +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC && defined(DNS_LOCAL_HOSTLIST_INIT) + size_t i; + struct local_hostlist_entry *entry; + /* Dynamic: copy entries from DNS_LOCAL_HOSTLIST_INIT to list */ + struct local_hostlist_entry local_hostlist_init[] = DNS_LOCAL_HOSTLIST_INIT; + size_t namelen; + for (i = 0; i < LWIP_ARRAYSIZE(local_hostlist_init); i++) { + struct local_hostlist_entry *init_entry = &local_hostlist_init[i]; + LWIP_ASSERT("invalid host name (NULL)", init_entry->name != NULL); + namelen = strlen(init_entry->name); + LWIP_ASSERT("namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN", namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN); + entry = (struct local_hostlist_entry *)memp_malloc(MEMP_LOCALHOSTLIST); + LWIP_ASSERT("mem-error in dns_init_local", entry != NULL); + if (entry != NULL) { + char *entry_name = (char *)entry + sizeof(struct local_hostlist_entry); + MEMCPY(entry_name, init_entry->name, namelen); + entry_name[namelen] = 0; + entry->name = entry_name; + entry->addr = init_entry->addr; + entry->next = local_hostlist_dynamic; + local_hostlist_dynamic = entry; + } + } +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC && defined(DNS_LOCAL_HOSTLIST_INIT) */ +} + +/** + * @ingroup dns + * Iterate the local host-list for a hostname. + * + * @param iterator_fn a function that is called for every entry in the local host-list + * @param iterator_arg 3rd argument passed to iterator_fn + * @return the number of entries in the local host-list + */ +size_t +dns_local_iterate(dns_found_callback iterator_fn, void *iterator_arg) +{ + size_t i; +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC + struct local_hostlist_entry *entry = local_hostlist_dynamic; + i = 0; + while (entry != NULL) { + if (iterator_fn != NULL) { + iterator_fn(entry->name, &entry->addr, iterator_arg); + } + i++; + entry = entry->next; + } +#else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + for (i = 0; i < LWIP_ARRAYSIZE(local_hostlist_static); i++) { + if (iterator_fn != NULL) { + iterator_fn(local_hostlist_static[i].name, &local_hostlist_static[i].addr, iterator_arg); + } + } +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + return i; +} + +/** + * @ingroup dns + * Scans the local host-list for a hostname. + * + * @param hostname Hostname to look for in the local host-list + * @param addr the first IP address for the hostname in the local host-list or + * IPADDR_NONE if not found. + * @param dns_addrtype - LWIP_DNS_ADDRTYPE_IPV4_IPV6: try to resolve IPv4 (ATTENTION: no fallback here!) 
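dns_setserver()/dns_getserver() are the runtime hooks for configuring servers, for example when DHCP is not used or did not hand one out. A small sketch (the 8.8.8.8 address is only an example; like the rest of this API it must run in tcpip_thread context):

static void set_fallback_dns(void)
{
  ip_addr_t dns0;
  IP_ADDR4(&dns0, 8, 8, 8, 8);          /* example address only */
  dns_setserver(0, &dns0);
}

/* dns_getserver(0) then returns that address, or an "any" address if slot 0 is unset. */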
+ * - LWIP_DNS_ADDRTYPE_IPV6_IPV4: try to resolve IPv6 (ATTENTION: no fallback here!) + * - LWIP_DNS_ADDRTYPE_IPV4: try to resolve IPv4 only + * - LWIP_DNS_ADDRTYPE_IPV6: try to resolve IPv6 only + * @return ERR_OK if found, ERR_ARG if not found + */ +err_t +dns_local_lookup(const char *hostname, ip_addr_t *addr, u8_t dns_addrtype) +{ + LWIP_UNUSED_ARG(dns_addrtype); + return dns_lookup_local(hostname, addr LWIP_DNS_ADDRTYPE_ARG(dns_addrtype)); +} + +/* Internal implementation for dns_local_lookup and dns_lookup */ +static err_t +dns_lookup_local(const char *hostname, ip_addr_t *addr LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype)) +{ +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC + struct local_hostlist_entry *entry = local_hostlist_dynamic; + while (entry != NULL) { + if ((lwip_stricmp(entry->name, hostname) == 0) && + LWIP_DNS_ADDRTYPE_MATCH_IP(dns_addrtype, entry->addr)) { + if (addr) { + ip_addr_copy(*addr, entry->addr); + } + return ERR_OK; + } + entry = entry->next; + } +#else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + size_t i; + for (i = 0; i < LWIP_ARRAYSIZE(local_hostlist_static); i++) { + if ((lwip_stricmp(local_hostlist_static[i].name, hostname) == 0) && + LWIP_DNS_ADDRTYPE_MATCH_IP(dns_addrtype, local_hostlist_static[i].addr)) { + if (addr) { + ip_addr_copy(*addr, local_hostlist_static[i].addr); + } + return ERR_OK; + } + } +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + return ERR_ARG; +} + +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC +/** + * @ingroup dns + * Remove all entries from the local host-list for a specific hostname + * and/or IP address + * + * @param hostname hostname for which entries shall be removed from the local + * host-list + * @param addr address for which entries shall be removed from the local host-list + * @return the number of removed entries + */ +int +dns_local_removehost(const char *hostname, const ip_addr_t *addr) +{ + int removed = 0; + struct local_hostlist_entry *entry = local_hostlist_dynamic; + struct local_hostlist_entry *last_entry = NULL; + while (entry != NULL) { + if (((hostname == NULL) || !lwip_stricmp(entry->name, hostname)) && + ((addr == NULL) || ip_addr_cmp(&entry->addr, addr))) { + struct local_hostlist_entry *free_entry; + if (last_entry != NULL) { + last_entry->next = entry->next; + } else { + local_hostlist_dynamic = entry->next; + } + free_entry = entry; + entry = entry->next; + memp_free(MEMP_LOCALHOSTLIST, free_entry); + removed++; + } else { + last_entry = entry; + entry = entry->next; + } + } + return removed; +} + +/** + * @ingroup dns + * Add a hostname/IP address pair to the local host-list. + * Duplicates are not checked. 
+ * + * @param hostname hostname of the new entry + * @param addr IP address of the new entry + * @return ERR_OK if succeeded or ERR_MEM on memory error + */ +err_t +dns_local_addhost(const char *hostname, const ip_addr_t *addr) +{ + struct local_hostlist_entry *entry; + size_t namelen; + char *entry_name; + LWIP_ASSERT("invalid host name (NULL)", hostname != NULL); + namelen = strlen(hostname); + LWIP_ASSERT("namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN", namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN); + entry = (struct local_hostlist_entry *)memp_malloc(MEMP_LOCALHOSTLIST); + if (entry == NULL) { + return ERR_MEM; + } + entry_name = (char *)entry + sizeof(struct local_hostlist_entry); + MEMCPY(entry_name, hostname, namelen); + entry_name[namelen] = 0; + entry->name = entry_name; + ip_addr_copy(entry->addr, *addr); + entry->next = local_hostlist_dynamic; + local_hostlist_dynamic = entry; + return ERR_OK; +} +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC*/ +#endif /* DNS_LOCAL_HOSTLIST */ + +/** + * @ingroup dns + * Look up a hostname in the array of known hostnames. + * + * @note This function only looks in the internal array of known + * hostnames, it does not send out a query for the hostname if none + * was found. The function dns_enqueue() can be used to send a query + * for a hostname. + * + * @param name the hostname to look up + * @param addr the hostname's IP address, as u32_t (instead of ip_addr_t to + * better check for failure: != IPADDR_NONE) or IPADDR_NONE if the hostname + * was not found in the cached dns_table. + * @return ERR_OK if found, ERR_ARG if not found + */ +static err_t +dns_lookup(const char *name, ip_addr_t *addr LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype)) +{ + u8_t i; +#if DNS_LOCAL_HOSTLIST + if (dns_lookup_local(name, addr LWIP_DNS_ADDRTYPE_ARG(dns_addrtype)) == ERR_OK) { + return ERR_OK; + } +#endif /* DNS_LOCAL_HOSTLIST */ +#ifdef DNS_LOOKUP_LOCAL_EXTERN + if (DNS_LOOKUP_LOCAL_EXTERN(name, addr, LWIP_DNS_ADDRTYPE_ARG_OR_ZERO(dns_addrtype)) == ERR_OK) { + return ERR_OK; + } +#endif /* DNS_LOOKUP_LOCAL_EXTERN */ + + /* Walk through name list, return entry if found. If not, return NULL. */ + for (i = 0; i < DNS_TABLE_SIZE; ++i) { + if ((dns_table[i].state == DNS_STATE_DONE) && + (lwip_strnicmp(name, dns_table[i].name, sizeof(dns_table[i].name)) == 0) && + LWIP_DNS_ADDRTYPE_MATCH_IP(dns_addrtype, dns_table[i].ipaddr)) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_lookup: \"%s\": found = ", name)); + ip_addr_debug_print_val(DNS_DEBUG, dns_table[i].ipaddr); + LWIP_DEBUGF(DNS_DEBUG, ("\n")); + if (addr) { + ip_addr_copy(*addr, dns_table[i].ipaddr); + } + return ERR_OK; + } + } + + return ERR_ARG; +} + +/** + * Compare the "dotted" name "query" with the encoded name "response" + * to make sure an answer from the DNS server matches the current dns_table + * entry (otherwise, answers might arrive late for hostname not on the list + * any more). + * + * For now, this function compares case-insensitive to cope with all kinds of + * servers. This also means that "dns 0x20 bit encoding" must be checked + * externally, if we want to implement it. + * Currently, the request is sent exactly as passed in by he user request. 
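The dynamic host-list above acts as a tiny hosts file in RAM that dns_lookup() consults before anything is sent on the wire. A usage sketch (requires DNS_LOCAL_HOSTLIST and DNS_LOCAL_HOSTLIST_IS_DYNAMIC; the name and address are made up):

static void add_local_printer(void)
{
  ip_addr_t printer;
  IP_ADDR4(&printer, 192, 168, 1, 50);
  dns_local_addhost("printer.lan", &printer);   /* ERR_MEM if the memp pool is exhausted */
}

/* later: dns_local_removehost("printer.lan", NULL);  NULL address = match by name only */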
+ * + * @param query hostname (not encoded) from the dns_table + * @param p pbuf containing the encoded hostname in the DNS response + * @param start_offset offset into p where the name starts + * @return 0xFFFF: names differ, other: names equal -> offset behind name + */ +static u16_t +dns_compare_name(const char *query, struct pbuf *p, u16_t start_offset) +{ + int n; + u16_t response_offset = start_offset; + + do { + n = pbuf_try_get_at(p, response_offset); + if ((n < 0) || (response_offset == 0xFFFF)) { + /* error or overflow */ + return 0xFFFF; + } + response_offset++; + /** @see RFC 1035 - 4.1.4. Message compression */ + if ((n & 0xc0) == 0xc0) { + /* Compressed name: cannot be equal since we don't send them */ + return 0xFFFF; + } else { + /* Not compressed name */ + while (n > 0) { + int c = pbuf_try_get_at(p, response_offset); + if (c < 0) { + return 0xFFFF; + } + if (lwip_tolower((*query)) != lwip_tolower((u8_t)c)) { + return 0xFFFF; + } + if (response_offset == 0xFFFF) { + /* would overflow */ + return 0xFFFF; + } + response_offset++; + ++query; + --n; + } + ++query; + } + n = pbuf_try_get_at(p, response_offset); + if (n < 0) { + return 0xFFFF; + } + } while (n != 0); + + if (response_offset == 0xFFFF) { + /* would overflow */ + return 0xFFFF; + } + return (u16_t)(response_offset + 1); +} + +/** + * Walk through a compact encoded DNS name and return the end of the name. + * + * @param p pbuf containing the name + * @param query_idx start index into p pointing to encoded DNS name in the DNS server response + * @return index to end of the name + */ +static u16_t +dns_skip_name(struct pbuf *p, u16_t query_idx) +{ + int n; + u16_t offset = query_idx; + + do { + n = pbuf_try_get_at(p, offset++); + if ((n < 0) || (offset == 0)) { + return 0xFFFF; + } + /** @see RFC 1035 - 4.1.4. Message compression */ + if ((n & 0xc0) == 0xc0) { + /* Compressed name: since we only want to skip it (not check it), stop here */ + break; + } else { + /* Not compressed name */ + if (offset + n >= p->tot_len) { + return 0xFFFF; + } + offset = (u16_t)(offset + n); + } + n = pbuf_try_get_at(p, offset); + if (n < 0) { + return 0xFFFF; + } + } while (n != 0); + + if (offset == 0xFFFF) { + return 0xFFFF; + } + return (u16_t)(offset + 1); +} + +/** + * Send a DNS query packet. + * + * @param idx the DNS table entry index for which to send a request + * @return ERR_OK if packet is sent; an err_t indicating the problem otherwise + */ +static err_t +dns_send(u8_t idx) +{ + err_t err; + struct dns_hdr hdr; + struct dns_query qry; + struct pbuf *p; + u16_t query_idx, copy_len; + const char *hostname, *hostname_part; + u8_t n; + u8_t pcb_idx; + struct dns_table_entry *entry = &dns_table[idx]; + + LWIP_DEBUGF(DNS_DEBUG, ("dns_send: dns_servers[%"U16_F"] \"%s\": request\n", + (u16_t)(entry->server_idx), entry->name)); + LWIP_ASSERT("dns server out of array", entry->server_idx < DNS_MAX_SERVERS); + if (ip_addr_isany_val(dns_servers[entry->server_idx]) +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + && !entry->is_mdns +#endif + ) { + /* DNS server not valid anymore, e.g. 
PPP netif has been shut down */ + /* call specified callback function if provided */ + dns_call_found(idx, NULL); + /* flush this entry */ + entry->state = DNS_STATE_UNUSED; + return ERR_OK; + } + + /* if here, we have either a new query or a retry on a previous query to process */ + p = pbuf_alloc(PBUF_TRANSPORT, (u16_t)(SIZEOF_DNS_HDR + strlen(entry->name) + 2 + + SIZEOF_DNS_QUERY), PBUF_RAM); + if (p != NULL) { + const ip_addr_t *dst; + u16_t dst_port; + /* fill dns header */ + memset(&hdr, 0, SIZEOF_DNS_HDR); + hdr.id = lwip_htons(entry->txid); + hdr.flags1 = DNS_FLAG1_RD; + hdr.numquestions = PP_HTONS(1); + pbuf_take(p, &hdr, SIZEOF_DNS_HDR); + hostname = entry->name; + --hostname; + + /* convert hostname into suitable query format. */ + query_idx = SIZEOF_DNS_HDR; + do { + ++hostname; + hostname_part = hostname; + for (n = 0; *hostname != '.' && *hostname != 0; ++hostname) { + ++n; + } + copy_len = (u16_t)(hostname - hostname_part); + if (query_idx + n + 1 > 0xFFFF) { + /* u16_t overflow */ + goto overflow_return; + } + pbuf_put_at(p, query_idx, n); + pbuf_take_at(p, hostname_part, copy_len, (u16_t)(query_idx + 1)); + query_idx = (u16_t)(query_idx + n + 1); + } while (*hostname != 0); + pbuf_put_at(p, query_idx, 0); + query_idx++; + + /* fill dns query */ + if (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) { + qry.type = PP_HTONS(DNS_RRTYPE_AAAA); + } else { + qry.type = PP_HTONS(DNS_RRTYPE_A); + } + qry.cls = PP_HTONS(DNS_RRCLASS_IN); + pbuf_take_at(p, &qry, SIZEOF_DNS_QUERY, query_idx); + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) + pcb_idx = entry->pcb_idx; +#else + pcb_idx = 0; +#endif + /* send dns packet */ + LWIP_DEBUGF(DNS_DEBUG, ("sending DNS request ID %d for name \"%s\" to server %d\r\n", + entry->txid, entry->name, entry->server_idx)); +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + if (entry->is_mdns) { + dst_port = DNS_MQUERY_PORT; +#if LWIP_IPV6 + if (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) { + dst = &dns_mquery_v6group; + } +#endif +#if LWIP_IPV4 && LWIP_IPV6 + else +#endif +#if LWIP_IPV4 + { + dst = &dns_mquery_v4group; + } +#endif + } else +#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ + { + dst_port = DNS_SERVER_PORT; + dst = &dns_servers[entry->server_idx]; + } + err = udp_sendto(dns_pcbs[pcb_idx], p, dst, dst_port); + + /* free pbuf */ + pbuf_free(p); + } else { + err = ERR_MEM; + } + + return err; +overflow_return: + pbuf_free(p); + return ERR_VAL; +} + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) +static struct udp_pcb * +dns_alloc_random_port(void) +{ + err_t err; + struct udp_pcb *pcb; + + pcb = udp_new_ip_type(IPADDR_TYPE_ANY); + if (pcb == NULL) { + /* out of memory, have to reuse an existing pcb */ + return NULL; + } + do { + u16_t port = (u16_t)DNS_RAND_TXID(); + if (DNS_PORT_ALLOWED(port)) { + err = udp_bind(pcb, IP_ANY_TYPE, port); + } else { + /* this port is not allowed, try again */ + err = ERR_USE; + } + } while (err == ERR_USE); + if (err != ERR_OK) { + udp_remove(pcb); + return NULL; + } + udp_recv(pcb, dns_recv, NULL); + return pcb; +} + +/** + * dns_alloc_pcb() - allocates a new pcb (or reuses an existing one) to be used + * for sending a request + * + * @return an index into dns_pcbs + */ +static u8_t +dns_alloc_pcb(void) +{ + u8_t i; + u8_t idx; + + for (i = 0; i < DNS_MAX_SOURCE_PORTS; i++) { + if (dns_pcbs[i] == NULL) { + break; + } + } + if (i < DNS_MAX_SOURCE_PORTS) { + dns_pcbs[i] = dns_alloc_random_port(); + if (dns_pcbs[i] != NULL) { + /* succeeded */ + dns_last_pcb_idx = i; + return i; + } + } + /* if we 
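The pbuf_alloc() size of strlen(name) + 2 used by dns_send() works because the QNAME wire format replaces each '.' with a one-byte label length and adds one leading length byte plus the terminating zero label. A worked example of what the conversion loop above writes into the pbuf:

/* "www.lwip.io" (11 characters) is emitted as 13 bytes:
 *   0x03 'w' 'w' 'w'  0x04 'l' 'w' 'i' 'p'  0x02 'i' 'o'  0x00
 * i.e. strlen("www.lwip.io") + 2, which is exactly the space dns_send() reserves. */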
come here, creating a new UDP pcb failed, so we have to use + an already existing one (so overflow is no issue) */ + for (i = 0, idx = (u8_t)(dns_last_pcb_idx + 1); i < DNS_MAX_SOURCE_PORTS; i++, idx++) { + if (idx >= DNS_MAX_SOURCE_PORTS) { + idx = 0; + } + if (dns_pcbs[idx] != NULL) { + dns_last_pcb_idx = idx; + return idx; + } + } + return DNS_MAX_SOURCE_PORTS; +} +#endif /* ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) */ + +/** + * dns_call_found() - call the found callback and check if there are duplicate + * entries for the given hostname. If there are any, their found callback will + * be called and they will be removed. + * + * @param idx dns table index of the entry that is resolved or removed + * @param addr IP address for the hostname (or NULL on error or memory shortage) + */ +static void +dns_call_found(u8_t idx, ip_addr_t *addr) +{ +#if ((LWIP_DNS_SECURE & (LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING | LWIP_DNS_SECURE_RAND_SRC_PORT)) != 0) + u8_t i; +#endif + +#if LWIP_IPV4 && LWIP_IPV6 + if (addr != NULL) { + /* check that address type matches the request and adapt the table entry */ + if (IP_IS_V6_VAL(*addr)) { + LWIP_ASSERT("invalid response", LWIP_DNS_ADDRTYPE_IS_IPV6(dns_table[idx].reqaddrtype)); + dns_table[idx].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV6; + } else { + LWIP_ASSERT("invalid response", !LWIP_DNS_ADDRTYPE_IS_IPV6(dns_table[idx].reqaddrtype)); + dns_table[idx].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV4; + } + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) + for (i = 0; i < DNS_MAX_REQUESTS; i++) { + if (dns_requests[i].found && (dns_requests[i].dns_table_idx == idx)) { + (*dns_requests[i].found)(dns_table[idx].name, addr, dns_requests[i].arg); + /* flush this entry */ + dns_requests[i].found = NULL; + } + } +#else + if (dns_requests[idx].found) { + (*dns_requests[idx].found)(dns_table[idx].name, addr, dns_requests[idx].arg); + } + dns_requests[idx].found = NULL; +#endif +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) + /* close the pcb used unless other request are using it */ + for (i = 0; i < DNS_MAX_REQUESTS; i++) { + if (i == idx) { + continue; /* only check other requests */ + } + if (dns_table[i].state == DNS_STATE_ASKING) { + if (dns_table[i].pcb_idx == dns_table[idx].pcb_idx) { + /* another request is still using the same pcb */ + dns_table[idx].pcb_idx = DNS_MAX_SOURCE_PORTS; + break; + } + } + } + if (dns_table[idx].pcb_idx < DNS_MAX_SOURCE_PORTS) { + /* if we come here, the pcb is not used any more and can be removed */ + udp_remove(dns_pcbs[dns_table[idx].pcb_idx]); + dns_pcbs[dns_table[idx].pcb_idx] = NULL; + dns_table[idx].pcb_idx = DNS_MAX_SOURCE_PORTS; + } +#endif +} + +/* Create a query transmission ID that is unique for all outstanding queries */ +static u16_t +dns_create_txid(void) +{ + u16_t txid; + u8_t i; + +again: + txid = (u16_t)DNS_RAND_TXID(); + + /* check whether the ID is unique */ + for (i = 0; i < DNS_TABLE_SIZE; i++) { + if ((dns_table[i].state == DNS_STATE_ASKING) && + (dns_table[i].txid == txid)) { + /* ID already used by another pending query */ + goto again; + } + } + + return txid; +} + +/** + * Check whether there are other backup DNS servers available to try + */ +static u8_t +dns_backupserver_available(struct dns_table_entry *pentry) +{ + u8_t ret = 0; + + if (pentry) { + if ((pentry->server_idx + 1 < DNS_MAX_SERVERS) && !ip_addr_isany_val(dns_servers[pentry->server_idx + 1])) { + ret = 1; + } + } + + return ret; +} + +/** + * dns_check_entry() 
- see if entry has not yet been queried and, if so, sends out a query. + * Check an entry in the dns_table: + * - send out query for new entries + * - retry old pending entries on timeout (also with different servers) + * - remove completed entries from the table if their TTL has expired + * + * @param i index of the dns_table entry to check + */ +static void +dns_check_entry(u8_t i) +{ + err_t err; + struct dns_table_entry *entry = &dns_table[i]; + + LWIP_ASSERT("array index out of bounds", i < DNS_TABLE_SIZE); + + switch (entry->state) { + case DNS_STATE_NEW: + /* initialize new entry */ + entry->txid = dns_create_txid(); + entry->state = DNS_STATE_ASKING; + entry->server_idx = 0; + entry->tmr = 1; + entry->retries = 0; + + /* send DNS packet for this entry */ + err = dns_send(i); + if (err != ERR_OK) { + LWIP_DEBUGF(DNS_DEBUG | LWIP_DBG_LEVEL_WARNING, + ("dns_send returned error: %s\n", lwip_strerr(err))); + } + break; + case DNS_STATE_ASKING: + if (--entry->tmr == 0) { + if (++entry->retries == DNS_MAX_RETRIES) { + if (dns_backupserver_available(entry) +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + && !entry->is_mdns +#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ + ) { + /* change of server */ + entry->server_idx++; + entry->tmr = 1; + entry->retries = 0; + } else { + LWIP_DEBUGF(DNS_DEBUG, ("dns_check_entry: \"%s\": timeout\n", entry->name)); + /* call specified callback function if provided */ + dns_call_found(i, NULL); + /* flush this entry */ + entry->state = DNS_STATE_UNUSED; + break; + } + } else { + /* wait longer for the next retry */ + entry->tmr = entry->retries; + } + + /* send DNS packet for this entry */ + err = dns_send(i); + if (err != ERR_OK) { + LWIP_DEBUGF(DNS_DEBUG | LWIP_DBG_LEVEL_WARNING, + ("dns_send returned error: %s\n", lwip_strerr(err))); + } + } + break; + case DNS_STATE_DONE: + /* if the time to live is nul */ + if ((entry->ttl == 0) || (--entry->ttl == 0)) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_check_entry: \"%s\": flush\n", entry->name)); + /* flush this entry, there cannot be any related pending entries in this state */ + entry->state = DNS_STATE_UNUSED; + } + break; + case DNS_STATE_UNUSED: + /* nothing to do */ + break; + default: + LWIP_ASSERT("unknown dns_table entry state:", 0); + break; + } +} + +/** + * Call dns_check_entry for each entry in dns_table - check all entries. + */ +static void +dns_check_entries(void) +{ + u8_t i; + + for (i = 0; i < DNS_TABLE_SIZE; ++i) { + dns_check_entry(i); + } +} + +/** + * Save TTL and call dns_call_found for correct response. + */ +static void +dns_correct_response(u8_t idx, u32_t ttl) +{ + struct dns_table_entry *entry = &dns_table[idx]; + + entry->state = DNS_STATE_DONE; + + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response = ", entry->name)); + ip_addr_debug_print_val(DNS_DEBUG, entry->ipaddr); + LWIP_DEBUGF(DNS_DEBUG, ("\n")); + + /* read the answer resource record's TTL, and maximize it if needed */ + entry->ttl = ttl; + if (entry->ttl > DNS_MAX_TTL) { + entry->ttl = DNS_MAX_TTL; + } + dns_call_found(idx, &entry->ipaddr); + + if (entry->ttl == 0) { + /* RFC 883, page 29: "Zero values are + interpreted to mean that the RR can only be used for the + transaction in progress, and should not be cached." + -> flush this entry now */ + /* entry reused during callback? */ + if (entry->state == DNS_STATE_DONE) { + entry->state = DNS_STATE_UNUSED; + } + } +} + +/** + * Receive input function for DNS response packets arriving for the dns UDP pcb. 
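Because entry->tmr is reloaded with the retry count, the schedule implied by dns_check_entry() backs off roughly linearly; the summary below assumes the default DNS_MAX_RETRIES of 4 and one dns_tmr() tick per second:

/* Retry timeline for a single server: the query goes out at t=0 and is
 * retransmitted at about t=1s, 2s and 4s (tmr = retries after each timeout);
 * once retries reaches DNS_MAX_RETRIES, around t=7s, the next configured
 * server is tried via dns_backupserver_available() or the request times out. */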
+ */ +static void +dns_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) +{ + u8_t i; + u16_t txid; + u16_t res_idx; + struct dns_hdr hdr; + struct dns_answer ans; + struct dns_query qry; + u16_t nquestions, nanswers; + + LWIP_UNUSED_ARG(arg); + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(port); + + /* is the dns message big enough ? */ + if (p->tot_len < (SIZEOF_DNS_HDR + SIZEOF_DNS_QUERY)) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: pbuf too small\n")); + /* free pbuf and return */ + goto ignore_packet; + } + + /* copy dns payload inside static buffer for processing */ + if (pbuf_copy_partial(p, &hdr, SIZEOF_DNS_HDR, 0) == SIZEOF_DNS_HDR) { + /* Match the ID in the DNS header with the name table. */ + txid = lwip_htons(hdr.id); + for (i = 0; i < DNS_TABLE_SIZE; i++) { + struct dns_table_entry *entry = &dns_table[i]; + if ((entry->state == DNS_STATE_ASKING) && + (entry->txid == txid)) { + + /* We only care about the question(s) and the answers. The authrr + and the extrarr are simply discarded. */ + nquestions = lwip_htons(hdr.numquestions); + nanswers = lwip_htons(hdr.numanswers); + + /* Check for correct response. */ + if ((hdr.flags1 & DNS_FLAG1_RESPONSE) == 0) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": not a response\n", entry->name)); + goto ignore_packet; /* ignore this packet */ + } + if (nquestions != 1) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response not match to query\n", entry->name)); + goto ignore_packet; /* ignore this packet */ + } + +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + if (!entry->is_mdns) +#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ + { + /* Check whether response comes from the same network address to which the + question was sent. (RFC 5452) */ + if (!ip_addr_cmp(addr, &dns_servers[entry->server_idx])) { + goto ignore_packet; /* ignore this packet */ + } + } + + /* Check if the name in the "question" part match with the name in the entry and + skip it if equal. */ + res_idx = dns_compare_name(entry->name, p, SIZEOF_DNS_HDR); + if (res_idx == 0xFFFF) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response not match to query\n", entry->name)); + goto ignore_packet; /* ignore this packet */ + } + + /* check if "question" part matches the request */ + if (pbuf_copy_partial(p, &qry, SIZEOF_DNS_QUERY, res_idx) != SIZEOF_DNS_QUERY) { + goto ignore_packet; /* ignore this packet */ + } + if ((qry.cls != PP_HTONS(DNS_RRCLASS_IN)) || + (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype) && (qry.type != PP_HTONS(DNS_RRTYPE_AAAA))) || + (!LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype) && (qry.type != PP_HTONS(DNS_RRTYPE_A)))) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response not match to query\n", entry->name)); + goto ignore_packet; /* ignore this packet */ + } + /* skip the rest of the "question" part */ + if (res_idx + SIZEOF_DNS_QUERY > 0xFFFF) { + goto ignore_packet; + } + res_idx = (u16_t)(res_idx + SIZEOF_DNS_QUERY); + + /* Check for error. If so, call callback to inform. 
*/ + if (hdr.flags2 & DNS_FLAG2_ERR_MASK) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": error in flags\n", entry->name)); + + /* if there is another backup DNS server to try + * then don't stop the DNS request + */ + if (dns_backupserver_available(entry)) { + /* avoid retrying the same server */ + entry->retries = DNS_MAX_RETRIES-1; + entry->tmr = 1; + + /* contact next available server for this entry */ + dns_check_entry(i); + + goto ignore_packet; + } + } else { + while ((nanswers > 0) && (res_idx < p->tot_len)) { + /* skip answer resource record's host name */ + res_idx = dns_skip_name(p, res_idx); + if (res_idx == 0xFFFF) { + goto ignore_packet; /* ignore this packet */ + } + + /* Check for IP address type and Internet class. Others are discarded. */ + if (pbuf_copy_partial(p, &ans, SIZEOF_DNS_ANSWER, res_idx) != SIZEOF_DNS_ANSWER) { + goto ignore_packet; /* ignore this packet */ + } + if (res_idx + SIZEOF_DNS_ANSWER > 0xFFFF) { + goto ignore_packet; + } + res_idx = (u16_t)(res_idx + SIZEOF_DNS_ANSWER); + + if (ans.cls == PP_HTONS(DNS_RRCLASS_IN)) { +#if LWIP_IPV4 + if ((ans.type == PP_HTONS(DNS_RRTYPE_A)) && (ans.len == PP_HTONS(sizeof(ip4_addr_t)))) { +#if LWIP_IPV4 && LWIP_IPV6 + if (!LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + { + ip4_addr_t ip4addr; + /* read the IP address after answer resource record's header */ + if (pbuf_copy_partial(p, &ip4addr, sizeof(ip4_addr_t), res_idx) != sizeof(ip4_addr_t)) { + goto ignore_packet; /* ignore this packet */ + } + ip_addr_copy_from_ip4(dns_table[i].ipaddr, ip4addr); + pbuf_free(p); + /* handle correct response */ + dns_correct_response(i, lwip_ntohl(ans.ttl)); + return; + } + } +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + if ((ans.type == PP_HTONS(DNS_RRTYPE_AAAA)) && (ans.len == PP_HTONS(sizeof(ip6_addr_p_t)))) { +#if LWIP_IPV4 && LWIP_IPV6 + if (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + { + ip6_addr_p_t ip6addr; + /* read the IP address after answer resource record's header */ + if (pbuf_copy_partial(p, &ip6addr, sizeof(ip6_addr_p_t), res_idx) != sizeof(ip6_addr_p_t)) { + goto ignore_packet; /* ignore this packet */ + } + /* @todo: scope ip6addr? Might be required for link-local addresses at least? 
*/ + ip_addr_copy_from_ip6_packed(dns_table[i].ipaddr, ip6addr); + pbuf_free(p); + /* handle correct response */ + dns_correct_response(i, lwip_ntohl(ans.ttl)); + return; + } + } +#endif /* LWIP_IPV6 */ + } + /* skip this answer */ + if ((int)(res_idx + lwip_htons(ans.len)) > 0xFFFF) { + goto ignore_packet; /* ignore this packet */ + } + res_idx = (u16_t)(res_idx + lwip_htons(ans.len)); + --nanswers; + } +#if LWIP_IPV4 && LWIP_IPV6 + if ((entry->reqaddrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) || + (entry->reqaddrtype == LWIP_DNS_ADDRTYPE_IPV6_IPV4)) { + if (entry->reqaddrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) { + /* IPv4 failed, try IPv6 */ + dns_table[i].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV6; + } else { + /* IPv6 failed, try IPv4 */ + dns_table[i].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV4; + } + pbuf_free(p); + dns_table[i].state = DNS_STATE_NEW; + dns_check_entry(i); + return; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": error in response\n", entry->name)); + } + /* call callback to indicate error, clean up memory and return */ + pbuf_free(p); + dns_call_found(i, NULL); + dns_table[i].state = DNS_STATE_UNUSED; + return; + } + } + } + +ignore_packet: + /* deallocate memory and return */ + pbuf_free(p); + return; +} + +/** + * Queues a new hostname to resolve and sends out a DNS query for that hostname + * + * @param name the hostname that is to be queried + * @param hostnamelen length of the hostname + * @param found a callback function to be called on success, failure or timeout + * @param callback_arg argument to pass to the callback function + * @return err_t return code. + */ +static err_t +dns_enqueue(const char *name, size_t hostnamelen, dns_found_callback found, + void *callback_arg LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype) LWIP_DNS_ISMDNS_ARG(u8_t is_mdns)) +{ + u8_t i; + u8_t lseq, lseqi; + struct dns_table_entry *entry = NULL; + size_t namelen; + struct dns_req_entry *req; + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) + u8_t r; + /* check for duplicate entries */ + for (i = 0; i < DNS_TABLE_SIZE; i++) { + if ((dns_table[i].state == DNS_STATE_ASKING) && + (lwip_strnicmp(name, dns_table[i].name, sizeof(dns_table[i].name)) == 0)) { +#if LWIP_IPV4 && LWIP_IPV6 + if (dns_table[i].reqaddrtype != dns_addrtype) { + /* requested address types don't match + this can lead to 2 concurrent requests, but mixing the address types + for the same host should not be that common */ + continue; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + /* this is a duplicate entry, find a free request entry */ + for (r = 0; r < DNS_MAX_REQUESTS; r++) { + if (dns_requests[r].found == 0) { + dns_requests[r].found = found; + dns_requests[r].arg = callback_arg; + dns_requests[r].dns_table_idx = i; + LWIP_DNS_SET_ADDRTYPE(dns_requests[r].reqaddrtype, dns_addrtype); + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": duplicate request\n", name)); + return ERR_INPROGRESS; + } + } + } + } + /* no duplicate entries found */ +#endif + + /* search an unused entry, or the oldest one */ + lseq = 0; + lseqi = DNS_TABLE_SIZE; + for (i = 0; i < DNS_TABLE_SIZE; ++i) { + entry = &dns_table[i]; + /* is it an unused entry ? 
*/ + if (entry->state == DNS_STATE_UNUSED) { + break; + } + /* check if this is the oldest completed entry */ + if (entry->state == DNS_STATE_DONE) { + u8_t age = (u8_t)(dns_seqno - entry->seqno); + if (age > lseq) { + lseq = age; + lseqi = i; + } + } + } + + /* if we don't have found an unused entry, use the oldest completed one */ + if (i == DNS_TABLE_SIZE) { + if ((lseqi >= DNS_TABLE_SIZE) || (dns_table[lseqi].state != DNS_STATE_DONE)) { + /* no entry can be used now, table is full */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": DNS entries table is full\n", name)); + return ERR_MEM; + } else { + /* use the oldest completed one */ + i = lseqi; + entry = &dns_table[i]; + } + } + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) + /* find a free request entry */ + req = NULL; + for (r = 0; r < DNS_MAX_REQUESTS; r++) { + if (dns_requests[r].found == NULL) { + req = &dns_requests[r]; + break; + } + } + if (req == NULL) { + /* no request entry can be used now, table is full */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": DNS request entries table is full\n", name)); + return ERR_MEM; + } + req->dns_table_idx = i; +#else + /* in this configuration, the entry index is the same as the request index */ + req = &dns_requests[i]; +#endif + + /* use this entry */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": use DNS entry %"U16_F"\n", name, (u16_t)(i))); + + /* fill the entry */ + entry->state = DNS_STATE_NEW; + entry->seqno = dns_seqno; + LWIP_DNS_SET_ADDRTYPE(entry->reqaddrtype, dns_addrtype); + LWIP_DNS_SET_ADDRTYPE(req->reqaddrtype, dns_addrtype); + req->found = found; + req->arg = callback_arg; + namelen = LWIP_MIN(hostnamelen, DNS_MAX_NAME_LENGTH - 1); + MEMCPY(entry->name, name, namelen); + entry->name[namelen] = 0; + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) + entry->pcb_idx = dns_alloc_pcb(); + if (entry->pcb_idx >= DNS_MAX_SOURCE_PORTS) { + /* failed to get a UDP pcb */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": failed to allocate a pcb\n", name)); + entry->state = DNS_STATE_UNUSED; + req->found = NULL; + return ERR_MEM; + } + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": use DNS pcb %"U16_F"\n", name, (u16_t)(entry->pcb_idx))); +#endif + +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + entry->is_mdns = is_mdns; +#endif + + dns_seqno++; + + /* force to send query without waiting timer */ + dns_check_entry(i); + + /* dns query is enqueued */ + return ERR_INPROGRESS; +} + +/** + * @ingroup dns + * Resolve a hostname (string) into an IP address. + * NON-BLOCKING callback version for use with raw API!!! + * + * Returns immediately with one of err_t return codes: + * - ERR_OK if hostname is a valid IP address string or the host + * name is already in the local names table. + * - ERR_INPROGRESS enqueue a request to be sent to the DNS server + * for resolution if no errors are present. + * - ERR_ARG: dns client not initialized or invalid hostname + * + * @param hostname the hostname that is to be queried + * @param addr pointer to a ip_addr_t where to store the address if it is already + * cached in the dns_table (only valid if ERR_OK is returned!) + * @param found a callback function to be called on success, failure or timeout (only if + * ERR_INPROGRESS is returned!) + * @param callback_arg argument to pass to the callback function + * @return a err_t return code. 
+ */ +err_t +dns_gethostbyname(const char *hostname, ip_addr_t *addr, dns_found_callback found, + void *callback_arg) +{ + return dns_gethostbyname_addrtype(hostname, addr, found, callback_arg, LWIP_DNS_ADDRTYPE_DEFAULT); +} + +/** + * @ingroup dns + * Like dns_gethostbyname, but returned address type can be controlled: + * @param hostname the hostname that is to be queried + * @param addr pointer to a ip_addr_t where to store the address if it is already + * cached in the dns_table (only valid if ERR_OK is returned!) + * @param found a callback function to be called on success, failure or timeout (only if + * ERR_INPROGRESS is returned!) + * @param callback_arg argument to pass to the callback function + * @param dns_addrtype - LWIP_DNS_ADDRTYPE_IPV4_IPV6: try to resolve IPv4 first, try IPv6 if IPv4 fails only + * - LWIP_DNS_ADDRTYPE_IPV6_IPV4: try to resolve IPv6 first, try IPv4 if IPv6 fails only + * - LWIP_DNS_ADDRTYPE_IPV4: try to resolve IPv4 only + * - LWIP_DNS_ADDRTYPE_IPV6: try to resolve IPv6 only + */ +err_t +dns_gethostbyname_addrtype(const char *hostname, ip_addr_t *addr, dns_found_callback found, + void *callback_arg, u8_t dns_addrtype) +{ + size_t hostnamelen; +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + u8_t is_mdns; +#endif + /* not initialized or no valid server yet, or invalid addr pointer + * or invalid hostname or invalid hostname length */ + if ((addr == NULL) || + (!hostname) || (!hostname[0])) { + return ERR_ARG; + } +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) == 0) + if (dns_pcbs[0] == NULL) { + return ERR_ARG; + } +#endif + hostnamelen = strlen(hostname); + if (hostnamelen >= DNS_MAX_NAME_LENGTH) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_gethostbyname: name too long to resolve")); + return ERR_ARG; + } + + +#if LWIP_HAVE_LOOPIF + if (strcmp(hostname, "localhost") == 0) { + ip_addr_set_loopback(LWIP_DNS_ADDRTYPE_IS_IPV6(dns_addrtype), addr); + return ERR_OK; + } +#endif /* LWIP_HAVE_LOOPIF */ + + /* host name already in octet notation? set ip addr and return ERR_OK */ + if (ipaddr_aton(hostname, addr)) { +#if LWIP_IPV4 && LWIP_IPV6 + if ((IP_IS_V6(addr) && (dns_addrtype != LWIP_DNS_ADDRTYPE_IPV4)) || + (IP_IS_V4(addr) && (dns_addrtype != LWIP_DNS_ADDRTYPE_IPV6))) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + { + return ERR_OK; + } + } + /* already have this address cached? 
*/ + if (dns_lookup(hostname, addr LWIP_DNS_ADDRTYPE_ARG(dns_addrtype)) == ERR_OK) { + return ERR_OK; + } +#if LWIP_IPV4 && LWIP_IPV6 + if ((dns_addrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) || (dns_addrtype == LWIP_DNS_ADDRTYPE_IPV6_IPV4)) { + /* fallback to 2nd IP type and try again to lookup */ + u8_t fallback; + if (dns_addrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) { + fallback = LWIP_DNS_ADDRTYPE_IPV6; + } else { + fallback = LWIP_DNS_ADDRTYPE_IPV4; + } + if (dns_lookup(hostname, addr LWIP_DNS_ADDRTYPE_ARG(fallback)) == ERR_OK) { + return ERR_OK; + } + } +#else /* LWIP_IPV4 && LWIP_IPV6 */ + LWIP_UNUSED_ARG(dns_addrtype); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + if (strstr(hostname, ".local") == &hostname[hostnamelen] - 6) { + is_mdns = 1; + } else { + is_mdns = 0; + } + + if (!is_mdns) +#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ + { + /* prevent calling found callback if no server is set, return error instead */ + if (ip_addr_isany_val(dns_servers[0])) { + return ERR_VAL; + } + } + + /* queue query with specified callback */ + return dns_enqueue(hostname, hostnamelen, found, callback_arg LWIP_DNS_ADDRTYPE_ARG(dns_addrtype) + LWIP_DNS_ISMDNS_ARG(is_mdns)); +} + +#endif /* LWIP_DNS */ diff --git a/Middlewares/Third_Party/LwIP/src/core/inet_chksum.c b/Middlewares/Third_Party/LwIP/src/core/inet_chksum.c new file mode 100644 index 0000000..818c68f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/inet_chksum.c @@ -0,0 +1,608 @@ +/** + * @file + * Internet checksum functions.\n + * + * These are some reference implementations of the checksum algorithm, with the + * aim of being simple, correct and fully portable. Checksumming is the + * first thing you would want to optimize for your platform. If you create + * your own version, link it in and in your cc.h put: + * + * \#define LWIP_CHKSUM your_checksum_routine + * + * Or you can select from the implementations below by defining + * LWIP_CHKSUM_ALGORITHM to 1, 2 or 3. + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
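A note on using the resolver defined above: dns_gethostbyname()/dns_gethostbyname_addrtype() return ERR_OK only when the name is an IP literal, "localhost", or already cached, and otherwise return ERR_INPROGRESS and deliver the result later through the dns_found_callback. A minimal raw-API caller could look like the sketch below; the hostname and the example_* names are placeholders, and the call is assumed to be made from the lwIP core context.

/* Illustrative sketch (not part of the patch): callback-style use of the DNS
 * client defined above.  "host.example.org" and the example_* names are
 * placeholders; error handling is reduced to comments. */
#include <stdio.h>
#include "lwip/dns.h"
#include "lwip/ip_addr.h"
#include "lwip/err.h"

static void example_dns_found(const char *name, const ip_addr_t *ipaddr, void *arg)
{
  LWIP_UNUSED_ARG(arg);
  if (ipaddr != NULL) {
    printf("%s resolved to %s\n", name, ipaddr_ntoa(ipaddr));  /* success */
  } else {
    printf("%s: resolution failed or timed out\n", name);      /* failure or timeout */
  }
}

static void example_resolve(void)
{
  ip_addr_t addr;
  err_t err = dns_gethostbyname("host.example.org", &addr, example_dns_found, NULL);
  if (err == ERR_OK) {
    /* literal address or cache hit: addr is valid right away, no callback */
  } else if (err == ERR_INPROGRESS) {
    /* query enqueued: example_dns_found() is called on success, failure or timeout */
  } else {
    /* ERR_ARG, ERR_VAL or ERR_MEM: nothing was queued */
  }
}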
+ * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#include "lwip/inet_chksum.h" +#include "lwip/def.h" +#include "lwip/ip_addr.h" + +#include + +#ifndef LWIP_CHKSUM +# define LWIP_CHKSUM lwip_standard_chksum +# ifndef LWIP_CHKSUM_ALGORITHM +# define LWIP_CHKSUM_ALGORITHM 2 +# endif +u16_t lwip_standard_chksum(const void *dataptr, int len); +#endif +/* If none set: */ +#ifndef LWIP_CHKSUM_ALGORITHM +# define LWIP_CHKSUM_ALGORITHM 0 +#endif + +#if (LWIP_CHKSUM_ALGORITHM == 1) /* Version #1 */ +/** + * lwip checksum + * + * @param dataptr points to start of data to be summed at any boundary + * @param len length of data to be summed + * @return host order (!) lwip checksum (non-inverted Internet sum) + * + * @note accumulator size limits summable length to 64k + * @note host endianess is irrelevant (p3 RFC1071) + */ +u16_t +lwip_standard_chksum(const void *dataptr, int len) +{ + u32_t acc; + u16_t src; + const u8_t *octetptr; + + acc = 0; + /* dataptr may be at odd or even addresses */ + octetptr = (const u8_t *)dataptr; + while (len > 1) { + /* declare first octet as most significant + thus assume network order, ignoring host order */ + src = (*octetptr) << 8; + octetptr++; + /* declare second octet as least significant */ + src |= (*octetptr); + octetptr++; + acc += src; + len -= 2; + } + if (len > 0) { + /* accumulate remaining octet */ + src = (*octetptr) << 8; + acc += src; + } + /* add deferred carry bits */ + acc = (acc >> 16) + (acc & 0x0000ffffUL); + if ((acc & 0xffff0000UL) != 0) { + acc = (acc >> 16) + (acc & 0x0000ffffUL); + } + /* This maybe a little confusing: reorder sum using lwip_htons() + instead of lwip_ntohs() since it has a little less call overhead. + The caller must invert bits for Internet sum ! */ + return lwip_htons((u16_t)acc); +} +#endif + +#if (LWIP_CHKSUM_ALGORITHM == 2) /* Alternative version #2 */ +/* + * Curt McDowell + * Broadcom Corp. + * csm@broadcom.com + * + * IP checksum two bytes at a time with support for + * unaligned buffer. + * Works for len up to and including 0x20000. + * by Curt McDowell, Broadcom Corp. 12/08/2005 + * + * @param dataptr points to start of data to be summed at any boundary + * @param len length of data to be summed + * @return host order (!) lwip checksum (non-inverted Internet sum) + */ +u16_t +lwip_standard_chksum(const void *dataptr, int len) +{ + const u8_t *pb = (const u8_t *)dataptr; + const u16_t *ps; + u16_t t = 0; + u32_t sum = 0; + int odd = ((mem_ptr_t)pb & 1); + + /* Get aligned to u16_t */ + if (odd && len > 0) { + ((u8_t *)&t)[1] = *pb++; + len--; + } + + /* Add the bulk of the data */ + ps = (const u16_t *)(const void *)pb; + while (len > 1) { + sum += *ps++; + len -= 2; + } + + /* Consume left-over byte, if any */ + if (len > 0) { + ((u8_t *)&t)[0] = *(const u8_t *)ps; + } + + /* Add end bytes */ + sum += t; + + /* Fold 32-bit sum to 16 bits + calling this twice is probably faster than if statements... */ + sum = FOLD_U32T(sum); + sum = FOLD_U32T(sum); + + /* Swap if alignment was odd */ + if (odd) { + sum = SWAP_BYTES_IN_WORD(sum); + } + + return (u16_t)sum; +} +#endif + +#if (LWIP_CHKSUM_ALGORITHM == 3) /* Alternative version #3 */ +/** + * An optimized checksum routine. Basically, it uses loop-unrolling on + * the checksum loop, treating the head and tail bytes specially, whereas + * the inner loop acts on 8 bytes at a time. + * + * @arg start of buffer to be checksummed. May be an odd byte address. + * @len number of bytes in the buffer to be checksummed. + * @return host order (!) 
lwip checksum (non-inverted Internet sum) + * + * by Curt McDowell, Broadcom Corp. December 8th, 2005 + */ +u16_t +lwip_standard_chksum(const void *dataptr, int len) +{ + const u8_t *pb = (const u8_t *)dataptr; + const u16_t *ps; + u16_t t = 0; + const u32_t *pl; + u32_t sum = 0, tmp; + /* starts at odd byte address? */ + int odd = ((mem_ptr_t)pb & 1); + + if (odd && len > 0) { + ((u8_t *)&t)[1] = *pb++; + len--; + } + + ps = (const u16_t *)(const void *)pb; + + if (((mem_ptr_t)ps & 3) && len > 1) { + sum += *ps++; + len -= 2; + } + + pl = (const u32_t *)(const void *)ps; + + while (len > 7) { + tmp = sum + *pl++; /* ping */ + if (tmp < sum) { + tmp++; /* add back carry */ + } + + sum = tmp + *pl++; /* pong */ + if (sum < tmp) { + sum++; /* add back carry */ + } + + len -= 8; + } + + /* make room in upper bits */ + sum = FOLD_U32T(sum); + + ps = (const u16_t *)pl; + + /* 16-bit aligned word remaining? */ + while (len > 1) { + sum += *ps++; + len -= 2; + } + + /* dangling tail byte remaining? */ + if (len > 0) { /* include odd byte */ + ((u8_t *)&t)[0] = *(const u8_t *)ps; + } + + sum += t; /* add end bytes */ + + /* Fold 32-bit sum to 16 bits + calling this twice is probably faster than if statements... */ + sum = FOLD_U32T(sum); + sum = FOLD_U32T(sum); + + if (odd) { + sum = SWAP_BYTES_IN_WORD(sum); + } + + return (u16_t)sum; +} +#endif + +/** Parts of the pseudo checksum which are common to IPv4 and IPv6 */ +static u16_t +inet_cksum_pseudo_base(struct pbuf *p, u8_t proto, u16_t proto_len, u32_t acc) +{ + struct pbuf *q; + int swapped = 0; + + /* iterate through all pbuf in chain */ + for (q = p; q != NULL; q = q->next) { + LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): checksumming pbuf %p (has next %p) \n", + (void *)q, (void *)q->next)); + acc += LWIP_CHKSUM(q->payload, q->len); + /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): unwrapped lwip_chksum()=%"X32_F" \n", acc));*/ + /* just executing this next line is probably faster that the if statement needed + to check whether we really need to execute it, and does no harm */ + acc = FOLD_U32T(acc); + if (q->len % 2 != 0) { + swapped = !swapped; + acc = SWAP_BYTES_IN_WORD(acc); + } + /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): wrapped lwip_chksum()=%"X32_F" \n", acc));*/ + } + + if (swapped) { + acc = SWAP_BYTES_IN_WORD(acc); + } + + acc += (u32_t)lwip_htons((u16_t)proto); + acc += (u32_t)lwip_htons(proto_len); + + /* Fold 32-bit sum to 16 bits + calling this twice is probably faster than if statements... */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): pbuf chain lwip_chksum()=%"X32_F"\n", acc)); + return (u16_t)~(acc & 0xffffUL); +} + +#if LWIP_IPV4 +/* inet_chksum_pseudo: + * + * Calculates the IPv4 pseudo Internet checksum used by TCP and UDP for a pbuf chain. + * IP addresses are expected to be in network byte order. 
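All three lwip_standard_chksum() variants above compute the same non-inverted 16-bit one's-complement sum over the buffer, differing only in how many bytes they consume per step; callers invert the result to obtain the Internet checksum. The following self-contained sketch shows the arithmetic behind them (byte-order handling and odd-length tails omitted for brevity), for readers who want to verify a value by hand.

/* Illustrative sketch (not part of the patch): plain RFC 1071 summation,
 * showing the arithmetic used by the variants above (even length assumed). */
#include <stdint.h>
#include <stddef.h>

static uint16_t example_inet_sum(const uint8_t *data, size_t len)
{
  uint32_t acc = 0;
  while (len > 1) {
    acc += (uint32_t)((data[0] << 8) | data[1]);  /* one 16-bit word, network order */
    data += 2;
    len  -= 2;
  }
  while (acc >> 16) {                             /* fold carries back into 16 bits */
    acc = (acc >> 16) + (acc & 0xffffUL);
  }
  return (uint16_t)acc;                           /* caller inverts: checksum = ~sum */
}

/* Example: {0x45, 0x00, 0x00, 0x3c} -> 0x4500 + 0x003c = 0x453c,
 * so the transmitted checksum would be ~0x453c = 0xbac3. */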
+ * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param src source ip address (used for checksum of pseudo header) + * @param dst destination ip address (used for checksum of pseudo header) + * @param proto ip protocol (used for checksum of pseudo header) + * @param proto_len length of the ip data part (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +inet_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip4_addr_t *src, const ip4_addr_t *dest) +{ + u32_t acc; + u32_t addr; + + addr = ip4_addr_get_u32(src); + acc = (addr & 0xffffUL); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + addr = ip4_addr_get_u32(dest); + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + /* fold down to 16 bits */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + + return inet_cksum_pseudo_base(p, proto, proto_len, acc); +} +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +/** + * Calculates the checksum with IPv6 pseudo header used by TCP and UDP for a pbuf chain. + * IPv6 addresses are expected to be in network byte order. + * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param proto ipv6 protocol/next header (used for checksum of pseudo header) + * @param proto_len length of the ipv6 payload (used for checksum of pseudo header) + * @param src source ipv6 address (used for checksum of pseudo header) + * @param dest destination ipv6 address (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +ip6_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip6_addr_t *src, const ip6_addr_t *dest) +{ + u32_t acc = 0; + u32_t addr; + u8_t addr_part; + + for (addr_part = 0; addr_part < 4; addr_part++) { + addr = src->addr[addr_part]; + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + addr = dest->addr[addr_part]; + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + } + /* fold down to 16 bits */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + + return inet_cksum_pseudo_base(p, proto, proto_len, acc); +} +#endif /* LWIP_IPV6 */ + +/* ip_chksum_pseudo: + * + * Calculates the IPv4 or IPv6 pseudo Internet checksum used by TCP and UDP for a pbuf chain. + * IP addresses are expected to be in network byte order. 
+ * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param src source ip address (used for checksum of pseudo header) + * @param dst destination ip address (used for checksum of pseudo header) + * @param proto ip protocol (used for checksum of pseudo header) + * @param proto_len length of the ip data part (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +ip_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip_addr_t *src, const ip_addr_t *dest) +{ +#if LWIP_IPV6 + if (IP_IS_V6(dest)) { + return ip6_chksum_pseudo(p, proto, proto_len, ip_2_ip6(src), ip_2_ip6(dest)); + } +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 && LWIP_IPV6 + else +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_IPV4 + { + return inet_chksum_pseudo(p, proto, proto_len, ip_2_ip4(src), ip_2_ip4(dest)); + } +#endif /* LWIP_IPV4 */ +} + +/** Parts of the pseudo checksum which are common to IPv4 and IPv6 */ +static u16_t +inet_cksum_pseudo_partial_base(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, u32_t acc) +{ + struct pbuf *q; + int swapped = 0; + u16_t chklen; + + /* iterate through all pbuf in chain */ + for (q = p; (q != NULL) && (chksum_len > 0); q = q->next) { + LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): checksumming pbuf %p (has next %p) \n", + (void *)q, (void *)q->next)); + chklen = q->len; + if (chklen > chksum_len) { + chklen = chksum_len; + } + acc += LWIP_CHKSUM(q->payload, chklen); + chksum_len = (u16_t)(chksum_len - chklen); + LWIP_ASSERT("delete me", chksum_len < 0x7fff); + /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): unwrapped lwip_chksum()=%"X32_F" \n", acc));*/ + /* fold the upper bit down */ + acc = FOLD_U32T(acc); + if (q->len % 2 != 0) { + swapped = !swapped; + acc = SWAP_BYTES_IN_WORD(acc); + } + /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): wrapped lwip_chksum()=%"X32_F" \n", acc));*/ + } + + if (swapped) { + acc = SWAP_BYTES_IN_WORD(acc); + } + + acc += (u32_t)lwip_htons((u16_t)proto); + acc += (u32_t)lwip_htons(proto_len); + + /* Fold 32-bit sum to 16 bits + calling this twice is probably faster than if statements... */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): pbuf chain lwip_chksum()=%"X32_F"\n", acc)); + return (u16_t)~(acc & 0xffffUL); +} + +#if LWIP_IPV4 +/* inet_chksum_pseudo_partial: + * + * Calculates the IPv4 pseudo Internet checksum used by TCP and UDP for a pbuf chain. + * IP addresses are expected to be in network byte order. 
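The ip_chksum_pseudo() dispatcher defined just above is what the transport layers use: they hand it the datagram pbuf plus the pseudo-header fields and store the returned value directly in their header. A hedged sketch for UDP follows, assuming the pbuf starts at the UDP header and covers the whole datagram; the wrapper function itself and its setup are illustrative, not lwIP code.

/* Illustrative sketch (not part of the patch): filling a UDP checksum with the
 * pseudo-header helper defined above.  'p' is assumed to start at the UDP
 * header and to cover the whole datagram. */
#include "lwip/inet_chksum.h"
#include "lwip/pbuf.h"
#include "lwip/prot/udp.h"
#include "lwip/prot/ip.h"   /* IP_PROTO_UDP */

static void example_fill_udp_chksum(struct pbuf *p,
                                    const ip_addr_t *src, const ip_addr_t *dst)
{
  struct udp_hdr *hdr = (struct udp_hdr *)p->payload;
  hdr->chksum = 0;                                   /* field must be zero while summing */
  hdr->chksum = ip_chksum_pseudo(p, IP_PROTO_UDP, p->tot_len, src, dst);
  if (hdr->chksum == 0x0000) {
    hdr->chksum = 0xffff;                            /* UDP: a transmitted 0 means "no checksum" */
  }
}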
+ * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param src source ip address (used for checksum of pseudo header) + * @param dst destination ip address (used for checksum of pseudo header) + * @param proto ip protocol (used for checksum of pseudo header) + * @param proto_len length of the ip data part (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +inet_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip4_addr_t *src, const ip4_addr_t *dest) +{ + u32_t acc; + u32_t addr; + + addr = ip4_addr_get_u32(src); + acc = (addr & 0xffffUL); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + addr = ip4_addr_get_u32(dest); + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + /* fold down to 16 bits */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + + return inet_cksum_pseudo_partial_base(p, proto, proto_len, chksum_len, acc); +} +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +/** + * Calculates the checksum with IPv6 pseudo header used by TCP and UDP for a pbuf chain. + * IPv6 addresses are expected to be in network byte order. Will only compute for a + * portion of the payload. + * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param proto ipv6 protocol/next header (used for checksum of pseudo header) + * @param proto_len length of the ipv6 payload (used for checksum of pseudo header) + * @param chksum_len number of payload bytes used to compute chksum + * @param src source ipv6 address (used for checksum of pseudo header) + * @param dest destination ipv6 address (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +ip6_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip6_addr_t *src, const ip6_addr_t *dest) +{ + u32_t acc = 0; + u32_t addr; + u8_t addr_part; + + for (addr_part = 0; addr_part < 4; addr_part++) { + addr = src->addr[addr_part]; + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + addr = dest->addr[addr_part]; + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + } + /* fold down to 16 bits */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + + return inet_cksum_pseudo_partial_base(p, proto, proto_len, chksum_len, acc); +} +#endif /* LWIP_IPV6 */ + +/* ip_chksum_pseudo_partial: + * + * Calculates the IPv4 or IPv6 pseudo Internet checksum used by TCP and UDP for a pbuf chain. 
+ * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param src source ip address (used for checksum of pseudo header) + * @param dst destination ip address (used for checksum of pseudo header) + * @param proto ip protocol (used for checksum of pseudo header) + * @param proto_len length of the ip data part (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +ip_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip_addr_t *src, const ip_addr_t *dest) +{ +#if LWIP_IPV6 + if (IP_IS_V6(dest)) { + return ip6_chksum_pseudo_partial(p, proto, proto_len, chksum_len, ip_2_ip6(src), ip_2_ip6(dest)); + } +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 && LWIP_IPV6 + else +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_IPV4 + { + return inet_chksum_pseudo_partial(p, proto, proto_len, chksum_len, ip_2_ip4(src), ip_2_ip4(dest)); + } +#endif /* LWIP_IPV4 */ +} + +/* inet_chksum: + * + * Calculates the Internet checksum over a portion of memory. Used primarily for IP + * and ICMP. + * + * @param dataptr start of the buffer to calculate the checksum (no alignment needed) + * @param len length of the buffer to calculate the checksum + * @return checksum (as u16_t) to be saved directly in the protocol header + */ + +u16_t +inet_chksum(const void *dataptr, u16_t len) +{ + return (u16_t)~(unsigned int)LWIP_CHKSUM(dataptr, len); +} + +/** + * Calculate a checksum over a chain of pbufs (without pseudo-header, much like + * inet_chksum only pbufs are used). + * + * @param p pbuf chain over that the checksum should be calculated + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +inet_chksum_pbuf(struct pbuf *p) +{ + u32_t acc; + struct pbuf *q; + int swapped = 0; + + acc = 0; + for (q = p; q != NULL; q = q->next) { + acc += LWIP_CHKSUM(q->payload, q->len); + acc = FOLD_U32T(acc); + if (q->len % 2 != 0) { + swapped = !swapped; + acc = SWAP_BYTES_IN_WORD(acc); + } + } + + if (swapped) { + acc = SWAP_BYTES_IN_WORD(acc); + } + return (u16_t)~(acc & 0xffffUL); +} + +/* These are some implementations for LWIP_CHKSUM_COPY, which copies data + * like MEMCPY but generates a checksum at the same time. Since this is a + * performance-sensitive function, you might want to create your own version + * in assembly targeted at your hardware by defining it in lwipopts.h: + * #define LWIP_CHKSUM_COPY(dst, src, len) your_chksum_copy(dst, src, len) + */ + +#if (LWIP_CHKSUM_COPY_ALGORITHM == 1) /* Version #1 */ +/** Safe but slow: first call MEMCPY, then call LWIP_CHKSUM. + * For architectures with big caches, data might still be in cache when + * generating the checksum after copying. + */ +u16_t +lwip_chksum_copy(void *dst, const void *src, u16_t len) +{ + MEMCPY(dst, src, len); + return LWIP_CHKSUM(dst, len); +} +#endif /* (LWIP_CHKSUM_COPY_ALGORITHM == 1) */ diff --git a/Middlewares/Third_Party/LwIP/src/core/init.c b/Middlewares/Third_Party/LwIP/src/core/init.c new file mode 100644 index 0000000..b3737a3 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/init.c @@ -0,0 +1,380 @@ +/** + * @file + * Modules initialization + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + */ + +#include "lwip/opt.h" + +#include "lwip/init.h" +#include "lwip/stats.h" +#include "lwip/sys.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/sockets.h" +#include "lwip/ip.h" +#include "lwip/raw.h" +#include "lwip/udp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/igmp.h" +#include "lwip/dns.h" +#include "lwip/timeouts.h" +#include "lwip/etharp.h" +#include "lwip/ip6.h" +#include "lwip/nd6.h" +#include "lwip/mld6.h" +#include "lwip/api.h" + +#include "netif/ppp/ppp_opts.h" +#include "netif/ppp/ppp_impl.h" + +#ifndef LWIP_SKIP_PACKING_CHECK + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct packed_struct_test { + PACK_STRUCT_FLD_8(u8_t dummy1); + PACK_STRUCT_FIELD(u32_t dummy2); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define PACKED_STRUCT_TEST_EXPECTED_SIZE 5 + +#endif + +/* Compile-time sanity checks for configuration errors. + * These can be done independently of LWIP_DEBUG, without penalty. 
+ */ +#ifndef BYTE_ORDER +#error "BYTE_ORDER is not defined, you have to define it in your cc.h" +#endif +#if (!IP_SOF_BROADCAST && IP_SOF_BROADCAST_RECV) +#error "If you want to use broadcast filter per pcb on recv operations, you have to define IP_SOF_BROADCAST=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP && LWIP_UDPLITE) +#error "If you want to use UDP Lite, you have to define LWIP_UDP=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP && LWIP_DHCP) +#error "If you want to use DHCP, you have to define LWIP_UDP=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP && !LWIP_RAW && LWIP_MULTICAST_TX_OPTIONS) +#error "If you want to use LWIP_MULTICAST_TX_OPTIONS, you have to define LWIP_UDP=1 and/or LWIP_RAW=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP && LWIP_DNS) +#error "If you want to use DNS, you have to define LWIP_UDP=1 in your lwipopts.h" +#endif +#if !MEMP_MEM_MALLOC /* MEMP_NUM_* checks are disabled when not using the pool allocator */ +#if (LWIP_ARP && ARP_QUEUEING && (MEMP_NUM_ARP_QUEUE<=0)) +#error "If you want to use ARP Queueing, you have to define MEMP_NUM_ARP_QUEUE>=1 in your lwipopts.h" +#endif +#if (LWIP_RAW && (MEMP_NUM_RAW_PCB<=0)) +#error "If you want to use RAW, you have to define MEMP_NUM_RAW_PCB>=1 in your lwipopts.h" +#endif +#if (LWIP_UDP && (MEMP_NUM_UDP_PCB<=0)) +#error "If you want to use UDP, you have to define MEMP_NUM_UDP_PCB>=1 in your lwipopts.h" +#endif +#if (LWIP_TCP && (MEMP_NUM_TCP_PCB<=0)) +#error "If you want to use TCP, you have to define MEMP_NUM_TCP_PCB>=1 in your lwipopts.h" +#endif +#if (LWIP_IGMP && (MEMP_NUM_IGMP_GROUP<=1)) +#error "If you want to use IGMP, you have to define MEMP_NUM_IGMP_GROUP>1 in your lwipopts.h" +#endif +#if (LWIP_IGMP && !LWIP_MULTICAST_TX_OPTIONS) +#error "If you want to use IGMP, you have to define LWIP_MULTICAST_TX_OPTIONS==1 in your lwipopts.h" +#endif +#if (LWIP_IGMP && !LWIP_IPV4) +#error "IGMP needs LWIP_IPV4 enabled in your lwipopts.h" +#endif +#if ((LWIP_NETCONN || LWIP_SOCKET) && (MEMP_NUM_TCPIP_MSG_API<=0)) +#error "If you want to use Sequential API, you have to define MEMP_NUM_TCPIP_MSG_API>=1 in your lwipopts.h" +#endif +/* There must be sufficient timeouts, taking into account requirements of the subsystems. */ +#if LWIP_TIMERS && (MEMP_NUM_SYS_TIMEOUT < LWIP_NUM_SYS_TIMEOUT_INTERNAL) +#error "MEMP_NUM_SYS_TIMEOUT is too low to accomodate all required timeouts" +#endif +#if (IP_REASSEMBLY && (MEMP_NUM_REASSDATA > IP_REASS_MAX_PBUFS)) +#error "MEMP_NUM_REASSDATA > IP_REASS_MAX_PBUFS doesn't make sense since each struct ip_reassdata must hold 2 pbufs at least!" +#endif +#endif /* !MEMP_MEM_MALLOC */ +#if LWIP_WND_SCALE +#if (LWIP_TCP && (TCP_WND > 0xffffffff)) +#error "If you want to use TCP, TCP_WND must fit in an u32_t, so, you have to reduce it in your lwipopts.h" +#endif +#if (LWIP_TCP && (TCP_RCV_SCALE > 14)) +#error "The maximum valid window scale value is 14!" +#endif +#if (LWIP_TCP && (TCP_WND > (0xFFFFU << TCP_RCV_SCALE))) +#error "TCP_WND is bigger than the configured LWIP_WND_SCALE allows!" +#endif +#if (LWIP_TCP && ((TCP_WND >> TCP_RCV_SCALE) == 0)) +#error "TCP_WND is too small for the configured LWIP_WND_SCALE (results in zero window)!" 
+#endif +#else /* LWIP_WND_SCALE */ +#if (LWIP_TCP && (TCP_WND > 0xffff)) +#error "If you want to use TCP, TCP_WND must fit in an u16_t, so, you have to reduce it in your lwipopts.h (or enable window scaling)" +#endif +#endif /* LWIP_WND_SCALE */ +#if (LWIP_TCP && (TCP_SND_QUEUELEN > 0xffff)) +#error "If you want to use TCP, TCP_SND_QUEUELEN must fit in an u16_t, so, you have to reduce it in your lwipopts.h" +#endif +#if (LWIP_TCP && (TCP_SND_QUEUELEN < 2)) +#error "TCP_SND_QUEUELEN must be at least 2 for no-copy TCP writes to work" +#endif +#if (LWIP_TCP && ((TCP_MAXRTX > 12) || (TCP_SYNMAXRTX > 12))) +#error "If you want to use TCP, TCP_MAXRTX and TCP_SYNMAXRTX must less or equal to 12 (due to tcp_backoff table), so, you have to reduce them in your lwipopts.h" +#endif +#if (LWIP_TCP && TCP_LISTEN_BACKLOG && ((TCP_DEFAULT_LISTEN_BACKLOG < 0) || (TCP_DEFAULT_LISTEN_BACKLOG > 0xff))) +#error "If you want to use TCP backlog, TCP_DEFAULT_LISTEN_BACKLOG must fit into an u8_t" +#endif +#if (LWIP_TCP && LWIP_TCP_SACK_OUT && !TCP_QUEUE_OOSEQ) +#error "To use LWIP_TCP_SACK_OUT, TCP_QUEUE_OOSEQ needs to be enabled" +#endif +#if (LWIP_TCP && LWIP_TCP_SACK_OUT && (LWIP_TCP_MAX_SACK_NUM < 1)) +#error "LWIP_TCP_MAX_SACK_NUM must be greater than 0" +#endif +#if (LWIP_NETIF_API && (NO_SYS==1)) +#error "If you want to use NETIF API, you have to define NO_SYS=0 in your lwipopts.h" +#endif +#if ((LWIP_SOCKET || LWIP_NETCONN) && (NO_SYS==1)) +#error "If you want to use Sequential API, you have to define NO_SYS=0 in your lwipopts.h" +#endif +#if (LWIP_PPP_API && (NO_SYS==1)) +#error "If you want to use PPP API, you have to define NO_SYS=0 in your lwipopts.h" +#endif +#if (LWIP_PPP_API && (PPP_SUPPORT==0)) +#error "If you want to use PPP API, you have to enable PPP_SUPPORT in your lwipopts.h" +#endif +#if (((!LWIP_DHCP) || (!LWIP_AUTOIP)) && LWIP_DHCP_AUTOIP_COOP) +#error "If you want to use DHCP/AUTOIP cooperation mode, you have to define LWIP_DHCP=1 and LWIP_AUTOIP=1 in your lwipopts.h" +#endif +#if (((!LWIP_DHCP) || (!LWIP_ARP)) && DHCP_DOES_ARP_CHECK) +#error "If you want to use DHCP ARP checking, you have to define LWIP_DHCP=1 and LWIP_ARP=1 in your lwipopts.h" +#endif +#if (!LWIP_ARP && LWIP_AUTOIP) +#error "If you want to use AUTOIP, you have to define LWIP_ARP=1 in your lwipopts.h" +#endif +#if (LWIP_TCP && ((LWIP_EVENT_API && LWIP_CALLBACK_API) || (!LWIP_EVENT_API && !LWIP_CALLBACK_API))) +#error "One and exactly one of LWIP_EVENT_API and LWIP_CALLBACK_API has to be enabled in your lwipopts.h" +#endif +#if (LWIP_ALTCP && LWIP_EVENT_API) +#error "The application layered tcp API does not work with LWIP_EVENT_API" +#endif +#if (MEM_LIBC_MALLOC && MEM_USE_POOLS) +#error "MEM_LIBC_MALLOC and MEM_USE_POOLS may not both be simultaneously enabled in your lwipopts.h" +#endif +#if (MEM_USE_POOLS && !MEMP_USE_CUSTOM_POOLS) +#error "MEM_USE_POOLS requires custom pools (MEMP_USE_CUSTOM_POOLS) to be enabled in your lwipopts.h" +#endif +#if (PBUF_POOL_BUFSIZE <= MEM_ALIGNMENT) +#error "PBUF_POOL_BUFSIZE must be greater than MEM_ALIGNMENT or the offset may take the full first pbuf" +#endif +#if (DNS_LOCAL_HOSTLIST && !DNS_LOCAL_HOSTLIST_IS_DYNAMIC && !(defined(DNS_LOCAL_HOSTLIST_INIT))) +#error "you have to define define DNS_LOCAL_HOSTLIST_INIT {{'host1', 0x123}, {'host2', 0x234}} to initialize DNS_LOCAL_HOSTLIST" +#endif +#if PPP_SUPPORT && !PPPOS_SUPPORT && !PPPOE_SUPPORT && !PPPOL2TP_SUPPORT +#error "PPP_SUPPORT needs at least one of PPPOS_SUPPORT, PPPOE_SUPPORT or PPPOL2TP_SUPPORT turned on" +#endif +#if 
PPP_SUPPORT && !PPP_IPV4_SUPPORT && !PPP_IPV6_SUPPORT +#error "PPP_SUPPORT needs PPP_IPV4_SUPPORT and/or PPP_IPV6_SUPPORT turned on" +#endif +#if PPP_SUPPORT && PPP_IPV4_SUPPORT && !LWIP_IPV4 +#error "PPP_IPV4_SUPPORT needs LWIP_IPV4 turned on" +#endif +#if PPP_SUPPORT && PPP_IPV6_SUPPORT && !LWIP_IPV6 +#error "PPP_IPV6_SUPPORT needs LWIP_IPV6 turned on" +#endif +#if !LWIP_ETHERNET && (LWIP_ARP || PPPOE_SUPPORT) +#error "LWIP_ETHERNET needs to be turned on for LWIP_ARP or PPPOE_SUPPORT" +#endif +#if LWIP_TCPIP_CORE_LOCKING_INPUT && !LWIP_TCPIP_CORE_LOCKING +#error "When using LWIP_TCPIP_CORE_LOCKING_INPUT, LWIP_TCPIP_CORE_LOCKING must be enabled, too" +#endif +#if LWIP_TCP && LWIP_NETIF_TX_SINGLE_PBUF && !TCP_OVERSIZE +#error "LWIP_NETIF_TX_SINGLE_PBUF needs TCP_OVERSIZE enabled to create single-pbuf TCP packets" +#endif +#if LWIP_NETCONN && LWIP_TCP +#if NETCONN_COPY != TCP_WRITE_FLAG_COPY +#error "NETCONN_COPY != TCP_WRITE_FLAG_COPY" +#endif +#if NETCONN_MORE != TCP_WRITE_FLAG_MORE +#error "NETCONN_MORE != TCP_WRITE_FLAG_MORE" +#endif +#endif /* LWIP_NETCONN && LWIP_TCP */ +#if LWIP_SOCKET +#endif /* LWIP_SOCKET */ + + +/* Compile-time checks for deprecated options. + */ +#ifdef MEMP_NUM_TCPIP_MSG +#error "MEMP_NUM_TCPIP_MSG option is deprecated. Remove it from your lwipopts.h." +#endif +#ifdef TCP_REXMIT_DEBUG +#error "TCP_REXMIT_DEBUG option is deprecated. Remove it from your lwipopts.h." +#endif +#ifdef RAW_STATS +#error "RAW_STATS option is deprecated. Remove it from your lwipopts.h." +#endif +#ifdef ETHARP_QUEUE_FIRST +#error "ETHARP_QUEUE_FIRST option is deprecated. Remove it from your lwipopts.h." +#endif +#ifdef ETHARP_ALWAYS_INSERT +#error "ETHARP_ALWAYS_INSERT option is deprecated. Remove it from your lwipopts.h." +#endif +#if !NO_SYS && LWIP_TCPIP_CORE_LOCKING && LWIP_COMPAT_MUTEX && !defined(LWIP_COMPAT_MUTEX_ALLOWED) +#error "LWIP_COMPAT_MUTEX cannot prevent priority inversion. It is recommended to implement priority-aware mutexes. (Define LWIP_COMPAT_MUTEX_ALLOWED to disable this error.)" +#endif + +#ifndef LWIP_DISABLE_TCP_SANITY_CHECKS +#define LWIP_DISABLE_TCP_SANITY_CHECKS 0 +#endif +#ifndef LWIP_DISABLE_MEMP_SANITY_CHECKS +#define LWIP_DISABLE_MEMP_SANITY_CHECKS 0 +#endif + +/* MEMP sanity checks */ +#if MEMP_MEM_MALLOC +#if !LWIP_DISABLE_MEMP_SANITY_CHECKS +#if LWIP_NETCONN || LWIP_SOCKET +#if !MEMP_NUM_NETCONN && LWIP_SOCKET +#error "lwip_sanity_check: WARNING: MEMP_NUM_NETCONN cannot be 0 when using sockets!" +#endif +#else /* MEMP_MEM_MALLOC */ +#if MEMP_NUM_NETCONN > (MEMP_NUM_TCP_PCB+MEMP_NUM_TCP_PCB_LISTEN+MEMP_NUM_UDP_PCB+MEMP_NUM_RAW_PCB) +#error "lwip_sanity_check: WARNING: MEMP_NUM_NETCONN should be less than the sum of MEMP_NUM_{TCP,RAW,UDP}_PCB+MEMP_NUM_TCP_PCB_LISTEN. If you know what you are doing, define LWIP_DISABLE_MEMP_SANITY_CHECKS to 1 to disable this error." +#endif +#endif /* LWIP_NETCONN || LWIP_SOCKET */ +#endif /* !LWIP_DISABLE_MEMP_SANITY_CHECKS */ +#if MEM_USE_POOLS +#error "MEMP_MEM_MALLOC and MEM_USE_POOLS cannot be enabled at the same time" +#endif +#ifdef LWIP_HOOK_MEMP_AVAILABLE +#error "LWIP_HOOK_MEMP_AVAILABLE doesn't make sense with MEMP_MEM_MALLOC" +#endif +#endif /* MEMP_MEM_MALLOC */ + +/* TCP sanity checks */ +#if !LWIP_DISABLE_TCP_SANITY_CHECKS +#if LWIP_TCP +#if !MEMP_MEM_MALLOC && (MEMP_NUM_TCP_SEG < TCP_SND_QUEUELEN) +#error "lwip_sanity_check: WARNING: MEMP_NUM_TCP_SEG should be at least as big as TCP_SND_QUEUELEN. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." 
+#endif +#if TCP_SND_BUF < (2 * TCP_MSS) +#error "lwip_sanity_check: WARNING: TCP_SND_BUF must be at least as much as (2 * TCP_MSS) for things to work smoothly. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if TCP_SND_QUEUELEN < (2 * (TCP_SND_BUF / TCP_MSS)) +#error "lwip_sanity_check: WARNING: TCP_SND_QUEUELEN must be at least as much as (2 * TCP_SND_BUF/TCP_MSS) for things to work. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if TCP_SNDLOWAT >= TCP_SND_BUF +#error "lwip_sanity_check: WARNING: TCP_SNDLOWAT must be less than TCP_SND_BUF. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if TCP_SNDLOWAT >= (0xFFFF - (4 * TCP_MSS)) +#error "lwip_sanity_check: WARNING: TCP_SNDLOWAT must at least be 4*MSS below u16_t overflow!" +#endif +#if TCP_SNDQUEUELOWAT >= TCP_SND_QUEUELEN +#error "lwip_sanity_check: WARNING: TCP_SNDQUEUELOWAT must be less than TCP_SND_QUEUELEN. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if !MEMP_MEM_MALLOC && PBUF_POOL_SIZE && (PBUF_POOL_BUFSIZE <= (PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN)) +#error "lwip_sanity_check: WARNING: PBUF_POOL_BUFSIZE does not provide enough space for protocol headers. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if !MEMP_MEM_MALLOC && PBUF_POOL_SIZE && (TCP_WND > (PBUF_POOL_SIZE * (PBUF_POOL_BUFSIZE - (PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN)))) +#error "lwip_sanity_check: WARNING: TCP_WND is larger than space provided by PBUF_POOL_SIZE * (PBUF_POOL_BUFSIZE - protocol headers). If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if TCP_WND < TCP_MSS +#error "lwip_sanity_check: WARNING: TCP_WND is smaller than MSS. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#endif /* LWIP_TCP */ +#endif /* !LWIP_DISABLE_TCP_SANITY_CHECKS */ + +/** + * @ingroup lwip_nosys + * Initialize all modules. + * Use this in NO_SYS mode. Use tcpip_init() otherwise. + */ +void +lwip_init(void) +{ +#ifndef LWIP_SKIP_CONST_CHECK + int a = 0; + LWIP_UNUSED_ARG(a); + LWIP_ASSERT("LWIP_CONST_CAST not implemented correctly. Check your lwIP port.", LWIP_CONST_CAST(void *, &a) == &a); +#endif +#ifndef LWIP_SKIP_PACKING_CHECK + LWIP_ASSERT("Struct packing not implemented correctly. 
Check your lwIP port.", sizeof(struct packed_struct_test) == PACKED_STRUCT_TEST_EXPECTED_SIZE); +#endif + + /* Modules initialization */ + stats_init(); +#if !NO_SYS + sys_init(); +#endif /* !NO_SYS */ + mem_init(); + memp_init(); + pbuf_init(); + netif_init(); +#if LWIP_IPV4 + ip_init(); +#if LWIP_ARP + etharp_init(); +#endif /* LWIP_ARP */ +#endif /* LWIP_IPV4 */ +#if LWIP_RAW + raw_init(); +#endif /* LWIP_RAW */ +#if LWIP_UDP + udp_init(); +#endif /* LWIP_UDP */ +#if LWIP_TCP + tcp_init(); +#endif /* LWIP_TCP */ +#if LWIP_IGMP + igmp_init(); +#endif /* LWIP_IGMP */ +#if LWIP_DNS + dns_init(); +#endif /* LWIP_DNS */ +#if PPP_SUPPORT + ppp_init(); +#endif + +#if LWIP_TIMERS + sys_timeouts_init(); +#endif /* LWIP_TIMERS */ +} diff --git a/Middlewares/Third_Party/LwIP/src/core/ip.c b/Middlewares/Third_Party/LwIP/src/core/ip.c new file mode 100644 index 0000000..18514cf --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ip.c @@ -0,0 +1,167 @@ +/** + * @file + * Common IPv4 and IPv6 code + * + * @defgroup ip IP + * @ingroup callbackstyle_api + * + * @defgroup ip4 IPv4 + * @ingroup ip + * + * @defgroup ip6 IPv6 + * @ingroup ip + * + * @defgroup ipaddr IP address handling + * @ingroup infrastructure + * + * @defgroup ip4addr IPv4 only + * @ingroup ipaddr + * + * @defgroup ip6addr IPv6 only + * @ingroup ipaddr + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 || LWIP_IPV6 + +#include "lwip/ip_addr.h" +#include "lwip/ip.h" + +/** Global data for both IPv4 and IPv6 */ +struct ip_globals ip_data; + +#if LWIP_IPV4 && LWIP_IPV6 + +const ip_addr_t ip_addr_any_type = IPADDR_ANY_TYPE_INIT; + +/** + * @ingroup ipaddr + * Convert numeric IP address (both versions) into ASCII representation. + * returns ptr to static buffer; not reentrant! + * + * @param addr ip address in network order to convert + * @return pointer to a global static (!) 
buffer that holds the ASCII + * representation of addr + */ +char *ipaddr_ntoa(const ip_addr_t *addr) +{ + if (addr == NULL) { + return NULL; + } + if (IP_IS_V6(addr)) { + return ip6addr_ntoa(ip_2_ip6(addr)); + } else { + return ip4addr_ntoa(ip_2_ip4(addr)); + } +} + +/** + * @ingroup ipaddr + * Same as ipaddr_ntoa, but reentrant since a user-supplied buffer is used. + * + * @param addr ip address in network order to convert + * @param buf target buffer where the string is stored + * @param buflen length of buf + * @return either pointer to buf which now holds the ASCII + * representation of addr or NULL if buf was too small + */ +char *ipaddr_ntoa_r(const ip_addr_t *addr, char *buf, int buflen) +{ + if (addr == NULL) { + return NULL; + } + if (IP_IS_V6(addr)) { + return ip6addr_ntoa_r(ip_2_ip6(addr), buf, buflen); + } else { + return ip4addr_ntoa_r(ip_2_ip4(addr), buf, buflen); + } +} + +/** + * @ingroup ipaddr + * Convert IP address string (both versions) to numeric. + * The version is auto-detected from the string. + * + * @param cp IP address string to convert + * @param addr conversion result is stored here + * @return 1 on success, 0 on error + */ +int +ipaddr_aton(const char *cp, ip_addr_t *addr) +{ + if (cp != NULL) { + const char *c; + for (c = cp; *c != 0; c++) { + if (*c == ':') { + /* contains a colon: IPv6 address */ + if (addr) { + IP_SET_TYPE_VAL(*addr, IPADDR_TYPE_V6); + } + return ip6addr_aton(cp, ip_2_ip6(addr)); + } else if (*c == '.') { + /* contains a dot: IPv4 address */ + break; + } + } + /* call ip4addr_aton as fallback or if IPv4 was found */ + if (addr) { + IP_SET_TYPE_VAL(*addr, IPADDR_TYPE_V4); + } + return ip4addr_aton(cp, ip_2_ip4(addr)); + } + return 0; +} + +/** + * @ingroup lwip_nosys + * If both IP versions are enabled, this function can dispatch packets to the correct one. + * Don't call directly, pass to netif_add() and call netif->input(). + */ +err_t +ip_input(struct pbuf *p, struct netif *inp) +{ + if (p != NULL) { + if (IP_HDR_GET_VERSION(p->payload) == 6) { + return ip6_input(p, inp); + } + return ip4_input(p, inp); + } + return ERR_VAL; +} + +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#endif /* LWIP_IPV4 || LWIP_IPV6 */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c b/Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c new file mode 100644 index 0000000..9f7139b --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c @@ -0,0 +1,527 @@ +/** + * @file + * AutoIP Automatic LinkLocal IP Configuration + * + * This is a AutoIP implementation for the lwIP TCP/IP stack. It aims to conform + * with RFC 3927. + * + * @defgroup autoip AUTOIP + * @ingroup ip4 + * AUTOIP related functions + * USAGE: + * + * define @ref LWIP_AUTOIP 1 in your lwipopts.h + * Options: + * AUTOIP_TMR_INTERVAL msecs, + * I recommend a value of 100. The value must divide 1000 with a remainder almost 0. + * Possible values are 1000, 500, 333, 250, 200, 166, 142, 125, 111, 100 .... + * + * Without DHCP: + * - Call autoip_start() after netif_add(). + * + * With DHCP: + * - define @ref LWIP_DHCP_AUTOIP_COOP 1 in your lwipopts.h. + * - Configure your DHCP Client. + * + * @see netifapi_autoip + */ + +/* + * + * Copyright (c) 2007 Dominik Spies + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
+ * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Dominik Spies + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_AUTOIP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/mem.h" +/* #include "lwip/udp.h" */ +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/autoip.h" +#include "lwip/etharp.h" +#include "lwip/prot/autoip.h" + +#include + +/** Pseudo random macro based on netif informations. + * You could use "rand()" from the C Library if you define LWIP_AUTOIP_RAND in lwipopts.h */ +#ifndef LWIP_AUTOIP_RAND +#define LWIP_AUTOIP_RAND(netif) ( (((u32_t)((netif->hwaddr[5]) & 0xff) << 24) | \ + ((u32_t)((netif->hwaddr[3]) & 0xff) << 16) | \ + ((u32_t)((netif->hwaddr[2]) & 0xff) << 8) | \ + ((u32_t)((netif->hwaddr[4]) & 0xff))) + \ + (netif_autoip_data(netif)? netif_autoip_data(netif)->tried_llipaddr : 0)) +#endif /* LWIP_AUTOIP_RAND */ + +/** + * Macro that generates the initial IP address to be tried by AUTOIP. + * If you want to override this, define it to something else in lwipopts.h. + */ +#ifndef LWIP_AUTOIP_CREATE_SEED_ADDR +#define LWIP_AUTOIP_CREATE_SEED_ADDR(netif) \ + lwip_htonl(AUTOIP_RANGE_START + ((u32_t)(((u8_t)(netif->hwaddr[4])) | \ + ((u32_t)((u8_t)(netif->hwaddr[5]))) << 8))) +#endif /* LWIP_AUTOIP_CREATE_SEED_ADDR */ + +/* static functions */ +static err_t autoip_arp_announce(struct netif *netif); +static void autoip_start_probing(struct netif *netif); + +/** + * @ingroup autoip + * Set a statically allocated struct autoip to work with. + * Using this prevents autoip_start to allocate it using mem_malloc. 
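Per the usage notes at the top of this file, bringing AutoIP up without DHCP amounts to a couple of calls once the netif exists. A minimal sketch using the statically allocated state struct described below is given here; the driver init function and the example_* names are illustrative, and LWIP_AUTOIP is assumed to be enabled in lwipopts.h.

/* Illustrative sketch (not part of the patch): AutoIP without DHCP on a
 * freshly added netif.  example_ethif_init stands for the port's driver
 * init function and is not defined here. */
#include "lwip/netif.h"
#include "lwip/autoip.h"

extern err_t example_ethif_init(struct netif *netif);

static struct netif example_netif;
static struct autoip example_autoip_state;

static void example_bring_up_autoip(void)
{
  netif_add(&example_netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4,
            NULL, example_ethif_init, netif_input);
  netif_set_up(&example_netif);                               /* autoip_start() requires an up netif */

  autoip_set_struct(&example_netif, &example_autoip_state);   /* optional: avoids mem_malloc */
  autoip_start(&example_netif);                               /* starts probing in 169.254.0.0/16 */
}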
+ * + * @param netif the netif for which to set the struct autoip + * @param autoip (uninitialised) autoip struct allocated by the application + */ +void +autoip_set_struct(struct netif *netif, struct autoip *autoip) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("autoip != NULL", autoip != NULL); + LWIP_ASSERT("netif already has a struct autoip set", + netif_autoip_data(netif) == NULL); + + /* clear data structure */ + memset(autoip, 0, sizeof(struct autoip)); + /* autoip->state = AUTOIP_STATE_OFF; */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP, autoip); +} + +/** Restart AutoIP client and check the next address (conflict detected) + * + * @param netif The netif under AutoIP control + */ +static void +autoip_restart(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + autoip->tried_llipaddr++; + autoip_start(netif); +} + +/** + * Handle a IP address conflict after an ARP conflict detection + */ +static void +autoip_handle_arp_conflict(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + + /* RFC3927, 2.5 "Conflict Detection and Defense" allows two options where + a) means retreat on the first conflict and + b) allows to keep an already configured address when having only one + conflict in 10 seconds + We use option b) since it helps to improve the chance that one of the two + conflicting hosts may be able to retain its address. */ + + if (autoip->lastconflict > 0) { + /* retreat, there was a conflicting ARP in the last DEFEND_INTERVAL seconds */ + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_handle_arp_conflict(): we are defending, but in DEFEND_INTERVAL, retreating\n")); + + /* Active TCP sessions are aborted when removing the ip addresss */ + autoip_restart(netif); + } else { + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_handle_arp_conflict(): we are defend, send ARP Announce\n")); + autoip_arp_announce(netif); + autoip->lastconflict = DEFEND_INTERVAL * AUTOIP_TICKS_PER_SECOND; + } +} + +/** + * Create an IP-Address out of range 169.254.1.0 to 169.254.254.255 + * + * @param netif network interface on which create the IP-Address + * @param ipaddr ip address to initialize + */ +static void +autoip_create_addr(struct netif *netif, ip4_addr_t *ipaddr) +{ + struct autoip *autoip = netif_autoip_data(netif); + + /* Here we create an IP-Address out of range 169.254.1.0 to 169.254.254.255 + * compliant to RFC 3927 Section 2.1 + * We have 254 * 256 possibilities */ + + u32_t addr = lwip_ntohl(LWIP_AUTOIP_CREATE_SEED_ADDR(netif)); + addr += autoip->tried_llipaddr; + addr = AUTOIP_NET | (addr & 0xffff); + /* Now, 169.254.0.0 <= addr <= 169.254.255.255 */ + + if (addr < AUTOIP_RANGE_START) { + addr += AUTOIP_RANGE_END - AUTOIP_RANGE_START + 1; + } + if (addr > AUTOIP_RANGE_END) { + addr -= AUTOIP_RANGE_END - AUTOIP_RANGE_START + 1; + } + LWIP_ASSERT("AUTOIP address not in range", (addr >= AUTOIP_RANGE_START) && + (addr <= AUTOIP_RANGE_END)); + ip4_addr_set_u32(ipaddr, lwip_htonl(addr)); + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_create_addr(): tried_llipaddr=%"U16_F", %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + (u16_t)(autoip->tried_llipaddr), ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), + ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr))); +} + +/** + * Sends an ARP probe from a network interface + * + * @param netif network interface used to send the probe + */ +static err_t +autoip_arp_probe(struct netif 
*netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + /* this works because netif->ip_addr is ANY */ + return etharp_request(netif, &autoip->llipaddr); +} + +/** + * Sends an ARP announce from a network interface + * + * @param netif network interface used to send the announce + */ +static err_t +autoip_arp_announce(struct netif *netif) +{ + return etharp_gratuitous(netif); +} + +/** + * Configure interface for use with current LL IP-Address + * + * @param netif network interface to configure with current LL IP-Address + */ +static err_t +autoip_bind(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + ip4_addr_t sn_mask, gw_addr; + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, + ("autoip_bind(netif=%p) %c%c%"U16_F" %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num, + ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), + ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); + + IP4_ADDR(&sn_mask, 255, 255, 0, 0); + IP4_ADDR(&gw_addr, 0, 0, 0, 0); + + netif_set_addr(netif, &autoip->llipaddr, &sn_mask, &gw_addr); + /* interface is used by routing now that an address is set */ + + return ERR_OK; +} + +/** + * @ingroup autoip + * Start AutoIP client + * + * @param netif network interface on which start the AutoIP client + */ +err_t +autoip_start(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + err_t result = ERR_OK; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ERROR("netif is not up, old style port?", netif_is_up(netif), return ERR_ARG;); + + /* Set IP-Address, Netmask and Gateway to 0 to make sure that + * ARP Packets are formed correctly + */ + netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_start(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], + netif->name[1], (u16_t)netif->num)); + if (autoip == NULL) { + /* no AutoIP client attached yet? */ + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, + ("autoip_start(): starting new AUTOIP client\n")); + autoip = (struct autoip *)mem_calloc(1, sizeof(struct autoip)); + if (autoip == NULL) { + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, + ("autoip_start(): could not allocate autoip\n")); + return ERR_MEM; + } + /* store this AutoIP client in the netif */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP, autoip); + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_start(): allocated autoip")); + } else { + autoip->state = AUTOIP_STATE_OFF; + autoip->ttw = 0; + autoip->sent_num = 0; + ip4_addr_set_zero(&autoip->llipaddr); + autoip->lastconflict = 0; + } + + autoip_create_addr(netif, &(autoip->llipaddr)); + autoip_start_probing(netif); + + return result; +} + +static void +autoip_start_probing(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + + autoip->state = AUTOIP_STATE_PROBING; + autoip->sent_num = 0; + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_start_probing(): changing state to PROBING: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), + ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); + + /* time to wait to first probe, this is randomly + * chosen out of 0 to PROBE_WAIT seconds. 
+ * compliant to RFC 3927 Section 2.2.1 + */ + autoip->ttw = (u16_t)(LWIP_AUTOIP_RAND(netif) % (PROBE_WAIT * AUTOIP_TICKS_PER_SECOND)); + + /* + * if we tried more then MAX_CONFLICTS we must limit our rate for + * acquiring and probing address + * compliant to RFC 3927 Section 2.2.1 + */ + if (autoip->tried_llipaddr > MAX_CONFLICTS) { + autoip->ttw = RATE_LIMIT_INTERVAL * AUTOIP_TICKS_PER_SECOND; + } +} + +/** + * Handle a possible change in the network configuration. + * + * If there is an AutoIP address configured, take the interface down + * and begin probing with the same address. + */ +void +autoip_network_changed(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + + if (autoip && (autoip->state != AUTOIP_STATE_OFF)) { + autoip_start_probing(netif); + } +} + +/** + * @ingroup autoip + * Stop AutoIP client + * + * @param netif network interface on which stop the AutoIP client + */ +err_t +autoip_stop(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + + LWIP_ASSERT_CORE_LOCKED(); + if (autoip != NULL) { + autoip->state = AUTOIP_STATE_OFF; + if (ip4_addr_islinklocal(netif_ip4_addr(netif))) { + netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); + } + } + return ERR_OK; +} + +/** + * Has to be called in loop every AUTOIP_TMR_INTERVAL milliseconds + */ +void +autoip_tmr(void) +{ + struct netif *netif; + /* loop through netif's */ + NETIF_FOREACH(netif) { + struct autoip *autoip = netif_autoip_data(netif); + /* only act on AutoIP configured interfaces */ + if (autoip != NULL) { + if (autoip->lastconflict > 0) { + autoip->lastconflict--; + } + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, + ("autoip_tmr() AutoIP-State: %"U16_F", ttw=%"U16_F"\n", + (u16_t)(autoip->state), autoip->ttw)); + + if (autoip->ttw > 0) { + autoip->ttw--; + } + + switch (autoip->state) { + case AUTOIP_STATE_PROBING: + if (autoip->ttw == 0) { + if (autoip->sent_num >= PROBE_NUM) { + /* Switch to ANNOUNCING: now we can bind to an IP address and use it */ + autoip->state = AUTOIP_STATE_ANNOUNCING; + autoip_bind(netif); + /* autoip_bind() calls netif_set_addr(): this triggers a gratuitous ARP + which counts as an announcement */ + autoip->sent_num = 1; + autoip->ttw = ANNOUNCE_WAIT * AUTOIP_TICKS_PER_SECOND; + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_tmr(): changing state to ANNOUNCING: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), + ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); + } else { + autoip_arp_probe(netif); + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_tmr() PROBING Sent Probe\n")); + autoip->sent_num++; + if (autoip->sent_num == PROBE_NUM) { + /* calculate time to wait to for announce */ + autoip->ttw = ANNOUNCE_WAIT * AUTOIP_TICKS_PER_SECOND; + } else { + /* calculate time to wait to next probe */ + autoip->ttw = (u16_t)((LWIP_AUTOIP_RAND(netif) % + ((PROBE_MAX - PROBE_MIN) * AUTOIP_TICKS_PER_SECOND) ) + + PROBE_MIN * AUTOIP_TICKS_PER_SECOND); + } + } + } + break; + + case AUTOIP_STATE_ANNOUNCING: + if (autoip->ttw == 0) { + autoip_arp_announce(netif); + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_tmr() ANNOUNCING Sent Announce\n")); + autoip->ttw = ANNOUNCE_INTERVAL * AUTOIP_TICKS_PER_SECOND; + autoip->sent_num++; + + if (autoip->sent_num >= ANNOUNCE_NUM) { + autoip->state = AUTOIP_STATE_BOUND; + autoip->sent_num = 0; + autoip->ttw = 0; + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_tmr(): 
changing state to BOUND: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), + ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); + } + } + break; + + default: + /* nothing to do in other states */ + break; + } + } + } +} + +/** + * Handles every incoming ARP Packet, called by etharp_input(). + * + * @param netif network interface to use for autoip processing + * @param hdr Incoming ARP packet + */ +void +autoip_arp_reply(struct netif *netif, struct etharp_hdr *hdr) +{ + struct autoip *autoip = netif_autoip_data(netif); + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_arp_reply()\n")); + if ((autoip != NULL) && (autoip->state != AUTOIP_STATE_OFF)) { + /* when ip.src == llipaddr && hw.src != netif->hwaddr + * + * when probing ip.dst == llipaddr && hw.src != netif->hwaddr + * we have a conflict and must solve it + */ + ip4_addr_t sipaddr, dipaddr; + struct eth_addr netifaddr; + SMEMCPY(netifaddr.addr, netif->hwaddr, ETH_HWADDR_LEN); + + /* Copy struct ip4_addr_wordaligned to aligned ip4_addr, to support compilers without + * structure packing (not using structure copy which breaks strict-aliasing rules). + */ + IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&sipaddr, &hdr->sipaddr); + IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&dipaddr, &hdr->dipaddr); + + if (autoip->state == AUTOIP_STATE_PROBING) { + /* RFC 3927 Section 2.2.1: + * from beginning to after ANNOUNCE_WAIT + * seconds we have a conflict if + * ip.src == llipaddr OR + * ip.dst == llipaddr && hw.src != own hwaddr + */ + if ((ip4_addr_cmp(&sipaddr, &autoip->llipaddr)) || + (ip4_addr_isany_val(sipaddr) && + ip4_addr_cmp(&dipaddr, &autoip->llipaddr) && + !eth_addr_cmp(&netifaddr, &hdr->shwaddr))) { + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING, + ("autoip_arp_reply(): Probe Conflict detected\n")); + autoip_restart(netif); + } + } else { + /* RFC 3927 Section 2.5: + * in any state we have a conflict if + * ip.src == llipaddr && hw.src != own hwaddr + */ + if (ip4_addr_cmp(&sipaddr, &autoip->llipaddr) && + !eth_addr_cmp(&netifaddr, &hdr->shwaddr)) { + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING, + ("autoip_arp_reply(): Conflicting ARP-Packet detected\n")); + autoip_handle_arp_conflict(netif); + } + } + } +} + +/** check if AutoIP supplied netif->ip_addr + * + * @param netif the netif to check + * @return 1 if AutoIP supplied netif->ip_addr (state BOUND or ANNOUNCING), + * 0 otherwise + */ +u8_t +autoip_supplied_address(const struct netif *netif) +{ + if ((netif != NULL) && (netif_autoip_data(netif) != NULL)) { + struct autoip *autoip = netif_autoip_data(netif); + return (autoip->state == AUTOIP_STATE_BOUND) || (autoip->state == AUTOIP_STATE_ANNOUNCING); + } + return 0; +} + +u8_t +autoip_accept_packet(struct netif *netif, const ip4_addr_t *addr) +{ + struct autoip *autoip = netif_autoip_data(netif); + return (autoip != NULL) && ip4_addr_cmp(addr, &(autoip->llipaddr)); +} + +#endif /* LWIP_IPV4 && LWIP_AUTOIP */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c b/Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c new file mode 100644 index 0000000..534574f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c @@ -0,0 +1,1990 @@ +/** + * @file + * Dynamic Host Configuration Protocol client + * + * @defgroup dhcp4 DHCPv4 + * @ingroup ip4 + * DHCP (IPv4) related functions + * This is a DHCP client for the lwIP TCP/IP stack. 
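Before the DHCP client source that follows, one usage note on the AutoIP query helpers just above: autoip_supplied_address() only returns 1 once the state machine has reached ANNOUNCING or BOUND, so application code typically polls it (or a netif callback) before opening connections. A minimal sketch, not part of this commit, with a hypothetical function name:

#include "lwip/autoip.h"
#include "lwip/netif.h"

static void poll_linklocal(struct netif *nif)
{
  if (autoip_supplied_address(nif)) {
    const ip4_addr_t *ll = netif_ip4_addr(nif);   /* the 169.254.x.y address chosen above */
    LWIP_UNUSED_ARG(ll);                          /* bind PCBs/sockets to *ll from here on */
  }
}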
It aims to conform + * with RFC 2131 and RFC 2132. + * + * @todo: + * - Support for interfaces other than Ethernet (SLIP, PPP, ...) + * + * Options: + * @ref DHCP_COARSE_TIMER_SECS (recommended 60 which is a minute) + * @ref DHCP_FINE_TIMER_MSECS (recommended 500 which equals TCP coarse timer) + * + * dhcp_start() starts a DHCP client instance which + * configures the interface by obtaining an IP address lease and maintaining it. + * + * Use dhcp_release() to end the lease and use dhcp_stop() + * to remove the DHCP client. + * + * @see LWIP_HOOK_DHCP_APPEND_OPTIONS + * @see LWIP_HOOK_DHCP_PARSE_OPTION + * + * @see netifapi_dhcp4 + */ + +/* + * Copyright (c) 2001-2004 Leon Woestenberg + * Copyright (c) 2001-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * The Swedish Institute of Computer Science and Adam Dunkels + * are specifically granted permission to redistribute this + * source code. 
+ * + * Author: Leon Woestenberg + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_DHCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/stats.h" +#include "lwip/mem.h" +#include "lwip/udp.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/def.h" +#include "lwip/dhcp.h" +#include "lwip/autoip.h" +#include "lwip/dns.h" +#include "lwip/etharp.h" +#include "lwip/prot/dhcp.h" +#include "lwip/prot/iana.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif +#ifndef LWIP_HOOK_DHCP_APPEND_OPTIONS +#define LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, state, msg, msg_type, options_len_ptr) +#endif +#ifndef LWIP_HOOK_DHCP_PARSE_OPTION +#define LWIP_HOOK_DHCP_PARSE_OPTION(netif, dhcp, state, msg, msg_type, option, len, pbuf, offset) do { LWIP_UNUSED_ARG(msg); } while(0) +#endif + +/** DHCP_CREATE_RAND_XID: if this is set to 1, the xid is created using + * LWIP_RAND() (this overrides DHCP_GLOBAL_XID) + */ +#ifndef DHCP_CREATE_RAND_XID +#define DHCP_CREATE_RAND_XID 1 +#endif + +/** Default for DHCP_GLOBAL_XID is 0xABCD0000 + * This can be changed by defining DHCP_GLOBAL_XID and DHCP_GLOBAL_XID_HEADER, e.g. + * \#define DHCP_GLOBAL_XID_HEADER "stdlib.h" + * \#define DHCP_GLOBAL_XID rand() + */ +#ifdef DHCP_GLOBAL_XID_HEADER +#include DHCP_GLOBAL_XID_HEADER /* include optional starting XID generation prototypes */ +#endif + +/** DHCP_OPTION_MAX_MSG_SIZE is set to the MTU + * MTU is checked to be big enough in dhcp_start */ +#define DHCP_MAX_MSG_LEN(netif) (netif->mtu) +#define DHCP_MAX_MSG_LEN_MIN_REQUIRED 576 +/** Minimum length for reply before packet is parsed */ +#define DHCP_MIN_REPLY_LEN 44 + +#define REBOOT_TRIES 2 + +#if LWIP_DNS && LWIP_DHCP_MAX_DNS_SERVERS +#if DNS_MAX_SERVERS > LWIP_DHCP_MAX_DNS_SERVERS +#define LWIP_DHCP_PROVIDE_DNS_SERVERS LWIP_DHCP_MAX_DNS_SERVERS +#else +#define LWIP_DHCP_PROVIDE_DNS_SERVERS DNS_MAX_SERVERS +#endif +#else +#define LWIP_DHCP_PROVIDE_DNS_SERVERS 0 +#endif + +/** Option handling: options are parsed in dhcp_parse_reply + * and saved in an array where other functions can load them from. + * This might be moved into the struct dhcp (not necessarily since + * lwIP is single-threaded and the array is only used while in recv + * callback). */ +enum dhcp_option_idx { + DHCP_OPTION_IDX_OVERLOAD = 0, + DHCP_OPTION_IDX_MSG_TYPE, + DHCP_OPTION_IDX_SERVER_ID, + DHCP_OPTION_IDX_LEASE_TIME, + DHCP_OPTION_IDX_T1, + DHCP_OPTION_IDX_T2, + DHCP_OPTION_IDX_SUBNET_MASK, + DHCP_OPTION_IDX_ROUTER, +#if LWIP_DHCP_PROVIDE_DNS_SERVERS + DHCP_OPTION_IDX_DNS_SERVER, + DHCP_OPTION_IDX_DNS_SERVER_LAST = DHCP_OPTION_IDX_DNS_SERVER + LWIP_DHCP_PROVIDE_DNS_SERVERS - 1, +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ +#if LWIP_DHCP_GET_NTP_SRV + DHCP_OPTION_IDX_NTP_SERVER, + DHCP_OPTION_IDX_NTP_SERVER_LAST = DHCP_OPTION_IDX_NTP_SERVER + LWIP_DHCP_MAX_NTP_SERVERS - 1, +#endif /* LWIP_DHCP_GET_NTP_SRV */ + DHCP_OPTION_IDX_MAX +}; + +/** Holds the decoded option values, only valid while in dhcp_recv. + @todo: move this into struct dhcp? */ +u32_t dhcp_rx_options_val[DHCP_OPTION_IDX_MAX]; +/** Holds a flag which option was received and is contained in dhcp_rx_options_val, + only valid while in dhcp_recv. + @todo: move this into struct dhcp? 
*/ +u8_t dhcp_rx_options_given[DHCP_OPTION_IDX_MAX]; + +static u8_t dhcp_discover_request_options[] = { + DHCP_OPTION_SUBNET_MASK, + DHCP_OPTION_ROUTER, + DHCP_OPTION_BROADCAST +#if LWIP_DHCP_PROVIDE_DNS_SERVERS + , DHCP_OPTION_DNS_SERVER +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ +#if LWIP_DHCP_GET_NTP_SRV + , DHCP_OPTION_NTP +#endif /* LWIP_DHCP_GET_NTP_SRV */ +}; + +#ifdef DHCP_GLOBAL_XID +static u32_t xid; +static u8_t xid_initialised; +#endif /* DHCP_GLOBAL_XID */ + +#define dhcp_option_given(dhcp, idx) (dhcp_rx_options_given[idx] != 0) +#define dhcp_got_option(dhcp, idx) (dhcp_rx_options_given[idx] = 1) +#define dhcp_clear_option(dhcp, idx) (dhcp_rx_options_given[idx] = 0) +#define dhcp_clear_all_options(dhcp) (memset(dhcp_rx_options_given, 0, sizeof(dhcp_rx_options_given))) +#define dhcp_get_option_value(dhcp, idx) (dhcp_rx_options_val[idx]) +#define dhcp_set_option_value(dhcp, idx, val) (dhcp_rx_options_val[idx] = (val)) + +static struct udp_pcb *dhcp_pcb; +static u8_t dhcp_pcb_refcount; + +/* DHCP client state machine functions */ +static err_t dhcp_discover(struct netif *netif); +static err_t dhcp_select(struct netif *netif); +static void dhcp_bind(struct netif *netif); +#if DHCP_DOES_ARP_CHECK +static err_t dhcp_decline(struct netif *netif); +#endif /* DHCP_DOES_ARP_CHECK */ +static err_t dhcp_rebind(struct netif *netif); +static err_t dhcp_reboot(struct netif *netif); +static void dhcp_set_state(struct dhcp *dhcp, u8_t new_state); + +/* receive, unfold, parse and free incoming messages */ +static void dhcp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port); + +/* set the DHCP timers */ +static void dhcp_timeout(struct netif *netif); +static void dhcp_t1_timeout(struct netif *netif); +static void dhcp_t2_timeout(struct netif *netif); + +/* build outgoing messages */ +/* create a DHCP message, fill in common headers */ +static struct pbuf *dhcp_create_msg(struct netif *netif, struct dhcp *dhcp, u8_t message_type, u16_t *options_out_len); +/* add a DHCP option (type, then length in bytes) */ +static u16_t dhcp_option(u16_t options_out_len, u8_t *options, u8_t option_type, u8_t option_len); +/* add option values */ +static u16_t dhcp_option_byte(u16_t options_out_len, u8_t *options, u8_t value); +static u16_t dhcp_option_short(u16_t options_out_len, u8_t *options, u16_t value); +static u16_t dhcp_option_long(u16_t options_out_len, u8_t *options, u32_t value); +#if LWIP_NETIF_HOSTNAME +static u16_t dhcp_option_hostname(u16_t options_out_len, u8_t *options, struct netif *netif); +#endif /* LWIP_NETIF_HOSTNAME */ +/* always add the DHCP options trailer to end and pad */ +static void dhcp_option_trailer(u16_t options_out_len, u8_t *options, struct pbuf *p_out); + +/** Ensure DHCP PCB is allocated and bound */ +static err_t +dhcp_inc_pcb_refcount(void) +{ + if (dhcp_pcb_refcount == 0) { + LWIP_ASSERT("dhcp_inc_pcb_refcount(): memory leak", dhcp_pcb == NULL); + + /* allocate UDP PCB */ + dhcp_pcb = udp_new(); + + if (dhcp_pcb == NULL) { + return ERR_MEM; + } + + ip_set_option(dhcp_pcb, SOF_BROADCAST); + + /* set up local and remote port for the pcb -> listen on all interfaces on all src/dest IPs */ + udp_bind(dhcp_pcb, IP4_ADDR_ANY, LWIP_IANA_PORT_DHCP_CLIENT); + udp_connect(dhcp_pcb, IP4_ADDR_ANY, LWIP_IANA_PORT_DHCP_SERVER); + udp_recv(dhcp_pcb, dhcp_recv, NULL); + } + + dhcp_pcb_refcount++; + + return ERR_OK; +} + +/** Free DHCP PCB if the last netif stops using it */ +static void +dhcp_dec_pcb_refcount(void) +{ + LWIP_ASSERT("dhcp_pcb_refcount(): 
refcount error", (dhcp_pcb_refcount > 0)); + dhcp_pcb_refcount--; + + if (dhcp_pcb_refcount == 0) { + udp_remove(dhcp_pcb); + dhcp_pcb = NULL; + } +} + +/** + * Back-off the DHCP client (because of a received NAK response). + * + * Back-off the DHCP client because of a received NAK. Receiving a + * NAK means the client asked for something non-sensible, for + * example when it tries to renew a lease obtained on another network. + * + * We clear any existing set IP address and restart DHCP negotiation + * afresh (as per RFC2131 3.2.3). + * + * @param netif the netif under DHCP control + */ +static void +dhcp_handle_nak(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_handle_nak(netif=%p) %c%c%"U16_F"\n", + (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + /* Change to a defined state - set this before assigning the address + to ensure the callback can use dhcp_supplied_address() */ + dhcp_set_state(dhcp, DHCP_STATE_BACKING_OFF); + /* remove IP address from interface (must no longer be used, as per RFC2131) */ + netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); + /* We can immediately restart discovery */ + dhcp_discover(netif); +} + +#if DHCP_DOES_ARP_CHECK +/** + * Checks if the offered IP address is already in use. + * + * It does so by sending an ARP request for the offered address and + * entering CHECKING state. If no ARP reply is received within a small + * interval, the address is assumed to be free for use by us. + * + * @param netif the netif under DHCP control + */ +static void +dhcp_check(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_check(netif=%p) %c%c\n", (void *)netif, (s16_t)netif->name[0], + (s16_t)netif->name[1])); + dhcp_set_state(dhcp, DHCP_STATE_CHECKING); + /* create an ARP query for the offered IP address, expecting that no host + responds, as the IP address should not be in use. */ + result = etharp_query(netif, &dhcp->offered_ip_addr, NULL); + if (result != ERR_OK) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("dhcp_check: could not perform ARP query\n")); + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = 500; + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_check(): set request timeout %"U16_F" msecs\n", msecs)); +} +#endif /* DHCP_DOES_ARP_CHECK */ + +/** + * Remember the configuration offered by a DHCP server. 
+ * + * @param netif the netif under DHCP control + */ +static void +dhcp_handle_offer(struct netif *netif, struct dhcp_msg *msg_in) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_handle_offer(netif=%p) %c%c%"U16_F"\n", + (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + /* obtain the server address */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_SERVER_ID)) { + dhcp->request_timeout = 0; /* stop timer */ + + ip_addr_set_ip4_u32(&dhcp->server_ip_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_SERVER_ID))); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_handle_offer(): server 0x%08"X32_F"\n", + ip4_addr_get_u32(ip_2_ip4(&dhcp->server_ip_addr)))); + /* remember offered address */ + ip4_addr_copy(dhcp->offered_ip_addr, msg_in->yiaddr); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_handle_offer(): offer for 0x%08"X32_F"\n", + ip4_addr_get_u32(&dhcp->offered_ip_addr))); + + dhcp_select(netif); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("dhcp_handle_offer(netif=%p) did not get server ID!\n", (void *)netif)); + } +} + +/** + * Select a DHCP server offer out of all offers. + * + * Simply select the first offer received. + * + * @param netif the netif under DHCP control + * @return lwIP specific error (see error.h) + */ +static err_t +dhcp_select(struct netif *netif) +{ + struct dhcp *dhcp; + err_t result; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_ERROR("dhcp_select: netif != NULL", (netif != NULL), return ERR_ARG;); + dhcp = netif_dhcp_data(netif); + LWIP_ERROR("dhcp_select: dhcp != NULL", (dhcp != NULL), return ERR_VAL;); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_select(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + dhcp_set_state(dhcp, DHCP_STATE_REQUESTING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_REQUEST, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + /* MUST request the offered IP address */ + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_REQUESTED_IP, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(&dhcp->offered_ip_addr))); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_SERVER_ID, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(ip_2_ip4(&dhcp->server_ip_addr)))); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + +#if LWIP_NETIF_HOSTNAME + options_out_len = dhcp_option_hostname(options_out_len, msg_out->options, netif); +#endif /* LWIP_NETIF_HOSTNAME */ + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_REQUESTING, msg_out, DHCP_REQUEST, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + /* send broadcast to any DHCP server 
*/ + result = udp_sendto_if_src(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif, IP4_ADDR_ANY); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_select: REQUESTING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("dhcp_select: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = (u16_t)((dhcp->tries < 6 ? 1 << dhcp->tries : 60) * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_select(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + +/** + * The DHCP timer that checks for lease renewal/rebind timeouts. + * Must be called once a minute (see @ref DHCP_COARSE_TIMER_SECS). + */ +void +dhcp_coarse_tmr(void) +{ + struct netif *netif; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_coarse_tmr()\n")); + /* iterate through all network interfaces */ + NETIF_FOREACH(netif) { + /* only act on DHCP configured interfaces */ + struct dhcp *dhcp = netif_dhcp_data(netif); + if ((dhcp != NULL) && (dhcp->state != DHCP_STATE_OFF)) { + /* compare lease time to expire timeout */ + if (dhcp->t0_timeout && (++dhcp->lease_used == dhcp->t0_timeout)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_coarse_tmr(): t0 timeout\n")); + /* this clients' lease time has expired */ + dhcp_release_and_stop(netif); + dhcp_start(netif); + /* timer is active (non zero), and triggers (zeroes) now? */ + } else if (dhcp->t2_rebind_time && (dhcp->t2_rebind_time-- == 1)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_coarse_tmr(): t2 timeout\n")); + /* this clients' rebind timeout triggered */ + dhcp_t2_timeout(netif); + /* timer is active (non zero), and triggers (zeroes) now */ + } else if (dhcp->t1_renew_time && (dhcp->t1_renew_time-- == 1)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_coarse_tmr(): t1 timeout\n")); + /* this clients' renewal timeout triggered */ + dhcp_t1_timeout(netif); + } + } + } +} + +/** + * DHCP transaction timeout handling (this function must be called every 500ms, + * see @ref DHCP_FINE_TIMER_MSECS). + * + * A DHCP server is expected to respond within a short period of time. + * This timer checks whether an outstanding DHCP request is timed out. + */ +void +dhcp_fine_tmr(void) +{ + struct netif *netif; + /* loop through netif's */ + NETIF_FOREACH(netif) { + struct dhcp *dhcp = netif_dhcp_data(netif); + /* only act on DHCP configured interfaces */ + if (dhcp != NULL) { + /* timer is active (non zero), and is about to trigger now */ + if (dhcp->request_timeout > 1) { + dhcp->request_timeout--; + } else if (dhcp->request_timeout == 1) { + dhcp->request_timeout--; + /* { dhcp->request_timeout == 0 } */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_fine_tmr(): request timeout\n")); + /* this client's request timeout triggered */ + dhcp_timeout(netif); + } + } + } +} + +/** + * A DHCP negotiation transaction, or ARP request, has timed out. + * + * The timer that was started with the DHCP or ARP request has + * timed out, indicating no response was received in time. 
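dhcp_coarse_tmr() and dhcp_fine_tmr() above are the two clocks the whole state machine depends on: the fine timer (every DHCP_FINE_TIMER_MSECS, recommended 500 ms) drives retransmissions, the coarse timer (every DHCP_COARSE_TIMER_SECS, recommended 60 s) drives lease/T1/T2 expiry. In a stock build lwIP's cyclic timers call both automatically; as a hedged sketch that is not part of this commit, a hand-rolled NO_SYS scheduler built on the port's sys_now() millisecond clock could drive them like this:

#include "lwip/dhcp.h"
#include "lwip/sys.h"

void poll_dhcp_timers(void)
{
  static u32_t last_fine, last_coarse;
  u32_t now = sys_now();
  if ((u32_t)(now - last_fine) >= DHCP_FINE_TIMER_MSECS) {
    last_fine = now;
    dhcp_fine_tmr();                               /* request/retransmission timeouts */
  }
  if ((u32_t)(now - last_coarse) >= (u32_t)DHCP_COARSE_TIMER_SECS * 1000U) {
    last_coarse = now;
    dhcp_coarse_tmr();                             /* lease, T1 renew and T2 rebind timeouts */
  }
}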
+ * + * @param netif the netif under DHCP control + */ +static void +dhcp_timeout(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_timeout()\n")); + /* back-off period has passed, or server selection timed out */ + if ((dhcp->state == DHCP_STATE_BACKING_OFF) || (dhcp->state == DHCP_STATE_SELECTING)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_timeout(): restarting discovery\n")); + dhcp_discover(netif); + /* receiving the requested lease timed out */ + } else if (dhcp->state == DHCP_STATE_REQUESTING) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_timeout(): REQUESTING, DHCP request timed out\n")); + if (dhcp->tries <= 5) { + dhcp_select(netif); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_timeout(): REQUESTING, releasing, restarting\n")); + dhcp_release_and_stop(netif); + dhcp_start(netif); + } +#if DHCP_DOES_ARP_CHECK + /* received no ARP reply for the offered address (which is good) */ + } else if (dhcp->state == DHCP_STATE_CHECKING) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_timeout(): CHECKING, ARP request timed out\n")); + if (dhcp->tries <= 1) { + dhcp_check(netif); + /* no ARP replies on the offered address, + looks like the IP address is indeed free */ + } else { + /* bind the interface to the offered address */ + dhcp_bind(netif); + } +#endif /* DHCP_DOES_ARP_CHECK */ + } else if (dhcp->state == DHCP_STATE_REBOOTING) { + if (dhcp->tries < REBOOT_TRIES) { + dhcp_reboot(netif); + } else { + dhcp_discover(netif); + } + } +} + +/** + * The renewal period has timed out. + * + * @param netif the netif under DHCP control + */ +static void +dhcp_t1_timeout(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_t1_timeout()\n")); + if ((dhcp->state == DHCP_STATE_REQUESTING) || (dhcp->state == DHCP_STATE_BOUND) || + (dhcp->state == DHCP_STATE_RENEWING)) { + /* just retry to renew - note that the rebind timer (t2) will + * eventually time-out if renew tries fail. */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("dhcp_t1_timeout(): must renew\n")); + /* This slightly different to RFC2131: DHCPREQUEST will be sent from state + DHCP_STATE_RENEWING, not DHCP_STATE_BOUND */ + dhcp_renew(netif); + /* Calculate next timeout */ + if (((dhcp->t2_timeout - dhcp->lease_used) / 2) >= ((60 + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS)) { + dhcp->t1_renew_time = (u16_t)((dhcp->t2_timeout - dhcp->lease_used) / 2); + } + } +} + +/** + * The rebind period has timed out. 
+ * + * @param netif the netif under DHCP control + */ +static void +dhcp_t2_timeout(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_t2_timeout()\n")); + if ((dhcp->state == DHCP_STATE_REQUESTING) || (dhcp->state == DHCP_STATE_BOUND) || + (dhcp->state == DHCP_STATE_RENEWING) || (dhcp->state == DHCP_STATE_REBINDING)) { + /* just retry to rebind */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("dhcp_t2_timeout(): must rebind\n")); + /* This slightly different to RFC2131: DHCPREQUEST will be sent from state + DHCP_STATE_REBINDING, not DHCP_STATE_BOUND */ + dhcp_rebind(netif); + /* Calculate next timeout */ + if (((dhcp->t0_timeout - dhcp->lease_used) / 2) >= ((60 + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS)) { + dhcp->t2_rebind_time = (u16_t)((dhcp->t0_timeout - dhcp->lease_used) / 2); + } + } +} + +/** + * Handle a DHCP ACK packet + * + * @param netif the netif under DHCP control + */ +static void +dhcp_handle_ack(struct netif *netif, struct dhcp_msg *msg_in) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + +#if LWIP_DHCP_PROVIDE_DNS_SERVERS || LWIP_DHCP_GET_NTP_SRV + u8_t n; +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS || LWIP_DHCP_GET_NTP_SRV */ +#if LWIP_DHCP_GET_NTP_SRV + ip4_addr_t ntp_server_addrs[LWIP_DHCP_MAX_NTP_SERVERS]; +#endif + + /* clear options we might not get from the ACK */ + ip4_addr_set_zero(&dhcp->offered_sn_mask); + ip4_addr_set_zero(&dhcp->offered_gw_addr); +#if LWIP_DHCP_BOOTP_FILE + ip4_addr_set_zero(&dhcp->offered_si_addr); +#endif /* LWIP_DHCP_BOOTP_FILE */ + + /* lease time given? */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_LEASE_TIME)) { + /* remember offered lease time */ + dhcp->offered_t0_lease = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_LEASE_TIME); + } + /* renewal period given? */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_T1)) { + /* remember given renewal period */ + dhcp->offered_t1_renew = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_T1); + } else { + /* calculate safe periods for renewal */ + dhcp->offered_t1_renew = dhcp->offered_t0_lease / 2; + } + + /* renewal period given? */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_T2)) { + /* remember given rebind period */ + dhcp->offered_t2_rebind = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_T2); + } else { + /* calculate safe periods for rebinding (offered_t0_lease * 0.875 -> 87.5%)*/ + dhcp->offered_t2_rebind = (dhcp->offered_t0_lease * 7U) / 8U; + } + + /* (y)our internet address */ + ip4_addr_copy(dhcp->offered_ip_addr, msg_in->yiaddr); + +#if LWIP_DHCP_BOOTP_FILE + /* copy boot server address, + boot file name copied in dhcp_parse_reply if not overloaded */ + ip4_addr_copy(dhcp->offered_si_addr, msg_in->siaddr); +#endif /* LWIP_DHCP_BOOTP_FILE */ + + /* subnet mask given? 
*/ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_SUBNET_MASK)) { + /* remember given subnet mask */ + ip4_addr_set_u32(&dhcp->offered_sn_mask, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_SUBNET_MASK))); + dhcp->subnet_mask_given = 1; + } else { + dhcp->subnet_mask_given = 0; + } + + /* gateway router */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_ROUTER)) { + ip4_addr_set_u32(&dhcp->offered_gw_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_ROUTER))); + } + +#if LWIP_DHCP_GET_NTP_SRV + /* NTP servers */ + for (n = 0; (n < LWIP_DHCP_MAX_NTP_SERVERS) && dhcp_option_given(dhcp, DHCP_OPTION_IDX_NTP_SERVER + n); n++) { + ip4_addr_set_u32(&ntp_server_addrs[n], lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_NTP_SERVER + n))); + } + dhcp_set_ntp_servers(n, ntp_server_addrs); +#endif /* LWIP_DHCP_GET_NTP_SRV */ + +#if LWIP_DHCP_PROVIDE_DNS_SERVERS + /* DNS servers */ + for (n = 0; (n < LWIP_DHCP_PROVIDE_DNS_SERVERS) && dhcp_option_given(dhcp, DHCP_OPTION_IDX_DNS_SERVER + n); n++) { + ip_addr_t dns_addr; + ip_addr_set_ip4_u32_val(dns_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_DNS_SERVER + n))); + dns_setserver(n, &dns_addr); + } +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ +} + +/** + * @ingroup dhcp4 + * Set a statically allocated struct dhcp to work with. + * Using this prevents dhcp_start to allocate it using mem_malloc. + * + * @param netif the netif for which to set the struct dhcp + * @param dhcp (uninitialised) dhcp struct allocated by the application + */ +void +dhcp_set_struct(struct netif *netif, struct dhcp *dhcp) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("dhcp != NULL", dhcp != NULL); + LWIP_ASSERT("netif already has a struct dhcp set", netif_dhcp_data(netif) == NULL); + + /* clear data structure */ + memset(dhcp, 0, sizeof(struct dhcp)); + /* dhcp_set_state(&dhcp, DHCP_STATE_OFF); */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, dhcp); +} + +/** + * @ingroup dhcp4 + * Removes a struct dhcp from a netif. + * + * ATTENTION: Only use this when not using dhcp_set_struct() to allocate the + * struct dhcp since the memory is passed back to the heap. + * + * @param netif the netif from which to remove the struct dhcp + */ +void dhcp_cleanup(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + + if (netif_dhcp_data(netif) != NULL) { + mem_free(netif_dhcp_data(netif)); + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, NULL); + } +} + +/** + * @ingroup dhcp4 + * Start DHCP negotiation for a network interface. + * + * If no DHCP client instance was attached to this interface, + * a new client is created first. If a DHCP client instance + * was already present, it restarts negotiation. 
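dhcp_set_struct() above mirrors autoip_set_struct(): it lets the application keep the DHCP client state in static storage so that dhcp_start() skips mem_malloc(). A minimal sketch, not part of this commit, with a hypothetical static instance:

#include "lwip/dhcp.h"

static struct dhcp dhcp_state;              /* hypothetical static instance */

static void attach_dhcp_state(struct netif *nif)
{
  dhcp_set_struct(nif, &dhcp_state);        /* dhcp_start() will reuse this instead of allocating */
  /* do not call dhcp_cleanup() on this netif later: it would mem_free() non-heap memory */
}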
+ * + * @param netif The lwIP network interface + * @return lwIP error code + * - ERR_OK - No error + * - ERR_MEM - Out of memory + */ +err_t +dhcp_start(struct netif *netif) +{ + struct dhcp *dhcp; + err_t result; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ERROR("netif != NULL", (netif != NULL), return ERR_ARG;); + LWIP_ERROR("netif is not up, old style port?", netif_is_up(netif), return ERR_ARG;); + dhcp = netif_dhcp_data(netif); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_start(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + + /* check MTU of the netif */ + if (netif->mtu < DHCP_MAX_MSG_LEN_MIN_REQUIRED) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): Cannot use this netif with DHCP: MTU is too small\n")); + return ERR_MEM; + } + + /* no DHCP client attached yet? */ + if (dhcp == NULL) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): mallocing new DHCP client\n")); + dhcp = (struct dhcp *)mem_malloc(sizeof(struct dhcp)); + if (dhcp == NULL) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): could not allocate dhcp\n")); + return ERR_MEM; + } + + /* store this dhcp client in the netif */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, dhcp); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): allocated dhcp")); + /* already has DHCP client attached */ + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_start(): restarting DHCP configuration\n")); + + if (dhcp->pcb_allocated != 0) { + dhcp_dec_pcb_refcount(); /* free DHCP PCB if not needed any more */ + } + /* dhcp is cleared below, no need to reset flag*/ + } + + /* clear data structure */ + memset(dhcp, 0, sizeof(struct dhcp)); + /* dhcp_set_state(&dhcp, DHCP_STATE_OFF); */ + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): starting DHCP configuration\n")); + + if (dhcp_inc_pcb_refcount() != ERR_OK) { /* ensure DHCP PCB is allocated */ + return ERR_MEM; + } + dhcp->pcb_allocated = 1; + + if (!netif_is_link_up(netif)) { + /* set state INIT and wait for dhcp_network_changed() to call dhcp_discover() */ + dhcp_set_state(dhcp, DHCP_STATE_INIT); + return ERR_OK; + } + + /* (re)start the DHCP negotiation */ + result = dhcp_discover(netif); + if (result != ERR_OK) { + /* free resources allocated above */ + dhcp_release_and_stop(netif); + return ERR_MEM; + } + return result; +} + +/** + * @ingroup dhcp4 + * Inform a DHCP server of our manual configuration. + * + * This informs DHCP servers of our fixed IP address configuration + * by sending an INFORM message. It does not involve DHCP address + * configuration, it is just here to be nice to the network. 
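Putting dhcp_start() above into context: the usual bring-up on a freshly added netif is to set the interface up and then start the client, checking for the ERR_MEM case (out of memory, or an MTU below the required 576 bytes). An illustrative sketch, not part of this commit, running with the lwIP core locked and using made-up function names:

#include "lwip/dhcp.h"
#include "lwip/netif.h"

static void start_dhcp_client(struct netif *nif)
{
  netif_set_up(nif);                    /* dhcp_start() refuses a netif that is down */
  if (dhcp_start(nif) != ERR_OK) {
    /* allocation failed or MTU < 576: fall back to a static address here */
  }
}

/* later, e.g. from the main loop or a link callback */
static int dhcp_ready(struct netif *nif)
{
  return dhcp_supplied_address(nif);    /* 1 once a leased address is bound to the netif */
}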
+ * + * @param netif The lwIP network interface + */ +void +dhcp_inform(struct netif *netif) +{ + struct dhcp dhcp; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ERROR("netif != NULL", (netif != NULL), return;); + + if (dhcp_inc_pcb_refcount() != ERR_OK) { /* ensure DHCP PCB is allocated */ + return; + } + + memset(&dhcp, 0, sizeof(struct dhcp)); + dhcp_set_state(&dhcp, DHCP_STATE_INFORMING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, &dhcp, DHCP_INFORM, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, &dhcp, DHCP_STATE_INFORMING, msg_out, DHCP_INFORM, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_inform: INFORMING\n")); + + udp_sendto_if(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif); + + pbuf_free(p_out); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_inform: could not allocate DHCP request\n")); + } + + dhcp_dec_pcb_refcount(); /* delete DHCP PCB if not needed any more */ +} + +/** Handle a possible change in the network configuration. + * + * This enters the REBOOTING state to verify that the currently bound + * address is still valid. + */ +void +dhcp_network_changed(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + if (!dhcp) { + return; + } + switch (dhcp->state) { + case DHCP_STATE_REBINDING: + case DHCP_STATE_RENEWING: + case DHCP_STATE_BOUND: + case DHCP_STATE_REBOOTING: + dhcp->tries = 0; + dhcp_reboot(netif); + break; + case DHCP_STATE_OFF: + /* stay off */ + break; + default: + LWIP_ASSERT("invalid dhcp->state", dhcp->state <= DHCP_STATE_BACKING_OFF); + /* INIT/REQUESTING/CHECKING/BACKING_OFF restart with new 'rid' because the + state changes, SELECTING: continue with current 'rid' as we stay in the + same state */ +#if LWIP_DHCP_AUTOIP_COOP + if (dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_ON) { + autoip_stop(netif); + dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_OFF; + } +#endif /* LWIP_DHCP_AUTOIP_COOP */ + /* ensure we start with short timeouts, even if already discovering */ + dhcp->tries = 0; + dhcp_discover(netif); + break; + } +} + +#if DHCP_DOES_ARP_CHECK +/** + * Match an ARP reply with the offered IP address: + * check whether the offered IP address is not in use using ARP + * + * @param netif the network interface on which the reply was received + * @param addr The IP address we received a reply from + */ +void +dhcp_arp_reply(struct netif *netif, const ip4_addr_t *addr) +{ + struct dhcp *dhcp; + + LWIP_ERROR("netif != NULL", (netif != NULL), return;); + dhcp = netif_dhcp_data(netif); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_arp_reply()\n")); + /* is a DHCP client doing an ARP check? */ + if ((dhcp != NULL) && (dhcp->state == DHCP_STATE_CHECKING)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_arp_reply(): CHECKING, arp reply for 0x%08"X32_F"\n", + ip4_addr_get_u32(addr))); + /* did a host respond with the address we + were offered by the DHCP server? 
*/ + if (ip4_addr_cmp(addr, &dhcp->offered_ip_addr)) { + /* we will not accept the offered address */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING, + ("dhcp_arp_reply(): arp reply matched with offered address, declining\n")); + dhcp_decline(netif); + } + } +} + +/** + * Decline an offered lease. + * + * Tell the DHCP server we do not accept the offered address. + * One reason to decline the lease is when we find out the address + * is already in use by another host (through ARP). + * + * @param netif the netif under DHCP control + */ +static err_t +dhcp_decline(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_decline()\n")); + dhcp_set_state(dhcp, DHCP_STATE_BACKING_OFF); + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_DECLINE, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_REQUESTED_IP, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(&dhcp->offered_ip_addr))); + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_BACKING_OFF, msg_out, DHCP_DECLINE, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + /* per section 4.4.4, broadcast DECLINE messages */ + result = udp_sendto_if_src(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif, IP4_ADDR_ANY); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_decline: BACKING OFF\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("dhcp_decline: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = 10 * 1000; + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_decline(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} +#endif /* DHCP_DOES_ARP_CHECK */ + + +/** + * Start the DHCP process, discover a DHCP server. 
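The conditionally compiled decline path above, like the AutoIP cooperation sprinkled through this file, is selected purely by lwipopts.h switches. A configuration sketch with illustrative values that are not necessarily what this project uses:

/* lwipopts.h (illustrative values) */
#define LWIP_DHCP                   1   /* build this DHCP client                         */
#define DHCP_DOES_ARP_CHECK         1   /* ARP-probe the offered address before binding   */
#define LWIP_AUTOIP                 1   /* build the AutoIP client (169.254.0.0/16)       */
#define LWIP_DHCP_AUTOIP_COOP       1   /* fall back to AutoIP while DHCP keeps retrying  */
#define LWIP_DHCP_AUTOIP_COOP_TRIES 3   /* start AutoIP after this many DISCOVER attempts */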
+ * + * @param netif the netif under DHCP control + */ +static err_t +dhcp_discover(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result = ERR_OK; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover()\n")); + + ip4_addr_set_any(&dhcp->offered_ip_addr); + dhcp_set_state(dhcp, DHCP_STATE_SELECTING); + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_DISCOVER, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: making request\n")); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_SELECTING, msg_out, DHCP_DISCOVER, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: sendto(DISCOVER, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER)\n")); + udp_sendto_if_src(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif, IP4_ADDR_ANY); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: deleting()ing\n")); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_discover: SELECTING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_discover: could not allocate DHCP request\n")); + } + if (dhcp->tries < 255) { + dhcp->tries++; + } +#if LWIP_DHCP_AUTOIP_COOP + if (dhcp->tries >= LWIP_DHCP_AUTOIP_COOP_TRIES && dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_OFF) { + dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_ON; + autoip_start(netif); + } +#endif /* LWIP_DHCP_AUTOIP_COOP */ + msecs = (u16_t)((dhcp->tries < 6 ? 1 << dhcp->tries : 60) * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_discover(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + + +/** + * Bind the interface to the offered IP address. 
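The retransmission schedule in dhcp_discover() above (and in dhcp_select() earlier) doubles the wait after each try and caps it at one minute: with dhcp->tries already incremented, the waits come out as 2, 4, 8, 16 and 32 seconds, then 60 seconds from the sixth try onward. The same expression, lifted into a standalone helper purely for reference (illustrative, not part of the commit; it assumes lwIP's u8_t/u16_t typedefs):

/* equivalent to the backoff expression used above */
static u16_t dhcp_backoff_ms(u8_t tries)
{
  /* tries: 1  2  3   4   5   >= 6
   * wait : 2  4  8  16  32   60   seconds */
  return (u16_t)((tries < 6 ? 1 << tries : 60) * 1000);
}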
+ * + * @param netif network interface to bind to the offered address + */ +static void +dhcp_bind(struct netif *netif) +{ + u32_t timeout; + struct dhcp *dhcp; + ip4_addr_t sn_mask, gw_addr; + LWIP_ERROR("dhcp_bind: netif != NULL", (netif != NULL), return;); + dhcp = netif_dhcp_data(netif); + LWIP_ERROR("dhcp_bind: dhcp != NULL", (dhcp != NULL), return;); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + + /* reset time used of lease */ + dhcp->lease_used = 0; + + if (dhcp->offered_t0_lease != 0xffffffffUL) { + /* set renewal period timer */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(): t0 renewal timer %"U32_F" secs\n", dhcp->offered_t0_lease)); + timeout = (dhcp->offered_t0_lease + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS; + if (timeout > 0xffff) { + timeout = 0xffff; + } + dhcp->t0_timeout = (u16_t)timeout; + if (dhcp->t0_timeout == 0) { + dhcp->t0_timeout = 1; + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_bind(): set request timeout %"U32_F" msecs\n", dhcp->offered_t0_lease * 1000)); + } + + /* temporary DHCP lease? */ + if (dhcp->offered_t1_renew != 0xffffffffUL) { + /* set renewal period timer */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(): t1 renewal timer %"U32_F" secs\n", dhcp->offered_t1_renew)); + timeout = (dhcp->offered_t1_renew + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS; + if (timeout > 0xffff) { + timeout = 0xffff; + } + dhcp->t1_timeout = (u16_t)timeout; + if (dhcp->t1_timeout == 0) { + dhcp->t1_timeout = 1; + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_bind(): set request timeout %"U32_F" msecs\n", dhcp->offered_t1_renew * 1000)); + dhcp->t1_renew_time = dhcp->t1_timeout; + } + /* set renewal period timer */ + if (dhcp->offered_t2_rebind != 0xffffffffUL) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(): t2 rebind timer %"U32_F" secs\n", dhcp->offered_t2_rebind)); + timeout = (dhcp->offered_t2_rebind + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS; + if (timeout > 0xffff) { + timeout = 0xffff; + } + dhcp->t2_timeout = (u16_t)timeout; + if (dhcp->t2_timeout == 0) { + dhcp->t2_timeout = 1; + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_bind(): set request timeout %"U32_F" msecs\n", dhcp->offered_t2_rebind * 1000)); + dhcp->t2_rebind_time = dhcp->t2_timeout; + } + + /* If we have sub 1 minute lease, t2 and t1 will kick in at the same time. */ + if ((dhcp->t1_timeout >= dhcp->t2_timeout) && (dhcp->t2_timeout > 0)) { + dhcp->t1_timeout = 0; + } + + if (dhcp->subnet_mask_given) { + /* copy offered network mask */ + ip4_addr_copy(sn_mask, dhcp->offered_sn_mask); + } else { + /* subnet mask not given, choose a safe subnet mask given the network class */ + u8_t first_octet = ip4_addr1(&dhcp->offered_ip_addr); + if (first_octet <= 127) { + ip4_addr_set_u32(&sn_mask, PP_HTONL(0xff000000UL)); + } else if (first_octet >= 192) { + ip4_addr_set_u32(&sn_mask, PP_HTONL(0xffffff00UL)); + } else { + ip4_addr_set_u32(&sn_mask, PP_HTONL(0xffff0000UL)); + } + } + + ip4_addr_copy(gw_addr, dhcp->offered_gw_addr); + /* gateway address not given? 
*/ + if (ip4_addr_isany_val(gw_addr)) { + /* copy network address */ + ip4_addr_get_network(&gw_addr, &dhcp->offered_ip_addr, &sn_mask); + /* use first host address on network as gateway */ + ip4_addr_set_u32(&gw_addr, ip4_addr_get_u32(&gw_addr) | PP_HTONL(0x00000001UL)); + } + +#if LWIP_DHCP_AUTOIP_COOP + if (dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_ON) { + autoip_stop(netif); + dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_OFF; + } +#endif /* LWIP_DHCP_AUTOIP_COOP */ + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_bind(): IP: 0x%08"X32_F" SN: 0x%08"X32_F" GW: 0x%08"X32_F"\n", + ip4_addr_get_u32(&dhcp->offered_ip_addr), ip4_addr_get_u32(&sn_mask), ip4_addr_get_u32(&gw_addr))); + /* netif is now bound to DHCP leased address - set this before assigning the address + to ensure the callback can use dhcp_supplied_address() */ + dhcp_set_state(dhcp, DHCP_STATE_BOUND); + + netif_set_addr(netif, &dhcp->offered_ip_addr, &sn_mask, &gw_addr); + /* interface is used by routing now that an address is set */ +} + +/** + * @ingroup dhcp4 + * Renew an existing DHCP lease at the involved DHCP server. + * + * @param netif network interface which must renew its lease + */ +err_t +dhcp_renew(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_renew()\n")); + dhcp_set_state(dhcp, DHCP_STATE_RENEWING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_REQUEST, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + +#if LWIP_NETIF_HOSTNAME + options_out_len = dhcp_option_hostname(options_out_len, msg_out->options, netif); +#endif /* LWIP_NETIF_HOSTNAME */ + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_RENEWING, msg_out, DHCP_REQUEST, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + result = udp_sendto_if(dhcp_pcb, p_out, &dhcp->server_ip_addr, LWIP_IANA_PORT_DHCP_SERVER, netif); + pbuf_free(p_out); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_renew: RENEWING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_renew: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + /* back-off on retries, but to a maximum of 20 seconds */ + msecs = (u16_t)(dhcp->tries < 10 ? dhcp->tries * 2000 : 20 * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_renew(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + +/** + * Rebind with a DHCP server for an existing DHCP lease. 
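The fallbacks in dhcp_bind() above are worth a worked example: when the ACK carries no subnet-mask option the mask is chosen classfully from the first address octet, and when no router option is given the gateway defaults to the first host address of that network. For a hypothetical lease of 192.168.1.57 with neither option (illustrative, not part of the commit):

ip4_addr_t ip, mask, gw;
IP4_ADDR(&ip, 192, 168, 1, 57);          /* offered address, no mask/router options       */
IP4_ADDR(&mask, 255, 255, 255, 0);       /* first octet 192 >= 192  ->  255.255.255.0     */
ip4_addr_get_network(&gw, &ip, &mask);   /* network address 192.168.1.0                   */
ip4_addr_set_u32(&gw, ip4_addr_get_u32(&gw) | PP_HTONL(0x00000001UL));  /* gw 192.168.1.1 */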
+ * + * @param netif network interface which must rebind with a DHCP server + */ +static err_t +dhcp_rebind(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_rebind()\n")); + dhcp_set_state(dhcp, DHCP_STATE_REBINDING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_REQUEST, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + +#if LWIP_NETIF_HOSTNAME + options_out_len = dhcp_option_hostname(options_out_len, msg_out->options, netif); +#endif /* LWIP_NETIF_HOSTNAME */ + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_REBINDING, msg_out, DHCP_DISCOVER, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + /* broadcast to server */ + result = udp_sendto_if(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_rebind: REBINDING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_rebind: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = (u16_t)(dhcp->tries < 10 ? 
dhcp->tries * 1000 : 10 * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_rebind(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + +/** + * Enter REBOOTING state to verify an existing lease + * + * @param netif network interface which must reboot + */ +static err_t +dhcp_reboot(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_reboot()\n")); + dhcp_set_state(dhcp, DHCP_STATE_REBOOTING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_REQUEST, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN_MIN_REQUIRED); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_REQUESTED_IP, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(&dhcp->offered_ip_addr))); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + +#if LWIP_NETIF_HOSTNAME + options_out_len = dhcp_option_hostname(options_out_len, msg_out->options, netif); +#endif /* LWIP_NETIF_HOSTNAME */ + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_REBOOTING, msg_out, DHCP_REQUEST, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + /* broadcast to server */ + result = udp_sendto_if(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_reboot: REBOOTING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_reboot: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = (u16_t)(dhcp->tries < 10 ? dhcp->tries * 1000 : 10 * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_reboot(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + +/** + * @ingroup dhcp4 + * Release a DHCP lease and stop DHCP statemachine (and AUTOIP if LWIP_DHCP_AUTOIP_COOP). + * + * @param netif network interface + */ +void +dhcp_release_and_stop(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + ip_addr_t server_ip_addr; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_release_and_stop()\n")); + if (dhcp == NULL) { + return; + } + + /* already off? 
-> nothing to do */ + if (dhcp->state == DHCP_STATE_OFF) { + return; + } + + ip_addr_copy(server_ip_addr, dhcp->server_ip_addr); + + /* clean old DHCP offer */ + ip_addr_set_zero_ip4(&dhcp->server_ip_addr); + ip4_addr_set_zero(&dhcp->offered_ip_addr); + ip4_addr_set_zero(&dhcp->offered_sn_mask); + ip4_addr_set_zero(&dhcp->offered_gw_addr); +#if LWIP_DHCP_BOOTP_FILE + ip4_addr_set_zero(&dhcp->offered_si_addr); +#endif /* LWIP_DHCP_BOOTP_FILE */ + dhcp->offered_t0_lease = dhcp->offered_t1_renew = dhcp->offered_t2_rebind = 0; + dhcp->t1_renew_time = dhcp->t2_rebind_time = dhcp->lease_used = dhcp->t0_timeout = 0; + + /* send release message when current IP was assigned via DHCP */ + if (dhcp_supplied_address(netif)) { + /* create and initialize the DHCP message header */ + struct pbuf *p_out; + u16_t options_out_len; + p_out = dhcp_create_msg(netif, dhcp, DHCP_RELEASE, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_SERVER_ID, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(ip_2_ip4(&server_ip_addr)))); + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, dhcp->state, msg_out, DHCP_RELEASE, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + udp_sendto_if(dhcp_pcb, p_out, &server_ip_addr, LWIP_IANA_PORT_DHCP_SERVER, netif); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_release: RELEASED, DHCP_STATE_OFF\n")); + } else { + /* sending release failed, but that's not a problem since the correct behaviour of dhcp does not rely on release */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_release: could not allocate DHCP request\n")); + } + } + + /* remove IP address from interface (prevents routing from selecting this interface) */ + netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); + +#if LWIP_DHCP_AUTOIP_COOP + if (dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_ON) { + autoip_stop(netif); + dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_OFF; + } +#endif /* LWIP_DHCP_AUTOIP_COOP */ + + dhcp_set_state(dhcp, DHCP_STATE_OFF); + + if (dhcp->pcb_allocated != 0) { + dhcp_dec_pcb_refcount(); /* free DHCP PCB if not needed any more */ + dhcp->pcb_allocated = 0; + } +} + +/** + * @ingroup dhcp4 + * This function calls dhcp_release_and_stop() internally. + * @deprecated Use dhcp_release_and_stop() instead. + */ +err_t +dhcp_release(struct netif *netif) +{ + dhcp_release_and_stop(netif); + return ERR_OK; +} + +/** + * @ingroup dhcp4 + * This function calls dhcp_release_and_stop() internally. + * @deprecated Use dhcp_release_and_stop() instead. + */ +void +dhcp_stop(struct netif *netif) +{ + dhcp_release_and_stop(netif); +} + +/* + * Set the DHCP state of a DHCP client. + * + * If the state changed, reset the number of tries. + */ +static void +dhcp_set_state(struct dhcp *dhcp, u8_t new_state) +{ + if (new_state != dhcp->state) { + dhcp->state = new_state; + dhcp->tries = 0; + dhcp->request_timeout = 0; + } +} + +/* + * Concatenate an option type and length field to the outgoing + * DHCP message. 
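For context, a minimal application-side sketch of how this API is normally driven: dhcp_start() (declared in lwip/dhcp.h, not part of this file) kicks off the state machine, dhcp_supplied_address() further below reports when a lease is bound, and dhcp_release_and_stop() above tears it down. It assumes the netif was already added and brought up elsewhere.

  #include "lwip/dhcp.h"
  #include "lwip/netif.h"

  static void app_dhcp_example(struct netif *netif)
  {
    dhcp_start(netif);             /* DISCOVER/REQUEST run from the lwIP timers */

    /* ... later: once dhcp_supplied_address(netif) returns 1, netif holds a leased address ... */

    dhcp_release_and_stop(netif);  /* on shutdown: send DHCPRELEASE, clear the address, stop */
  }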
+ * + */ +static u16_t +dhcp_option(u16_t options_out_len, u8_t *options, u8_t option_type, u8_t option_len) +{ + LWIP_ASSERT("dhcp_option: options_out_len + 2 + option_len <= DHCP_OPTIONS_LEN", options_out_len + 2U + option_len <= DHCP_OPTIONS_LEN); + options[options_out_len++] = option_type; + options[options_out_len++] = option_len; + return options_out_len; +} +/* + * Concatenate a single byte to the outgoing DHCP message. + * + */ +static u16_t +dhcp_option_byte(u16_t options_out_len, u8_t *options, u8_t value) +{ + LWIP_ASSERT("dhcp_option_byte: options_out_len < DHCP_OPTIONS_LEN", options_out_len < DHCP_OPTIONS_LEN); + options[options_out_len++] = value; + return options_out_len; +} + +static u16_t +dhcp_option_short(u16_t options_out_len, u8_t *options, u16_t value) +{ + LWIP_ASSERT("dhcp_option_short: options_out_len + 2 <= DHCP_OPTIONS_LEN", options_out_len + 2U <= DHCP_OPTIONS_LEN); + options[options_out_len++] = (u8_t)((value & 0xff00U) >> 8); + options[options_out_len++] = (u8_t) (value & 0x00ffU); + return options_out_len; +} + +static u16_t +dhcp_option_long(u16_t options_out_len, u8_t *options, u32_t value) +{ + LWIP_ASSERT("dhcp_option_long: options_out_len + 4 <= DHCP_OPTIONS_LEN", options_out_len + 4U <= DHCP_OPTIONS_LEN); + options[options_out_len++] = (u8_t)((value & 0xff000000UL) >> 24); + options[options_out_len++] = (u8_t)((value & 0x00ff0000UL) >> 16); + options[options_out_len++] = (u8_t)((value & 0x0000ff00UL) >> 8); + options[options_out_len++] = (u8_t)((value & 0x000000ffUL)); + return options_out_len; +} + +#if LWIP_NETIF_HOSTNAME +static u16_t +dhcp_option_hostname(u16_t options_out_len, u8_t *options, struct netif *netif) +{ + if (netif->hostname != NULL) { + size_t namelen = strlen(netif->hostname); + if (namelen > 0) { + size_t len; + const char *p = netif->hostname; + /* Shrink len to available bytes (need 2 bytes for OPTION_HOSTNAME + and 1 byte for trailer) */ + size_t available = DHCP_OPTIONS_LEN - options_out_len - 3; + LWIP_ASSERT("DHCP: hostname is too long!", namelen <= available); + len = LWIP_MIN(namelen, available); + LWIP_ASSERT("DHCP: hostname is too long!", len <= 0xFF); + options_out_len = dhcp_option(options_out_len, options, DHCP_OPTION_HOSTNAME, (u8_t)len); + while (len--) { + options_out_len = dhcp_option_byte(options_out_len, options, *p++); + } + } + } + return options_out_len; +} +#endif /* LWIP_NETIF_HOSTNAME */ + +/** + * Extract the DHCP message and the DHCP options. + * + * Extract the DHCP message and the DHCP options, each into a contiguous + * piece of memory. As a DHCP message is variable sized by its options, + * and also allows overriding some fields for options, the easy approach + * is to first unfold the options into a contiguous piece of memory, and + * use that further on. 
+ * + */ +static err_t +dhcp_parse_reply(struct pbuf *p, struct dhcp *dhcp) +{ + u8_t *options; + u16_t offset; + u16_t offset_max; + u16_t options_idx; + u16_t options_idx_max; + struct pbuf *q; + int parse_file_as_options = 0; + int parse_sname_as_options = 0; + struct dhcp_msg *msg_in; +#if LWIP_DHCP_BOOTP_FILE + int file_overloaded = 0; +#endif + + LWIP_UNUSED_ARG(dhcp); + + /* clear received options */ + dhcp_clear_all_options(dhcp); + /* check that beginning of dhcp_msg (up to and including chaddr) is in first pbuf */ + if (p->len < DHCP_SNAME_OFS) { + return ERR_BUF; + } + msg_in = (struct dhcp_msg *)p->payload; +#if LWIP_DHCP_BOOTP_FILE + /* clear boot file name */ + dhcp->boot_file_name[0] = 0; +#endif /* LWIP_DHCP_BOOTP_FILE */ + + /* parse options */ + + /* start with options field */ + options_idx = DHCP_OPTIONS_OFS; + /* parse options to the end of the received packet */ + options_idx_max = p->tot_len; +again: + q = p; + while ((q != NULL) && (options_idx >= q->len)) { + options_idx = (u16_t)(options_idx - q->len); + options_idx_max = (u16_t)(options_idx_max - q->len); + q = q->next; + } + if (q == NULL) { + return ERR_BUF; + } + offset = options_idx; + offset_max = options_idx_max; + options = (u8_t *)q->payload; + /* at least 1 byte to read and no end marker, then at least 3 bytes to read? */ + while ((q != NULL) && (offset < offset_max) && (options[offset] != DHCP_OPTION_END)) { + u8_t op = options[offset]; + u8_t len; + u8_t decode_len = 0; + int decode_idx = -1; + u16_t val_offset = (u16_t)(offset + 2); + if (val_offset < offset) { + /* overflow */ + return ERR_BUF; + } + /* len byte might be in the next pbuf */ + if ((offset + 1) < q->len) { + len = options[offset + 1]; + } else { + len = (q->next != NULL ? ((u8_t *)q->next->payload)[0] : 0); + } + /* LWIP_DEBUGF(DHCP_DEBUG, ("msg_offset=%"U16_F", q->len=%"U16_F, msg_offset, q->len)); */ + decode_len = len; + switch (op) { + /* case(DHCP_OPTION_END): handled above */ + case (DHCP_OPTION_PAD): + /* special option: no len encoded */ + decode_len = len = 0; + /* will be increased below */ + break; + case (DHCP_OPTION_SUBNET_MASK): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_SUBNET_MASK; + break; + case (DHCP_OPTION_ROUTER): + decode_len = 4; /* only copy the first given router */ + LWIP_ERROR("len >= decode_len", len >= decode_len, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_ROUTER; + break; +#if LWIP_DHCP_PROVIDE_DNS_SERVERS + case (DHCP_OPTION_DNS_SERVER): + /* special case: there might be more than one server */ + LWIP_ERROR("len %% 4 == 0", len % 4 == 0, return ERR_VAL;); + /* limit number of DNS servers */ + decode_len = LWIP_MIN(len, 4 * DNS_MAX_SERVERS); + LWIP_ERROR("len >= decode_len", len >= decode_len, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_DNS_SERVER; + break; +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ + case (DHCP_OPTION_LEASE_TIME): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_LEASE_TIME; + break; +#if LWIP_DHCP_GET_NTP_SRV + case (DHCP_OPTION_NTP): + /* special case: there might be more than one server */ + LWIP_ERROR("len %% 4 == 0", len % 4 == 0, return ERR_VAL;); + /* limit number of NTP servers */ + decode_len = LWIP_MIN(len, 4 * LWIP_DHCP_MAX_NTP_SERVERS); + LWIP_ERROR("len >= decode_len", len >= decode_len, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_NTP_SERVER; + break; +#endif /* LWIP_DHCP_GET_NTP_SRV*/ + case (DHCP_OPTION_OVERLOAD): + LWIP_ERROR("len == 1", len == 1, return ERR_VAL;); + /* decode 
overload only in options, not in file/sname: invalid packet */ + LWIP_ERROR("overload in file/sname", options_idx == DHCP_OPTIONS_OFS, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_OVERLOAD; + break; + case (DHCP_OPTION_MESSAGE_TYPE): + LWIP_ERROR("len == 1", len == 1, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_MSG_TYPE; + break; + case (DHCP_OPTION_SERVER_ID): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_SERVER_ID; + break; + case (DHCP_OPTION_T1): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_T1; + break; + case (DHCP_OPTION_T2): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_T2; + break; + default: + decode_len = 0; + LWIP_DEBUGF(DHCP_DEBUG, ("skipping option %"U16_F" in options\n", (u16_t)op)); + LWIP_HOOK_DHCP_PARSE_OPTION(ip_current_netif(), dhcp, dhcp->state, msg_in, + dhcp_option_given(dhcp, DHCP_OPTION_IDX_MSG_TYPE) ? (u8_t)dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_MSG_TYPE) : 0, + op, len, q, val_offset); + break; + } + if (op == DHCP_OPTION_PAD) { + offset++; + } else { + if (offset + len + 2 > 0xFFFF) { + /* overflow */ + return ERR_BUF; + } + offset = (u16_t)(offset + len + 2); + if (decode_len > 0) { + u32_t value = 0; + u16_t copy_len; +decode_next: + LWIP_ASSERT("check decode_idx", decode_idx >= 0 && decode_idx < DHCP_OPTION_IDX_MAX); + if (!dhcp_option_given(dhcp, decode_idx)) { + copy_len = LWIP_MIN(decode_len, 4); + if (pbuf_copy_partial(q, &value, copy_len, val_offset) != copy_len) { + return ERR_BUF; + } + if (decode_len > 4) { + /* decode more than one u32_t */ + u16_t next_val_offset; + LWIP_ERROR("decode_len %% 4 == 0", decode_len % 4 == 0, return ERR_VAL;); + dhcp_got_option(dhcp, decode_idx); + dhcp_set_option_value(dhcp, decode_idx, lwip_htonl(value)); + decode_len = (u8_t)(decode_len - 4); + next_val_offset = (u16_t)(val_offset + 4); + if (next_val_offset < val_offset) { + /* overflow */ + return ERR_BUF; + } + val_offset = next_val_offset; + decode_idx++; + goto decode_next; + } else if (decode_len == 4) { + value = lwip_ntohl(value); + } else { + LWIP_ERROR("invalid decode_len", decode_len == 1, return ERR_VAL;); + value = ((u8_t *)&value)[0]; + } + dhcp_got_option(dhcp, decode_idx); + dhcp_set_option_value(dhcp, decode_idx, value); + } + } + } + if (offset >= q->len) { + offset = (u16_t)(offset - q->len); + offset_max = (u16_t)(offset_max - q->len); + if (offset < offset_max) { + q = q->next; + LWIP_ERROR("next pbuf was null", q != NULL, return ERR_VAL;); + options = (u8_t *)q->payload; + } else { + /* We've run out of bytes, probably no end marker. Don't proceed. */ + return ERR_BUF; + } + } + } + /* is this an overloaded message? 
*/ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_OVERLOAD)) { + u32_t overload = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_OVERLOAD); + dhcp_clear_option(dhcp, DHCP_OPTION_IDX_OVERLOAD); + if (overload == DHCP_OVERLOAD_FILE) { + parse_file_as_options = 1; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("overloaded file field\n")); + } else if (overload == DHCP_OVERLOAD_SNAME) { + parse_sname_as_options = 1; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("overloaded sname field\n")); + } else if (overload == DHCP_OVERLOAD_SNAME_FILE) { + parse_sname_as_options = 1; + parse_file_as_options = 1; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("overloaded sname and file field\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("invalid overload option: %d\n", (int)overload)); + } + } + if (parse_file_as_options) { + /* if both are overloaded, parse file first and then sname (RFC 2131 ch. 4.1) */ + parse_file_as_options = 0; + options_idx = DHCP_FILE_OFS; + options_idx_max = DHCP_FILE_OFS + DHCP_FILE_LEN; +#if LWIP_DHCP_BOOTP_FILE + file_overloaded = 1; +#endif + goto again; + } else if (parse_sname_as_options) { + parse_sname_as_options = 0; + options_idx = DHCP_SNAME_OFS; + options_idx_max = DHCP_SNAME_OFS + DHCP_SNAME_LEN; + goto again; + } +#if LWIP_DHCP_BOOTP_FILE + if (!file_overloaded) { + /* only do this for ACK messages */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_MSG_TYPE) && + (dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_MSG_TYPE) == DHCP_ACK)) + /* copy bootp file name, don't care for sname (server hostname) */ + if (pbuf_copy_partial(p, dhcp->boot_file_name, DHCP_FILE_LEN-1, DHCP_FILE_OFS) != (DHCP_FILE_LEN-1)) { + return ERR_BUF; + } + /* make sure the string is really NULL-terminated */ + dhcp->boot_file_name[DHCP_FILE_LEN-1] = 0; + } +#endif /* LWIP_DHCP_BOOTP_FILE */ + return ERR_OK; +} + +/** + * If an incoming DHCP message is in response to us, then trigger the state machine + */ +static void +dhcp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) +{ + struct netif *netif = ip_current_input_netif(); + struct dhcp *dhcp = netif_dhcp_data(netif); + struct dhcp_msg *reply_msg = (struct dhcp_msg *)p->payload; + u8_t msg_type; + u8_t i; + struct dhcp_msg *msg_in; + + LWIP_UNUSED_ARG(arg); + + /* Caught DHCP message from netif that does not have DHCP enabled? 
-> not interested */ + if ((dhcp == NULL) || (dhcp->pcb_allocated == 0)) { + goto free_pbuf_and_return; + } + + LWIP_ASSERT("invalid server address type", IP_IS_V4(addr)); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_recv(pbuf = %p) from DHCP server %"U16_F".%"U16_F".%"U16_F".%"U16_F" port %"U16_F"\n", (void *)p, + ip4_addr1_16(ip_2_ip4(addr)), ip4_addr2_16(ip_2_ip4(addr)), ip4_addr3_16(ip_2_ip4(addr)), ip4_addr4_16(ip_2_ip4(addr)), port)); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("pbuf->len = %"U16_F"\n", p->len)); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("pbuf->tot_len = %"U16_F"\n", p->tot_len)); + /* prevent warnings about unused arguments */ + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(addr); + LWIP_UNUSED_ARG(port); + + if (p->len < DHCP_MIN_REPLY_LEN) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("DHCP reply message or pbuf too short\n")); + goto free_pbuf_and_return; + } + + if (reply_msg->op != DHCP_BOOTREPLY) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("not a DHCP reply message, but type %"U16_F"\n", (u16_t)reply_msg->op)); + goto free_pbuf_and_return; + } + /* iterate through hardware address and match against DHCP message */ + for (i = 0; i < netif->hwaddr_len && i < LWIP_MIN(DHCP_CHADDR_LEN, NETIF_MAX_HWADDR_LEN); i++) { + if (netif->hwaddr[i] != reply_msg->chaddr[i]) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("netif->hwaddr[%"U16_F"]==%02"X16_F" != reply_msg->chaddr[%"U16_F"]==%02"X16_F"\n", + (u16_t)i, (u16_t)netif->hwaddr[i], (u16_t)i, (u16_t)reply_msg->chaddr[i])); + goto free_pbuf_and_return; + } + } + /* match transaction ID against what we expected */ + if (lwip_ntohl(reply_msg->xid) != dhcp->xid) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("transaction id mismatch reply_msg->xid(%"X32_F")!=dhcp->xid(%"X32_F")\n", lwip_ntohl(reply_msg->xid), dhcp->xid)); + goto free_pbuf_and_return; + } + /* option fields could be unfold? */ + if (dhcp_parse_reply(p, dhcp) != ERR_OK) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("problem unfolding DHCP message - too short on memory?\n")); + goto free_pbuf_and_return; + } + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("searching DHCP_OPTION_MESSAGE_TYPE\n")); + /* obtain pointer to DHCP message type */ + if (!dhcp_option_given(dhcp, DHCP_OPTION_IDX_MSG_TYPE)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("DHCP_OPTION_MESSAGE_TYPE option not found\n")); + goto free_pbuf_and_return; + } + + msg_in = (struct dhcp_msg *)p->payload; + /* read DHCP message type */ + msg_type = (u8_t)dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_MSG_TYPE); + /* message type is DHCP ACK? */ + if (msg_type == DHCP_ACK) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("DHCP_ACK received\n")); + /* in requesting state? */ + if (dhcp->state == DHCP_STATE_REQUESTING) { + dhcp_handle_ack(netif, msg_in); +#if DHCP_DOES_ARP_CHECK + if ((netif->flags & NETIF_FLAG_ETHARP) != 0) { + /* check if the acknowledged lease address is already in use */ + dhcp_check(netif); + } else { + /* bind interface to the acknowledged lease address */ + dhcp_bind(netif); + } +#else + /* bind interface to the acknowledged lease address */ + dhcp_bind(netif); +#endif + } + /* already bound to the given lease address? 
*/ + else if ((dhcp->state == DHCP_STATE_REBOOTING) || (dhcp->state == DHCP_STATE_REBINDING) || + (dhcp->state == DHCP_STATE_RENEWING)) { + dhcp_handle_ack(netif, msg_in); + dhcp_bind(netif); + } + } + /* received a DHCP_NAK in appropriate state? */ + else if ((msg_type == DHCP_NAK) && + ((dhcp->state == DHCP_STATE_REBOOTING) || (dhcp->state == DHCP_STATE_REQUESTING) || + (dhcp->state == DHCP_STATE_REBINDING) || (dhcp->state == DHCP_STATE_RENEWING ))) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("DHCP_NAK received\n")); + dhcp_handle_nak(netif); + } + /* received a DHCP_OFFER in DHCP_STATE_SELECTING state? */ + else if ((msg_type == DHCP_OFFER) && (dhcp->state == DHCP_STATE_SELECTING)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("DHCP_OFFER received in DHCP_STATE_SELECTING state\n")); + /* remember offered lease */ + dhcp_handle_offer(netif, msg_in); + } + +free_pbuf_and_return: + pbuf_free(p); +} + +/** + * Create a DHCP request, fill in common headers + * + * @param netif the netif under DHCP control + * @param dhcp dhcp control struct + * @param message_type message type of the request + */ +static struct pbuf * +dhcp_create_msg(struct netif *netif, struct dhcp *dhcp, u8_t message_type, u16_t *options_out_len) +{ + u16_t i; + struct pbuf *p_out; + struct dhcp_msg *msg_out; + u16_t options_out_len_loc; + +#ifndef DHCP_GLOBAL_XID + /** default global transaction identifier starting value (easy to match + * with a packet analyser). We simply increment for each new request. + * Predefine DHCP_GLOBAL_XID to a better value or a function call to generate one + * at runtime, any supporting function prototypes can be defined in DHCP_GLOBAL_XID_HEADER */ +#if DHCP_CREATE_RAND_XID && defined(LWIP_RAND) + static u32_t xid; +#else /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ + static u32_t xid = 0xABCD0000; +#endif /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ +#else + if (!xid_initialised) { + xid = DHCP_GLOBAL_XID; + xid_initialised = !xid_initialised; + } +#endif + LWIP_ERROR("dhcp_create_msg: netif != NULL", (netif != NULL), return NULL;); + LWIP_ERROR("dhcp_create_msg: dhcp != NULL", (dhcp != NULL), return NULL;); + p_out = pbuf_alloc(PBUF_TRANSPORT, sizeof(struct dhcp_msg), PBUF_RAM); + if (p_out == NULL) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("dhcp_create_msg(): could not allocate pbuf\n")); + return NULL; + } + LWIP_ASSERT("dhcp_create_msg: check that first pbuf can hold struct dhcp_msg", + (p_out->len >= sizeof(struct dhcp_msg))); + + /* DHCP_REQUEST should reuse 'xid' from DHCPOFFER */ + if ((message_type != DHCP_REQUEST) || (dhcp->state == DHCP_STATE_REBOOTING)) { + /* reuse transaction identifier in retransmissions */ + if (dhcp->tries == 0) { +#if DHCP_CREATE_RAND_XID && defined(LWIP_RAND) + xid = LWIP_RAND(); +#else /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ + xid++; +#endif /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ + } + dhcp->xid = xid; + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, + ("transaction id xid(%"X32_F")\n", xid)); + + msg_out = (struct dhcp_msg *)p_out->payload; + memset(msg_out, 0, sizeof(struct dhcp_msg)); + + msg_out->op = DHCP_BOOTREQUEST; + /* @todo: make link layer independent */ + msg_out->htype = LWIP_IANA_HWTYPE_ETHERNET; + msg_out->hlen = netif->hwaddr_len; + msg_out->xid = lwip_htonl(dhcp->xid); + /* we don't need the broadcast flag since we can receive unicast traffic + before being fully configured! 
*/ + /* set ciaddr to netif->ip_addr based on message_type and state */ + if ((message_type == DHCP_INFORM) || (message_type == DHCP_DECLINE) || (message_type == DHCP_RELEASE) || + ((message_type == DHCP_REQUEST) && /* DHCP_STATE_BOUND not used for sending! */ + ((dhcp->state == DHCP_STATE_RENEWING) || dhcp->state == DHCP_STATE_REBINDING))) { + ip4_addr_copy(msg_out->ciaddr, *netif_ip4_addr(netif)); + } + for (i = 0; i < LWIP_MIN(DHCP_CHADDR_LEN, NETIF_MAX_HWADDR_LEN); i++) { + /* copy netif hardware address (padded with zeroes through memset already) */ + msg_out->chaddr[i] = netif->hwaddr[i]; + } + msg_out->cookie = PP_HTONL(DHCP_MAGIC_COOKIE); + /* Add option MESSAGE_TYPE */ + options_out_len_loc = dhcp_option(0, msg_out->options, DHCP_OPTION_MESSAGE_TYPE, DHCP_OPTION_MESSAGE_TYPE_LEN); + options_out_len_loc = dhcp_option_byte(options_out_len_loc, msg_out->options, message_type); + if (options_out_len) { + *options_out_len = options_out_len_loc; + } + return p_out; +} + +/** + * Add a DHCP message trailer + * + * Adds the END option to the DHCP message, and if + * necessary, up to three padding bytes. + */ +static void +dhcp_option_trailer(u16_t options_out_len, u8_t *options, struct pbuf *p_out) +{ + options[options_out_len++] = DHCP_OPTION_END; + /* packet is too small, or not 4 byte aligned? */ + while (((options_out_len < DHCP_MIN_OPTIONS_LEN) || (options_out_len & 3)) && + (options_out_len < DHCP_OPTIONS_LEN)) { + /* add a fill/padding byte */ + options[options_out_len++] = 0; + } + /* shrink the pbuf to the actual content length */ + pbuf_realloc(p_out, (u16_t)(sizeof(struct dhcp_msg) - DHCP_OPTIONS_LEN + options_out_len)); +} + +/** check if DHCP supplied netif->ip_addr + * + * @param netif the netif to check + * @return 1 if DHCP supplied netif->ip_addr (states BOUND or RENEWING), + * 0 otherwise + */ +u8_t +dhcp_supplied_address(const struct netif *netif) +{ + if ((netif != NULL) && (netif_dhcp_data(netif) != NULL)) { + struct dhcp *dhcp = netif_dhcp_data(netif); + return (dhcp->state == DHCP_STATE_BOUND) || (dhcp->state == DHCP_STATE_RENEWING) || + (dhcp->state == DHCP_STATE_REBINDING); + } + return 0; +} + +#endif /* LWIP_IPV4 && LWIP_DHCP */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c b/Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c new file mode 100644 index 0000000..442aac0 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c @@ -0,0 +1,1204 @@ +/** + * @file + * Address Resolution Protocol module for IP over Ethernet + * + * Functionally, ARP is divided into two parts. The first maps an IP address + * to a physical address when sending a packet, and the second part answers + * requests from other machines for our physical address. + * + * This implementation complies with RFC 826 (Ethernet ARP). It supports + * Gratuitious ARP from RFC3220 (IP Mobility Support for IPv4) section 4.6 + * if an interface calls etharp_gratuitous(our_netif) upon address change. + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * Copyright (c) 2003-2004 Leon Woestenberg + * Copyright (c) 2003-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_ARP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/etharp.h" +#include "lwip/stats.h" +#include "lwip/snmp.h" +#include "lwip/dhcp.h" +#include "lwip/autoip.h" +#include "lwip/prot/iana.h" +#include "netif/ethernet.h" + +#include <string.h> + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/** Re-request a used ARP entry 1 minute before it would expire to prevent + * breaking a steadily used connection because the ARP entry timed out. */ +#define ARP_AGE_REREQUEST_USED_UNICAST (ARP_MAXAGE - 30) +#define ARP_AGE_REREQUEST_USED_BROADCAST (ARP_MAXAGE - 15) + +/** the time an ARP entry stays pending after first request, + * for ARP_TMR_INTERVAL = 1000, this is + * 10 seconds. + * + * @internal Keep this number at least 2, otherwise it might + * run out instantly if the timeout occurs directly after a request. + */ +#define ARP_MAXPENDING 5 + +/** ARP states */ +enum etharp_state { + ETHARP_STATE_EMPTY = 0, + ETHARP_STATE_PENDING, + ETHARP_STATE_STABLE, + ETHARP_STATE_STABLE_REREQUESTING_1, + ETHARP_STATE_STABLE_REREQUESTING_2 +#if ETHARP_SUPPORT_STATIC_ENTRIES + , ETHARP_STATE_STATIC +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ +}; + +struct etharp_entry { +#if ARP_QUEUEING + /** Pointer to queue of pending outgoing packets on this ARP entry. */ + struct etharp_q_entry *q; +#else /* ARP_QUEUEING */ + /** Pointer to a single pending outgoing packet on this ARP entry. */ + struct pbuf *q; +#endif /* ARP_QUEUEING */ + ip4_addr_t ipaddr; + struct netif *netif; + struct eth_addr ethaddr; + u16_t ctime; + u8_t state; +}; + +static struct etharp_entry arp_table[ARP_TABLE_SIZE]; + +#if !LWIP_NETIF_HWADDRHINT +static netif_addr_idx_t etharp_cached_entry; +#endif /* !LWIP_NETIF_HWADDRHINT */ + +/** Try hard to create a new entry - we want the IP address to appear in + the cache (even if this means removing an active entry or so).
*/ +#define ETHARP_FLAG_TRY_HARD 1 +#define ETHARP_FLAG_FIND_ONLY 2 +#if ETHARP_SUPPORT_STATIC_ENTRIES +#define ETHARP_FLAG_STATIC_ENTRY 4 +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + +#if LWIP_NETIF_HWADDRHINT +#define ETHARP_SET_ADDRHINT(netif, addrhint) do { if (((netif) != NULL) && ((netif)->hints != NULL)) { \ + (netif)->hints->addr_hint = (addrhint); }} while(0) +#else /* LWIP_NETIF_HWADDRHINT */ +#define ETHARP_SET_ADDRHINT(netif, addrhint) (etharp_cached_entry = (addrhint)) +#endif /* LWIP_NETIF_HWADDRHINT */ + + +/* Check for maximum ARP_TABLE_SIZE */ +#if (ARP_TABLE_SIZE > NETIF_ADDR_IDX_MAX) +#error "ARP_TABLE_SIZE must fit in an s16_t, you have to reduce it in your lwipopts.h" +#endif + + +static err_t etharp_request_dst(struct netif *netif, const ip4_addr_t *ipaddr, const struct eth_addr *hw_dst_addr); +static err_t etharp_raw(struct netif *netif, + const struct eth_addr *ethsrc_addr, const struct eth_addr *ethdst_addr, + const struct eth_addr *hwsrc_addr, const ip4_addr_t *ipsrc_addr, + const struct eth_addr *hwdst_addr, const ip4_addr_t *ipdst_addr, + const u16_t opcode); + +#if ARP_QUEUEING +/** + * Free a complete queue of etharp entries + * + * @param q a qeueue of etharp_q_entry's to free + */ +static void +free_etharp_q(struct etharp_q_entry *q) +{ + struct etharp_q_entry *r; + LWIP_ASSERT("q != NULL", q != NULL); + while (q) { + r = q; + q = q->next; + LWIP_ASSERT("r->p != NULL", (r->p != NULL)); + pbuf_free(r->p); + memp_free(MEMP_ARP_QUEUE, r); + } +} +#else /* ARP_QUEUEING */ + +/** Compatibility define: free the queued pbuf */ +#define free_etharp_q(q) pbuf_free(q) + +#endif /* ARP_QUEUEING */ + +/** Clean up ARP table entries */ +static void +etharp_free_entry(int i) +{ + /* remove from SNMP ARP index tree */ + mib2_remove_arp_entry(arp_table[i].netif, &arp_table[i].ipaddr); + /* and empty packet queue */ + if (arp_table[i].q != NULL) { + /* remove all queued packets */ + LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_free_entry: freeing entry %"U16_F", packet queue %p.\n", (u16_t)i, (void *)(arp_table[i].q))); + free_etharp_q(arp_table[i].q); + arp_table[i].q = NULL; + } + /* recycle entry for re-use */ + arp_table[i].state = ETHARP_STATE_EMPTY; +#ifdef LWIP_DEBUG + /* for debugging, clean out the complete entry */ + arp_table[i].ctime = 0; + arp_table[i].netif = NULL; + ip4_addr_set_zero(&arp_table[i].ipaddr); + arp_table[i].ethaddr = ethzero; +#endif /* LWIP_DEBUG */ +} + +/** + * Clears expired entries in the ARP table. + * + * This function should be called every ARP_TMR_INTERVAL milliseconds (1 second), + * in order to expire entries in the ARP table. + */ +void +etharp_tmr(void) +{ + int i; + + LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_timer\n")); + /* remove expired entries from the ARP table */ + for (i = 0; i < ARP_TABLE_SIZE; ++i) { + u8_t state = arp_table[i].state; + if (state != ETHARP_STATE_EMPTY +#if ETHARP_SUPPORT_STATIC_ENTRIES + && (state != ETHARP_STATE_STATIC) +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + ) { + arp_table[i].ctime++; + if ((arp_table[i].ctime >= ARP_MAXAGE) || + ((arp_table[i].state == ETHARP_STATE_PENDING) && + (arp_table[i].ctime >= ARP_MAXPENDING))) { + /* pending or stable entry has become old! */ + LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_timer: expired %s entry %d.\n", + arp_table[i].state >= ETHARP_STATE_STABLE ? "stable" : "pending", i)); + /* clean up entries that have just been expired */ + etharp_free_entry(i); + } else if (arp_table[i].state == ETHARP_STATE_STABLE_REREQUESTING_1) { + /* Don't send more than one request every 2 seconds. 
*/ + arp_table[i].state = ETHARP_STATE_STABLE_REREQUESTING_2; + } else if (arp_table[i].state == ETHARP_STATE_STABLE_REREQUESTING_2) { + /* Reset state to stable, so that the next transmitted packet will + re-send an ARP request. */ + arp_table[i].state = ETHARP_STATE_STABLE; + } else if (arp_table[i].state == ETHARP_STATE_PENDING) { + /* still pending, resend an ARP query */ + etharp_request(arp_table[i].netif, &arp_table[i].ipaddr); + } + } + } +} + +/** + * Search the ARP table for a matching or new entry. + * + * If an IP address is given, return a pending or stable ARP entry that matches + * the address. If no match is found, create a new entry with this address set, + * but in state ETHARP_EMPTY. The caller must check and possibly change the + * state of the returned entry. + * + * If ipaddr is NULL, return a initialized new entry in state ETHARP_EMPTY. + * + * In all cases, attempt to create new entries from an empty entry. If no + * empty entries are available and ETHARP_FLAG_TRY_HARD flag is set, recycle + * old entries. Heuristic choose the least important entry for recycling. + * + * @param ipaddr IP address to find in ARP cache, or to add if not found. + * @param flags See @ref etharp_state + * @param netif netif related to this address (used for NETIF_HWADDRHINT) + * + * @return The ARP entry index that matched or is created, ERR_MEM if no + * entry is found or could be recycled. + */ +static s16_t +etharp_find_entry(const ip4_addr_t *ipaddr, u8_t flags, struct netif *netif) +{ + s16_t old_pending = ARP_TABLE_SIZE, old_stable = ARP_TABLE_SIZE; + s16_t empty = ARP_TABLE_SIZE; + s16_t i = 0; + /* oldest entry with packets on queue */ + s16_t old_queue = ARP_TABLE_SIZE; + /* its age */ + u16_t age_queue = 0, age_pending = 0, age_stable = 0; + + LWIP_UNUSED_ARG(netif); + + /** + * a) do a search through the cache, remember candidates + * b) select candidate entry + * c) create new entry + */ + + /* a) in a single search sweep, do all of this + * 1) remember the first empty entry (if any) + * 2) remember the oldest stable entry (if any) + * 3) remember the oldest pending entry without queued packets (if any) + * 4) remember the oldest pending entry with queued packets (if any) + * 5) search for a matching IP entry, either pending or stable + * until 5 matches, or all entries are searched for. + */ + + for (i = 0; i < ARP_TABLE_SIZE; ++i) { + u8_t state = arp_table[i].state; + /* no empty entry found yet and now we do find one? */ + if ((empty == ARP_TABLE_SIZE) && (state == ETHARP_STATE_EMPTY)) { + LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_find_entry: found empty entry %d\n", (int)i)); + /* remember first empty entry */ + empty = i; + } else if (state != ETHARP_STATE_EMPTY) { + LWIP_ASSERT("state == ETHARP_STATE_PENDING || state >= ETHARP_STATE_STABLE", + state == ETHARP_STATE_PENDING || state >= ETHARP_STATE_STABLE); + /* if given, does IP address match IP address in ARP entry? */ + if (ipaddr && ip4_addr_cmp(ipaddr, &arp_table[i].ipaddr) +#if ETHARP_TABLE_MATCH_NETIF + && ((netif == NULL) || (netif == arp_table[i].netif)) +#endif /* ETHARP_TABLE_MATCH_NETIF */ + ) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: found matching entry %d\n", (int)i)); + /* found exact IP address match, simply bail out */ + return i; + } + /* pending entry? */ + if (state == ETHARP_STATE_PENDING) { + /* pending with queued packets? 
*/ + if (arp_table[i].q != NULL) { + if (arp_table[i].ctime >= age_queue) { + old_queue = i; + age_queue = arp_table[i].ctime; + } + } else + /* pending without queued packets? */ + { + if (arp_table[i].ctime >= age_pending) { + old_pending = i; + age_pending = arp_table[i].ctime; + } + } + /* stable entry? */ + } else if (state >= ETHARP_STATE_STABLE) { +#if ETHARP_SUPPORT_STATIC_ENTRIES + /* don't record old_stable for static entries since they never expire */ + if (state < ETHARP_STATE_STATIC) +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + { + /* remember entry with oldest stable entry in oldest, its age in maxtime */ + if (arp_table[i].ctime >= age_stable) { + old_stable = i; + age_stable = arp_table[i].ctime; + } + } + } + } + } + /* { we have no match } => try to create a new entry */ + + /* don't create new entry, only search? */ + if (((flags & ETHARP_FLAG_FIND_ONLY) != 0) || + /* or no empty entry found and not allowed to recycle? */ + ((empty == ARP_TABLE_SIZE) && ((flags & ETHARP_FLAG_TRY_HARD) == 0))) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: no empty entry found and not allowed to recycle\n")); + return (s16_t)ERR_MEM; + } + + /* b) choose the least destructive entry to recycle: + * 1) empty entry + * 2) oldest stable entry + * 3) oldest pending entry without queued packets + * 4) oldest pending entry with queued packets + * + * { ETHARP_FLAG_TRY_HARD is set at this point } + */ + + /* 1) empty entry available? */ + if (empty < ARP_TABLE_SIZE) { + i = empty; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting empty entry %d\n", (int)i)); + } else { + /* 2) found recyclable stable entry? */ + if (old_stable < ARP_TABLE_SIZE) { + /* recycle oldest stable*/ + i = old_stable; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting oldest stable entry %d\n", (int)i)); + /* no queued packets should exist on stable entries */ + LWIP_ASSERT("arp_table[i].q == NULL", arp_table[i].q == NULL); + /* 3) found recyclable pending entry without queued packets? */ + } else if (old_pending < ARP_TABLE_SIZE) { + /* recycle oldest pending */ + i = old_pending; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting oldest pending entry %d (without queue)\n", (int)i)); + /* 4) found recyclable pending entry with queued packets? */ + } else if (old_queue < ARP_TABLE_SIZE) { + /* recycle oldest pending (queued packets are free in etharp_free_entry) */ + i = old_queue; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting oldest pending entry %d, freeing packet queue %p\n", (int)i, (void *)(arp_table[i].q))); + /* no empty or recyclable entries found */ + } else { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: no empty or recyclable entries found\n")); + return (s16_t)ERR_MEM; + } + + /* { empty or recyclable entry found } */ + LWIP_ASSERT("i < ARP_TABLE_SIZE", i < ARP_TABLE_SIZE); + etharp_free_entry(i); + } + + LWIP_ASSERT("i < ARP_TABLE_SIZE", i < ARP_TABLE_SIZE); + LWIP_ASSERT("arp_table[i].state == ETHARP_STATE_EMPTY", + arp_table[i].state == ETHARP_STATE_EMPTY); + + /* IP address given? */ + if (ipaddr != NULL) { + /* set IP address */ + ip4_addr_copy(arp_table[i].ipaddr, *ipaddr); + } + arp_table[i].ctime = 0; +#if ETHARP_TABLE_MATCH_NETIF + arp_table[i].netif = netif; +#endif /* ETHARP_TABLE_MATCH_NETIF */ + return (s16_t)i; +} + +/** + * Update (or insert) a IP/MAC address pair in the ARP cache. 
+ * + * If a pending entry is resolved, any queued packets will be sent + * at this point. + * + * @param netif netif related to this entry (used for NETIF_ADDRHINT) + * @param ipaddr IP address of the inserted ARP entry. + * @param ethaddr Ethernet address of the inserted ARP entry. + * @param flags See @ref etharp_state + * + * @return + * - ERR_OK Successfully updated ARP cache. + * - ERR_MEM If we could not add a new ARP entry when ETHARP_FLAG_TRY_HARD was set. + * - ERR_ARG Non-unicast address given, those will not appear in ARP cache. + * + * @see pbuf_free() + */ +static err_t +etharp_update_arp_entry(struct netif *netif, const ip4_addr_t *ipaddr, struct eth_addr *ethaddr, u8_t flags) +{ + s16_t i; + LWIP_ASSERT("netif->hwaddr_len == ETH_HWADDR_LEN", netif->hwaddr_len == ETH_HWADDR_LEN); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_update_arp_entry: %"U16_F".%"U16_F".%"U16_F".%"U16_F" - %02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F"\n", + ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr), + (u16_t)ethaddr->addr[0], (u16_t)ethaddr->addr[1], (u16_t)ethaddr->addr[2], + (u16_t)ethaddr->addr[3], (u16_t)ethaddr->addr[4], (u16_t)ethaddr->addr[5])); + /* non-unicast address? */ + if (ip4_addr_isany(ipaddr) || + ip4_addr_isbroadcast(ipaddr, netif) || + ip4_addr_ismulticast(ipaddr)) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_update_arp_entry: will not add non-unicast IP address to ARP cache\n")); + return ERR_ARG; + } + /* find or create ARP entry */ + i = etharp_find_entry(ipaddr, flags, netif); + /* bail out if no entry could be found */ + if (i < 0) { + return (err_t)i; + } + +#if ETHARP_SUPPORT_STATIC_ENTRIES + if (flags & ETHARP_FLAG_STATIC_ENTRY) { + /* record static type */ + arp_table[i].state = ETHARP_STATE_STATIC; + } else if (arp_table[i].state == ETHARP_STATE_STATIC) { + /* found entry is a static type, don't overwrite it */ + return ERR_VAL; + } else +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + { + /* mark it stable */ + arp_table[i].state = ETHARP_STATE_STABLE; + } + + /* record network interface */ + arp_table[i].netif = netif; + /* insert in SNMP ARP index tree */ + mib2_add_arp_entry(netif, &arp_table[i].ipaddr); + + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_update_arp_entry: updating stable entry %"S16_F"\n", i)); + /* update address */ + SMEMCPY(&arp_table[i].ethaddr, ethaddr, ETH_HWADDR_LEN); + /* reset time stamp */ + arp_table[i].ctime = 0; + /* this is where we will send out queued packets! */ +#if ARP_QUEUEING + while (arp_table[i].q != NULL) { + struct pbuf *p; + /* remember remainder of queue */ + struct etharp_q_entry *q = arp_table[i].q; + /* pop first item off the queue */ + arp_table[i].q = q->next; + /* get the packet pointer */ + p = q->p; + /* now queue entry can be freed */ + memp_free(MEMP_ARP_QUEUE, q); +#else /* ARP_QUEUEING */ + if (arp_table[i].q != NULL) { + struct pbuf *p = arp_table[i].q; + arp_table[i].q = NULL; +#endif /* ARP_QUEUEING */ + /* send the queued IP packet */ + ethernet_output(netif, p, (struct eth_addr *)(netif->hwaddr), ethaddr, ETHTYPE_IP); + /* free the queued IP packet */ + pbuf_free(p); + } + return ERR_OK; +} + +#if ETHARP_SUPPORT_STATIC_ENTRIES +/** Add a new static entry to the ARP table. If an entry exists for the + * specified IP address, this entry is overwritten. + * If packets are queued for the specified IP address, they are sent out. 
+ * + * @param ipaddr IP address for the new static entry + * @param ethaddr ethernet address for the new static entry + * @return See return values of etharp_add_static_entry + */ +err_t +etharp_add_static_entry(const ip4_addr_t *ipaddr, struct eth_addr *ethaddr) +{ + struct netif *netif; + LWIP_ASSERT_CORE_LOCKED(); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_add_static_entry: %"U16_F".%"U16_F".%"U16_F".%"U16_F" - %02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F"\n", + ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr), + (u16_t)ethaddr->addr[0], (u16_t)ethaddr->addr[1], (u16_t)ethaddr->addr[2], + (u16_t)ethaddr->addr[3], (u16_t)ethaddr->addr[4], (u16_t)ethaddr->addr[5])); + + netif = ip4_route(ipaddr); + if (netif == NULL) { + return ERR_RTE; + } + + return etharp_update_arp_entry(netif, ipaddr, ethaddr, ETHARP_FLAG_TRY_HARD | ETHARP_FLAG_STATIC_ENTRY); +} + +/** Remove a static entry from the ARP table previously added with a call to + * etharp_add_static_entry. + * + * @param ipaddr IP address of the static entry to remove + * @return ERR_OK: entry removed + * ERR_MEM: entry wasn't found + * ERR_ARG: entry wasn't a static entry but a dynamic one + */ +err_t +etharp_remove_static_entry(const ip4_addr_t *ipaddr) +{ + s16_t i; + LWIP_ASSERT_CORE_LOCKED(); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_remove_static_entry: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr))); + + /* find or create ARP entry */ + i = etharp_find_entry(ipaddr, ETHARP_FLAG_FIND_ONLY, NULL); + /* bail out if no entry could be found */ + if (i < 0) { + return (err_t)i; + } + + if (arp_table[i].state != ETHARP_STATE_STATIC) { + /* entry wasn't a static entry, cannot remove it */ + return ERR_ARG; + } + /* entry found, free it */ + etharp_free_entry(i); + return ERR_OK; +} +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + +/** + * Remove all ARP table entries of the specified netif. + * + * @param netif points to a network interface + */ +void +etharp_cleanup_netif(struct netif *netif) +{ + int i; + + for (i = 0; i < ARP_TABLE_SIZE; ++i) { + u8_t state = arp_table[i].state; + if ((state != ETHARP_STATE_EMPTY) && (arp_table[i].netif == netif)) { + etharp_free_entry(i); + } + } +} + +/** + * Finds (stable) ethernet/IP address pair from ARP table + * using interface and IP address index. + * @note the addresses in the ARP table are in network order! 
+ * + * @param netif points to interface index + * @param ipaddr points to the (network order) IP address index + * @param eth_ret points to return pointer + * @param ip_ret points to return pointer + * @return table index if found, -1 otherwise + */ +ssize_t +etharp_find_addr(struct netif *netif, const ip4_addr_t *ipaddr, + struct eth_addr **eth_ret, const ip4_addr_t **ip_ret) +{ + s16_t i; + + LWIP_ASSERT("eth_ret != NULL && ip_ret != NULL", + eth_ret != NULL && ip_ret != NULL); + + LWIP_UNUSED_ARG(netif); + + i = etharp_find_entry(ipaddr, ETHARP_FLAG_FIND_ONLY, netif); + if ((i >= 0) && (arp_table[i].state >= ETHARP_STATE_STABLE)) { + *eth_ret = &arp_table[i].ethaddr; + *ip_ret = &arp_table[i].ipaddr; + return i; + } + return -1; +} + +/** + * Possibility to iterate over stable ARP table entries + * + * @param i entry number, 0 to ARP_TABLE_SIZE + * @param ipaddr return value: IP address + * @param netif return value: points to interface + * @param eth_ret return value: ETH address + * @return 1 on valid index, 0 otherwise + */ +int +etharp_get_entry(size_t i, ip4_addr_t **ipaddr, struct netif **netif, struct eth_addr **eth_ret) +{ + LWIP_ASSERT("ipaddr != NULL", ipaddr != NULL); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("eth_ret != NULL", eth_ret != NULL); + + if ((i < ARP_TABLE_SIZE) && (arp_table[i].state >= ETHARP_STATE_STABLE)) { + *ipaddr = &arp_table[i].ipaddr; + *netif = arp_table[i].netif; + *eth_ret = &arp_table[i].ethaddr; + return 1; + } else { + return 0; + } +} + +/** + * Responds to ARP requests to us. Upon ARP replies to us, add entry to cache + * send out queued IP packets. Updates cache with snooped address pairs. + * + * Should be called for incoming ARP packets. The pbuf in the argument + * is freed by this function. + * + * @param p The ARP packet that arrived on netif. Is freed by this function. + * @param netif The lwIP network interface on which the ARP packet pbuf arrived. + * + * @see pbuf_free() + */ +void +etharp_input(struct pbuf *p, struct netif *netif) +{ + struct etharp_hdr *hdr; + /* these are aligned properly, whereas the ARP header fields might not be */ + ip4_addr_t sipaddr, dipaddr; + u8_t for_us; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif != NULL", (netif != NULL), return;); + + hdr = (struct etharp_hdr *)p->payload; + + /* RFC 826 "Packet Reception": */ + if ((hdr->hwtype != PP_HTONS(LWIP_IANA_HWTYPE_ETHERNET)) || + (hdr->hwlen != ETH_HWADDR_LEN) || + (hdr->protolen != sizeof(ip4_addr_t)) || + (hdr->proto != PP_HTONS(ETHTYPE_IP))) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("etharp_input: packet dropped, wrong hw type, hwlen, proto, protolen or ethernet type (%"U16_F"/%"U16_F"/%"U16_F"/%"U16_F")\n", + hdr->hwtype, (u16_t)hdr->hwlen, hdr->proto, (u16_t)hdr->protolen)); + ETHARP_STATS_INC(etharp.proterr); + ETHARP_STATS_INC(etharp.drop); + pbuf_free(p); + return; + } + ETHARP_STATS_INC(etharp.recv); + +#if LWIP_AUTOIP + /* We have to check if a host already has configured our random + * created link local address and continuously check if there is + * a host with this IP-address so we can detect collisions */ + autoip_arp_reply(netif, hdr); +#endif /* LWIP_AUTOIP */ + + /* Copy struct ip4_addr_wordaligned to aligned ip4_addr, to support compilers without + * structure packing (not using structure copy which breaks strict-aliasing rules). 
*/ + IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&sipaddr, &hdr->sipaddr); + IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&dipaddr, &hdr->dipaddr); + + /* this interface is not configured? */ + if (ip4_addr_isany_val(*netif_ip4_addr(netif))) { + for_us = 0; + } else { + /* ARP packet directed to us? */ + for_us = (u8_t)ip4_addr_cmp(&dipaddr, netif_ip4_addr(netif)); + } + + /* ARP message directed to us? + -> add IP address in ARP cache; assume requester wants to talk to us, + can result in directly sending the queued packets for this host. + ARP message not directed to us? + -> update the source IP address in the cache, if present */ + etharp_update_arp_entry(netif, &sipaddr, &(hdr->shwaddr), + for_us ? ETHARP_FLAG_TRY_HARD : ETHARP_FLAG_FIND_ONLY); + + /* now act on the message itself */ + switch (hdr->opcode) { + /* ARP request? */ + case PP_HTONS(ARP_REQUEST): + /* ARP request. If it asked for our address, we send out a + * reply. In any case, we time-stamp any existing ARP entry, + * and possibly send out an IP packet that was queued on it. */ + + LWIP_DEBUGF (ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: incoming ARP request\n")); + /* ARP request for our address? */ + if (for_us) { + /* send ARP response */ + etharp_raw(netif, + (struct eth_addr *)netif->hwaddr, &hdr->shwaddr, + (struct eth_addr *)netif->hwaddr, netif_ip4_addr(netif), + &hdr->shwaddr, &sipaddr, + ARP_REPLY); + /* we are not configured? */ + } else if (ip4_addr_isany_val(*netif_ip4_addr(netif))) { + /* { for_us == 0 and netif->ip_addr.addr == 0 } */ + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: we are unconfigured, ARP request ignored.\n")); + /* request was not directed to us */ + } else { + /* { for_us == 0 and netif->ip_addr.addr != 0 } */ + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: ARP request was not for us.\n")); + } + break; + case PP_HTONS(ARP_REPLY): + /* ARP reply. We already updated the ARP cache earlier. */ + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: incoming ARP reply\n")); +#if (LWIP_DHCP && DHCP_DOES_ARP_CHECK) + /* DHCP wants to know about ARP replies from any host with an + * IP address also offered to us by the DHCP server. We do not + * want to take a duplicate IP address on a single network. + * @todo How should we handle redundant (fail-over) interfaces? */ + dhcp_arp_reply(netif, &sipaddr); +#endif /* (LWIP_DHCP && DHCP_DOES_ARP_CHECK) */ + break; + default: + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: ARP unknown opcode type %"S16_F"\n", lwip_htons(hdr->opcode))); + ETHARP_STATS_INC(etharp.err); + break; + } + /* free ARP packet */ + pbuf_free(p); +} + +/** Just a small helper function that sends a pbuf to an ethernet address + * in the arp_table specified by the index 'arp_idx'. + */ +static err_t +etharp_output_to_arp_index(struct netif *netif, struct pbuf *q, netif_addr_idx_t arp_idx) +{ + LWIP_ASSERT("arp_table[arp_idx].state >= ETHARP_STATE_STABLE", + arp_table[arp_idx].state >= ETHARP_STATE_STABLE); + /* if arp table entry is about to expire: re-request it, + but only if its state is ETHARP_STATE_STABLE to prevent flooding the + network with ARP requests if this address is used frequently. 
*/ + if (arp_table[arp_idx].state == ETHARP_STATE_STABLE) { + if (arp_table[arp_idx].ctime >= ARP_AGE_REREQUEST_USED_BROADCAST) { + /* issue a standard request using broadcast */ + if (etharp_request(netif, &arp_table[arp_idx].ipaddr) == ERR_OK) { + arp_table[arp_idx].state = ETHARP_STATE_STABLE_REREQUESTING_1; + } + } else if (arp_table[arp_idx].ctime >= ARP_AGE_REREQUEST_USED_UNICAST) { + /* issue a unicast request (for 15 seconds) to prevent unnecessary broadcast */ + if (etharp_request_dst(netif, &arp_table[arp_idx].ipaddr, &arp_table[arp_idx].ethaddr) == ERR_OK) { + arp_table[arp_idx].state = ETHARP_STATE_STABLE_REREQUESTING_1; + } + } + } + + return ethernet_output(netif, q, (struct eth_addr *)(netif->hwaddr), &arp_table[arp_idx].ethaddr, ETHTYPE_IP); +} + +/** + * Resolve and fill-in Ethernet address header for outgoing IP packet. + * + * For IP multicast and broadcast, corresponding Ethernet addresses + * are selected and the packet is transmitted on the link. + * + * For unicast addresses, the packet is submitted to etharp_query(). In + * case the IP address is outside the local network, the IP address of + * the gateway is used. + * + * @param netif The lwIP network interface which the IP packet will be sent on. + * @param q The pbuf(s) containing the IP packet to be sent. + * @param ipaddr The IP address of the packet destination. + * + * @return + * - ERR_RTE No route to destination (no gateway to external networks), + * or the return type of either etharp_query() or ethernet_output(). + */ +err_t +etharp_output(struct netif *netif, struct pbuf *q, const ip4_addr_t *ipaddr) +{ + const struct eth_addr *dest; + struct eth_addr mcastaddr; + const ip4_addr_t *dst_addr = ipaddr; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("q != NULL", q != NULL); + LWIP_ASSERT("ipaddr != NULL", ipaddr != NULL); + + /* Determine on destination hardware address. Broadcasts and multicasts + * are special, other IP addresses are looked up in the ARP table. */ + + /* broadcast destination IP address? */ + if (ip4_addr_isbroadcast(ipaddr, netif)) { + /* broadcast on Ethernet also */ + dest = (const struct eth_addr *)&ethbroadcast; + /* multicast destination IP address? */ + } else if (ip4_addr_ismulticast(ipaddr)) { + /* Hash IP multicast address to MAC address.*/ + mcastaddr.addr[0] = LL_IP4_MULTICAST_ADDR_0; + mcastaddr.addr[1] = LL_IP4_MULTICAST_ADDR_1; + mcastaddr.addr[2] = LL_IP4_MULTICAST_ADDR_2; + mcastaddr.addr[3] = ip4_addr2(ipaddr) & 0x7f; + mcastaddr.addr[4] = ip4_addr3(ipaddr); + mcastaddr.addr[5] = ip4_addr4(ipaddr); + /* destination Ethernet address is multicast */ + dest = &mcastaddr; + /* unicast destination IP address? */ + } else { + netif_addr_idx_t i; + /* outside local network? if so, this can neither be a global broadcast nor + a subnet broadcast. */ + if (!ip4_addr_netcmp(ipaddr, netif_ip4_addr(netif), netif_ip4_netmask(netif)) && + !ip4_addr_islinklocal(ipaddr)) { +#if LWIP_AUTOIP + struct ip_hdr *iphdr = LWIP_ALIGNMENT_CAST(struct ip_hdr *, q->payload); + /* According to RFC 3927, chapter 2.6.2 (Forwarding Rules), a packet with + a link-local source address must always be "directly to its destination + on the same physical link. The host MUST NOT send the packet to any + router for forwarding". 
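The multicast branch above is the standard RFC 1112 mapping: the fixed prefix 01:00:5e (the LL_IP4_MULTICAST_ADDR_* constants) followed by the low 23 bits of the group address. Worked through for 224.0.0.251 (the mDNS group), as an example:

  octet2 & 0x7f = 0 & 0x7f  ->  addr[3] = 0x00
  octet3        = 0         ->  addr[4] = 0x00
  octet4        = 251       ->  addr[5] = 0xfb
  resulting destination MAC: 01:00:5e:00:00:fb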
*/ + if (!ip4_addr_islinklocal(&iphdr->src)) +#endif /* LWIP_AUTOIP */ + { +#ifdef LWIP_HOOK_ETHARP_GET_GW + /* For advanced routing, a single default gateway might not be enough, so get + the IP address of the gateway to handle the current destination address. */ + dst_addr = LWIP_HOOK_ETHARP_GET_GW(netif, ipaddr); + if (dst_addr == NULL) +#endif /* LWIP_HOOK_ETHARP_GET_GW */ + { + /* interface has default gateway? */ + if (!ip4_addr_isany_val(*netif_ip4_gw(netif))) { + /* send to hardware address of default gateway IP address */ + dst_addr = netif_ip4_gw(netif); + /* no default gateway available */ + } else { + /* no route to destination error (default gateway missing) */ + return ERR_RTE; + } + } + } + } +#if LWIP_NETIF_HWADDRHINT + if (netif->hints != NULL) { + /* per-pcb cached entry was given */ + netif_addr_idx_t etharp_cached_entry = netif->hints->addr_hint; + if (etharp_cached_entry < ARP_TABLE_SIZE) { +#endif /* LWIP_NETIF_HWADDRHINT */ + if ((arp_table[etharp_cached_entry].state >= ETHARP_STATE_STABLE) && +#if ETHARP_TABLE_MATCH_NETIF + (arp_table[etharp_cached_entry].netif == netif) && +#endif + (ip4_addr_cmp(dst_addr, &arp_table[etharp_cached_entry].ipaddr))) { + /* the per-pcb-cached entry is stable and the right one! */ + ETHARP_STATS_INC(etharp.cachehit); + return etharp_output_to_arp_index(netif, q, etharp_cached_entry); + } +#if LWIP_NETIF_HWADDRHINT + } + } +#endif /* LWIP_NETIF_HWADDRHINT */ + + /* find stable entry: do this here since this is a critical path for + throughput and etharp_find_entry() is kind of slow */ + for (i = 0; i < ARP_TABLE_SIZE; i++) { + if ((arp_table[i].state >= ETHARP_STATE_STABLE) && +#if ETHARP_TABLE_MATCH_NETIF + (arp_table[i].netif == netif) && +#endif + (ip4_addr_cmp(dst_addr, &arp_table[i].ipaddr))) { + /* found an existing, stable entry */ + ETHARP_SET_ADDRHINT(netif, i); + return etharp_output_to_arp_index(netif, q, i); + } + } + /* no stable entry found, use the (slower) query function: + queue on destination Ethernet address belonging to ipaddr */ + return etharp_query(netif, dst_addr, q); + } + + /* continuation for multicast/broadcast destinations */ + /* obtain source Ethernet address of the given interface */ + /* send packet directly on the link */ + return ethernet_output(netif, q, (struct eth_addr *)(netif->hwaddr), dest, ETHTYPE_IP); +} + +/** + * Send an ARP request for the given IP address and/or queue a packet. + * + * If the IP address was not yet in the cache, a pending ARP cache entry + * is added and an ARP request is sent for the given address. The packet + * is queued on this entry. + * + * If the IP address was already pending in the cache, a new ARP request + * is sent for the given address. The packet is queued on this entry. + * + * If the IP address was already stable in the cache, and a packet is + * given, it is directly sent and no ARP request is sent out. + * + * If the IP address was already stable in the cache, and no packet is + * given, an ARP request is sent out. + * + * @param netif The lwIP network interface on which ipaddr + * must be queried for. + * @param ipaddr The IP address to be resolved. + * @param q If non-NULL, a pbuf that must be delivered to the IP address. + * q is not freed by this function. + * + * @note q must only be ONE packet, not a packet queue! + * + * @return + * - ERR_BUF Could not make room for Ethernet header. + * - ERR_MEM Hardware address unknown, and no more ARP entries available + * to query for address or queue the packet. 
+ * - ERR_MEM Could not queue packet due to memory shortage. + * - ERR_RTE No route to destination (no gateway to external networks). + * - ERR_ARG Non-unicast address given, those will not appear in ARP cache. + * + */ +err_t +etharp_query(struct netif *netif, const ip4_addr_t *ipaddr, struct pbuf *q) +{ + struct eth_addr *srcaddr = (struct eth_addr *)netif->hwaddr; + err_t result = ERR_MEM; + int is_new_entry = 0; + s16_t i_err; + netif_addr_idx_t i; + + /* non-unicast address? */ + if (ip4_addr_isbroadcast(ipaddr, netif) || + ip4_addr_ismulticast(ipaddr) || + ip4_addr_isany(ipaddr)) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: will not add non-unicast IP address to ARP cache\n")); + return ERR_ARG; + } + + /* find entry in ARP cache, ask to create entry if queueing packet */ + i_err = etharp_find_entry(ipaddr, ETHARP_FLAG_TRY_HARD, netif); + + /* could not find or create entry? */ + if (i_err < 0) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: could not create ARP entry\n")); + if (q) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: packet dropped\n")); + ETHARP_STATS_INC(etharp.memerr); + } + return (err_t)i_err; + } + LWIP_ASSERT("type overflow", (size_t)i_err < NETIF_ADDR_IDX_MAX); + i = (netif_addr_idx_t)i_err; + + /* mark a fresh entry as pending (we just sent a request) */ + if (arp_table[i].state == ETHARP_STATE_EMPTY) { + is_new_entry = 1; + arp_table[i].state = ETHARP_STATE_PENDING; + /* record network interface for re-sending arp request in etharp_tmr */ + arp_table[i].netif = netif; + } + + /* { i is either a STABLE or (new or existing) PENDING entry } */ + LWIP_ASSERT("arp_table[i].state == PENDING or STABLE", + ((arp_table[i].state == ETHARP_STATE_PENDING) || + (arp_table[i].state >= ETHARP_STATE_STABLE))); + + /* do we have a new entry? or an implicit query request? */ + if (is_new_entry || (q == NULL)) { + /* try to resolve it; send out ARP request */ + result = etharp_request(netif, ipaddr); + if (result != ERR_OK) { + /* ARP request couldn't be sent */ + /* We don't re-send arp request in etharp_tmr, but we still queue packets, + since this failure could be temporary, and the next packet calling + etharp_query again could lead to sending the queued packets. */ + } + if (q == NULL) { + return result; + } + } + + /* packet given? */ + LWIP_ASSERT("q != NULL", q != NULL); + /* stable entry? */ + if (arp_table[i].state >= ETHARP_STATE_STABLE) { + /* we have a valid IP->Ethernet address mapping */ + ETHARP_SET_ADDRHINT(netif, i); + /* send the packet */ + result = ethernet_output(netif, q, srcaddr, &(arp_table[i].ethaddr), ETHTYPE_IP); + /* pending entry? (either just created or already pending */ + } else if (arp_table[i].state == ETHARP_STATE_PENDING) { + /* entry is still pending, queue the given packet 'q' */ + struct pbuf *p; + int copy_needed = 0; + /* IF q includes a pbuf that must be copied, copy the whole chain into a + * new PBUF_RAM. See the definition of PBUF_NEEDS_COPY for details. */ + p = q; + while (p) { + LWIP_ASSERT("no packet queues allowed!", (p->len != p->tot_len) || (p->next == 0)); + if (PBUF_NEEDS_COPY(p)) { + copy_needed = 1; + break; + } + p = p->next; + } + if (copy_needed) { + /* copy the whole packet into new pbufs */ + p = pbuf_clone(PBUF_LINK, PBUF_RAM, q); + } else { + /* referencing the old pbuf is enough */ + p = q; + pbuf_ref(p); + } + /* packet could be taken over? */ + if (p != NULL) { + /* queue packet ... 
*/ +#if ARP_QUEUEING + struct etharp_q_entry *new_entry; + /* allocate a new arp queue entry */ + new_entry = (struct etharp_q_entry *)memp_malloc(MEMP_ARP_QUEUE); + if (new_entry != NULL) { + unsigned int qlen = 0; + new_entry->next = 0; + new_entry->p = p; + if (arp_table[i].q != NULL) { + /* queue was already existent, append the new entry to the end */ + struct etharp_q_entry *r; + r = arp_table[i].q; + qlen++; + while (r->next != NULL) { + r = r->next; + qlen++; + } + r->next = new_entry; + } else { + /* queue did not exist, first item in queue */ + arp_table[i].q = new_entry; + } +#if ARP_QUEUE_LEN + if (qlen >= ARP_QUEUE_LEN) { + struct etharp_q_entry *old; + old = arp_table[i].q; + arp_table[i].q = arp_table[i].q->next; + pbuf_free(old->p); + memp_free(MEMP_ARP_QUEUE, old); + } +#endif + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: queued packet %p on ARP entry %"U16_F"\n", (void *)q, i)); + result = ERR_OK; + } else { + /* the pool MEMP_ARP_QUEUE is empty */ + pbuf_free(p); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: could not queue a copy of PBUF_REF packet %p (out of memory)\n", (void *)q)); + result = ERR_MEM; + } +#else /* ARP_QUEUEING */ + /* always queue one packet per ARP request only, freeing a previously queued packet */ + if (arp_table[i].q != NULL) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: dropped previously queued packet %p for ARP entry %"U16_F"\n", (void *)q, (u16_t)i)); + pbuf_free(arp_table[i].q); + } + arp_table[i].q = p; + result = ERR_OK; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: queued packet %p on ARP entry %"U16_F"\n", (void *)q, (u16_t)i)); +#endif /* ARP_QUEUEING */ + } else { + ETHARP_STATS_INC(etharp.memerr); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: could not queue a copy of PBUF_REF packet %p (out of memory)\n", (void *)q)); + result = ERR_MEM; + } + } + return result; +} + +/** + * Send a raw ARP packet (opcode and all addresses can be modified) + * + * @param netif the lwip network interface on which to send the ARP packet + * @param ethsrc_addr the source MAC address for the ethernet header + * @param ethdst_addr the destination MAC address for the ethernet header + * @param hwsrc_addr the source MAC address for the ARP protocol header + * @param ipsrc_addr the source IP address for the ARP protocol header + * @param hwdst_addr the destination MAC address for the ARP protocol header + * @param ipdst_addr the destination IP address for the ARP protocol header + * @param opcode the type of the ARP packet + * @return ERR_OK if the ARP packet has been sent + * ERR_MEM if the ARP packet couldn't be allocated + * any other err_t on failure + */ +static err_t +etharp_raw(struct netif *netif, const struct eth_addr *ethsrc_addr, + const struct eth_addr *ethdst_addr, + const struct eth_addr *hwsrc_addr, const ip4_addr_t *ipsrc_addr, + const struct eth_addr *hwdst_addr, const ip4_addr_t *ipdst_addr, + const u16_t opcode) +{ + struct pbuf *p; + err_t result = ERR_OK; + struct etharp_hdr *hdr; + + LWIP_ASSERT("netif != NULL", netif != NULL); + + /* allocate a pbuf for the outgoing ARP request packet */ + p = pbuf_alloc(PBUF_LINK, SIZEOF_ETHARP_HDR, PBUF_RAM); + /* could allocate a pbuf for an ARP request? 
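A note on the queueing branch above: how many packets pile up behind a pending ARP entry is governed by two lwipopts.h switches. A minimal sketch using the stock option names referenced in this file (the values shown are the usual defaults when queueing is enabled, not necessarily this project's configuration); with ARP_QUEUEING disabled, the #else branch keeps only the newest packet per pending entry.

/* lwipopts.h (sketch) */
#define ARP_QUEUEING   1   /* keep a list of pbufs per pending ARP entry */
#define ARP_QUEUE_LEN  3   /* cap the list; the oldest queued pbuf is dropped beyond this */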
*/ + if (p == NULL) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("etharp_raw: could not allocate pbuf for ARP request.\n")); + ETHARP_STATS_INC(etharp.memerr); + return ERR_MEM; + } + LWIP_ASSERT("check that first pbuf can hold struct etharp_hdr", + (p->len >= SIZEOF_ETHARP_HDR)); + + hdr = (struct etharp_hdr *)p->payload; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_raw: sending raw ARP packet.\n")); + hdr->opcode = lwip_htons(opcode); + + LWIP_ASSERT("netif->hwaddr_len must be the same as ETH_HWADDR_LEN for etharp!", + (netif->hwaddr_len == ETH_HWADDR_LEN)); + + /* Write the ARP MAC-Addresses */ + SMEMCPY(&hdr->shwaddr, hwsrc_addr, ETH_HWADDR_LEN); + SMEMCPY(&hdr->dhwaddr, hwdst_addr, ETH_HWADDR_LEN); + /* Copy struct ip4_addr_wordaligned to aligned ip4_addr, to support compilers without + * structure packing. */ + IPADDR_WORDALIGNED_COPY_FROM_IP4_ADDR_T(&hdr->sipaddr, ipsrc_addr); + IPADDR_WORDALIGNED_COPY_FROM_IP4_ADDR_T(&hdr->dipaddr, ipdst_addr); + + hdr->hwtype = PP_HTONS(LWIP_IANA_HWTYPE_ETHERNET); + hdr->proto = PP_HTONS(ETHTYPE_IP); + /* set hwlen and protolen */ + hdr->hwlen = ETH_HWADDR_LEN; + hdr->protolen = sizeof(ip4_addr_t); + + /* send ARP query */ +#if LWIP_AUTOIP + /* If we are using Link-Local, all ARP packets that contain a Link-Local + * 'sender IP address' MUST be sent using link-layer broadcast instead of + * link-layer unicast. (See RFC3927 Section 2.5, last paragraph) */ + if (ip4_addr_islinklocal(ipsrc_addr)) { + ethernet_output(netif, p, ethsrc_addr, &ethbroadcast, ETHTYPE_ARP); + } else +#endif /* LWIP_AUTOIP */ + { + ethernet_output(netif, p, ethsrc_addr, ethdst_addr, ETHTYPE_ARP); + } + + ETHARP_STATS_INC(etharp.xmit); + /* free ARP query packet */ + pbuf_free(p); + p = NULL; + /* could not allocate pbuf for ARP request */ + + return result; +} + +/** + * Send an ARP request packet asking for ipaddr to a specific eth address. + * Used to send unicast request to refresh the ARP table just before an entry + * times out + * + * @param netif the lwip network interface on which to send the request + * @param ipaddr the IP address for which to ask + * @param hw_dst_addr the ethernet address to send this packet to + * @return ERR_OK if the request has been sent + * ERR_MEM if the ARP packet couldn't be allocated + * any other err_t on failure + */ +static err_t +etharp_request_dst(struct netif *netif, const ip4_addr_t *ipaddr, const struct eth_addr *hw_dst_addr) +{ + return etharp_raw(netif, (struct eth_addr *)netif->hwaddr, hw_dst_addr, + (struct eth_addr *)netif->hwaddr, netif_ip4_addr(netif), &ethzero, + ipaddr, ARP_REQUEST); +} + +/** + * Send an ARP request packet asking for ipaddr. 
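As a usage aside for these request helpers (etharp_raw() and etharp_request_dst() above, plus the public etharp_request() wrapper that follows), an application can prime the ARP cache before it starts sending, so the first data packet is not held back while resolution completes. A minimal sketch; the netif is assumed to be an initialized Ethernet interface and the peer address is purely illustrative:

#include "lwip/etharp.h"

static void prime_arp_cache(struct netif *netif)
{
  ip4_addr_t peer;
  IP4_ADDR(&peer, 192, 168, 1, 10);    /* illustrative peer on the local subnet */
  (void)etharp_request(netif, &peer);  /* broadcast request; the reply fills the cache */
}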
+ * + * @param netif the lwip network interface on which to send the request + * @param ipaddr the IP address for which to ask + * @return ERR_OK if the request has been sent + * ERR_MEM if the ARP packet couldn't be allocated + * any other err_t on failure + */ +err_t +etharp_request(struct netif *netif, const ip4_addr_t *ipaddr) +{ + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_request: sending ARP request.\n")); + return etharp_request_dst(netif, ipaddr, &ethbroadcast); +} + +#endif /* LWIP_IPV4 && LWIP_ARP */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c b/Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c new file mode 100644 index 0000000..a462ccd --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c @@ -0,0 +1,404 @@ +/** + * @file + * ICMP - Internet Control Message Protocol + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels <adam@sics.se> + * + */ + +/* Some ICMP messages should be passed to the transport protocols. This + is not implemented. */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_ICMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/icmp.h" +#include "lwip/inet_chksum.h" +#include "lwip/ip.h" +#include "lwip/def.h" +#include "lwip/stats.h" + +#include <string.h> + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/** Small optimization: set to 0 if incoming PBUF_POOL pbuf always can be + * used to modify and send a response packet (and to 1 if this is not the case, + * e.g. when link header is stripped off when receiving) */ +#ifndef LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN +#define LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN 1 +#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN */ + +/* The amount of data from the original packet to return in a dest-unreachable */ +#define ICMP_DEST_UNREACH_DATASIZE 8 + +static void icmp_send_response(struct pbuf *p, u8_t type, u8_t code); + +/** + * Processes ICMP input packets, called from ip_input(). 
+ * + * Currently only processes icmp echo requests and sends + * out the echo response. + * + * @param p the icmp echo request packet, p->payload pointing to the icmp header + * @param inp the netif on which this packet was received + */ +void +icmp_input(struct pbuf *p, struct netif *inp) +{ + u8_t type; +#ifdef LWIP_DEBUG + u8_t code; +#endif /* LWIP_DEBUG */ + struct icmp_echo_hdr *iecho; + const struct ip_hdr *iphdr_in; + u16_t hlen; + const ip4_addr_t *src; + + ICMP_STATS_INC(icmp.recv); + MIB2_STATS_INC(mib2.icmpinmsgs); + + iphdr_in = ip4_current_header(); + hlen = IPH_HL_BYTES(iphdr_in); + if (hlen < IP_HLEN) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: short IP header (%"S16_F" bytes) received\n", hlen)); + goto lenerr; + } + if (p->len < sizeof(u16_t) * 2) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: short ICMP (%"U16_F" bytes) received\n", p->tot_len)); + goto lenerr; + } + + type = *((u8_t *)p->payload); +#ifdef LWIP_DEBUG + code = *(((u8_t *)p->payload) + 1); + /* if debug is enabled but debug statement below is somehow disabled: */ + LWIP_UNUSED_ARG(code); +#endif /* LWIP_DEBUG */ + switch (type) { + case ICMP_ER: + /* This is OK, echo reply might have been parsed by a raw PCB + (as obviously, an echo request has been sent, too). */ + MIB2_STATS_INC(mib2.icmpinechoreps); + break; + case ICMP_ECHO: + MIB2_STATS_INC(mib2.icmpinechos); + src = ip4_current_dest_addr(); + /* multicast destination address? */ + if (ip4_addr_ismulticast(ip4_current_dest_addr())) { +#if LWIP_MULTICAST_PING + /* For multicast, use address of receiving interface as source address */ + src = netif_ip4_addr(inp); +#else /* LWIP_MULTICAST_PING */ + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: Not echoing to multicast pings\n")); + goto icmperr; +#endif /* LWIP_MULTICAST_PING */ + } + /* broadcast destination address? 
*/ + if (ip4_addr_isbroadcast(ip4_current_dest_addr(), ip_current_netif())) { +#if LWIP_BROADCAST_PING + /* For broadcast, use address of receiving interface as source address */ + src = netif_ip4_addr(inp); +#else /* LWIP_BROADCAST_PING */ + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: Not echoing to broadcast pings\n")); + goto icmperr; +#endif /* LWIP_BROADCAST_PING */ + } + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ping\n")); + if (p->tot_len < sizeof(struct icmp_echo_hdr)) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: bad ICMP echo received\n")); + goto lenerr; + } +#if CHECKSUM_CHECK_ICMP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_ICMP) { + if (inet_chksum_pbuf(p) != 0) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: checksum failed for received ICMP echo\n")); + pbuf_free(p); + ICMP_STATS_INC(icmp.chkerr); + MIB2_STATS_INC(mib2.icmpinerrors); + return; + } + } +#endif +#if LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN + if (pbuf_add_header(p, hlen + PBUF_LINK_HLEN + PBUF_LINK_ENCAPSULATION_HLEN)) { + /* p is not big enough to contain link headers + * allocate a new one and copy p into it + */ + struct pbuf *r; + u16_t alloc_len = (u16_t)(p->tot_len + hlen); + if (alloc_len < p->tot_len) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: allocating new pbuf failed (tot_len overflow)\n")); + goto icmperr; + } + /* allocate new packet buffer with space for link headers */ + r = pbuf_alloc(PBUF_LINK, alloc_len, PBUF_RAM); + if (r == NULL) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: allocating new pbuf failed\n")); + goto icmperr; + } + if (r->len < hlen + sizeof(struct icmp_echo_hdr)) { + LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("first pbuf cannot hold the ICMP header")); + pbuf_free(r); + goto icmperr; + } + /* copy the ip header */ + MEMCPY(r->payload, iphdr_in, hlen); + /* switch r->payload back to icmp header (cannot fail) */ + if (pbuf_remove_header(r, hlen)) { + LWIP_ASSERT("icmp_input: moving r->payload to icmp header failed\n", 0); + pbuf_free(r); + goto icmperr; + } + /* copy the rest of the packet without ip header */ + if (pbuf_copy(r, p) != ERR_OK) { + LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("icmp_input: copying to new pbuf failed")); + pbuf_free(r); + goto icmperr; + } + /* free the original p */ + pbuf_free(p); + /* we now have an identical copy of p that has room for link headers */ + p = r; + } else { + /* restore p->payload to point to icmp header (cannot fail) */ + if (pbuf_remove_header(p, hlen + PBUF_LINK_HLEN + PBUF_LINK_ENCAPSULATION_HLEN)) { + LWIP_ASSERT("icmp_input: restoring original p->payload failed\n", 0); + goto icmperr; + } + } +#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN */ + /* At this point, all checks are OK. */ + /* We generate an answer by switching the dest and src ip addresses, + * setting the icmp type to ECHO_RESPONSE and updating the checksum. 
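The multicast and broadcast echo branches above are only answered when the corresponding options are enabled; both default to off. A lwipopts.h sketch using the option names referenced in the #if blocks (values shown are the defaults, for illustration only):

#define LWIP_MULTICAST_PING  0   /* do not answer echo requests sent to multicast groups */
#define LWIP_BROADCAST_PING  0   /* do not answer echo requests sent to the subnet broadcast */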
*/ + iecho = (struct icmp_echo_hdr *)p->payload; + if (pbuf_add_header(p, hlen)) { + LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("Can't move over header in packet")); + } else { + err_t ret; + struct ip_hdr *iphdr = (struct ip_hdr *)p->payload; + ip4_addr_copy(iphdr->src, *src); + ip4_addr_copy(iphdr->dest, *ip4_current_src_addr()); + ICMPH_TYPE_SET(iecho, ICMP_ER); +#if CHECKSUM_GEN_ICMP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_ICMP) { + /* adjust the checksum */ + if (iecho->chksum > PP_HTONS(0xffffU - (ICMP_ECHO << 8))) { + iecho->chksum = (u16_t)(iecho->chksum + PP_HTONS((u16_t)(ICMP_ECHO << 8)) + 1); + } else { + iecho->chksum = (u16_t)(iecho->chksum + PP_HTONS(ICMP_ECHO << 8)); + } + } +#if LWIP_CHECKSUM_CTRL_PER_NETIF + else { + iecho->chksum = 0; + } +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */ +#else /* CHECKSUM_GEN_ICMP */ + iecho->chksum = 0; +#endif /* CHECKSUM_GEN_ICMP */ + + /* Set the correct TTL and recalculate the header checksum. */ + IPH_TTL_SET(iphdr, ICMP_TTL); + IPH_CHKSUM_SET(iphdr, 0); +#if CHECKSUM_GEN_IP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_IP) { + IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, hlen)); + } +#endif /* CHECKSUM_GEN_IP */ + + ICMP_STATS_INC(icmp.xmit); + /* increase number of messages attempted to send */ + MIB2_STATS_INC(mib2.icmpoutmsgs); + /* increase number of echo replies attempted to send */ + MIB2_STATS_INC(mib2.icmpoutechoreps); + + /* send an ICMP packet */ + ret = ip4_output_if(p, src, LWIP_IP_HDRINCL, + ICMP_TTL, 0, IP_PROTO_ICMP, inp); + if (ret != ERR_OK) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ip_output_if returned an error: %s\n", lwip_strerr(ret))); + } + } + break; + default: + if (type == ICMP_DUR) { + MIB2_STATS_INC(mib2.icmpindestunreachs); + } else if (type == ICMP_TE) { + MIB2_STATS_INC(mib2.icmpintimeexcds); + } else if (type == ICMP_PP) { + MIB2_STATS_INC(mib2.icmpinparmprobs); + } else if (type == ICMP_SQ) { + MIB2_STATS_INC(mib2.icmpinsrcquenchs); + } else if (type == ICMP_RD) { + MIB2_STATS_INC(mib2.icmpinredirects); + } else if (type == ICMP_TS) { + MIB2_STATS_INC(mib2.icmpintimestamps); + } else if (type == ICMP_TSR) { + MIB2_STATS_INC(mib2.icmpintimestampreps); + } else if (type == ICMP_AM) { + MIB2_STATS_INC(mib2.icmpinaddrmasks); + } else if (type == ICMP_AMR) { + MIB2_STATS_INC(mib2.icmpinaddrmaskreps); + } + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ICMP type %"S16_F" code %"S16_F" not supported.\n", + (s16_t)type, (s16_t)code)); + ICMP_STATS_INC(icmp.proterr); + ICMP_STATS_INC(icmp.drop); + } + pbuf_free(p); + return; +lenerr: + pbuf_free(p); + ICMP_STATS_INC(icmp.lenerr); + MIB2_STATS_INC(mib2.icmpinerrors); + return; +#if LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN || !LWIP_MULTICAST_PING || !LWIP_BROADCAST_PING +icmperr: + pbuf_free(p); + ICMP_STATS_INC(icmp.err); + MIB2_STATS_INC(mib2.icmpinerrors); + return; +#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN || !LWIP_MULTICAST_PING || !LWIP_BROADCAST_PING */ +} + +/** + * Send an icmp 'destination unreachable' packet, called from ip_input() if + * the transport layer protocol is unknown and from udp_input() if the local + * port is not bound. 
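The reply path above does not recompute the ICMP checksum from scratch: changing the type byte from ICMP_ECHO (8) to ICMP_ER (0) lowers the message's one's-complement sum by 0x0800, so the stored checksum must grow by the same amount, folding the end-around carry when it would wrap past 0xffff. The same adjustment restated as a standalone helper, purely for illustration:

static u16_t echo_reply_chksum(u16_t chksum)
{
  if (chksum > PP_HTONS(0xffffU - (ICMP_ECHO << 8))) {
    return (u16_t)(chksum + PP_HTONS((u16_t)(ICMP_ECHO << 8)) + 1);  /* sum wrapped: add the carry */
  }
  return (u16_t)(chksum + PP_HTONS(ICMP_ECHO << 8));
}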
+ * + * @param p the input packet for which the 'unreachable' should be sent, + * p->payload pointing to the IP header + * @param t type of the 'unreachable' packet + */ +void +icmp_dest_unreach(struct pbuf *p, enum icmp_dur_type t) +{ + MIB2_STATS_INC(mib2.icmpoutdestunreachs); + icmp_send_response(p, ICMP_DUR, t); +} + +#if IP_FORWARD || IP_REASSEMBLY +/** + * Send a 'time exceeded' packet, called from ip_forward() if TTL is 0. + * + * @param p the input packet for which the 'time exceeded' should be sent, + * p->payload pointing to the IP header + * @param t type of the 'time exceeded' packet + */ +void +icmp_time_exceeded(struct pbuf *p, enum icmp_te_type t) +{ + MIB2_STATS_INC(mib2.icmpouttimeexcds); + icmp_send_response(p, ICMP_TE, t); +} + +#endif /* IP_FORWARD || IP_REASSEMBLY */ + +/** + * Send an icmp packet in response to an incoming packet. + * + * @param p the input packet for which the 'unreachable' should be sent, + * p->payload pointing to the IP header + * @param type Type of the ICMP header + * @param code Code of the ICMP header + */ +static void +icmp_send_response(struct pbuf *p, u8_t type, u8_t code) +{ + struct pbuf *q; + struct ip_hdr *iphdr; + /* we can use the echo header here */ + struct icmp_echo_hdr *icmphdr; + ip4_addr_t iphdr_src; + struct netif *netif; + + /* increase number of messages attempted to send */ + MIB2_STATS_INC(mib2.icmpoutmsgs); + + /* ICMP header + IP header + 8 bytes of data */ + q = pbuf_alloc(PBUF_IP, sizeof(struct icmp_echo_hdr) + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE, + PBUF_RAM); + if (q == NULL) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded: failed to allocate pbuf for ICMP packet.\n")); + MIB2_STATS_INC(mib2.icmpouterrors); + return; + } + LWIP_ASSERT("check that first pbuf can hold icmp message", + (q->len >= (sizeof(struct icmp_echo_hdr) + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE))); + + iphdr = (struct ip_hdr *)p->payload; + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded from ")); + ip4_addr_debug_print_val(ICMP_DEBUG, iphdr->src); + LWIP_DEBUGF(ICMP_DEBUG, (" to ")); + ip4_addr_debug_print_val(ICMP_DEBUG, iphdr->dest); + LWIP_DEBUGF(ICMP_DEBUG, ("\n")); + + icmphdr = (struct icmp_echo_hdr *)q->payload; + icmphdr->type = type; + icmphdr->code = code; + icmphdr->id = 0; + icmphdr->seqno = 0; + + /* copy fields from original packet */ + SMEMCPY((u8_t *)q->payload + sizeof(struct icmp_echo_hdr), (u8_t *)p->payload, + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE); + + ip4_addr_copy(iphdr_src, iphdr->src); +#ifdef LWIP_HOOK_IP4_ROUTE_SRC + { + ip4_addr_t iphdr_dst; + ip4_addr_copy(iphdr_dst, iphdr->dest); + netif = ip4_route_src(&iphdr_dst, &iphdr_src); + } +#else + netif = ip4_route(&iphdr_src); +#endif + if (netif != NULL) { + /* calculate checksum */ + icmphdr->chksum = 0; +#if CHECKSUM_GEN_ICMP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP) { + icmphdr->chksum = inet_chksum(icmphdr, q->len); + } +#endif + ICMP_STATS_INC(icmp.xmit); + ip4_output_if(q, NULL, &iphdr_src, ICMP_TTL, 0, IP_PROTO_ICMP, netif); + } + pbuf_free(q); +} + +#endif /* LWIP_IPV4 && LWIP_ICMP */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c b/Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c new file mode 100644 index 0000000..b655aa3 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c @@ -0,0 +1,801 @@ +/** + * @file + * IGMP - Internet Group Management Protocol + * + * @defgroup igmp IGMP + * @ingroup ip4 + * To be called from TCPIP thread + */ + +/* + * Copyright (c) 2002 CITEL Technologies Ltd. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of CITEL Technologies Ltd nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY CITEL TECHNOLOGIES AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL CITEL TECHNOLOGIES OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is a contribution to the lwIP TCP/IP stack. + * The Swedish Institute of Computer Science and Adam Dunkels + * are specifically granted permission to redistribute this + * source code. +*/ + +/*------------------------------------------------------------- +Note 1) +Although the rfc requires V1 AND V2 capability +we will only support v2 since now V1 is very old (August 1989) +V1 can be added if required + +a debug print and statistic have been implemented to +show this up. +------------------------------------------------------------- +------------------------------------------------------------- +Note 2) +A query for a specific group address (as opposed to ALLHOSTS) +has now been implemented as I am unsure if it is required + +a debug print and statistic have been implemented to +show this up. +------------------------------------------------------------- +------------------------------------------------------------- +Note 3) +The router alert rfc 2113 is implemented in outgoing packets +but not checked rigorously incoming +------------------------------------------------------------- +Steve Reynolds +------------------------------------------------------------*/ + +/*----------------------------------------------------------------------------- + * RFC 988 - Host extensions for IP multicasting - V0 + * RFC 1054 - Host extensions for IP multicasting - + * RFC 1112 - Host extensions for IP multicasting - V1 + * RFC 2236 - Internet Group Management Protocol, Version 2 - V2 <- this code is based on this RFC (it's the "de facto" standard) + * RFC 3376 - Internet Group Management Protocol, Version 3 - V3 + * RFC 4604 - Using Internet Group Management Protocol Version 3... 
- V3+ + * RFC 2113 - IP Router Alert Option - + *----------------------------------------------------------------------------*/ + +/*----------------------------------------------------------------------------- + * Includes + *----------------------------------------------------------------------------*/ + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_IGMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/igmp.h" +#include "lwip/debug.h" +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/ip.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/stats.h" +#include "lwip/prot/igmp.h" + +#include <string.h> + +static struct igmp_group *igmp_lookup_group(struct netif *ifp, const ip4_addr_t *addr); +static err_t igmp_remove_group(struct netif *netif, struct igmp_group *group); +static void igmp_timeout(struct netif *netif, struct igmp_group *group); +static void igmp_start_timer(struct igmp_group *group, u8_t max_time); +static void igmp_delaying_member(struct igmp_group *group, u8_t maxresp); +static err_t igmp_ip_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, struct netif *netif); +static void igmp_send(struct netif *netif, struct igmp_group *group, u8_t type); + +static ip4_addr_t allsystems; +static ip4_addr_t allrouters; + +/** + * Initialize the IGMP module + */ +void +igmp_init(void) +{ + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_init: initializing\n")); + + IP4_ADDR(&allsystems, 224, 0, 0, 1); + IP4_ADDR(&allrouters, 224, 0, 0, 2); +} + +/** + * Start IGMP processing on interface + * + * @param netif network interface on which to start IGMP processing + */ +err_t +igmp_start(struct netif *netif) +{ + struct igmp_group *group; + + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_start: starting IGMP processing on if %p\n", (void *)netif)); + + group = igmp_lookup_group(netif, &allsystems); + + if (group != NULL) { + group->group_state = IGMP_GROUP_IDLE_MEMBER; + group->use++; + + /* Allow the igmp messages at the MAC level */ + if (netif->igmp_mac_filter != NULL) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_start: igmp_mac_filter(ADD ")); + ip4_addr_debug_print_val(IGMP_DEBUG, allsystems); + LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif)); + netif->igmp_mac_filter(netif, &allsystems, NETIF_ADD_MAC_FILTER); + } + + return ERR_OK; + } + + return ERR_MEM; +} + +/** + * Stop IGMP processing on interface + * + * @param netif network interface on which to stop IGMP processing + */ +err_t +igmp_stop(struct netif *netif) +{ + struct igmp_group *group = netif_igmp_data(netif); + + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_IGMP, NULL); + + while (group != NULL) { + struct igmp_group *next = group->next; /* avoid use-after-free below */ + + /* disable the group at the MAC level */ + if (netif->igmp_mac_filter != NULL) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_stop: igmp_mac_filter(DEL ")); + ip4_addr_debug_print_val(IGMP_DEBUG, group->group_address); + LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif)); + netif->igmp_mac_filter(netif, &(group->group_address), NETIF_DEL_MAC_FILTER); + } + + /* free group */ + memp_free(MEMP_IGMP_GROUP, group); + + /* move to "next" */ + group = next; + } + return ERR_OK; +} + +/** + * Report IGMP memberships for this interface + * + * @param netif network interface on which to report IGMP memberships + */ +void +igmp_report_groups(struct netif *netif) +{ + struct igmp_group *group = netif_igmp_data(netif); + + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_report_groups: sending IGMP reports on if %p\n", (void *)netif)); + + /* Skip 
the first group in the list, it is always the allsystems group added in igmp_start() */ + if (group != NULL) { + group = group->next; + } + + while (group != NULL) { + igmp_delaying_member(group, IGMP_JOIN_DELAYING_MEMBER_TMR); + group = group->next; + } +} + +/** + * Search for a group in the netif's igmp group list + * + * @param ifp the network interface for which to look + * @param addr the group ip address to search for + * @return a struct igmp_group* if the group has been found, + * NULL if the group wasn't found. + */ +struct igmp_group * +igmp_lookfor_group(struct netif *ifp, const ip4_addr_t *addr) +{ + struct igmp_group *group = netif_igmp_data(ifp); + + while (group != NULL) { + if (ip4_addr_cmp(&(group->group_address), addr)) { + return group; + } + group = group->next; + } + + /* to be clearer, we return NULL here instead of + * 'group' (which is also NULL at this point). + */ + return NULL; +} + +/** + * Search for a specific igmp group and create a new one if not found- + * + * @param ifp the network interface for which to look + * @param addr the group ip address to search + * @return a struct igmp_group*, + * NULL on memory error. + */ +static struct igmp_group * +igmp_lookup_group(struct netif *ifp, const ip4_addr_t *addr) +{ + struct igmp_group *group; + struct igmp_group *list_head = netif_igmp_data(ifp); + + /* Search if the group already exists */ + group = igmp_lookfor_group(ifp, addr); + if (group != NULL) { + /* Group already exists. */ + return group; + } + + /* Group doesn't exist yet, create a new one */ + group = (struct igmp_group *)memp_malloc(MEMP_IGMP_GROUP); + if (group != NULL) { + ip4_addr_set(&(group->group_address), addr); + group->timer = 0; /* Not running */ + group->group_state = IGMP_GROUP_NON_MEMBER; + group->last_reporter_flag = 0; + group->use = 0; + + /* Ensure allsystems group is always first in list */ + if (list_head == NULL) { + /* this is the first entry in linked list */ + LWIP_ASSERT("igmp_lookup_group: first group must be allsystems", + (ip4_addr_cmp(addr, &allsystems) != 0)); + group->next = NULL; + netif_set_client_data(ifp, LWIP_NETIF_CLIENT_DATA_INDEX_IGMP, group); + } else { + /* append _after_ first entry */ + LWIP_ASSERT("igmp_lookup_group: all except first group must not be allsystems", + (ip4_addr_cmp(addr, &allsystems) == 0)); + group->next = list_head->next; + list_head->next = group; + } + } + + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_lookup_group: %sallocated a new group with address ", (group ? "" : "impossible to "))); + ip4_addr_debug_print(IGMP_DEBUG, addr); + LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void *)ifp)); + + return group; +} + +/** + * Remove a group from netif's igmp group list, but don't free it yet + * + * @param group the group to remove from the netif's igmp group list + * @return ERR_OK if group was removed from the list, an err_t otherwise + */ +static err_t +igmp_remove_group(struct netif *netif, struct igmp_group *group) +{ + err_t err = ERR_OK; + struct igmp_group *tmp_group; + + /* Skip the first group in the list, it is always the allsystems group added in igmp_start() */ + for (tmp_group = netif_igmp_data(netif); tmp_group != NULL; tmp_group = tmp_group->next) { + if (tmp_group->next == group) { + tmp_group->next = group->next; + break; + } + } + /* Group not found in netif's igmp group list */ + if (tmp_group == NULL) { + err = ERR_ARG; + } + + return err; +} + +/** + * Called from ip_input() if a new IGMP packet is received. 
+ * + * @param p received igmp packet, p->payload pointing to the igmp header + * @param inp network interface on which the packet was received + * @param dest destination ip address of the igmp packet + */ +void +igmp_input(struct pbuf *p, struct netif *inp, const ip4_addr_t *dest) +{ + struct igmp_msg *igmp; + struct igmp_group *group; + struct igmp_group *groupref; + + IGMP_STATS_INC(igmp.recv); + + /* Note that the length CAN be greater than 8 but only 8 are used - All are included in the checksum */ + if (p->len < IGMP_MINLEN) { + pbuf_free(p); + IGMP_STATS_INC(igmp.lenerr); + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: length error\n")); + return; + } + + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: message from ")); + ip4_addr_debug_print_val(IGMP_DEBUG, ip4_current_header()->src); + LWIP_DEBUGF(IGMP_DEBUG, (" to address ")); + ip4_addr_debug_print_val(IGMP_DEBUG, ip4_current_header()->dest); + LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void *)inp)); + + /* Now calculate and check the checksum */ + igmp = (struct igmp_msg *)p->payload; + if (inet_chksum(igmp, p->len)) { + pbuf_free(p); + IGMP_STATS_INC(igmp.chkerr); + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: checksum error\n")); + return; + } + + /* Packet is ok so find an existing group */ + group = igmp_lookfor_group(inp, dest); /* use the destination IP address of incoming packet */ + + /* If group can be found or create... */ + if (!group) { + pbuf_free(p); + IGMP_STATS_INC(igmp.drop); + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP frame not for us\n")); + return; + } + + /* NOW ACT ON THE INCOMING MESSAGE TYPE... */ + switch (igmp->igmp_msgtype) { + case IGMP_MEMB_QUERY: + /* IGMP_MEMB_QUERY to the "all systems" address ? */ + if ((ip4_addr_cmp(dest, &allsystems)) && ip4_addr_isany(&igmp->igmp_group_address)) { + /* THIS IS THE GENERAL QUERY */ + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: General IGMP_MEMB_QUERY on \"ALL SYSTEMS\" address (224.0.0.1) [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp))); + + if (igmp->igmp_maxresp == 0) { + IGMP_STATS_INC(igmp.rx_v1); + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: got an all hosts query with time== 0 - this is V1 and not implemented - treat as v2\n")); + igmp->igmp_maxresp = IGMP_V1_DELAYING_MEMBER_TMR; + } else { + IGMP_STATS_INC(igmp.rx_general); + } + + groupref = netif_igmp_data(inp); + + /* Do not send messages on the all systems group address! */ + /* Skip the first group in the list, it is always the allsystems group added in igmp_start() */ + if (groupref != NULL) { + groupref = groupref->next; + } + + while (groupref) { + igmp_delaying_member(groupref, igmp->igmp_maxresp); + groupref = groupref->next; + } + } else { + /* IGMP_MEMB_QUERY to a specific group ? 
*/ + if (!ip4_addr_isany(&igmp->igmp_group_address)) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP_MEMB_QUERY to a specific group ")); + ip4_addr_debug_print_val(IGMP_DEBUG, igmp->igmp_group_address); + if (ip4_addr_cmp(dest, &allsystems)) { + ip4_addr_t groupaddr; + LWIP_DEBUGF(IGMP_DEBUG, (" using \"ALL SYSTEMS\" address (224.0.0.1) [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp))); + /* we first need to re-look for the group since we used dest last time */ + ip4_addr_copy(groupaddr, igmp->igmp_group_address); + group = igmp_lookfor_group(inp, &groupaddr); + } else { + LWIP_DEBUGF(IGMP_DEBUG, (" with the group address as destination [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp))); + } + + if (group != NULL) { + IGMP_STATS_INC(igmp.rx_group); + igmp_delaying_member(group, igmp->igmp_maxresp); + } else { + IGMP_STATS_INC(igmp.drop); + } + } else { + IGMP_STATS_INC(igmp.proterr); + } + } + break; + case IGMP_V2_MEMB_REPORT: + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP_V2_MEMB_REPORT\n")); + IGMP_STATS_INC(igmp.rx_report); + if (group->group_state == IGMP_GROUP_DELAYING_MEMBER) { + /* This is on a specific group we have already looked up */ + group->timer = 0; /* stopped */ + group->group_state = IGMP_GROUP_IDLE_MEMBER; + group->last_reporter_flag = 0; + } + break; + default: + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: unexpected msg %d in state %d on group %p on if %p\n", + igmp->igmp_msgtype, group->group_state, (void *)&group, (void *)inp)); + IGMP_STATS_INC(igmp.proterr); + break; + } + + pbuf_free(p); + return; +} + +/** + * @ingroup igmp + * Join a group on one network interface. + * + * @param ifaddr ip address of the network interface which should join a new group + * @param groupaddr the ip address of the group which to join + * @return ERR_OK if group was joined on the netif(s), an err_t otherwise + */ +err_t +igmp_joingroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr) +{ + err_t err = ERR_VAL; /* no matching interface */ + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + + /* make sure it is multicast address */ + LWIP_ERROR("igmp_joingroup: attempt to join non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); + LWIP_ERROR("igmp_joingroup: attempt to join allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); + + /* loop through netif's */ + NETIF_FOREACH(netif) { + /* Should we join this interface ? */ + if ((netif->flags & NETIF_FLAG_IGMP) && ((ip4_addr_isany(ifaddr) || ip4_addr_cmp(netif_ip4_addr(netif), ifaddr)))) { + err = igmp_joingroup_netif(netif, groupaddr); + if (err != ERR_OK) { + /* Return an error even if some network interfaces are joined */ + /** @todo undo any other netif already joined */ + return err; + } + } + } + + return err; +} + +/** + * @ingroup igmp + * Join a group on one network interface. 
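A usage sketch for igmp_joingroup() as documented above; the call must run in the TCPIP thread (or with the core locked) and both addresses are illustrative:

#include "lwip/igmp.h"

static void join_example_group(void)
{
  ip4_addr_t ifaddr, group;
  IP4_ADDR(&ifaddr, 192, 168, 1, 50);  /* interface address; an 'any' address joins on every IGMP-enabled netif */
  IP4_ADDR(&group, 239, 0, 0, 1);      /* multicast group to join */
  if (igmp_joingroup(&ifaddr, &group) != ERR_OK) {
    /* no matching IGMP-enabled netif, or MEMP_IGMP_GROUP exhausted */
  }
}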
+ * + * @param netif the network interface which should join a new group + * @param groupaddr the ip address of the group which to join + * @return ERR_OK if group was joined on the netif, an err_t otherwise + */ +err_t +igmp_joingroup_netif(struct netif *netif, const ip4_addr_t *groupaddr) +{ + struct igmp_group *group; + + LWIP_ASSERT_CORE_LOCKED(); + + /* make sure it is multicast address */ + LWIP_ERROR("igmp_joingroup_netif: attempt to join non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); + LWIP_ERROR("igmp_joingroup_netif: attempt to join allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); + + /* make sure it is an igmp-enabled netif */ + LWIP_ERROR("igmp_joingroup_netif: attempt to join on non-IGMP netif", netif->flags & NETIF_FLAG_IGMP, return ERR_VAL;); + + /* find group or create a new one if not found */ + group = igmp_lookup_group(netif, groupaddr); + + if (group != NULL) { + /* This should create a new group, check the state to make sure */ + if (group->group_state != IGMP_GROUP_NON_MEMBER) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: join to group not in state IGMP_GROUP_NON_MEMBER\n")); + } else { + /* OK - it was new group */ + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: join to new group: ")); + ip4_addr_debug_print(IGMP_DEBUG, groupaddr); + LWIP_DEBUGF(IGMP_DEBUG, ("\n")); + + /* If first use of the group, allow the group at the MAC level */ + if ((group->use == 0) && (netif->igmp_mac_filter != NULL)) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: igmp_mac_filter(ADD ")); + ip4_addr_debug_print(IGMP_DEBUG, groupaddr); + LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif)); + netif->igmp_mac_filter(netif, groupaddr, NETIF_ADD_MAC_FILTER); + } + + IGMP_STATS_INC(igmp.tx_join); + igmp_send(netif, group, IGMP_V2_MEMB_REPORT); + + igmp_start_timer(group, IGMP_JOIN_DELAYING_MEMBER_TMR); + + /* Need to work out where this timer comes from */ + group->group_state = IGMP_GROUP_DELAYING_MEMBER; + } + /* Increment group use */ + group->use++; + /* Join on this interface */ + return ERR_OK; + } else { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: Not enough memory to join to group\n")); + return ERR_MEM; + } +} + +/** + * @ingroup igmp + * Leave a group on one network interface. + * + * @param ifaddr ip address of the network interface which should leave a group + * @param groupaddr the ip address of the group which to leave + * @return ERR_OK if group was left on the netif(s), an err_t otherwise + */ +err_t +igmp_leavegroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr) +{ + err_t err = ERR_VAL; /* no matching interface */ + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + + /* make sure it is multicast address */ + LWIP_ERROR("igmp_leavegroup: attempt to leave non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); + LWIP_ERROR("igmp_leavegroup: attempt to leave allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); + + /* loop through netif's */ + NETIF_FOREACH(netif) { + /* Should we leave this interface ? */ + if ((netif->flags & NETIF_FLAG_IGMP) && ((ip4_addr_isany(ifaddr) || ip4_addr_cmp(netif_ip4_addr(netif), ifaddr)))) { + err_t res = igmp_leavegroup_netif(netif, groupaddr); + if (err != ERR_OK) { + /* Store this result if we have not yet gotten a success */ + err = res; + } + } + } + + return err; +} + +/** + * @ingroup igmp + * Leave a group on one network interface. 
+ * + * @param netif the network interface which should leave a group + * @param groupaddr the ip address of the group which to leave + * @return ERR_OK if group was left on the netif, an err_t otherwise + */ +err_t +igmp_leavegroup_netif(struct netif *netif, const ip4_addr_t *groupaddr) +{ + struct igmp_group *group; + + LWIP_ASSERT_CORE_LOCKED(); + + /* make sure it is multicast address */ + LWIP_ERROR("igmp_leavegroup_netif: attempt to leave non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); + LWIP_ERROR("igmp_leavegroup_netif: attempt to leave allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); + + /* make sure it is an igmp-enabled netif */ + LWIP_ERROR("igmp_leavegroup_netif: attempt to leave on non-IGMP netif", netif->flags & NETIF_FLAG_IGMP, return ERR_VAL;); + + /* find group */ + group = igmp_lookfor_group(netif, groupaddr); + + if (group != NULL) { + /* Only send a leave if the flag is set according to the state diagram */ + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: Leaving group: ")); + ip4_addr_debug_print(IGMP_DEBUG, groupaddr); + LWIP_DEBUGF(IGMP_DEBUG, ("\n")); + + /* If there is no other use of the group */ + if (group->use <= 1) { + /* Remove the group from the list */ + igmp_remove_group(netif, group); + + /* If we are the last reporter for this group */ + if (group->last_reporter_flag) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: sending leaving group\n")); + IGMP_STATS_INC(igmp.tx_leave); + igmp_send(netif, group, IGMP_LEAVE_GROUP); + } + + /* Disable the group at the MAC level */ + if (netif->igmp_mac_filter != NULL) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: igmp_mac_filter(DEL ")); + ip4_addr_debug_print(IGMP_DEBUG, groupaddr); + LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif)); + netif->igmp_mac_filter(netif, groupaddr, NETIF_DEL_MAC_FILTER); + } + + /* Free group struct */ + memp_free(MEMP_IGMP_GROUP, group); + } else { + /* Decrement group use */ + group->use--; + } + return ERR_OK; + } else { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: not member of group\n")); + return ERR_VAL; + } +} + +/** + * The igmp timer function (both for NO_SYS=1 and =0) + * Should be called every IGMP_TMR_INTERVAL milliseconds (100 ms is default). + */ +void +igmp_tmr(void) +{ + struct netif *netif; + + NETIF_FOREACH(netif) { + struct igmp_group *group = netif_igmp_data(netif); + + while (group != NULL) { + if (group->timer > 0) { + group->timer--; + if (group->timer == 0) { + igmp_timeout(netif, group); + } + } + group = group->next; + } + } +} + +/** + * Called if a timeout for one group is reached. + * Sends a report for this group. 
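igmp_tmr() above has to be driven every IGMP_TMR_INTERVAL milliseconds. With the standard lwIP timer framework this already happens from the tcpip thread (or via sys_check_timeouts() in NO_SYS builds); the sketch below is only for a hand-rolled NO_SYS=1 main loop and assumes sys_now() is the millisecond tick source:

#include "lwip/igmp.h"
#include "lwip/sys.h"

static u32_t last_igmp_tmr;

void poll_igmp_timer(void)
{
  if ((u32_t)(sys_now() - last_igmp_tmr) >= IGMP_TMR_INTERVAL) {
    last_igmp_tmr = sys_now();
    igmp_tmr();
  }
}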
+ * + * @param group an igmp_group for which a timeout is reached + */ +static void +igmp_timeout(struct netif *netif, struct igmp_group *group) +{ + /* If the state is IGMP_GROUP_DELAYING_MEMBER then we send a report for this group + (unless it is the allsystems group) */ + if ((group->group_state == IGMP_GROUP_DELAYING_MEMBER) && + (!(ip4_addr_cmp(&(group->group_address), &allsystems)))) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_timeout: report membership for group with address ")); + ip4_addr_debug_print_val(IGMP_DEBUG, group->group_address); + LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void *)netif)); + + group->group_state = IGMP_GROUP_IDLE_MEMBER; + + IGMP_STATS_INC(igmp.tx_report); + igmp_send(netif, group, IGMP_V2_MEMB_REPORT); + } +} + +/** + * Start a timer for an igmp group + * + * @param group the igmp_group for which to start a timer + * @param max_time the time in multiples of IGMP_TMR_INTERVAL (decrease with + * every call to igmp_tmr()) + */ +static void +igmp_start_timer(struct igmp_group *group, u8_t max_time) +{ +#ifdef LWIP_RAND + group->timer = (u16_t)(max_time > 2 ? (LWIP_RAND() % max_time) : 1); +#else /* LWIP_RAND */ + /* ATTENTION: use this only if absolutely necessary! */ + group->timer = max_time / 2; +#endif /* LWIP_RAND */ + + if (group->timer == 0) { + group->timer = 1; + } +} + +/** + * Delaying membership report for a group if necessary + * + * @param group the igmp_group for which "delaying" membership report + * @param maxresp query delay + */ +static void +igmp_delaying_member(struct igmp_group *group, u8_t maxresp) +{ + if ((group->group_state == IGMP_GROUP_IDLE_MEMBER) || + ((group->group_state == IGMP_GROUP_DELAYING_MEMBER) && + ((group->timer == 0) || (maxresp < group->timer)))) { + igmp_start_timer(group, maxresp); + group->group_state = IGMP_GROUP_DELAYING_MEMBER; + } +} + + +/** + * Sends an IP packet on a network interface. This function constructs the IP header + * and calculates the IP header checksum. If the source IP address is NULL, + * the IP address of the outgoing network interface is filled in as source address. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IP header and p->payload points to that IP header) + * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the + * IP address of the netif used to send is used as source address) + * @param dest the destination IP address to send the packet to + * @param netif the netif on which to send this packet + * @return ERR_OK if the packet was sent OK + * ERR_BUF if p doesn't have enough space for IP/LINK headers + * returns errors returned by netif->output + */ +static err_t +igmp_ip_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, struct netif *netif) +{ + /* This is the "router alert" option */ + u16_t ra[2]; + ra[0] = PP_HTONS(ROUTER_ALERT); + ra[1] = 0x0000; /* Router shall examine packet */ + IGMP_STATS_INC(igmp.xmit); + return ip4_output_if_opt(p, src, dest, IGMP_TTL, 0, IP_PROTO_IGMP, netif, ra, ROUTER_ALERTLEN); +} + +/** + * Send an igmp packet to a specific group. 
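The timer randomization in igmp_start_timer() above only takes effect when the port defines LWIP_RAND; otherwise the fixed max_time/2 fallback is used. A lwipopts.h sketch, assuming the C library rand() is an acceptable entropy source for this purpose:

#define LWIP_RAND() ((u32_t)rand())   /* lets igmp_start_timer() spread report timers randomly */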
+ * + * @param group the group to which to send the packet + * @param type the type of igmp packet to send + */ +static void +igmp_send(struct netif *netif, struct igmp_group *group, u8_t type) +{ + struct pbuf *p = NULL; + struct igmp_msg *igmp = NULL; + ip4_addr_t src = *IP4_ADDR_ANY4; + ip4_addr_t *dest = NULL; + + /* IP header + "router alert" option + IGMP header */ + p = pbuf_alloc(PBUF_TRANSPORT, IGMP_MINLEN, PBUF_RAM); + + if (p) { + igmp = (struct igmp_msg *)p->payload; + LWIP_ASSERT("igmp_send: check that first pbuf can hold struct igmp_msg", + (p->len >= sizeof(struct igmp_msg))); + ip4_addr_copy(src, *netif_ip4_addr(netif)); + + if (type == IGMP_V2_MEMB_REPORT) { + dest = &(group->group_address); + ip4_addr_copy(igmp->igmp_group_address, group->group_address); + group->last_reporter_flag = 1; /* Remember we were the last to report */ + } else { + if (type == IGMP_LEAVE_GROUP) { + dest = &allrouters; + ip4_addr_copy(igmp->igmp_group_address, group->group_address); + } + } + + if ((type == IGMP_V2_MEMB_REPORT) || (type == IGMP_LEAVE_GROUP)) { + igmp->igmp_msgtype = type; + igmp->igmp_maxresp = 0; + igmp->igmp_checksum = 0; + igmp->igmp_checksum = inet_chksum(igmp, IGMP_MINLEN); + + igmp_ip_output_if(p, &src, dest, netif); + } + + pbuf_free(p); + } else { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_send: not enough memory for igmp_send\n")); + IGMP_STATS_INC(igmp.memerr); + } +} + +#endif /* LWIP_IPV4 && LWIP_IGMP */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c b/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c new file mode 100644 index 0000000..26c26a9 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c @@ -0,0 +1,1132 @@ +/** + * @file + * This is the IPv4 layer implementation for incoming and outgoing IP traffic. + * + * @see ip_frag.c + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels <adam@sics.se> + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 + +#include "lwip/ip.h" +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/ip4_frag.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/icmp.h" +#include "lwip/igmp.h" +#include "lwip/priv/raw_priv.h" +#include "lwip/udp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/autoip.h" +#include "lwip/stats.h" +#include "lwip/prot/iana.h" + +#include <string.h> + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/** Set this to 0 in the rare case of wanting to call an extra function to + * generate the IP checksum (in contrast to calculating it on-the-fly). */ +#ifndef LWIP_INLINE_IP_CHKSUM +#if LWIP_CHECKSUM_CTRL_PER_NETIF +#define LWIP_INLINE_IP_CHKSUM 0 +#else /* LWIP_CHECKSUM_CTRL_PER_NETIF */ +#define LWIP_INLINE_IP_CHKSUM 1 +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */ +#endif + +#if LWIP_INLINE_IP_CHKSUM && CHECKSUM_GEN_IP +#define CHECKSUM_GEN_IP_INLINE 1 +#else +#define CHECKSUM_GEN_IP_INLINE 0 +#endif + +#if LWIP_DHCP || defined(LWIP_IP_ACCEPT_UDP_PORT) +#define IP_ACCEPT_LINK_LAYER_ADDRESSING 1 + +/** Some defines for DHCP to let link-layer-addressed packets through while the + * netif is down. + * To use this in your own application/protocol, define LWIP_IP_ACCEPT_UDP_PORT(port) + * to return 1 if the port is accepted and 0 if the port is not accepted. + */ +#if LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) +/* accept DHCP client port and custom port */ +#define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) (((port) == PP_NTOHS(LWIP_IANA_PORT_DHCP_CLIENT)) \ + || (LWIP_IP_ACCEPT_UDP_PORT(port))) +#elif defined(LWIP_IP_ACCEPT_UDP_PORT) /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */ +/* accept custom port only */ +#define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) (LWIP_IP_ACCEPT_UDP_PORT(port)) +#else /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */ +/* accept DHCP client port only */ +#define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) ((port) == PP_NTOHS(LWIP_IANA_PORT_DHCP_CLIENT)) +#endif /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */ + +#else /* LWIP_DHCP */ +#define IP_ACCEPT_LINK_LAYER_ADDRESSING 0 +#endif /* LWIP_DHCP */ + +/** The IP header ID of the next outgoing IP packet */ +static u16_t ip_id; + +#if LWIP_MULTICAST_TX_OPTIONS +/** The default netif used for multicast */ +static struct netif *ip4_default_multicast_netif; + +/** + * @ingroup ip4 + * Set a default netif for IPv4 multicast. */ +void +ip4_set_default_multicast_netif(struct netif *default_multicast_netif) +{ + ip4_default_multicast_netif = default_multicast_netif; +} +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#ifdef LWIP_HOOK_IP4_ROUTE_SRC +/** + * Source based IPv4 routing must be fully implemented in + * LWIP_HOOK_IP4_ROUTE_SRC(). This function only provides the parameters. + */ +struct netif * +ip4_route_src(const ip4_addr_t *src, const ip4_addr_t *dest) +{ + if (src != NULL) { + /* when src==NULL, the hook is called from ip4_route(dest) */ + struct netif *netif = LWIP_HOOK_IP4_ROUTE_SRC(src, dest); + if (netif != NULL) { + return netif; + } + } + return ip4_route(dest); +} +#endif /* LWIP_HOOK_IP4_ROUTE_SRC */ + +/** + * Finds the appropriate network interface for a given IP address. It + * searches the list of network interfaces linearly. A match is found + * if the masked IP address of the network interface equals the masked + * IP address given to the function. 
+ * + * @param dest the destination IP address for which to find the route + * @return the netif on which to send to reach dest + */ +struct netif * +ip4_route(const ip4_addr_t *dest) +{ +#if !LWIP_SINGLE_NETIF + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_MULTICAST_TX_OPTIONS + /* Use administratively selected interface for multicast by default */ + if (ip4_addr_ismulticast(dest) && ip4_default_multicast_netif) { + return ip4_default_multicast_netif; + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + /* bug #54569: in case LWIP_SINGLE_NETIF=1 and LWIP_DEBUGF() disabled, the following loop is optimized away */ + LWIP_UNUSED_ARG(dest); + + /* iterate through netifs */ + NETIF_FOREACH(netif) { + /* is the netif up, does it have a link and a valid address? */ + if (netif_is_up(netif) && netif_is_link_up(netif) && !ip4_addr_isany_val(*netif_ip4_addr(netif))) { + /* network mask matches? */ + if (ip4_addr_netcmp(dest, netif_ip4_addr(netif), netif_ip4_netmask(netif))) { + /* return netif on which to forward IP packet */ + return netif; + } + /* gateway matches on a non broadcast interface? (i.e. peer in a point to point interface) */ + if (((netif->flags & NETIF_FLAG_BROADCAST) == 0) && ip4_addr_cmp(dest, netif_ip4_gw(netif))) { + /* return netif on which to forward IP packet */ + return netif; + } + } + } + +#if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF + /* loopif is disabled, looopback traffic is passed through any netif */ + if (ip4_addr_isloopback(dest)) { + /* don't check for link on loopback traffic */ + if (netif_default != NULL && netif_is_up(netif_default)) { + return netif_default; + } + /* default netif is not up, just use any netif for loopback traffic */ + NETIF_FOREACH(netif) { + if (netif_is_up(netif)) { + return netif; + } + } + return NULL; + } +#endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */ + +#ifdef LWIP_HOOK_IP4_ROUTE_SRC + netif = LWIP_HOOK_IP4_ROUTE_SRC(NULL, dest); + if (netif != NULL) { + return netif; + } +#elif defined(LWIP_HOOK_IP4_ROUTE) + netif = LWIP_HOOK_IP4_ROUTE(dest); + if (netif != NULL) { + return netif; + } +#endif +#endif /* !LWIP_SINGLE_NETIF */ + + if ((netif_default == NULL) || !netif_is_up(netif_default) || !netif_is_link_up(netif_default) || + ip4_addr_isany_val(*netif_ip4_addr(netif_default)) || ip4_addr_isloopback(dest)) { + /* No matching netif found and default netif is not usable. + If this is not good enough for you, use LWIP_HOOK_IP4_ROUTE() */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_route: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest))); + IP_STATS_INC(ip.rterr); + MIB2_STATS_INC(mib2.ipoutnoroutes); + return NULL; + } + + return netif_default; +} + +#if IP_FORWARD +/** + * Determine whether an IP address is in a reserved set of addresses + * that may not be forwarded, or whether datagrams to that destination + * may be forwarded. 
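A small illustration of the lookup above: an on-link destination resolves to the netif whose netmask matches, anything else falls back to netif_default, which must be up, linked and addressed to be usable. The destination value is illustrative only:

#include "lwip/ip4.h"

static struct netif *route_example(void)
{
  ip4_addr_t dest;
  IP4_ADDR(&dest, 8, 8, 8, 8);  /* off-link: no local netmask will match */
  return ip4_route(&dest);      /* netif_default, or NULL if it is not usable */
}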
+ * @param p the packet to forward + * @return 1: can forward 0: discard + */ +static int +ip4_canforward(struct pbuf *p) +{ + u32_t addr = lwip_htonl(ip4_addr_get_u32(ip4_current_dest_addr())); + +#ifdef LWIP_HOOK_IP4_CANFORWARD + int ret = LWIP_HOOK_IP4_CANFORWARD(p, addr); + if (ret >= 0) { + return ret; + } +#endif /* LWIP_HOOK_IP4_CANFORWARD */ + + if (p->flags & PBUF_FLAG_LLBCAST) { + /* don't route link-layer broadcasts */ + return 0; + } + if ((p->flags & PBUF_FLAG_LLMCAST) || IP_MULTICAST(addr)) { + /* don't route link-layer multicasts (use LWIP_HOOK_IP4_CANFORWARD instead) */ + return 0; + } + if (IP_EXPERIMENTAL(addr)) { + return 0; + } + if (IP_CLASSA(addr)) { + u32_t net = addr & IP_CLASSA_NET; + if ((net == 0) || (net == ((u32_t)IP_LOOPBACKNET << IP_CLASSA_NSHIFT))) { + /* don't route loopback packets */ + return 0; + } + } + return 1; +} + +/** + * Forwards an IP packet. It finds an appropriate route for the + * packet, decrements the TTL value of the packet, adjusts the + * checksum and outputs the packet on the appropriate interface. + * + * @param p the packet to forward (p->payload points to IP header) + * @param iphdr the IP header of the input packet + * @param inp the netif on which this packet was received + */ +static void +ip4_forward(struct pbuf *p, struct ip_hdr *iphdr, struct netif *inp) +{ + struct netif *netif; + + PERF_START; + LWIP_UNUSED_ARG(inp); + + if (!ip4_canforward(p)) { + goto return_noroute; + } + + /* RFC3927 2.7: do not forward link-local addresses */ + if (ip4_addr_islinklocal(ip4_current_dest_addr())) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: not forwarding LLA %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()), + ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr()))); + goto return_noroute; + } + + /* Find network interface where to forward this IP packet to. */ + netif = ip4_route_src(ip4_current_src_addr(), ip4_current_dest_addr()); + if (netif == NULL) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: no forwarding route for %"U16_F".%"U16_F".%"U16_F".%"U16_F" found\n", + ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()), + ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr()))); + /* @todo: send ICMP_DUR_NET? */ + goto return_noroute; + } +#if !IP_FORWARD_ALLOW_TX_ON_RX_NETIF + /* Do not forward packets onto the same network interface on which + * they arrived. */ + if (netif == inp) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: not bouncing packets back on incoming interface.\n")); + goto return_noroute; + } +#endif /* IP_FORWARD_ALLOW_TX_ON_RX_NETIF */ + + /* decrement TTL */ + IPH_TTL_SET(iphdr, IPH_TTL(iphdr) - 1); + /* send ICMP if TTL == 0 */ + if (IPH_TTL(iphdr) == 0) { + MIB2_STATS_INC(mib2.ipinhdrerrors); +#if LWIP_ICMP + /* Don't send ICMP messages in response to ICMP messages */ + if (IPH_PROTO(iphdr) != IP_PROTO_ICMP) { + icmp_time_exceeded(p, ICMP_TE_TTL); + } +#endif /* LWIP_ICMP */ + return; + } + + /* Incrementally update the IP checksum. 
*/ + if (IPH_CHKSUM(iphdr) >= PP_HTONS(0xffffU - 0x100)) { + IPH_CHKSUM_SET(iphdr, (u16_t)(IPH_CHKSUM(iphdr) + PP_HTONS(0x100) + 1)); + } else { + IPH_CHKSUM_SET(iphdr, (u16_t)(IPH_CHKSUM(iphdr) + PP_HTONS(0x100))); + } + + LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: forwarding packet to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()), + ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr()))); + + IP_STATS_INC(ip.fw); + MIB2_STATS_INC(mib2.ipforwdatagrams); + IP_STATS_INC(ip.xmit); + + PERF_STOP("ip4_forward"); + /* don't fragment if interface has mtu set to 0 [loopif] */ + if (netif->mtu && (p->tot_len > netif->mtu)) { + if ((IPH_OFFSET(iphdr) & PP_NTOHS(IP_DF)) == 0) { +#if IP_FRAG + ip4_frag(p, netif, ip4_current_dest_addr()); +#else /* IP_FRAG */ + /* @todo: send ICMP Destination Unreachable code 13 "Communication administratively prohibited"? */ +#endif /* IP_FRAG */ + } else { +#if LWIP_ICMP + /* send ICMP Destination Unreachable code 4: "Fragmentation Needed and DF Set" */ + icmp_dest_unreach(p, ICMP_DUR_FRAG); +#endif /* LWIP_ICMP */ + } + return; + } + /* transmit pbuf on chosen interface */ + netif->output(netif, p, ip4_current_dest_addr()); + return; +return_noroute: + MIB2_STATS_INC(mib2.ipoutnoroutes); +} +#endif /* IP_FORWARD */ + +/** Return true if the current input packet should be accepted on this netif */ +static int +ip4_input_accept(struct netif *netif) +{ + LWIP_DEBUGF(IP_DEBUG, ("ip_input: iphdr->dest 0x%"X32_F" netif->ip_addr 0x%"X32_F" (0x%"X32_F", 0x%"X32_F", 0x%"X32_F")\n", + ip4_addr_get_u32(ip4_current_dest_addr()), ip4_addr_get_u32(netif_ip4_addr(netif)), + ip4_addr_get_u32(ip4_current_dest_addr()) & ip4_addr_get_u32(netif_ip4_netmask(netif)), + ip4_addr_get_u32(netif_ip4_addr(netif)) & ip4_addr_get_u32(netif_ip4_netmask(netif)), + ip4_addr_get_u32(ip4_current_dest_addr()) & ~ip4_addr_get_u32(netif_ip4_netmask(netif)))); + + /* interface is up and configured? */ + if ((netif_is_up(netif)) && (!ip4_addr_isany_val(*netif_ip4_addr(netif)))) { + /* unicast to this interface address? */ + if (ip4_addr_cmp(ip4_current_dest_addr(), netif_ip4_addr(netif)) || + /* or broadcast on this interface network address? */ + ip4_addr_isbroadcast(ip4_current_dest_addr(), netif) +#if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF + || (ip4_addr_get_u32(ip4_current_dest_addr()) == PP_HTONL(IPADDR_LOOPBACK)) +#endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */ + ) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_input: packet accepted on interface %c%c\n", + netif->name[0], netif->name[1])); + /* accept on this netif */ + return 1; + } +#if LWIP_AUTOIP + /* connections to link-local addresses must persist after changing + the netif's address (RFC3927 ch. 1.9) */ + if (autoip_accept_packet(netif, ip4_current_dest_addr())) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_input: LLA packet accepted on interface %c%c\n", + netif->name[0], netif->name[1])); + /* accept on this netif */ + return 1; + } +#endif /* LWIP_AUTOIP */ + } + return 0; +} + +/** + * This function is called by the network interface device driver when + * an IP packet is received. The function does the basic checks of the + * IP header such as packet size being at least larger than the header + * size etc. If the packet was not destined for us, the packet is + * forwarded (using ip_forward). The IP checksum is always checked. + * + * Finally, the packet is sent to the upper layer protocol input function. 
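+ *
+ * A minimal sketch of the driver-side call (the error handling mirrors the
+ * pattern of lwIP's example netif drivers; p is the received pbuf, inp the
+ * receiving netif):
+ *
+ *   if (ip4_input(p, inp) != ERR_OK) {
+ *     pbuf_free(p);
+ *   }
+ *
+ * On ERR_OK the stack has taken ownership of p and frees it itself.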
+ * + * @param p the received IP packet (p->payload points to IP header) + * @param inp the netif on which this packet was received + * @return ERR_OK if the packet was processed (could return ERR_* if it wasn't + * processed, but currently always returns ERR_OK) + */ +err_t +ip4_input(struct pbuf *p, struct netif *inp) +{ + const struct ip_hdr *iphdr; + struct netif *netif; + u16_t iphdr_hlen; + u16_t iphdr_len; +#if IP_ACCEPT_LINK_LAYER_ADDRESSING || LWIP_IGMP + int check_ip_src = 1; +#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING || LWIP_IGMP */ +#if LWIP_RAW + raw_input_state_t raw_status; +#endif /* LWIP_RAW */ + + LWIP_ASSERT_CORE_LOCKED(); + + IP_STATS_INC(ip.recv); + MIB2_STATS_INC(mib2.ipinreceives); + + /* identify the IP header */ + iphdr = (struct ip_hdr *)p->payload; + if (IPH_V(iphdr) != 4) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_WARNING, ("IP packet dropped due to bad version number %"U16_F"\n", (u16_t)IPH_V(iphdr))); + ip4_debug_print(p); + pbuf_free(p); + IP_STATS_INC(ip.err); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinhdrerrors); + return ERR_OK; + } + +#ifdef LWIP_HOOK_IP4_INPUT + if (LWIP_HOOK_IP4_INPUT(p, inp)) { + /* the packet has been eaten */ + return ERR_OK; + } +#endif + + /* obtain IP header length in bytes */ + iphdr_hlen = IPH_HL_BYTES(iphdr); + /* obtain ip length in bytes */ + iphdr_len = lwip_ntohs(IPH_LEN(iphdr)); + + /* Trim pbuf. This is especially required for packets < 60 bytes. */ + if (iphdr_len < p->tot_len) { + pbuf_realloc(p, iphdr_len); + } + + /* header length exceeds first pbuf length, or ip length exceeds total pbuf length? */ + if ((iphdr_hlen > p->len) || (iphdr_len > p->tot_len) || (iphdr_hlen < IP_HLEN)) { + if (iphdr_hlen < IP_HLEN) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("ip4_input: short IP header (%"U16_F" bytes) received, IP packet dropped\n", iphdr_hlen)); + } + if (iphdr_hlen > p->len) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IP header (len %"U16_F") does not fit in first pbuf (len %"U16_F"), IP packet dropped.\n", + iphdr_hlen, p->len)); + } + if (iphdr_len > p->tot_len) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IP (len %"U16_F") is longer than pbuf (len %"U16_F"), IP packet dropped.\n", + iphdr_len, p->tot_len)); + } + /* free (drop) packet pbufs */ + pbuf_free(p); + IP_STATS_INC(ip.lenerr); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipindiscards); + return ERR_OK; + } + + /* verify checksum */ +#if CHECKSUM_CHECK_IP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_IP) { + if (inet_chksum(iphdr, iphdr_hlen) != 0) { + + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("Checksum (0x%"X16_F") failed, IP packet dropped.\n", inet_chksum(iphdr, iphdr_hlen))); + ip4_debug_print(p); + pbuf_free(p); + IP_STATS_INC(ip.chkerr); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinhdrerrors); + return ERR_OK; + } + } +#endif + + /* copy IP addresses to aligned ip_addr_t */ + ip_addr_copy_from_ip4(ip_data.current_iphdr_dest, iphdr->dest); + ip_addr_copy_from_ip4(ip_data.current_iphdr_src, iphdr->src); + + /* match packet against an interface, i.e. is this packet for us? 
*/ + if (ip4_addr_ismulticast(ip4_current_dest_addr())) { +#if LWIP_IGMP + if ((inp->flags & NETIF_FLAG_IGMP) && (igmp_lookfor_group(inp, ip4_current_dest_addr()))) { + /* IGMP snooping switches need 0.0.0.0 to be allowed as source address (RFC 4541) */ + ip4_addr_t allsystems; + IP4_ADDR(&allsystems, 224, 0, 0, 1); + if (ip4_addr_cmp(ip4_current_dest_addr(), &allsystems) && + ip4_addr_isany(ip4_current_src_addr())) { + check_ip_src = 0; + } + netif = inp; + } else { + netif = NULL; + } +#else /* LWIP_IGMP */ + if ((netif_is_up(inp)) && (!ip4_addr_isany_val(*netif_ip4_addr(inp)))) { + netif = inp; + } else { + netif = NULL; + } +#endif /* LWIP_IGMP */ + } else { + /* start trying with inp. if that's not acceptable, start walking the + list of configured netifs. */ + if (ip4_input_accept(inp)) { + netif = inp; + } else { + netif = NULL; +#if !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF + /* Packets sent to the loopback address must not be accepted on an + * interface that does not have the loopback address assigned to it, + * unless a non-loopback interface is used for loopback traffic. */ + if (!ip4_addr_isloopback(ip4_current_dest_addr())) +#endif /* !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF */ + { +#if !LWIP_SINGLE_NETIF + NETIF_FOREACH(netif) { + if (netif == inp) { + /* we checked that before already */ + continue; + } + if (ip4_input_accept(netif)) { + break; + } + } +#endif /* !LWIP_SINGLE_NETIF */ + } + } + } + +#if IP_ACCEPT_LINK_LAYER_ADDRESSING + /* Pass DHCP messages regardless of destination address. DHCP traffic is addressed + * using link layer addressing (such as Ethernet MAC) so we must not filter on IP. + * According to RFC 1542 section 3.1.1, referred by RFC 2131). + * + * If you want to accept private broadcast communication while a netif is down, + * define LWIP_IP_ACCEPT_UDP_PORT(dst_port), e.g.: + * + * #define LWIP_IP_ACCEPT_UDP_PORT(dst_port) ((dst_port) == PP_NTOHS(12345)) + */ + if (netif == NULL) { + /* remote port is DHCP server? */ + if (IPH_PROTO(iphdr) == IP_PROTO_UDP) { + const struct udp_hdr *udphdr = (const struct udp_hdr *)((const u8_t *)iphdr + iphdr_hlen); + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: UDP packet to DHCP client port %"U16_F"\n", + lwip_ntohs(udphdr->dest))); + if (IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(udphdr->dest)) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: DHCP packet accepted.\n")); + netif = inp; + check_ip_src = 0; + } + } + } +#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */ + + /* broadcast or multicast packet source address? Compliant with RFC 1122: 3.2.1.3 */ +#if LWIP_IGMP || IP_ACCEPT_LINK_LAYER_ADDRESSING + if (check_ip_src +#if IP_ACCEPT_LINK_LAYER_ADDRESSING + /* DHCP servers need 0.0.0.0 to be allowed as source address (RFC 1.1.2.2: 3.2.1.3/a) */ + && !ip4_addr_isany_val(*ip4_current_src_addr()) +#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */ + ) +#endif /* LWIP_IGMP || IP_ACCEPT_LINK_LAYER_ADDRESSING */ + { + if ((ip4_addr_isbroadcast(ip4_current_src_addr(), inp)) || + (ip4_addr_ismulticast(ip4_current_src_addr()))) { + /* packet source is not valid */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("ip4_input: packet source is not valid.\n")); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinaddrerrors); + MIB2_STATS_INC(mib2.ipindiscards); + return ERR_OK; + } + } + + /* packet not for us? 
*/ + if (netif == NULL) { + /* packet not for us, route or discard */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: packet not for us.\n")); +#if IP_FORWARD + /* non-broadcast packet? */ + if (!ip4_addr_isbroadcast(ip4_current_dest_addr(), inp)) { + /* try to forward IP packet on (other) interfaces */ + ip4_forward(p, (struct ip_hdr *)p->payload, inp); + } else +#endif /* IP_FORWARD */ + { + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinaddrerrors); + MIB2_STATS_INC(mib2.ipindiscards); + } + pbuf_free(p); + return ERR_OK; + } + /* packet consists of multiple fragments? */ + if ((IPH_OFFSET(iphdr) & PP_HTONS(IP_OFFMASK | IP_MF)) != 0) { +#if IP_REASSEMBLY /* packet fragment reassembly code present? */ + LWIP_DEBUGF(IP_DEBUG, ("IP packet is a fragment (id=0x%04"X16_F" tot_len=%"U16_F" len=%"U16_F" MF=%"U16_F" offset=%"U16_F"), calling ip4_reass()\n", + lwip_ntohs(IPH_ID(iphdr)), p->tot_len, lwip_ntohs(IPH_LEN(iphdr)), (u16_t)!!(IPH_OFFSET(iphdr) & PP_HTONS(IP_MF)), (u16_t)((lwip_ntohs(IPH_OFFSET(iphdr)) & IP_OFFMASK) * 8))); + /* reassemble the packet*/ + p = ip4_reass(p); + /* packet not fully reassembled yet? */ + if (p == NULL) { + return ERR_OK; + } + iphdr = (const struct ip_hdr *)p->payload; +#else /* IP_REASSEMBLY == 0, no packet fragment reassembly code present */ + pbuf_free(p); + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IP packet dropped since it was fragmented (0x%"X16_F") (while IP_REASSEMBLY == 0).\n", + lwip_ntohs(IPH_OFFSET(iphdr)))); + IP_STATS_INC(ip.opterr); + IP_STATS_INC(ip.drop); + /* unsupported protocol feature */ + MIB2_STATS_INC(mib2.ipinunknownprotos); + return ERR_OK; +#endif /* IP_REASSEMBLY */ + } + +#if IP_OPTIONS_ALLOWED == 0 /* no support for IP options in the IP header? */ + +#if LWIP_IGMP + /* there is an extra "router alert" option in IGMP messages which we allow for but do not police */ + if ((iphdr_hlen > IP_HLEN) && (IPH_PROTO(iphdr) != IP_PROTO_IGMP)) { +#else + if (iphdr_hlen > IP_HLEN) { +#endif /* LWIP_IGMP */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IP packet dropped since there were IP options (while IP_OPTIONS_ALLOWED == 0).\n")); + pbuf_free(p); + IP_STATS_INC(ip.opterr); + IP_STATS_INC(ip.drop); + /* unsupported protocol feature */ + MIB2_STATS_INC(mib2.ipinunknownprotos); + return ERR_OK; + } +#endif /* IP_OPTIONS_ALLOWED == 0 */ + + /* send to upper layers */ + LWIP_DEBUGF(IP_DEBUG, ("ip4_input: \n")); + ip4_debug_print(p); + LWIP_DEBUGF(IP_DEBUG, ("ip4_input: p->len %"U16_F" p->tot_len %"U16_F"\n", p->len, p->tot_len)); + + ip_data.current_netif = netif; + ip_data.current_input_netif = inp; + ip_data.current_ip4_header = iphdr; + ip_data.current_ip_header_tot_len = IPH_HL_BYTES(iphdr); + +#if LWIP_RAW + /* raw input did not eat the packet? */ + raw_status = raw_input(p, inp); + if (raw_status != RAW_INPUT_EATEN) +#endif /* LWIP_RAW */ + { + pbuf_remove_header(p, iphdr_hlen); /* Move to payload, no check necessary. 
*/ + + switch (IPH_PROTO(iphdr)) { +#if LWIP_UDP + case IP_PROTO_UDP: +#if LWIP_UDPLITE + case IP_PROTO_UDPLITE: +#endif /* LWIP_UDPLITE */ + MIB2_STATS_INC(mib2.ipindelivers); + udp_input(p, inp); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case IP_PROTO_TCP: + MIB2_STATS_INC(mib2.ipindelivers); + tcp_input(p, inp); + break; +#endif /* LWIP_TCP */ +#if LWIP_ICMP + case IP_PROTO_ICMP: + MIB2_STATS_INC(mib2.ipindelivers); + icmp_input(p, inp); + break; +#endif /* LWIP_ICMP */ +#if LWIP_IGMP + case IP_PROTO_IGMP: + igmp_input(p, inp, ip4_current_dest_addr()); + break; +#endif /* LWIP_IGMP */ + default: +#if LWIP_RAW + if (raw_status == RAW_INPUT_DELIVERED) { + MIB2_STATS_INC(mib2.ipindelivers); + } else +#endif /* LWIP_RAW */ + { +#if LWIP_ICMP + /* send ICMP destination protocol unreachable unless is was a broadcast */ + if (!ip4_addr_isbroadcast(ip4_current_dest_addr(), netif) && + !ip4_addr_ismulticast(ip4_current_dest_addr())) { + pbuf_header_force(p, (s16_t)iphdr_hlen); /* Move to ip header, no check necessary. */ + icmp_dest_unreach(p, ICMP_DUR_PROTO); + } +#endif /* LWIP_ICMP */ + + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("Unsupported transport protocol %"U16_F"\n", (u16_t)IPH_PROTO(iphdr))); + + IP_STATS_INC(ip.proterr); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinunknownprotos); + } + pbuf_free(p); + break; + } + } + + /* @todo: this is not really necessary... */ + ip_data.current_netif = NULL; + ip_data.current_input_netif = NULL; + ip_data.current_ip4_header = NULL; + ip_data.current_ip_header_tot_len = 0; + ip4_addr_set_any(ip4_current_src_addr()); + ip4_addr_set_any(ip4_current_dest_addr()); + + return ERR_OK; +} + +/** + * Sends an IP packet on a network interface. This function constructs + * the IP header and calculates the IP header checksum. If the source + * IP address is NULL, the IP address of the outgoing network + * interface is filled in as source address. + * If the destination IP address is LWIP_IP_HDRINCL, p is assumed to already + * include an IP header and p->payload points to it instead of the data. + * + * @param p the packet to send (p->payload points to the data, e.g. 
next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IP header and p->payload points to that IP header) + * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the + * IP address of the netif used to send is used as source address) + * @param dest the destination IP address to send the packet to + * @param ttl the TTL value to be set in the IP header + * @param tos the TOS value to be set in the IP header + * @param proto the PROTOCOL to be set in the IP header + * @param netif the netif on which to send this packet + * @return ERR_OK if the packet was sent OK + * ERR_BUF if p doesn't have enough space for IP/LINK headers + * returns errors returned by netif->output + * + * @note ip_id: RFC791 "some host may be able to simply use + * unique identifiers independent of destination" + */ +err_t +ip4_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, + u8_t proto, struct netif *netif) +{ +#if IP_OPTIONS_SEND + return ip4_output_if_opt(p, src, dest, ttl, tos, proto, netif, NULL, 0); +} + +/** + * Same as ip_output_if() but with the possibility to include IP options: + * + * @ param ip_options pointer to the IP options, copied into the IP header + * @ param optlen length of ip_options + */ +err_t +ip4_output_if_opt(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, + u16_t optlen) +{ +#endif /* IP_OPTIONS_SEND */ + const ip4_addr_t *src_used = src; + if (dest != LWIP_IP_HDRINCL) { + if (ip4_addr_isany(src)) { + src_used = netif_ip4_addr(netif); + } + } + +#if IP_OPTIONS_SEND + return ip4_output_if_opt_src(p, src_used, dest, ttl, tos, proto, netif, + ip_options, optlen); +#else /* IP_OPTIONS_SEND */ + return ip4_output_if_src(p, src_used, dest, ttl, tos, proto, netif); +#endif /* IP_OPTIONS_SEND */ +} + +/** + * Same as ip_output_if() but 'src' address is not replaced by netif address + * when it is 'any'. + */ +err_t +ip4_output_if_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, + u8_t proto, struct netif *netif) +{ +#if IP_OPTIONS_SEND + return ip4_output_if_opt_src(p, src, dest, ttl, tos, proto, netif, NULL, 0); +} + +/** + * Same as ip_output_if_opt() but 'src' address is not replaced by netif address + * when it is 'any'. + */ +err_t +ip4_output_if_opt_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, + u16_t optlen) +{ +#endif /* IP_OPTIONS_SEND */ + struct ip_hdr *iphdr; + ip4_addr_t dest_addr; +#if CHECKSUM_GEN_IP_INLINE + u32_t chk_sum = 0; +#endif /* CHECKSUM_GEN_IP_INLINE */ + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + MIB2_STATS_INC(mib2.ipoutrequests); + + /* Should the IP header be generated or is it already included in p? 
*/ + if (dest != LWIP_IP_HDRINCL) { + u16_t ip_hlen = IP_HLEN; +#if IP_OPTIONS_SEND + u16_t optlen_aligned = 0; + if (optlen != 0) { +#if CHECKSUM_GEN_IP_INLINE + int i; +#endif /* CHECKSUM_GEN_IP_INLINE */ + if (optlen > (IP_HLEN_MAX - IP_HLEN)) { + /* optlen too long */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output_if_opt: optlen too long\n")); + IP_STATS_INC(ip.err); + MIB2_STATS_INC(mib2.ipoutdiscards); + return ERR_VAL; + } + /* round up to a multiple of 4 */ + optlen_aligned = (u16_t)((optlen + 3) & ~3); + ip_hlen = (u16_t)(ip_hlen + optlen_aligned); + /* First write in the IP options */ + if (pbuf_add_header(p, optlen_aligned)) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output_if_opt: not enough room for IP options in pbuf\n")); + IP_STATS_INC(ip.err); + MIB2_STATS_INC(mib2.ipoutdiscards); + return ERR_BUF; + } + MEMCPY(p->payload, ip_options, optlen); + if (optlen < optlen_aligned) { + /* zero the remaining bytes */ + memset(((char *)p->payload) + optlen, 0, (size_t)(optlen_aligned - optlen)); + } +#if CHECKSUM_GEN_IP_INLINE + for (i = 0; i < optlen_aligned / 2; i++) { + chk_sum += ((u16_t *)p->payload)[i]; + } +#endif /* CHECKSUM_GEN_IP_INLINE */ + } +#endif /* IP_OPTIONS_SEND */ + /* generate IP header */ + if (pbuf_add_header(p, IP_HLEN)) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output: not enough room for IP header in pbuf\n")); + + IP_STATS_INC(ip.err); + MIB2_STATS_INC(mib2.ipoutdiscards); + return ERR_BUF; + } + + iphdr = (struct ip_hdr *)p->payload; + LWIP_ASSERT("check that first pbuf can hold struct ip_hdr", + (p->len >= sizeof(struct ip_hdr))); + + IPH_TTL_SET(iphdr, ttl); + IPH_PROTO_SET(iphdr, proto); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += PP_NTOHS(proto | (ttl << 8)); +#endif /* CHECKSUM_GEN_IP_INLINE */ + + /* dest cannot be NULL here */ + ip4_addr_copy(iphdr->dest, *dest); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += ip4_addr_get_u32(&iphdr->dest) & 0xFFFF; + chk_sum += ip4_addr_get_u32(&iphdr->dest) >> 16; +#endif /* CHECKSUM_GEN_IP_INLINE */ + + IPH_VHL_SET(iphdr, 4, ip_hlen / 4); + IPH_TOS_SET(iphdr, tos); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += PP_NTOHS(tos | (iphdr->_v_hl << 8)); +#endif /* CHECKSUM_GEN_IP_INLINE */ + IPH_LEN_SET(iphdr, lwip_htons(p->tot_len)); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += iphdr->_len; +#endif /* CHECKSUM_GEN_IP_INLINE */ + IPH_OFFSET_SET(iphdr, 0); + IPH_ID_SET(iphdr, lwip_htons(ip_id)); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += iphdr->_id; +#endif /* CHECKSUM_GEN_IP_INLINE */ + ++ip_id; + + if (src == NULL) { + ip4_addr_copy(iphdr->src, *IP4_ADDR_ANY4); + } else { + /* src cannot be NULL here */ + ip4_addr_copy(iphdr->src, *src); + } + +#if CHECKSUM_GEN_IP_INLINE + chk_sum += ip4_addr_get_u32(&iphdr->src) & 0xFFFF; + chk_sum += ip4_addr_get_u32(&iphdr->src) >> 16; + chk_sum = (chk_sum >> 16) + (chk_sum & 0xFFFF); + chk_sum = (chk_sum >> 16) + chk_sum; + chk_sum = ~chk_sum; + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) { + iphdr->_chksum = (u16_t)chk_sum; /* network order */ + } +#if LWIP_CHECKSUM_CTRL_PER_NETIF + else { + IPH_CHKSUM_SET(iphdr, 0); + } +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF*/ +#else /* CHECKSUM_GEN_IP_INLINE */ + IPH_CHKSUM_SET(iphdr, 0); +#if CHECKSUM_GEN_IP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) { + IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, ip_hlen)); + } +#endif /* CHECKSUM_GEN_IP */ +#endif /* CHECKSUM_GEN_IP_INLINE */ + } else { + /* IP header already included in p */ + if (p->len < IP_HLEN) { + LWIP_DEBUGF(IP_DEBUG | 
LWIP_DBG_LEVEL_SERIOUS, ("ip4_output: LWIP_IP_HDRINCL but pbuf is too short\n")); + IP_STATS_INC(ip.err); + MIB2_STATS_INC(mib2.ipoutdiscards); + return ERR_BUF; + } + iphdr = (struct ip_hdr *)p->payload; + ip4_addr_copy(dest_addr, iphdr->dest); + dest = &dest_addr; + } + + IP_STATS_INC(ip.xmit); + + LWIP_DEBUGF(IP_DEBUG, ("ip4_output_if: %c%c%"U16_F"\n", netif->name[0], netif->name[1], (u16_t)netif->num)); + ip4_debug_print(p); + +#if ENABLE_LOOPBACK + if (ip4_addr_cmp(dest, netif_ip4_addr(netif)) +#if !LWIP_HAVE_LOOPIF + || ip4_addr_isloopback(dest) +#endif /* !LWIP_HAVE_LOOPIF */ + ) { + /* Packet to self, enqueue it for loopback */ + LWIP_DEBUGF(IP_DEBUG, ("netif_loop_output()")); + return netif_loop_output(netif, p); + } +#if LWIP_MULTICAST_TX_OPTIONS + if ((p->flags & PBUF_FLAG_MCASTLOOP) != 0) { + netif_loop_output(netif, p); + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ +#endif /* ENABLE_LOOPBACK */ +#if IP_FRAG + /* don't fragment if interface has mtu set to 0 [loopif] */ + if (netif->mtu && (p->tot_len > netif->mtu)) { + return ip4_frag(p, netif, dest); + } +#endif /* IP_FRAG */ + + LWIP_DEBUGF(IP_DEBUG, ("ip4_output_if: call netif->output()\n")); + return netif->output(netif, p, dest); +} + +/** + * Simple interface to ip_output_if. It finds the outgoing network + * interface and calls upon ip_output_if to do the actual work. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IP header and p->payload points to that IP header) + * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the + * IP address of the netif used to send is used as source address) + * @param dest the destination IP address to send the packet to + * @param ttl the TTL value to be set in the IP header + * @param tos the TOS value to be set in the IP header + * @param proto the PROTOCOL to be set in the IP header + * + * @return ERR_RTE if no route is found + * see ip_output_if() for more return values + */ +err_t +ip4_output(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto) +{ + struct netif *netif; + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + if ((netif = ip4_route_src(src, dest)) == NULL) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_output: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest))); + IP_STATS_INC(ip.rterr); + return ERR_RTE; + } + + return ip4_output_if(p, src, dest, ttl, tos, proto, netif); +} + +#if LWIP_NETIF_USE_HINTS +/** Like ip_output, but takes and addr_hint pointer that is passed on to netif->addr_hint + * before calling ip_output_if. + * + * @param p the packet to send (p->payload points to the data, e.g. 
next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IP header and p->payload points to that IP header) + * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the + * IP address of the netif used to send is used as source address) + * @param dest the destination IP address to send the packet to + * @param ttl the TTL value to be set in the IP header + * @param tos the TOS value to be set in the IP header + * @param proto the PROTOCOL to be set in the IP header + * @param netif_hint netif output hint pointer set to netif->hint before + * calling ip_output_if() + * + * @return ERR_RTE if no route is found + * see ip_output_if() for more return values + */ +err_t +ip4_output_hinted(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif_hint *netif_hint) +{ + struct netif *netif; + err_t err; + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + if ((netif = ip4_route_src(src, dest)) == NULL) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_output: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest))); + IP_STATS_INC(ip.rterr); + return ERR_RTE; + } + + NETIF_SET_HINTS(netif, netif_hint); + err = ip4_output_if(p, src, dest, ttl, tos, proto, netif); + NETIF_RESET_HINTS(netif); + + return err; +} +#endif /* LWIP_NETIF_USE_HINTS*/ + +#if IP_DEBUG +/* Print an IP header by using LWIP_DEBUGF + * @param p an IP packet, p->payload pointing to the IP header + */ +void +ip4_debug_print(struct pbuf *p) +{ + struct ip_hdr *iphdr = (struct ip_hdr *)p->payload; + + LWIP_DEBUGF(IP_DEBUG, ("IP header:\n")); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("|%2"S16_F" |%2"S16_F" | 0x%02"X16_F" | %5"U16_F" | (v, hl, tos, len)\n", + (u16_t)IPH_V(iphdr), + (u16_t)IPH_HL(iphdr), + (u16_t)IPH_TOS(iphdr), + lwip_ntohs(IPH_LEN(iphdr)))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("| %5"U16_F" |%"U16_F"%"U16_F"%"U16_F"| %4"U16_F" | (id, flags, offset)\n", + lwip_ntohs(IPH_ID(iphdr)), + (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 15 & 1), + (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 14 & 1), + (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 13 & 1), + (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) & IP_OFFMASK))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | 0x%04"X16_F" | (ttl, proto, chksum)\n", + (u16_t)IPH_TTL(iphdr), + (u16_t)IPH_PROTO(iphdr), + lwip_ntohs(IPH_CHKSUM(iphdr)))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | %3"U16_F" | %3"U16_F" | (src)\n", + ip4_addr1_16_val(iphdr->src), + ip4_addr2_16_val(iphdr->src), + ip4_addr3_16_val(iphdr->src), + ip4_addr4_16_val(iphdr->src))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | %3"U16_F" | %3"U16_F" | (dest)\n", + ip4_addr1_16_val(iphdr->dest), + ip4_addr2_16_val(iphdr->dest), + ip4_addr3_16_val(iphdr->dest), + ip4_addr4_16_val(iphdr->dest))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); +} +#endif /* IP_DEBUG */ + +#endif /* LWIP_IPV4 */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c b/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c new file mode 100644 index 0000000..33204d1 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c @@ -0,0 +1,321 @@ +/** + * @file 
+ * This is the IPv4 address tools implementation. + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 + +#include "lwip/ip_addr.h" +#include "lwip/netif.h" + +/* used by IP4_ADDR_ANY and IP_ADDR_BROADCAST in ip_addr.h */ +const ip_addr_t ip_addr_any = IPADDR4_INIT(IPADDR_ANY); +const ip_addr_t ip_addr_broadcast = IPADDR4_INIT(IPADDR_BROADCAST); + +/** + * Determine if an address is a broadcast address on a network interface + * + * @param addr address to be checked + * @param netif the network interface against which the address is checked + * @return returns non-zero if the address is a broadcast address + */ +u8_t +ip4_addr_isbroadcast_u32(u32_t addr, const struct netif *netif) +{ + ip4_addr_t ipaddr; + ip4_addr_set_u32(&ipaddr, addr); + + /* all ones (broadcast) or all zeroes (old skool broadcast) */ + if ((~addr == IPADDR_ANY) || + (addr == IPADDR_ANY)) { + return 1; + /* no broadcast support on this network interface? */ + } else if ((netif->flags & NETIF_FLAG_BROADCAST) == 0) { + /* the given address cannot be a broadcast address + * nor can we check against any broadcast addresses */ + return 0; + /* address matches network interface address exactly? => no broadcast */ + } else if (addr == ip4_addr_get_u32(netif_ip4_addr(netif))) { + return 0; + /* on the same (sub) network... */ + } else if (ip4_addr_netcmp(&ipaddr, netif_ip4_addr(netif), netif_ip4_netmask(netif)) + /* ...and host identifier bits are all ones? =>... */ + && ((addr & ~ip4_addr_get_u32(netif_ip4_netmask(netif))) == + (IPADDR_BROADCAST & ~ip4_addr_get_u32(netif_ip4_netmask(netif))))) { + /* => network broadcast address */ + return 1; + } else { + return 0; + } +} + +/** Checks if a netmask is valid (starting with ones, then only zeros) + * + * @param netmask the IPv4 netmask to check (in network byte order!) 
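+ *        For example (mask values are illustrative):
+ *        ip4_addr_netmask_valid(PP_HTONL(0xFFFFFF00UL)) returns 1 (255.255.255.0),
+ *        ip4_addr_netmask_valid(PP_HTONL(0xFF00FF00UL)) returns 0 (255.0.255.0 has a hole)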
+ * @return 1 if the netmask is valid, 0 if it is not + */ +u8_t +ip4_addr_netmask_valid(u32_t netmask) +{ + u32_t mask; + u32_t nm_hostorder = lwip_htonl(netmask); + + /* first, check for the first zero */ + for (mask = 1UL << 31 ; mask != 0; mask >>= 1) { + if ((nm_hostorder & mask) == 0) { + break; + } + } + /* then check that there is no one */ + for (; mask != 0; mask >>= 1) { + if ((nm_hostorder & mask) != 0) { + /* there is a one after the first zero -> invalid */ + return 0; + } + } + /* no one after the first zero -> valid */ + return 1; +} + +/** + * Ascii internet address interpretation routine. + * The value returned is in network order. + * + * @param cp IP address in ascii representation (e.g. "127.0.0.1") + * @return ip address in network order + */ +u32_t +ipaddr_addr(const char *cp) +{ + ip4_addr_t val; + + if (ip4addr_aton(cp, &val)) { + return ip4_addr_get_u32(&val); + } + return (IPADDR_NONE); +} + +/** + * Check whether "cp" is a valid ascii representation + * of an Internet address and convert to a binary address. + * Returns 1 if the address is valid, 0 if not. + * This replaces inet_addr, the return value from which + * cannot distinguish between failure and a local broadcast address. + * + * @param cp IP address in ascii representation (e.g. "127.0.0.1") + * @param addr pointer to which to save the ip address in network order + * @return 1 if cp could be converted to addr, 0 on failure + */ +int +ip4addr_aton(const char *cp, ip4_addr_t *addr) +{ + u32_t val; + u8_t base; + char c; + u32_t parts[4]; + u32_t *pp = parts; + + c = *cp; + for (;;) { + /* + * Collect number up to ``.''. + * Values are specified as for C: + * 0x=hex, 0=octal, 1-9=decimal. + */ + if (!lwip_isdigit(c)) { + return 0; + } + val = 0; + base = 10; + if (c == '0') { + c = *++cp; + if (c == 'x' || c == 'X') { + base = 16; + c = *++cp; + } else { + base = 8; + } + } + for (;;) { + if (lwip_isdigit(c)) { + val = (val * base) + (u32_t)(c - '0'); + c = *++cp; + } else if (base == 16 && lwip_isxdigit(c)) { + val = (val << 4) | (u32_t)(c + 10 - (lwip_islower(c) ? 'a' : 'A')); + c = *++cp; + } else { + break; + } + } + if (c == '.') { + /* + * Internet format: + * a.b.c.d + * a.b.c (with c treated as 16 bits) + * a.b (with b treated as 24 bits) + */ + if (pp >= parts + 3) { + return 0; + } + *pp++ = val; + c = *++cp; + } else { + break; + } + } + /* + * Check for trailing characters. + */ + if (c != '\0' && !lwip_isspace(c)) { + return 0; + } + /* + * Concoct the address according to + * the number of parts specified. + */ + switch (pp - parts + 1) { + + case 0: + return 0; /* initial nondigit */ + + case 1: /* a -- 32 bits */ + break; + + case 2: /* a.b -- 8.24 bits */ + if (val > 0xffffffUL) { + return 0; + } + if (parts[0] > 0xff) { + return 0; + } + val |= parts[0] << 24; + break; + + case 3: /* a.b.c -- 8.8.16 bits */ + if (val > 0xffff) { + return 0; + } + if ((parts[0] > 0xff) || (parts[1] > 0xff)) { + return 0; + } + val |= (parts[0] << 24) | (parts[1] << 16); + break; + + case 4: /* a.b.c.d -- 8.8.8.8 bits */ + if (val > 0xff) { + return 0; + } + if ((parts[0] > 0xff) || (parts[1] > 0xff) || (parts[2] > 0xff)) { + return 0; + } + val |= (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8); + break; + default: + LWIP_ASSERT("unhandled", 0); + break; + } + if (addr) { + ip4_addr_set_u32(addr, lwip_htonl(val)); + } + return 1; +} + +/** + * Convert numeric IP address into decimal dotted ASCII representation. + * returns ptr to static buffer; not reentrant! 
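+ * A minimal usage sketch (the address value is illustrative):
+ *
+ *   ip4_addr_t a;
+ *   IP4_ADDR(&a, 192, 168, 0, 1);
+ *   const char *s = ip4addr_ntoa(&a);   (s now points to "192.168.0.1")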
+ * + * @param addr ip address in network order to convert + * @return pointer to a global static (!) buffer that holds the ASCII + * representation of addr + */ +char * +ip4addr_ntoa(const ip4_addr_t *addr) +{ + static char str[IP4ADDR_STRLEN_MAX]; + return ip4addr_ntoa_r(addr, str, IP4ADDR_STRLEN_MAX); +} + +/** + * Same as ip4addr_ntoa, but reentrant since a user-supplied buffer is used. + * + * @param addr ip address in network order to convert + * @param buf target buffer where the string is stored + * @param buflen length of buf + * @return either pointer to buf which now holds the ASCII + * representation of addr or NULL if buf was too small + */ +char * +ip4addr_ntoa_r(const ip4_addr_t *addr, char *buf, int buflen) +{ + u32_t s_addr; + char inv[3]; + char *rp; + u8_t *ap; + u8_t rem; + u8_t n; + u8_t i; + int len = 0; + + s_addr = ip4_addr_get_u32(addr); + + rp = buf; + ap = (u8_t *)&s_addr; + for (n = 0; n < 4; n++) { + i = 0; + do { + rem = *ap % (u8_t)10; + *ap /= (u8_t)10; + inv[i++] = (char)('0' + rem); + } while (*ap); + while (i--) { + if (len++ >= buflen) { + return NULL; + } + *rp++ = inv[i]; + } + if (len++ >= buflen) { + return NULL; + } + *rp++ = '.'; + ap++; + } + *--rp = 0; + return buf; +} + +#endif /* LWIP_IPV4 */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c b/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c new file mode 100644 index 0000000..a445530 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c @@ -0,0 +1,894 @@ +/** + * @file + * This is the IPv4 packet segmentation and reassembly implementation. + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Jani Monoses + * Simon Goldschmidt + * original reassembly code by Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 + +#include "lwip/ip4_frag.h" +#include "lwip/def.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/stats.h" +#include "lwip/icmp.h" + +#include + +#if IP_REASSEMBLY +/** + * The IP reassembly code currently has the following limitations: + * - IP header options are not supported + * - fragments must not overlap (e.g. due to different routes), + * currently, overlapping or duplicate fragments are thrown away + * if IP_REASS_CHECK_OVERLAP=1 (the default)! + * + * @todo: work with IP header options + */ + +/** Setting this to 0, you can turn off checking the fragments for overlapping + * regions. The code gets a little smaller. Only use this if you know that + * overlapping won't occur on your network! */ +#ifndef IP_REASS_CHECK_OVERLAP +#define IP_REASS_CHECK_OVERLAP 1 +#endif /* IP_REASS_CHECK_OVERLAP */ + +/** Set to 0 to prevent freeing the oldest datagram when the reassembly buffer is + * full (IP_REASS_MAX_PBUFS pbufs are enqueued). The code gets a little smaller. + * Datagrams will be freed by timeout only. Especially useful when MEMP_NUM_REASSDATA + * is set to 1, so one datagram can be reassembled at a time, only. */ +#ifndef IP_REASS_FREE_OLDEST +#define IP_REASS_FREE_OLDEST 1 +#endif /* IP_REASS_FREE_OLDEST */ + +#define IP_REASS_FLAG_LASTFRAG 0x01 + +#define IP_REASS_VALIDATE_TELEGRAM_FINISHED 1 +#define IP_REASS_VALIDATE_PBUF_QUEUED 0 +#define IP_REASS_VALIDATE_PBUF_DROPPED -1 + +/** This is a helper struct which holds the starting + * offset and the ending offset of this fragment to + * easily chain the fragments. + * It has the same packing requirements as the IP header, since it replaces + * the IP header in memory in incoming fragments (after copying it) to keep + * track of the various fragments. (-> If the IP header doesn't need packing, + * this struct doesn't need packing, too.) + */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip_reass_helper { + PACK_STRUCT_FIELD(struct pbuf *next_pbuf); + PACK_STRUCT_FIELD(u16_t start); + PACK_STRUCT_FIELD(u16_t end); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define IP_ADDRESSES_AND_ID_MATCH(iphdrA, iphdrB) \ + (ip4_addr_cmp(&(iphdrA)->src, &(iphdrB)->src) && \ + ip4_addr_cmp(&(iphdrA)->dest, &(iphdrB)->dest) && \ + IPH_ID(iphdrA) == IPH_ID(iphdrB)) ? 1 : 0 + +/* global variables */ +static struct ip_reassdata *reassdatagrams; +static u16_t ip_reass_pbufcount; + +/* function prototypes */ +static void ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev); +static int ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev); + +/** + * Reassembly timer base function + * for both NO_SYS == 0 and 1 (!). + * + * Should be called every 1000 msec (defined by IP_TMR_INTERVAL). + */ +void +ip_reass_tmr(void) +{ + struct ip_reassdata *r, *prev = NULL; + + r = reassdatagrams; + while (r != NULL) { + /* Decrement the timer. 
Once it reaches 0, + * clean up the incomplete fragment assembly */ + if (r->timer > 0) { + r->timer--; + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer dec %"U16_F"\n", (u16_t)r->timer)); + prev = r; + r = r->next; + } else { + /* reassembly timed out */ + struct ip_reassdata *tmp; + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer timed out\n")); + tmp = r; + /* get the next pointer before freeing */ + r = r->next; + /* free the helper struct and all enqueued pbufs */ + ip_reass_free_complete_datagram(tmp, prev); + } + } +} + +/** + * Free a datagram (struct ip_reassdata) and all its pbufs. + * Updates the total count of enqueued pbufs (ip_reass_pbufcount), + * SNMP counters and sends an ICMP time exceeded packet. + * + * @param ipr datagram to free + * @param prev the previous datagram in the linked list + * @return the number of pbufs freed + */ +static int +ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev) +{ + u16_t pbufs_freed = 0; + u16_t clen; + struct pbuf *p; + struct ip_reass_helper *iprh; + + LWIP_ASSERT("prev != ipr", prev != ipr); + if (prev != NULL) { + LWIP_ASSERT("prev->next == ipr", prev->next == ipr); + } + + MIB2_STATS_INC(mib2.ipreasmfails); +#if LWIP_ICMP + iprh = (struct ip_reass_helper *)ipr->p->payload; + if (iprh->start == 0) { + /* The first fragment was received, send ICMP time exceeded. */ + /* First, de-queue the first pbuf from r->p. */ + p = ipr->p; + ipr->p = iprh->next_pbuf; + /* Then, copy the original header into it. */ + SMEMCPY(p->payload, &ipr->iphdr, IP_HLEN); + icmp_time_exceeded(p, ICMP_TE_FRAG); + clen = pbuf_clen(p); + LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); + pbufs_freed = (u16_t)(pbufs_freed + clen); + pbuf_free(p); + } +#endif /* LWIP_ICMP */ + + /* First, free all received pbufs. The individual pbufs need to be released + separately as they have not yet been chained */ + p = ipr->p; + while (p != NULL) { + struct pbuf *pcur; + iprh = (struct ip_reass_helper *)p->payload; + pcur = p; + /* get the next pointer before freeing */ + p = iprh->next_pbuf; + clen = pbuf_clen(pcur); + LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); + pbufs_freed = (u16_t)(pbufs_freed + clen); + pbuf_free(pcur); + } + /* Then, unchain the struct ip_reassdata from the list and free it. */ + ip_reass_dequeue_datagram(ipr, prev); + LWIP_ASSERT("ip_reass_pbufcount >= pbufs_freed", ip_reass_pbufcount >= pbufs_freed); + ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount - pbufs_freed); + + return pbufs_freed; +} + +#if IP_REASS_FREE_OLDEST +/** + * Free the oldest datagram to make room for enqueueing new fragments. + * The datagram 'fraghdr' belongs to is not freed! + * + * @param fraghdr IP header of the current fragment + * @param pbufs_needed number of pbufs needed to enqueue + * (used for freeing other datagrams if not enough space) + * @return the number of pbufs freed + */ +static int +ip_reass_remove_oldest_datagram(struct ip_hdr *fraghdr, int pbufs_needed) +{ + /* @todo Can't we simply remove the last datagram in the + * linked list behind reassdatagrams? + */ + struct ip_reassdata *r, *oldest, *prev, *oldest_prev; + int pbufs_freed = 0, pbufs_freed_current; + int other_datagrams; + + /* Free datagrams until being allowed to enqueue 'pbufs_needed' pbufs, + * but don't free the datagram that 'fraghdr' belongs to! 
*/ + do { + oldest = NULL; + prev = NULL; + oldest_prev = NULL; + other_datagrams = 0; + r = reassdatagrams; + while (r != NULL) { + if (!IP_ADDRESSES_AND_ID_MATCH(&r->iphdr, fraghdr)) { + /* Not the same datagram as fraghdr */ + other_datagrams++; + if (oldest == NULL) { + oldest = r; + oldest_prev = prev; + } else if (r->timer <= oldest->timer) { + /* older than the previous oldest */ + oldest = r; + oldest_prev = prev; + } + } + if (r->next != NULL) { + prev = r; + } + r = r->next; + } + if (oldest != NULL) { + pbufs_freed_current = ip_reass_free_complete_datagram(oldest, oldest_prev); + pbufs_freed += pbufs_freed_current; + } + } while ((pbufs_freed < pbufs_needed) && (other_datagrams > 1)); + return pbufs_freed; +} +#endif /* IP_REASS_FREE_OLDEST */ + +/** + * Enqueues a new fragment into the fragment queue + * @param fraghdr points to the new fragments IP hdr + * @param clen number of pbufs needed to enqueue (used for freeing other datagrams if not enough space) + * @return A pointer to the queue location into which the fragment was enqueued + */ +static struct ip_reassdata * +ip_reass_enqueue_new_datagram(struct ip_hdr *fraghdr, int clen) +{ + struct ip_reassdata *ipr; +#if ! IP_REASS_FREE_OLDEST + LWIP_UNUSED_ARG(clen); +#endif + + /* No matching previous fragment found, allocate a new reassdata struct */ + ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA); + if (ipr == NULL) { +#if IP_REASS_FREE_OLDEST + if (ip_reass_remove_oldest_datagram(fraghdr, clen) >= clen) { + ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA); + } + if (ipr == NULL) +#endif /* IP_REASS_FREE_OLDEST */ + { + IPFRAG_STATS_INC(ip_frag.memerr); + LWIP_DEBUGF(IP_REASS_DEBUG, ("Failed to alloc reassdata struct\n")); + return NULL; + } + } + memset(ipr, 0, sizeof(struct ip_reassdata)); + ipr->timer = IP_REASS_MAXAGE; + + /* enqueue the new structure to the front of the list */ + ipr->next = reassdatagrams; + reassdatagrams = ipr; + /* copy the ip header for later tests and input */ + /* @todo: no ip options supported? */ + SMEMCPY(&(ipr->iphdr), fraghdr, IP_HLEN); + return ipr; +} + +/** + * Dequeues a datagram from the datagram queue. Doesn't deallocate the pbufs. + * @param ipr points to the queue entry to dequeue + */ +static void +ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev) +{ + /* dequeue the reass struct */ + if (reassdatagrams == ipr) { + /* it was the first in the list */ + reassdatagrams = ipr->next; + } else { + /* it wasn't the first, so it must have a valid 'prev' */ + LWIP_ASSERT("sanity check linked list", prev != NULL); + prev->next = ipr->next; + } + + /* now we can free the ip_reassdata struct */ + memp_free(MEMP_REASSDATA, ipr); +} + +/** + * Chain a new pbuf into the pbuf list that composes the datagram. The pbuf list + * will grow over time as new pbufs are rx. + * Also checks that the datagram passes basic continuity checks (if the last + * fragment was received at least once). 
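+ * For example (offsets are illustrative), a fragment carrying bytes
+ * 1480..2959 of the original datagram ends up with iprh->start == 1480
+ * (its IPH_OFFSET in bytes) and iprh->end == 2960 (start + payload length)
+ * before being linked into the offset-sorted pbuf list.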
+ * @param ipr points to the reassembly state + * @param new_p points to the pbuf for the current fragment + * @param is_last is 1 if this pbuf has MF==0 (ipr->flags not updated yet) + * @return see IP_REASS_VALIDATE_* defines + */ +static int +ip_reass_chain_frag_into_datagram_and_validate(struct ip_reassdata *ipr, struct pbuf *new_p, int is_last) +{ + struct ip_reass_helper *iprh, *iprh_tmp, *iprh_prev = NULL; + struct pbuf *q; + u16_t offset, len; + u8_t hlen; + struct ip_hdr *fraghdr; + int valid = 1; + + /* Extract length and fragment offset from current fragment */ + fraghdr = (struct ip_hdr *)new_p->payload; + len = lwip_ntohs(IPH_LEN(fraghdr)); + hlen = IPH_HL_BYTES(fraghdr); + if (hlen > len) { + /* invalid datagram */ + return IP_REASS_VALIDATE_PBUF_DROPPED; + } + len = (u16_t)(len - hlen); + offset = IPH_OFFSET_BYTES(fraghdr); + + /* overwrite the fragment's ip header from the pbuf with our helper struct, + * and setup the embedded helper structure. */ + /* make sure the struct ip_reass_helper fits into the IP header */ + LWIP_ASSERT("sizeof(struct ip_reass_helper) <= IP_HLEN", + sizeof(struct ip_reass_helper) <= IP_HLEN); + iprh = (struct ip_reass_helper *)new_p->payload; + iprh->next_pbuf = NULL; + iprh->start = offset; + iprh->end = (u16_t)(offset + len); + if (iprh->end < offset) { + /* u16_t overflow, cannot handle this */ + return IP_REASS_VALIDATE_PBUF_DROPPED; + } + + /* Iterate through until we either get to the end of the list (append), + * or we find one with a larger offset (insert). */ + for (q = ipr->p; q != NULL;) { + iprh_tmp = (struct ip_reass_helper *)q->payload; + if (iprh->start < iprh_tmp->start) { + /* the new pbuf should be inserted before this */ + iprh->next_pbuf = q; + if (iprh_prev != NULL) { + /* not the fragment with the lowest offset */ +#if IP_REASS_CHECK_OVERLAP + if ((iprh->start < iprh_prev->end) || (iprh->end > iprh_tmp->start)) { + /* fragment overlaps with previous or following, throw away */ + return IP_REASS_VALIDATE_PBUF_DROPPED; + } +#endif /* IP_REASS_CHECK_OVERLAP */ + iprh_prev->next_pbuf = new_p; + if (iprh_prev->end != iprh->start) { + /* There is a fragment missing between the current + * and the previous fragment */ + valid = 0; + } + } else { +#if IP_REASS_CHECK_OVERLAP + if (iprh->end > iprh_tmp->start) { + /* fragment overlaps with following, throw away */ + return IP_REASS_VALIDATE_PBUF_DROPPED; + } +#endif /* IP_REASS_CHECK_OVERLAP */ + /* fragment with the lowest offset */ + ipr->p = new_p; + } + break; + } else if (iprh->start == iprh_tmp->start) { + /* received the same datagram twice: no need to keep the datagram */ + return IP_REASS_VALIDATE_PBUF_DROPPED; +#if IP_REASS_CHECK_OVERLAP + } else if (iprh->start < iprh_tmp->end) { + /* overlap: no need to keep the new datagram */ + return IP_REASS_VALIDATE_PBUF_DROPPED; +#endif /* IP_REASS_CHECK_OVERLAP */ + } else { + /* Check if the fragments received so far have no holes. */ + if (iprh_prev != NULL) { + if (iprh_prev->end != iprh_tmp->start) { + /* There is a fragment missing between the current + * and the previous fragment */ + valid = 0; + } + } + } + q = iprh_tmp->next_pbuf; + iprh_prev = iprh_tmp; + } + + /* If q is NULL, then we made it to the end of the list. 
Determine what to do now */ + if (q == NULL) { + if (iprh_prev != NULL) { + /* this is (for now), the fragment with the highest offset: + * chain it to the last fragment */ +#if IP_REASS_CHECK_OVERLAP + LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= iprh->start); +#endif /* IP_REASS_CHECK_OVERLAP */ + iprh_prev->next_pbuf = new_p; + if (iprh_prev->end != iprh->start) { + valid = 0; + } + } else { +#if IP_REASS_CHECK_OVERLAP + LWIP_ASSERT("no previous fragment, this must be the first fragment!", + ipr->p == NULL); +#endif /* IP_REASS_CHECK_OVERLAP */ + /* this is the first fragment we ever received for this ip datagram */ + ipr->p = new_p; + } + } + + /* At this point, the validation part begins: */ + /* If we already received the last fragment */ + if (is_last || ((ipr->flags & IP_REASS_FLAG_LASTFRAG) != 0)) { + /* and had no holes so far */ + if (valid) { + /* then check if the rest of the fragments is here */ + /* Check if the queue starts with the first datagram */ + if ((ipr->p == NULL) || (((struct ip_reass_helper *)ipr->p->payload)->start != 0)) { + valid = 0; + } else { + /* and check that there are no holes after this datagram */ + iprh_prev = iprh; + q = iprh->next_pbuf; + while (q != NULL) { + iprh = (struct ip_reass_helper *)q->payload; + if (iprh_prev->end != iprh->start) { + valid = 0; + break; + } + iprh_prev = iprh; + q = iprh->next_pbuf; + } + /* if still valid, all fragments are received + * (because to the MF==0 already arrived */ + if (valid) { + LWIP_ASSERT("sanity check", ipr->p != NULL); + LWIP_ASSERT("sanity check", + ((struct ip_reass_helper *)ipr->p->payload) != iprh); + LWIP_ASSERT("validate_datagram:next_pbuf!=NULL", + iprh->next_pbuf == NULL); + } + } + } + /* If valid is 0 here, there are some fragments missing in the middle + * (since MF == 0 has already arrived). Such datagrams simply time out if + * no more fragments are received... */ + return valid ? IP_REASS_VALIDATE_TELEGRAM_FINISHED : IP_REASS_VALIDATE_PBUF_QUEUED; + } + /* If we come here, not all fragments were received, yet! */ + return IP_REASS_VALIDATE_PBUF_QUEUED; /* not yet valid! */ +} + +/** + * Reassembles incoming IP fragments into an IP datagram. + * + * @param p points to a pbuf chain of the fragment + * @return NULL if reassembly is incomplete, ? otherwise + */ +struct pbuf * +ip4_reass(struct pbuf *p) +{ + struct pbuf *r; + struct ip_hdr *fraghdr; + struct ip_reassdata *ipr; + struct ip_reass_helper *iprh; + u16_t offset, len, clen; + u8_t hlen; + int valid; + int is_last; + + IPFRAG_STATS_INC(ip_frag.recv); + MIB2_STATS_INC(mib2.ipreasmreqds); + + fraghdr = (struct ip_hdr *)p->payload; + + if (IPH_HL_BYTES(fraghdr) != IP_HLEN) { + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: IP options currently not supported!\n")); + IPFRAG_STATS_INC(ip_frag.err); + goto nullreturn; + } + + offset = IPH_OFFSET_BYTES(fraghdr); + len = lwip_ntohs(IPH_LEN(fraghdr)); + hlen = IPH_HL_BYTES(fraghdr); + if (hlen > len) { + /* invalid datagram */ + goto nullreturn; + } + len = (u16_t)(len - hlen); + + /* Check if we are allowed to enqueue more datagrams. 
*/ + clen = pbuf_clen(p); + if ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) { +#if IP_REASS_FREE_OLDEST + if (!ip_reass_remove_oldest_datagram(fraghdr, clen) || + ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS)) +#endif /* IP_REASS_FREE_OLDEST */ + { + /* No datagram could be freed and still too many pbufs enqueued */ + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: Overflow condition: pbufct=%d, clen=%d, MAX=%d\n", + ip_reass_pbufcount, clen, IP_REASS_MAX_PBUFS)); + IPFRAG_STATS_INC(ip_frag.memerr); + /* @todo: send ICMP time exceeded here? */ + /* drop this pbuf */ + goto nullreturn; + } + } + + /* Look for the datagram the fragment belongs to in the current datagram queue, + * remembering the previous in the queue for later dequeueing. */ + for (ipr = reassdatagrams; ipr != NULL; ipr = ipr->next) { + /* Check if the incoming fragment matches the one currently present + in the reassembly buffer. If so, we proceed with copying the + fragment into the buffer. */ + if (IP_ADDRESSES_AND_ID_MATCH(&ipr->iphdr, fraghdr)) { + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: matching previous fragment ID=%"X16_F"\n", + lwip_ntohs(IPH_ID(fraghdr)))); + IPFRAG_STATS_INC(ip_frag.cachehit); + break; + } + } + + if (ipr == NULL) { + /* Enqueue a new datagram into the datagram queue */ + ipr = ip_reass_enqueue_new_datagram(fraghdr, clen); + /* Bail if unable to enqueue */ + if (ipr == NULL) { + goto nullreturn; + } + } else { + if (((lwip_ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) == 0) && + ((lwip_ntohs(IPH_OFFSET(&ipr->iphdr)) & IP_OFFMASK) != 0)) { + /* ipr->iphdr is not the header from the first fragment, but fraghdr is + * -> copy fraghdr into ipr->iphdr since we want to have the header + * of the first fragment (for ICMP time exceeded and later, for copying + * all options, if supported)*/ + SMEMCPY(&ipr->iphdr, fraghdr, IP_HLEN); + } + } + + /* At this point, we have either created a new entry or pointing + * to an existing one */ + + /* check for 'no more fragments', and update queue entry*/ + is_last = (IPH_OFFSET(fraghdr) & PP_NTOHS(IP_MF)) == 0; + if (is_last) { + u16_t datagram_len = (u16_t)(offset + len); + if ((datagram_len < offset) || (datagram_len > (0xFFFF - IP_HLEN))) { + /* u16_t overflow, cannot handle this */ + goto nullreturn_ipr; + } + } + /* find the right place to insert this pbuf */ + /* @todo: trim pbufs if fragments are overlapping */ + valid = ip_reass_chain_frag_into_datagram_and_validate(ipr, p, is_last); + if (valid == IP_REASS_VALIDATE_PBUF_DROPPED) { + goto nullreturn_ipr; + } + /* if we come here, the pbuf has been enqueued */ + + /* Track the current number of pbufs current 'in-flight', in order to limit + the number of fragments that may be enqueued at any one time + (overflow checked by testing against IP_REASS_MAX_PBUFS) */ + ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount + clen); + if (is_last) { + u16_t datagram_len = (u16_t)(offset + len); + ipr->datagram_len = datagram_len; + ipr->flags |= IP_REASS_FLAG_LASTFRAG; + LWIP_DEBUGF(IP_REASS_DEBUG, + ("ip4_reass: last fragment seen, total len %"S16_F"\n", + ipr->datagram_len)); + } + + if (valid == IP_REASS_VALIDATE_TELEGRAM_FINISHED) { + struct ip_reassdata *ipr_prev; + /* the totally last fragment (flag more fragments = 0) was received at least + * once AND all fragments are received */ + u16_t datagram_len = (u16_t)(ipr->datagram_len + IP_HLEN); + + /* save the second pbuf before copying the header over the pointer */ + r = ((struct ip_reass_helper *)ipr->p->payload)->next_pbuf; + + /* copy the original ip header back 
to the first pbuf */ + fraghdr = (struct ip_hdr *)(ipr->p->payload); + SMEMCPY(fraghdr, &ipr->iphdr, IP_HLEN); + IPH_LEN_SET(fraghdr, lwip_htons(datagram_len)); + IPH_OFFSET_SET(fraghdr, 0); + IPH_CHKSUM_SET(fraghdr, 0); + /* @todo: do we need to set/calculate the correct checksum? */ +#if CHECKSUM_GEN_IP + IF__NETIF_CHECKSUM_ENABLED(ip_current_input_netif(), NETIF_CHECKSUM_GEN_IP) { + IPH_CHKSUM_SET(fraghdr, inet_chksum(fraghdr, IP_HLEN)); + } +#endif /* CHECKSUM_GEN_IP */ + + p = ipr->p; + + /* chain together the pbufs contained within the reass_data list. */ + while (r != NULL) { + iprh = (struct ip_reass_helper *)r->payload; + + /* hide the ip header for every succeeding fragment */ + pbuf_remove_header(r, IP_HLEN); + pbuf_cat(p, r); + r = iprh->next_pbuf; + } + + /* find the previous entry in the linked list */ + if (ipr == reassdatagrams) { + ipr_prev = NULL; + } else { + for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) { + if (ipr_prev->next == ipr) { + break; + } + } + } + + /* release the sources allocate for the fragment queue entry */ + ip_reass_dequeue_datagram(ipr, ipr_prev); + + /* and adjust the number of pbufs currently queued for reassembly. */ + clen = pbuf_clen(p); + LWIP_ASSERT("ip_reass_pbufcount >= clen", ip_reass_pbufcount >= clen); + ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount - clen); + + MIB2_STATS_INC(mib2.ipreasmoks); + + /* Return the pbuf chain */ + return p; + } + /* the datagram is not (yet?) reassembled completely */ + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_pbufcount: %d out\n", ip_reass_pbufcount)); + return NULL; + +nullreturn_ipr: + LWIP_ASSERT("ipr != NULL", ipr != NULL); + if (ipr->p == NULL) { + /* dropped pbuf after creating a new datagram entry: remove the entry, too */ + LWIP_ASSERT("not firstalthough just enqueued", ipr == reassdatagrams); + ip_reass_dequeue_datagram(ipr, NULL); + } + +nullreturn: + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: nullreturn\n")); + IPFRAG_STATS_INC(ip_frag.drop); + pbuf_free(p); + return NULL; +} +#endif /* IP_REASSEMBLY */ + +#if IP_FRAG +#if !LWIP_NETIF_TX_SINGLE_PBUF +/** Allocate a new struct pbuf_custom_ref */ +static struct pbuf_custom_ref * +ip_frag_alloc_pbuf_custom_ref(void) +{ + return (struct pbuf_custom_ref *)memp_malloc(MEMP_FRAG_PBUF); +} + +/** Free a struct pbuf_custom_ref */ +static void +ip_frag_free_pbuf_custom_ref(struct pbuf_custom_ref *p) +{ + LWIP_ASSERT("p != NULL", p != NULL); + memp_free(MEMP_FRAG_PBUF, p); +} + +/** Free-callback function to free a 'struct pbuf_custom_ref', called by + * pbuf_free. */ +static void +ipfrag_free_pbuf_custom(struct pbuf *p) +{ + struct pbuf_custom_ref *pcr = (struct pbuf_custom_ref *)p; + LWIP_ASSERT("pcr != NULL", pcr != NULL); + LWIP_ASSERT("pcr == p", (void *)pcr == (void *)p); + if (pcr->original != NULL) { + pbuf_free(pcr->original); + } + ip_frag_free_pbuf_custom_ref(pcr); +} +#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */ + +/** + * Fragment an IP datagram if too large for the netif. + * + * Chop the datagram in MTU sized chunks and send them in order + * by pointing PBUF_REFs into p. 
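A concrete instance of the arithmetic used by ip4_frag() below, with numbers chosen purely for illustration (a 1500-byte Ethernet MTU and a 4020-byte input datagram; neither value comes from this commit):

/* nfb      = (1500 - 20) / 8 = 185    -> at most 185 * 8 = 1480 payload bytes per fragment
 * left     = 4020 - 20      = 4000    -> fragments carrying 1480, 1480 and 1040 payload bytes
 * ofo      = 0, 185, 370              -> fragment offsets in 8-byte units (0, 1480, 2960 bytes)
 * IPH_LEN  = 1500, 1500, 1060         -> fragsize + IP_HLEN written into each fragment header
 * The first two fragments are sent with IP_MF set; the last one carries IP_MF only if the
 * input packet was itself a non-final fragment (mf_set != 0). */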
+ * + * @param p ip packet to send + * @param netif the netif on which to send + * @param dest destination ip address to which to send + * + * @return ERR_OK if sent successfully, err_t otherwise + */ +err_t +ip4_frag(struct pbuf *p, struct netif *netif, const ip4_addr_t *dest) +{ + struct pbuf *rambuf; +#if !LWIP_NETIF_TX_SINGLE_PBUF + struct pbuf *newpbuf; + u16_t newpbuflen = 0; + u16_t left_to_copy; +#endif + struct ip_hdr *original_iphdr; + struct ip_hdr *iphdr; + const u16_t nfb = (u16_t)((netif->mtu - IP_HLEN) / 8); + u16_t left, fragsize; + u16_t ofo; + int last; + u16_t poff = IP_HLEN; + u16_t tmp; + int mf_set; + + original_iphdr = (struct ip_hdr *)p->payload; + iphdr = original_iphdr; + if (IPH_HL_BYTES(iphdr) != IP_HLEN) { + /* ip4_frag() does not support IP options */ + return ERR_VAL; + } + LWIP_ERROR("ip4_frag(): pbuf too short", p->len >= IP_HLEN, return ERR_VAL); + + /* Save original offset */ + tmp = lwip_ntohs(IPH_OFFSET(iphdr)); + ofo = tmp & IP_OFFMASK; + /* already fragmented? if so, the last fragment we create must have MF, too */ + mf_set = tmp & IP_MF; + + left = (u16_t)(p->tot_len - IP_HLEN); + + while (left) { + /* Fill this fragment */ + fragsize = LWIP_MIN(left, (u16_t)(nfb * 8)); + +#if LWIP_NETIF_TX_SINGLE_PBUF + rambuf = pbuf_alloc(PBUF_IP, fragsize, PBUF_RAM); + if (rambuf == NULL) { + goto memerr; + } + LWIP_ASSERT("this needs a pbuf in one piece!", + (rambuf->len == rambuf->tot_len) && (rambuf->next == NULL)); + poff += pbuf_copy_partial(p, rambuf->payload, fragsize, poff); + /* make room for the IP header */ + if (pbuf_add_header(rambuf, IP_HLEN)) { + pbuf_free(rambuf); + goto memerr; + } + /* fill in the IP header */ + SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN); + iphdr = (struct ip_hdr *)rambuf->payload; +#else /* LWIP_NETIF_TX_SINGLE_PBUF */ + /* When not using a static buffer, create a chain of pbufs. + * The first will be a PBUF_RAM holding the link and IP header. + * The rest will be PBUF_REFs mirroring the pbuf chain to be fragged, + * but limited to the size of an mtu. + */ + rambuf = pbuf_alloc(PBUF_LINK, IP_HLEN, PBUF_RAM); + if (rambuf == NULL) { + goto memerr; + } + LWIP_ASSERT("this needs a pbuf in one piece!", + (rambuf->len >= (IP_HLEN))); + SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN); + iphdr = (struct ip_hdr *)rambuf->payload; + + left_to_copy = fragsize; + while (left_to_copy) { + struct pbuf_custom_ref *pcr; + u16_t plen = (u16_t)(p->len - poff); + LWIP_ASSERT("p->len >= poff", p->len >= poff); + newpbuflen = LWIP_MIN(left_to_copy, plen); + /* Is this pbuf already empty? */ + if (!newpbuflen) { + poff = 0; + p = p->next; + continue; + } + pcr = ip_frag_alloc_pbuf_custom_ref(); + if (pcr == NULL) { + pbuf_free(rambuf); + goto memerr; + } + /* Mirror this pbuf, although we might not need all of it. */ + newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc, + (u8_t *)p->payload + poff, newpbuflen); + if (newpbuf == NULL) { + ip_frag_free_pbuf_custom_ref(pcr); + pbuf_free(rambuf); + goto memerr; + } + pbuf_ref(p); + pcr->original = p; + pcr->pc.custom_free_function = ipfrag_free_pbuf_custom; + + /* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain + * so that it is removed when pbuf_dechain is later called on rambuf. 
+ */ + pbuf_cat(rambuf, newpbuf); + left_to_copy = (u16_t)(left_to_copy - newpbuflen); + if (left_to_copy) { + poff = 0; + p = p->next; + } + } + poff = (u16_t)(poff + newpbuflen); +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + + /* Correct header */ + last = (left <= netif->mtu - IP_HLEN); + + /* Set new offset and MF flag */ + tmp = (IP_OFFMASK & (ofo)); + if (!last || mf_set) { + /* the last fragment has MF set if the input frame had it */ + tmp = tmp | IP_MF; + } + IPH_OFFSET_SET(iphdr, lwip_htons(tmp)); + IPH_LEN_SET(iphdr, lwip_htons((u16_t)(fragsize + IP_HLEN))); + IPH_CHKSUM_SET(iphdr, 0); +#if CHECKSUM_GEN_IP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) { + IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, IP_HLEN)); + } +#endif /* CHECKSUM_GEN_IP */ + + /* No need for separate header pbuf - we allowed room for it in rambuf + * when allocated. + */ + netif->output(netif, rambuf, dest); + IPFRAG_STATS_INC(ip_frag.xmit); + + /* Unfortunately we can't reuse rambuf - the hardware may still be + * using the buffer. Instead we free it (and the ensuing chain) and + * recreate it next time round the loop. If we're lucky the hardware + * will have already sent the packet, the free will really free, and + * there will be zero memory penalty. + */ + + pbuf_free(rambuf); + left = (u16_t)(left - fragsize); + ofo = (u16_t)(ofo + nfb); + } + MIB2_STATS_INC(mib2.ipfragoks); + return ERR_OK; +memerr: + MIB2_STATS_INC(mib2.ipfragfails); + return ERR_MEM; +} +#endif /* IP_FRAG */ + +#endif /* LWIP_IPV4 */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c b/Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c new file mode 100644 index 0000000..7cf98a5 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c @@ -0,0 +1,812 @@ +/** + * @file + * + * @defgroup dhcp6 DHCPv6 + * @ingroup ip6 + * DHCPv6 client: IPv6 address autoconfiguration as per + * RFC 3315 (stateful DHCPv6) and + * RFC 3736 (stateless DHCPv6). + * + * For now, only stateless DHCPv6 is implemented! + * + * TODO: + * - enable/disable API to not always start when RA is received + * - stateful DHCPv6 (for now, only stateless DHCPv6 for DNS and NTP servers works) + * - create Client Identifier? + * - only start requests if a valid local address is available on the netif + * - only start information requests if required (not for every RA) + * + * dhcp6_enable_stateful() enables stateful DHCPv6 for a netif (stateless disabled)\n + * dhcp6_enable_stateless() enables stateless DHCPv6 for a netif (stateful disabled)\n + * dhcp6_disable() disable DHCPv6 for a netif + * + * When enabled, requests are only issued after receipt of RA with the + * corresponding bits set. + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
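The ip4_reass()/ip4_frag() code above is compiled in only when the corresponding switches are enabled in lwipopts.h; a minimal sketch of the options that gate and size it follows (the values shown are illustrative and are not taken from this commit):

/* lwipopts.h (sketch) */
#define IP_REASSEMBLY              1    /* build ip4_reass() */
#define IP_FRAG                    1    /* build ip4_frag() */
#define IP_REASS_MAX_PBUFS         10   /* bound checked against ip_reass_pbufcount above */
#define IP_REASS_MAXAGE            15   /* reassembly timeout, in ip_reass_tmr() ticks */
#define LWIP_NETIF_TX_SINGLE_PBUF  0    /* 0: fragments reference the original data via PBUF_REFs */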
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_IPV6_DHCP6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/dhcp6.h" +#include "lwip/prot/dhcp6.h" +#include "lwip/def.h" +#include "lwip/udp.h" +#include "lwip/dns.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif +#ifndef LWIP_HOOK_DHCP6_APPEND_OPTIONS +#define LWIP_HOOK_DHCP6_APPEND_OPTIONS(netif, dhcp6, state, msg, msg_type, options_len_ptr, max_len) +#endif +#ifndef LWIP_HOOK_DHCP6_PARSE_OPTION +#define LWIP_HOOK_DHCP6_PARSE_OPTION(netif, dhcp6, state, msg, msg_type, option, len, pbuf, offset) do { LWIP_UNUSED_ARG(msg); } while(0) +#endif + +#if LWIP_DNS && LWIP_DHCP6_MAX_DNS_SERVERS +#if DNS_MAX_SERVERS > LWIP_DHCP6_MAX_DNS_SERVERS +#define LWIP_DHCP6_PROVIDE_DNS_SERVERS LWIP_DHCP6_MAX_DNS_SERVERS +#else +#define LWIP_DHCP6_PROVIDE_DNS_SERVERS DNS_MAX_SERVERS +#endif +#else +#define LWIP_DHCP6_PROVIDE_DNS_SERVERS 0 +#endif + + +/** Option handling: options are parsed in dhcp6_parse_reply + * and saved in an array where other functions can load them from. + * This might be moved into the struct dhcp6 (not necessarily since + * lwIP is single-threaded and the array is only used while in recv + * callback). */ +enum dhcp6_option_idx { + DHCP6_OPTION_IDX_CLI_ID = 0, + DHCP6_OPTION_IDX_SERVER_ID, +#if LWIP_DHCP6_PROVIDE_DNS_SERVERS + DHCP6_OPTION_IDX_DNS_SERVER, + DHCP6_OPTION_IDX_DOMAIN_LIST, +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ +#if LWIP_DHCP6_GET_NTP_SRV + DHCP6_OPTION_IDX_NTP_SERVER, +#endif /* LWIP_DHCP_GET_NTP_SRV */ + DHCP6_OPTION_IDX_MAX +}; + +struct dhcp6_option_info { + u8_t option_given; + u16_t val_start; + u16_t val_length; +}; + +/** Holds the decoded option info, only valid while in dhcp6_recv. 
*/ +struct dhcp6_option_info dhcp6_rx_options[DHCP6_OPTION_IDX_MAX]; + +#define dhcp6_option_given(dhcp6, idx) (dhcp6_rx_options[idx].option_given != 0) +#define dhcp6_got_option(dhcp6, idx) (dhcp6_rx_options[idx].option_given = 1) +#define dhcp6_clear_option(dhcp6, idx) (dhcp6_rx_options[idx].option_given = 0) +#define dhcp6_clear_all_options(dhcp6) (memset(dhcp6_rx_options, 0, sizeof(dhcp6_rx_options))) +#define dhcp6_get_option_start(dhcp6, idx) (dhcp6_rx_options[idx].val_start) +#define dhcp6_get_option_length(dhcp6, idx) (dhcp6_rx_options[idx].val_length) +#define dhcp6_set_option(dhcp6, idx, start, len) do { dhcp6_rx_options[idx].val_start = (start); dhcp6_rx_options[idx].val_length = (len); }while(0) + + +const ip_addr_t dhcp6_All_DHCP6_Relay_Agents_and_Servers = IPADDR6_INIT_HOST(0xFF020000, 0, 0, 0x00010002); +const ip_addr_t dhcp6_All_DHCP6_Servers = IPADDR6_INIT_HOST(0xFF020000, 0, 0, 0x00010003); + +static struct udp_pcb *dhcp6_pcb; +static u8_t dhcp6_pcb_refcount; + + +/* receive, unfold, parse and free incoming messages */ +static void dhcp6_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port); + +/** Ensure DHCP PCB is allocated and bound */ +static err_t +dhcp6_inc_pcb_refcount(void) +{ + if (dhcp6_pcb_refcount == 0) { + LWIP_ASSERT("dhcp6_inc_pcb_refcount(): memory leak", dhcp6_pcb == NULL); + + /* allocate UDP PCB */ + dhcp6_pcb = udp_new_ip6(); + + if (dhcp6_pcb == NULL) { + return ERR_MEM; + } + + ip_set_option(dhcp6_pcb, SOF_BROADCAST); + + /* set up local and remote port for the pcb -> listen on all interfaces on all src/dest IPs */ + udp_bind(dhcp6_pcb, IP6_ADDR_ANY, DHCP6_CLIENT_PORT); + udp_recv(dhcp6_pcb, dhcp6_recv, NULL); + } + + dhcp6_pcb_refcount++; + + return ERR_OK; +} + +/** Free DHCP PCB if the last netif stops using it */ +static void +dhcp6_dec_pcb_refcount(void) +{ + LWIP_ASSERT("dhcp6_pcb_refcount(): refcount error", (dhcp6_pcb_refcount > 0)); + dhcp6_pcb_refcount--; + + if (dhcp6_pcb_refcount == 0) { + udp_remove(dhcp6_pcb); + dhcp6_pcb = NULL; + } +} + +/** + * @ingroup dhcp6 + * Set a statically allocated struct dhcp6 to work with. + * Using this prevents dhcp6_start to allocate it using mem_malloc. + * + * @param netif the netif for which to set the struct dhcp + * @param dhcp6 (uninitialised) dhcp6 struct allocated by the application + */ +void +dhcp6_set_struct(struct netif *netif, struct dhcp6 *dhcp6) +{ + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("dhcp6 != NULL", dhcp6 != NULL); + LWIP_ASSERT("netif already has a struct dhcp6 set", netif_dhcp6_data(netif) == NULL); + + /* clear data structure */ + memset(dhcp6, 0, sizeof(struct dhcp6)); + /* dhcp6_set_state(&dhcp, DHCP6_STATE_OFF); */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, dhcp6); +} + +/** + * @ingroup dhcp6 + * Removes a struct dhcp6 from a netif. + * + * ATTENTION: Only use this when not using dhcp6_set_struct() to allocate the + * struct dhcp6 since the memory is passed back to the heap. 
+ * + * @param netif the netif from which to remove the struct dhcp + */ +void dhcp6_cleanup(struct netif *netif) +{ + LWIP_ASSERT("netif != NULL", netif != NULL); + + if (netif_dhcp6_data(netif) != NULL) { + mem_free(netif_dhcp6_data(netif)); + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, NULL); + } +} + +static struct dhcp6* +dhcp6_get_struct(struct netif *netif, const char *dbg_requester) +{ + struct dhcp6 *dhcp6 = netif_dhcp6_data(netif); + if (dhcp6 == NULL) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("%s: mallocing new DHCPv6 client\n", dbg_requester)); + dhcp6 = (struct dhcp6 *)mem_malloc(sizeof(struct dhcp6)); + if (dhcp6 == NULL) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("%s: could not allocate dhcp6\n", dbg_requester)); + return NULL; + } + + /* clear data structure, this implies DHCP6_STATE_OFF */ + memset(dhcp6, 0, sizeof(struct dhcp6)); + /* store this dhcp6 client in the netif */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, dhcp6); + } else { + /* already has DHCP6 client attached */ + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("%s: using existing DHCPv6 client\n", dbg_requester)); + } + + if (!dhcp6->pcb_allocated) { + if (dhcp6_inc_pcb_refcount() != ERR_OK) { /* ensure DHCP6 PCB is allocated */ + mem_free(dhcp6); + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, NULL); + return NULL; + } + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("%s: allocated dhcp6", dbg_requester)); + dhcp6->pcb_allocated = 1; + } + return dhcp6; +} + +/* + * Set the DHCPv6 state + * If the state changed, reset the number of tries. + */ +static void +dhcp6_set_state(struct dhcp6 *dhcp6, u8_t new_state, const char *dbg_caller) +{ + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("DHCPv6 state: %d -> %d (%s)\n", + dhcp6->state, new_state, dbg_caller)); + if (new_state != dhcp6->state) { + dhcp6->state = new_state; + dhcp6->tries = 0; + dhcp6->request_timeout = 0; + } +} + +static int +dhcp6_stateless_enabled(struct dhcp6 *dhcp6) +{ + if ((dhcp6->state == DHCP6_STATE_STATELESS_IDLE) || + (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG)) { + return 1; + } + return 0; +} + +/*static int +dhcp6_stateful_enabled(struct dhcp6 *dhcp6) +{ + if (dhcp6->state == DHCP6_STATE_OFF) { + return 0; + } + if (dhcp6_stateless_enabled(dhcp6)) { + return 0; + } + return 1; +}*/ + +/** + * @ingroup dhcp6 + * Enable stateful DHCPv6 on this netif + * Requests are sent on receipt of an RA message with the + * ND6_RA_FLAG_MANAGED_ADDR_CONFIG flag set. + * + * A struct dhcp6 will be allocated for this netif if not + * set via @ref dhcp6_set_struct before. + * + * @todo: stateful DHCPv6 not supported, yet + */ +err_t +dhcp6_enable_stateful(struct netif *netif) +{ + LWIP_UNUSED_ARG(netif); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("stateful dhcp6 not implemented yet")); + return ERR_VAL; +} + +/** + * @ingroup dhcp6 + * Enable stateless DHCPv6 on this netif + * Requests are sent on receipt of an RA message with the + * ND6_RA_FLAG_OTHER_CONFIG flag set. + * + * A struct dhcp6 will be allocated for this netif if not + * set via @ref dhcp6_set_struct before. 
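A minimal application-side sketch of the call sequence these functions are meant for (the wrapper function name is assumed for the example, and the prior netif_add()/netif_set_up() setup is not shown):

#include "lwip/netif.h"
#include "lwip/dhcp6.h"

/* called once for an already-added, up netif */
static void start_dhcp6_stateless(struct netif *nif)
{
#if LWIP_IPV6 && LWIP_IPV6_DHCP6
  static struct dhcp6 dhcp6_client;              /* optional static storage */
  netif_create_ip6_linklocal_address(nif, 1);    /* derive a link-local address from the MAC */
  dhcp6_set_struct(nif, &dhcp6_client);          /* optional: avoids the mem_malloc() in dhcp6_get_struct() */
  dhcp6_enable_stateless(nif);                   /* DNS/NTP info is then requested on RA with the O flag */
#endif
}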
+ */ +err_t +dhcp6_enable_stateless(struct netif *netif) +{ + struct dhcp6 *dhcp6; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_enable_stateless(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + + dhcp6 = dhcp6_get_struct(netif, "dhcp6_enable_stateless()"); + if (dhcp6 == NULL) { + return ERR_MEM; + } + if (dhcp6_stateless_enabled(dhcp6)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp6_enable_stateless(): stateless DHCPv6 already enabled")); + return ERR_OK; + } else if (dhcp6->state != DHCP6_STATE_OFF) { + /* stateful running */ + /* @todo: stop stateful once it is implemented */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp6_enable_stateless(): switching from stateful to stateless DHCPv6")); + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp6_enable_stateless(): stateless DHCPv6 enabled\n")); + dhcp6_set_state(dhcp6, DHCP6_STATE_STATELESS_IDLE, "dhcp6_enable_stateless"); + return ERR_OK; +} + +/** + * @ingroup dhcp6 + * Disable stateful or stateless DHCPv6 on this netif + * Requests are sent on receipt of an RA message with the + * ND6_RA_FLAG_OTHER_CONFIG flag set. + */ +void +dhcp6_disable(struct netif *netif) +{ + struct dhcp6 *dhcp6; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_disable(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + + dhcp6 = netif_dhcp6_data(netif); + if (dhcp6 != NULL) { + if (dhcp6->state != DHCP6_STATE_OFF) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_disable(): DHCPv6 disabled (old state: %s)\n", + (dhcp6_stateless_enabled(dhcp6) ? "stateless" : "stateful"))); + dhcp6_set_state(dhcp6, DHCP6_STATE_OFF, "dhcp6_disable"); + if (dhcp6->pcb_allocated != 0) { + dhcp6_dec_pcb_refcount(); /* free DHCPv6 PCB if not needed any more */ + dhcp6->pcb_allocated = 0; + } + } + } +} + +/** + * Create a DHCPv6 request, fill in common headers + * + * @param netif the netif under DHCPv6 control + * @param dhcp6 dhcp6 control struct + * @param message_type message type of the request + * @param opt_len_alloc option length to allocate + * @param options_out_len option length on exit + * @return a pbuf for the message + */ +static struct pbuf * +dhcp6_create_msg(struct netif *netif, struct dhcp6 *dhcp6, u8_t message_type, + u16_t opt_len_alloc, u16_t *options_out_len) +{ + struct pbuf *p_out; + struct dhcp6_msg *msg_out; + + LWIP_ERROR("dhcp6_create_msg: netif != NULL", (netif != NULL), return NULL;); + LWIP_ERROR("dhcp6_create_msg: dhcp6 != NULL", (dhcp6 != NULL), return NULL;); + p_out = pbuf_alloc(PBUF_TRANSPORT, sizeof(struct dhcp6_msg) + opt_len_alloc, PBUF_RAM); + if (p_out == NULL) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("dhcp6_create_msg(): could not allocate pbuf\n")); + return NULL; + } + LWIP_ASSERT("dhcp6_create_msg: check that first pbuf can hold struct dhcp6_msg", + (p_out->len >= sizeof(struct dhcp6_msg) + opt_len_alloc)); + + /* @todo: limit new xid for certain message types? 
*/ + /* reuse transaction identifier in retransmissions */ + if (dhcp6->tries == 0) { + dhcp6->xid = LWIP_RAND() & 0xFFFFFF; + } + + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, + ("transaction id xid(%"X32_F")\n", dhcp6->xid)); + + msg_out = (struct dhcp6_msg *)p_out->payload; + memset(msg_out, 0, sizeof(struct dhcp6_msg) + opt_len_alloc); + + msg_out->msgtype = message_type; + msg_out->transaction_id[0] = (u8_t)(dhcp6->xid >> 16); + msg_out->transaction_id[1] = (u8_t)(dhcp6->xid >> 8); + msg_out->transaction_id[2] = (u8_t)dhcp6->xid; + *options_out_len = 0; + return p_out; +} + +static u16_t +dhcp6_option_short(u16_t options_out_len, u8_t *options, u16_t value) +{ + options[options_out_len++] = (u8_t)((value & 0xff00U) >> 8); + options[options_out_len++] = (u8_t) (value & 0x00ffU); + return options_out_len; +} + +static u16_t +dhcp6_option_optionrequest(u16_t options_out_len, u8_t *options, const u16_t *req_options, + u16_t num_req_options, u16_t max_len) +{ + size_t i; + u16_t ret; + + LWIP_ASSERT("dhcp6_option_optionrequest: options_out_len + sizeof(struct dhcp6_msg) + addlen <= max_len", + sizeof(struct dhcp6_msg) + options_out_len + 4U + (2U * num_req_options) <= max_len); + LWIP_UNUSED_ARG(max_len); + + ret = dhcp6_option_short(options_out_len, options, DHCP6_OPTION_ORO); + ret = dhcp6_option_short(ret, options, 2 * num_req_options); + for (i = 0; i < num_req_options; i++) { + ret = dhcp6_option_short(ret, options, req_options[i]); + } + return ret; +} + +/* All options are added, shrink the pbuf to the required size */ +static void +dhcp6_msg_finalize(u16_t options_out_len, struct pbuf *p_out) +{ + /* shrink the pbuf to the actual content length */ + pbuf_realloc(p_out, (u16_t)(sizeof(struct dhcp6_msg) + options_out_len)); +} + + +#if LWIP_IPV6_DHCP6_STATELESS +static void +dhcp6_information_request(struct netif *netif, struct dhcp6 *dhcp6) +{ + const u16_t requested_options[] = {DHCP6_OPTION_DNS_SERVERS, DHCP6_OPTION_DOMAIN_LIST, DHCP6_OPTION_SNTP_SERVERS}; + u16_t msecs; + struct pbuf *p_out; + u16_t options_out_len; + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_information_request()\n")); + /* create and initialize the DHCP message header */ + p_out = dhcp6_create_msg(netif, dhcp6, DHCP6_INFOREQUEST, 4 + sizeof(requested_options), &options_out_len); + if (p_out != NULL) { + err_t err; + struct dhcp6_msg *msg_out = (struct dhcp6_msg *)p_out->payload; + u8_t *options = (u8_t *)(msg_out + 1); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_information_request: making request\n")); + + options_out_len = dhcp6_option_optionrequest(options_out_len, options, requested_options, + LWIP_ARRAYSIZE(requested_options), p_out->len); + LWIP_HOOK_DHCP6_APPEND_OPTIONS(netif, dhcp6, DHCP6_STATE_REQUESTING_CONFIG, msg_out, + DHCP6_INFOREQUEST, options_out_len, p_out->len); + dhcp6_msg_finalize(options_out_len, p_out); + + err = udp_sendto_if(dhcp6_pcb, p_out, &dhcp6_All_DHCP6_Relay_Agents_and_Servers, DHCP6_SERVER_PORT, netif); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_information_request: INFOREQUESTING -> %d\n", (int)err)); + LWIP_UNUSED_ARG(err); + } else { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp6_information_request: could not allocate DHCP6 request\n")); + } + dhcp6_set_state(dhcp6, DHCP6_STATE_REQUESTING_CONFIG, "dhcp6_information_request"); + if (dhcp6->tries < 255) { + dhcp6->tries++; + } + msecs = (u16_t)((dhcp6->tries < 6 ? 
1 << dhcp6->tries : 60) * 1000); + dhcp6->request_timeout = (u16_t)((msecs + DHCP6_TIMER_MSECS - 1) / DHCP6_TIMER_MSECS); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_information_request(): set request timeout %"U16_F" msecs\n", msecs)); +} + +static err_t +dhcp6_request_config(struct netif *netif, struct dhcp6 *dhcp6) +{ + /* stateless mode enabled and no request running? */ + if (dhcp6->state == DHCP6_STATE_STATELESS_IDLE) { + /* send Information-request and wait for answer; setup receive timeout */ + dhcp6_information_request(netif, dhcp6); + } + + return ERR_OK; +} + +static void +dhcp6_abort_config_request(struct dhcp6 *dhcp6) +{ + if (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG) { + /* abort running request */ + dhcp6_set_state(dhcp6, DHCP6_STATE_STATELESS_IDLE, "dhcp6_abort_config_request"); + } +} + +/* Handle a REPLY to INFOREQUEST + * This parses DNS and NTP server addresses from the reply. + */ +static void +dhcp6_handle_config_reply(struct netif *netif, struct pbuf *p_msg_in) +{ + struct dhcp6 *dhcp6 = netif_dhcp6_data(netif); + + LWIP_UNUSED_ARG(dhcp6); + LWIP_UNUSED_ARG(p_msg_in); + +#if LWIP_DHCP6_PROVIDE_DNS_SERVERS + if (dhcp6_option_given(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER)) { + ip_addr_t dns_addr; + ip6_addr_t *dns_addr6; + u16_t op_start = dhcp6_get_option_start(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER); + u16_t op_len = dhcp6_get_option_length(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER); + u16_t idx; + u8_t n; + + memset(&dns_addr, 0, sizeof(dns_addr)); + dns_addr6 = ip_2_ip6(&dns_addr); + for (n = 0, idx = op_start; (idx < op_start + op_len) && (n < LWIP_DHCP6_PROVIDE_DNS_SERVERS); + n++, idx += sizeof(struct ip6_addr_packed)) { + u16_t copied = pbuf_copy_partial(p_msg_in, dns_addr6, sizeof(struct ip6_addr_packed), idx); + if (copied != sizeof(struct ip6_addr_packed)) { + /* pbuf length mismatch */ + return; + } + ip6_addr_assign_zone(dns_addr6, IP6_UNKNOWN, netif); + /* @todo: do we need a different offset than DHCP(v4)? */ + dns_setserver(n, &dns_addr); + } + } + /* @ todo: parse and set Domain Search List */ +#endif /* LWIP_DHCP6_PROVIDE_DNS_SERVERS */ + +#if LWIP_DHCP6_GET_NTP_SRV + if (dhcp6_option_given(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER)) { + ip_addr_t ntp_server_addrs[LWIP_DHCP6_MAX_NTP_SERVERS]; + u16_t op_start = dhcp6_get_option_start(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER); + u16_t op_len = dhcp6_get_option_length(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER); + u16_t idx; + u8_t n; + + for (n = 0, idx = op_start; (idx < op_start + op_len) && (n < LWIP_DHCP6_MAX_NTP_SERVERS); + n++, idx += sizeof(struct ip6_addr_packed)) { + u16_t copied; + ip6_addr_t *ntp_addr6 = ip_2_ip6(&ntp_server_addrs[n]); + ip_addr_set_zero_ip6(&ntp_server_addrs[n]); + copied = pbuf_copy_partial(p_msg_in, ntp_addr6, sizeof(struct ip6_addr_packed), idx); + if (copied != sizeof(struct ip6_addr_packed)) { + /* pbuf length mismatch */ + return; + } + ip6_addr_assign_zone(ntp_addr6, IP6_UNKNOWN, netif); + } + dhcp6_set_ntp_servers(n, ntp_server_addrs); + } +#endif /* LWIP_DHCP6_GET_NTP_SRV */ +} +#endif /* LWIP_IPV6_DHCP6_STATELESS */ + +/** This function is called from nd6 module when an RA messsage is received + * It triggers DHCPv6 requests (if enabled). 
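In stateless operation the client therefore moves through only three of the states used in this file; the transitions implemented above and below can be summarised informally as (not an exhaustive state diagram):

/* DHCP6_STATE_OFF               --dhcp6_enable_stateless()---------------> DHCP6_STATE_STATELESS_IDLE
 * DHCP6_STATE_STATELESS_IDLE    --RA with the "other config" flag set----> DHCP6_STATE_REQUESTING_CONFIG
 *                                 (an Information-Request is transmitted)
 * DHCP6_STATE_REQUESTING_CONFIG --DHCP6_REPLY received, or flag cleared--> DHCP6_STATE_STATELESS_IDLE */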
+ */ +void +dhcp6_nd6_ra_trigger(struct netif *netif, u8_t managed_addr_config, u8_t other_config) +{ + struct dhcp6 *dhcp6; + + LWIP_ASSERT("netif != NULL", netif != NULL); + dhcp6 = netif_dhcp6_data(netif); + + LWIP_UNUSED_ARG(managed_addr_config); + LWIP_UNUSED_ARG(other_config); + LWIP_UNUSED_ARG(dhcp6); + +#if LWIP_IPV6_DHCP6_STATELESS + if (dhcp6 != NULL) { + if (dhcp6_stateless_enabled(dhcp6)) { + if (other_config) { + dhcp6_request_config(netif, dhcp6); + } else { + dhcp6_abort_config_request(dhcp6); + } + } + } +#endif /* LWIP_IPV6_DHCP6_STATELESS */ +} + +/** + * Parse the DHCPv6 message and extract the DHCPv6 options. + * + * Extract the DHCPv6 options (offset + length) so that we can later easily + * check for them or extract the contents. + */ +static err_t +dhcp6_parse_reply(struct pbuf *p, struct dhcp6 *dhcp6) +{ + u16_t offset; + u16_t offset_max; + u16_t options_idx; + struct dhcp6_msg *msg_in; + + LWIP_UNUSED_ARG(dhcp6); + + /* clear received options */ + dhcp6_clear_all_options(dhcp6); + msg_in = (struct dhcp6_msg *)p->payload; + + /* parse options */ + + options_idx = sizeof(struct dhcp6_msg); + /* parse options to the end of the received packet */ + offset_max = p->tot_len; + + offset = options_idx; + /* at least 4 byte to read? */ + while ((offset + 4 <= offset_max)) { + u8_t op_len_buf[4]; + u8_t *op_len; + u16_t op; + u16_t len; + u16_t val_offset = (u16_t)(offset + 4); + if (val_offset < offset) { + /* overflow */ + return ERR_BUF; + } + /* copy option + length, might be split accross pbufs */ + op_len = (u8_t *)pbuf_get_contiguous(p, op_len_buf, 4, 4, offset); + if (op_len == NULL) { + /* failed to get option and length */ + return ERR_VAL; + } + op = (op_len[0] << 8) | op_len[1]; + len = (op_len[2] << 8) | op_len[3]; + offset = val_offset + len; + if (offset < val_offset) { + /* overflow */ + return ERR_BUF; + } + + switch (op) { + case (DHCP6_OPTION_CLIENTID): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_CLI_ID); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_CLI_ID, val_offset, len); + break; + case (DHCP6_OPTION_SERVERID): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_SERVER_ID); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_SERVER_ID, val_offset, len); + break; +#if LWIP_DHCP6_PROVIDE_DNS_SERVERS + case (DHCP6_OPTION_DNS_SERVERS): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER, val_offset, len); + break; + case (DHCP6_OPTION_DOMAIN_LIST): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_DOMAIN_LIST); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_DOMAIN_LIST, val_offset, len); + break; +#endif /* LWIP_DHCP6_PROVIDE_DNS_SERVERS */ +#if LWIP_DHCP6_GET_NTP_SRV + case (DHCP6_OPTION_SNTP_SERVERS): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER, val_offset, len); + break; +#endif /* LWIP_DHCP6_GET_NTP_SRV*/ + default: + LWIP_DEBUGF(DHCP6_DEBUG, ("skipping option %"U16_F" in options\n", op)); + LWIP_HOOK_DHCP6_PARSE_OPTION(ip_current_netif(), dhcp6, dhcp6->state, msg_in, + msg_in->msgtype, op, len, q, val_offset); + break; + } + } + return ERR_OK; +} + +static void +dhcp6_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) +{ + struct netif *netif = ip_current_input_netif(); + struct dhcp6 *dhcp6 = netif_dhcp6_data(netif); + struct dhcp6_msg *reply_msg = (struct dhcp6_msg *)p->payload; + u8_t msg_type; + u32_t xid; + + LWIP_UNUSED_ARG(arg); + + /* Caught DHCPv6 message from netif that does not have DHCPv6 enabled? 
-> not interested */ + if ((dhcp6 == NULL) || (dhcp6->pcb_allocated == 0)) { + goto free_pbuf_and_return; + } + + LWIP_ERROR("invalid server address type", IP_IS_V6(addr), goto free_pbuf_and_return;); + + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_recv(pbuf = %p) from DHCPv6 server %s port %"U16_F"\n", (void *)p, + ipaddr_ntoa(addr), port)); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("pbuf->len = %"U16_F"\n", p->len)); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("pbuf->tot_len = %"U16_F"\n", p->tot_len)); + /* prevent warnings about unused arguments */ + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(addr); + LWIP_UNUSED_ARG(port); + + if (p->len < sizeof(struct dhcp6_msg)) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("DHCPv6 reply message or pbuf too short\n")); + goto free_pbuf_and_return; + } + + /* match transaction ID against what we expected */ + xid = reply_msg->transaction_id[0] << 16; + xid |= reply_msg->transaction_id[1] << 8; + xid |= reply_msg->transaction_id[2]; + if (xid != dhcp6->xid) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("transaction id mismatch reply_msg->xid(%"X32_F")!= dhcp6->xid(%"X32_F")\n", xid, dhcp6->xid)); + goto free_pbuf_and_return; + } + /* option fields could be unfold? */ + if (dhcp6_parse_reply(p, dhcp6) != ERR_OK) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("problem unfolding DHCPv6 message - too short on memory?\n")); + goto free_pbuf_and_return; + } + + /* read DHCP message type */ + msg_type = reply_msg->msgtype; + /* message type is DHCP6 REPLY? */ + if (msg_type == DHCP6_REPLY) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("DHCP6_REPLY received\n")); +#if LWIP_IPV6_DHCP6_STATELESS + /* in info-requesting state? */ + if (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG) { + dhcp6_set_state(dhcp6, DHCP6_STATE_STATELESS_IDLE, "dhcp6_recv"); + dhcp6_handle_config_reply(netif, p); + } else +#endif /* LWIP_IPV6_DHCP6_STATELESS */ + { + /* @todo: handle reply in other states? */ + } + } else { + /* @todo: handle other message types */ + } + +free_pbuf_and_return: + pbuf_free(p); +} + +/** + * A DHCPv6 request has timed out. + * + * The timer that was started with the DHCPv6 request has + * timed out, indicating no response was received in time. + */ +static void +dhcp6_timeout(struct netif *netif, struct dhcp6 *dhcp6) +{ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp6_timeout()\n")); + + LWIP_UNUSED_ARG(netif); + LWIP_UNUSED_ARG(dhcp6); + +#if LWIP_IPV6_DHCP6_STATELESS + /* back-off period has passed, or server selection timed out */ + if (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_timeout(): retrying information request\n")); + dhcp6_information_request(netif, dhcp6); + } +#endif /* LWIP_IPV6_DHCP6_STATELESS */ +} + +/** + * DHCPv6 timeout handling (this function must be called every 500ms, + * see @ref DHCP6_TIMER_MSECS). + * + * A DHCPv6 server is expected to respond within a short period of time. + * This timer checks whether an outstanding DHCPv6 request is timed out. 
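dhcp6_tmr() is normally driven by lwIP's own cyclic timeout machinery; if that is not used, the 500 ms contract described above can be met from the application's polling loop, roughly as follows (the function and the static tick variable are illustrative names, not lwIP APIs):

#include "lwip/sys.h"     /* sys_now() */
#include "lwip/dhcp6.h"   /* dhcp6_tmr(), DHCP6_TIMER_MSECS */

static u32_t last_dhcp6_tick;

void poll_dhcp6_timer(void)
{
  if ((u32_t)(sys_now() - last_dhcp6_tick) >= DHCP6_TIMER_MSECS) {
    last_dhcp6_tick = sys_now();
    dhcp6_tmr();   /* counts down request_timeout and retransmits the Information-Request on expiry */
  }
}

The retransmission interval itself backs off as computed in dhcp6_information_request() above: 2 s after the first attempt, then 4, 8, 16 and 32 s, capped at 60 s from the sixth try onwards.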
+ */ +void +dhcp6_tmr(void) +{ + struct netif *netif; + /* loop through netif's */ + NETIF_FOREACH(netif) { + struct dhcp6 *dhcp6 = netif_dhcp6_data(netif); + /* only act on DHCPv6 configured interfaces */ + if (dhcp6 != NULL) { + /* timer is active (non zero), and is about to trigger now */ + if (dhcp6->request_timeout > 1) { + dhcp6->request_timeout--; + } else if (dhcp6->request_timeout == 1) { + dhcp6->request_timeout--; + /* { dhcp6->request_timeout == 0 } */ + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_tmr(): request timeout\n")); + /* this client's request timeout triggered */ + dhcp6_timeout(netif, dhcp6); + } + } + } +} + +#endif /* LWIP_IPV6 && LWIP_IPV6_DHCP6 */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c b/Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c new file mode 100644 index 0000000..fec8b28 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c @@ -0,0 +1,123 @@ +/** + * @file + * + * Ethernet output for IPv6. Uses ND tables for link-layer addressing. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_ETHERNET + +#include "lwip/ethip6.h" +#include "lwip/nd6.h" +#include "lwip/pbuf.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/icmp6.h" +#include "lwip/prot/ethernet.h" +#include "netif/ethernet.h" + +#include + +/** + * Resolve and fill-in Ethernet address header for outgoing IPv6 packet. + * + * For IPv6 multicast, corresponding Ethernet addresses + * are selected and the packet is transmitted on the link. + * + * For unicast addresses, ask the ND6 module what to do. It will either let us + * send the the packet right away, or queue the packet for later itself, unless + * an error occurs. + * + * @todo anycast addresses + * + * @param netif The lwIP network interface which the IP packet will be sent on. 
+ * @param q The pbuf(s) containing the IP packet to be sent. + * @param ip6addr The IP address of the packet destination. + * + * @return + * - ERR_OK or the return value of @ref nd6_get_next_hop_addr_or_queue. + */ +err_t +ethip6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr) +{ + struct eth_addr dest; + const u8_t *hwaddr; + err_t result; + + LWIP_ASSERT_CORE_LOCKED(); + + /* The destination IP address must be properly zoned from here on down. */ + IP6_ADDR_ZONECHECK_NETIF(ip6addr, netif); + + /* multicast destination IP address? */ + if (ip6_addr_ismulticast(ip6addr)) { + /* Hash IP multicast address to MAC address.*/ + dest.addr[0] = 0x33; + dest.addr[1] = 0x33; + dest.addr[2] = ((const u8_t *)(&(ip6addr->addr[3])))[0]; + dest.addr[3] = ((const u8_t *)(&(ip6addr->addr[3])))[1]; + dest.addr[4] = ((const u8_t *)(&(ip6addr->addr[3])))[2]; + dest.addr[5] = ((const u8_t *)(&(ip6addr->addr[3])))[3]; + + /* Send out. */ + return ethernet_output(netif, q, (const struct eth_addr*)(netif->hwaddr), &dest, ETHTYPE_IPV6); + } + + /* We have a unicast destination IP address */ + /* @todo anycast? */ + + /* Ask ND6 what to do with the packet. */ + result = nd6_get_next_hop_addr_or_queue(netif, q, ip6addr, &hwaddr); + if (result != ERR_OK) { + return result; + } + + /* If no hardware address is returned, nd6 has queued the packet for later. */ + if (hwaddr == NULL) { + return ERR_OK; + } + + /* Send out the packet using the returned hardware address. */ + SMEMCPY(dest.addr, hwaddr, 6); + return ethernet_output(netif, q, (const struct eth_addr*)(netif->hwaddr), &dest, ETHTYPE_IPV6); +} + +#endif /* LWIP_IPV6 && LWIP_ETHERNET */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c b/Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c new file mode 100644 index 0000000..167738a --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c @@ -0,0 +1,425 @@ +/** + * @file + * + * IPv6 version of ICMP, as per RFC 4443. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
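ethip6_output() is not called by applications directly; the Ethernet driver installs it as the netif's IPv6 output hook in its init callback, typically along these lines (a sketch; the conventional driver function name is assumed):

#include "lwip/netif.h"
#include "lwip/ethip6.h"

static err_t ethernetif_init(struct netif *netif)
{
  /* ... MAC address, MTU and link-layer setup omitted ... */
#if LWIP_IPV6
  netif->output_ip6 = ethip6_output;   /* IPv6 packets are handed to ND6 for address resolution */
#endif
  return ERR_OK;
}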
+ * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_ICMP6 && LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/icmp6.h" +#include "lwip/prot/icmp6.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/inet_chksum.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/nd6.h" +#include "lwip/mld6.h" +#include "lwip/ip.h" +#include "lwip/stats.h" + +#include + +#if LWIP_ICMP6_DATASIZE == 0 +#undef LWIP_ICMP6_DATASIZE +#define LWIP_ICMP6_DATASIZE 8 +#endif + +/* Forward declarations */ +static void icmp6_send_response(struct pbuf *p, u8_t code, u32_t data, u8_t type); +static void icmp6_send_response_with_addrs(struct pbuf *p, u8_t code, u32_t data, + u8_t type, const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr); +static void icmp6_send_response_with_addrs_and_netif(struct pbuf *p, u8_t code, u32_t data, + u8_t type, const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr, struct netif *netif); + + +/** + * Process an input ICMPv6 message. Called by ip6_input. + * + * Will generate a reply for echo requests. Other messages are forwarded + * to nd6_input, or mld6_input. + * + * @param p the mld packet, p->payload pointing to the icmpv6 header + * @param inp the netif on which this packet was received + */ +void +icmp6_input(struct pbuf *p, struct netif *inp) +{ + struct icmp6_hdr *icmp6hdr; + struct pbuf *r; + const ip6_addr_t *reply_src; + + ICMP6_STATS_INC(icmp6.recv); + + /* Check that ICMPv6 header fits in payload */ + if (p->len < sizeof(struct icmp6_hdr)) { + /* drop short packets */ + pbuf_free(p); + ICMP6_STATS_INC(icmp6.lenerr); + ICMP6_STATS_INC(icmp6.drop); + return; + } + + icmp6hdr = (struct icmp6_hdr *)p->payload; + +#if CHECKSUM_CHECK_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_ICMP6) { + if (ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->tot_len, ip6_current_src_addr(), + ip6_current_dest_addr()) != 0) { + /* Checksum failed */ + pbuf_free(p); + ICMP6_STATS_INC(icmp6.chkerr); + ICMP6_STATS_INC(icmp6.drop); + return; + } + } +#endif /* CHECKSUM_CHECK_ICMP6 */ + + switch (icmp6hdr->type) { + case ICMP6_TYPE_NA: /* Neighbor advertisement */ + case ICMP6_TYPE_NS: /* Neighbor solicitation */ + case ICMP6_TYPE_RA: /* Router advertisement */ + case ICMP6_TYPE_RD: /* Redirect */ + case ICMP6_TYPE_PTB: /* Packet too big */ + nd6_input(p, inp); + return; + case ICMP6_TYPE_RS: +#if LWIP_IPV6_FORWARD + /* @todo implement router functionality */ +#endif + break; +#if LWIP_IPV6_MLD + case ICMP6_TYPE_MLQ: + case ICMP6_TYPE_MLR: + case ICMP6_TYPE_MLD: + mld6_input(p, inp); + return; +#endif + case ICMP6_TYPE_EREQ: +#if !LWIP_MULTICAST_PING + /* multicast destination address? */ + if (ip6_addr_ismulticast(ip6_current_dest_addr())) { + /* drop */ + pbuf_free(p); + ICMP6_STATS_INC(icmp6.drop); + return; + } +#endif /* LWIP_MULTICAST_PING */ + + /* Allocate reply. */ + r = pbuf_alloc(PBUF_IP, p->tot_len, PBUF_RAM); + if (r == NULL) { + /* drop */ + pbuf_free(p); + ICMP6_STATS_INC(icmp6.memerr); + return; + } + + /* Copy echo request. */ + if (pbuf_copy(r, p) != ERR_OK) { + /* drop */ + pbuf_free(p); + pbuf_free(r); + ICMP6_STATS_INC(icmp6.err); + return; + } + + /* Determine reply source IPv6 address. 
*/ +#if LWIP_MULTICAST_PING + if (ip6_addr_ismulticast(ip6_current_dest_addr())) { + reply_src = ip_2_ip6(ip6_select_source_address(inp, ip6_current_src_addr())); + if (reply_src == NULL) { + /* drop */ + pbuf_free(p); + pbuf_free(r); + ICMP6_STATS_INC(icmp6.rterr); + return; + } + } + else +#endif /* LWIP_MULTICAST_PING */ + { + reply_src = ip6_current_dest_addr(); + } + + /* Set fields in reply. */ + ((struct icmp6_echo_hdr *)(r->payload))->type = ICMP6_TYPE_EREP; + ((struct icmp6_echo_hdr *)(r->payload))->chksum = 0; +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_ICMP6) { + ((struct icmp6_echo_hdr *)(r->payload))->chksum = ip6_chksum_pseudo(r, + IP6_NEXTH_ICMP6, r->tot_len, reply_src, ip6_current_src_addr()); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Send reply. */ + ICMP6_STATS_INC(icmp6.xmit); + ip6_output_if(r, reply_src, ip6_current_src_addr(), + LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, inp); + pbuf_free(r); + + break; + default: + ICMP6_STATS_INC(icmp6.proterr); + ICMP6_STATS_INC(icmp6.drop); + break; + } + + pbuf_free(p); +} + + +/** + * Send an icmpv6 'destination unreachable' packet. + * + * This function must be used only in direct response to a packet that is being + * received right now. Otherwise, address zones would be lost. + * + * @param p the input packet for which the 'unreachable' should be sent, + * p->payload pointing to the IPv6 header + * @param c ICMPv6 code for the unreachable type + */ +void +icmp6_dest_unreach(struct pbuf *p, enum icmp6_dur_code c) +{ + icmp6_send_response(p, c, 0, ICMP6_TYPE_DUR); +} + +/** + * Send an icmpv6 'packet too big' packet. + * + * This function must be used only in direct response to a packet that is being + * received right now. Otherwise, address zones would be lost. + * + * @param p the input packet for which the 'packet too big' should be sent, + * p->payload pointing to the IPv6 header + * @param mtu the maximum mtu that we can accept + */ +void +icmp6_packet_too_big(struct pbuf *p, u32_t mtu) +{ + icmp6_send_response(p, 0, mtu, ICMP6_TYPE_PTB); +} + +/** + * Send an icmpv6 'time exceeded' packet. + * + * This function must be used only in direct response to a packet that is being + * received right now. Otherwise, address zones would be lost. + * + * @param p the input packet for which the 'time exceeded' should be sent, + * p->payload pointing to the IPv6 header + * @param c ICMPv6 code for the time exceeded type + */ +void +icmp6_time_exceeded(struct pbuf *p, enum icmp6_te_code c) +{ + icmp6_send_response(p, c, 0, ICMP6_TYPE_TE); +} + +/** + * Send an icmpv6 'time exceeded' packet, with explicit source and destination + * addresses. + * + * This function may be used to send a response sometime after receiving the + * packet for which this response is meant. The provided source and destination + * addresses are used primarily to retain their zone information. + * + * @param p the input packet for which the 'time exceeded' should be sent, + * p->payload pointing to the IPv6 header + * @param c ICMPv6 code for the time exceeded type + * @param src_addr source address of the original packet, with zone information + * @param dest_addr destination address of the original packet, with zone + * information + */ +void +icmp6_time_exceeded_with_addrs(struct pbuf *p, enum icmp6_te_code c, + const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr) +{ + icmp6_send_response_with_addrs(p, c, 0, ICMP6_TYPE_TE, src_addr, dest_addr); +} + +/** + * Send an icmpv6 'parameter problem' packet. 
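All of these error helpers end up in icmp6_send_response_with_addrs_and_netif() further below, which builds a fixed-size reply; as a worked example of that allocation, assuming the default LWIP_ICMP6_DATASIZE of 8:

/* sizeof(struct icmp6_hdr) + IP6_HLEN + LWIP_ICMP6_DATASIZE = 8 + 40 + 8 = 56 bytes:
 *   8 bytes  ICMPv6 header (type, code, checksum, 32-bit data word)
 *  40 bytes  the offending packet's IPv6 header, copied back for the sender
 *   8 bytes  the first LWIP_ICMP6_DATASIZE bytes that followed that header */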
+ * + * This function must be used only in direct response to a packet that is being + * received right now. Otherwise, address zones would be lost and the calculated + * offset would be wrong (calculated against ip6_current_header()). + * + * @param p the input packet for which the 'param problem' should be sent, + * p->payload pointing to the IP header + * @param c ICMPv6 code for the param problem type + * @param pointer the pointer to the byte where the parameter is found + */ +void +icmp6_param_problem(struct pbuf *p, enum icmp6_pp_code c, const void *pointer) +{ + u32_t pointer_u32 = (u32_t)((const u8_t *)pointer - (const u8_t *)ip6_current_header()); + icmp6_send_response(p, c, pointer_u32, ICMP6_TYPE_PP); +} + +/** + * Send an ICMPv6 packet in response to an incoming packet. + * The packet is sent *to* ip_current_src_addr() on ip_current_netif(). + * + * @param p the input packet for which the response should be sent, + * p->payload pointing to the IPv6 header + * @param code Code of the ICMPv6 header + * @param data Additional 32-bit parameter in the ICMPv6 header + * @param type Type of the ICMPv6 header + */ +static void +icmp6_send_response(struct pbuf *p, u8_t code, u32_t data, u8_t type) +{ + const struct ip6_addr *reply_src, *reply_dest; + struct netif *netif = ip_current_netif(); + + LWIP_ASSERT("icmpv6 packet not a direct response", netif != NULL); + reply_dest = ip6_current_src_addr(); + + /* Select an address to use as source. */ + reply_src = ip_2_ip6(ip6_select_source_address(netif, reply_dest)); + if (reply_src == NULL) { + ICMP6_STATS_INC(icmp6.rterr); + return; + } + icmp6_send_response_with_addrs_and_netif(p, code, data, type, reply_src, reply_dest, netif); +} + +/** + * Send an ICMPv6 packet in response to an incoming packet. + * + * Call this function if the packet is NOT sent as a direct response to an + * incoming packet, but rather sometime later (e.g. for a fragment reassembly + * timeout). The caller must provide the zoned source and destination addresses + * from the original packet with the src_addr and dest_addr parameters. The + * reason for this approach is that while the addresses themselves are part of + * the original packet, their zone information is not, thus possibly resulting + * in a link-local response being sent over the wrong link. + * + * @param p the input packet for which the response should be sent, + * p->payload pointing to the IPv6 header + * @param code Code of the ICMPv6 header + * @param data Additional 32-bit parameter in the ICMPv6 header + * @param type Type of the ICMPv6 header + * @param src_addr original source address + * @param dest_addr original destination address + */ +static void +icmp6_send_response_with_addrs(struct pbuf *p, u8_t code, u32_t data, u8_t type, + const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr) +{ + const struct ip6_addr *reply_src, *reply_dest; + struct netif *netif; + + /* Get the destination address and netif for this ICMP message. */ + LWIP_ASSERT("must provide both source and destination", src_addr != NULL); + LWIP_ASSERT("must provide both source and destination", dest_addr != NULL); + + /* Special case, as ip6_current_xxx is either NULL, or points + to a different packet than the one that expired. */ + IP6_ADDR_ZONECHECK(src_addr); + IP6_ADDR_ZONECHECK(dest_addr); + /* Swap source and destination for the reply. 
*/ + reply_dest = src_addr; + reply_src = dest_addr; + netif = ip6_route(reply_src, reply_dest); + if (netif == NULL) { + ICMP6_STATS_INC(icmp6.rterr); + return; + } + icmp6_send_response_with_addrs_and_netif(p, code, data, type, reply_src, + reply_dest, netif); +} + +/** + * Send an ICMPv6 packet (with srd/dst address and netif given). + * + * @param p the input packet for which the response should be sent, + * p->payload pointing to the IPv6 header + * @param code Code of the ICMPv6 header + * @param data Additional 32-bit parameter in the ICMPv6 header + * @param type Type of the ICMPv6 header + * @param reply_src source address of the packet to send + * @param reply_dest destination address of the packet to send + * @param netif netif to send the packet + */ +static void +icmp6_send_response_with_addrs_and_netif(struct pbuf *p, u8_t code, u32_t data, u8_t type, + const ip6_addr_t *reply_src, const ip6_addr_t *reply_dest, struct netif *netif) +{ + struct pbuf *q; + struct icmp6_hdr *icmp6hdr; + + /* ICMPv6 header + IPv6 header + data */ + q = pbuf_alloc(PBUF_IP, sizeof(struct icmp6_hdr) + IP6_HLEN + LWIP_ICMP6_DATASIZE, + PBUF_RAM); + if (q == NULL) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded: failed to allocate pbuf for ICMPv6 packet.\n")); + ICMP6_STATS_INC(icmp6.memerr); + return; + } + LWIP_ASSERT("check that first pbuf can hold icmp 6message", + (q->len >= (sizeof(struct icmp6_hdr) + IP6_HLEN + LWIP_ICMP6_DATASIZE))); + + icmp6hdr = (struct icmp6_hdr *)q->payload; + icmp6hdr->type = type; + icmp6hdr->code = code; + icmp6hdr->data = lwip_htonl(data); + + /* copy fields from original packet */ + SMEMCPY((u8_t *)q->payload + sizeof(struct icmp6_hdr), (u8_t *)p->payload, + IP6_HLEN + LWIP_ICMP6_DATASIZE); + + /* calculate checksum */ + icmp6hdr->chksum = 0; +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + icmp6hdr->chksum = ip6_chksum_pseudo(q, IP6_NEXTH_ICMP6, q->tot_len, + reply_src, reply_dest); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + ICMP6_STATS_INC(icmp6.xmit); + ip6_output_if(q, reply_src, reply_dest, LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, netif); + pbuf_free(q); +} + +#endif /* LWIP_ICMP6 && LWIP_IPV6 */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c b/Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c new file mode 100644 index 0000000..d9a992c --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c @@ -0,0 +1,53 @@ +/** + * @file + * + * INET v6 addresses. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/inet.h" + +/** This variable is initialized by the system to contain the wildcard IPv6 address. + */ +const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT; + +#endif /* LWIP_IPV6 */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c b/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c new file mode 100644 index 0000000..eda11dc --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c @@ -0,0 +1,1492 @@ +/** + * @file + * + * IPv6 layer. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/netif.h" +#include "lwip/ip.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/ip6_frag.h" +#include "lwip/icmp6.h" +#include "lwip/priv/raw_priv.h" +#include "lwip/udp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/dhcp6.h" +#include "lwip/nd6.h" +#include "lwip/mld6.h" +#include "lwip/debug.h" +#include "lwip/stats.h" + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/** + * Finds the appropriate network interface for a given IPv6 address. It tries to select + * a netif following a sequence of heuristics: + * 1) if there is only 1 netif, return it + * 2) if the destination is a zoned address, match its zone to a netif + * 3) if the either the source or destination address is a scoped address, + * match the source address's zone (if set) or address (if not) to a netif + * 4) tries to match the destination subnet to a configured address + * 5) tries to find a router-announced route + * 6) tries to match the (unscoped) source address to the netif + * 7) returns the default netif, if configured + * + * Note that each of the two given addresses may or may not be properly zoned. + * + * @param src the source IPv6 address, if known + * @param dest the destination IPv6 address for which to find the route + * @return the netif on which to send to reach dest + */ +struct netif * +ip6_route(const ip6_addr_t *src, const ip6_addr_t *dest) +{ +#if LWIP_SINGLE_NETIF + LWIP_UNUSED_ARG(src); + LWIP_UNUSED_ARG(dest); +#else /* LWIP_SINGLE_NETIF */ + struct netif *netif; + s8_t i; + + LWIP_ASSERT_CORE_LOCKED(); + + /* If single netif configuration, fast return. */ + if ((netif_list != NULL) && (netif_list->next == NULL)) { + if (!netif_is_up(netif_list) || !netif_is_link_up(netif_list) || + (ip6_addr_has_zone(dest) && !ip6_addr_test_zone(dest, netif_list))) { + return NULL; + } + return netif_list; + } + +#if LWIP_IPV6_SCOPES + /* Special processing for zoned destination addresses. This includes link- + * local unicast addresses and interface/link-local multicast addresses. Use + * the zone to find a matching netif. If the address is not zoned, then there + * is technically no "wrong" netif to choose, and we leave routing to other + * rules; in most cases this should be the scoped-source rule below. */ + if (ip6_addr_has_zone(dest)) { + IP6_ADDR_ZONECHECK(dest); + /* Find a netif based on the zone. For custom mappings, one zone may map + * to multiple netifs, so find one that can actually send a packet. */ + NETIF_FOREACH(netif) { + if (ip6_addr_test_zone(dest, netif) && + netif_is_up(netif) && netif_is_link_up(netif)) { + return netif; + } + } + /* No matching netif found. Do no try to route to a different netif, + * as that would be a zone violation, resulting in any packets sent to + * that netif being dropped on output. */ + return NULL; + } +#endif /* LWIP_IPV6_SCOPES */ + + /* Special processing for scoped source and destination addresses. If we get + * here, the destination address does not have a zone, so either way we need + * to look at the source address, which may or may not have a zone. If it + * does, the zone is restrictive: there is (typically) only one matching + * netif for it, and we should avoid routing to any other netif as that would + * result in guaranteed zone violations. 
For scoped source addresses that do + * not have a zone, use (only) a netif that has that source address locally + * assigned. This case also applies to the loopback source address, which has + * an implied link-local scope. If only the destination address is scoped + * (but, again, not zoned), we still want to use only the source address to + * determine its zone because that's most likely what the user/application + * wants, regardless of whether the source address is scoped. Finally, some + * of this story also applies if scoping is disabled altogether. */ +#if LWIP_IPV6_SCOPES + if (ip6_addr_has_scope(dest, IP6_UNKNOWN) || + ip6_addr_has_scope(src, IP6_UNICAST) || +#else /* LWIP_IPV6_SCOPES */ + if (ip6_addr_islinklocal(dest) || ip6_addr_ismulticast_iflocal(dest) || + ip6_addr_ismulticast_linklocal(dest) || ip6_addr_islinklocal(src) || +#endif /* LWIP_IPV6_SCOPES */ + ip6_addr_isloopback(src)) { +#if LWIP_IPV6_SCOPES + if (ip6_addr_has_zone(src)) { + /* Find a netif matching the source zone (relatively cheap). */ + NETIF_FOREACH(netif) { + if (netif_is_up(netif) && netif_is_link_up(netif) && + ip6_addr_test_zone(src, netif)) { + return netif; + } + } + } else +#endif /* LWIP_IPV6_SCOPES */ + { + /* Find a netif matching the source address (relatively expensive). */ + NETIF_FOREACH(netif) { + if (!netif_is_up(netif) || !netif_is_link_up(netif)) { + continue; + } + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp_zoneless(src, netif_ip6_addr(netif, i))) { + return netif; + } + } + } + } + /* Again, do not use any other netif in this case, as that could result in + * zone boundary violations. */ + return NULL; + } + + /* We come here only if neither source nor destination is scoped. */ + IP6_ADDR_ZONECHECK(src); + +#ifdef LWIP_HOOK_IP6_ROUTE + netif = LWIP_HOOK_IP6_ROUTE(src, dest); + if (netif != NULL) { + return netif; + } +#endif + + /* See if the destination subnet matches a configured address. In accordance + * with RFC 5942, dynamically configured addresses do not have an implied + * local subnet, and thus should be considered /128 assignments. However, as + * such, the destination address may still match a local address, and so we + * still need to check for exact matches here. By (lwIP) policy, statically + * configured addresses do always have an implied local /64 subnet. */ + NETIF_FOREACH(netif) { + if (!netif_is_up(netif) || !netif_is_link_up(netif)) { + continue; + } + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_netcmp(dest, netif_ip6_addr(netif, i)) && + (netif_ip6_addr_isstatic(netif, i) || + ip6_addr_nethostcmp(dest, netif_ip6_addr(netif, i)))) { + return netif; + } + } + } + + /* Get the netif for a suitable router-announced route. */ + netif = nd6_find_route(dest); + if (netif != NULL) { + return netif; + } + + /* Try with the netif that matches the source address. Given the earlier rule + * for scoped source addresses, this applies to unscoped addresses only. 
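+ * For example (illustrative): if a global address such as 2001:db8::1 is assigned on exactly one netif and a packet is sourced from it, that netif is chosen here even when no on-link prefix or router-announced route matched the destination above.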
*/ + if (!ip6_addr_isany(src)) { + NETIF_FOREACH(netif) { + if (!netif_is_up(netif) || !netif_is_link_up(netif)) { + continue; + } + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp(src, netif_ip6_addr(netif, i))) { + return netif; + } + } + } + } + +#if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF + /* loopif is disabled, loopback traffic is passed through any netif */ + if (ip6_addr_isloopback(dest)) { + /* don't check for link on loopback traffic */ + if (netif_default != NULL && netif_is_up(netif_default)) { + return netif_default; + } + /* default netif is not up, just use any netif for loopback traffic */ + NETIF_FOREACH(netif) { + if (netif_is_up(netif)) { + return netif; + } + } + return NULL; + } +#endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */ +#endif /* !LWIP_SINGLE_NETIF */ + + /* no matching netif found, use default netif, if up */ + if ((netif_default == NULL) || !netif_is_up(netif_default) || !netif_is_link_up(netif_default)) { + return NULL; + } + return netif_default; +} + +/** + * @ingroup ip6 + * Select the best IPv6 source address for a given destination IPv6 address. + * + * This implementation follows RFC 6724 Sec. 5 to the following extent: + * - Rules 1, 2, 3: fully implemented + * - Rules 4, 5, 5.5: not applicable + * - Rule 6: not implemented + * - Rule 7: not applicable + * - Rule 8: limited to "prefer /64 subnet match over non-match" + * + * For Rule 2, we deliberately deviate from RFC 6724 Sec. 3.1 by considering + * ULAs to be of smaller scope than global addresses, to avoid that a preferred + * ULA is picked over a deprecated global address when given a global address + * as destination, as that would likely result in broken two-way communication. + * + * As long as temporary addresses are not supported (as used in Rule 7), a + * proper implementation of Rule 8 would obviate the need to implement Rule 6. + * + * @param netif the netif on which to send a packet + * @param dest the destination we are trying to reach (possibly not properly + * zoned) + * @return the most suitable source address to use, or NULL if no suitable + * source address is found + */ +const ip_addr_t * +ip6_select_source_address(struct netif *netif, const ip6_addr_t *dest) +{ + const ip_addr_t *best_addr; + const ip6_addr_t *cand_addr; + s8_t dest_scope, cand_scope; + s8_t best_scope = IP6_MULTICAST_SCOPE_RESERVED; + u8_t i, cand_pref, cand_bits; + u8_t best_pref = 0; + u8_t best_bits = 0; + + /* Start by determining the scope of the given destination address. These + * tests are hopefully (roughly) in order of likeliness to match. */ + if (ip6_addr_isglobal(dest)) { + dest_scope = IP6_MULTICAST_SCOPE_GLOBAL; + } else if (ip6_addr_islinklocal(dest) || ip6_addr_isloopback(dest)) { + dest_scope = IP6_MULTICAST_SCOPE_LINK_LOCAL; + } else if (ip6_addr_isuniquelocal(dest)) { + dest_scope = IP6_MULTICAST_SCOPE_ORGANIZATION_LOCAL; + } else if (ip6_addr_ismulticast(dest)) { + dest_scope = ip6_addr_multicast_scope(dest); + } else if (ip6_addr_issitelocal(dest)) { + dest_scope = IP6_MULTICAST_SCOPE_SITE_LOCAL; + } else { + /* no match, consider scope global */ + dest_scope = IP6_MULTICAST_SCOPE_GLOBAL; + } + + best_addr = NULL; + + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + /* Consider only valid (= preferred and deprecated) addresses. */ + if (!ip6_addr_isvalid(netif_ip6_addr_state(netif, i))) { + continue; + } + /* Determine the scope of this candidate address. Same ordering idea. 
*/ + cand_addr = netif_ip6_addr(netif, i); + if (ip6_addr_isglobal(cand_addr)) { + cand_scope = IP6_MULTICAST_SCOPE_GLOBAL; + } else if (ip6_addr_islinklocal(cand_addr)) { + cand_scope = IP6_MULTICAST_SCOPE_LINK_LOCAL; + } else if (ip6_addr_isuniquelocal(cand_addr)) { + cand_scope = IP6_MULTICAST_SCOPE_ORGANIZATION_LOCAL; + } else if (ip6_addr_issitelocal(cand_addr)) { + cand_scope = IP6_MULTICAST_SCOPE_SITE_LOCAL; + } else { + /* no match, treat as low-priority global scope */ + cand_scope = IP6_MULTICAST_SCOPE_RESERVEDF; + } + cand_pref = ip6_addr_ispreferred(netif_ip6_addr_state(netif, i)); + /* @todo compute the actual common bits, for longest matching prefix. */ + /* We cannot count on the destination address having a proper zone + * assignment, so do not compare zones in this case. */ + cand_bits = ip6_addr_netcmp_zoneless(cand_addr, dest); /* just 1 or 0 for now */ + if (cand_bits && ip6_addr_nethostcmp(cand_addr, dest)) { + return netif_ip_addr6(netif, i); /* Rule 1 */ + } + if ((best_addr == NULL) || /* no alternative yet */ + ((cand_scope < best_scope) && (cand_scope >= dest_scope)) || + ((cand_scope > best_scope) && (best_scope < dest_scope)) || /* Rule 2 */ + ((cand_scope == best_scope) && ((cand_pref > best_pref) || /* Rule 3 */ + ((cand_pref == best_pref) && (cand_bits > best_bits))))) { /* Rule 8 */ + /* We found a new "winning" candidate. */ + best_addr = netif_ip_addr6(netif, i); + best_scope = cand_scope; + best_pref = cand_pref; + best_bits = cand_bits; + } + } + + return best_addr; /* may be NULL */ +} + +#if LWIP_IPV6_FORWARD +/** + * Forwards an IPv6 packet. It finds an appropriate route for the + * packet, decrements the HL value of the packet, and outputs + * the packet on the appropriate interface. + * + * @param p the packet to forward (p->payload points to IP header) + * @param iphdr the IPv6 header of the input packet + * @param inp the netif on which this packet was received + */ +static void +ip6_forward(struct pbuf *p, struct ip6_hdr *iphdr, struct netif *inp) +{ + struct netif *netif; + + /* do not forward link-local or loopback addresses */ + if (ip6_addr_islinklocal(ip6_current_dest_addr()) || + ip6_addr_isloopback(ip6_current_dest_addr())) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: not forwarding link-local address.\n")); + IP6_STATS_INC(ip6.rterr); + IP6_STATS_INC(ip6.drop); + return; + } + + /* Find network interface where to forward this IP packet to. */ + netif = ip6_route(IP6_ADDR_ANY6, ip6_current_dest_addr()); + if (netif == NULL) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: no route for %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", + IP6_ADDR_BLOCK1(ip6_current_dest_addr()), + IP6_ADDR_BLOCK2(ip6_current_dest_addr()), + IP6_ADDR_BLOCK3(ip6_current_dest_addr()), + IP6_ADDR_BLOCK4(ip6_current_dest_addr()), + IP6_ADDR_BLOCK5(ip6_current_dest_addr()), + IP6_ADDR_BLOCK6(ip6_current_dest_addr()), + IP6_ADDR_BLOCK7(ip6_current_dest_addr()), + IP6_ADDR_BLOCK8(ip6_current_dest_addr()))); +#if LWIP_ICMP6 + /* Don't send ICMP messages in response to ICMP messages */ + if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) { + icmp6_dest_unreach(p, ICMP6_DUR_NO_ROUTE); + } +#endif /* LWIP_ICMP6 */ + IP6_STATS_INC(ip6.rterr); + IP6_STATS_INC(ip6.drop); + return; + } +#if LWIP_IPV6_SCOPES + /* Do not forward packets with a zoned (e.g., link-local) source address + * outside of their zone. We determined the zone a bit earlier, so we know + * that the address is properly zoned here, so we can safely use has_zone. 
+ * Also skip packets with a loopback source address (link-local implied). */ + if ((ip6_addr_has_zone(ip6_current_src_addr()) && + !ip6_addr_test_zone(ip6_current_src_addr(), netif)) || + ip6_addr_isloopback(ip6_current_src_addr())) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: not forwarding packet beyond its source address zone.\n")); + IP6_STATS_INC(ip6.rterr); + IP6_STATS_INC(ip6.drop); + return; + } +#endif /* LWIP_IPV6_SCOPES */ + /* Do not forward packets onto the same network interface on which + * they arrived. */ + if (netif == inp) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: not bouncing packets back on incoming interface.\n")); + IP6_STATS_INC(ip6.rterr); + IP6_STATS_INC(ip6.drop); + return; + } + + /* decrement HL */ + IP6H_HOPLIM_SET(iphdr, IP6H_HOPLIM(iphdr) - 1); + /* send ICMP6 if HL == 0 */ + if (IP6H_HOPLIM(iphdr) == 0) { +#if LWIP_ICMP6 + /* Don't send ICMP messages in response to ICMP messages */ + if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) { + icmp6_time_exceeded(p, ICMP6_TE_HL); + } +#endif /* LWIP_ICMP6 */ + IP6_STATS_INC(ip6.drop); + return; + } + + if (netif->mtu && (p->tot_len > netif->mtu)) { +#if LWIP_ICMP6 + /* Don't send ICMP messages in response to ICMP messages */ + if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) { + icmp6_packet_too_big(p, netif->mtu); + } +#endif /* LWIP_ICMP6 */ + IP6_STATS_INC(ip6.drop); + return; + } + + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: forwarding packet to %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", + IP6_ADDR_BLOCK1(ip6_current_dest_addr()), + IP6_ADDR_BLOCK2(ip6_current_dest_addr()), + IP6_ADDR_BLOCK3(ip6_current_dest_addr()), + IP6_ADDR_BLOCK4(ip6_current_dest_addr()), + IP6_ADDR_BLOCK5(ip6_current_dest_addr()), + IP6_ADDR_BLOCK6(ip6_current_dest_addr()), + IP6_ADDR_BLOCK7(ip6_current_dest_addr()), + IP6_ADDR_BLOCK8(ip6_current_dest_addr()))); + + /* transmit pbuf on chosen interface */ + netif->output_ip6(netif, p, ip6_current_dest_addr()); + IP6_STATS_INC(ip6.fw); + IP6_STATS_INC(ip6.xmit); + return; +} +#endif /* LWIP_IPV6_FORWARD */ + +/** Return true if the current input packet should be accepted on this netif */ +static int +ip6_input_accept(struct netif *netif) +{ + /* interface is up? */ + if (netif_is_up(netif)) { + u8_t i; + /* unicast to this interface address? address configured? */ + /* If custom scopes are used, the destination zone will be tested as + * part of the local-address comparison, but we need to test the source + * scope as well (e.g., is this interface on the same link?). */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp(ip6_current_dest_addr(), netif_ip6_addr(netif, i)) +#if IPV6_CUSTOM_SCOPES + && (!ip6_addr_has_zone(ip6_current_src_addr()) || + ip6_addr_test_zone(ip6_current_src_addr(), netif)) +#endif /* IPV6_CUSTOM_SCOPES */ + ) { + /* accept on this netif */ + return 1; + } + } + } + return 0; +} + +/** + * This function is called by the network interface device driver when + * an IPv6 packet is received. The function does the basic checks of the + * IP header such as packet size being at least larger than the header + * size etc. If the packet was not destined for us, the packet is + * forwarded (using ip6_forward). + * + * Finally, the packet is sent to the upper layer protocol input function. 
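+ * A minimal wiring sketch (illustrative; the ethernetif_* names come from the usual lwIP port skeleton and are an assumption, not defined in this file): netif_add(&netif, ..., ethernetif_init, tcpip_input); received frames then travel ethernetif_input() -> tcpip_input() -> ethernet_input() -> ip6_input().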
+ * + * @param p the received IPv6 packet (p->payload points to IPv6 header) + * @param inp the netif on which this packet was received + * @return ERR_OK if the packet was processed (could return ERR_* if it wasn't + * processed, but currently always returns ERR_OK) + */ +err_t +ip6_input(struct pbuf *p, struct netif *inp) +{ + struct ip6_hdr *ip6hdr; + struct netif *netif; + const u8_t *nexth; + u16_t hlen, hlen_tot; /* the current header length */ +#if 0 /*IP_ACCEPT_LINK_LAYER_ADDRESSING*/ + @todo + int check_ip_src=1; +#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */ +#if LWIP_RAW + raw_input_state_t raw_status; +#endif /* LWIP_RAW */ + + LWIP_ASSERT_CORE_LOCKED(); + + IP6_STATS_INC(ip6.recv); + + /* identify the IP header */ + ip6hdr = (struct ip6_hdr *)p->payload; + if (IP6H_V(ip6hdr) != 6) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_WARNING, ("IPv6 packet dropped due to bad version number %"U32_F"\n", + IP6H_V(ip6hdr))); + pbuf_free(p); + IP6_STATS_INC(ip6.err); + IP6_STATS_INC(ip6.drop); + return ERR_OK; + } + +#ifdef LWIP_HOOK_IP6_INPUT + if (LWIP_HOOK_IP6_INPUT(p, inp)) { + /* the packet has been eaten */ + return ERR_OK; + } +#endif + + /* header length exceeds first pbuf length, or ip length exceeds total pbuf length? */ + if ((IP6_HLEN > p->len) || (IP6H_PLEN(ip6hdr) > (p->tot_len - IP6_HLEN))) { + if (IP6_HLEN > p->len) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 header (len %"U16_F") does not fit in first pbuf (len %"U16_F"), IP packet dropped.\n", + (u16_t)IP6_HLEN, p->len)); + } + if ((IP6H_PLEN(ip6hdr) + IP6_HLEN) > p->tot_len) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 (plen %"U16_F") is longer than pbuf (len %"U16_F"), IP packet dropped.\n", + (u16_t)(IP6H_PLEN(ip6hdr) + IP6_HLEN), p->tot_len)); + } + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.lenerr); + IP6_STATS_INC(ip6.drop); + return ERR_OK; + } + + /* Trim pbuf. This should have been done at the netif layer, + * but we'll do it anyway just to be sure that its done. */ + pbuf_realloc(p, (u16_t)(IP6_HLEN + IP6H_PLEN(ip6hdr))); + + /* copy IP addresses to aligned ip6_addr_t */ + ip_addr_copy_from_ip6_packed(ip_data.current_iphdr_dest, ip6hdr->dest); + ip_addr_copy_from_ip6_packed(ip_data.current_iphdr_src, ip6hdr->src); + + /* Don't accept virtual IPv4 mapped IPv6 addresses. + * Don't accept multicast source addresses. */ + if (ip6_addr_isipv4mappedipv6(ip_2_ip6(&ip_data.current_iphdr_dest)) || + ip6_addr_isipv4mappedipv6(ip_2_ip6(&ip_data.current_iphdr_src)) || + ip6_addr_ismulticast(ip_2_ip6(&ip_data.current_iphdr_src))) { + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.err); + IP6_STATS_INC(ip6.drop); + return ERR_OK; + } + + /* Set the appropriate zone identifier on the addresses. */ + ip6_addr_assign_zone(ip_2_ip6(&ip_data.current_iphdr_dest), IP6_UNKNOWN, inp); + ip6_addr_assign_zone(ip_2_ip6(&ip_data.current_iphdr_src), IP6_UNICAST, inp); + + /* current header pointer. */ + ip_data.current_ip6_header = ip6hdr; + + /* In netif, used in case we need to send ICMPv6 packets back. */ + ip_data.current_netif = inp; + ip_data.current_input_netif = inp; + + /* match packet against an interface, i.e. is this packet for us? */ + if (ip6_addr_ismulticast(ip6_current_dest_addr())) { + /* Always joined to multicast if-local and link-local all-nodes group. 
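+ (the interface-local ff01::1 and link-local ff02::1 groups), so such packets are accepted here without an MLD group lookup.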
*/ + if (ip6_addr_isallnodes_iflocal(ip6_current_dest_addr()) || + ip6_addr_isallnodes_linklocal(ip6_current_dest_addr())) { + netif = inp; + } +#if LWIP_IPV6_MLD + else if (mld6_lookfor_group(inp, ip6_current_dest_addr())) { + netif = inp; + } +#else /* LWIP_IPV6_MLD */ + else if (ip6_addr_issolicitednode(ip6_current_dest_addr())) { + u8_t i; + /* Filter solicited node packets when MLD is not enabled + * (for Neighbor discovery). */ + netif = NULL; + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(inp, i)) && + ip6_addr_cmp_solicitednode(ip6_current_dest_addr(), netif_ip6_addr(inp, i))) { + netif = inp; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: solicited node packet accepted on interface %c%c\n", + netif->name[0], netif->name[1])); + break; + } + } + } +#endif /* LWIP_IPV6_MLD */ + else { + netif = NULL; + } + } else { + /* start trying with inp. if that's not acceptable, start walking the + list of configured netifs. */ + if (ip6_input_accept(inp)) { + netif = inp; + } else { + netif = NULL; +#if !IPV6_CUSTOM_SCOPES + /* Shortcut: stop looking for other interfaces if either the source or + * the destination has a scope constrained to this interface. Custom + * scopes may break the 1:1 link/interface mapping, however. */ + if (ip6_addr_islinklocal(ip6_current_dest_addr()) || + ip6_addr_islinklocal(ip6_current_src_addr())) { + goto netif_found; + } +#endif /* !IPV6_CUSTOM_SCOPES */ +#if !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF + /* The loopback address is to be considered link-local. Packets to it + * should be dropped on other interfaces, as per RFC 4291 Sec. 2.5.3. + * Its implied scope means packets *from* the loopback address should + * not be accepted on other interfaces, either. These requirements + * cannot be implemented in the case that loopback traffic is sent + * across a non-loopback interface, however. */ + if (ip6_addr_isloopback(ip6_current_dest_addr()) || + ip6_addr_isloopback(ip6_current_src_addr())) { + goto netif_found; + } +#endif /* !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF */ +#if !LWIP_SINGLE_NETIF + NETIF_FOREACH(netif) { + if (netif == inp) { + /* we checked that before already */ + continue; + } + if (ip6_input_accept(netif)) { + break; + } + } +#endif /* !LWIP_SINGLE_NETIF */ + } +netif_found: + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet accepted on interface %c%c\n", + netif ? netif->name[0] : 'X', netif? netif->name[1] : 'X')); + } + + /* "::" packet source address? (used in duplicate address detection) */ + if (ip6_addr_isany(ip6_current_src_addr()) && + (!ip6_addr_issolicitednode(ip6_current_dest_addr()))) { + /* packet source is not valid */ + /* free (drop) packet pbufs */ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with src ANY_ADDRESS dropped\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + /* packet not for us? */ + if (netif == NULL) { + /* packet not for us, route or discard */ + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_TRACE, ("ip6_input: packet not for us.\n")); +#if LWIP_IPV6_FORWARD + /* non-multicast packet? */ + if (!ip6_addr_ismulticast(ip6_current_dest_addr())) { + /* try to forward IP packet on (other) interfaces */ + ip6_forward(p, ip6hdr, inp); + } +#endif /* LWIP_IPV6_FORWARD */ + pbuf_free(p); + goto ip6_input_cleanup; + } + + /* current netif pointer. */ + ip_data.current_netif = netif; + + /* Save next header type. */ + nexth = &IP6H_NEXTH(ip6hdr); + + /* Init header length. */ + hlen = hlen_tot = IP6_HLEN; + + /* Move to payload. 
*/ + pbuf_remove_header(p, IP6_HLEN); + + /* Process known option extension headers, if present. */ + while (*nexth != IP6_NEXTH_NONE) + { + switch (*nexth) { + case IP6_NEXTH_HOPBYHOP: + { + s32_t opt_offset; + struct ip6_hbh_hdr *hbh_hdr; + struct ip6_opt_hdr *opt_hdr; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Hop-by-Hop options header\n")); + + /* Get and check the header length, while staying in packet bounds. */ + hbh_hdr = (struct ip6_hbh_hdr *)p->payload; + + /* Get next header type. */ + nexth = &IP6_HBH_NEXTH(hbh_hdr); + + /* Get the header length. */ + hlen = (u16_t)(8 * (1 + hbh_hdr->_hlen)); + + if ((p->len < 8) || (hlen > p->len)) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", + hlen, p->len)); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.lenerr); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + hlen_tot = (u16_t)(hlen_tot + hlen); + + /* The extended option header starts right after Hop-by-Hop header. */ + opt_offset = IP6_HBH_HLEN; + while (opt_offset < hlen) + { + s32_t opt_dlen = 0; + + opt_hdr = (struct ip6_opt_hdr *)((u8_t *)hbh_hdr + opt_offset); + + switch (IP6_OPT_TYPE(opt_hdr)) { + /* @todo: process IPV6 Hop-by-Hop option data */ + case IP6_PAD1_OPTION: + /* PAD1 option doesn't have length and value field */ + opt_dlen = -1; + break; + case IP6_PADN_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_ROUTER_ALERT_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_JUMBO_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + default: + /* Check 2 MSB of Hop-by-Hop header type. */ + switch (IP6_OPT_TYPE_ACTION(opt_hdr)) { + case 1: + /* Discard the packet. */ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid Hop-by-Hop option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + case 2: + /* Send ICMP Parameter Problem */ + icmp6_param_problem(p, ICMP6_PP_OPTION, opt_hdr); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid Hop-by-Hop option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + case 3: + /* Send ICMP Parameter Problem if destination address is not a multicast address */ + if (!ip6_addr_ismulticast(ip6_current_dest_addr())) { + icmp6_param_problem(p, ICMP6_PP_OPTION, opt_hdr); + } + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid Hop-by-Hop option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + default: + /* Skip over this option. */ + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + } + break; + } + + /* Adjust the offset to move to the next extended option header */ + opt_offset = opt_offset + IP6_OPT_HLEN + opt_dlen; + } + pbuf_remove_header(p, hlen); + break; + } + case IP6_NEXTH_DESTOPTS: + { + s32_t opt_offset; + struct ip6_dest_hdr *dest_hdr; + struct ip6_opt_hdr *opt_hdr; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Destination options header\n")); + + dest_hdr = (struct ip6_dest_hdr *)p->payload; + + /* Get next header type. */ + nexth = &IP6_DEST_NEXTH(dest_hdr); + + /* Get the header length. 
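+ The extension-header length field counts 8-octet units beyond the first 8 octets (RFC 8200), which is why the code below computes 8 * (1 + _hlen) bytes.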
*/ + hlen = 8 * (1 + dest_hdr->_hlen); + if ((p->len < 8) || (hlen > p->len)) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", + hlen, p->len)); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.lenerr); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + hlen_tot = (u16_t)(hlen_tot + hlen); + + /* The extended option header starts right after Destination header. */ + opt_offset = IP6_DEST_HLEN; + while (opt_offset < hlen) + { + s32_t opt_dlen = 0; + + opt_hdr = (struct ip6_opt_hdr *)((u8_t *)dest_hdr + opt_offset); + + switch (IP6_OPT_TYPE(opt_hdr)) + { + /* @todo: process IPV6 Destination option data */ + case IP6_PAD1_OPTION: + /* PAD1 option deosn't have length and value field */ + opt_dlen = -1; + break; + case IP6_PADN_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_ROUTER_ALERT_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_JUMBO_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_HOME_ADDRESS_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + default: + /* Check 2 MSB of Destination header type. */ + switch (IP6_OPT_TYPE_ACTION(opt_hdr)) + { + case 1: + /* Discard the packet. */ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid destination option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + case 2: + /* Send ICMP Parameter Problem */ + icmp6_param_problem(p, ICMP6_PP_OPTION, opt_hdr); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid destination option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + case 3: + /* Send ICMP Parameter Problem if destination address is not a multicast address */ + if (!ip6_addr_ismulticast(ip6_current_dest_addr())) { + icmp6_param_problem(p, ICMP6_PP_OPTION, opt_hdr); + } + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid destination option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + default: + /* Skip over this option. */ + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + } + break; + } + + /* Adjust the offset to move to the next extended option header */ + opt_offset = opt_offset + IP6_OPT_HLEN + opt_dlen; + } + + pbuf_remove_header(p, hlen); + break; + } + case IP6_NEXTH_ROUTING: + { + struct ip6_rout_hdr *rout_hdr; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Routing header\n")); + + rout_hdr = (struct ip6_rout_hdr *)p->payload; + + /* Get next header type. */ + nexth = &IP6_ROUT_NEXTH(rout_hdr); + + /* Get the header length. */ + hlen = 8 * (1 + rout_hdr->_hlen); + + if ((p->len < 8) || (hlen > p->len)) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", + hlen, p->len)); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.lenerr); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + /* Skip over this header. 
*/ + hlen_tot = (u16_t)(hlen_tot + hlen); + + /* if segment left value is 0 in routing header, ignore the option */ + if (IP6_ROUT_SEG_LEFT(rout_hdr)) { + /* The length field of routing option header must be even */ + if (rout_hdr->_hlen & 0x1) { + /* Discard and send parameter field error */ + icmp6_param_problem(p, ICMP6_PP_FIELD, &rout_hdr->_hlen); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid routing type dropped\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + switch (IP6_ROUT_TYPE(rout_hdr)) + { + /* TODO: process routing by the type */ + case IP6_ROUT_TYPE2: + break; + case IP6_ROUT_RPL: + break; + default: + /* Discard unrecognized routing type and send parameter field error */ + icmp6_param_problem(p, ICMP6_PP_FIELD, &IP6_ROUT_TYPE(rout_hdr)); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid routing type dropped\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + } + + pbuf_remove_header(p, hlen); + break; + } + case IP6_NEXTH_FRAGMENT: + { + struct ip6_frag_hdr *frag_hdr; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Fragment header\n")); + + frag_hdr = (struct ip6_frag_hdr *)p->payload; + + /* Get next header type. */ + nexth = &IP6_FRAG_NEXTH(frag_hdr); + + /* Fragment Header length. */ + hlen = 8; + + /* Make sure this header fits in current pbuf. */ + if (hlen > p->len) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", + hlen, p->len)); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_FRAG_STATS_INC(ip6_frag.lenerr); + IP6_FRAG_STATS_INC(ip6_frag.drop); + goto ip6_input_cleanup; + } + + hlen_tot = (u16_t)(hlen_tot + hlen); + + /* check payload length is multiple of 8 octets when mbit is set */ + if (IP6_FRAG_MBIT(frag_hdr) && (IP6H_PLEN(ip6hdr) & 0x7)) { + /* ipv6 payload length is not multiple of 8 octets */ + icmp6_param_problem(p, ICMP6_PP_FIELD, LWIP_PACKED_CAST(const void *, &ip6hdr->_plen)); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid payload length dropped\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + /* Offset == 0 and more_fragments == 0? */ + if ((frag_hdr->_fragment_offset & + PP_HTONS(IP6_FRAG_OFFSET_MASK | IP6_FRAG_MORE_FLAG)) == 0) { + /* This is a 1-fragment packet. Skip this header and continue. */ + pbuf_remove_header(p, hlen); + } else { +#if LWIP_IPV6_REASS + /* reassemble the packet */ + ip_data.current_ip_header_tot_len = hlen_tot; + p = ip6_reass(p); + /* packet not fully reassembled yet? */ + if (p == NULL) { + goto ip6_input_cleanup; + } + + /* Returned p point to IPv6 header. + * Update all our variables and pointers and continue. 
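+ * The reassembled datagram has replaced the original fragment chain, so parsing restarts from the rebuilt IPv6 header with the accumulated header length reset.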
*/ + ip6hdr = (struct ip6_hdr *)p->payload; + nexth = &IP6H_NEXTH(ip6hdr); + hlen = hlen_tot = IP6_HLEN; + pbuf_remove_header(p, IP6_HLEN); + +#else /* LWIP_IPV6_REASS */ + /* free (drop) packet pbufs */ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Fragment header dropped (with LWIP_IPV6_REASS==0)\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.opterr); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; +#endif /* LWIP_IPV6_REASS */ + } + break; + } + default: + goto options_done; + } + + if (*nexth == IP6_NEXTH_HOPBYHOP) { + /* Hop-by-Hop header comes only as a first option */ + icmp6_param_problem(p, ICMP6_PP_HEADER, nexth); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Hop-by-Hop options header dropped (only valid as a first option)\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + } + +options_done: + + /* send to upper layers */ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: \n")); + ip6_debug_print(p); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: p->len %"U16_F" p->tot_len %"U16_F"\n", p->len, p->tot_len)); + + ip_data.current_ip_header_tot_len = hlen_tot; + +#if LWIP_RAW + /* p points to IPv6 header again for raw_input. */ + pbuf_add_header_force(p, hlen_tot); + /* raw input did not eat the packet? */ + raw_status = raw_input(p, inp); + if (raw_status != RAW_INPUT_EATEN) + { + /* Point to payload. */ + pbuf_remove_header(p, hlen_tot); +#else /* LWIP_RAW */ + { +#endif /* LWIP_RAW */ + switch (*nexth) { + case IP6_NEXTH_NONE: + pbuf_free(p); + break; +#if LWIP_UDP + case IP6_NEXTH_UDP: +#if LWIP_UDPLITE + case IP6_NEXTH_UDPLITE: +#endif /* LWIP_UDPLITE */ + udp_input(p, inp); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case IP6_NEXTH_TCP: + tcp_input(p, inp); + break; +#endif /* LWIP_TCP */ +#if LWIP_ICMP6 + case IP6_NEXTH_ICMP6: + icmp6_input(p, inp); + break; +#endif /* LWIP_ICMP */ + default: +#if LWIP_RAW + if (raw_status == RAW_INPUT_DELIVERED) { + /* @todo: ipv6 mib in-delivers? */ + } else +#endif /* LWIP_RAW */ + { +#if LWIP_ICMP6 + /* p points to IPv6 header again for raw_input. */ + pbuf_add_header_force(p, hlen_tot); + /* send ICMP parameter problem unless it was a multicast or ICMPv6 */ + if ((!ip6_addr_ismulticast(ip6_current_dest_addr())) && + (IP6H_NEXTH(ip6hdr) != IP6_NEXTH_ICMP6)) { + icmp6_param_problem(p, ICMP6_PP_HEADER, nexth); + } +#endif /* LWIP_ICMP */ + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip6_input: Unsupported transport protocol %"U16_F"\n", (u16_t)IP6H_NEXTH(ip6hdr))); + IP6_STATS_INC(ip6.proterr); + IP6_STATS_INC(ip6.drop); + } + pbuf_free(p); + break; + } + } + +ip6_input_cleanup: + ip_data.current_netif = NULL; + ip_data.current_input_netif = NULL; + ip_data.current_ip6_header = NULL; + ip_data.current_ip_header_tot_len = 0; + ip6_addr_set_zero(ip6_current_src_addr()); + ip6_addr_set_zero(ip6_current_dest_addr()); + + return ERR_OK; +} + + +/** + * Sends an IPv6 packet on a network interface. This function constructs + * the IPv6 header. If the source IPv6 address is NULL, the IPv6 "ANY" address is + * used as source (usually during network startup). If the source IPv6 address it + * IP6_ADDR_ANY, the most appropriate IPv6 address of the outgoing network + * interface is filled in as source address. If the destination IPv6 address is + * LWIP_IP_HDRINCL, p is assumed to already include an IPv6 header and + * p->payload points to it instead of the data. + * + * @param p the packet to send (p->payload points to the data, e.g. 
next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IPv6 header and p->payload points to that IPv6 header) + * @param src the source IPv6 address to send from (if src == IP6_ADDR_ANY, an + * IP address of the netif is selected and used as source address. + * if src == NULL, IP6_ADDR_ANY is used as source) (src is possibly not + * properly zoned) + * @param dest the destination IPv6 address to send the packet to (possibly not + * properly zoned) + * @param hl the Hop Limit value to be set in the IPv6 header + * @param tc the Traffic Class value to be set in the IPv6 header + * @param nexth the Next Header to be set in the IPv6 header + * @param netif the netif on which to send this packet + * @return ERR_OK if the packet was sent OK + * ERR_BUF if p doesn't have enough space for IPv6/LINK headers + * returns errors returned by netif->output_ip6 + */ +err_t +ip6_output_if(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, + u8_t nexth, struct netif *netif) +{ + const ip6_addr_t *src_used = src; + if (dest != LWIP_IP_HDRINCL) { + if (src != NULL && ip6_addr_isany(src)) { + src_used = ip_2_ip6(ip6_select_source_address(netif, dest)); + if ((src_used == NULL) || ip6_addr_isany(src_used)) { + /* No appropriate source address was found for this packet. */ + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip6_output: No suitable source address for packet.\n")); + IP6_STATS_INC(ip6.rterr); + return ERR_RTE; + } + } + } + return ip6_output_if_src(p, src_used, dest, hl, tc, nexth, netif); +} + +/** + * Same as ip6_output_if() but 'src' address is not replaced by netif address + * when it is 'any'. + */ +err_t +ip6_output_if_src(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, + u8_t nexth, struct netif *netif) +{ + struct ip6_hdr *ip6hdr; + ip6_addr_t dest_addr; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + /* Should the IPv6 header be generated or is it already included in p? */ + if (dest != LWIP_IP_HDRINCL) { +#if LWIP_IPV6_SCOPES + /* If the destination address is scoped but lacks a zone, add a zone now, + * based on the outgoing interface. The lower layers (e.g., nd6) absolutely + * require addresses to be properly zoned for correctness. In some cases, + * earlier attempts will have been made to add a zone to the destination, + * but this function is the only one that is called in all (other) cases, + * so we must do this here. 
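+ * As an illustration, a link-local destination such as fe80::1 that arrives here without a zone is copied and tagged with the zone of 'netif', so the later nd6 neighbor lookup resolves it on the correct link.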
*/ + if (ip6_addr_lacks_zone(dest, IP6_UNKNOWN)) { + ip6_addr_copy(dest_addr, *dest); + ip6_addr_assign_zone(&dest_addr, IP6_UNKNOWN, netif); + dest = &dest_addr; + } +#endif /* LWIP_IPV6_SCOPES */ + + /* generate IPv6 header */ + if (pbuf_add_header(p, IP6_HLEN)) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip6_output: not enough room for IPv6 header in pbuf\n")); + IP6_STATS_INC(ip6.err); + return ERR_BUF; + } + + ip6hdr = (struct ip6_hdr *)p->payload; + LWIP_ASSERT("check that first pbuf can hold struct ip6_hdr", + (p->len >= sizeof(struct ip6_hdr))); + + IP6H_HOPLIM_SET(ip6hdr, hl); + IP6H_NEXTH_SET(ip6hdr, nexth); + + /* dest cannot be NULL here */ + ip6_addr_copy_to_packed(ip6hdr->dest, *dest); + + IP6H_VTCFL_SET(ip6hdr, 6, tc, 0); + IP6H_PLEN_SET(ip6hdr, (u16_t)(p->tot_len - IP6_HLEN)); + + if (src == NULL) { + src = IP6_ADDR_ANY6; + } + /* src cannot be NULL here */ + ip6_addr_copy_to_packed(ip6hdr->src, *src); + + } else { + /* IP header already included in p */ + ip6hdr = (struct ip6_hdr *)p->payload; + ip6_addr_copy_from_packed(dest_addr, ip6hdr->dest); + ip6_addr_assign_zone(&dest_addr, IP6_UNKNOWN, netif); + dest = &dest_addr; + } + + IP6_STATS_INC(ip6.xmit); + + LWIP_DEBUGF(IP6_DEBUG, ("ip6_output_if: %c%c%"U16_F"\n", netif->name[0], netif->name[1], (u16_t)netif->num)); + ip6_debug_print(p); + +#if ENABLE_LOOPBACK + { + int i; +#if !LWIP_HAVE_LOOPIF + if (ip6_addr_isloopback(dest)) { + return netif_loop_output(netif, p); + } +#endif /* !LWIP_HAVE_LOOPIF */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp(dest, netif_ip6_addr(netif, i))) { + /* Packet to self, enqueue it for loopback */ + LWIP_DEBUGF(IP6_DEBUG, ("netif_loop_output()\n")); + return netif_loop_output(netif, p); + } + } + } +#if LWIP_MULTICAST_TX_OPTIONS + if ((p->flags & PBUF_FLAG_MCASTLOOP) != 0) { + netif_loop_output(netif, p); + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ +#endif /* ENABLE_LOOPBACK */ +#if LWIP_IPV6_FRAG + /* don't fragment if interface has mtu set to 0 [loopif] */ + if (netif_mtu6(netif) && (p->tot_len > nd6_get_destination_mtu(dest, netif))) { + return ip6_frag(p, netif, dest); + } +#endif /* LWIP_IPV6_FRAG */ + + LWIP_DEBUGF(IP6_DEBUG, ("netif->output_ip6()\n")); + return netif->output_ip6(netif, p, dest); +} + +/** + * Simple interface to ip6_output_if. It finds the outgoing network + * interface and calls upon ip6_output_if to do the actual work. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IPv6 header and p->payload points to that IPv6 header) + * @param src the source IPv6 address to send from (if src == IP6_ADDR_ANY, an + * IP address of the netif is selected and used as source address. 
+ * if src == NULL, IP6_ADDR_ANY is used as source) + * @param dest the destination IPv6 address to send the packet to + * @param hl the Hop Limit value to be set in the IPv6 header + * @param tc the Traffic Class value to be set in the IPv6 header + * @param nexth the Next Header to be set in the IPv6 header + * + * @return ERR_RTE if no route is found + * see ip_output_if() for more return values + */ +err_t +ip6_output(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth) +{ + struct netif *netif; + struct ip6_hdr *ip6hdr; + ip6_addr_t src_addr, dest_addr; + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + if (dest != LWIP_IP_HDRINCL) { + netif = ip6_route(src, dest); + } else { + /* IP header included in p, read addresses. */ + ip6hdr = (struct ip6_hdr *)p->payload; + ip6_addr_copy_from_packed(src_addr, ip6hdr->src); + ip6_addr_copy_from_packed(dest_addr, ip6hdr->dest); + netif = ip6_route(&src_addr, &dest_addr); + } + + if (netif == NULL) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_output: no route for %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", + IP6_ADDR_BLOCK1(dest), + IP6_ADDR_BLOCK2(dest), + IP6_ADDR_BLOCK3(dest), + IP6_ADDR_BLOCK4(dest), + IP6_ADDR_BLOCK5(dest), + IP6_ADDR_BLOCK6(dest), + IP6_ADDR_BLOCK7(dest), + IP6_ADDR_BLOCK8(dest))); + IP6_STATS_INC(ip6.rterr); + return ERR_RTE; + } + + return ip6_output_if(p, src, dest, hl, tc, nexth, netif); +} + + +#if LWIP_NETIF_USE_HINTS +/** Like ip6_output, but takes and addr_hint pointer that is passed on to netif->addr_hint + * before calling ip6_output_if. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IPv6 header and p->payload points to that IPv6 header) + * @param src the source IPv6 address to send from (if src == IP6_ADDR_ANY, an + * IP address of the netif is selected and used as source address. + * if src == NULL, IP6_ADDR_ANY is used as source) + * @param dest the destination IPv6 address to send the packet to + * @param hl the Hop Limit value to be set in the IPv6 header + * @param tc the Traffic Class value to be set in the IPv6 header + * @param nexth the Next Header to be set in the IPv6 header + * @param netif_hint netif output hint pointer set to netif->hint before + * calling ip_output_if() + * + * @return ERR_RTE if no route is found + * see ip_output_if() for more return values + */ +err_t +ip6_output_hinted(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth, struct netif_hint *netif_hint) +{ + struct netif *netif; + struct ip6_hdr *ip6hdr; + ip6_addr_t src_addr, dest_addr; + err_t err; + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + if (dest != LWIP_IP_HDRINCL) { + netif = ip6_route(src, dest); + } else { + /* IP header included in p, read addresses. 
*/ + ip6hdr = (struct ip6_hdr *)p->payload; + ip6_addr_copy_from_packed(src_addr, ip6hdr->src); + ip6_addr_copy_from_packed(dest_addr, ip6hdr->dest); + netif = ip6_route(&src_addr, &dest_addr); + } + + if (netif == NULL) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_output: no route for %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", + IP6_ADDR_BLOCK1(dest), + IP6_ADDR_BLOCK2(dest), + IP6_ADDR_BLOCK3(dest), + IP6_ADDR_BLOCK4(dest), + IP6_ADDR_BLOCK5(dest), + IP6_ADDR_BLOCK6(dest), + IP6_ADDR_BLOCK7(dest), + IP6_ADDR_BLOCK8(dest))); + IP6_STATS_INC(ip6.rterr); + return ERR_RTE; + } + + NETIF_SET_HINTS(netif, netif_hint); + err = ip6_output_if(p, src, dest, hl, tc, nexth, netif); + NETIF_RESET_HINTS(netif); + + return err; +} +#endif /* LWIP_NETIF_USE_HINTS*/ + +#if LWIP_IPV6_MLD +/** + * Add a hop-by-hop options header with a router alert option and padding. + * + * Used by MLD when sending a Multicast listener report/done message. + * + * @param p the packet to which we will prepend the options header + * @param nexth the next header protocol number (e.g. IP6_NEXTH_ICMP6) + * @param value the value of the router alert option data (e.g. IP6_ROUTER_ALERT_VALUE_MLD) + * @return ERR_OK if hop-by-hop header was added, ERR_* otherwise + */ +err_t +ip6_options_add_hbh_ra(struct pbuf *p, u8_t nexth, u8_t value) +{ + u8_t *opt_data; + u32_t offset = 0; + struct ip6_hbh_hdr *hbh_hdr; + struct ip6_opt_hdr *opt_hdr; + + /* fixed 4 bytes for router alert option and 2 bytes padding */ + const u8_t hlen = (sizeof(struct ip6_opt_hdr) * 2) + IP6_ROUTER_ALERT_DLEN; + /* Move pointer to make room for hop-by-hop options header. */ + if (pbuf_add_header(p, sizeof(struct ip6_hbh_hdr) + hlen)) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_options: no space for options header\n")); + IP6_STATS_INC(ip6.err); + return ERR_BUF; + } + + /* Set fields of Hop-by-Hop header */ + hbh_hdr = (struct ip6_hbh_hdr *)p->payload; + IP6_HBH_NEXTH(hbh_hdr) = nexth; + hbh_hdr->_hlen = 0; + offset = IP6_HBH_HLEN; + + /* Set router alert options to Hop-by-Hop extended option header */ + opt_hdr = (struct ip6_opt_hdr *)((u8_t *)hbh_hdr + offset); + IP6_OPT_TYPE(opt_hdr) = IP6_ROUTER_ALERT_OPTION; + IP6_OPT_DLEN(opt_hdr) = IP6_ROUTER_ALERT_DLEN; + offset += IP6_OPT_HLEN; + + /* Set router alert option data */ + opt_data = (u8_t *)hbh_hdr + offset; + opt_data[0] = value; + opt_data[1] = 0; + offset += IP6_OPT_DLEN(opt_hdr); + + /* add 2 bytes padding to make 8 bytes Hop-by-Hop header length */ + opt_hdr = (struct ip6_opt_hdr *)((u8_t *)hbh_hdr + offset); + IP6_OPT_TYPE(opt_hdr) = IP6_PADN_OPTION; + IP6_OPT_DLEN(opt_hdr) = 0; + + return ERR_OK; +} +#endif /* LWIP_IPV6_MLD */ + +#if IP6_DEBUG +/* Print an IPv6 header by using LWIP_DEBUGF + * @param p an IPv6 packet, p->payload pointing to the IPv6 header + */ +void +ip6_debug_print(struct pbuf *p) +{ + struct ip6_hdr *ip6hdr = (struct ip6_hdr *)p->payload; + + LWIP_DEBUGF(IP6_DEBUG, ("IPv6 header:\n")); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP6_DEBUG, ("| %2"U16_F" | %3"U16_F" | %7"U32_F" | (ver, class, flow)\n", + IP6H_V(ip6hdr), + IP6H_TC(ip6hdr), + IP6H_FL(ip6hdr))); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP6_DEBUG, ("| %5"U16_F" | %3"U16_F" | %3"U16_F" | (plen, nexth, hopl)\n", + IP6H_PLEN(ip6hdr), + IP6H_NEXTH(ip6hdr), + IP6H_HOPLIM(ip6hdr))); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" | (src)\n", + 
IP6_ADDR_BLOCK1(&(ip6hdr->src)), + IP6_ADDR_BLOCK2(&(ip6hdr->src)), + IP6_ADDR_BLOCK3(&(ip6hdr->src)), + IP6_ADDR_BLOCK4(&(ip6hdr->src)))); + LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" |\n", + IP6_ADDR_BLOCK5(&(ip6hdr->src)), + IP6_ADDR_BLOCK6(&(ip6hdr->src)), + IP6_ADDR_BLOCK7(&(ip6hdr->src)), + IP6_ADDR_BLOCK8(&(ip6hdr->src)))); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" | (dest)\n", + IP6_ADDR_BLOCK1(&(ip6hdr->dest)), + IP6_ADDR_BLOCK2(&(ip6hdr->dest)), + IP6_ADDR_BLOCK3(&(ip6hdr->dest)), + IP6_ADDR_BLOCK4(&(ip6hdr->dest)))); + LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" |\n", + IP6_ADDR_BLOCK5(&(ip6hdr->dest)), + IP6_ADDR_BLOCK6(&(ip6hdr->dest)), + IP6_ADDR_BLOCK7(&(ip6hdr->dest)), + IP6_ADDR_BLOCK8(&(ip6hdr->dest)))); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); +} +#endif /* IP6_DEBUG */ + +#endif /* LWIP_IPV6 */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c b/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c new file mode 100644 index 0000000..687c02f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c @@ -0,0 +1,343 @@ +/** + * @file + * + * IPv6 addresses. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * Functions for handling IPv6 addresses. + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip_addr.h" +#include "lwip/def.h" + +#include + +#if LWIP_IPV4 +#include "lwip/ip4_addr.h" /* for ip6addr_aton to handle IPv4-mapped addresses */ +#endif /* LWIP_IPV4 */ + +/* used by IP6_ADDR_ANY(6) in ip6_addr.h */ +const ip_addr_t ip6_addr_any = IPADDR6_INIT(0ul, 0ul, 0ul, 0ul); + +#define lwip_xchar(i) ((char)((i) < 10 ? 
'0' + (i) : 'A' + (i) - 10)) + +/** + * Check whether "cp" is a valid ascii representation + * of an IPv6 address and convert to a binary address. + * Returns 1 if the address is valid, 0 if not. + * + * @param cp IPv6 address in ascii representation (e.g. "FF01::1") + * @param addr pointer to which to save the ip address in network order + * @return 1 if cp could be converted to addr, 0 on failure + */ +int +ip6addr_aton(const char *cp, ip6_addr_t *addr) +{ + u32_t addr_index, zero_blocks, current_block_index, current_block_value; + const char *s; +#if LWIP_IPV4 + int check_ipv4_mapped = 0; +#endif /* LWIP_IPV4 */ + + /* Count the number of colons, to count the number of blocks in a "::" sequence + zero_blocks may be 1 even if there are no :: sequences */ + zero_blocks = 8; + for (s = cp; *s != 0; s++) { + if (*s == ':') { + zero_blocks--; +#if LWIP_IPV4 + } else if (*s == '.') { + if ((zero_blocks == 5) ||(zero_blocks == 2)) { + check_ipv4_mapped = 1; + /* last block could be the start of an IPv4 address */ + zero_blocks--; + } else { + /* invalid format */ + return 0; + } + break; +#endif /* LWIP_IPV4 */ + } else if (!lwip_isxdigit(*s)) { + break; + } + } + + /* parse each block */ + addr_index = 0; + current_block_index = 0; + current_block_value = 0; + for (s = cp; *s != 0; s++) { + if (*s == ':') { + if (addr) { + if (current_block_index & 0x1) { + addr->addr[addr_index++] |= current_block_value; + } + else { + addr->addr[addr_index] = current_block_value << 16; + } + } + current_block_index++; +#if LWIP_IPV4 + if (check_ipv4_mapped) { + if (current_block_index == 6) { + ip4_addr_t ip4; + int ret = ip4addr_aton(s + 1, &ip4); + if (ret) { + if (addr) { + addr->addr[3] = lwip_htonl(ip4.addr); + current_block_index++; + goto fix_byte_order_and_return; + } + return 1; + } + } + } +#endif /* LWIP_IPV4 */ + current_block_value = 0; + if (current_block_index > 7) { + /* address too long! */ + return 0; + } + if (s[1] == ':') { + if (s[2] == ':') { + /* invalid format: three successive colons */ + return 0; + } + s++; + /* "::" found, set zeros */ + while (zero_blocks > 0) { + zero_blocks--; + if (current_block_index & 0x1) { + addr_index++; + } else { + if (addr) { + addr->addr[addr_index] = 0; + } + } + current_block_index++; + if (current_block_index > 7) { + /* address too long! */ + return 0; + } + } + } + } else if (lwip_isxdigit(*s)) { + /* add current digit */ + current_block_value = (current_block_value << 4) + + (lwip_isdigit(*s) ? (u32_t)(*s - '0') : + (u32_t)(10 + (lwip_islower(*s) ? *s - 'a' : *s - 'A'))); + } else { + /* unexpected digit, space? CRLF? */ + break; + } + } + + if (addr) { + if (current_block_index & 0x1) { + addr->addr[addr_index++] |= current_block_value; + } + else { + addr->addr[addr_index] = current_block_value << 16; + } +#if LWIP_IPV4 +fix_byte_order_and_return: +#endif + /* convert to network byte order. */ + for (addr_index = 0; addr_index < 4; addr_index++) { + addr->addr[addr_index] = lwip_htonl(addr->addr[addr_index]); + } + + ip6_addr_clear_zone(addr); + } + + if (current_block_index != 7) { + return 0; + } + + return 1; +} + +/** + * Convert numeric IPv6 address into ASCII representation. + * returns ptr to static buffer; not reentrant! + * + * @param addr ip6 address in network order to convert + * @return pointer to a global static (!) 
buffer that holds the ASCII + * representation of addr + */ +char * +ip6addr_ntoa(const ip6_addr_t *addr) +{ + static char str[40]; + return ip6addr_ntoa_r(addr, str, 40); +} + +/** + * Same as ipaddr_ntoa, but reentrant since a user-supplied buffer is used. + * + * @param addr ip6 address in network order to convert + * @param buf target buffer where the string is stored + * @param buflen length of buf + * @return either pointer to buf which now holds the ASCII + * representation of addr or NULL if buf was too small + */ +char * +ip6addr_ntoa_r(const ip6_addr_t *addr, char *buf, int buflen) +{ + u32_t current_block_index, current_block_value, next_block_value; + s32_t i; + u8_t zero_flag, empty_block_flag; + +#if LWIP_IPV4 + if (ip6_addr_isipv4mappedipv6(addr)) { + /* This is an IPv4 mapped address */ + ip4_addr_t addr4; + char *ret; +#define IP4MAPPED_HEADER "::FFFF:" + char *buf_ip4 = buf + sizeof(IP4MAPPED_HEADER) - 1; + int buflen_ip4 = buflen - sizeof(IP4MAPPED_HEADER) + 1; + if (buflen < (int)sizeof(IP4MAPPED_HEADER)) { + return NULL; + } + memcpy(buf, IP4MAPPED_HEADER, sizeof(IP4MAPPED_HEADER)); + addr4.addr = addr->addr[3]; + ret = ip4addr_ntoa_r(&addr4, buf_ip4, buflen_ip4); + if (ret != buf_ip4) { + return NULL; + } + return buf; + } +#endif /* LWIP_IPV4 */ + i = 0; + empty_block_flag = 0; /* used to indicate a zero chain for "::' */ + + for (current_block_index = 0; current_block_index < 8; current_block_index++) { + /* get the current 16-bit block */ + current_block_value = lwip_htonl(addr->addr[current_block_index >> 1]); + if ((current_block_index & 0x1) == 0) { + current_block_value = current_block_value >> 16; + } + current_block_value &= 0xffff; + + /* Check for empty block. */ + if (current_block_value == 0) { + if (current_block_index == 7 && empty_block_flag == 1) { + /* special case, we must render a ':' for the last block. */ + buf[i++] = ':'; + if (i >= buflen) { + return NULL; + } + break; + } + if (empty_block_flag == 0) { + /* generate empty block "::", but only if more than one contiguous zero block, + * according to current formatting suggestions RFC 5952. */ + next_block_value = lwip_htonl(addr->addr[(current_block_index + 1) >> 1]); + if ((current_block_index & 0x1) == 0x01) { + next_block_value = next_block_value >> 16; + } + next_block_value &= 0xffff; + if (next_block_value == 0) { + empty_block_flag = 1; + buf[i++] = ':'; + if (i >= buflen) { + return NULL; + } + continue; /* move on to next block. */ + } + } else if (empty_block_flag == 1) { + /* move on to next block. */ + continue; + } + } else if (empty_block_flag == 1) { + /* Set this flag value so we don't produce multiple empty blocks. 
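A value of 2 records that the single "::" has already been emitted, so any later runs of zero blocks are printed out in full.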
*/ + empty_block_flag = 2; + } + + if (current_block_index > 0) { + buf[i++] = ':'; + if (i >= buflen) { + return NULL; + } + } + + if ((current_block_value & 0xf000) == 0) { + zero_flag = 1; + } else { + buf[i++] = lwip_xchar(((current_block_value & 0xf000) >> 12)); + zero_flag = 0; + if (i >= buflen) { + return NULL; + } + } + + if (((current_block_value & 0xf00) == 0) && (zero_flag)) { + /* do nothing */ + } else { + buf[i++] = lwip_xchar(((current_block_value & 0xf00) >> 8)); + zero_flag = 0; + if (i >= buflen) { + return NULL; + } + } + + if (((current_block_value & 0xf0) == 0) && (zero_flag)) { + /* do nothing */ + } + else { + buf[i++] = lwip_xchar(((current_block_value & 0xf0) >> 4)); + zero_flag = 0; + if (i >= buflen) { + return NULL; + } + } + + buf[i++] = lwip_xchar((current_block_value & 0xf)); + if (i >= buflen) { + return NULL; + } + } + + buf[i] = 0; + + return buf; +} + +#endif /* LWIP_IPV6 */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c b/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c new file mode 100644 index 0000000..d6c5d22 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c @@ -0,0 +1,862 @@ +/** + * @file + * + * IPv6 fragmentation and reassembly. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" +#include "lwip/ip6_frag.h" +#include "lwip/ip6.h" +#include "lwip/icmp6.h" +#include "lwip/nd6.h" +#include "lwip/ip.h" + +#include "lwip/pbuf.h" +#include "lwip/memp.h" +#include "lwip/stats.h" + +#include + +#if LWIP_IPV6 && LWIP_IPV6_REASS /* don't build if not configured for use in lwipopts.h */ + + +/** Setting this to 0, you can turn off checking the fragments for overlapping + * regions. The code gets a little smaller. Only use this if you know that + * overlapping won't occur on your network! 
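+ * With the check enabled, a fragment that overlaps an already queued one is dropped rather than merged.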
*/ +#ifndef IP_REASS_CHECK_OVERLAP +#define IP_REASS_CHECK_OVERLAP 1 +#endif /* IP_REASS_CHECK_OVERLAP */ + +/** Set to 0 to prevent freeing the oldest datagram when the reassembly buffer is + * full (IP_REASS_MAX_PBUFS pbufs are enqueued). The code gets a little smaller. + * Datagrams will be freed by timeout only. Especially useful when MEMP_NUM_REASSDATA + * is set to 1, so one datagram can be reassembled at a time, only. */ +#ifndef IP_REASS_FREE_OLDEST +#define IP_REASS_FREE_OLDEST 1 +#endif /* IP_REASS_FREE_OLDEST */ + +#if IPV6_FRAG_COPYHEADER +/* The number of bytes we need to "borrow" from (i.e., overwrite in) the header + * that precedes the fragment header for reassembly pruposes. */ +#define IPV6_FRAG_REQROOM ((s16_t)(sizeof(struct ip6_reass_helper) - IP6_FRAG_HLEN)) +#endif + +#define IP_REASS_FLAG_LASTFRAG 0x01 + +/** This is a helper struct which holds the starting + * offset and the ending offset of this fragment to + * easily chain the fragments. + * It has the same packing requirements as the IPv6 header, since it replaces + * the Fragment Header in memory in incoming fragments to keep + * track of the various fragments. + */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_reass_helper { + PACK_STRUCT_FIELD(struct pbuf *next_pbuf); + PACK_STRUCT_FIELD(u16_t start); + PACK_STRUCT_FIELD(u16_t end); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* static variables */ +static struct ip6_reassdata *reassdatagrams; +static u16_t ip6_reass_pbufcount; + +/* Forward declarations. */ +static void ip6_reass_free_complete_datagram(struct ip6_reassdata *ipr); +#if IP_REASS_FREE_OLDEST +static void ip6_reass_remove_oldest_datagram(struct ip6_reassdata *ipr, int pbufs_needed); +#endif /* IP_REASS_FREE_OLDEST */ + +void +ip6_reass_tmr(void) +{ + struct ip6_reassdata *r, *tmp; + +#if !IPV6_FRAG_COPYHEADER + LWIP_ASSERT("sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN, set IPV6_FRAG_COPYHEADER to 1", + sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN); +#endif /* !IPV6_FRAG_COPYHEADER */ + + r = reassdatagrams; + while (r != NULL) { + /* Decrement the timer. Once it reaches 0, + * clean up the incomplete fragment assembly */ + if (r->timer > 0) { + r->timer--; + r = r->next; + } else { + /* reassembly timed out */ + tmp = r; + /* get the next pointer before freeing */ + r = r->next; + /* free the helper struct and all enqueued pbufs */ + ip6_reass_free_complete_datagram(tmp); + } + } +} + +/** + * Free a datagram (struct ip6_reassdata) and all its pbufs. + * Updates the total count of enqueued pbufs (ip6_reass_pbufcount), + * sends an ICMP time exceeded packet. + * + * @param ipr datagram to free + */ +static void +ip6_reass_free_complete_datagram(struct ip6_reassdata *ipr) +{ + struct ip6_reassdata *prev; + u16_t pbufs_freed = 0; + u16_t clen; + struct pbuf *p; + struct ip6_reass_helper *iprh; + +#if LWIP_ICMP6 + iprh = (struct ip6_reass_helper *)ipr->p->payload; + if (iprh->start == 0) { + /* The first fragment was received, send ICMP time exceeded. */ + /* First, de-queue the first pbuf from r->p. */ + p = ipr->p; + ipr->p = iprh->next_pbuf; + /* Restore the part that we've overwritten with our helper structure, or we + * might send garbage (and disclose a pointer) in the ICMPv6 reply. */ + MEMCPY(p->payload, ipr->orig_hdr, sizeof(iprh)); + /* Then, move back to the original ipv6 header (we are now pointing to Fragment header). 
+ This cannot fail since we already checked when receiving this fragment. */ + if (pbuf_header_force(p, (s16_t)((u8_t*)p->payload - (u8_t*)ipr->iphdr))) { + LWIP_ASSERT("ip6_reass_free: moving p->payload to ip6 header failed\n", 0); + } + else { + /* Reconstruct the zoned source and destination addresses, so that we do + * not end up sending the ICMP response over the wrong link. */ + ip6_addr_t src_addr, dest_addr; + ip6_addr_copy_from_packed(src_addr, IPV6_FRAG_SRC(ipr)); + ip6_addr_set_zone(&src_addr, ipr->src_zone); + ip6_addr_copy_from_packed(dest_addr, IPV6_FRAG_DEST(ipr)); + ip6_addr_set_zone(&dest_addr, ipr->dest_zone); + /* Send the actual ICMP response. */ + icmp6_time_exceeded_with_addrs(p, ICMP6_TE_FRAG, &src_addr, &dest_addr); + } + clen = pbuf_clen(p); + LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); + pbufs_freed = (u16_t)(pbufs_freed + clen); + pbuf_free(p); + } +#endif /* LWIP_ICMP6 */ + + /* First, free all received pbufs. The individual pbufs need to be released + separately as they have not yet been chained */ + p = ipr->p; + while (p != NULL) { + struct pbuf *pcur; + iprh = (struct ip6_reass_helper *)p->payload; + pcur = p; + /* get the next pointer before freeing */ + p = iprh->next_pbuf; + clen = pbuf_clen(pcur); + LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); + pbufs_freed = (u16_t)(pbufs_freed + clen); + pbuf_free(pcur); + } + + /* Then, unchain the struct ip6_reassdata from the list and free it. */ + if (ipr == reassdatagrams) { + reassdatagrams = ipr->next; + } else { + prev = reassdatagrams; + while (prev != NULL) { + if (prev->next == ipr) { + break; + } + prev = prev->next; + } + if (prev != NULL) { + prev->next = ipr->next; + } + } + memp_free(MEMP_IP6_REASSDATA, ipr); + + /* Finally, update number of pbufs in reassembly queue */ + LWIP_ASSERT("ip_reass_pbufcount >= clen", ip6_reass_pbufcount >= pbufs_freed); + ip6_reass_pbufcount = (u16_t)(ip6_reass_pbufcount - pbufs_freed); +} + +#if IP_REASS_FREE_OLDEST +/** + * Free the oldest datagram to make room for enqueueing new fragments. + * The datagram ipr is not freed! + * + * @param ipr ip6_reassdata for the current fragment + * @param pbufs_needed number of pbufs needed to enqueue + * (used for freeing other datagrams if not enough space) + */ +static void +ip6_reass_remove_oldest_datagram(struct ip6_reassdata *ipr, int pbufs_needed) +{ + struct ip6_reassdata *r, *oldest; + + /* Free datagrams until being allowed to enqueue 'pbufs_needed' pbufs, + * but don't free the current datagram! */ + do { + r = oldest = reassdatagrams; + while (r != NULL) { + if (r != ipr) { + if (r->timer <= oldest->timer) { + /* older than the previous oldest */ + oldest = r; + } + } + r = r->next; + } + if (oldest == ipr) { + /* nothing to free, ipr is the only element on the list */ + return; + } + if (oldest != NULL) { + ip6_reass_free_complete_datagram(oldest); + } + } while (((ip6_reass_pbufcount + pbufs_needed) > IP_REASS_MAX_PBUFS) && (reassdatagrams != NULL)); +} +#endif /* IP_REASS_FREE_OLDEST */ + +/** + * Reassembles incoming IPv6 fragments into an IPv6 datagram. 
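+ * Fragments are matched on (source, destination, identification) and chained in offset order until the whole datagram has arrived.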
+ * + * @param p points to the IPv6 Fragment Header + * @return NULL if reassembly is incomplete, pbuf pointing to + * IPv6 Header if reassembly is complete + */ +struct pbuf * +ip6_reass(struct pbuf *p) +{ + struct ip6_reassdata *ipr, *ipr_prev; + struct ip6_reass_helper *iprh, *iprh_tmp, *iprh_prev=NULL; + struct ip6_frag_hdr *frag_hdr; + u16_t offset, len, start, end; + ptrdiff_t hdrdiff; + u16_t clen; + u8_t valid = 1; + struct pbuf *q, *next_pbuf; + + IP6_FRAG_STATS_INC(ip6_frag.recv); + + /* ip6_frag_hdr must be in the first pbuf, not chained. Checked by caller. */ + LWIP_ASSERT("IPv6 fragment header does not fit in first pbuf", + p->len >= sizeof(struct ip6_frag_hdr)); + + frag_hdr = (struct ip6_frag_hdr *) p->payload; + + clen = pbuf_clen(p); + + offset = lwip_ntohs(frag_hdr->_fragment_offset); + + /* Calculate fragment length from IPv6 payload length. + * Adjust for headers before Fragment Header. + * And finally adjust by Fragment Header length. */ + len = lwip_ntohs(ip6_current_header()->_plen); + hdrdiff = (u8_t*)p->payload - (const u8_t*)ip6_current_header(); + LWIP_ASSERT("not a valid pbuf (ip6_input check missing?)", hdrdiff <= 0xFFFF); + LWIP_ASSERT("not a valid pbuf (ip6_input check missing?)", hdrdiff >= IP6_HLEN); + hdrdiff -= IP6_HLEN; + hdrdiff += IP6_FRAG_HLEN; + if (hdrdiff > len) { + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; + } + len = (u16_t)(len - hdrdiff); + start = (offset & IP6_FRAG_OFFSET_MASK); + if (start > (0xFFFF - len)) { + /* u16_t overflow, cannot handle this */ + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; + } + + /* Look for the datagram the fragment belongs to in the current datagram queue, + * remembering the previous in the queue for later dequeueing. */ + for (ipr = reassdatagrams, ipr_prev = NULL; ipr != NULL; ipr = ipr->next) { + /* Check if the incoming fragment matches the one currently present + in the reassembly buffer. If so, we proceed with copying the + fragment into the buffer. */ + if ((frag_hdr->_identification == ipr->identification) && + ip6_addr_cmp_packed(ip6_current_src_addr(), &(IPV6_FRAG_SRC(ipr)), ipr->src_zone) && + ip6_addr_cmp_packed(ip6_current_dest_addr(), &(IPV6_FRAG_DEST(ipr)), ipr->dest_zone)) { + IP6_FRAG_STATS_INC(ip6_frag.cachehit); + break; + } + ipr_prev = ipr; + } + + if (ipr == NULL) { + /* Enqueue a new datagram into the datagram queue */ + ipr = (struct ip6_reassdata *)memp_malloc(MEMP_IP6_REASSDATA); + if (ipr == NULL) { +#if IP_REASS_FREE_OLDEST + /* Make room and try again. */ + ip6_reass_remove_oldest_datagram(ipr, clen); + ipr = (struct ip6_reassdata *)memp_malloc(MEMP_IP6_REASSDATA); + if (ipr != NULL) { + /* re-search ipr_prev since it might have been removed */ + for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) { + if (ipr_prev->next == ipr) { + break; + } + } + } else +#endif /* IP_REASS_FREE_OLDEST */ + { + IP6_FRAG_STATS_INC(ip6_frag.memerr); + goto nullreturn; + } + } + + memset(ipr, 0, sizeof(struct ip6_reassdata)); + ipr->timer = IPV6_REASS_MAXAGE; + + /* enqueue the new structure to the front of the list */ + ipr->next = reassdatagrams; + reassdatagrams = ipr; + + /* Use the current IPv6 header for src/dest address reference. + * Eventually, we will replace it when we get the first fragment + * (it might be this one, in any case, it is done later). 
*/ + /* need to use the none-const pointer here: */ + ipr->iphdr = ip_data.current_ip6_header; +#if IPV6_FRAG_COPYHEADER + MEMCPY(&ipr->src, &ip6_current_header()->src, sizeof(ipr->src)); + MEMCPY(&ipr->dest, &ip6_current_header()->dest, sizeof(ipr->dest)); +#endif /* IPV6_FRAG_COPYHEADER */ +#if LWIP_IPV6_SCOPES + /* Also store the address zone information. + * @todo It is possible that due to netif destruction and recreation, the + * stored zones end up resolving to a different interface. In that case, we + * risk sending a "time exceeded" ICMP response over the wrong link. + * Ideally, netif destruction would clean up matching pending reassembly + * structures, but custom zone mappings would make that non-trivial. */ + ipr->src_zone = ip6_addr_zone(ip6_current_src_addr()); + ipr->dest_zone = ip6_addr_zone(ip6_current_dest_addr()); +#endif /* LWIP_IPV6_SCOPES */ + /* copy the fragmented packet id. */ + ipr->identification = frag_hdr->_identification; + + /* copy the nexth field */ + ipr->nexth = frag_hdr->_nexth; + } + + /* Check if we are allowed to enqueue more datagrams. */ + if ((ip6_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) { +#if IP_REASS_FREE_OLDEST + ip6_reass_remove_oldest_datagram(ipr, clen); + if ((ip6_reass_pbufcount + clen) <= IP_REASS_MAX_PBUFS) { + /* re-search ipr_prev since it might have been removed */ + for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) { + if (ipr_prev->next == ipr) { + break; + } + } + } else +#endif /* IP_REASS_FREE_OLDEST */ + { + /* @todo: send ICMPv6 time exceeded here? */ + /* drop this pbuf */ + IP6_FRAG_STATS_INC(ip6_frag.memerr); + goto nullreturn; + } + } + + /* Overwrite Fragment Header with our own helper struct. */ +#if IPV6_FRAG_COPYHEADER + if (IPV6_FRAG_REQROOM > 0) { + /* Make room for struct ip6_reass_helper (only required if sizeof(void*) > 4). + This cannot fail since we already checked when receiving this fragment. */ + u8_t hdrerr = pbuf_header_force(p, IPV6_FRAG_REQROOM); + LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0); + } +#else /* IPV6_FRAG_COPYHEADER */ + LWIP_ASSERT("sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN, set IPV6_FRAG_COPYHEADER to 1", + sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN); +#endif /* IPV6_FRAG_COPYHEADER */ + + /* Prepare the pointer to the helper structure, and its initial values. + * Do not yet write to the structure itself, as we still have to make a + * backup of the original data, and we should not do that until we know for + * sure that we are going to add this packet to the list. */ + iprh = (struct ip6_reass_helper *)p->payload; + next_pbuf = NULL; + end = (u16_t)(start + len); + + /* find the right place to insert this pbuf */ + /* Iterate through until we either get to the end of the list (append), + * or we find on with a larger offset (insert). 
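+ * The fragment queue is kept sorted by start offset.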
*/ + for (q = ipr->p; q != NULL;) { + iprh_tmp = (struct ip6_reass_helper*)q->payload; + if (start < iprh_tmp->start) { +#if IP_REASS_CHECK_OVERLAP + if (end > iprh_tmp->start) { + /* fragment overlaps with following, throw away */ + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; + } + if (iprh_prev != NULL) { + if (start < iprh_prev->end) { + /* fragment overlaps with previous, throw away */ + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; + } + } +#endif /* IP_REASS_CHECK_OVERLAP */ + /* the new pbuf should be inserted before this */ + next_pbuf = q; + if (iprh_prev != NULL) { + /* not the fragment with the lowest offset */ + iprh_prev->next_pbuf = p; + } else { + /* fragment with the lowest offset */ + ipr->p = p; + } + break; + } else if (start == iprh_tmp->start) { + /* received the same datagram twice: no need to keep the datagram */ + goto nullreturn; +#if IP_REASS_CHECK_OVERLAP + } else if (start < iprh_tmp->end) { + /* overlap: no need to keep the new datagram */ + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; +#endif /* IP_REASS_CHECK_OVERLAP */ + } else { + /* Check if the fragments received so far have no gaps. */ + if (iprh_prev != NULL) { + if (iprh_prev->end != iprh_tmp->start) { + /* There is a fragment missing between the current + * and the previous fragment */ + valid = 0; + } + } + } + q = iprh_tmp->next_pbuf; + iprh_prev = iprh_tmp; + } + + /* If q is NULL, then we made it to the end of the list. Determine what to do now */ + if (q == NULL) { + if (iprh_prev != NULL) { + /* this is (for now), the fragment with the highest offset: + * chain it to the last fragment */ +#if IP_REASS_CHECK_OVERLAP + LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= start); +#endif /* IP_REASS_CHECK_OVERLAP */ + iprh_prev->next_pbuf = p; + if (iprh_prev->end != start) { + valid = 0; + } + } else { +#if IP_REASS_CHECK_OVERLAP + LWIP_ASSERT("no previous fragment, this must be the first fragment!", + ipr->p == NULL); +#endif /* IP_REASS_CHECK_OVERLAP */ + /* this is the first fragment we ever received for this ip datagram */ + ipr->p = p; + } + } + + /* Track the current number of pbufs current 'in-flight', in order to limit + the number of fragments that may be enqueued at any one time */ + ip6_reass_pbufcount = (u16_t)(ip6_reass_pbufcount + clen); + + /* Remember IPv6 header if this is the first fragment. */ + if (start == 0) { + /* need to use the none-const pointer here: */ + ipr->iphdr = ip_data.current_ip6_header; + /* Make a backup of the part of the packet data that we are about to + * overwrite, so that we can restore the original later. */ + MEMCPY(ipr->orig_hdr, p->payload, sizeof(*iprh)); + /* For IPV6_FRAG_COPYHEADER there is no need to copy src/dst again, as they + * will be the same as they were. With LWIP_IPV6_SCOPES, the same applies + * to the source/destination zones. */ + } + /* Only after the backup do we get to fill in the actual helper structure. */ + iprh->next_pbuf = next_pbuf; + iprh->start = start; + iprh->end = end; + + /* If this is the last fragment, calculate total packet length. */ + if ((offset & IP6_FRAG_MORE_FLAG) == 0) { + ipr->datagram_len = iprh->end; + } + + /* Additional validity tests: we have received first and last fragment. */ + iprh_tmp = (struct ip6_reass_helper*)ipr->p->payload; + if (iprh_tmp->start != 0) { + valid = 0; + } + if (ipr->datagram_len == 0) { + valid = 0; + } + + /* Final validity test: no gaps between current and last fragment. 
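Reassembly below proceeds only once the first and last fragments are present and the chain is gap-free.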
*/ + iprh_prev = iprh; + q = iprh->next_pbuf; + while ((q != NULL) && valid) { + iprh = (struct ip6_reass_helper*)q->payload; + if (iprh_prev->end != iprh->start) { + valid = 0; + break; + } + iprh_prev = iprh; + q = iprh->next_pbuf; + } + + if (valid) { + /* All fragments have been received */ + struct ip6_hdr* iphdr_ptr; + + /* chain together the pbufs contained within the ip6_reassdata list. */ + iprh = (struct ip6_reass_helper*) ipr->p->payload; + while (iprh != NULL) { + next_pbuf = iprh->next_pbuf; + if (next_pbuf != NULL) { + /* Save next helper struct (will be hidden in next step). */ + iprh_tmp = (struct ip6_reass_helper*)next_pbuf->payload; + + /* hide the fragment header for every succeeding fragment */ + pbuf_remove_header(next_pbuf, IP6_FRAG_HLEN); +#if IPV6_FRAG_COPYHEADER + if (IPV6_FRAG_REQROOM > 0) { + /* hide the extra bytes borrowed from ip6_hdr for struct ip6_reass_helper */ + u8_t hdrerr = pbuf_remove_header(next_pbuf, IPV6_FRAG_REQROOM); + LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0); + } +#endif + pbuf_cat(ipr->p, next_pbuf); + } + else { + iprh_tmp = NULL; + } + + iprh = iprh_tmp; + } + + /* Get the first pbuf. */ + p = ipr->p; + +#if IPV6_FRAG_COPYHEADER + if (IPV6_FRAG_REQROOM > 0) { + u8_t hdrerr; + /* Restore (only) the bytes that we overwrote beyond the fragment header. + * Those bytes may belong to either the IPv6 header or an extension + * header placed before the fragment header. */ + MEMCPY(p->payload, ipr->orig_hdr, IPV6_FRAG_REQROOM); + /* get back room for struct ip6_reass_helper (only required if sizeof(void*) > 4) */ + hdrerr = pbuf_remove_header(p, IPV6_FRAG_REQROOM); + LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0); + } +#endif + + /* We need to get rid of the fragment header itself, which is somewhere in + * the middle of the packet (but still in the first pbuf of the chain). + * Getting rid of the header is required by RFC 2460 Sec. 4.5 and necessary + * in order to be able to reassemble packets that are close to full size + * (i.e., around 65535 bytes). We simply move up all the headers before the + * fragment header, including the IPv6 header, and adjust the payload start + * accordingly. This works because all these headers are in the first pbuf + * of the chain, and because the caller adjusts all its pointers on + * successful reassembly. */ + MEMMOVE((u8_t*)ipr->iphdr + sizeof(struct ip6_frag_hdr), ipr->iphdr, + (size_t)((u8_t*)p->payload - (u8_t*)ipr->iphdr)); + + /* This is where the IPv6 header is now. */ + iphdr_ptr = (struct ip6_hdr*)((u8_t*)ipr->iphdr + + sizeof(struct ip6_frag_hdr)); + + /* Adjust datagram length by adding header lengths. */ + ipr->datagram_len = (u16_t)(ipr->datagram_len + ((u8_t*)p->payload - (u8_t*)iphdr_ptr) + - IP6_HLEN); + + /* Set payload length in ip header. */ + iphdr_ptr->_plen = lwip_htons(ipr->datagram_len); + + /* With the fragment header gone, we now need to adjust the next-header + * field of whatever header was originally before it. Since the packet made + * it through the original header processing routines at least up to the + * fragment header, we do not need any further sanity checks here. 
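+ * If an extension header (rather than the IPv6 header itself) pointed at the fragment header, the loop below steps through the extension chain in 8 * (1 + length-field) byte increments to find and patch that header's next-header field.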
*/ + if (IP6H_NEXTH(iphdr_ptr) == IP6_NEXTH_FRAGMENT) { + iphdr_ptr->_nexth = ipr->nexth; + } else { + u8_t *ptr = (u8_t *)iphdr_ptr + IP6_HLEN; + while (*ptr != IP6_NEXTH_FRAGMENT) { + ptr += 8 * (1 + ptr[1]); + } + *ptr = ipr->nexth; + } + + /* release the resources allocated for the fragment queue entry */ + if (reassdatagrams == ipr) { + /* it was the first in the list */ + reassdatagrams = ipr->next; + } else { + /* it wasn't the first, so it must have a valid 'prev' */ + LWIP_ASSERT("sanity check linked list", ipr_prev != NULL); + ipr_prev->next = ipr->next; + } + memp_free(MEMP_IP6_REASSDATA, ipr); + + /* adjust the number of pbufs currently queued for reassembly. */ + clen = pbuf_clen(p); + LWIP_ASSERT("ip6_reass_pbufcount >= clen", ip6_reass_pbufcount >= clen); + ip6_reass_pbufcount = (u16_t)(ip6_reass_pbufcount - clen); + + /* Move pbuf back to IPv6 header. This should never fail. */ + if (pbuf_header_force(p, (s16_t)((u8_t*)p->payload - (u8_t*)iphdr_ptr))) { + LWIP_ASSERT("ip6_reass: moving p->payload to ip6 header failed\n", 0); + pbuf_free(p); + return NULL; + } + + /* Return the pbuf chain */ + return p; + } + /* the datagram is not (yet?) reassembled completely */ + return NULL; + +nullreturn: + IP6_FRAG_STATS_INC(ip6_frag.drop); + pbuf_free(p); + return NULL; +} + +#endif /* LWIP_IPV6 && LWIP_IPV6_REASS */ + +#if LWIP_IPV6 && LWIP_IPV6_FRAG + +#if !LWIP_NETIF_TX_SINGLE_PBUF +/** Allocate a new struct pbuf_custom_ref */ +static struct pbuf_custom_ref* +ip6_frag_alloc_pbuf_custom_ref(void) +{ + return (struct pbuf_custom_ref*)memp_malloc(MEMP_FRAG_PBUF); +} + +/** Free a struct pbuf_custom_ref */ +static void +ip6_frag_free_pbuf_custom_ref(struct pbuf_custom_ref* p) +{ + LWIP_ASSERT("p != NULL", p != NULL); + memp_free(MEMP_FRAG_PBUF, p); +} + +/** Free-callback function to free a 'struct pbuf_custom_ref', called by + * pbuf_free. */ +static void +ip6_frag_free_pbuf_custom(struct pbuf *p) +{ + struct pbuf_custom_ref *pcr = (struct pbuf_custom_ref*)p; + LWIP_ASSERT("pcr != NULL", pcr != NULL); + LWIP_ASSERT("pcr == p", (void*)pcr == (void*)p); + if (pcr->original != NULL) { + pbuf_free(pcr->original); + } + ip6_frag_free_pbuf_custom_ref(pcr); +} +#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */ + +/** + * Fragment an IPv6 datagram if too large for the netif or path MTU. + * + * Chop the datagram in MTU sized chunks and send them in order + * by pointing PBUF_REFs into p + * + * @param p ipv6 packet to send + * @param netif the netif on which to send + * @param dest destination ipv6 address to which to send + * + * @return ERR_OK if sent successfully, err_t otherwise + */ +err_t +ip6_frag(struct pbuf *p, struct netif *netif, const ip6_addr_t *dest) +{ + struct ip6_hdr *original_ip6hdr; + struct ip6_hdr *ip6hdr; + struct ip6_frag_hdr *frag_hdr; + struct pbuf *rambuf; +#if !LWIP_NETIF_TX_SINGLE_PBUF + struct pbuf *newpbuf; + u16_t newpbuflen = 0; + u16_t left_to_copy; +#endif + static u32_t identification; + u16_t left, cop; + const u16_t mtu = nd6_get_destination_mtu(dest, netif); + const u16_t nfb = (u16_t)((mtu - (IP6_HLEN + IP6_FRAG_HLEN)) & IP6_FRAG_OFFSET_MASK); + u16_t fragment_offset = 0; + u16_t last; + u16_t poff = IP6_HLEN; + + identification++; + + original_ip6hdr = (struct ip6_hdr *)p->payload; + + /* @todo we assume there are no options in the unfragmentable part (IPv6 header). */ + LWIP_ASSERT("p->tot_len >= IP6_HLEN", p->tot_len >= IP6_HLEN); + left = (u16_t)(p->tot_len - IP6_HLEN); + + while (left) { + last = (left <= nfb); + + /* Fill this fragment */ + cop = last ? 
left : nfb; + +#if LWIP_NETIF_TX_SINGLE_PBUF + rambuf = pbuf_alloc(PBUF_IP, cop + IP6_FRAG_HLEN, PBUF_RAM); + if (rambuf == NULL) { + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + LWIP_ASSERT("this needs a pbuf in one piece!", + (rambuf->len == rambuf->tot_len) && (rambuf->next == NULL)); + poff += pbuf_copy_partial(p, (u8_t*)rambuf->payload + IP6_FRAG_HLEN, cop, poff); + /* make room for the IP header */ + if (pbuf_add_header(rambuf, IP6_HLEN)) { + pbuf_free(rambuf); + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + /* fill in the IP header */ + SMEMCPY(rambuf->payload, original_ip6hdr, IP6_HLEN); + ip6hdr = (struct ip6_hdr *)rambuf->payload; + frag_hdr = (struct ip6_frag_hdr *)((u8_t*)rambuf->payload + IP6_HLEN); +#else + /* When not using a static buffer, create a chain of pbufs. + * The first will be a PBUF_RAM holding the link, IPv6, and Fragment header. + * The rest will be PBUF_REFs mirroring the pbuf chain to be fragged, + * but limited to the size of an mtu. + */ + rambuf = pbuf_alloc(PBUF_LINK, IP6_HLEN + IP6_FRAG_HLEN, PBUF_RAM); + if (rambuf == NULL) { + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + LWIP_ASSERT("this needs a pbuf in one piece!", + (p->len >= (IP6_HLEN))); + SMEMCPY(rambuf->payload, original_ip6hdr, IP6_HLEN); + ip6hdr = (struct ip6_hdr *)rambuf->payload; + frag_hdr = (struct ip6_frag_hdr *)((u8_t*)rambuf->payload + IP6_HLEN); + + /* Can just adjust p directly for needed offset. */ + p->payload = (u8_t *)p->payload + poff; + p->len = (u16_t)(p->len - poff); + p->tot_len = (u16_t)(p->tot_len - poff); + + left_to_copy = cop; + while (left_to_copy) { + struct pbuf_custom_ref *pcr; + newpbuflen = (left_to_copy < p->len) ? left_to_copy : p->len; + /* Is this pbuf already empty? */ + if (!newpbuflen) { + p = p->next; + continue; + } + pcr = ip6_frag_alloc_pbuf_custom_ref(); + if (pcr == NULL) { + pbuf_free(rambuf); + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + /* Mirror this pbuf, although we might not need all of it. */ + newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc, p->payload, newpbuflen); + if (newpbuf == NULL) { + ip6_frag_free_pbuf_custom_ref(pcr); + pbuf_free(rambuf); + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + pbuf_ref(p); + pcr->original = p; + pcr->pc.custom_free_function = ip6_frag_free_pbuf_custom; + + /* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain + * so that it is removed when pbuf_dechain is later called on rambuf. + */ + pbuf_cat(rambuf, newpbuf); + left_to_copy = (u16_t)(left_to_copy - newpbuflen); + if (left_to_copy) { + p = p->next; + } + } + poff = newpbuflen; +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + + /* Set headers */ + frag_hdr->_nexth = original_ip6hdr->_nexth; + frag_hdr->reserved = 0; + frag_hdr->_fragment_offset = lwip_htons((u16_t)((fragment_offset & IP6_FRAG_OFFSET_MASK) | (last ? 0 : IP6_FRAG_MORE_FLAG))); + frag_hdr->_identification = lwip_htonl(identification); + + IP6H_NEXTH_SET(ip6hdr, IP6_NEXTH_FRAGMENT); + IP6H_PLEN_SET(ip6hdr, (u16_t)(cop + IP6_FRAG_HLEN)); + + /* No need for separate header pbuf - we allowed room for it in rambuf + * when allocated. + */ + IP6_FRAG_STATS_INC(ip6_frag.xmit); + netif->output_ip6(netif, rambuf, dest); + + /* Unfortunately we can't reuse rambuf - the hardware may still be + * using the buffer. Instead we free it (and the ensuing chain) and + * recreate it next time round the loop. 
If we're lucky the hardware + * will have already sent the packet, the free will really free, and + * there will be zero memory penalty. + */ + + pbuf_free(rambuf); + left = (u16_t)(left - cop); + fragment_offset = (u16_t)(fragment_offset + cop); + } + return ERR_OK; +} + +#endif /* LWIP_IPV6 && LWIP_IPV6_FRAG */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c b/Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c new file mode 100644 index 0000000..6387d46 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c @@ -0,0 +1,626 @@ +/** + * @file + * Multicast listener discovery + * + * @defgroup mld6 MLD6 + * @ingroup ip6 + * Multicast listener discovery for IPv6. Aims to be compliant with RFC 2710. + * No support for MLDv2.\n + * Note: The allnodes (ff01::1, ff02::1) group is assumed be received by your + * netif since it must always be received for correct IPv6 operation (e.g. SLAAC). + * Ensure the netif filters are configured accordingly!\n + * The netif flags also need NETIF_FLAG_MLD6 flag set to enable MLD6 on a + * netif ("netif->flags |= NETIF_FLAG_MLD6;").\n + * To be called from TCPIP thread. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +/* Based on igmp.c implementation of igmp v2 protocol */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_IPV6_MLD /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/mld6.h" +#include "lwip/prot/mld6.h" +#include "lwip/icmp6.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/ip.h" +#include "lwip/inet_chksum.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/memp.h" +#include "lwip/stats.h" + +#include + + +/* + * MLD constants + */ +#define MLD6_HL 1 +#define MLD6_JOIN_DELAYING_MEMBER_TMR_MS (500) + +#define MLD6_GROUP_NON_MEMBER 0 +#define MLD6_GROUP_DELAYING_MEMBER 1 +#define MLD6_GROUP_IDLE_MEMBER 2 + +/* Forward declarations. 
*/ +static struct mld_group *mld6_new_group(struct netif *ifp, const ip6_addr_t *addr); +static err_t mld6_remove_group(struct netif *netif, struct mld_group *group); +static void mld6_delayed_report(struct mld_group *group, u16_t maxresp); +static void mld6_send(struct netif *netif, struct mld_group *group, u8_t type); + + +/** + * Stop MLD processing on interface + * + * @param netif network interface on which stop MLD processing + */ +err_t +mld6_stop(struct netif *netif) +{ + struct mld_group *group = netif_mld6_data(netif); + + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, NULL); + + while (group != NULL) { + struct mld_group *next = group->next; /* avoid use-after-free below */ + + /* disable the group at the MAC level */ + if (netif->mld_mac_filter != NULL) { + netif->mld_mac_filter(netif, &(group->group_address), NETIF_DEL_MAC_FILTER); + } + + /* free group */ + memp_free(MEMP_MLD6_GROUP, group); + + /* move to "next" */ + group = next; + } + return ERR_OK; +} + +/** + * Report MLD memberships for this interface + * + * @param netif network interface on which report MLD memberships + */ +void +mld6_report_groups(struct netif *netif) +{ + struct mld_group *group = netif_mld6_data(netif); + + while (group != NULL) { + mld6_delayed_report(group, MLD6_JOIN_DELAYING_MEMBER_TMR_MS); + group = group->next; + } +} + +/** + * Search for a group that is joined on a netif + * + * @param ifp the network interface for which to look + * @param addr the group ipv6 address to search for + * @return a struct mld_group* if the group has been found, + * NULL if the group wasn't found. + */ +struct mld_group * +mld6_lookfor_group(struct netif *ifp, const ip6_addr_t *addr) +{ + struct mld_group *group = netif_mld6_data(ifp); + + while (group != NULL) { + if (ip6_addr_cmp(&(group->group_address), addr)) { + return group; + } + group = group->next; + } + + return NULL; +} + + +/** + * create a new group + * + * @param ifp the network interface for which to create + * @param addr the new group ipv6 + * @return a struct mld_group*, + * NULL on memory error. + */ +static struct mld_group * +mld6_new_group(struct netif *ifp, const ip6_addr_t *addr) +{ + struct mld_group *group; + + group = (struct mld_group *)memp_malloc(MEMP_MLD6_GROUP); + if (group != NULL) { + ip6_addr_set(&(group->group_address), addr); + group->timer = 0; /* Not running */ + group->group_state = MLD6_GROUP_IDLE_MEMBER; + group->last_reporter_flag = 0; + group->use = 0; + group->next = netif_mld6_data(ifp); + + netif_set_client_data(ifp, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, group); + } + + return group; +} + +/** + * Remove a group from the mld_group_list, but do not free it yet + * + * @param group the group to remove + * @return ERR_OK if group was removed from the list, an err_t otherwise + */ +static err_t +mld6_remove_group(struct netif *netif, struct mld_group *group) +{ + err_t err = ERR_OK; + + /* Is it the first group? */ + if (netif_mld6_data(netif) == group) { + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, group->next); + } else { + /* look for group further down the list */ + struct mld_group *tmpGroup; + for (tmpGroup = netif_mld6_data(netif); tmpGroup != NULL; tmpGroup = tmpGroup->next) { + if (tmpGroup->next == group) { + tmpGroup->next = group->next; + break; + } + } + /* Group not find group */ + if (tmpGroup == NULL) { + err = ERR_ARG; + } + } + + return err; +} + + +/** + * Process an input MLD message. Called by icmp6_input. 
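+ * Listener queries schedule (possibly delayed) membership reports; listener reports from other hosts cancel any report we had pending for the same group.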
+ * + * @param p the mld packet, p->payload pointing to the icmpv6 header + * @param inp the netif on which this packet was received + */ +void +mld6_input(struct pbuf *p, struct netif *inp) +{ + struct mld_header *mld_hdr; + struct mld_group *group; + + MLD6_STATS_INC(mld6.recv); + + /* Check that mld header fits in packet. */ + if (p->len < sizeof(struct mld_header)) { + /* @todo debug message */ + pbuf_free(p); + MLD6_STATS_INC(mld6.lenerr); + MLD6_STATS_INC(mld6.drop); + return; + } + + mld_hdr = (struct mld_header *)p->payload; + + switch (mld_hdr->type) { + case ICMP6_TYPE_MLQ: /* Multicast listener query. */ + /* Is it a general query? */ + if (ip6_addr_isallnodes_linklocal(ip6_current_dest_addr()) && + ip6_addr_isany(&(mld_hdr->multicast_address))) { + MLD6_STATS_INC(mld6.rx_general); + /* Report all groups, except all nodes group, and if-local groups. */ + group = netif_mld6_data(inp); + while (group != NULL) { + if ((!(ip6_addr_ismulticast_iflocal(&(group->group_address)))) && + (!(ip6_addr_isallnodes_linklocal(&(group->group_address))))) { + mld6_delayed_report(group, mld_hdr->max_resp_delay); + } + group = group->next; + } + } else { + /* Have we joined this group? + * We use IP6 destination address to have a memory aligned copy. + * mld_hdr->multicast_address should be the same. */ + MLD6_STATS_INC(mld6.rx_group); + group = mld6_lookfor_group(inp, ip6_current_dest_addr()); + if (group != NULL) { + /* Schedule a report. */ + mld6_delayed_report(group, mld_hdr->max_resp_delay); + } + } + break; /* ICMP6_TYPE_MLQ */ + case ICMP6_TYPE_MLR: /* Multicast listener report. */ + /* Have we joined this group? + * We use IP6 destination address to have a memory aligned copy. + * mld_hdr->multicast_address should be the same. */ + MLD6_STATS_INC(mld6.rx_report); + group = mld6_lookfor_group(inp, ip6_current_dest_addr()); + if (group != NULL) { + /* If we are waiting to report, cancel it. */ + if (group->group_state == MLD6_GROUP_DELAYING_MEMBER) { + group->timer = 0; /* stopped */ + group->group_state = MLD6_GROUP_IDLE_MEMBER; + group->last_reporter_flag = 0; + } + } + break; /* ICMP6_TYPE_MLR */ + case ICMP6_TYPE_MLD: /* Multicast listener done. */ + /* Do nothing, router will query us. */ + break; /* ICMP6_TYPE_MLD */ + default: + MLD6_STATS_INC(mld6.proterr); + MLD6_STATS_INC(mld6.drop); + break; + } + + pbuf_free(p); +} + +/** + * @ingroup mld6 + * Join a group on one or all network interfaces. + * + * If the group is to be joined on all interfaces, the given group address must + * not have a zone set (i.e., it must have its zone index set to IP6_NO_ZONE). + * If the group is to be joined on one particular interface, the given group + * address may or may not have a zone set. + * + * @param srcaddr ipv6 address (zoned) of the network interface which should + * join a new group. If IP6_ADDR_ANY6, join on all netifs + * @param groupaddr the ipv6 address of the group to join (possibly but not + * necessarily zoned) + * @return ERR_OK if group was joined on the netif(s), an err_t otherwise + */ +err_t +mld6_joingroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr) +{ + err_t err = ERR_VAL; /* no matching interface */ + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + + /* loop through netif's */ + NETIF_FOREACH(netif) { + /* Should we join this interface ? 
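Yes, if srcaddr is the unspecified address or matches one of this netif's addresses.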
*/ + if (ip6_addr_isany(srcaddr) || + netif_get_ip6_addr_match(netif, srcaddr) >= 0) { + err = mld6_joingroup_netif(netif, groupaddr); + if (err != ERR_OK) { + return err; + } + } + } + + return err; +} + +/** + * @ingroup mld6 + * Join a group on a network interface. + * + * @param netif the network interface which should join a new group. + * @param groupaddr the ipv6 address of the group to join (possibly but not + * necessarily zoned) + * @return ERR_OK if group was joined on the netif, an err_t otherwise + */ +err_t +mld6_joingroup_netif(struct netif *netif, const ip6_addr_t *groupaddr) +{ + struct mld_group *group; +#if LWIP_IPV6_SCOPES + ip6_addr_t ip6addr; + + /* If the address has a particular scope but no zone set, use the netif to + * set one now. Within the mld6 module, all addresses are properly zoned. */ + if (ip6_addr_lacks_zone(groupaddr, IP6_MULTICAST)) { + ip6_addr_set(&ip6addr, groupaddr); + ip6_addr_assign_zone(&ip6addr, IP6_MULTICAST, netif); + groupaddr = &ip6addr; + } + IP6_ADDR_ZONECHECK_NETIF(groupaddr, netif); +#endif /* LWIP_IPV6_SCOPES */ + + LWIP_ASSERT_CORE_LOCKED(); + + /* find group or create a new one if not found */ + group = mld6_lookfor_group(netif, groupaddr); + + if (group == NULL) { + /* Joining a new group. Create a new group entry. */ + group = mld6_new_group(netif, groupaddr); + if (group == NULL) { + return ERR_MEM; + } + + /* Activate this address on the MAC layer. */ + if (netif->mld_mac_filter != NULL) { + netif->mld_mac_filter(netif, groupaddr, NETIF_ADD_MAC_FILTER); + } + + /* Report our membership. */ + MLD6_STATS_INC(mld6.tx_report); + mld6_send(netif, group, ICMP6_TYPE_MLR); + mld6_delayed_report(group, MLD6_JOIN_DELAYING_MEMBER_TMR_MS); + } + + /* Increment group use */ + group->use++; + return ERR_OK; +} + +/** + * @ingroup mld6 + * Leave a group on a network interface. + * + * Zoning of address follows the same rules as @ref mld6_joingroup. + * + * @param srcaddr ipv6 address (zoned) of the network interface which should + * leave the group. If IP6_ADDR_ANY6, leave on all netifs + * @param groupaddr the ipv6 address of the group to leave (possibly, but not + * necessarily zoned) + * @return ERR_OK if group was left on the netif(s), an err_t otherwise + */ +err_t +mld6_leavegroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr) +{ + err_t err = ERR_VAL; /* no matching interface */ + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + + /* loop through netif's */ + NETIF_FOREACH(netif) { + /* Should we leave this interface ? */ + if (ip6_addr_isany(srcaddr) || + netif_get_ip6_addr_match(netif, srcaddr) >= 0) { + err_t res = mld6_leavegroup_netif(netif, groupaddr); + if (err != ERR_OK) { + /* Store this result if we have not yet gotten a success */ + err = res; + } + } + } + + return err; +} + +/** + * @ingroup mld6 + * Leave a group on a network interface. + * + * @param netif the network interface which should leave the group. 
+ * @param groupaddr the ipv6 address of the group to leave (possibly, but not + * necessarily zoned) + * @return ERR_OK if group was left on the netif, an err_t otherwise + */ +err_t +mld6_leavegroup_netif(struct netif *netif, const ip6_addr_t *groupaddr) +{ + struct mld_group *group; +#if LWIP_IPV6_SCOPES + ip6_addr_t ip6addr; + + if (ip6_addr_lacks_zone(groupaddr, IP6_MULTICAST)) { + ip6_addr_set(&ip6addr, groupaddr); + ip6_addr_assign_zone(&ip6addr, IP6_MULTICAST, netif); + groupaddr = &ip6addr; + } + IP6_ADDR_ZONECHECK_NETIF(groupaddr, netif); +#endif /* LWIP_IPV6_SCOPES */ + + LWIP_ASSERT_CORE_LOCKED(); + + /* find group */ + group = mld6_lookfor_group(netif, groupaddr); + + if (group != NULL) { + /* Leave if there is no other use of the group */ + if (group->use <= 1) { + /* Remove the group from the list */ + mld6_remove_group(netif, group); + + /* If we are the last reporter for this group */ + if (group->last_reporter_flag) { + MLD6_STATS_INC(mld6.tx_leave); + mld6_send(netif, group, ICMP6_TYPE_MLD); + } + + /* Disable the group at the MAC level */ + if (netif->mld_mac_filter != NULL) { + netif->mld_mac_filter(netif, groupaddr, NETIF_DEL_MAC_FILTER); + } + + /* free group struct */ + memp_free(MEMP_MLD6_GROUP, group); + } else { + /* Decrement group use */ + group->use--; + } + + /* Left group */ + return ERR_OK; + } + + /* Group not found */ + return ERR_VAL; +} + + +/** + * Periodic timer for mld processing. Must be called every + * MLD6_TMR_INTERVAL milliseconds (100). + * + * When a delaying member expires, a membership report is sent. + */ +void +mld6_tmr(void) +{ + struct netif *netif; + + NETIF_FOREACH(netif) { + struct mld_group *group = netif_mld6_data(netif); + + while (group != NULL) { + if (group->timer > 0) { + group->timer--; + if (group->timer == 0) { + /* If the state is MLD6_GROUP_DELAYING_MEMBER then we send a report for this group */ + if (group->group_state == MLD6_GROUP_DELAYING_MEMBER) { + MLD6_STATS_INC(mld6.tx_report); + mld6_send(netif, group, ICMP6_TYPE_MLR); + group->group_state = MLD6_GROUP_IDLE_MEMBER; + } + } + } + group = group->next; + } + } +} + +/** + * Schedule a delayed membership report for a group + * + * @param group the mld_group for which "delaying" membership report + * should be sent + * @param maxresp_in the max resp delay provided in the query + */ +static void +mld6_delayed_report(struct mld_group *group, u16_t maxresp_in) +{ + /* Convert maxresp from milliseconds to tmr ticks */ + u16_t maxresp = maxresp_in / MLD6_TMR_INTERVAL; + if (maxresp == 0) { + maxresp = 1; + } + +#ifdef LWIP_RAND + /* Randomize maxresp. (if LWIP_RAND is supported) */ + maxresp = (u16_t)(LWIP_RAND() % maxresp); + if (maxresp == 0) { + maxresp = 1; + } +#endif /* LWIP_RAND */ + + /* Apply timer value if no report has been scheduled already. */ + if ((group->group_state == MLD6_GROUP_IDLE_MEMBER) || + ((group->group_state == MLD6_GROUP_DELAYING_MEMBER) && + ((group->timer == 0) || (maxresp < group->timer)))) { + group->timer = maxresp; + group->group_state = MLD6_GROUP_DELAYING_MEMBER; + } +} + +/** + * Send a MLD message (report or done). + * + * An IPv6 hop-by-hop options header with a router alert option + * is prepended. + * + * @param group the group to report or quit + * @param type ICMP6_TYPE_MLR (report) or ICMP6_TYPE_MLD (done) + */ +static void +mld6_send(struct netif *netif, struct mld_group *group, u8_t type) +{ + struct mld_header *mld_hdr; + struct pbuf *p; + const ip6_addr_t *src_addr; + + /* Allocate a packet. 
Size is MLD header + IPv6 Hop-by-hop options header. */ + p = pbuf_alloc(PBUF_IP, sizeof(struct mld_header) + MLD6_HBH_HLEN, PBUF_RAM); + if (p == NULL) { + MLD6_STATS_INC(mld6.memerr); + return; + } + + /* Move to make room for Hop-by-hop options header. */ + if (pbuf_remove_header(p, MLD6_HBH_HLEN)) { + pbuf_free(p); + MLD6_STATS_INC(mld6.lenerr); + return; + } + + /* Select our source address. */ + if (!ip6_addr_isvalid(netif_ip6_addr_state(netif, 0))) { + /* This is a special case, when we are performing duplicate address detection. + * We must join the multicast group, but we don't have a valid address yet. */ + src_addr = IP6_ADDR_ANY6; + } else { + /* Use link-local address as source address. */ + src_addr = netif_ip6_addr(netif, 0); + } + + /* MLD message header pointer. */ + mld_hdr = (struct mld_header *)p->payload; + + /* Set fields. */ + mld_hdr->type = type; + mld_hdr->code = 0; + mld_hdr->chksum = 0; + mld_hdr->max_resp_delay = 0; + mld_hdr->reserved = 0; + ip6_addr_copy_to_packed(mld_hdr->multicast_address, group->group_address); + +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + mld_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, + src_addr, &(group->group_address)); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Add hop-by-hop headers options: router alert with MLD value. */ + ip6_options_add_hbh_ra(p, IP6_NEXTH_ICMP6, IP6_ROUTER_ALERT_VALUE_MLD); + + if (type == ICMP6_TYPE_MLR) { + /* Remember we were the last to report */ + group->last_reporter_flag = 1; + } + + /* Send the packet out. */ + MLD6_STATS_INC(mld6.xmit); + ip6_output_if(p, (ip6_addr_isany(src_addr)) ? NULL : src_addr, &(group->group_address), + MLD6_HL, 0, IP6_NEXTH_HOPBYHOP, netif); + pbuf_free(p); +} + +#endif /* LWIP_IPV6 */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c b/Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c new file mode 100644 index 0000000..db0c132 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c @@ -0,0 +1,2434 @@ +/** + * @file + * + * Neighbor discovery and stateless address autoconfiguration for IPv6. + * Aims to be compliant with RFC 4861 (Neighbor discovery) and RFC 4862 + * (Address autoconfiguration). + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/nd6.h" +#include "lwip/priv/nd6_priv.h" +#include "lwip/prot/nd6.h" +#include "lwip/prot/icmp6.h" +#include "lwip/pbuf.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/icmp6.h" +#include "lwip/mld6.h" +#include "lwip/dhcp6.h" +#include "lwip/ip.h" +#include "lwip/stats.h" +#include "lwip/dns.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +#if LWIP_IPV6_DUP_DETECT_ATTEMPTS > IP6_ADDR_TENTATIVE_COUNT_MASK +#error LWIP_IPV6_DUP_DETECT_ATTEMPTS > IP6_ADDR_TENTATIVE_COUNT_MASK +#endif + +/* Router tables. */ +struct nd6_neighbor_cache_entry neighbor_cache[LWIP_ND6_NUM_NEIGHBORS]; +struct nd6_destination_cache_entry destination_cache[LWIP_ND6_NUM_DESTINATIONS]; +struct nd6_prefix_list_entry prefix_list[LWIP_ND6_NUM_PREFIXES]; +struct nd6_router_list_entry default_router_list[LWIP_ND6_NUM_ROUTERS]; + +/* Default values, can be updated by a RA message. */ +u32_t reachable_time = LWIP_ND6_REACHABLE_TIME; +u32_t retrans_timer = LWIP_ND6_RETRANS_TIMER; /* @todo implement this value in timer */ + +/* Index for cache entries. */ +static u8_t nd6_cached_neighbor_index; +static netif_addr_idx_t nd6_cached_destination_index; + +/* Multicast address holder. */ +static ip6_addr_t multicast_address; + +static u8_t nd6_tmr_rs_reduction; + +/* Static buffer to parse RA packet options */ +union ra_options { + struct lladdr_option lladdr; + struct mtu_option mtu; + struct prefix_option prefix; +#if LWIP_ND6_RDNSS_MAX_DNS_SERVERS + struct rdnss_option rdnss; +#endif +}; +static union ra_options nd6_ra_buffer; + +/* Forward declarations. 
*/ +static s8_t nd6_find_neighbor_cache_entry(const ip6_addr_t *ip6addr); +static s8_t nd6_new_neighbor_cache_entry(void); +static void nd6_free_neighbor_cache_entry(s8_t i); +static s16_t nd6_find_destination_cache_entry(const ip6_addr_t *ip6addr); +static s16_t nd6_new_destination_cache_entry(void); +static int nd6_is_prefix_in_netif(const ip6_addr_t *ip6addr, struct netif *netif); +static s8_t nd6_select_router(const ip6_addr_t *ip6addr, struct netif *netif); +static s8_t nd6_get_router(const ip6_addr_t *router_addr, struct netif *netif); +static s8_t nd6_new_router(const ip6_addr_t *router_addr, struct netif *netif); +static s8_t nd6_get_onlink_prefix(const ip6_addr_t *prefix, struct netif *netif); +static s8_t nd6_new_onlink_prefix(const ip6_addr_t *prefix, struct netif *netif); +static s8_t nd6_get_next_hop_entry(const ip6_addr_t *ip6addr, struct netif *netif); +static err_t nd6_queue_packet(s8_t neighbor_index, struct pbuf *q); + +#define ND6_SEND_FLAG_MULTICAST_DEST 0x01 +#define ND6_SEND_FLAG_ALLNODES_DEST 0x02 +#define ND6_SEND_FLAG_ANY_SRC 0x04 +static void nd6_send_ns(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags); +static void nd6_send_na(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags); +static void nd6_send_neighbor_cache_probe(struct nd6_neighbor_cache_entry *entry, u8_t flags); +#if LWIP_IPV6_SEND_ROUTER_SOLICIT +static err_t nd6_send_rs(struct netif *netif); +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ + +#if LWIP_ND6_QUEUEING +static void nd6_free_q(struct nd6_q_entry *q); +#else /* LWIP_ND6_QUEUEING */ +#define nd6_free_q(q) pbuf_free(q) +#endif /* LWIP_ND6_QUEUEING */ +static void nd6_send_q(s8_t i); + + +/** + * A local address has been determined to be a duplicate. Take the appropriate + * action(s) on the address and the interface as a whole. + * + * @param netif the netif that owns the address + * @param addr_idx the index of the address detected to be a duplicate + */ +static void +nd6_duplicate_addr_detected(struct netif *netif, s8_t addr_idx) +{ + + /* Mark the address as duplicate, but leave its lifetimes alone. If this was + * a manually assigned address, it will remain in existence as duplicate, and + * as such be unusable for any practical purposes until manual intervention. + * If this was an autogenerated address, the address will follow normal + * expiration rules, and thus disappear once its valid lifetime expires. */ + netif_ip6_addr_set_state(netif, addr_idx, IP6_ADDR_DUPLICATED); + +#if LWIP_IPV6_AUTOCONFIG + /* If the affected address was the link-local address that we use to generate + * all other addresses, then we should not continue to use those derived + * addresses either, so mark them as duplicate as well. For autoconfig-only + * setups, this will make the interface effectively unusable, approaching the + * intention of RFC 4862 Sec. 5.4.5. @todo implement the full requirements */ + if (addr_idx == 0) { + s8_t i; + for (i = 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(netif, i)) && + !netif_ip6_addr_isstatic(netif, i)) { + netif_ip6_addr_set_state(netif, i, IP6_ADDR_DUPLICATED); + } + } + } +#endif /* LWIP_IPV6_AUTOCONFIG */ +} + +#if LWIP_IPV6_AUTOCONFIG +/** + * We received a router advertisement that contains a prefix with the + * autoconfiguration flag set. Add or update an associated autogenerated + * address. 
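+ * The address is formed by appending the interface identifier of the link-local address (slot 0) to the advertised /64 prefix, per RFC 4862 SLAAC.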
+ * + * @param netif the netif on which the router advertisement arrived + * @param prefix_opt a pointer to the prefix option data + * @param prefix_addr an aligned copy of the prefix address + */ +static void +nd6_process_autoconfig_prefix(struct netif *netif, + struct prefix_option *prefix_opt, const ip6_addr_t *prefix_addr) +{ + ip6_addr_t ip6addr; + u32_t valid_life, pref_life; + u8_t addr_state; + s8_t i, free_idx; + + /* The caller already checks RFC 4862 Sec. 5.5.3 points (a) and (b). We do + * the rest, starting with checks for (c) and (d) here. */ + valid_life = lwip_htonl(prefix_opt->valid_lifetime); + pref_life = lwip_htonl(prefix_opt->preferred_lifetime); + if (pref_life > valid_life || prefix_opt->prefix_length != 64) { + return; /* silently ignore this prefix for autoconfiguration purposes */ + } + + /* If an autogenerated address already exists for this prefix, update its + * lifetimes. An address is considered autogenerated if 1) it is not static + * (i.e., manually assigned), and 2) there is an advertised autoconfiguration + * prefix for it (the one we are processing here). This does not necessarily + * exclude the possibility that the address was actually assigned by, say, + * DHCPv6. If that distinction becomes important in the future, more state + * must be kept. As explained elsewhere we also update lifetimes of tentative + * and duplicate addresses. Skip address slot 0 (the link-local address). */ + for (i = 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + addr_state = netif_ip6_addr_state(netif, i); + if (!ip6_addr_isinvalid(addr_state) && !netif_ip6_addr_isstatic(netif, i) && + ip6_addr_netcmp(prefix_addr, netif_ip6_addr(netif, i))) { + /* Update the valid lifetime, as per RFC 4862 Sec. 5.5.3 point (e). + * The valid lifetime will never drop to zero as a result of this. */ + u32_t remaining_life = netif_ip6_addr_valid_life(netif, i); + if (valid_life > ND6_2HRS || valid_life > remaining_life) { + netif_ip6_addr_set_valid_life(netif, i, valid_life); + } else if (remaining_life > ND6_2HRS) { + netif_ip6_addr_set_valid_life(netif, i, ND6_2HRS); + } + LWIP_ASSERT("bad valid lifetime", !netif_ip6_addr_isstatic(netif, i)); + /* Update the preferred lifetime. No bounds checks are needed here. In + * rare cases the advertisement may un-deprecate the address, though. + * Deprecation is left to the timer code where it is handled anyway. */ + if (pref_life > 0 && addr_state == IP6_ADDR_DEPRECATED) { + netif_ip6_addr_set_state(netif, i, IP6_ADDR_PREFERRED); + } + netif_ip6_addr_set_pref_life(netif, i, pref_life); + return; /* there should be at most one matching address */ + } + } + + /* No autogenerated address exists for this prefix yet. See if we can add a + * new one. However, if IPv6 autoconfiguration is administratively disabled, + * do not generate new addresses, but do keep updating lifetimes for existing + * addresses. Also, when adding new addresses, we must protect explicitly + * against a valid lifetime of zero, because again, we use that as a special + * value. The generated address would otherwise expire immediately anyway. + * Finally, the original link-local address must be usable at all. We start + * creating addresses even if the link-local address is still in tentative + * state though, and deal with the fallout of that upon DAD collision. 
*/ + addr_state = netif_ip6_addr_state(netif, 0); + if (!netif->ip6_autoconfig_enabled || valid_life == IP6_ADDR_LIFE_STATIC || + ip6_addr_isinvalid(addr_state) || ip6_addr_isduplicated(addr_state)) { + return; + } + + /* Construct the new address that we intend to use, and then see if that + * address really does not exist. It might have been added manually, after + * all. As a side effect, find a free slot. Note that we cannot use + * netif_add_ip6_address() here, as it would return ERR_OK if the address + * already did exist, resulting in that address being given lifetimes. */ + IP6_ADDR(&ip6addr, prefix_addr->addr[0], prefix_addr->addr[1], + netif_ip6_addr(netif, 0)->addr[2], netif_ip6_addr(netif, 0)->addr[3]); + ip6_addr_assign_zone(&ip6addr, IP6_UNICAST, netif); + + free_idx = 0; + for (i = 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(netif, i))) { + if (ip6_addr_cmp(&ip6addr, netif_ip6_addr(netif, i))) { + return; /* formed address already exists */ + } + } else if (free_idx == 0) { + free_idx = i; + } + } + if (free_idx == 0) { + return; /* no address slots available, try again on next advertisement */ + } + + /* Assign the new address to the interface. */ + ip_addr_copy_from_ip6(netif->ip6_addr[free_idx], ip6addr); + netif_ip6_addr_set_valid_life(netif, free_idx, valid_life); + netif_ip6_addr_set_pref_life(netif, free_idx, pref_life); + netif_ip6_addr_set_state(netif, free_idx, IP6_ADDR_TENTATIVE); +} +#endif /* LWIP_IPV6_AUTOCONFIG */ + +/** + * Process an incoming neighbor discovery message + * + * @param p the nd packet, p->payload pointing to the icmpv6 header + * @param inp the netif on which this packet was received + */ +void +nd6_input(struct pbuf *p, struct netif *inp) +{ + u8_t msg_type; + s8_t i; + s16_t dest_idx; + + ND6_STATS_INC(nd6.recv); + + msg_type = *((u8_t *)p->payload); + switch (msg_type) { + case ICMP6_TYPE_NA: /* Neighbor Advertisement. */ + { + struct na_header *na_hdr; + struct lladdr_option *lladdr_opt; + ip6_addr_t target_address; + + /* Check that na header fits in packet. */ + if (p->len < (sizeof(struct na_header))) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + na_hdr = (struct na_header *)p->payload; + + /* Create an aligned, zoned copy of the target address. */ + ip6_addr_copy_from_packed(target_address, na_hdr->target_address); + ip6_addr_assign_zone(&target_address, IP6_UNICAST, inp); + + /* Check a subset of the other RFC 4861 Sec. 7.1.2 requirements. */ + if (IP6H_HOPLIM(ip6_current_header()) != ND6_HOPLIM || na_hdr->code != 0 || + ip6_addr_ismulticast(&target_address)) { + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* @todo RFC MUST: if IP destination is multicast, Solicited flag is zero */ + /* @todo RFC MUST: all included options have a length greater than zero */ + + /* Unsolicited NA?*/ + if (ip6_addr_ismulticast(ip6_current_dest_addr())) { + /* This is an unsolicited NA. + * link-layer changed? + * part of DAD mechanism? */ + +#if LWIP_IPV6_DUP_DETECT_ATTEMPTS + /* If the target address matches this netif, it is a DAD response. */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(inp, i)) && + !ip6_addr_isduplicated(netif_ip6_addr_state(inp, i)) && + ip6_addr_cmp(&target_address, netif_ip6_addr(inp, i))) { + /* We are using a duplicate address. 
*/ + nd6_duplicate_addr_detected(inp, i); + + pbuf_free(p); + return; + } + } +#endif /* LWIP_IPV6_DUP_DETECT_ATTEMPTS */ + + /* Check that link-layer address option also fits in packet. */ + if (p->len < (sizeof(struct na_header) + 2)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct na_header)); + + if (p->len < (sizeof(struct na_header) + (lladdr_opt->length << 3))) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* This is an unsolicited NA, most likely there was a LLADDR change. */ + i = nd6_find_neighbor_cache_entry(&target_address); + if (i >= 0) { + if (na_hdr->flags & ND6_FLAG_OVERRIDE) { + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + } + } + } else { + /* This is a solicited NA. + * neighbor address resolution response? + * neighbor unreachability detection response? */ + + /* Find the cache entry corresponding to this na. */ + i = nd6_find_neighbor_cache_entry(&target_address); + if (i < 0) { + /* We no longer care about this target address. drop it. */ + pbuf_free(p); + return; + } + + /* Update cache entry. */ + if ((na_hdr->flags & ND6_FLAG_OVERRIDE) || + (neighbor_cache[i].state == ND6_INCOMPLETE)) { + /* Check that link-layer address option also fits in packet. */ + if (p->len < (sizeof(struct na_header) + 2)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct na_header)); + + if (p->len < (sizeof(struct na_header) + (lladdr_opt->length << 3))) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + } + + neighbor_cache[i].netif = inp; + neighbor_cache[i].state = ND6_REACHABLE; + neighbor_cache[i].counter.reachable_time = reachable_time; + + /* Send queued packets, if any. */ + if (neighbor_cache[i].q != NULL) { + nd6_send_q(i); + } + } + + break; /* ICMP6_TYPE_NA */ + } + case ICMP6_TYPE_NS: /* Neighbor solicitation. */ + { + struct ns_header *ns_hdr; + struct lladdr_option *lladdr_opt; + ip6_addr_t target_address; + u8_t accepted; + + /* Check that ns header fits in packet. */ + if (p->len < sizeof(struct ns_header)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + ns_hdr = (struct ns_header *)p->payload; + + /* Create an aligned, zoned copy of the target address. */ + ip6_addr_copy_from_packed(target_address, ns_hdr->target_address); + ip6_addr_assign_zone(&target_address, IP6_UNICAST, inp); + + /* Check a subset of the other RFC 4861 Sec. 7.1.1 requirements. */ + if (IP6H_HOPLIM(ip6_current_header()) != ND6_HOPLIM || ns_hdr->code != 0 || + ip6_addr_ismulticast(&target_address)) { + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* @todo RFC MUST: all included options have a length greater than zero */ + /* @todo RFC MUST: if IP source is 'any', destination is solicited-node multicast address */ + /* @todo RFC MUST: if IP source is 'any', there is no source LL address option */ + + /* Check if there is a link-layer address provided. Only point to it if in this buffer. 
*/ + if (p->len >= (sizeof(struct ns_header) + 2)) { + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct ns_header)); + if (p->len < (sizeof(struct ns_header) + (lladdr_opt->length << 3))) { + lladdr_opt = NULL; + } + } else { + lladdr_opt = NULL; + } + + /* Check if the target address is configured on the receiving netif. */ + accepted = 0; + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) { + if ((ip6_addr_isvalid(netif_ip6_addr_state(inp, i)) || + (ip6_addr_istentative(netif_ip6_addr_state(inp, i)) && + ip6_addr_isany(ip6_current_src_addr()))) && + ip6_addr_cmp(&target_address, netif_ip6_addr(inp, i))) { + accepted = 1; + break; + } + } + + /* NS not for us? */ + if (!accepted) { + pbuf_free(p); + return; + } + + /* Check for ANY address in src (DAD algorithm). */ + if (ip6_addr_isany(ip6_current_src_addr())) { + /* Sender is validating this address. */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(inp, i)) && + ip6_addr_cmp(&target_address, netif_ip6_addr(inp, i))) { + /* Send a NA back so that the sender does not use this address. */ + nd6_send_na(inp, netif_ip6_addr(inp, i), ND6_FLAG_OVERRIDE | ND6_SEND_FLAG_ALLNODES_DEST); + if (ip6_addr_istentative(netif_ip6_addr_state(inp, i))) { + /* We shouldn't use this address either. */ + nd6_duplicate_addr_detected(inp, i); + } + } + } + } else { + /* Sender is trying to resolve our address. */ + /* Verify that they included their own link-layer address. */ + if (lladdr_opt == NULL) { + /* Not a valid message. */ + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + i = nd6_find_neighbor_cache_entry(ip6_current_src_addr()); + if (i>= 0) { + /* We already have a record for the solicitor. */ + if (neighbor_cache[i].state == ND6_INCOMPLETE) { + neighbor_cache[i].netif = inp; + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + + /* Delay probe in case we get confirmation of reachability from upper layer (TCP). */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + } else { + /* Add their IPv6 address and link-layer address to neighbor cache. + * We will need it at least to send a unicast NA message, but most + * likely we will also be communicating with this node soon. */ + i = nd6_new_neighbor_cache_entry(); + if (i < 0) { + /* We couldn't assign a cache entry for this neighbor. + * we won't be able to reply. drop it. */ + pbuf_free(p); + ND6_STATS_INC(nd6.memerr); + return; + } + neighbor_cache[i].netif = inp; + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + ip6_addr_set(&(neighbor_cache[i].next_hop_address), ip6_current_src_addr()); + + /* Receiving a message does not prove reachability: only in one direction. + * Delay probe in case we get confirmation of reachability from upper layer (TCP). */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + + /* Send back a NA for us. Allocate the reply pbuf. */ + nd6_send_na(inp, &target_address, ND6_FLAG_SOLICITED | ND6_FLAG_OVERRIDE); + } + + break; /* ICMP6_TYPE_NS */ + } + case ICMP6_TYPE_RA: /* Router Advertisement. */ + { + struct ra_header *ra_hdr; + u8_t *buffer; /* Used to copy options. 
*/ + u16_t offset; +#if LWIP_ND6_RDNSS_MAX_DNS_SERVERS + /* There can be multiple RDNSS options per RA */ + u8_t rdnss_server_idx = 0; +#endif /* LWIP_ND6_RDNSS_MAX_DNS_SERVERS */ + + /* Check that RA header fits in packet. */ + if (p->len < sizeof(struct ra_header)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + ra_hdr = (struct ra_header *)p->payload; + + /* Check a subset of the other RFC 4861 Sec. 6.1.2 requirements. */ + if (!ip6_addr_islinklocal(ip6_current_src_addr()) || + IP6H_HOPLIM(ip6_current_header()) != ND6_HOPLIM || ra_hdr->code != 0) { + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* @todo RFC MUST: all included options have a length greater than zero */ + + /* If we are sending RS messages, stop. */ +#if LWIP_IPV6_SEND_ROUTER_SOLICIT + /* ensure at least one solicitation is sent (see RFC 4861, ch. 6.3.7) */ + if ((inp->rs_count < LWIP_ND6_MAX_MULTICAST_SOLICIT) || + (nd6_send_rs(inp) == ERR_OK)) { + inp->rs_count = 0; + } else { + inp->rs_count = 1; + } +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ + + /* Get the matching default router entry. */ + i = nd6_get_router(ip6_current_src_addr(), inp); + if (i < 0) { + /* Create a new router entry. */ + i = nd6_new_router(ip6_current_src_addr(), inp); + } + + if (i < 0) { + /* Could not create a new router entry. */ + pbuf_free(p); + ND6_STATS_INC(nd6.memerr); + return; + } + + /* Re-set invalidation timer. */ + default_router_list[i].invalidation_timer = lwip_htons(ra_hdr->router_lifetime); + + /* Re-set default timer values. */ +#if LWIP_ND6_ALLOW_RA_UPDATES + if (ra_hdr->retrans_timer > 0) { + retrans_timer = lwip_htonl(ra_hdr->retrans_timer); + } + if (ra_hdr->reachable_time > 0) { + reachable_time = lwip_htonl(ra_hdr->reachable_time); + } +#endif /* LWIP_ND6_ALLOW_RA_UPDATES */ + + /* @todo set default hop limit... */ + /* ra_hdr->current_hop_limit;*/ + + /* Update flags in local entry (incl. preference). */ + default_router_list[i].flags = ra_hdr->flags; + +#if LWIP_IPV6_DHCP6 + /* Trigger DHCPv6 if enabled */ + dhcp6_nd6_ra_trigger(inp, ra_hdr->flags & ND6_RA_FLAG_MANAGED_ADDR_CONFIG, + ra_hdr->flags & ND6_RA_FLAG_OTHER_CONFIG); +#endif + + /* Offset to options. */ + offset = sizeof(struct ra_header); + + /* Process each option. 
*/ + while ((p->tot_len - offset) >= 2) { + u8_t option_type; + u16_t option_len; + int option_len8 = pbuf_try_get_at(p, offset + 1); + if (option_len8 <= 0) { + /* read beyond end or zero length */ + goto lenerr_drop_free_return; + } + option_len = ((u8_t)option_len8) << 3; + if (option_len > p->tot_len - offset) { + /* short packet (option does not fit in) */ + goto lenerr_drop_free_return; + } + if (p->len == p->tot_len) { + /* no need to copy from contiguous pbuf */ + buffer = &((u8_t*)p->payload)[offset]; + } else { + /* check if this option fits into our buffer */ + if (option_len > sizeof(nd6_ra_buffer)) { + option_type = pbuf_get_at(p, offset); + /* invalid option length */ + if (option_type != ND6_OPTION_TYPE_RDNSS) { + goto lenerr_drop_free_return; + } + /* we allow RDNSS option to be longer - we'll just drop some servers */ + option_len = sizeof(nd6_ra_buffer); + } + buffer = (u8_t*)&nd6_ra_buffer; + option_len = pbuf_copy_partial(p, &nd6_ra_buffer, option_len, offset); + } + option_type = buffer[0]; + switch (option_type) { + case ND6_OPTION_TYPE_SOURCE_LLADDR: + { + struct lladdr_option *lladdr_opt; + if (option_len < sizeof(struct lladdr_option)) { + goto lenerr_drop_free_return; + } + lladdr_opt = (struct lladdr_option *)buffer; + if ((default_router_list[i].neighbor_entry != NULL) && + (default_router_list[i].neighbor_entry->state == ND6_INCOMPLETE)) { + SMEMCPY(default_router_list[i].neighbor_entry->lladdr, lladdr_opt->addr, inp->hwaddr_len); + default_router_list[i].neighbor_entry->state = ND6_REACHABLE; + default_router_list[i].neighbor_entry->counter.reachable_time = reachable_time; + } + break; + } + case ND6_OPTION_TYPE_MTU: + { + struct mtu_option *mtu_opt; + u32_t mtu32; + if (option_len < sizeof(struct mtu_option)) { + goto lenerr_drop_free_return; + } + mtu_opt = (struct mtu_option *)buffer; + mtu32 = lwip_htonl(mtu_opt->mtu); + if ((mtu32 >= 1280) && (mtu32 <= 0xffff)) { +#if LWIP_ND6_ALLOW_RA_UPDATES + if (inp->mtu) { + /* don't set the mtu for IPv6 higher than the netif driver supports */ + inp->mtu6 = LWIP_MIN(inp->mtu, (u16_t)mtu32); + } else { + inp->mtu6 = (u16_t)mtu32; + } +#endif /* LWIP_ND6_ALLOW_RA_UPDATES */ + } + break; + } + case ND6_OPTION_TYPE_PREFIX_INFO: + { + struct prefix_option *prefix_opt; + ip6_addr_t prefix_addr; + if (option_len < sizeof(struct prefix_option)) { + goto lenerr_drop_free_return; + } + + prefix_opt = (struct prefix_option *)buffer; + + /* Get a memory-aligned copy of the prefix. */ + ip6_addr_copy_from_packed(prefix_addr, prefix_opt->prefix); + ip6_addr_assign_zone(&prefix_addr, IP6_UNICAST, inp); + + if (!ip6_addr_islinklocal(&prefix_addr)) { + if ((prefix_opt->flags & ND6_PREFIX_FLAG_ON_LINK) && + (prefix_opt->prefix_length == 64)) { + /* Add to on-link prefix list. */ + u32_t valid_life; + s8_t prefix; + + valid_life = lwip_htonl(prefix_opt->valid_lifetime); + + /* find cache entry for this prefix. */ + prefix = nd6_get_onlink_prefix(&prefix_addr, inp); + if (prefix < 0 && valid_life > 0) { + /* Create a new cache entry. */ + prefix = nd6_new_onlink_prefix(&prefix_addr, inp); + } + if (prefix >= 0) { + prefix_list[prefix].invalidation_timer = valid_life; + } + } +#if LWIP_IPV6_AUTOCONFIG + if (prefix_opt->flags & ND6_PREFIX_FLAG_AUTONOMOUS) { + /* Perform processing for autoconfiguration. */ + nd6_process_autoconfig_prefix(inp, prefix_opt, &prefix_addr); + } +#endif /* LWIP_IPV6_AUTOCONFIG */ + } + + break; + } + case ND6_OPTION_TYPE_ROUTE_INFO: + /* @todo implement preferred routes. 
+ struct route_option * route_opt; + route_opt = (struct route_option *)buffer;*/ + + break; +#if LWIP_ND6_RDNSS_MAX_DNS_SERVERS + case ND6_OPTION_TYPE_RDNSS: + { + u8_t num, n; + u16_t copy_offset = offset + SIZEOF_RDNSS_OPTION_BASE; + struct rdnss_option * rdnss_opt; + if (option_len < SIZEOF_RDNSS_OPTION_BASE) { + goto lenerr_drop_free_return; + } + + rdnss_opt = (struct rdnss_option *)buffer; + num = (rdnss_opt->length - 1) / 2; + for (n = 0; (rdnss_server_idx < DNS_MAX_SERVERS) && (n < num); n++) { + ip_addr_t rdnss_address; + + /* Copy directly from pbuf to get an aligned, zoned copy of the prefix. */ + if (pbuf_copy_partial(p, &rdnss_address, sizeof(ip6_addr_p_t), copy_offset) == sizeof(ip6_addr_p_t)) { + IP_SET_TYPE_VAL(rdnss_address, IPADDR_TYPE_V6); + ip6_addr_assign_zone(ip_2_ip6(&rdnss_address), IP6_UNKNOWN, inp); + + if (htonl(rdnss_opt->lifetime) > 0) { + /* TODO implement Lifetime > 0 */ + dns_setserver(rdnss_server_idx++, &rdnss_address); + } else { + /* TODO implement DNS removal in dns.c */ + u8_t s; + for (s = 0; s < DNS_MAX_SERVERS; s++) { + const ip_addr_t *addr = dns_getserver(s); + if(ip_addr_cmp(addr, &rdnss_address)) { + dns_setserver(s, NULL); + } + } + } + } + } + break; + } +#endif /* LWIP_ND6_RDNSS_MAX_DNS_SERVERS */ + default: + /* Unrecognized option, abort. */ + ND6_STATS_INC(nd6.proterr); + break; + } + /* option length is checked earlier to be non-zero to make sure loop ends */ + offset += 8 * (u8_t)option_len8; + } + + break; /* ICMP6_TYPE_RA */ + } + case ICMP6_TYPE_RD: /* Redirect */ + { + struct redirect_header *redir_hdr; + struct lladdr_option *lladdr_opt; + ip6_addr_t destination_address, target_address; + + /* Check that Redir header fits in packet. */ + if (p->len < sizeof(struct redirect_header)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + redir_hdr = (struct redirect_header *)p->payload; + + /* Create an aligned, zoned copy of the destination address. */ + ip6_addr_copy_from_packed(destination_address, redir_hdr->destination_address); + ip6_addr_assign_zone(&destination_address, IP6_UNICAST, inp); + + /* Check a subset of the other RFC 4861 Sec. 8.1 requirements. */ + if (!ip6_addr_islinklocal(ip6_current_src_addr()) || + IP6H_HOPLIM(ip6_current_header()) != ND6_HOPLIM || + redir_hdr->code != 0 || ip6_addr_ismulticast(&destination_address)) { + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* @todo RFC MUST: IP source address equals first-hop router for destination_address */ + /* @todo RFC MUST: ICMP target address is either link-local address or same as destination_address */ + /* @todo RFC MUST: all included options have a length greater than zero */ + + if (p->len >= (sizeof(struct redirect_header) + 2)) { + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct redirect_header)); + if (p->len < (sizeof(struct redirect_header) + (lladdr_opt->length << 3))) { + lladdr_opt = NULL; + } + } else { + lladdr_opt = NULL; + } + + /* Find dest address in cache */ + dest_idx = nd6_find_destination_cache_entry(&destination_address); + if (dest_idx < 0) { + /* Destination not in cache, drop packet. */ + pbuf_free(p); + return; + } + + /* Create an aligned, zoned copy of the target address. */ + ip6_addr_copy_from_packed(target_address, redir_hdr->target_address); + ip6_addr_assign_zone(&target_address, IP6_UNICAST, inp); + + /* Set the new target address. 
*/ + ip6_addr_copy(destination_cache[dest_idx].next_hop_addr, target_address); + + /* If Link-layer address of other router is given, try to add to neighbor cache. */ + if (lladdr_opt != NULL) { + if (lladdr_opt->type == ND6_OPTION_TYPE_TARGET_LLADDR) { + i = nd6_find_neighbor_cache_entry(&target_address); + if (i < 0) { + i = nd6_new_neighbor_cache_entry(); + if (i >= 0) { + neighbor_cache[i].netif = inp; + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + ip6_addr_copy(neighbor_cache[i].next_hop_address, target_address); + + /* Receiving a message does not prove reachability: only in one direction. + * Delay probe in case we get confirmation of reachability from upper layer (TCP). */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + } + if (i >= 0) { + if (neighbor_cache[i].state == ND6_INCOMPLETE) { + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + /* Receiving a message does not prove reachability: only in one direction. + * Delay probe in case we get confirmation of reachability from upper layer (TCP). */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + } + } + } + break; /* ICMP6_TYPE_RD */ + } + case ICMP6_TYPE_PTB: /* Packet too big */ + { + struct icmp6_hdr *icmp6hdr; /* Packet too big message */ + struct ip6_hdr *ip6hdr; /* IPv6 header of the packet which caused the error */ + u32_t pmtu; + ip6_addr_t destination_address; + + /* Check that ICMPv6 header + IPv6 header fit in payload */ + if (p->len < (sizeof(struct icmp6_hdr) + IP6_HLEN)) { + /* drop short packets */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + icmp6hdr = (struct icmp6_hdr *)p->payload; + ip6hdr = (struct ip6_hdr *)((u8_t*)p->payload + sizeof(struct icmp6_hdr)); + + /* Create an aligned, zoned copy of the destination address. */ + ip6_addr_copy_from_packed(destination_address, ip6hdr->dest); + ip6_addr_assign_zone(&destination_address, IP6_UNKNOWN, inp); + + /* Look for entry in destination cache. */ + dest_idx = nd6_find_destination_cache_entry(&destination_address); + if (dest_idx < 0) { + /* Destination not in cache, drop packet. */ + pbuf_free(p); + return; + } + + /* Change the Path MTU. */ + pmtu = lwip_htonl(icmp6hdr->data); + destination_cache[dest_idx].pmtu = (u16_t)LWIP_MIN(pmtu, 0xFFFF); + + break; /* ICMP6_TYPE_PTB */ + } + + default: + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + break; /* default */ + } + + pbuf_free(p); + return; +lenerr_drop_free_return: + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + pbuf_free(p); +} + + +/** + * Periodic timer for Neighbor discovery functions: + * + * - Update neighbor reachability states + * - Update destination cache entries age + * - Update invalidation timers of default routers and on-link prefixes + * - Update lifetimes of our addresses + * - Perform duplicate address detection (DAD) for our addresses + * - Send router solicitations + */ +void +nd6_tmr(void) +{ + s8_t i; + struct netif *netif; + + /* Process neighbor entries. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + switch (neighbor_cache[i].state) { + case ND6_INCOMPLETE: + if ((neighbor_cache[i].counter.probes_sent >= LWIP_ND6_MAX_MULTICAST_SOLICIT) && + (!neighbor_cache[i].isrouter)) { + /* Retries exceeded. */ + nd6_free_neighbor_cache_entry(i); + } else { + /* Send a NS for this entry. 
*/ + neighbor_cache[i].counter.probes_sent++; + nd6_send_neighbor_cache_probe(&neighbor_cache[i], ND6_SEND_FLAG_MULTICAST_DEST); + } + break; + case ND6_REACHABLE: + /* Send queued packets, if any are left. Should have been sent already. */ + if (neighbor_cache[i].q != NULL) { + nd6_send_q(i); + } + if (neighbor_cache[i].counter.reachable_time <= ND6_TMR_INTERVAL) { + /* Change to stale state. */ + neighbor_cache[i].state = ND6_STALE; + neighbor_cache[i].counter.stale_time = 0; + } else { + neighbor_cache[i].counter.reachable_time -= ND6_TMR_INTERVAL; + } + break; + case ND6_STALE: + neighbor_cache[i].counter.stale_time++; + break; + case ND6_DELAY: + if (neighbor_cache[i].counter.delay_time <= 1) { + /* Change to PROBE state. */ + neighbor_cache[i].state = ND6_PROBE; + neighbor_cache[i].counter.probes_sent = 0; + } else { + neighbor_cache[i].counter.delay_time--; + } + break; + case ND6_PROBE: + if ((neighbor_cache[i].counter.probes_sent >= LWIP_ND6_MAX_MULTICAST_SOLICIT) && + (!neighbor_cache[i].isrouter)) { + /* Retries exceeded. */ + nd6_free_neighbor_cache_entry(i); + } else { + /* Send a NS for this entry. */ + neighbor_cache[i].counter.probes_sent++; + nd6_send_neighbor_cache_probe(&neighbor_cache[i], 0); + } + break; + case ND6_NO_ENTRY: + default: + /* Do nothing. */ + break; + } + } + + /* Process destination entries. */ + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + destination_cache[i].age++; + } + + /* Process router entries. */ + for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) { + if (default_router_list[i].neighbor_entry != NULL) { + /* Active entry. */ + if (default_router_list[i].invalidation_timer <= ND6_TMR_INTERVAL / 1000) { + /* No more than 1 second remaining. Clear this entry. Also clear any of + * its destination cache entries, as per RFC 4861 Sec. 5.3 and 6.3.5. */ + s8_t j; + for (j = 0; j < LWIP_ND6_NUM_DESTINATIONS; j++) { + if (ip6_addr_cmp(&destination_cache[j].next_hop_addr, + &default_router_list[i].neighbor_entry->next_hop_address)) { + ip6_addr_set_any(&destination_cache[j].destination_addr); + } + } + default_router_list[i].neighbor_entry->isrouter = 0; + default_router_list[i].neighbor_entry = NULL; + default_router_list[i].invalidation_timer = 0; + default_router_list[i].flags = 0; + } else { + default_router_list[i].invalidation_timer -= ND6_TMR_INTERVAL / 1000; + } + } + } + + /* Process prefix entries. */ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; i++) { + if (prefix_list[i].netif != NULL) { + if (prefix_list[i].invalidation_timer <= ND6_TMR_INTERVAL / 1000) { + /* Entry timed out, remove it */ + prefix_list[i].invalidation_timer = 0; + prefix_list[i].netif = NULL; + } else { + prefix_list[i].invalidation_timer -= ND6_TMR_INTERVAL / 1000; + } + } + } + + /* Process our own addresses, updating address lifetimes and/or DAD state. */ + NETIF_FOREACH(netif) { + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) { + u8_t addr_state; +#if LWIP_IPV6_ADDRESS_LIFETIMES + /* Step 1: update address lifetimes (valid and preferred). */ + addr_state = netif_ip6_addr_state(netif, i); + /* RFC 4862 is not entirely clear as to whether address lifetimes affect + * tentative addresses, and is even less clear as to what should happen + * with duplicate addresses. We choose to track and update lifetimes for + * both those types, although for different reasons: + * - for tentative addresses, the line of thought of Sec. 
5.7 combined + * with the potentially long period that an address may be in tentative + * state (due to the interface being down) suggests that lifetimes + * should be independent of external factors which would include DAD; + * - for duplicate addresses, retiring them early could result in a new + * but unwanted attempt at marking them as valid, while retiring them + * late/never could clog up address slots on the netif. + * As a result, we may end up expiring addresses of either type here. + */ + if (!ip6_addr_isinvalid(addr_state) && + !netif_ip6_addr_isstatic(netif, i)) { + u32_t life = netif_ip6_addr_valid_life(netif, i); + if (life <= ND6_TMR_INTERVAL / 1000) { + /* The address has expired. */ + netif_ip6_addr_set_valid_life(netif, i, 0); + netif_ip6_addr_set_pref_life(netif, i, 0); + netif_ip6_addr_set_state(netif, i, IP6_ADDR_INVALID); + } else { + if (!ip6_addr_life_isinfinite(life)) { + life -= ND6_TMR_INTERVAL / 1000; + LWIP_ASSERT("bad valid lifetime", life != IP6_ADDR_LIFE_STATIC); + netif_ip6_addr_set_valid_life(netif, i, life); + } + /* The address is still here. Update the preferred lifetime too. */ + life = netif_ip6_addr_pref_life(netif, i); + if (life <= ND6_TMR_INTERVAL / 1000) { + /* This case must also trigger if 'life' was already zero, so as to + * deal correctly with advertised preferred-lifetime reductions. */ + netif_ip6_addr_set_pref_life(netif, i, 0); + if (addr_state == IP6_ADDR_PREFERRED) + netif_ip6_addr_set_state(netif, i, IP6_ADDR_DEPRECATED); + } else if (!ip6_addr_life_isinfinite(life)) { + life -= ND6_TMR_INTERVAL / 1000; + netif_ip6_addr_set_pref_life(netif, i, life); + } + } + } + /* The address state may now have changed, so reobtain it next. */ +#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */ + /* Step 2: update DAD state. */ + addr_state = netif_ip6_addr_state(netif, i); + if (ip6_addr_istentative(addr_state)) { + if ((addr_state & IP6_ADDR_TENTATIVE_COUNT_MASK) >= LWIP_IPV6_DUP_DETECT_ATTEMPTS) { + /* No NA received in response. Mark address as valid. For dynamic + * addresses with an expired preferred lifetime, the state is set to + * deprecated right away. That should almost never happen, though. */ + addr_state = IP6_ADDR_PREFERRED; +#if LWIP_IPV6_ADDRESS_LIFETIMES + if (!netif_ip6_addr_isstatic(netif, i) && + netif_ip6_addr_pref_life(netif, i) == 0) { + addr_state = IP6_ADDR_DEPRECATED; + } +#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */ + netif_ip6_addr_set_state(netif, i, addr_state); + } else if (netif_is_up(netif) && netif_is_link_up(netif)) { + /* tentative: set next state by increasing by one */ + netif_ip6_addr_set_state(netif, i, addr_state + 1); + /* Send a NS for this address. Use the unspecified address as source + * address in all cases (RFC 4862 Sec. 5.4.2), not in the least + * because as it is, we only consider multicast replies for DAD. */ + nd6_send_ns(netif, netif_ip6_addr(netif, i), + ND6_SEND_FLAG_MULTICAST_DEST | ND6_SEND_FLAG_ANY_SRC); + } + } + } + } + +#if LWIP_IPV6_SEND_ROUTER_SOLICIT + /* Send router solicitation messages, if necessary. 
*/ + if (!nd6_tmr_rs_reduction) { + nd6_tmr_rs_reduction = (ND6_RTR_SOLICITATION_INTERVAL / ND6_TMR_INTERVAL) - 1; + NETIF_FOREACH(netif) { + if ((netif->rs_count > 0) && netif_is_up(netif) && + netif_is_link_up(netif) && + !ip6_addr_isinvalid(netif_ip6_addr_state(netif, 0)) && + !ip6_addr_isduplicated(netif_ip6_addr_state(netif, 0))) { + if (nd6_send_rs(netif) == ERR_OK) { + netif->rs_count--; + } + } + } + } else { + nd6_tmr_rs_reduction--; + } +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ + +} + +/** Send a neighbor solicitation message for a specific neighbor cache entry + * + * @param entry the neighbor cache entry for which to send the message + * @param flags one of ND6_SEND_FLAG_* + */ +static void +nd6_send_neighbor_cache_probe(struct nd6_neighbor_cache_entry *entry, u8_t flags) +{ + nd6_send_ns(entry->netif, &entry->next_hop_address, flags); +} + +/** + * Send a neighbor solicitation message + * + * @param netif the netif on which to send the message + * @param target_addr the IPv6 target address for the ND message + * @param flags one of ND6_SEND_FLAG_* + */ +static void +nd6_send_ns(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags) +{ + struct ns_header *ns_hdr; + struct pbuf *p; + const ip6_addr_t *src_addr; + u16_t lladdr_opt_len; + + LWIP_ASSERT("target address is required", target_addr != NULL); + + if (!(flags & ND6_SEND_FLAG_ANY_SRC) && + ip6_addr_isvalid(netif_ip6_addr_state(netif,0))) { + /* Use link-local address as source address. */ + src_addr = netif_ip6_addr(netif, 0); + /* calculate option length (in 8-byte-blocks) */ + lladdr_opt_len = ((netif->hwaddr_len + 2) + 7) >> 3; + } else { + src_addr = IP6_ADDR_ANY6; + /* Option "MUST NOT be included when the source IP address is the unspecified address." */ + lladdr_opt_len = 0; + } + + /* Allocate a packet. */ + p = pbuf_alloc(PBUF_IP, sizeof(struct ns_header) + (lladdr_opt_len << 3), PBUF_RAM); + if (p == NULL) { + ND6_STATS_INC(nd6.memerr); + return; + } + + /* Set fields. */ + ns_hdr = (struct ns_header *)p->payload; + + ns_hdr->type = ICMP6_TYPE_NS; + ns_hdr->code = 0; + ns_hdr->chksum = 0; + ns_hdr->reserved = 0; + ip6_addr_copy_to_packed(ns_hdr->target_address, *target_addr); + + if (lladdr_opt_len != 0) { + struct lladdr_option *lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct ns_header)); + lladdr_opt->type = ND6_OPTION_TYPE_SOURCE_LLADDR; + lladdr_opt->length = (u8_t)lladdr_opt_len; + SMEMCPY(lladdr_opt->addr, netif->hwaddr, netif->hwaddr_len); + } + + /* Generate the solicited node address for the target address. */ + if (flags & ND6_SEND_FLAG_MULTICAST_DEST) { + ip6_addr_set_solicitednode(&multicast_address, target_addr->addr[3]); + ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif); + target_addr = &multicast_address; + } + +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + ns_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr, + target_addr); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Send the packet out. */ + ND6_STATS_INC(nd6.xmit); + ip6_output_if(p, (src_addr == IP6_ADDR_ANY6) ?
NULL : src_addr, target_addr, + ND6_HOPLIM, 0, IP6_NEXTH_ICMP6, netif); + pbuf_free(p); +} + +/** + * Send a neighbor advertisement message + * + * @param netif the netif on which to send the message + * @param target_addr the IPv6 target address for the ND message + * @param flags one of ND6_SEND_FLAG_* + */ +static void +nd6_send_na(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags) +{ + struct na_header *na_hdr; + struct lladdr_option *lladdr_opt; + struct pbuf *p; + const ip6_addr_t *src_addr; + const ip6_addr_t *dest_addr; + u16_t lladdr_opt_len; + + LWIP_ASSERT("target address is required", target_addr != NULL); + + /* Use link-local address as source address. */ + /* src_addr = netif_ip6_addr(netif, 0); */ + /* Use target address as source address. */ + src_addr = target_addr; + + /* Allocate a packet. */ + lladdr_opt_len = ((netif->hwaddr_len + 2) >> 3) + (((netif->hwaddr_len + 2) & 0x07) ? 1 : 0); + p = pbuf_alloc(PBUF_IP, sizeof(struct na_header) + (lladdr_opt_len << 3), PBUF_RAM); + if (p == NULL) { + ND6_STATS_INC(nd6.memerr); + return; + } + + /* Set fields. */ + na_hdr = (struct na_header *)p->payload; + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct na_header)); + + na_hdr->type = ICMP6_TYPE_NA; + na_hdr->code = 0; + na_hdr->chksum = 0; + na_hdr->flags = flags & 0xf0; + na_hdr->reserved[0] = 0; + na_hdr->reserved[1] = 0; + na_hdr->reserved[2] = 0; + ip6_addr_copy_to_packed(na_hdr->target_address, *target_addr); + + lladdr_opt->type = ND6_OPTION_TYPE_TARGET_LLADDR; + lladdr_opt->length = (u8_t)lladdr_opt_len; + SMEMCPY(lladdr_opt->addr, netif->hwaddr, netif->hwaddr_len); + + /* Generate the solicited node address for the target address. */ + if (flags & ND6_SEND_FLAG_MULTICAST_DEST) { + ip6_addr_set_solicitednode(&multicast_address, target_addr->addr[3]); + ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif); + dest_addr = &multicast_address; + } else if (flags & ND6_SEND_FLAG_ALLNODES_DEST) { + ip6_addr_set_allnodes_linklocal(&multicast_address); + ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif); + dest_addr = &multicast_address; + } else { + dest_addr = ip6_current_src_addr(); + } + +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + na_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr, + dest_addr); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Send the packet out. */ + ND6_STATS_INC(nd6.xmit); + ip6_output_if(p, src_addr, dest_addr, + ND6_HOPLIM, 0, IP6_NEXTH_ICMP6, netif); + pbuf_free(p); +} + +#if LWIP_IPV6_SEND_ROUTER_SOLICIT +/** + * Send a router solicitation message + * + * @param netif the netif on which to send the message + */ +static err_t +nd6_send_rs(struct netif *netif) +{ + struct rs_header *rs_hdr; + struct lladdr_option *lladdr_opt; + struct pbuf *p; + const ip6_addr_t *src_addr; + err_t err; + u16_t lladdr_opt_len = 0; + + /* Link-local source address, or unspecified address? */ + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, 0))) { + src_addr = netif_ip6_addr(netif, 0); + } else { + src_addr = IP6_ADDR_ANY6; + } + + /* Generate the all routers target address. */ + ip6_addr_set_allrouters_linklocal(&multicast_address); + ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif); + + /* Allocate a packet. */ + if (src_addr != IP6_ADDR_ANY6) { + lladdr_opt_len = ((netif->hwaddr_len + 2) >> 3) + (((netif->hwaddr_len + 2) & 0x07) ? 
1 : 0); + } + p = pbuf_alloc(PBUF_IP, sizeof(struct rs_header) + (lladdr_opt_len << 3), PBUF_RAM); + if (p == NULL) { + ND6_STATS_INC(nd6.memerr); + return ERR_BUF; + } + + /* Set fields. */ + rs_hdr = (struct rs_header *)p->payload; + + rs_hdr->type = ICMP6_TYPE_RS; + rs_hdr->code = 0; + rs_hdr->chksum = 0; + rs_hdr->reserved = 0; + + if (src_addr != IP6_ADDR_ANY6) { + /* Include our hw address. */ + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct rs_header)); + lladdr_opt->type = ND6_OPTION_TYPE_SOURCE_LLADDR; + lladdr_opt->length = (u8_t)lladdr_opt_len; + SMEMCPY(lladdr_opt->addr, netif->hwaddr, netif->hwaddr_len); + } + +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + rs_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr, + &multicast_address); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Send the packet out. */ + ND6_STATS_INC(nd6.xmit); + + err = ip6_output_if(p, (src_addr == IP6_ADDR_ANY6) ? NULL : src_addr, &multicast_address, + ND6_HOPLIM, 0, IP6_NEXTH_ICMP6, netif); + pbuf_free(p); + + return err; +} +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ + +/** + * Search for a neighbor cache entry + * + * @param ip6addr the IPv6 address of the neighbor + * @return The neighbor cache entry index that matched, -1 if no + * entry is found + */ +static s8_t +nd6_find_neighbor_cache_entry(const ip6_addr_t *ip6addr) +{ + s8_t i; + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if (ip6_addr_cmp(ip6addr, &(neighbor_cache[i].next_hop_address))) { + return i; + } + } + return -1; +} + +/** + * Create a new neighbor cache entry. + * + * If no unused entry is found, will try to recycle an old entry + * according to ad-hoc "age" heuristic. + * + * @return The neighbor cache entry index that was created, -1 if no + * entry could be created + */ +static s8_t +nd6_new_neighbor_cache_entry(void) +{ + s8_t i; + s8_t j; + u32_t time; + + + /* First, try to find an empty entry. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if (neighbor_cache[i].state == ND6_NO_ENTRY) { + return i; + } + } + + /* We need to recycle an entry. in general, do not recycle if it is a router. */ + + /* Next, try to find a Stale entry. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_STALE) && + (!neighbor_cache[i].isrouter)) { + nd6_free_neighbor_cache_entry(i); + return i; + } + } + + /* Next, try to find a Probe entry. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_PROBE) && + (!neighbor_cache[i].isrouter)) { + nd6_free_neighbor_cache_entry(i); + return i; + } + } + + /* Next, try to find a Delayed entry. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_DELAY) && + (!neighbor_cache[i].isrouter)) { + nd6_free_neighbor_cache_entry(i); + return i; + } + } + + /* Next, try to find the oldest reachable entry. */ + time = 0xfffffffful; + j = -1; + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_REACHABLE) && + (!neighbor_cache[i].isrouter)) { + if (neighbor_cache[i].counter.reachable_time < time) { + j = i; + time = neighbor_cache[i].counter.reachable_time; + } + } + } + if (j >= 0) { + nd6_free_neighbor_cache_entry(j); + return j; + } + + /* Next, find oldest incomplete entry without queued packets. 
*/ + time = 0; + j = -1; + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ( + (neighbor_cache[i].q == NULL) && + (neighbor_cache[i].state == ND6_INCOMPLETE) && + (!neighbor_cache[i].isrouter)) { + if (neighbor_cache[i].counter.probes_sent >= time) { + j = i; + time = neighbor_cache[i].counter.probes_sent; + } + } + } + if (j >= 0) { + nd6_free_neighbor_cache_entry(j); + return j; + } + + /* Next, find oldest incomplete entry with queued packets. */ + time = 0; + j = -1; + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_INCOMPLETE) && + (!neighbor_cache[i].isrouter)) { + if (neighbor_cache[i].counter.probes_sent >= time) { + j = i; + time = neighbor_cache[i].counter.probes_sent; + } + } + } + if (j >= 0) { + nd6_free_neighbor_cache_entry(j); + return j; + } + + /* No more entries to try. */ + return -1; +} + +/** + * Will free any resources associated with a neighbor cache + * entry, and will mark it as unused. + * + * @param i the neighbor cache entry index to free + */ +static void +nd6_free_neighbor_cache_entry(s8_t i) +{ + if ((i < 0) || (i >= LWIP_ND6_NUM_NEIGHBORS)) { + return; + } + if (neighbor_cache[i].isrouter) { + /* isrouter needs to be cleared before deleting a neighbor cache entry */ + return; + } + + /* Free any queued packets. */ + if (neighbor_cache[i].q != NULL) { + nd6_free_q(neighbor_cache[i].q); + neighbor_cache[i].q = NULL; + } + + neighbor_cache[i].state = ND6_NO_ENTRY; + neighbor_cache[i].isrouter = 0; + neighbor_cache[i].netif = NULL; + neighbor_cache[i].counter.reachable_time = 0; + ip6_addr_set_zero(&(neighbor_cache[i].next_hop_address)); +} + +/** + * Search for a destination cache entry + * + * @param ip6addr the IPv6 address of the destination + * @return The destination cache entry index that matched, -1 if no + * entry is found + */ +static s16_t +nd6_find_destination_cache_entry(const ip6_addr_t *ip6addr) +{ + s16_t i; + + IP6_ADDR_ZONECHECK(ip6addr); + + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + if (ip6_addr_cmp(ip6addr, &(destination_cache[i].destination_addr))) { + return i; + } + } + return -1; +} + +/** + * Create a new destination cache entry. If no unused entry is found, + * will recycle oldest entry. + * + * @return The destination cache entry index that was created, -1 if no + * entry was created + */ +static s16_t +nd6_new_destination_cache_entry(void) +{ + s16_t i, j; + u32_t age; + + /* Find an empty entry. */ + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + if (ip6_addr_isany(&(destination_cache[i].destination_addr))) { + return i; + } + } + + /* Find oldest entry. */ + age = 0; + j = LWIP_ND6_NUM_DESTINATIONS - 1; + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + if (destination_cache[i].age > age) { + j = i; + } + } + + return j; +} + +/** + * Clear the destination cache. + * + * This operation may be necessary for consistency in the light of changing + * local addresses and/or use of the gateway hook. + */ +void +nd6_clear_destination_cache(void) +{ + int i; + + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + ip6_addr_set_any(&destination_cache[i].destination_addr); + } +} + +/** + * Determine whether an address matches an on-link prefix or the subnet of a + * statically assigned address. + * + * @param ip6addr the IPv6 address to match + * @return 1 if the address is on-link, 0 otherwise + */ +static int +nd6_is_prefix_in_netif(const ip6_addr_t *ip6addr, struct netif *netif) +{ + s8_t i; + + /* Check to see if the address matches an on-link prefix. 
for (i = 0; i < LWIP_ND6_NUM_PREFIXES; i++) { + if ((prefix_list[i].netif == netif) && + (prefix_list[i].invalidation_timer > 0) && + ip6_addr_netcmp(ip6addr, &(prefix_list[i].prefix))) { + return 1; + } + } + /* Check to see if address prefix matches a manually configured (= static) + * address. Static addresses have an implied /64 subnet assignment. Dynamic + * addresses (from autoconfiguration) have no implied subnet assignment, and + * are thus effectively /128 assignments. See RFC 5942 for more on this. */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + netif_ip6_addr_isstatic(netif, i) && + ip6_addr_netcmp(ip6addr, netif_ip6_addr(netif, i))) { + return 1; + } + } + return 0; +} + +/** + * Select a default router for a destination. + * + * This function is used both for routing and for finding a next-hop target for + * a packet. In the former case, the given netif is NULL, and the returned + * router entry must be for a netif suitable for sending packets (up, link up). + * In the latter case, the given netif is not NULL and restricts router choice. + * + * @param ip6addr the destination address + * @param netif the netif for the outgoing packet, if known + * @return the default router entry index, or -1 if no suitable + * router is found + */ +static s8_t +nd6_select_router(const ip6_addr_t *ip6addr, struct netif *netif) +{ + struct netif *router_netif; + s8_t i, j, valid_router; + static s8_t last_router; + + LWIP_UNUSED_ARG(ip6addr); /* @todo match preferred routes!! (must implement ND6_OPTION_TYPE_ROUTE_INFO) */ + + /* @todo: implement default router preference */ + + /* Look for valid routers. A reachable router is preferred. */ + valid_router = -1; + for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) { + /* Is the router netif both set and appropriate? */ + if (default_router_list[i].neighbor_entry != NULL) { + router_netif = default_router_list[i].neighbor_entry->netif; + if ((router_netif != NULL) && (netif != NULL ? netif == router_netif : + (netif_is_up(router_netif) && netif_is_link_up(router_netif)))) { + /* Is the router valid, i.e., reachable or probably reachable as per + * RFC 4861 Sec. 6.3.6? Note that we will never return a router that + * has no neighbor cache entry, due to the netif association tests. */ + if (default_router_list[i].neighbor_entry->state != ND6_INCOMPLETE) { + /* Is the router known to be reachable? */ + if (default_router_list[i].neighbor_entry->state == ND6_REACHABLE) { + return i; /* valid and reachable - done! */ + } else if (valid_router < 0) { + valid_router = i; /* valid but not known to be reachable */ + } + } + } + } + } + if (valid_router >= 0) { + return valid_router; + } + + /* Look for any router for which we have any information at all. */ + /* last_router is used for round-robin selection of incomplete routers, as + * recommended in RFC 4861 Sec. 6.3.6 point (2). Advance only when picking a + * route, to select the same router as next-hop target in the common case. */ + if ((netif == NULL) && (++last_router >= LWIP_ND6_NUM_ROUTERS)) { + last_router = 0; + } + i = last_router; + for (j = 0; j < LWIP_ND6_NUM_ROUTERS; j++) { + if (default_router_list[i].neighbor_entry != NULL) { + router_netif = default_router_list[i].neighbor_entry->netif; + if ((router_netif != NULL) && (netif != NULL ?
netif == router_netif : + (netif_is_up(router_netif) && netif_is_link_up(router_netif)))) { + return i; + } + } + if (++i >= LWIP_ND6_NUM_ROUTERS) { + i = 0; + } + } + + /* no suitable router found. */ + return -1; +} + +/** + * Find a router-announced route to the given destination. This route may be + * based on an on-link prefix or a default router. + * + * If a suitable route is found, the returned netif is guaranteed to be in a + * suitable state (up, link up) to be used for packet transmission. + * + * @param ip6addr the destination IPv6 address + * @return the netif to use for the destination, or NULL if none found + */ +struct netif * +nd6_find_route(const ip6_addr_t *ip6addr) +{ + struct netif *netif; + s8_t i; + + /* @todo decide if it makes sense to check the destination cache first */ + + /* Check if there is a matching on-link prefix. There may be multiple + * matches. Pick the first one that is associated with a suitable netif. */ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; ++i) { + netif = prefix_list[i].netif; + if ((netif != NULL) && ip6_addr_netcmp(&prefix_list[i].prefix, ip6addr) && + netif_is_up(netif) && netif_is_link_up(netif)) { + return netif; + } + } + + /* No on-link prefix match. Find a router that can forward the packet. */ + i = nd6_select_router(ip6addr, NULL); + if (i >= 0) { + LWIP_ASSERT("selected router must have a neighbor entry", + default_router_list[i].neighbor_entry != NULL); + return default_router_list[i].neighbor_entry->netif; + } + + return NULL; +} + +/** + * Find an entry for a default router. + * + * @param router_addr the IPv6 address of the router + * @param netif the netif on which the router is found, if known + * @return the index of the router entry, or -1 if not found + */ +static s8_t +nd6_get_router(const ip6_addr_t *router_addr, struct netif *netif) +{ + s8_t i; + + IP6_ADDR_ZONECHECK_NETIF(router_addr, netif); + + /* Look for router. */ + for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) { + if ((default_router_list[i].neighbor_entry != NULL) && + ((netif != NULL) ? netif == default_router_list[i].neighbor_entry->netif : 1) && + ip6_addr_cmp(router_addr, &(default_router_list[i].neighbor_entry->next_hop_address))) { + return i; + } + } + + /* router not found. */ + return -1; +} + +/** + * Create a new entry for a default router. + * + * @param router_addr the IPv6 address of the router + * @param netif the netif on which the router is connected, if known + * @return the index on the router table, or -1 if could not be created + */ +static s8_t +nd6_new_router(const ip6_addr_t *router_addr, struct netif *netif) +{ + s8_t router_index; + s8_t free_router_index; + s8_t neighbor_index; + + IP6_ADDR_ZONECHECK_NETIF(router_addr, netif); + + /* Do we have a neighbor entry for this router? */ + neighbor_index = nd6_find_neighbor_cache_entry(router_addr); + if (neighbor_index < 0) { + /* Create a neighbor entry for this router. */ + neighbor_index = nd6_new_neighbor_cache_entry(); + if (neighbor_index < 0) { + /* Could not create neighbor entry for this router. */ + return -1; + } + ip6_addr_set(&(neighbor_cache[neighbor_index].next_hop_address), router_addr); + neighbor_cache[neighbor_index].netif = netif; + neighbor_cache[neighbor_index].q = NULL; + neighbor_cache[neighbor_index].state = ND6_INCOMPLETE; + neighbor_cache[neighbor_index].counter.probes_sent = 1; + nd6_send_neighbor_cache_probe(&neighbor_cache[neighbor_index], ND6_SEND_FLAG_MULTICAST_DEST); + } + + /* Mark neighbor as router. 
*/ + neighbor_cache[neighbor_index].isrouter = 1; + + /* Look for empty entry. */ + free_router_index = LWIP_ND6_NUM_ROUTERS; + for (router_index = LWIP_ND6_NUM_ROUTERS - 1; router_index >= 0; router_index--) { + /* check if router already exists (this is a special case for 2 netifs on the same subnet + - e.g. wifi and cable) */ + if(default_router_list[router_index].neighbor_entry == &(neighbor_cache[neighbor_index])){ + return router_index; + } + if (default_router_list[router_index].neighbor_entry == NULL) { + /* remember lowest free index to create a new entry */ + free_router_index = router_index; + } + } + if (free_router_index < LWIP_ND6_NUM_ROUTERS) { + default_router_list[free_router_index].neighbor_entry = &(neighbor_cache[neighbor_index]); + return free_router_index; + } + + /* Could not create a router entry. */ + + /* Mark neighbor entry as not-router. Entry might be useful as neighbor still. */ + neighbor_cache[neighbor_index].isrouter = 0; + + /* router not found. */ + return -1; +} + +/** + * Find the cached entry for an on-link prefix. + * + * @param prefix the IPv6 prefix that is on-link + * @param netif the netif on which the prefix is on-link + * @return the index on the prefix table, or -1 if not found + */ +static s8_t +nd6_get_onlink_prefix(const ip6_addr_t *prefix, struct netif *netif) +{ + s8_t i; + + /* Look for prefix in list. */ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; ++i) { + if ((ip6_addr_netcmp(&(prefix_list[i].prefix), prefix)) && + (prefix_list[i].netif == netif)) { + return i; + } + } + + /* Entry not available. */ + return -1; +} + +/** + * Creates a new entry for an on-link prefix. + * + * @param prefix the IPv6 prefix that is on-link + * @param netif the netif on which the prefix is on-link + * @return the index on the prefix table, or -1 if not created + */ +static s8_t +nd6_new_onlink_prefix(const ip6_addr_t *prefix, struct netif *netif) +{ + s8_t i; + + /* Create new entry. */ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; ++i) { + if ((prefix_list[i].netif == NULL) || + (prefix_list[i].invalidation_timer == 0)) { + /* Found empty prefix entry. */ + prefix_list[i].netif = netif; + ip6_addr_set(&(prefix_list[i].prefix), prefix); + return i; + } + } + + /* Entry not available. */ + return -1; +} + +/** + * Determine the next hop for a destination. Will determine if the + * destination is on-link, else a suitable on-link router is selected. + * + * The last entry index is cached for fast entry search. + * + * @param ip6addr the destination address + * @param netif the netif on which the packet will be sent + * @return the neighbor cache entry for the next hop, ERR_RTE if no + * suitable next hop was found, ERR_MEM if no cache entry + * could be created + */ +static s8_t +nd6_get_next_hop_entry(const ip6_addr_t *ip6addr, struct netif *netif) +{ +#ifdef LWIP_HOOK_ND6_GET_GW + const ip6_addr_t *next_hop_addr; +#endif /* LWIP_HOOK_ND6_GET_GW */ + s8_t i; + s16_t dst_idx; + + IP6_ADDR_ZONECHECK_NETIF(ip6addr, netif); + +#if LWIP_NETIF_HWADDRHINT + if (netif->hints != NULL) { + /* per-pcb cached entry was given */ + netif_addr_idx_t addr_hint = netif->hints->addr_hint; + if (addr_hint < LWIP_ND6_NUM_DESTINATIONS) { + nd6_cached_destination_index = addr_hint; + } + } +#endif /* LWIP_NETIF_HWADDRHINT */ + + /* Look for ip6addr in destination cache. */ + if (ip6_addr_cmp(ip6addr, &(destination_cache[nd6_cached_destination_index].destination_addr))) { + /* the cached entry index is the right one! */ + /* do nothing. 
*/ + ND6_STATS_INC(nd6.cachehit); + } else { + /* Search destination cache. */ + dst_idx = nd6_find_destination_cache_entry(ip6addr); + if (dst_idx >= 0) { + /* found destination entry. make it our new cached index. */ + LWIP_ASSERT("type overflow", (size_t)dst_idx < NETIF_ADDR_IDX_MAX); + nd6_cached_destination_index = (netif_addr_idx_t)dst_idx; + } else { + /* Not found. Create a new destination entry. */ + dst_idx = nd6_new_destination_cache_entry(); + if (dst_idx >= 0) { + /* got new destination entry. make it our new cached index. */ + LWIP_ASSERT("type overflow", (size_t)dst_idx < NETIF_ADDR_IDX_MAX); + nd6_cached_destination_index = (netif_addr_idx_t)dst_idx; + } else { + /* Could not create a destination cache entry. */ + return ERR_MEM; + } + + /* Copy dest address to destination cache. */ + ip6_addr_set(&(destination_cache[nd6_cached_destination_index].destination_addr), ip6addr); + + /* Now find the next hop. is it a neighbor? */ + if (ip6_addr_islinklocal(ip6addr) || + nd6_is_prefix_in_netif(ip6addr, netif)) { + /* Destination in local link. */ + destination_cache[nd6_cached_destination_index].pmtu = netif_mtu6(netif); + ip6_addr_copy(destination_cache[nd6_cached_destination_index].next_hop_addr, destination_cache[nd6_cached_destination_index].destination_addr); +#ifdef LWIP_HOOK_ND6_GET_GW + } else if ((next_hop_addr = LWIP_HOOK_ND6_GET_GW(netif, ip6addr)) != NULL) { + /* Next hop for destination provided by hook function. */ + destination_cache[nd6_cached_destination_index].pmtu = netif->mtu; + ip6_addr_set(&destination_cache[nd6_cached_destination_index].next_hop_addr, next_hop_addr); +#endif /* LWIP_HOOK_ND6_GET_GW */ + } else { + /* We need to select a router. */ + i = nd6_select_router(ip6addr, netif); + if (i < 0) { + /* No router found. */ + ip6_addr_set_any(&(destination_cache[nd6_cached_destination_index].destination_addr)); + return ERR_RTE; + } + destination_cache[nd6_cached_destination_index].pmtu = netif_mtu6(netif); /* Start with netif mtu, correct through ICMPv6 if necessary */ + ip6_addr_copy(destination_cache[nd6_cached_destination_index].next_hop_addr, default_router_list[i].neighbor_entry->next_hop_address); + } + } + } + +#if LWIP_NETIF_HWADDRHINT + if (netif->hints != NULL) { + /* per-pcb cached entry was given */ + netif->hints->addr_hint = nd6_cached_destination_index; + } +#endif /* LWIP_NETIF_HWADDRHINT */ + + /* Look in neighbor cache for the next-hop address. */ + if (ip6_addr_cmp(&(destination_cache[nd6_cached_destination_index].next_hop_addr), + &(neighbor_cache[nd6_cached_neighbor_index].next_hop_address))) { + /* Cache hit. */ + /* Do nothing. */ + ND6_STATS_INC(nd6.cachehit); + } else { + i = nd6_find_neighbor_cache_entry(&(destination_cache[nd6_cached_destination_index].next_hop_addr)); + if (i >= 0) { + /* Found a matching record, make it new cached entry. */ + nd6_cached_neighbor_index = i; + } else { + /* Neighbor not in cache. Make a new entry. */ + i = nd6_new_neighbor_cache_entry(); + if (i >= 0) { + /* got new neighbor entry. make it our new cached index. */ + nd6_cached_neighbor_index = i; + } else { + /* Could not create a neighbor cache entry. */ + return ERR_MEM; + } + + /* Initialize fields. 
*/ + ip6_addr_copy(neighbor_cache[i].next_hop_address, + destination_cache[nd6_cached_destination_index].next_hop_addr); + neighbor_cache[i].isrouter = 0; + neighbor_cache[i].netif = netif; + neighbor_cache[i].state = ND6_INCOMPLETE; + neighbor_cache[i].counter.probes_sent = 1; + nd6_send_neighbor_cache_probe(&neighbor_cache[i], ND6_SEND_FLAG_MULTICAST_DEST); + } + } + + /* Reset this destination's age. */ + destination_cache[nd6_cached_destination_index].age = 0; + + return nd6_cached_neighbor_index; +} + +/** + * Queue a packet for a neighbor. + * + * @param neighbor_index the index in the neighbor cache table + * @param q packet to be queued + * @return ERR_OK if succeeded, ERR_MEM if out of memory + */ +static err_t +nd6_queue_packet(s8_t neighbor_index, struct pbuf *q) +{ + err_t result = ERR_MEM; + struct pbuf *p; + int copy_needed = 0; +#if LWIP_ND6_QUEUEING + struct nd6_q_entry *new_entry, *r; +#endif /* LWIP_ND6_QUEUEING */ + + if ((neighbor_index < 0) || (neighbor_index >= LWIP_ND6_NUM_NEIGHBORS)) { + return ERR_ARG; + } + + /* IF q includes a pbuf that must be copied, we have to copy the whole chain + * into a new PBUF_RAM. See the definition of PBUF_NEEDS_COPY for details. */ + p = q; + while (p) { + if (PBUF_NEEDS_COPY(p)) { + copy_needed = 1; + break; + } + p = p->next; + } + if (copy_needed) { + /* copy the whole packet into new pbufs */ + p = pbuf_clone(PBUF_LINK, PBUF_RAM, q); + while ((p == NULL) && (neighbor_cache[neighbor_index].q != NULL)) { + /* Free oldest packet (as per RFC recommendation) */ +#if LWIP_ND6_QUEUEING + r = neighbor_cache[neighbor_index].q; + neighbor_cache[neighbor_index].q = r->next; + r->next = NULL; + nd6_free_q(r); +#else /* LWIP_ND6_QUEUEING */ + pbuf_free(neighbor_cache[neighbor_index].q); + neighbor_cache[neighbor_index].q = NULL; +#endif /* LWIP_ND6_QUEUEING */ + p = pbuf_clone(PBUF_LINK, PBUF_RAM, q); + } + } else { + /* referencing the old pbuf is enough */ + p = q; + pbuf_ref(p); + } + /* packet was copied/ref'd? */ + if (p != NULL) { + /* queue packet ... */ +#if LWIP_ND6_QUEUEING + /* allocate a new nd6 queue entry */ + new_entry = (struct nd6_q_entry *)memp_malloc(MEMP_ND6_QUEUE); + if ((new_entry == NULL) && (neighbor_cache[neighbor_index].q != NULL)) { + /* Free oldest packet (as per RFC recommendation) */ + r = neighbor_cache[neighbor_index].q; + neighbor_cache[neighbor_index].q = r->next; + r->next = NULL; + nd6_free_q(r); + new_entry = (struct nd6_q_entry *)memp_malloc(MEMP_ND6_QUEUE); + } + if (new_entry != NULL) { + new_entry->next = NULL; + new_entry->p = p; + if (neighbor_cache[neighbor_index].q != NULL) { + /* queue was already existent, append the new entry to the end */ + r = neighbor_cache[neighbor_index].q; + while (r->next != NULL) { + r = r->next; + } + r->next = new_entry; + } else { + /* queue did not exist, first item in queue */ + neighbor_cache[neighbor_index].q = new_entry; + } + LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: queued packet %p on neighbor entry %"S16_F"\n", (void *)p, (s16_t)neighbor_index)); + result = ERR_OK; + } else { + /* the pool MEMP_ND6_QUEUE is empty */ + pbuf_free(p); + LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: could not queue a copy of packet %p (out of memory)\n", (void *)p)); + /* { result == ERR_MEM } through initialization */ + } +#else /* LWIP_ND6_QUEUEING */ + /* Queue a single packet. If an older packet is already queued, free it as per RFC. 
*/ + if (neighbor_cache[neighbor_index].q != NULL) { + pbuf_free(neighbor_cache[neighbor_index].q); + } + neighbor_cache[neighbor_index].q = p; + LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: queued packet %p on neighbor entry %"S16_F"\n", (void *)p, (s16_t)neighbor_index)); + result = ERR_OK; +#endif /* LWIP_ND6_QUEUEING */ + } else { + LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: could not queue a copy of packet %p (out of memory)\n", (void *)q)); + /* { result == ERR_MEM } through initialization */ + } + + return result; +} + +#if LWIP_ND6_QUEUEING +/** + * Free a complete queue of nd6 q entries + * + * @param q a queue of nd6_q_entry to free + */ +static void +nd6_free_q(struct nd6_q_entry *q) +{ + struct nd6_q_entry *r; + LWIP_ASSERT("q != NULL", q != NULL); + LWIP_ASSERT("q->p != NULL", q->p != NULL); + while (q) { + r = q; + q = q->next; + LWIP_ASSERT("r->p != NULL", (r->p != NULL)); + pbuf_free(r->p); + memp_free(MEMP_ND6_QUEUE, r); + } +} +#endif /* LWIP_ND6_QUEUEING */ + +/** + * Send queued packets for a neighbor + * + * @param i the neighbor to send packets to + */ +static void +nd6_send_q(s8_t i) +{ + struct ip6_hdr *ip6hdr; + ip6_addr_t dest; +#if LWIP_ND6_QUEUEING + struct nd6_q_entry *q; +#endif /* LWIP_ND6_QUEUEING */ + + if ((i < 0) || (i >= LWIP_ND6_NUM_NEIGHBORS)) { + return; + } + +#if LWIP_ND6_QUEUEING + while (neighbor_cache[i].q != NULL) { + /* remember first in queue */ + q = neighbor_cache[i].q; + /* pop first item off the queue */ + neighbor_cache[i].q = q->next; + /* Get ipv6 header. */ + ip6hdr = (struct ip6_hdr *)(q->p->payload); + /* Create an aligned copy. */ + ip6_addr_copy_from_packed(dest, ip6hdr->dest); + /* Restore the zone, if applicable. */ + ip6_addr_assign_zone(&dest, IP6_UNKNOWN, neighbor_cache[i].netif); + /* send the queued IPv6 packet */ + (neighbor_cache[i].netif)->output_ip6(neighbor_cache[i].netif, q->p, &dest); + /* free the queued IP packet */ + pbuf_free(q->p); + /* now queue entry can be freed */ + memp_free(MEMP_ND6_QUEUE, q); + } +#else /* LWIP_ND6_QUEUEING */ + if (neighbor_cache[i].q != NULL) { + /* Get ipv6 header. */ + ip6hdr = (struct ip6_hdr *)(neighbor_cache[i].q->payload); + /* Create an aligned copy. */ + ip6_addr_copy_from_packed(dest, ip6hdr->dest); + /* Restore the zone, if applicable. */ + ip6_addr_assign_zone(&dest, IP6_UNKNOWN, neighbor_cache[i].netif); + /* send the queued IPv6 packet */ + (neighbor_cache[i].netif)->output_ip6(neighbor_cache[i].netif, neighbor_cache[i].q, &dest); + /* free the queued IP packet */ + pbuf_free(neighbor_cache[i].q); + neighbor_cache[i].q = NULL; + } +#endif /* LWIP_ND6_QUEUEING */ +} + +/** + * A packet is to be transmitted to a specific IPv6 destination on a specific + * interface. Check if we can find the hardware address of the next hop to use + * for the packet. If so, give the hardware address to the caller, which should + * use it to send the packet right away. Otherwise, enqueue the packet for + * later transmission while looking up the hardware address, if possible. + * + * As such, this function returns one of three different possible results: + * + * - ERR_OK with a non-NULL 'hwaddrp': the caller should send the packet now. + * - ERR_OK with a NULL 'hwaddrp': the packet has been enqueued for later. + * - not ERR_OK: something went wrong; forward the error upward in the stack. + * + * @param netif The lwIP network interface on which the IP packet will be sent. + * @param q The pbuf(s) containing the IP packet to be sent. + * @param ip6addr The destination IPv6 address of the packet. 
+ * @param hwaddrp On success, filled with a pointer to a HW address or NULL (meaning + * the packet has been queued). + * @return + * - ERR_OK on success, ERR_RTE if no route was found for the packet, + * or ERR_MEM if low memory conditions prohibit sending the packet at all. + */ +err_t +nd6_get_next_hop_addr_or_queue(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr, const u8_t **hwaddrp) +{ + s8_t i; + + /* Get next hop record. */ + i = nd6_get_next_hop_entry(ip6addr, netif); + if (i < 0) { + /* failed to get a next hop neighbor record. */ + return i; + } + + /* Now that we have a destination record, send or queue the packet. */ + if (neighbor_cache[i].state == ND6_STALE) { + /* Switch to delay state. */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + /* @todo should we send or queue if PROBE? send for now, to let unicast NS pass. */ + if ((neighbor_cache[i].state == ND6_REACHABLE) || + (neighbor_cache[i].state == ND6_DELAY) || + (neighbor_cache[i].state == ND6_PROBE)) { + + /* Tell the caller to send out the packet now. */ + *hwaddrp = neighbor_cache[i].lladdr; + return ERR_OK; + } + + /* We should queue packet on this interface. */ + *hwaddrp = NULL; + return nd6_queue_packet(i, q); +} + + +/** + * Get the Path MTU for a destination. + * + * @param ip6addr the destination address + * @param netif the netif on which the packet will be sent + * @return the Path MTU, if known, or the netif default MTU + */ +u16_t +nd6_get_destination_mtu(const ip6_addr_t *ip6addr, struct netif *netif) +{ + s16_t i; + + i = nd6_find_destination_cache_entry(ip6addr); + if (i >= 0) { + if (destination_cache[i].pmtu > 0) { + return destination_cache[i].pmtu; + } + } + + if (netif != NULL) { + return netif_mtu6(netif); + } + + return 1280; /* Minimum MTU */ +} + + +#if LWIP_ND6_TCP_REACHABILITY_HINTS +/** + * Provide the Neighbor discovery process with a hint that a + * destination is reachable. Called by tcp_receive when ACKs are + * received or sent (as per RFC). This is useful to avoid sending + * NS messages every 30 seconds. + * + * @param ip6addr the destination address which is know to be reachable + * by an upper layer protocol (TCP) + */ +void +nd6_reachability_hint(const ip6_addr_t *ip6addr) +{ + s8_t i; + s16_t dst_idx; + + /* Find destination in cache. */ + if (ip6_addr_cmp(ip6addr, &(destination_cache[nd6_cached_destination_index].destination_addr))) { + dst_idx = nd6_cached_destination_index; + ND6_STATS_INC(nd6.cachehit); + } else { + dst_idx = nd6_find_destination_cache_entry(ip6addr); + } + if (dst_idx < 0) { + return; + } + + /* Find next hop neighbor in cache. */ + if (ip6_addr_cmp(&(destination_cache[dst_idx].next_hop_addr), &(neighbor_cache[nd6_cached_neighbor_index].next_hop_address))) { + i = nd6_cached_neighbor_index; + ND6_STATS_INC(nd6.cachehit); + } else { + i = nd6_find_neighbor_cache_entry(&(destination_cache[dst_idx].next_hop_addr)); + } + if (i < 0) { + return; + } + + /* For safety: don't set as reachable if we don't have a LL address yet. Misuse protection. */ + if (neighbor_cache[i].state == ND6_INCOMPLETE || neighbor_cache[i].state == ND6_NO_ENTRY) { + return; + } + + /* Set reachability state. */ + neighbor_cache[i].state = ND6_REACHABLE; + neighbor_cache[i].counter.reachable_time = reachable_time; +} +#endif /* LWIP_ND6_TCP_REACHABILITY_HINTS */ + +/** + * Remove all prefix, neighbor_cache and router entries of the specified netif. 
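+ * Typically invoked when an interface is being taken down for good, so
+ * that neighbor, router and prefix state no longer references it. The
+ * destination cache is cleared wholesale because its entries carry no
+ * netif association (see the comment in the implementation below).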
+ * + * @param netif points to a network interface + */ +void +nd6_cleanup_netif(struct netif *netif) +{ + u8_t i; + s8_t router_index; + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; i++) { + if (prefix_list[i].netif == netif) { + prefix_list[i].netif = NULL; + } + } + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if (neighbor_cache[i].netif == netif) { + for (router_index = 0; router_index < LWIP_ND6_NUM_ROUTERS; router_index++) { + if (default_router_list[router_index].neighbor_entry == &neighbor_cache[i]) { + default_router_list[router_index].neighbor_entry = NULL; + default_router_list[router_index].flags = 0; + } + } + neighbor_cache[i].isrouter = 0; + nd6_free_neighbor_cache_entry(i); + } + } + /* Clear the destination cache, since many entries may now have become + * invalid for one of several reasons. As destination cache entries have no + * netif association, use a sledgehammer approach (this can be improved). */ + nd6_clear_destination_cache(); +} + +#if LWIP_IPV6_MLD +/** + * The state of a local IPv6 address entry is about to change. If needed, join + * or leave the solicited-node multicast group for the address. + * + * @param netif The netif that owns the address. + * @param addr_idx The index of the address. + * @param new_state The new (IP6_ADDR_) state for the address. + */ +void +nd6_adjust_mld_membership(struct netif *netif, s8_t addr_idx, u8_t new_state) +{ + u8_t old_state, old_member, new_member; + + old_state = netif_ip6_addr_state(netif, addr_idx); + + /* Determine whether we were, and should be, a member of the solicited-node + * multicast group for this address. For tentative addresses, the group is + * not joined until the address enters the TENTATIVE_1 (or VALID) state. */ + old_member = (old_state != IP6_ADDR_INVALID && old_state != IP6_ADDR_DUPLICATED && old_state != IP6_ADDR_TENTATIVE); + new_member = (new_state != IP6_ADDR_INVALID && new_state != IP6_ADDR_DUPLICATED && new_state != IP6_ADDR_TENTATIVE); + + if (old_member != new_member) { + ip6_addr_set_solicitednode(&multicast_address, netif_ip6_addr(netif, addr_idx)->addr[3]); + ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif); + + if (new_member) { + mld6_joingroup_netif(netif, &multicast_address); + } else { + mld6_leavegroup_netif(netif, &multicast_address); + } + } +} +#endif /* LWIP_IPV6_MLD */ + +/** Netif was added, set up, or reconnected (link up) */ +void +nd6_restart_netif(struct netif *netif) +{ +#if LWIP_IPV6_SEND_ROUTER_SOLICIT + /* Send Router Solicitation messages (see RFC 4861, ch. 6.3.7). */ + netif->rs_count = LWIP_ND6_MAX_MULTICAST_SOLICIT; +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ +} + +#endif /* LWIP_IPV6 */ diff --git a/Middlewares/Third_Party/LwIP/src/core/mem.c b/Middlewares/Third_Party/LwIP/src/core/mem.c new file mode 100644 index 0000000..315fb3c --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/mem.c @@ -0,0 +1,1017 @@ +/** + * @file + * Dynamic memory manager + * + * This is a lightweight replacement for the standard C library malloc(). 
+ * + * If you want to use the standard C library malloc() instead, define + * MEM_LIBC_MALLOC to 1 in your lwipopts.h + * + * To let mem_malloc() use pools (prevents fragmentation and is much faster than + * a heap but might waste some memory), define MEM_USE_POOLS to 1, define + * MEMP_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list + * of pools like this (more pools can be added between _START and _END): + * + * Define three pools with sizes 256, 512, and 1512 bytes + * LWIP_MALLOC_MEMPOOL_START + * LWIP_MALLOC_MEMPOOL(20, 256) + * LWIP_MALLOC_MEMPOOL(10, 512) + * LWIP_MALLOC_MEMPOOL(5, 1512) + * LWIP_MALLOC_MEMPOOL_END + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * Simon Goldschmidt + * + */ + +#include "lwip/opt.h" +#include "lwip/mem.h" +#include "lwip/def.h" +#include "lwip/sys.h" +#include "lwip/stats.h" +#include "lwip/err.h" + +#include + +#if MEM_LIBC_MALLOC +#include /* for malloc()/free() */ +#endif + +/* This is overridable for tests only... */ +#ifndef LWIP_MEM_ILLEGAL_FREE +#define LWIP_MEM_ILLEGAL_FREE(msg) LWIP_ASSERT(msg, 0) +#endif + +#define MEM_STATS_INC_LOCKED(x) SYS_ARCH_LOCKED(MEM_STATS_INC(x)) +#define MEM_STATS_INC_USED_LOCKED(x, y) SYS_ARCH_LOCKED(MEM_STATS_INC_USED(x, y)) +#define MEM_STATS_DEC_USED_LOCKED(x, y) SYS_ARCH_LOCKED(MEM_STATS_DEC_USED(x, y)) + +#if MEM_OVERFLOW_CHECK +#define MEM_SANITY_OFFSET MEM_SANITY_REGION_BEFORE_ALIGNED +#define MEM_SANITY_OVERHEAD (MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED) +#else +#define MEM_SANITY_OFFSET 0 +#define MEM_SANITY_OVERHEAD 0 +#endif + +#if MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK +/** + * Check if a mep element was victim of an overflow or underflow + * (e.g. 
the restricted area after/before it has been altered) + * + * @param p the mem element to check + * @param size allocated size of the element + * @param descr1 description of the element source shown on error + * @param descr2 description of the element source shown on error + */ +void +mem_overflow_check_raw(void *p, size_t size, const char *descr1, const char *descr2) +{ +#if MEM_SANITY_REGION_AFTER_ALIGNED || MEM_SANITY_REGION_BEFORE_ALIGNED + u16_t k; + u8_t *m; + +#if MEM_SANITY_REGION_AFTER_ALIGNED > 0 + m = (u8_t *)p + size; + for (k = 0; k < MEM_SANITY_REGION_AFTER_ALIGNED; k++) { + if (m[k] != 0xcd) { + char errstr[128]; + snprintf(errstr, sizeof(errstr), "detected mem overflow in %s%s", descr1, descr2); + LWIP_ASSERT(errstr, 0); + } + } +#endif /* MEM_SANITY_REGION_AFTER_ALIGNED > 0 */ + +#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0 + m = (u8_t *)p - MEM_SANITY_REGION_BEFORE_ALIGNED; + for (k = 0; k < MEM_SANITY_REGION_BEFORE_ALIGNED; k++) { + if (m[k] != 0xcd) { + char errstr[128]; + snprintf(errstr, sizeof(errstr), "detected mem underflow in %s%s", descr1, descr2); + LWIP_ASSERT(errstr, 0); + } + } +#endif /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 */ +#else + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(desc); + LWIP_UNUSED_ARG(descr); +#endif +} + +/** + * Initialize the restricted area of a mem element. + */ +void +mem_overflow_init_raw(void *p, size_t size) +{ +#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 + u8_t *m; +#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0 + m = (u8_t *)p - MEM_SANITY_REGION_BEFORE_ALIGNED; + memset(m, 0xcd, MEM_SANITY_REGION_BEFORE_ALIGNED); +#endif +#if MEM_SANITY_REGION_AFTER_ALIGNED > 0 + m = (u8_t *)p + size; + memset(m, 0xcd, MEM_SANITY_REGION_AFTER_ALIGNED); +#endif +#else /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 */ + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(desc); +#endif /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 */ +} +#endif /* MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK */ + +#if MEM_LIBC_MALLOC || MEM_USE_POOLS + +/** mem_init is not used when using pools instead of a heap or using + * C library malloc(). + */ +void +mem_init(void) +{ +} + +/** mem_trim is not used when using pools instead of a heap or using + * C library malloc(): we can't free part of a pool element and the stack + * support mem_trim() to return a different pointer + */ +void * +mem_trim(void *mem, mem_size_t size) +{ + LWIP_UNUSED_ARG(size); + return mem; +} +#endif /* MEM_LIBC_MALLOC || MEM_USE_POOLS */ + +#if MEM_LIBC_MALLOC +/* lwIP heap implemented using C library malloc() */ + +/* in case C library malloc() needs extra protection, + * allow these defines to be overridden. + */ +#ifndef mem_clib_free +#define mem_clib_free free +#endif +#ifndef mem_clib_malloc +#define mem_clib_malloc malloc +#endif +#ifndef mem_clib_calloc +#define mem_clib_calloc calloc +#endif + +#if LWIP_STATS && MEM_STATS +#define MEM_LIBC_STATSHELPER_SIZE LWIP_MEM_ALIGN_SIZE(sizeof(mem_size_t)) +#else +#define MEM_LIBC_STATSHELPER_SIZE 0 +#endif + +/** + * Allocate a block of memory with a minimum of 'size' bytes. + * + * @param size is the minimum size of the requested block in bytes. + * @return pointer to allocated memory or NULL if no free memory was found. + * + * Note that the returned value must always be aligned (as defined by MEM_ALIGNMENT). 
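+ *
+ * A minimal usage sketch (illustrative; 'len' stands for any
+ * caller-supplied mem_size_t and lwIP is assumed to be initialized):
+ *
+ *   void *buf = mem_malloc(len);
+ *   if (buf != NULL) {
+ *     memset(buf, 0, len);
+ *     mem_free(buf);
+ *   }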
+ */ +void * +mem_malloc(mem_size_t size) +{ + void *ret = mem_clib_malloc(size + MEM_LIBC_STATSHELPER_SIZE); + if (ret == NULL) { + MEM_STATS_INC_LOCKED(err); + } else { + LWIP_ASSERT("malloc() must return aligned memory", LWIP_MEM_ALIGN(ret) == ret); +#if LWIP_STATS && MEM_STATS + *(mem_size_t *)ret = size; + ret = (u8_t *)ret + MEM_LIBC_STATSHELPER_SIZE; + MEM_STATS_INC_USED_LOCKED(used, size); +#endif + } + return ret; +} + +/** Put memory back on the heap + * + * @param rmem is the pointer as returned by a previous call to mem_malloc() + */ +void +mem_free(void *rmem) +{ + LWIP_ASSERT("rmem != NULL", (rmem != NULL)); + LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem))); +#if LWIP_STATS && MEM_STATS + rmem = (u8_t *)rmem - MEM_LIBC_STATSHELPER_SIZE; + MEM_STATS_DEC_USED_LOCKED(used, *(mem_size_t *)rmem); +#endif + mem_clib_free(rmem); +} + +#elif MEM_USE_POOLS + +/* lwIP heap implemented with different sized pools */ + +/** + * Allocate memory: determine the smallest pool that is big enough + * to contain an element of 'size' and get an element from that pool. + * + * @param size the size in bytes of the memory needed + * @return a pointer to the allocated memory or NULL if the pool is empty + */ +void * +mem_malloc(mem_size_t size) +{ + void *ret; + struct memp_malloc_helper *element = NULL; + memp_t poolnr; + mem_size_t required_size = size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)); + + for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) { + /* is this pool big enough to hold an element of the required size + plus a struct memp_malloc_helper that saves the pool this element came from? */ + if (required_size <= memp_pools[poolnr]->size) { + element = (struct memp_malloc_helper *)memp_malloc(poolnr); + if (element == NULL) { + /* No need to DEBUGF or ASSERT: This error is already taken care of in memp.c */ +#if MEM_USE_POOLS_TRY_BIGGER_POOL + /** Try a bigger pool if this one is empty! */ + if (poolnr < MEMP_POOL_LAST) { + continue; + } +#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */ + MEM_STATS_INC_LOCKED(err); + return NULL; + } + break; + } + } + if (poolnr > MEMP_POOL_LAST) { + LWIP_ASSERT("mem_malloc(): no pool is that big!", 0); + MEM_STATS_INC_LOCKED(err); + return NULL; + } + + /* save the pool number this element came from */ + element->poolnr = poolnr; + /* and return a pointer to the memory directly after the struct memp_malloc_helper */ + ret = (u8_t *)element + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)); + +#if MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) + /* truncating to u16_t is safe because struct memp_desc::size is u16_t */ + element->size = (u16_t)size; + MEM_STATS_INC_USED_LOCKED(used, element->size); +#endif /* MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) */ +#if MEMP_OVERFLOW_CHECK + /* initialize unused memory (diff between requested size and selected pool's size) */ + memset((u8_t *)ret + size, 0xcd, memp_pools[poolnr]->size - size); +#endif /* MEMP_OVERFLOW_CHECK */ + return ret; +} + +/** + * Free memory previously allocated by mem_malloc. 
Loads the pool number + * and calls memp_free with that pool number to put the element back into + * its pool + * + * @param rmem the memory element to free + */ +void +mem_free(void *rmem) +{ + struct memp_malloc_helper *hmem; + + LWIP_ASSERT("rmem != NULL", (rmem != NULL)); + LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem))); + + /* get the original struct memp_malloc_helper */ + /* cast through void* to get rid of alignment warnings */ + hmem = (struct memp_malloc_helper *)(void *)((u8_t *)rmem - LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper))); + + LWIP_ASSERT("hmem != NULL", (hmem != NULL)); + LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem))); + LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX)); + + MEM_STATS_DEC_USED_LOCKED(used, hmem->size); +#if MEMP_OVERFLOW_CHECK + { + u16_t i; + LWIP_ASSERT("MEM_USE_POOLS: invalid chunk size", + hmem->size <= memp_pools[hmem->poolnr]->size); + /* check that unused memory remained untouched (diff between requested size and selected pool's size) */ + for (i = hmem->size; i < memp_pools[hmem->poolnr]->size; i++) { + u8_t data = *((u8_t *)rmem + i); + LWIP_ASSERT("MEM_USE_POOLS: mem overflow detected", data == 0xcd); + } + } +#endif /* MEMP_OVERFLOW_CHECK */ + + /* and put it in the pool we saved earlier */ + memp_free(hmem->poolnr, hmem); +} + +#else /* MEM_USE_POOLS */ +/* lwIP replacement for your libc malloc() */ + +/** + * The heap is made up as a list of structs of this type. + * This does not have to be aligned since for getting its size, + * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns. + */ +struct mem { + /** index (-> ram[next]) of the next struct */ + mem_size_t next; + /** index (-> ram[prev]) of the previous struct */ + mem_size_t prev; + /** 1: this area is used; 0: this area is unused */ + u8_t used; +#if MEM_OVERFLOW_CHECK + /** this keeps track of the user allocation size for guard checks */ + mem_size_t user_size; +#endif +}; + +/** All allocated blocks will be MIN_SIZE bytes big, at least! + * MIN_SIZE can be overridden to suit your needs. Smaller values save space, + * larger values could prevent too small blocks to fragment the RAM too much. */ +#ifndef MIN_SIZE +#define MIN_SIZE 12 +#endif /* MIN_SIZE */ +/* some alignment macros: we define them here for better source code layout */ +#define MIN_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MIN_SIZE) +#define SIZEOF_STRUCT_MEM LWIP_MEM_ALIGN_SIZE(sizeof(struct mem)) +#define MEM_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SIZE) + +/** If you want to relocate the heap to external memory, simply define + * LWIP_RAM_HEAP_POINTER as a void-pointer to that location. + * If so, make sure the memory at that location is big enough (see below on + * how that space is calculated). */ +#ifndef LWIP_RAM_HEAP_POINTER +/** the heap. we need one struct mem at the end and some room for alignment */ +LWIP_DECLARE_MEMORY_ALIGNED(ram_heap, MEM_SIZE_ALIGNED + (2U * SIZEOF_STRUCT_MEM)); +#define LWIP_RAM_HEAP_POINTER ram_heap +#endif /* LWIP_RAM_HEAP_POINTER */ + +/** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */ +static u8_t *ram; +/** the last entry, always unused! */ +static struct mem *ram_end; + +/** concurrent access protection */ +#if !NO_SYS +static sys_mutex_t mem_mutex; +#endif + +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + +static volatile u8_t mem_free_count; + +/* Allow mem_free from other (e.g. 
interrupt) context */ +#define LWIP_MEM_FREE_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_free) +#define LWIP_MEM_FREE_PROTECT() SYS_ARCH_PROTECT(lev_free) +#define LWIP_MEM_FREE_UNPROTECT() SYS_ARCH_UNPROTECT(lev_free) +#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc) +#define LWIP_MEM_ALLOC_PROTECT() SYS_ARCH_PROTECT(lev_alloc) +#define LWIP_MEM_ALLOC_UNPROTECT() SYS_ARCH_UNPROTECT(lev_alloc) +#define LWIP_MEM_LFREE_VOLATILE volatile + +#else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + +/* Protect the heap only by using a mutex */ +#define LWIP_MEM_FREE_DECL_PROTECT() +#define LWIP_MEM_FREE_PROTECT() sys_mutex_lock(&mem_mutex) +#define LWIP_MEM_FREE_UNPROTECT() sys_mutex_unlock(&mem_mutex) +/* mem_malloc is protected using mutex AND LWIP_MEM_ALLOC_PROTECT */ +#define LWIP_MEM_ALLOC_DECL_PROTECT() +#define LWIP_MEM_ALLOC_PROTECT() +#define LWIP_MEM_ALLOC_UNPROTECT() +#define LWIP_MEM_LFREE_VOLATILE + +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + +/** pointer to the lowest free block, this is used for faster search */ +static struct mem * LWIP_MEM_LFREE_VOLATILE lfree; + +#if MEM_SANITY_CHECK +static void mem_sanity(void); +#define MEM_SANITY() mem_sanity() +#else +#define MEM_SANITY() +#endif + +#if MEM_OVERFLOW_CHECK +static void +mem_overflow_init_element(struct mem *mem, mem_size_t user_size) +{ + void *p = (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET; + mem->user_size = user_size; + mem_overflow_init_raw(p, user_size); +} + +static void +mem_overflow_check_element(struct mem *mem) +{ + void *p = (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET; + mem_overflow_check_raw(p, mem->user_size, "heap", ""); +} +#else /* MEM_OVERFLOW_CHECK */ +#define mem_overflow_init_element(mem, size) +#define mem_overflow_check_element(mem) +#endif /* MEM_OVERFLOW_CHECK */ + +static struct mem * +ptr_to_mem(mem_size_t ptr) +{ + return (struct mem *)(void *)&ram[ptr]; +} + +static mem_size_t +mem_to_ptr(void *mem) +{ + return (mem_size_t)((u8_t *)mem - ram); +} + +/** + * "Plug holes" by combining adjacent empty struct mems. + * After this function is through, there should not exist + * one empty struct mem pointing to another empty struct mem. + * + * @param mem this points to a struct mem which just has been freed + * @internal this function is only called by mem_free() and mem_trim() + * + * This assumes access to the heap is protected by the calling function + * already. 
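+ *
+ * Two merges may happen: if the following block is unused (and is not
+ * the ram_end sentinel) it is absorbed into 'mem' by rewiring the
+ * next/prev indices, and if the preceding block is unused, 'mem' is
+ * absorbed into it; 'lfree' is moved along so it keeps pointing at the
+ * lowest free block.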
+ */ +static void +plug_holes(struct mem *mem) +{ + struct mem *nmem; + struct mem *pmem; + + LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram); + LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end); + LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0); + + /* plug hole forward */ + LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED); + + nmem = ptr_to_mem(mem->next); + if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) { + /* if mem->next is unused and not end of ram, combine mem and mem->next */ + if (lfree == nmem) { + lfree = mem; + } + mem->next = nmem->next; + if (nmem->next != MEM_SIZE_ALIGNED) { + ptr_to_mem(nmem->next)->prev = mem_to_ptr(mem); + } + } + + /* plug hole backward */ + pmem = ptr_to_mem(mem->prev); + if (pmem != mem && pmem->used == 0) { + /* if mem->prev is unused, combine mem and mem->prev */ + if (lfree == mem) { + lfree = pmem; + } + pmem->next = mem->next; + if (mem->next != MEM_SIZE_ALIGNED) { + ptr_to_mem(mem->next)->prev = mem_to_ptr(pmem); + } + } +} + +/** + * Zero the heap and initialize start, end and lowest-free + */ +void +mem_init(void) +{ + struct mem *mem; + + LWIP_ASSERT("Sanity check alignment", + (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT - 1)) == 0); + + /* align the heap */ + ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER); + /* initialize the start of the heap */ + mem = (struct mem *)(void *)ram; + mem->next = MEM_SIZE_ALIGNED; + mem->prev = 0; + mem->used = 0; + /* initialize the end of the heap */ + ram_end = ptr_to_mem(MEM_SIZE_ALIGNED); + ram_end->used = 1; + ram_end->next = MEM_SIZE_ALIGNED; + ram_end->prev = MEM_SIZE_ALIGNED; + MEM_SANITY(); + + /* initialize the lowest-free pointer to the start of the heap */ + lfree = (struct mem *)(void *)ram; + + MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED); + + if (sys_mutex_new(&mem_mutex) != ERR_OK) { + LWIP_ASSERT("failed to create mem_mutex", 0); + } +} + +/* Check if a struct mem is correctly linked. + * If not, double-free is a possible reason. 
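+ * "Correctly linked" here means that the next/prev indices stay within
+ * MEM_SIZE_ALIGNED and that the blocks they reference point back at
+ * this one (allowing for the first block and the ram_end sentinel);
+ * anything else indicates a corrupted or already-freed header.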
+ */ +static int +mem_link_valid(struct mem *mem) +{ + struct mem *nmem, *pmem; + mem_size_t rmem_idx; + rmem_idx = mem_to_ptr(mem); + nmem = ptr_to_mem(mem->next); + pmem = ptr_to_mem(mem->prev); + if ((mem->next > MEM_SIZE_ALIGNED) || (mem->prev > MEM_SIZE_ALIGNED) || + ((mem->prev != rmem_idx) && (pmem->next != rmem_idx)) || + ((nmem != ram_end) && (nmem->prev != rmem_idx))) { + return 0; + } + return 1; +} + +#if MEM_SANITY_CHECK +static void +mem_sanity(void) +{ + struct mem *mem; + u8_t last_used; + + /* begin with first element here */ + mem = (struct mem *)ram; + LWIP_ASSERT("heap element used valid", (mem->used == 0) || (mem->used == 1)); + last_used = mem->used; + LWIP_ASSERT("heap element prev ptr valid", mem->prev == 0); + LWIP_ASSERT("heap element next ptr valid", mem->next <= MEM_SIZE_ALIGNED); + LWIP_ASSERT("heap element next ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->next) == ptr_to_mem(mem->next))); + + /* check all elements before the end of the heap */ + for (mem = ptr_to_mem(mem->next); + ((u8_t *)mem > ram) && (mem < ram_end); + mem = ptr_to_mem(mem->next)) { + LWIP_ASSERT("heap element aligned", LWIP_MEM_ALIGN(mem) == mem); + LWIP_ASSERT("heap element prev ptr valid", mem->prev <= MEM_SIZE_ALIGNED); + LWIP_ASSERT("heap element next ptr valid", mem->next <= MEM_SIZE_ALIGNED); + LWIP_ASSERT("heap element prev ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->prev) == ptr_to_mem(mem->prev))); + LWIP_ASSERT("heap element next ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->next) == ptr_to_mem(mem->next))); + + if (last_used == 0) { + /* 2 unused elements in a row? */ + LWIP_ASSERT("heap element unused?", mem->used == 1); + } else { + LWIP_ASSERT("heap element unused member", (mem->used == 0) || (mem->used == 1)); + } + + LWIP_ASSERT("heap element link valid", mem_link_valid(mem)); + + /* used/unused altering */ + last_used = mem->used; + } + LWIP_ASSERT("heap end ptr sanity", mem == ptr_to_mem(MEM_SIZE_ALIGNED)); + LWIP_ASSERT("heap element used valid", mem->used == 1); + LWIP_ASSERT("heap element prev ptr valid", mem->prev == MEM_SIZE_ALIGNED); + LWIP_ASSERT("heap element next ptr valid", mem->next == MEM_SIZE_ALIGNED); +} +#endif /* MEM_SANITY_CHECK */ + +/** + * Put a struct mem back on the heap + * + * @param rmem is the data portion of a struct mem as returned by a previous + * call to mem_malloc() + */ +void +mem_free(void *rmem) +{ + struct mem *mem; + LWIP_MEM_FREE_DECL_PROTECT(); + + if (rmem == NULL) { + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n")); + return; + } + if ((((mem_ptr_t)rmem) & (MEM_ALIGNMENT - 1)) != 0) { + LWIP_MEM_ILLEGAL_FREE("mem_free: sanity check alignment"); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: sanity check alignment\n")); + /* protect mem stats from concurrent access */ + MEM_STATS_INC_LOCKED(illegal); + return; + } + + /* Get the corresponding struct mem: */ + /* cast through void* to get rid of alignment warnings */ + mem = (struct mem *)(void *)((u8_t *)rmem - (SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET)); + + if ((u8_t *)mem < ram || (u8_t *)rmem + MIN_SIZE_ALIGNED > (u8_t *)ram_end) { + LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory"); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n")); + /* protect mem stats from concurrent access */ + MEM_STATS_INC_LOCKED(illegal); + return; + } +#if MEM_OVERFLOW_CHECK + mem_overflow_check_element(mem); +#endif + /* protect the heap from concurrent access */ + LWIP_MEM_FREE_PROTECT(); + /* mem has to 
be in a used state */ + if (!mem->used) { + LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory: double free"); + LWIP_MEM_FREE_UNPROTECT(); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory: double free?\n")); + /* protect mem stats from concurrent access */ + MEM_STATS_INC_LOCKED(illegal); + return; + } + + if (!mem_link_valid(mem)) { + LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory: non-linked: double free"); + LWIP_MEM_FREE_UNPROTECT(); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory: non-linked: double free?\n")); + /* protect mem stats from concurrent access */ + MEM_STATS_INC_LOCKED(illegal); + return; + } + + /* mem is now unused. */ + mem->used = 0; + + if (mem < lfree) { + /* the newly freed struct is now the lowest */ + lfree = mem; + } + + MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram))); + + /* finally, see if prev or next are free also */ + plug_holes(mem); + MEM_SANITY(); +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + mem_free_count = 1; +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + LWIP_MEM_FREE_UNPROTECT(); +} + +/** + * Shrink memory returned by mem_malloc(). + * + * @param rmem pointer to memory allocated by mem_malloc the is to be shrinked + * @param new_size required size after shrinking (needs to be smaller than or + * equal to the previous size) + * @return for compatibility reasons: is always == rmem, at the moment + * or NULL if newsize is > old size, in which case rmem is NOT touched + * or freed! + */ +void * +mem_trim(void *rmem, mem_size_t new_size) +{ + mem_size_t size, newsize; + mem_size_t ptr, ptr2; + struct mem *mem, *mem2; + /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */ + LWIP_MEM_FREE_DECL_PROTECT(); + + /* Expand the size of the allocated memory region so that we can + adjust for alignment. */ + newsize = (mem_size_t)LWIP_MEM_ALIGN_SIZE(new_size); + if (newsize < MIN_SIZE_ALIGNED) { + /* every data block must be at least MIN_SIZE_ALIGNED long */ + newsize = MIN_SIZE_ALIGNED; + } +#if MEM_OVERFLOW_CHECK + newsize += MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED; +#endif + if ((newsize > MEM_SIZE_ALIGNED) || (newsize < new_size)) { + return NULL; + } + + LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram && + (u8_t *)rmem < (u8_t *)ram_end); + + if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) { + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n")); + /* protect mem stats from concurrent access */ + MEM_STATS_INC_LOCKED(illegal); + return rmem; + } + /* Get the corresponding struct mem ... */ + /* cast through void* to get rid of alignment warnings */ + mem = (struct mem *)(void *)((u8_t *)rmem - (SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET)); +#if MEM_OVERFLOW_CHECK + mem_overflow_check_element(mem); +#endif + /* ... 
and its offset pointer */ + ptr = mem_to_ptr(mem); + + size = (mem_size_t)((mem_size_t)(mem->next - ptr) - (SIZEOF_STRUCT_MEM + MEM_SANITY_OVERHEAD)); + LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size); + if (newsize > size) { + /* not supported */ + return NULL; + } + if (newsize == size) { + /* No change in size, simply return */ + return rmem; + } + + /* protect the heap from concurrent access */ + LWIP_MEM_FREE_PROTECT(); + + mem2 = ptr_to_mem(mem->next); + if (mem2->used == 0) { + /* The next struct is unused, we can simply move it at little */ + mem_size_t next; + LWIP_ASSERT("invalid next ptr", mem->next != MEM_SIZE_ALIGNED); + /* remember the old next pointer */ + next = mem2->next; + /* create new struct mem which is moved directly after the shrinked mem */ + ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize); + if (lfree == mem2) { + lfree = ptr_to_mem(ptr2); + } + mem2 = ptr_to_mem(ptr2); + mem2->used = 0; + /* restore the next pointer */ + mem2->next = next; + /* link it back to mem */ + mem2->prev = ptr; + /* link mem to it */ + mem->next = ptr2; + /* last thing to restore linked list: as we have moved mem2, + * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not + * the end of the heap */ + if (mem2->next != MEM_SIZE_ALIGNED) { + ptr_to_mem(mem2->next)->prev = ptr2; + } + MEM_STATS_DEC_USED(used, (size - newsize)); + /* no need to plug holes, we've already done that */ + } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) { + /* Next struct is used but there's room for another struct mem with + * at least MIN_SIZE_ALIGNED of data. + * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem + * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED'). + * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty + * region that couldn't hold data, but when mem->next gets freed, + * the 2 regions would be combined, resulting in more free memory */ + ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize); + LWIP_ASSERT("invalid next ptr", mem->next != MEM_SIZE_ALIGNED); + mem2 = ptr_to_mem(ptr2); + if (mem2 < lfree) { + lfree = mem2; + } + mem2->used = 0; + mem2->next = mem->next; + mem2->prev = ptr; + mem->next = ptr2; + if (mem2->next != MEM_SIZE_ALIGNED) { + ptr_to_mem(mem2->next)->prev = ptr2; + } + MEM_STATS_DEC_USED(used, (size - newsize)); + /* the original mem->next is used, so no need to plug holes! */ + } + /* else { + next struct mem is used but size between mem and mem2 is not big enough + to create another struct mem + -> don't do anyhting. + -> the remaining space stays unused since it is too small + } */ +#if MEM_OVERFLOW_CHECK + mem_overflow_init_element(mem, new_size); +#endif + MEM_SANITY(); +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + mem_free_count = 1; +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + LWIP_MEM_FREE_UNPROTECT(); + return rmem; +} + +/** + * Allocate a block of memory with a minimum of 'size' bytes. + * + * @param size_in is the minimum size of the requested block in bytes. + * @return pointer to allocated memory or NULL if no free memory was found. + * + * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT). 
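+ *
+ * The requested size is rounded up to MEM_ALIGNMENT and to at least
+ * MIN_SIZE_ALIGNED, and every allocation carries one SIZEOF_STRUCT_MEM
+ * header in front of it. As a worked example, assuming MEM_ALIGNMENT
+ * is 4 and MIN_SIZE is the default 12: mem_malloc(10) reserves 12
+ * bytes of user data plus one struct mem of bookkeeping.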
+ */ +void * +mem_malloc(mem_size_t size_in) +{ + mem_size_t ptr, ptr2, size; + struct mem *mem, *mem2; +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + u8_t local_mem_free_count = 0; +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + LWIP_MEM_ALLOC_DECL_PROTECT(); + + if (size_in == 0) { + return NULL; + } + + /* Expand the size of the allocated memory region so that we can + adjust for alignment. */ + size = (mem_size_t)LWIP_MEM_ALIGN_SIZE(size_in); + if (size < MIN_SIZE_ALIGNED) { + /* every data block must be at least MIN_SIZE_ALIGNED long */ + size = MIN_SIZE_ALIGNED; + } +#if MEM_OVERFLOW_CHECK + size += MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED; +#endif + if ((size > MEM_SIZE_ALIGNED) || (size < size_in)) { + return NULL; + } + + /* protect the heap from concurrent access */ + sys_mutex_lock(&mem_mutex); + LWIP_MEM_ALLOC_PROTECT(); +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + /* run as long as a mem_free disturbed mem_malloc or mem_trim */ + do { + local_mem_free_count = 0; +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + + /* Scan through the heap searching for a free block that is big enough, + * beginning with the lowest free block. + */ + for (ptr = mem_to_ptr(lfree); ptr < MEM_SIZE_ALIGNED - size; + ptr = ptr_to_mem(ptr)->next) { + mem = ptr_to_mem(ptr); +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + mem_free_count = 0; + LWIP_MEM_ALLOC_UNPROTECT(); + /* allow mem_free or mem_trim to run */ + LWIP_MEM_ALLOC_PROTECT(); + if (mem_free_count != 0) { + /* If mem_free or mem_trim have run, we have to restart since they + could have altered our current struct mem. */ + local_mem_free_count = 1; + break; + } +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + + if ((!mem->used) && + (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) { + /* mem is not used and at least perfect fit is possible: + * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */ + + if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) { + /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing + * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem') + * -> split large block, create empty remainder, + * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if + * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size, + * struct mem would fit in but no data between mem2 and mem2->next + * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty + * region that couldn't hold data, but when mem->next gets freed, + * the 2 regions would be combined, resulting in more free memory + */ + ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + size); + LWIP_ASSERT("invalid next ptr",ptr2 != MEM_SIZE_ALIGNED); + /* create mem2 struct */ + mem2 = ptr_to_mem(ptr2); + mem2->used = 0; + mem2->next = mem->next; + mem2->prev = ptr; + /* and insert it between mem and mem->next */ + mem->next = ptr2; + mem->used = 1; + + if (mem2->next != MEM_SIZE_ALIGNED) { + ptr_to_mem(mem2->next)->prev = ptr2; + } + MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM)); + } else { + /* (a mem2 struct does no fit into the user data space of mem and mem->next will always + * be used at this point: if not we have 2 unused structs in a row, plug_holes should have + * take care of this). + * -> near fit or exact fit: do not split, no mem2 creation + * also can't move mem->next directly behind mem, since mem->next + * will always be used at this point! 
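+ * In this branch the whole free block is handed out as-is, so the
+ * used-bytes statistic below is charged with the full block size
+ * (mem->next - mem_to_ptr(mem)) rather than with 'size'.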
+ */ + mem->used = 1; + MEM_STATS_INC_USED(used, mem->next - mem_to_ptr(mem)); + } +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT +mem_malloc_adjust_lfree: +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + if (mem == lfree) { + struct mem *cur = lfree; + /* Find next free block after mem and update lowest free pointer */ + while (cur->used && cur != ram_end) { +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + mem_free_count = 0; + LWIP_MEM_ALLOC_UNPROTECT(); + /* prevent high interrupt latency... */ + LWIP_MEM_ALLOC_PROTECT(); + if (mem_free_count != 0) { + /* If mem_free or mem_trim have run, we have to restart since they + could have altered our current struct mem or lfree. */ + goto mem_malloc_adjust_lfree; + } +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + cur = ptr_to_mem(cur->next); + } + lfree = cur; + LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used))); + } + LWIP_MEM_ALLOC_UNPROTECT(); + sys_mutex_unlock(&mem_mutex); + LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.", + (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end); + LWIP_ASSERT("mem_malloc: allocated memory properly aligned.", + ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0); + LWIP_ASSERT("mem_malloc: sanity check alignment", + (((mem_ptr_t)mem) & (MEM_ALIGNMENT - 1)) == 0); + +#if MEM_OVERFLOW_CHECK + mem_overflow_init_element(mem, size_in); +#endif + MEM_SANITY(); + return (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET; + } + } +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + /* if we got interrupted by a mem_free, try again */ + } while (local_mem_free_count != 0); +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + MEM_STATS_INC(err); + LWIP_MEM_ALLOC_UNPROTECT(); + sys_mutex_unlock(&mem_mutex); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size)); + return NULL; +} + +#endif /* MEM_USE_POOLS */ + +#if MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) +void * +mem_calloc(mem_size_t count, mem_size_t size) +{ + return mem_clib_calloc(count, size); +} + +#else /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */ +/** + * Contiguously allocates enough space for count objects that are size bytes + * of memory each and returns a pointer to the allocated memory. + * + * The allocated memory is filled with bytes of value zero. + * + * @param count number of objects to allocate + * @param size size of the objects to allocate + * @return pointer to allocated memory / NULL pointer if there is an error + */ +void * +mem_calloc(mem_size_t count, mem_size_t size) +{ + void *p; + size_t alloc_size = (size_t)count * (size_t)size; + + if ((size_t)(mem_size_t)alloc_size != alloc_size) { + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_calloc: could not allocate %"SZT_F" bytes\n", alloc_size)); + return NULL; + } + + /* allocate 'count' objects of size 'size' */ + p = mem_malloc((mem_size_t)alloc_size); + if (p) { + /* zero the memory */ + memset(p, 0, alloc_size); + } + return p; +} +#endif /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */ diff --git a/Middlewares/Third_Party/LwIP/src/core/memp.c b/Middlewares/Third_Party/LwIP/src/core/memp.c new file mode 100644 index 0000000..352ce5a --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/memp.c @@ -0,0 +1,447 @@ +/** + * @file + * Dynamic pool memory manager + * + * lwIP has dedicated pools for many structures (netconn, protocol control blocks, + * packet buffers, ...). All these pools are managed here. 
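+ *
+ * Applications can also declare private pools. A minimal sketch (the
+ * names 'my_pool' and 'struct my_obj' are illustrative only):
+ *
+ *   LWIP_MEMPOOL_DECLARE(my_pool, 8, sizeof(struct my_obj), "my objects")
+ *
+ *   memp_init_pool(&memp_my_pool);
+ *   struct my_obj *o = (struct my_obj *)memp_malloc_pool(&memp_my_pool);
+ *   if (o != NULL) {
+ *     memp_free_pool(&memp_my_pool, o);
+ *   }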
+ * + * @defgroup mempool Memory pools + * @ingroup infrastructure + * Custom memory pools + + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#include "lwip/memp.h" +#include "lwip/sys.h" +#include "lwip/stats.h" + +#include + +/* Make sure we include everything we need for size calculation required by memp_std.h */ +#include "lwip/pbuf.h" +#include "lwip/raw.h" +#include "lwip/udp.h" +#include "lwip/tcp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/altcp.h" +#include "lwip/ip4_frag.h" +#include "lwip/netbuf.h" +#include "lwip/api.h" +#include "lwip/priv/tcpip_priv.h" +#include "lwip/priv/api_msg.h" +#include "lwip/priv/sockets_priv.h" +#include "lwip/etharp.h" +#include "lwip/igmp.h" +#include "lwip/timeouts.h" +/* needed by default MEMP_NUM_SYS_TIMEOUT */ +#include "netif/ppp/ppp_opts.h" +#include "lwip/netdb.h" +#include "lwip/dns.h" +#include "lwip/priv/nd6_priv.h" +#include "lwip/ip6_frag.h" +#include "lwip/mld6.h" + +#define LWIP_MEMPOOL(name,num,size,desc) LWIP_MEMPOOL_DECLARE(name,num,size,desc) +#include "lwip/priv/memp_std.h" + +const struct memp_desc *const memp_pools[MEMP_MAX] = { +#define LWIP_MEMPOOL(name,num,size,desc) &memp_ ## name, +#include "lwip/priv/memp_std.h" +}; + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +#if MEMP_MEM_MALLOC && MEMP_OVERFLOW_CHECK >= 2 +#undef MEMP_OVERFLOW_CHECK +/* MEMP_OVERFLOW_CHECK >= 2 does not work with MEMP_MEM_MALLOC, use 1 instead */ +#define MEMP_OVERFLOW_CHECK 1 +#endif + +#if MEMP_SANITY_CHECK && !MEMP_MEM_MALLOC +/** + * Check that memp-lists don't form a circle, using "Floyd's cycle-finding algorithm". + */ +static int +memp_sanity(const struct memp_desc *desc) +{ + struct memp *t, *h; + + t = *desc->tab; + if (t != NULL) { + for (h = t->next; (t != NULL) && (h != NULL); t = t->next, + h = ((h->next != NULL) ? 
h->next->next : NULL)) { + if (t == h) { + return 0; + } + } + } + + return 1; +} +#endif /* MEMP_SANITY_CHECK && !MEMP_MEM_MALLOC */ + +#if MEMP_OVERFLOW_CHECK +/** + * Check if a memp element was victim of an overflow or underflow + * (e.g. the restricted area after/before it has been altered) + * + * @param p the memp element to check + * @param desc the pool p comes from + */ +static void +memp_overflow_check_element(struct memp *p, const struct memp_desc *desc) +{ + mem_overflow_check_raw((u8_t *)p + MEMP_SIZE, desc->size, "pool ", desc->desc); +} + +/** + * Initialize the restricted area of on memp element. + */ +static void +memp_overflow_init_element(struct memp *p, const struct memp_desc *desc) +{ + mem_overflow_init_raw((u8_t *)p + MEMP_SIZE, desc->size); +} + +#if MEMP_OVERFLOW_CHECK >= 2 +/** + * Do an overflow check for all elements in every pool. + * + * @see memp_overflow_check_element for a description of the check + */ +static void +memp_overflow_check_all(void) +{ + u16_t i, j; + struct memp *p; + SYS_ARCH_DECL_PROTECT(old_level); + SYS_ARCH_PROTECT(old_level); + + for (i = 0; i < MEMP_MAX; ++i) { + p = (struct memp *)LWIP_MEM_ALIGN(memp_pools[i]->base); + for (j = 0; j < memp_pools[i]->num; ++j) { + memp_overflow_check_element(p, memp_pools[i]); + p = LWIP_ALIGNMENT_CAST(struct memp *, ((u8_t *)p + MEMP_SIZE + memp_pools[i]->size + MEM_SANITY_REGION_AFTER_ALIGNED)); + } + } + SYS_ARCH_UNPROTECT(old_level); +} +#endif /* MEMP_OVERFLOW_CHECK >= 2 */ +#endif /* MEMP_OVERFLOW_CHECK */ + +/** + * Initialize custom memory pool. + * Related functions: memp_malloc_pool, memp_free_pool + * + * @param desc pool to initialize + */ +void +memp_init_pool(const struct memp_desc *desc) +{ +#if MEMP_MEM_MALLOC + LWIP_UNUSED_ARG(desc); +#else + int i; + struct memp *memp; + + *desc->tab = NULL; + memp = (struct memp *)LWIP_MEM_ALIGN(desc->base); +#if MEMP_MEM_INIT + /* force memset on pool memory */ + memset(memp, 0, (size_t)desc->num * (MEMP_SIZE + desc->size +#if MEMP_OVERFLOW_CHECK + + MEM_SANITY_REGION_AFTER_ALIGNED +#endif + )); +#endif + /* create a linked list of memp elements */ + for (i = 0; i < desc->num; ++i) { + memp->next = *desc->tab; + *desc->tab = memp; +#if MEMP_OVERFLOW_CHECK + memp_overflow_init_element(memp, desc); +#endif /* MEMP_OVERFLOW_CHECK */ + /* cast through void* to get rid of alignment warnings */ + memp = (struct memp *)(void *)((u8_t *)memp + MEMP_SIZE + desc->size +#if MEMP_OVERFLOW_CHECK + + MEM_SANITY_REGION_AFTER_ALIGNED +#endif + ); + } +#if MEMP_STATS + desc->stats->avail = desc->num; +#endif /* MEMP_STATS */ +#endif /* !MEMP_MEM_MALLOC */ + +#if MEMP_STATS && (defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY) + desc->stats->name = desc->desc; +#endif /* MEMP_STATS && (defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY) */ +} + +/** + * Initializes lwIP built-in pools. + * Related functions: memp_malloc, memp_free + * + * Carves out memp_memory into linked lists for each pool-type. 
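+ *
+ * Once the pools are initialized, elements are taken and returned by
+ * pool type, as nd6_queue_packet() does elsewhere in this patch:
+ *
+ *   struct nd6_q_entry *e = (struct nd6_q_entry *)memp_malloc(MEMP_ND6_QUEUE);
+ *   if (e != NULL) {
+ *     memp_free(MEMP_ND6_QUEUE, e);
+ *   }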
+ */ +void +memp_init(void) +{ + u16_t i; + + /* for every pool: */ + for (i = 0; i < LWIP_ARRAYSIZE(memp_pools); i++) { + memp_init_pool(memp_pools[i]); + +#if LWIP_STATS && MEMP_STATS + lwip_stats.memp[i] = memp_pools[i]->stats; +#endif + } + +#if MEMP_OVERFLOW_CHECK >= 2 + /* check everything a first time to see if it worked */ + memp_overflow_check_all(); +#endif /* MEMP_OVERFLOW_CHECK >= 2 */ +} + +static void * +#if !MEMP_OVERFLOW_CHECK +do_memp_malloc_pool(const struct memp_desc *desc) +#else +do_memp_malloc_pool_fn(const struct memp_desc *desc, const char *file, const int line) +#endif +{ + struct memp *memp; + SYS_ARCH_DECL_PROTECT(old_level); + +#if MEMP_MEM_MALLOC + memp = (struct memp *)mem_malloc(MEMP_SIZE + MEMP_ALIGN_SIZE(desc->size)); + SYS_ARCH_PROTECT(old_level); +#else /* MEMP_MEM_MALLOC */ + SYS_ARCH_PROTECT(old_level); + + memp = *desc->tab; +#endif /* MEMP_MEM_MALLOC */ + + if (memp != NULL) { +#if !MEMP_MEM_MALLOC +#if MEMP_OVERFLOW_CHECK == 1 + memp_overflow_check_element(memp, desc); +#endif /* MEMP_OVERFLOW_CHECK */ + + *desc->tab = memp->next; +#if MEMP_OVERFLOW_CHECK + memp->next = NULL; +#endif /* MEMP_OVERFLOW_CHECK */ +#endif /* !MEMP_MEM_MALLOC */ +#if MEMP_OVERFLOW_CHECK + memp->file = file; + memp->line = line; +#if MEMP_MEM_MALLOC + memp_overflow_init_element(memp, desc); +#endif /* MEMP_MEM_MALLOC */ +#endif /* MEMP_OVERFLOW_CHECK */ + LWIP_ASSERT("memp_malloc: memp properly aligned", + ((mem_ptr_t)memp % MEM_ALIGNMENT) == 0); +#if MEMP_STATS + desc->stats->used++; + if (desc->stats->used > desc->stats->max) { + desc->stats->max = desc->stats->used; + } +#endif + SYS_ARCH_UNPROTECT(old_level); + /* cast through u8_t* to get rid of alignment warnings */ + return ((u8_t *)memp + MEMP_SIZE); + } else { +#if MEMP_STATS + desc->stats->err++; +#endif + SYS_ARCH_UNPROTECT(old_level); + LWIP_DEBUGF(MEMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("memp_malloc: out of memory in pool %s\n", desc->desc)); + } + + return NULL; +} + +/** + * Get an element from a custom pool. + * + * @param desc the pool to get an element from + * + * @return a pointer to the allocated memory or a NULL pointer on error + */ +void * +#if !MEMP_OVERFLOW_CHECK +memp_malloc_pool(const struct memp_desc *desc) +#else +memp_malloc_pool_fn(const struct memp_desc *desc, const char *file, const int line) +#endif +{ + LWIP_ASSERT("invalid pool desc", desc != NULL); + if (desc == NULL) { + return NULL; + } + +#if !MEMP_OVERFLOW_CHECK + return do_memp_malloc_pool(desc); +#else + return do_memp_malloc_pool_fn(desc, file, line); +#endif +} + +/** + * Get an element from a specific pool. 
+ * + * @param type the pool to get an element from + * + * @return a pointer to the allocated memory or a NULL pointer on error + */ +void * +#if !MEMP_OVERFLOW_CHECK +memp_malloc(memp_t type) +#else +memp_malloc_fn(memp_t type, const char *file, const int line) +#endif +{ + void *memp; + LWIP_ERROR("memp_malloc: type < MEMP_MAX", (type < MEMP_MAX), return NULL;); + +#if MEMP_OVERFLOW_CHECK >= 2 + memp_overflow_check_all(); +#endif /* MEMP_OVERFLOW_CHECK >= 2 */ + +#if !MEMP_OVERFLOW_CHECK + memp = do_memp_malloc_pool(memp_pools[type]); +#else + memp = do_memp_malloc_pool_fn(memp_pools[type], file, line); +#endif + + return memp; +} + +static void +do_memp_free_pool(const struct memp_desc *desc, void *mem) +{ + struct memp *memp; + SYS_ARCH_DECL_PROTECT(old_level); + + LWIP_ASSERT("memp_free: mem properly aligned", + ((mem_ptr_t)mem % MEM_ALIGNMENT) == 0); + + /* cast through void* to get rid of alignment warnings */ + memp = (struct memp *)(void *)((u8_t *)mem - MEMP_SIZE); + + SYS_ARCH_PROTECT(old_level); + +#if MEMP_OVERFLOW_CHECK == 1 + memp_overflow_check_element(memp, desc); +#endif /* MEMP_OVERFLOW_CHECK */ + +#if MEMP_STATS + desc->stats->used--; +#endif + +#if MEMP_MEM_MALLOC + LWIP_UNUSED_ARG(desc); + SYS_ARCH_UNPROTECT(old_level); + mem_free(memp); +#else /* MEMP_MEM_MALLOC */ + memp->next = *desc->tab; + *desc->tab = memp; + +#if MEMP_SANITY_CHECK + LWIP_ASSERT("memp sanity", memp_sanity(desc)); +#endif /* MEMP_SANITY_CHECK */ + + SYS_ARCH_UNPROTECT(old_level); +#endif /* !MEMP_MEM_MALLOC */ +} + +/** + * Put a custom pool element back into its pool. + * + * @param desc the pool where to put mem + * @param mem the memp element to free + */ +void +memp_free_pool(const struct memp_desc *desc, void *mem) +{ + LWIP_ASSERT("invalid pool desc", desc != NULL); + if ((desc == NULL) || (mem == NULL)) { + return; + } + + do_memp_free_pool(desc, mem); +} + +/** + * Put an element back into its pool. + * + * @param type the pool where to put mem + * @param mem the memp element to free + */ +void +memp_free(memp_t type, void *mem) +{ +#ifdef LWIP_HOOK_MEMP_AVAILABLE + struct memp *old_first; +#endif + + LWIP_ERROR("memp_free: type < MEMP_MAX", (type < MEMP_MAX), return;); + + if (mem == NULL) { + return; + } + +#if MEMP_OVERFLOW_CHECK >= 2 + memp_overflow_check_all(); +#endif /* MEMP_OVERFLOW_CHECK >= 2 */ + +#ifdef LWIP_HOOK_MEMP_AVAILABLE + old_first = *memp_pools[type]->tab; +#endif + + do_memp_free_pool(memp_pools[type], mem); + +#ifdef LWIP_HOOK_MEMP_AVAILABLE + if (old_first == NULL) { + LWIP_HOOK_MEMP_AVAILABLE(type); + } +#endif +} diff --git a/Middlewares/Third_Party/LwIP/src/core/netif.c b/Middlewares/Third_Party/LwIP/src/core/netif.c new file mode 100644 index 0000000..15200a2 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/netif.c @@ -0,0 +1,1795 @@ +/** + * @file + * lwIP network interface abstraction + * + * @defgroup netif Network interface (NETIF) + * @ingroup callbackstyle_api + * + * @defgroup netif_ip4 IPv4 address handling + * @ingroup netif + * + * @defgroup netif_ip6 IPv6 address handling + * @ingroup netif + * + * @defgroup netif_cd Client data handling + * Store data (void*) on a netif for application usage. + * @see @ref LWIP_NUM_NETIF_CLIENT_DATA + * @ingroup netif + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + */ + +#include "lwip/opt.h" + +#include <string.h> /* memset */ +#include <stdlib.h> /* atoi */ + +#include "lwip/def.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/udp.h" +#include "lwip/priv/raw_priv.h" +#include "lwip/snmp.h" +#include "lwip/igmp.h" +#include "lwip/etharp.h" +#include "lwip/stats.h" +#include "lwip/sys.h" +#include "lwip/ip.h" +#if ENABLE_LOOPBACK +#if LWIP_NETIF_LOOPBACK_MULTITHREADING +#include "lwip/tcpip.h" +#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ +#endif /* ENABLE_LOOPBACK */ + +#include "netif/ethernet.h" + +#if LWIP_AUTOIP +#include "lwip/autoip.h" +#endif /* LWIP_AUTOIP */ +#if LWIP_DHCP +#include "lwip/dhcp.h" +#endif /* LWIP_DHCP */ +#if LWIP_IPV6_DHCP6 +#include "lwip/dhcp6.h" +#endif /* LWIP_IPV6_DHCP6 */ +#if LWIP_IPV6_MLD +#include "lwip/mld6.h" +#endif /* LWIP_IPV6_MLD */ +#if LWIP_IPV6 +#include "lwip/nd6.h" +#endif + +#if LWIP_NETIF_STATUS_CALLBACK +#define NETIF_STATUS_CALLBACK(n) do{ if (n->status_callback) { (n->status_callback)(n); }}while(0) +#else +#define NETIF_STATUS_CALLBACK(n) +#endif /* LWIP_NETIF_STATUS_CALLBACK */ + +#if LWIP_NETIF_LINK_CALLBACK +#define NETIF_LINK_CALLBACK(n) do{ if (n->link_callback) { (n->link_callback)(n); }}while(0) +#else +#define NETIF_LINK_CALLBACK(n) +#endif /* LWIP_NETIF_LINK_CALLBACK */ + +#if LWIP_NETIF_EXT_STATUS_CALLBACK +static netif_ext_callback_t *ext_callback; +#endif + +#if !LWIP_SINGLE_NETIF +struct netif *netif_list; +#endif /* !LWIP_SINGLE_NETIF */ +struct netif *netif_default; + +#define netif_index_to_num(index) ((index) - 1) +static u8_t netif_num; + +#if LWIP_NUM_NETIF_CLIENT_DATA > 0 +static u8_t netif_client_id; +#endif + +#define NETIF_REPORT_TYPE_IPV4 0x01 +#define NETIF_REPORT_TYPE_IPV6 0x02 +static void netif_issue_reports(struct netif *netif, u8_t report_type); + +#if LWIP_IPV6 +static err_t netif_null_output_ip6(struct netif *netif, struct pbuf *p, const ip6_addr_t *ipaddr); +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 +static err_t netif_null_output_ip4(struct netif *netif, struct pbuf *p, const ip4_addr_t *ipaddr); +#endif /* LWIP_IPV4 */ + +#if LWIP_HAVE_LOOPIF +#if LWIP_IPV4 +static err_t netif_loop_output_ipv4(struct
netif *netif, struct pbuf *p, const ip4_addr_t *addr); +#endif +#if LWIP_IPV6 +static err_t netif_loop_output_ipv6(struct netif *netif, struct pbuf *p, const ip6_addr_t *addr); +#endif + + +static struct netif loop_netif; + +/** + * Initialize a lwip network interface structure for a loopback interface + * + * @param netif the lwip network interface structure for this loopif + * @return ERR_OK if the loopif is initialized + * ERR_MEM if private data couldn't be allocated + */ +static err_t +netif_loopif_init(struct netif *netif) +{ + LWIP_ASSERT("netif_loopif_init: invalid netif", netif != NULL); + + /* initialize the snmp variables and counters inside the struct netif + * ifSpeed: no assumption can be made! + */ + MIB2_INIT_NETIF(netif, snmp_ifType_softwareLoopback, 0); + + netif->name[0] = 'l'; + netif->name[1] = 'o'; +#if LWIP_IPV4 + netif->output = netif_loop_output_ipv4; +#endif +#if LWIP_IPV6 + netif->output_ip6 = netif_loop_output_ipv6; +#endif +#if LWIP_LOOPIF_MULTICAST + netif_set_flags(netif, NETIF_FLAG_IGMP); +#endif + NETIF_SET_CHECKSUM_CTRL(netif, NETIF_CHECKSUM_DISABLE_ALL); + return ERR_OK; +} +#endif /* LWIP_HAVE_LOOPIF */ + +void +netif_init(void) +{ +#if LWIP_HAVE_LOOPIF +#if LWIP_IPV4 +#define LOOPIF_ADDRINIT &loop_ipaddr, &loop_netmask, &loop_gw, + ip4_addr_t loop_ipaddr, loop_netmask, loop_gw; + IP4_ADDR(&loop_gw, 127, 0, 0, 1); + IP4_ADDR(&loop_ipaddr, 127, 0, 0, 1); + IP4_ADDR(&loop_netmask, 255, 0, 0, 0); +#else /* LWIP_IPV4 */ +#define LOOPIF_ADDRINIT +#endif /* LWIP_IPV4 */ + +#if NO_SYS + netif_add(&loop_netif, LOOPIF_ADDRINIT NULL, netif_loopif_init, ip_input); +#else /* NO_SYS */ + netif_add(&loop_netif, LOOPIF_ADDRINIT NULL, netif_loopif_init, tcpip_input); +#endif /* NO_SYS */ + +#if LWIP_IPV6 + IP_ADDR6_HOST(loop_netif.ip6_addr, 0, 0, 0, 0x00000001UL); + loop_netif.ip6_addr_state[0] = IP6_ADDR_VALID; +#endif /* LWIP_IPV6 */ + + netif_set_link_up(&loop_netif); + netif_set_up(&loop_netif); + +#endif /* LWIP_HAVE_LOOPIF */ +} + +/** + * @ingroup lwip_nosys + * Forwards a received packet for input processing with + * ethernet_input() or ip_input() depending on netif flags. + * Don't call directly, pass to netif_add() and call + * netif->input(). + * Only works if the netif driver correctly sets + * NETIF_FLAG_ETHARP and/or NETIF_FLAG_ETHERNET flag! + */ +err_t +netif_input(struct pbuf *p, struct netif *inp) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("netif_input: invalid pbuf", p != NULL); + LWIP_ASSERT("netif_input: invalid netif", inp != NULL); + +#if LWIP_ETHERNET + if (inp->flags & (NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET)) { + return ethernet_input(p, inp); + } else +#endif /* LWIP_ETHERNET */ + return ip_input(p, inp); +} + +/** + * @ingroup netif + * Add a network interface to the list of lwIP netifs. + * + * Same as @ref netif_add but without IPv4 addresses + */ +struct netif * +netif_add_noaddr(struct netif *netif, void *state, netif_init_fn init, netif_input_fn input) +{ + return netif_add(netif, +#if LWIP_IPV4 + NULL, NULL, NULL, +#endif /* LWIP_IPV4*/ + state, init, input); +} + +/** + * @ingroup netif + * Add a network interface to the list of lwIP netifs. 
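As a usage sketch (not from this patch), a typical NO_SYS=0 bring-up of an Ethernet interface might look as follows; ethernetif_init stands in for whatever driver init function the port provides, and the addresses are examples only:

#include "lwip/netif.h"
#include "lwip/tcpip.h"

extern err_t ethernetif_init(struct netif *netif);  /* placeholder: the port's driver init */

static struct netif eth_netif;

void my_netif_setup(void)                 /* placeholder name */
{
  ip4_addr_t ip, mask, gw;

  IP4_ADDR(&ip,   192, 168,   1, 10);     /* example addresses only */
  IP4_ADDR(&mask, 255, 255, 255,  0);
  IP4_ADDR(&gw,   192, 168,   1,  1);

  netif_add(&eth_netif, &ip, &mask, &gw, NULL, ethernetif_init, tcpip_input);
  netif_set_default(&eth_netif);
  netif_set_up(&eth_netif);               /* admin up; link up is reported by the driver */
}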
+ * + * @param netif a pre-allocated netif structure + * @param ipaddr IP address for the new netif + * @param netmask network mask for the new netif + * @param gw default gateway IP address for the new netif + * @param state opaque data passed to the new netif + * @param init callback function that initializes the interface + * @param input callback function that is called to pass + * ingress packets up in the protocol layer stack.\n + * It is recommended to use a function that passes the input directly + * to the stack (netif_input(), NO_SYS=1 mode) or via sending a + * message to TCPIP thread (tcpip_input(), NO_SYS=0 mode).\n + * These functions use netif flags NETIF_FLAG_ETHARP and NETIF_FLAG_ETHERNET + * to decide whether to forward to ethernet_input() or ip_input(). + * In other words, the functions only work when the netif + * driver is implemented correctly!\n + * Most members of struct netif should be be initialized by the + * netif init function = netif driver (init parameter of this function).\n + * IPv6: Don't forget to call netif_create_ip6_linklocal_address() after + * setting the MAC address in struct netif.hwaddr + * (IPv6 requires a link-local address). + * + * @return netif, or NULL if failed. + */ +struct netif * +netif_add(struct netif *netif, +#if LWIP_IPV4 + const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw, +#endif /* LWIP_IPV4 */ + void *state, netif_init_fn init, netif_input_fn input) +{ +#if LWIP_IPV6 + s8_t i; +#endif + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_SINGLE_NETIF + if (netif_default != NULL) { + LWIP_ASSERT("single netif already set", 0); + return NULL; + } +#endif + + LWIP_ERROR("netif_add: invalid netif", netif != NULL, return NULL); + LWIP_ERROR("netif_add: No init function given", init != NULL, return NULL); + +#if LWIP_IPV4 + if (ipaddr == NULL) { + ipaddr = ip_2_ip4(IP4_ADDR_ANY); + } + if (netmask == NULL) { + netmask = ip_2_ip4(IP4_ADDR_ANY); + } + if (gw == NULL) { + gw = ip_2_ip4(IP4_ADDR_ANY); + } + + /* reset new interface configuration state */ + ip_addr_set_zero_ip4(&netif->ip_addr); + ip_addr_set_zero_ip4(&netif->netmask); + ip_addr_set_zero_ip4(&netif->gw); + netif->output = netif_null_output_ip4; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + ip_addr_set_zero_ip6(&netif->ip6_addr[i]); + netif->ip6_addr_state[i] = IP6_ADDR_INVALID; +#if LWIP_IPV6_ADDRESS_LIFETIMES + netif->ip6_addr_valid_life[i] = IP6_ADDR_LIFE_STATIC; + netif->ip6_addr_pref_life[i] = IP6_ADDR_LIFE_STATIC; +#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */ + } + netif->output_ip6 = netif_null_output_ip6; +#endif /* LWIP_IPV6 */ + NETIF_SET_CHECKSUM_CTRL(netif, NETIF_CHECKSUM_ENABLE_ALL); + netif->mtu = 0; + netif->flags = 0; +#ifdef netif_get_client_data + memset(netif->client_data, 0, sizeof(netif->client_data)); +#endif /* LWIP_NUM_NETIF_CLIENT_DATA */ +#if LWIP_IPV6 +#if LWIP_IPV6_AUTOCONFIG + /* IPv6 address autoconfiguration not enabled by default */ + netif->ip6_autoconfig_enabled = 0; +#endif /* LWIP_IPV6_AUTOCONFIG */ + nd6_restart_netif(netif); +#endif /* LWIP_IPV6 */ +#if LWIP_NETIF_STATUS_CALLBACK + netif->status_callback = NULL; +#endif /* LWIP_NETIF_STATUS_CALLBACK */ +#if LWIP_NETIF_LINK_CALLBACK + netif->link_callback = NULL; +#endif /* LWIP_NETIF_LINK_CALLBACK */ +#if LWIP_IGMP + netif->igmp_mac_filter = NULL; +#endif /* LWIP_IGMP */ +#if LWIP_IPV6 && LWIP_IPV6_MLD + netif->mld_mac_filter = NULL; +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ +#if ENABLE_LOOPBACK + netif->loop_first = NULL; + 
netif->loop_last = NULL; +#endif /* ENABLE_LOOPBACK */ + + /* remember netif specific state information data */ + netif->state = state; + netif->num = netif_num; + netif->input = input; + + NETIF_RESET_HINTS(netif); +#if ENABLE_LOOPBACK && LWIP_LOOPBACK_MAX_PBUFS + netif->loop_cnt_current = 0; +#endif /* ENABLE_LOOPBACK && LWIP_LOOPBACK_MAX_PBUFS */ + +#if LWIP_IPV4 + netif_set_addr(netif, ipaddr, netmask, gw); +#endif /* LWIP_IPV4 */ + + /* call user specified initialization function for netif */ + if (init(netif) != ERR_OK) { + return NULL; + } +#if LWIP_IPV6 && LWIP_ND6_ALLOW_RA_UPDATES + /* Initialize the MTU for IPv6 to the one set by the netif driver. + This can be updated later by RA. */ + netif->mtu6 = netif->mtu; +#endif /* LWIP_IPV6 && LWIP_ND6_ALLOW_RA_UPDATES */ + +#if !LWIP_SINGLE_NETIF + /* Assign a unique netif number in the range [0..254], so that (num+1) can + serve as an interface index that fits in a u8_t. + We assume that the new netif has not yet been added to the list here. + This algorithm is O(n^2), but that should be OK for lwIP. + */ + { + struct netif *netif2; + int num_netifs; + do { + if (netif->num == 255) { + netif->num = 0; + } + num_netifs = 0; + for (netif2 = netif_list; netif2 != NULL; netif2 = netif2->next) { + LWIP_ASSERT("netif already added", netif2 != netif); + num_netifs++; + LWIP_ASSERT("too many netifs, max. supported number is 255", num_netifs <= 255); + if (netif2->num == netif->num) { + netif->num++; + break; + } + } + } while (netif2 != NULL); + } + if (netif->num == 254) { + netif_num = 0; + } else { + netif_num = (u8_t)(netif->num + 1); + } + + /* add this netif to the list */ + netif->next = netif_list; + netif_list = netif; +#endif /* "LWIP_SINGLE_NETIF */ + mib2_netif_added(netif); + +#if LWIP_IGMP + /* start IGMP processing */ + if (netif->flags & NETIF_FLAG_IGMP) { + igmp_start(netif); + } +#endif /* LWIP_IGMP */ + + LWIP_DEBUGF(NETIF_DEBUG, ("netif: added interface %c%c IP", + netif->name[0], netif->name[1])); +#if LWIP_IPV4 + LWIP_DEBUGF(NETIF_DEBUG, (" addr ")); + ip4_addr_debug_print(NETIF_DEBUG, ipaddr); + LWIP_DEBUGF(NETIF_DEBUG, (" netmask ")); + ip4_addr_debug_print(NETIF_DEBUG, netmask); + LWIP_DEBUGF(NETIF_DEBUG, (" gw ")); + ip4_addr_debug_print(NETIF_DEBUG, gw); +#endif /* LWIP_IPV4 */ + LWIP_DEBUGF(NETIF_DEBUG, ("\n")); + + netif_invoke_ext_callback(netif, LWIP_NSC_NETIF_ADDED, NULL); + + return netif; +} + +static void +netif_do_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr) +{ +#if LWIP_TCP + tcp_netif_ip_addr_changed(old_addr, new_addr); +#endif /* LWIP_TCP */ +#if LWIP_UDP + udp_netif_ip_addr_changed(old_addr, new_addr); +#endif /* LWIP_UDP */ +#if LWIP_RAW + raw_netif_ip_addr_changed(old_addr, new_addr); +#endif /* LWIP_RAW */ +} + +#if LWIP_IPV4 +static int +netif_do_set_ipaddr(struct netif *netif, const ip4_addr_t *ipaddr, ip_addr_t *old_addr) +{ + LWIP_ASSERT("invalid pointer", ipaddr != NULL); + LWIP_ASSERT("invalid pointer", old_addr != NULL); + + /* address is actually being changed? 
*/ + if (ip4_addr_cmp(ipaddr, netif_ip4_addr(netif)) == 0) { + ip_addr_t new_addr; + *ip_2_ip4(&new_addr) = *ipaddr; + IP_SET_TYPE_VAL(new_addr, IPADDR_TYPE_V4); + + ip_addr_copy(*old_addr, *netif_ip_addr4(netif)); + + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_set_ipaddr: netif address being changed\n")); + netif_do_ip_addr_changed(old_addr, &new_addr); + + mib2_remove_ip4(netif); + mib2_remove_route_ip4(0, netif); + /* set new IP address to netif */ + ip4_addr_set(ip_2_ip4(&netif->ip_addr), ipaddr); + IP_SET_TYPE_VAL(netif->ip_addr, IPADDR_TYPE_V4); + mib2_add_ip4(netif); + mib2_add_route_ip4(0, netif); + + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4); + + NETIF_STATUS_CALLBACK(netif); + return 1; /* address changed */ + } + return 0; /* address unchanged */ +} + +/** + * @ingroup netif_ip4 + * Change the IP address of a network interface + * + * @param netif the network interface to change + * @param ipaddr the new IP address + * + * @note call netif_set_addr() if you also want to change netmask and + * default gateway + */ +void +netif_set_ipaddr(struct netif *netif, const ip4_addr_t *ipaddr) +{ + ip_addr_t old_addr; + + LWIP_ERROR("netif_set_ipaddr: invalid netif", netif != NULL, return); + + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY4; + } + + LWIP_ASSERT_CORE_LOCKED(); + + if (netif_do_set_ipaddr(netif, ipaddr, &old_addr)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + netif_ext_callback_args_t args; + args.ipv4_changed.old_address = &old_addr; + netif_invoke_ext_callback(netif, LWIP_NSC_IPV4_ADDRESS_CHANGED, &args); +#endif + } +} + +static int +netif_do_set_netmask(struct netif *netif, const ip4_addr_t *netmask, ip_addr_t *old_nm) +{ + /* address is actually being changed? 
*/ + if (ip4_addr_cmp(netmask, netif_ip4_netmask(netif)) == 0) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + LWIP_ASSERT("invalid pointer", old_nm != NULL); + ip_addr_copy(*old_nm, *netif_ip_netmask4(netif)); +#else + LWIP_UNUSED_ARG(old_nm); +#endif + mib2_remove_route_ip4(0, netif); + /* set new netmask to netif */ + ip4_addr_set(ip_2_ip4(&netif->netmask), netmask); + IP_SET_TYPE_VAL(netif->netmask, IPADDR_TYPE_V4); + mib2_add_route_ip4(0, netif); + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: netmask of interface %c%c set to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + netif->name[0], netif->name[1], + ip4_addr1_16(netif_ip4_netmask(netif)), + ip4_addr2_16(netif_ip4_netmask(netif)), + ip4_addr3_16(netif_ip4_netmask(netif)), + ip4_addr4_16(netif_ip4_netmask(netif)))); + return 1; /* netmask changed */ + } + return 0; /* netmask unchanged */ +} + +/** + * @ingroup netif_ip4 + * Change the netmask of a network interface + * + * @param netif the network interface to change + * @param netmask the new netmask + * + * @note call netif_set_addr() if you also want to change ip address and + * default gateway + */ +void +netif_set_netmask(struct netif *netif, const ip4_addr_t *netmask) +{ +#if LWIP_NETIF_EXT_STATUS_CALLBACK + ip_addr_t old_nm_val; + ip_addr_t *old_nm = &old_nm_val; +#else + ip_addr_t *old_nm = NULL; +#endif + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_netmask: invalid netif", netif != NULL, return); + + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (netmask == NULL) { + netmask = IP4_ADDR_ANY4; + } + + if (netif_do_set_netmask(netif, netmask, old_nm)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + netif_ext_callback_args_t args; + args.ipv4_changed.old_netmask = old_nm; + netif_invoke_ext_callback(netif, LWIP_NSC_IPV4_NETMASK_CHANGED, &args); +#endif + } +} + +static int +netif_do_set_gw(struct netif *netif, const ip4_addr_t *gw, ip_addr_t *old_gw) +{ + /* address is actually being changed? 
*/ + if (ip4_addr_cmp(gw, netif_ip4_gw(netif)) == 0) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + LWIP_ASSERT("invalid pointer", old_gw != NULL); + ip_addr_copy(*old_gw, *netif_ip_gw4(netif)); +#else + LWIP_UNUSED_ARG(old_gw); +#endif + + ip4_addr_set(ip_2_ip4(&netif->gw), gw); + IP_SET_TYPE_VAL(netif->gw, IPADDR_TYPE_V4); + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: GW address of interface %c%c set to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + netif->name[0], netif->name[1], + ip4_addr1_16(netif_ip4_gw(netif)), + ip4_addr2_16(netif_ip4_gw(netif)), + ip4_addr3_16(netif_ip4_gw(netif)), + ip4_addr4_16(netif_ip4_gw(netif)))); + return 1; /* gateway changed */ + } + return 0; /* gateway unchanged */ +} + +/** + * @ingroup netif_ip4 + * Change the default gateway for a network interface + * + * @param netif the network interface to change + * @param gw the new default gateway + * + * @note call netif_set_addr() if you also want to change ip address and netmask + */ +void +netif_set_gw(struct netif *netif, const ip4_addr_t *gw) +{ +#if LWIP_NETIF_EXT_STATUS_CALLBACK + ip_addr_t old_gw_val; + ip_addr_t *old_gw = &old_gw_val; +#else + ip_addr_t *old_gw = NULL; +#endif + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_gw: invalid netif", netif != NULL, return); + + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (gw == NULL) { + gw = IP4_ADDR_ANY4; + } + + if (netif_do_set_gw(netif, gw, old_gw)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + netif_ext_callback_args_t args; + args.ipv4_changed.old_gw = old_gw; + netif_invoke_ext_callback(netif, LWIP_NSC_IPV4_GATEWAY_CHANGED, &args); +#endif + } +} + +/** + * @ingroup netif_ip4 + * Change IP address configuration for a network interface (including netmask + * and default gateway). 
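A brief illustrative sketch of calling netif_set_addr() as implemented below; the function name and addresses are placeholders:

#include "lwip/netif.h"

void my_reconfigure(struct netif *n)      /* placeholder name */
{
  ip4_addr_t ip, mask, gw;

  IP4_ADDR(&ip,   10, 0, 0, 5);           /* example addresses only */
  IP4_ADDR(&mask, 255, 255, 255, 0);
  IP4_ADDR(&gw,   10, 0, 0, 1);
  netif_set_addr(n, &ip, &mask, &gw);     /* netmask/gw are applied before the address */

  /* passing NULL (IPv4 ANY) for all three removes the configuration again */
  netif_set_addr(n, NULL, NULL, NULL);
}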
+ * + * @param netif the network interface to change + * @param ipaddr the new IP address + * @param netmask the new netmask + * @param gw the new default gateway + */ +void +netif_set_addr(struct netif *netif, const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, + const ip4_addr_t *gw) +{ +#if LWIP_NETIF_EXT_STATUS_CALLBACK + netif_nsc_reason_t change_reason = LWIP_NSC_NONE; + netif_ext_callback_args_t cb_args; + ip_addr_t old_nm_val; + ip_addr_t old_gw_val; + ip_addr_t *old_nm = &old_nm_val; + ip_addr_t *old_gw = &old_gw_val; +#else + ip_addr_t *old_nm = NULL; + ip_addr_t *old_gw = NULL; +#endif + ip_addr_t old_addr; + int remove; + + LWIP_ASSERT_CORE_LOCKED(); + + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY4; + } + if (netmask == NULL) { + netmask = IP4_ADDR_ANY4; + } + if (gw == NULL) { + gw = IP4_ADDR_ANY4; + } + + remove = ip4_addr_isany(ipaddr); + if (remove) { + /* when removing an address, we have to remove it *before* changing netmask/gw + to ensure that tcp RST segment can be sent correctly */ + if (netif_do_set_ipaddr(netif, ipaddr, &old_addr)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + change_reason |= LWIP_NSC_IPV4_ADDRESS_CHANGED; + cb_args.ipv4_changed.old_address = &old_addr; +#endif + } + } + if (netif_do_set_netmask(netif, netmask, old_nm)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + change_reason |= LWIP_NSC_IPV4_NETMASK_CHANGED; + cb_args.ipv4_changed.old_netmask = old_nm; +#endif + } + if (netif_do_set_gw(netif, gw, old_gw)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + change_reason |= LWIP_NSC_IPV4_GATEWAY_CHANGED; + cb_args.ipv4_changed.old_gw = old_gw; +#endif + } + if (!remove) { + /* set ipaddr last to ensure netmask/gw have been set when status callback is called */ + if (netif_do_set_ipaddr(netif, ipaddr, &old_addr)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + change_reason |= LWIP_NSC_IPV4_ADDRESS_CHANGED; + cb_args.ipv4_changed.old_address = &old_addr; +#endif + } + } + +#if LWIP_NETIF_EXT_STATUS_CALLBACK + if (change_reason != LWIP_NSC_NONE) { + change_reason |= LWIP_NSC_IPV4_SETTINGS_CHANGED; + netif_invoke_ext_callback(netif, change_reason, &cb_args); + } +#endif +} +#endif /* LWIP_IPV4*/ + +/** + * @ingroup netif + * Remove a network interface from the list of lwIP netifs. + * + * @param netif the network interface to remove + */ +void +netif_remove(struct netif *netif) +{ +#if LWIP_IPV6 + int i; +#endif + + LWIP_ASSERT_CORE_LOCKED(); + + if (netif == NULL) { + return; + } + + netif_invoke_ext_callback(netif, LWIP_NSC_NETIF_REMOVED, NULL); + +#if LWIP_IPV4 + if (!ip4_addr_isany_val(*netif_ip4_addr(netif))) { + netif_do_ip_addr_changed(netif_ip_addr4(netif), NULL); + } + +#if LWIP_IGMP + /* stop IGMP processing */ + if (netif->flags & NETIF_FLAG_IGMP) { + igmp_stop(netif); + } +#endif /* LWIP_IGMP */ +#endif /* LWIP_IPV4*/ + +#if LWIP_IPV6 + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i))) { + netif_do_ip_addr_changed(netif_ip_addr6(netif, i), NULL); + } + } +#if LWIP_IPV6_MLD + /* stop MLD processing */ + mld6_stop(netif); +#endif /* LWIP_IPV6_MLD */ +#endif /* LWIP_IPV6 */ + if (netif_is_up(netif)) { + /* set netif down before removing (call callback function) */ + netif_set_down(netif); + } + + mib2_remove_ip4(netif); + + /* this netif is default? */ + if (netif_default == netif) { + /* reset default netif */ + netif_set_default(NULL); + } +#if !LWIP_SINGLE_NETIF + /* is it the first netif? 
*/ + if (netif_list == netif) { + netif_list = netif->next; + } else { + /* look for netif further down the list */ + struct netif *tmp_netif; + NETIF_FOREACH(tmp_netif) { + if (tmp_netif->next == netif) { + tmp_netif->next = netif->next; + break; + } + } + if (tmp_netif == NULL) { + return; /* netif is not on the list */ + } + } +#endif /* !LWIP_SINGLE_NETIF */ + mib2_netif_removed(netif); +#if LWIP_NETIF_REMOVE_CALLBACK + if (netif->remove_callback) { + netif->remove_callback(netif); + } +#endif /* LWIP_NETIF_REMOVE_CALLBACK */ + LWIP_DEBUGF( NETIF_DEBUG, ("netif_remove: removed netif\n") ); +} + +/** + * @ingroup netif + * Set a network interface as the default network interface + * (used to output all packets for which no specific route is found) + * + * @param netif the default network interface + */ +void +netif_set_default(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + if (netif == NULL) { + /* remove default route */ + mib2_remove_route_ip4(1, netif); + } else { + /* install default route */ + mib2_add_route_ip4(1, netif); + } + netif_default = netif; + LWIP_DEBUGF(NETIF_DEBUG, ("netif: setting default interface %c%c\n", + netif ? netif->name[0] : '\'', netif ? netif->name[1] : '\'')); +} + +/** + * @ingroup netif + * Bring an interface up, available for processing + * traffic. + */ +void +netif_set_up(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_up: invalid netif", netif != NULL, return); + + if (!(netif->flags & NETIF_FLAG_UP)) { + netif_set_flags(netif, NETIF_FLAG_UP); + + MIB2_COPY_SYSUPTIME_TO(&netif->ts); + + NETIF_STATUS_CALLBACK(netif); + +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.status_changed.state = 1; + netif_invoke_ext_callback(netif, LWIP_NSC_STATUS_CHANGED, &args); + } +#endif + + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4 | NETIF_REPORT_TYPE_IPV6); +#if LWIP_IPV6 + nd6_restart_netif(netif); +#endif /* LWIP_IPV6 */ + } +} + +/** Send ARP/IGMP/MLD/RS events, e.g. on link-up/netif-up or addr-change + */ +static void +netif_issue_reports(struct netif *netif, u8_t report_type) +{ + LWIP_ASSERT("netif_issue_reports: invalid netif", netif != NULL); + + /* Only send reports when both link and admin states are up */ + if (!(netif->flags & NETIF_FLAG_LINK_UP) || + !(netif->flags & NETIF_FLAG_UP)) { + return; + } + +#if LWIP_IPV4 + if ((report_type & NETIF_REPORT_TYPE_IPV4) && + !ip4_addr_isany_val(*netif_ip4_addr(netif))) { +#if LWIP_ARP + /* For Ethernet network interfaces, we would like to send a "gratuitous ARP" */ + if (netif->flags & (NETIF_FLAG_ETHARP)) { + etharp_gratuitous(netif); + } +#endif /* LWIP_ARP */ + +#if LWIP_IGMP + /* resend IGMP memberships */ + if (netif->flags & NETIF_FLAG_IGMP) { + igmp_report_groups(netif); + } +#endif /* LWIP_IGMP */ + } +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 + if (report_type & NETIF_REPORT_TYPE_IPV6) { +#if LWIP_IPV6_MLD + /* send mld memberships */ + mld6_report_groups(netif); +#endif /* LWIP_IPV6_MLD */ + } +#endif /* LWIP_IPV6 */ +} + +/** + * @ingroup netif + * Bring an interface down, disabling any traffic processing. 
+ */ +void +netif_set_down(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_down: invalid netif", netif != NULL, return); + + if (netif->flags & NETIF_FLAG_UP) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.status_changed.state = 0; + netif_invoke_ext_callback(netif, LWIP_NSC_STATUS_CHANGED, &args); + } +#endif + + netif_clear_flags(netif, NETIF_FLAG_UP); + MIB2_COPY_SYSUPTIME_TO(&netif->ts); + +#if LWIP_IPV4 && LWIP_ARP + if (netif->flags & NETIF_FLAG_ETHARP) { + etharp_cleanup_netif(netif); + } +#endif /* LWIP_IPV4 && LWIP_ARP */ + +#if LWIP_IPV6 + nd6_cleanup_netif(netif); +#endif /* LWIP_IPV6 */ + + NETIF_STATUS_CALLBACK(netif); + } +} + +#if LWIP_NETIF_STATUS_CALLBACK +/** + * @ingroup netif + * Set callback to be called when interface is brought up/down or address is changed while up + */ +void +netif_set_status_callback(struct netif *netif, netif_status_callback_fn status_callback) +{ + LWIP_ASSERT_CORE_LOCKED(); + + if (netif) { + netif->status_callback = status_callback; + } +} +#endif /* LWIP_NETIF_STATUS_CALLBACK */ + +#if LWIP_NETIF_REMOVE_CALLBACK +/** + * @ingroup netif + * Set callback to be called when the interface has been removed + */ +void +netif_set_remove_callback(struct netif *netif, netif_status_callback_fn remove_callback) +{ + LWIP_ASSERT_CORE_LOCKED(); + + if (netif) { + netif->remove_callback = remove_callback; + } +} +#endif /* LWIP_NETIF_REMOVE_CALLBACK */ + +/** + * @ingroup netif + * Called by a driver when its link goes up + */ +void +netif_set_link_up(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_link_up: invalid netif", netif != NULL, return); + + if (!(netif->flags & NETIF_FLAG_LINK_UP)) { + netif_set_flags(netif, NETIF_FLAG_LINK_UP); + +#if LWIP_DHCP + dhcp_network_changed(netif); +#endif /* LWIP_DHCP */ + +#if LWIP_AUTOIP + autoip_network_changed(netif); +#endif /* LWIP_AUTOIP */ + + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4 | NETIF_REPORT_TYPE_IPV6); +#if LWIP_IPV6 + nd6_restart_netif(netif); +#endif /* LWIP_IPV6 */ + + NETIF_LINK_CALLBACK(netif); +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.link_changed.state = 1; + netif_invoke_ext_callback(netif, LWIP_NSC_LINK_CHANGED, &args); + } +#endif + } +} + +/** + * @ingroup netif + * Called by a driver when its link goes down + */ +void +netif_set_link_down(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_link_down: invalid netif", netif != NULL, return); + + if (netif->flags & NETIF_FLAG_LINK_UP) { + netif_clear_flags(netif, NETIF_FLAG_LINK_UP); + NETIF_LINK_CALLBACK(netif); +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.link_changed.state = 0; + netif_invoke_ext_callback(netif, LWIP_NSC_LINK_CHANGED, &args); + } +#endif + } +} + +#if LWIP_NETIF_LINK_CALLBACK +/** + * @ingroup netif + * Set callback to be called when link is brought up/down + */ +void +netif_set_link_callback(struct netif *netif, netif_status_callback_fn link_callback) +{ + LWIP_ASSERT_CORE_LOCKED(); + + if (netif) { + netif->link_callback = link_callback; + } +} +#endif /* LWIP_NETIF_LINK_CALLBACK */ + +#if ENABLE_LOOPBACK +/** + * @ingroup netif + * Send an IP packet to be received on the same netif (loopif-like). + * The pbuf is simply copied and handed back to netif->input. + * In multithreaded mode, this is done directly since netif->input must put + * the packet on a queue. 
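A sketch of how a port might wire the link-state and status entry points above to its driver (illustrative only); my_status_cb and phy_has_link are placeholders, and the status callback needs LWIP_NETIF_STATUS_CALLBACK=1:

#include "lwip/netif.h"

static void my_status_cb(struct netif *n)              /* placeholder callback */
{
  LWIP_UNUSED_ARG(n);   /* e.g. log netif_ip4_addr(n) once the interface is up */
}

void my_link_hooks(struct netif *n, int phy_has_link)  /* phy_has_link: placeholder PHY state */
{
  netif_set_status_callback(n, my_status_cb);          /* needs LWIP_NETIF_STATUS_CALLBACK=1 */
  if (phy_has_link) {
    netif_set_link_up(n);                              /* driver reports carrier present */
  } else {
    netif_set_link_down(n);
  }
}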
+ * In callback mode, the packet is put on an internal queue and is fed to + * netif->input by netif_poll(). + * + * @param netif the lwip network interface structure + * @param p the (IP) packet to 'send' + * @return ERR_OK if the packet has been sent + * ERR_MEM if the pbuf used to copy the packet couldn't be allocated + */ +err_t +netif_loop_output(struct netif *netif, struct pbuf *p) +{ + struct pbuf *r; + err_t err; + struct pbuf *last; +#if LWIP_LOOPBACK_MAX_PBUFS + u16_t clen = 0; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + /* If we have a loopif, SNMP counters are adjusted for it, + * if not they are adjusted for 'netif'. */ +#if MIB2_STATS +#if LWIP_HAVE_LOOPIF + struct netif *stats_if = &loop_netif; +#else /* LWIP_HAVE_LOOPIF */ + struct netif *stats_if = netif; +#endif /* LWIP_HAVE_LOOPIF */ +#endif /* MIB2_STATS */ +#if LWIP_NETIF_LOOPBACK_MULTITHREADING + u8_t schedule_poll = 0; +#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_ASSERT("netif_loop_output: invalid netif", netif != NULL); + LWIP_ASSERT("netif_loop_output: invalid pbuf", p != NULL); + + /* Allocate a new pbuf */ + r = pbuf_alloc(PBUF_LINK, p->tot_len, PBUF_RAM); + if (r == NULL) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); + return ERR_MEM; + } +#if LWIP_LOOPBACK_MAX_PBUFS + clen = pbuf_clen(r); + /* check for overflow or too many pbuf on queue */ + if (((netif->loop_cnt_current + clen) < netif->loop_cnt_current) || + ((netif->loop_cnt_current + clen) > LWIP_MIN(LWIP_LOOPBACK_MAX_PBUFS, 0xFFFF))) { + pbuf_free(r); + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); + return ERR_MEM; + } + netif->loop_cnt_current = (u16_t)(netif->loop_cnt_current + clen); +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + + /* Copy the whole pbuf queue p into the single pbuf r */ + if ((err = pbuf_copy(r, p)) != ERR_OK) { + pbuf_free(r); + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); + return err; + } + + /* Put the packet on a linked list which gets emptied through calling + netif_poll(). 
*/ + + /* let last point to the last pbuf in chain r */ + for (last = r; last->next != NULL; last = last->next) { + /* nothing to do here, just get to the last pbuf */ + } + + SYS_ARCH_PROTECT(lev); + if (netif->loop_first != NULL) { + LWIP_ASSERT("if first != NULL, last must also be != NULL", netif->loop_last != NULL); + netif->loop_last->next = r; + netif->loop_last = last; + } else { + netif->loop_first = r; + netif->loop_last = last; +#if LWIP_NETIF_LOOPBACK_MULTITHREADING + /* No existing packets queued, schedule poll */ + schedule_poll = 1; +#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ + } + SYS_ARCH_UNPROTECT(lev); + + LINK_STATS_INC(link.xmit); + MIB2_STATS_NETIF_ADD(stats_if, ifoutoctets, p->tot_len); + MIB2_STATS_NETIF_INC(stats_if, ifoutucastpkts); + +#if LWIP_NETIF_LOOPBACK_MULTITHREADING + /* For multithreading environment, schedule a call to netif_poll */ + if (schedule_poll) { + tcpip_try_callback((tcpip_callback_fn)netif_poll, netif); + } +#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ + + return ERR_OK; +} + +#if LWIP_HAVE_LOOPIF +#if LWIP_IPV4 +static err_t +netif_loop_output_ipv4(struct netif *netif, struct pbuf *p, const ip4_addr_t *addr) +{ + LWIP_UNUSED_ARG(addr); + return netif_loop_output(netif, p); +} +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +static err_t +netif_loop_output_ipv6(struct netif *netif, struct pbuf *p, const ip6_addr_t *addr) +{ + LWIP_UNUSED_ARG(addr); + return netif_loop_output(netif, p); +} +#endif /* LWIP_IPV6 */ +#endif /* LWIP_HAVE_LOOPIF */ + + +/** + * Call netif_poll() in the main loop of your application. This is to prevent + * reentering non-reentrant functions like tcp_input(). Packets passed to + * netif_loop_output() are put on a list that is passed to netif->input() by + * netif_poll(). + */ +void +netif_poll(struct netif *netif) +{ + /* If we have a loopif, SNMP counters are adjusted for it, + * if not they are adjusted for 'netif'. */ +#if MIB2_STATS +#if LWIP_HAVE_LOOPIF + struct netif *stats_if = &loop_netif; +#else /* LWIP_HAVE_LOOPIF */ + struct netif *stats_if = netif; +#endif /* LWIP_HAVE_LOOPIF */ +#endif /* MIB2_STATS */ + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_ASSERT("netif_poll: invalid netif", netif != NULL); + + /* Get a packet from the list. With SYS_LIGHTWEIGHT_PROT=1, this is protected */ + SYS_ARCH_PROTECT(lev); + while (netif->loop_first != NULL) { + struct pbuf *in, *in_end; +#if LWIP_LOOPBACK_MAX_PBUFS + u8_t clen = 1; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + + in = in_end = netif->loop_first; + while (in_end->len != in_end->tot_len) { + LWIP_ASSERT("bogus pbuf: len != tot_len but next == NULL!", in_end->next != NULL); + in_end = in_end->next; +#if LWIP_LOOPBACK_MAX_PBUFS + clen++; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + } +#if LWIP_LOOPBACK_MAX_PBUFS + /* adjust the number of pbufs on queue */ + LWIP_ASSERT("netif->loop_cnt_current underflow", + ((netif->loop_cnt_current - clen) < netif->loop_cnt_current)); + netif->loop_cnt_current = (u16_t)(netif->loop_cnt_current - clen); +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + + /* 'in_end' now points to the last pbuf from 'in' */ + if (in_end == netif->loop_last) { + /* this was the last pbuf in the list */ + netif->loop_first = netif->loop_last = NULL; + } else { + /* pop the pbuf off the list */ + netif->loop_first = in_end->next; + LWIP_ASSERT("should not be null since first != last!", netif->loop_first != NULL); + } + /* De-queue the pbuf from its successors on the 'loop_' list. 
*/ + in_end->next = NULL; + SYS_ARCH_UNPROTECT(lev); + + in->if_idx = netif_get_index(netif); + + LINK_STATS_INC(link.recv); + MIB2_STATS_NETIF_ADD(stats_if, ifinoctets, in->tot_len); + MIB2_STATS_NETIF_INC(stats_if, ifinucastpkts); + /* loopback packets are always IP packets! */ + if (ip_input(in, netif) != ERR_OK) { + pbuf_free(in); + } + SYS_ARCH_PROTECT(lev); + } + SYS_ARCH_UNPROTECT(lev); +} + +#if !LWIP_NETIF_LOOPBACK_MULTITHREADING +/** + * Calls netif_poll() for every netif on the netif_list. + */ +void +netif_poll_all(void) +{ + struct netif *netif; + /* loop through netifs */ + NETIF_FOREACH(netif) { + netif_poll(netif); + } +} +#endif /* !LWIP_NETIF_LOOPBACK_MULTITHREADING */ +#endif /* ENABLE_LOOPBACK */ + +#if LWIP_NUM_NETIF_CLIENT_DATA > 0 +/** + * @ingroup netif_cd + * Allocate an index to store data in client_data member of struct netif. + * Returned value is an index in mentioned array. + * @see LWIP_NUM_NETIF_CLIENT_DATA + */ +u8_t +netif_alloc_client_data_id(void) +{ + u8_t result = netif_client_id; + netif_client_id++; + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_NUM_NETIF_CLIENT_DATA > 256 +#error LWIP_NUM_NETIF_CLIENT_DATA must be <= 256 +#endif + LWIP_ASSERT("Increase LWIP_NUM_NETIF_CLIENT_DATA in lwipopts.h", result < LWIP_NUM_NETIF_CLIENT_DATA); + return (u8_t)(result + LWIP_NETIF_CLIENT_DATA_INDEX_MAX); +} +#endif + +#if LWIP_IPV6 +/** + * @ingroup netif_ip6 + * Change an IPv6 address of a network interface + * + * @param netif the network interface to change + * @param addr_idx index of the IPv6 address + * @param addr6 the new IPv6 address + * + * @note call netif_ip6_addr_set_state() to set the address valid/temptative + */ +void +netif_ip6_addr_set(struct netif *netif, s8_t addr_idx, const ip6_addr_t *addr6) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("netif_ip6_addr_set: invalid netif", netif != NULL); + LWIP_ASSERT("netif_ip6_addr_set: invalid addr6", addr6 != NULL); + + netif_ip6_addr_set_parts(netif, addr_idx, addr6->addr[0], addr6->addr[1], + addr6->addr[2], addr6->addr[3]); +} + +/* + * Change an IPv6 address of a network interface (internal version taking 4 * u32_t) + * + * @param netif the network interface to change + * @param addr_idx index of the IPv6 address + * @param i0 word0 of the new IPv6 address + * @param i1 word1 of the new IPv6 address + * @param i2 word2 of the new IPv6 address + * @param i3 word3 of the new IPv6 address + */ +void +netif_ip6_addr_set_parts(struct netif *netif, s8_t addr_idx, u32_t i0, u32_t i1, u32_t i2, u32_t i3) +{ + ip_addr_t old_addr; + ip_addr_t new_ipaddr; + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("invalid index", addr_idx < LWIP_IPV6_NUM_ADDRESSES); + + ip6_addr_copy(*ip_2_ip6(&old_addr), *netif_ip6_addr(netif, addr_idx)); + IP_SET_TYPE_VAL(old_addr, IPADDR_TYPE_V6); + + /* address is actually being changed? */ + if ((ip_2_ip6(&old_addr)->addr[0] != i0) || (ip_2_ip6(&old_addr)->addr[1] != i1) || + (ip_2_ip6(&old_addr)->addr[2] != i2) || (ip_2_ip6(&old_addr)->addr[3] != i3)) { + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_ip6_addr_set: netif address being changed\n")); + + IP_ADDR6(&new_ipaddr, i0, i1, i2, i3); + ip6_addr_assign_zone(ip_2_ip6(&new_ipaddr), IP6_UNICAST, netif); + + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, addr_idx))) { + netif_do_ip_addr_changed(netif_ip_addr6(netif, addr_idx), &new_ipaddr); + } + /* @todo: remove/readd mib2 ip6 entries? 
*/ + + ip_addr_copy(netif->ip6_addr[addr_idx], new_ipaddr); + + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, addr_idx))) { + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV6); + NETIF_STATUS_CALLBACK(netif); + } + +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.ipv6_set.addr_index = addr_idx; + args.ipv6_set.old_address = &old_addr; + netif_invoke_ext_callback(netif, LWIP_NSC_IPV6_SET, &args); + } +#endif + } + + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: IPv6 address %d of interface %c%c set to %s/0x%"X8_F"\n", + addr_idx, netif->name[0], netif->name[1], ip6addr_ntoa(netif_ip6_addr(netif, addr_idx)), + netif_ip6_addr_state(netif, addr_idx))); +} + +/** + * @ingroup netif_ip6 + * Change the state of an IPv6 address of a network interface + * (INVALID, TEMPTATIVE, PREFERRED, DEPRECATED, where TEMPTATIVE + * includes the number of checks done, see ip6_addr.h) + * + * @param netif the network interface to change + * @param addr_idx index of the IPv6 address + * @param state the new IPv6 address state + */ +void +netif_ip6_addr_set_state(struct netif *netif, s8_t addr_idx, u8_t state) +{ + u8_t old_state; + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("invalid index", addr_idx < LWIP_IPV6_NUM_ADDRESSES); + + old_state = netif_ip6_addr_state(netif, addr_idx); + /* state is actually being changed? */ + if (old_state != state) { + u8_t old_valid = old_state & IP6_ADDR_VALID; + u8_t new_valid = state & IP6_ADDR_VALID; + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_ip6_addr_set_state: netif address state being changed\n")); + +#if LWIP_IPV6_MLD + /* Reevaluate solicited-node multicast group membership. */ + if (netif->flags & NETIF_FLAG_MLD6) { + nd6_adjust_mld_membership(netif, addr_idx, state); + } +#endif /* LWIP_IPV6_MLD */ + + if (old_valid && !new_valid) { + /* address about to be removed by setting invalid */ + netif_do_ip_addr_changed(netif_ip_addr6(netif, addr_idx), NULL); + /* @todo: remove mib2 ip6 entries? */ + } + netif->ip6_addr_state[addr_idx] = state; + + if (!old_valid && new_valid) { + /* address added by setting valid */ + /* This is a good moment to check that the address is properly zoned. */ + IP6_ADDR_ZONECHECK_NETIF(netif_ip6_addr(netif, addr_idx), netif); + /* @todo: add mib2 ip6 entries? */ + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV6); + } + if ((old_state & ~IP6_ADDR_TENTATIVE_COUNT_MASK) != + (state & ~IP6_ADDR_TENTATIVE_COUNT_MASK)) { + /* address state has changed -> call the callback function */ + NETIF_STATUS_CALLBACK(netif); + } + +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.ipv6_addr_state_changed.addr_index = addr_idx; + args.ipv6_addr_state_changed.old_state = old_state; + args.ipv6_addr_state_changed.address = netif_ip_addr6(netif, addr_idx); + netif_invoke_ext_callback(netif, LWIP_NSC_IPV6_ADDR_STATE_CHANGED, &args); + } +#endif + } + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: IPv6 address %d of interface %c%c set to %s/0x%"X8_F"\n", + addr_idx, netif->name[0], netif->name[1], ip6addr_ntoa(netif_ip6_addr(netif, addr_idx)), + netif_ip6_addr_state(netif, addr_idx))); +} + +/** + * Checks if a specific local address is present on the netif and returns its + * index. Depending on its state, it may or may not be assigned to the + * interface (as per RFC terminology). + * + * The given address may or may not be zoned (i.e., have a zone index other + * than IP6_NO_ZONE). 
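An illustrative sketch of IPv6 address setup with the helpers defined below (netif_create_ip6_linklocal_address() and netif_add_ip6_address()); the function name is a placeholder, LWIP_IPV6=1 is assumed, and 2001:db8::1 is a documentation-only address:

#include "lwip/netif.h"
#include "lwip/ip6_addr.h"

void my_ipv6_setup(struct netif *n)                /* placeholder name */
{
  ip6_addr_t global;
  s8_t idx;

  netif_create_ip6_linklocal_address(n, 1);        /* slot 0, derived from the 48-bit MAC */

  ip6addr_aton("2001:db8::1", &global);            /* documentation prefix, example only */
  if (netif_add_ip6_address(n, &global, &idx) == ERR_OK) {
    LWIP_DEBUGF(NETIF_DEBUG, ("added IPv6 address at index %d (tentative until DAD passes)\n", idx));
  }
}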
If the address is zoned, it must have the correct zone + * for the given netif, or no match will be found. + * + * @param netif the netif to check + * @param ip6addr the IPv6 address to find + * @return >= 0: address found, this is its index + * -1: address not found on this netif + */ +s8_t +netif_get_ip6_addr_match(struct netif *netif, const ip6_addr_t *ip6addr) +{ + s8_t i; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("netif_get_ip6_addr_match: invalid netif", netif != NULL); + LWIP_ASSERT("netif_get_ip6_addr_match: invalid ip6addr", ip6addr != NULL); + +#if LWIP_IPV6_SCOPES + if (ip6_addr_has_zone(ip6addr) && !ip6_addr_test_zone(ip6addr, netif)) { + return -1; /* wrong zone, no match */ + } +#endif /* LWIP_IPV6_SCOPES */ + + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp_zoneless(netif_ip6_addr(netif, i), ip6addr)) { + return i; + } + } + return -1; +} + +/** + * @ingroup netif_ip6 + * Create a link-local IPv6 address on a netif (stored in slot 0) + * + * @param netif the netif to create the address on + * @param from_mac_48bit if != 0, assume hwadr is a 48-bit MAC address (std conversion) + * if == 0, use hwaddr directly as interface ID + */ +void +netif_create_ip6_linklocal_address(struct netif *netif, u8_t from_mac_48bit) +{ + u8_t i, addr_index; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("netif_create_ip6_linklocal_address: invalid netif", netif != NULL); + + /* Link-local prefix. */ + ip_2_ip6(&netif->ip6_addr[0])->addr[0] = PP_HTONL(0xfe800000ul); + ip_2_ip6(&netif->ip6_addr[0])->addr[1] = 0; + + /* Generate interface ID. */ + if (from_mac_48bit) { + /* Assume hwaddr is a 48-bit IEEE 802 MAC. Convert to EUI-64 address. Complement Group bit. */ + ip_2_ip6(&netif->ip6_addr[0])->addr[2] = lwip_htonl((((u32_t)(netif->hwaddr[0] ^ 0x02)) << 24) | + ((u32_t)(netif->hwaddr[1]) << 16) | + ((u32_t)(netif->hwaddr[2]) << 8) | + (0xff)); + ip_2_ip6(&netif->ip6_addr[0])->addr[3] = lwip_htonl((u32_t)(0xfeul << 24) | + ((u32_t)(netif->hwaddr[3]) << 16) | + ((u32_t)(netif->hwaddr[4]) << 8) | + (netif->hwaddr[5])); + } else { + /* Use hwaddr directly as interface ID. */ + ip_2_ip6(&netif->ip6_addr[0])->addr[2] = 0; + ip_2_ip6(&netif->ip6_addr[0])->addr[3] = 0; + + addr_index = 3; + for (i = 0; (i < 8) && (i < netif->hwaddr_len); i++) { + if (i == 4) { + addr_index--; + } + ip_2_ip6(&netif->ip6_addr[0])->addr[addr_index] |= lwip_htonl(((u32_t)(netif->hwaddr[netif->hwaddr_len - i - 1])) << (8 * (i & 0x03))); + } + } + + /* Set a link-local zone. Even though the zone is implied by the owning + * netif, setting the zone anyway has two important conceptual advantages: + * 1) it avoids the need for a ton of exceptions in internal code, allowing + * e.g. ip6_addr_cmp() to be used on local addresses; + * 2) the properly zoned address is visible externally, e.g. when any outside + * code enumerates available addresses or uses one to bind a socket. + * Any external code unaware of address scoping is likely to just ignore the + * zone field, so this should not create any compatibility problems. */ + ip6_addr_assign_zone(ip_2_ip6(&netif->ip6_addr[0]), IP6_UNICAST, netif); + + /* Set address state. */ +#if LWIP_IPV6_DUP_DETECT_ATTEMPTS + /* Will perform duplicate address detection (DAD). */ + netif_ip6_addr_set_state(netif, 0, IP6_ADDR_TENTATIVE); +#else + /* Consider address valid. 
*/ + netif_ip6_addr_set_state(netif, 0, IP6_ADDR_PREFERRED); +#endif /* LWIP_IPV6_AUTOCONFIG */ +} + +/** + * @ingroup netif_ip6 + * This function allows for the easy addition of a new IPv6 address to an interface. + * It takes care of finding an empty slot and then sets the address tentative + * (to make sure that all the subsequent processing happens). + * + * @param netif netif to add the address on + * @param ip6addr address to add + * @param chosen_idx if != NULL, the chosen IPv6 address index will be stored here + */ +err_t +netif_add_ip6_address(struct netif *netif, const ip6_addr_t *ip6addr, s8_t *chosen_idx) +{ + s8_t i; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("netif_add_ip6_address: invalid netif", netif != NULL); + LWIP_ASSERT("netif_add_ip6_address: invalid ip6addr", ip6addr != NULL); + + i = netif_get_ip6_addr_match(netif, ip6addr); + if (i >= 0) { + /* Address already added */ + if (chosen_idx != NULL) { + *chosen_idx = i; + } + return ERR_OK; + } + + /* Find a free slot. The first one is reserved for link-local addresses. */ + for (i = ip6_addr_islinklocal(ip6addr) ? 0 : 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isinvalid(netif_ip6_addr_state(netif, i))) { + ip_addr_copy_from_ip6(netif->ip6_addr[i], *ip6addr); + ip6_addr_assign_zone(ip_2_ip6(&netif->ip6_addr[i]), IP6_UNICAST, netif); + netif_ip6_addr_set_state(netif, i, IP6_ADDR_TENTATIVE); + if (chosen_idx != NULL) { + *chosen_idx = i; + } + return ERR_OK; + } + } + + if (chosen_idx != NULL) { + *chosen_idx = -1; + } + return ERR_VAL; +} + +/** Dummy IPv6 output function for netifs not supporting IPv6 + */ +static err_t +netif_null_output_ip6(struct netif *netif, struct pbuf *p, const ip6_addr_t *ipaddr) +{ + LWIP_UNUSED_ARG(netif); + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(ipaddr); + + return ERR_IF; +} +#endif /* LWIP_IPV6 */ + +#if LWIP_IPV4 +/** Dummy IPv4 output function for netifs not supporting IPv4 + */ +static err_t +netif_null_output_ip4(struct netif *netif, struct pbuf *p, const ip4_addr_t *ipaddr) +{ + LWIP_UNUSED_ARG(netif); + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(ipaddr); + + return ERR_IF; +} +#endif /* LWIP_IPV4 */ + +/** +* @ingroup netif +* Return the interface index for the netif with name +* or NETIF_NO_INDEX if not found/on error +* +* @param name the name of the netif +*/ +u8_t +netif_name_to_index(const char *name) +{ + struct netif *netif = netif_find(name); + if (netif != NULL) { + return netif_get_index(netif); + } + /* No name found, return invalid index */ + return NETIF_NO_INDEX; +} + +/** +* @ingroup netif +* Return the interface name for the netif matching index +* or NULL if not found/on error +* +* @param idx the interface index of the netif +* @param name char buffer of at least NETIF_NAMESIZE bytes +*/ +char * +netif_index_to_name(u8_t idx, char *name) +{ + struct netif *netif = netif_get_by_index(idx); + + if (netif != NULL) { + name[0] = netif->name[0]; + name[1] = netif->name[1]; + lwip_itoa(&name[2], NETIF_NAMESIZE - 2, netif_index_to_num(idx)); + return name; + } + return NULL; +} + +/** +* @ingroup netif +* Return the interface for the netif index +* +* @param idx index of netif to find +*/ +struct netif * +netif_get_by_index(u8_t idx) +{ + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + + if (idx != NETIF_NO_INDEX) { + NETIF_FOREACH(netif) { + if (idx == netif_get_index(netif)) { + return netif; /* found! 
*/ + } + } + } + + return NULL; +} + +/** + * @ingroup netif + * Find a network interface by searching for its name + * + * @param name the name of the netif (like netif->name) plus concatenated number + * in ascii representation (e.g. 'en0') + */ +struct netif * +netif_find(const char *name) +{ + struct netif *netif; + u8_t num; + + LWIP_ASSERT_CORE_LOCKED(); + + if (name == NULL) { + return NULL; + } + + num = (u8_t)atoi(&name[2]); + + NETIF_FOREACH(netif) { + if (num == netif->num && + name[0] == netif->name[0] && + name[1] == netif->name[1]) { + LWIP_DEBUGF(NETIF_DEBUG, ("netif_find: found %c%c\n", name[0], name[1])); + return netif; + } + } + LWIP_DEBUGF(NETIF_DEBUG, ("netif_find: didn't find %c%c\n", name[0], name[1])); + return NULL; +} + +#if LWIP_NETIF_EXT_STATUS_CALLBACK +/** + * @ingroup netif + * Add extended netif events listener + * @param callback pointer to listener structure + * @param fn callback function + */ +void +netif_add_ext_callback(netif_ext_callback_t *callback, netif_ext_callback_fn fn) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("callback must be != NULL", callback != NULL); + LWIP_ASSERT("fn must be != NULL", fn != NULL); + + callback->callback_fn = fn; + callback->next = ext_callback; + ext_callback = callback; +} + +/** + * @ingroup netif + * Remove extended netif events listener + * @param callback pointer to listener structure + */ +void +netif_remove_ext_callback(netif_ext_callback_t* callback) +{ + netif_ext_callback_t *last, *iter; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("callback must be != NULL", callback != NULL); + + if (ext_callback == NULL) { + return; + } + + if (callback == ext_callback) { + ext_callback = ext_callback->next; + } else { + last = ext_callback; + for (iter = ext_callback->next; iter != NULL; last = iter, iter = iter->next) { + if (iter == callback) { + LWIP_ASSERT("last != NULL", last != NULL); + last->next = callback->next; + callback->next = NULL; + return; + } + } + } +} + +/** + * Invoke extended netif status event + * @param netif netif that is affected by change + * @param reason change reason + * @param args depends on reason, see reason description + */ +void +netif_invoke_ext_callback(struct netif *netif, netif_nsc_reason_t reason, const netif_ext_callback_args_t *args) +{ + netif_ext_callback_t *callback = ext_callback; + + LWIP_ASSERT("netif must be != NULL", netif != NULL); + + while (callback != NULL) { + callback->callback_fn(netif, reason, args); + callback = callback->next; + } +} +#endif /* LWIP_NETIF_EXT_STATUS_CALLBACK */ diff --git a/Middlewares/Third_Party/LwIP/src/core/pbuf.c b/Middlewares/Third_Party/LwIP/src/core/pbuf.c new file mode 100644 index 0000000..a209e0c --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/pbuf.c @@ -0,0 +1,1514 @@ +/** + * @file + * Packet buffer management + */ + +/** + * @defgroup pbuf Packet buffers (PBUF) + * @ingroup infrastructure + * + * Packets are built from the pbuf data structure. It supports dynamic + * memory allocation for packet contents or can reference externally + * managed packet contents both in RAM and ROM. Quick allocation for + * incoming packets is provided through pools with fixed sized pbufs. + * + * A packet may span over multiple pbufs, chained as a singly linked + * list. This is called a "pbuf chain". + * + * Multiple packets may be queued, also using this singly linked list. + * This is called a "packet queue". + * + * So, a packet queue consists of one or more pbuf chains, each of + * which consist of one or more pbufs. 
CURRENTLY, PACKET QUEUES ARE + * NOT SUPPORTED!!! Use helper structs to queue multiple packets. + * + * The differences between a pbuf chain and a packet queue are very + * precise but subtle. + * + * The last pbuf of a packet has a ->tot_len field that equals the + * ->len field. It can be found by traversing the list. If the last + * pbuf of a packet has a ->next field other than NULL, more packets + * are on the queue. + * + * Therefore, looping through a pbuf of a single packet has a + * loop end condition (tot_len == p->len), NOT (next == NULL). + * + * Example of custom pbuf usage: @ref zerocopyrx + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#include "lwip/pbuf.h" +#include "lwip/stats.h" +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/sys.h" +#include "lwip/netif.h" +#if LWIP_TCP && TCP_QUEUE_OOSEQ +#include "lwip/priv/tcp_priv.h" +#endif +#if LWIP_CHECKSUM_ON_COPY +#include "lwip/inet_chksum.h" +#endif + +#include <string.h> + +#define SIZEOF_STRUCT_PBUF LWIP_MEM_ALIGN_SIZE(sizeof(struct pbuf)) +/* Since the pool is created in memp, PBUF_POOL_BUFSIZE will be automatically + aligned there. Therefore, PBUF_POOL_BUFSIZE_ALIGNED can be used here.
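A small sketch of the chain-walking rule described above, stopping when q->len equals q->tot_len instead of relying on q->next == NULL (the library helper pbuf_copy_partial() performs essentially this walk); the helper name is a placeholder:

#include <string.h>
#include "lwip/pbuf.h"

u16_t my_pbuf_flatten(const struct pbuf *p, u8_t *buf, u16_t buf_len)  /* placeholder helper */
{
  const struct pbuf *q;
  u16_t copied = 0;

  for (q = p; q != NULL; q = q->next) {
    if (q->len > (u16_t)(buf_len - copied)) {
      break;                                   /* destination buffer too small */
    }
    memcpy(&buf[copied], q->payload, q->len);
    copied = (u16_t)(copied + q->len);
    if (q->len == q->tot_len) {
      break;                                   /* last pbuf of this packet */
    }
  }
  return copied;
}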
*/ +#define PBUF_POOL_BUFSIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE) + +static const struct pbuf * +pbuf_skip_const(const struct pbuf *in, u16_t in_offset, u16_t *out_offset); + +#if !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ +#define PBUF_POOL_IS_EMPTY() +#else /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */ + +#if !NO_SYS +#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL +#include "lwip/tcpip.h" +#define PBUF_POOL_FREE_OOSEQ_QUEUE_CALL() do { \ + if (tcpip_try_callback(pbuf_free_ooseq_callback, NULL) != ERR_OK) { \ + SYS_ARCH_PROTECT(old_level); \ + pbuf_free_ooseq_pending = 0; \ + SYS_ARCH_UNPROTECT(old_level); \ + } } while(0) +#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */ +#endif /* !NO_SYS */ + +volatile u8_t pbuf_free_ooseq_pending; +#define PBUF_POOL_IS_EMPTY() pbuf_pool_is_empty() + +/** + * Attempt to reclaim some memory from queued out-of-sequence TCP segments + * if we run out of pool pbufs. It's better to give priority to new packets + * if we're running out. + * + * This must be done in the correct thread context therefore this function + * can only be used with NO_SYS=0 and through tcpip_callback. + */ +#if !NO_SYS +static +#endif /* !NO_SYS */ +void +pbuf_free_ooseq(void) +{ + struct tcp_pcb *pcb; + SYS_ARCH_SET(pbuf_free_ooseq_pending, 0); + + for (pcb = tcp_active_pcbs; NULL != pcb; pcb = pcb->next) { + if (pcb->ooseq != NULL) { + /** Free the ooseq pbufs of one PCB only */ + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free_ooseq: freeing out-of-sequence pbufs\n")); + tcp_free_ooseq(pcb); + return; + } + } +} + +#if !NO_SYS +/** + * Just a callback function for tcpip_callback() that calls pbuf_free_ooseq(). + */ +static void +pbuf_free_ooseq_callback(void *arg) +{ + LWIP_UNUSED_ARG(arg); + pbuf_free_ooseq(); +} +#endif /* !NO_SYS */ + +/** Queue a call to pbuf_free_ooseq if not already queued. */ +static void +pbuf_pool_is_empty(void) +{ +#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL + SYS_ARCH_SET(pbuf_free_ooseq_pending, 1); +#else /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */ + u8_t queued; + SYS_ARCH_DECL_PROTECT(old_level); + SYS_ARCH_PROTECT(old_level); + queued = pbuf_free_ooseq_pending; + pbuf_free_ooseq_pending = 1; + SYS_ARCH_UNPROTECT(old_level); + + if (!queued) { + /* queue a call to pbuf_free_ooseq if not already queued */ + PBUF_POOL_FREE_OOSEQ_QUEUE_CALL(); + } +#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */ +} +#endif /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */ + +/* Initialize members of struct pbuf after allocation */ +static void +pbuf_init_alloced_pbuf(struct pbuf *p, void *payload, u16_t tot_len, u16_t len, pbuf_type type, u8_t flags) +{ + p->next = NULL; + p->payload = payload; + p->tot_len = tot_len; + p->len = len; + p->type_internal = (u8_t)type; + p->flags = flags; + p->ref = 1; + p->if_idx = NETIF_NO_INDEX; +} + +/** + * @ingroup pbuf + * Allocates a pbuf of the given type (possibly a chain for PBUF_POOL type). + * + * The actual memory allocated for the pbuf is determined by the + * layer at which the pbuf is allocated and the requested size + * (from the size parameter). + * + * @param layer header size + * @param length size of the pbuf's payload + * @param type this parameter decides how and where the pbuf + * should be allocated as follows: + * + * - PBUF_RAM: buffer memory for pbuf is allocated as one large + * chunk. This includes protocol headers as well. + * - PBUF_ROM: no buffer memory is allocated for the pbuf, even for + * protocol headers. 
Additional headers must be prepended + * by allocating another pbuf and chain in to the front of + * the ROM pbuf. It is assumed that the memory used is really + * similar to ROM in that it is immutable and will not be + * changed. Memory which is dynamic should generally not + * be attached to PBUF_ROM pbufs. Use PBUF_REF instead. + * - PBUF_REF: no buffer memory is allocated for the pbuf, even for + * protocol headers. It is assumed that the pbuf is only + * being used in a single thread. If the pbuf gets queued, + * then pbuf_take should be called to copy the buffer. + * - PBUF_POOL: the pbuf is allocated as a pbuf chain, with pbufs from + * the pbuf pool that is allocated during pbuf_init(). + * + * @return the allocated pbuf. If multiple pbufs where allocated, this + * is the first pbuf of a pbuf chain. + */ +struct pbuf * +pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type) +{ + struct pbuf *p; + u16_t offset = (u16_t)layer; + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F")\n", length)); + + switch (type) { + case PBUF_REF: /* fall through */ + case PBUF_ROM: + p = pbuf_alloc_reference(NULL, length, type); + break; + case PBUF_POOL: { + struct pbuf *q, *last; + u16_t rem_len; /* remaining length */ + p = NULL; + last = NULL; + rem_len = length; + do { + u16_t qlen; + q = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL); + if (q == NULL) { + PBUF_POOL_IS_EMPTY(); + /* free chain so far allocated */ + if (p) { + pbuf_free(p); + } + /* bail out unsuccessfully */ + return NULL; + } + qlen = LWIP_MIN(rem_len, (u16_t)(PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset))); + pbuf_init_alloced_pbuf(q, LWIP_MEM_ALIGN((void *)((u8_t *)q + SIZEOF_STRUCT_PBUF + offset)), + rem_len, qlen, type, 0); + LWIP_ASSERT("pbuf_alloc: pbuf q->payload properly aligned", + ((mem_ptr_t)q->payload % MEM_ALIGNMENT) == 0); + LWIP_ASSERT("PBUF_POOL_BUFSIZE must be bigger than MEM_ALIGNMENT", + (PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)) > 0 ); + if (p == NULL) { + /* allocated head of pbuf chain (into p) */ + p = q; + } else { + /* make previous pbuf point to this pbuf */ + last->next = q; + } + last = q; + rem_len = (u16_t)(rem_len - qlen); + offset = 0; + } while (rem_len > 0); + break; + } + case PBUF_RAM: { + u16_t payload_len = (u16_t)(LWIP_MEM_ALIGN_SIZE(offset) + LWIP_MEM_ALIGN_SIZE(length)); + mem_size_t alloc_len = (mem_size_t)(LWIP_MEM_ALIGN_SIZE(SIZEOF_STRUCT_PBUF) + payload_len); + + /* bug #50040: Check for integer overflow when calculating alloc_len */ + if ((payload_len < LWIP_MEM_ALIGN_SIZE(length)) || + (alloc_len < LWIP_MEM_ALIGN_SIZE(length))) { + return NULL; + } + + /* If pbuf is to be allocated in RAM, allocate memory for it. */ + p = (struct pbuf *)mem_malloc(alloc_len); + if (p == NULL) { + return NULL; + } + pbuf_init_alloced_pbuf(p, LWIP_MEM_ALIGN((void *)((u8_t *)p + SIZEOF_STRUCT_PBUF + offset)), + length, length, type, 0); + LWIP_ASSERT("pbuf_alloc: pbuf->payload properly aligned", + ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0); + break; + } + default: + LWIP_ASSERT("pbuf_alloc: erroneous type", 0); + return NULL; + } + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F") == %p\n", length, (void *)p)); + return p; +} + +/** + * @ingroup pbuf + * Allocates a pbuf for referenced data. + * Referenced data can be volatile (PBUF_REF) or long-lived (PBUF_ROM). + * + * The actual memory allocated for the pbuf is determined by the + * layer at which the pbuf is allocated and the requested size + * (from the size parameter). 
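+ *
+ * Minimal sketch (illustrative only; error handling for a failed allocation
+ * is omitted): wrapping a constant, long-lived buffer in a PBUF_ROM pbuf and
+ * concatenating it behind a separately allocated header pbuf, so the payload
+ * itself is never copied:
+ * @code
+ * static const char payload[] = "hello";
+ *
+ * struct pbuf *hdr  = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_RAM);
+ * struct pbuf *body = pbuf_alloc_reference(LWIP_CONST_CAST(void *, payload),
+ *                                          sizeof(payload) - 1, PBUF_ROM);
+ * if ((hdr != NULL) && (body != NULL)) {
+ *   pbuf_cat(hdr, body);
+ *   pbuf_free(hdr);
+ * }
+ * @endcode
+ * pbuf_cat() takes over the caller's reference to 'body', so only 'hdr' is
+ * freed afterwards, which releases the whole chain.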
+ * + * @param payload referenced payload + * @param length size of the pbuf's payload + * @param type this parameter decides how and where the pbuf + * should be allocated as follows: + * + * - PBUF_ROM: It is assumed that the memory used is really + * similar to ROM in that it is immutable and will not be + * changed. Memory which is dynamic should generally not + * be attached to PBUF_ROM pbufs. Use PBUF_REF instead. + * - PBUF_REF: It is assumed that the pbuf is only + * being used in a single thread. If the pbuf gets queued, + * then pbuf_take should be called to copy the buffer. + * + * @return the allocated pbuf. + */ +struct pbuf * +pbuf_alloc_reference(void *payload, u16_t length, pbuf_type type) +{ + struct pbuf *p; + LWIP_ASSERT("invalid pbuf_type", (type == PBUF_REF) || (type == PBUF_ROM)); + /* only allocate memory for the pbuf structure */ + p = (struct pbuf *)memp_malloc(MEMP_PBUF); + if (p == NULL) { + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("pbuf_alloc_reference: Could not allocate MEMP_PBUF for PBUF_%s.\n", + (type == PBUF_ROM) ? "ROM" : "REF")); + return NULL; + } + pbuf_init_alloced_pbuf(p, payload, length, length, type, 0); + return p; +} + + +#if LWIP_SUPPORT_CUSTOM_PBUF +/** + * @ingroup pbuf + * Initialize a custom pbuf (already allocated). + * Example of custom pbuf usage: @ref zerocopyrx + * + * @param l header size + * @param length size of the pbuf's payload + * @param type type of the pbuf (only used to treat the pbuf accordingly, as + * this function allocates no memory) + * @param p pointer to the custom pbuf to initialize (already allocated) + * @param payload_mem pointer to the buffer that is used for payload and headers, + * must be at least big enough to hold 'length' plus the header size, + * may be NULL if set later. + * ATTENTION: The caller is responsible for correct alignment of this buffer!! + * @param payload_mem_len the size of the 'payload_mem' buffer, must be at least + * big enough to hold 'length' plus the header size + */ +struct pbuf * +pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, struct pbuf_custom *p, + void *payload_mem, u16_t payload_mem_len) +{ + u16_t offset = (u16_t)l; + void *payload; + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloced_custom(length=%"U16_F")\n", length)); + + if (LWIP_MEM_ALIGN_SIZE(offset) + length > payload_mem_len) { + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_WARNING, ("pbuf_alloced_custom(length=%"U16_F") buffer too short\n", length)); + return NULL; + } + + if (payload_mem != NULL) { + payload = (u8_t *)payload_mem + LWIP_MEM_ALIGN_SIZE(offset); + } else { + payload = NULL; + } + pbuf_init_alloced_pbuf(&p->pbuf, payload, length, length, type, PBUF_FLAG_IS_CUSTOM); + return &p->pbuf; +} +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ + +/** + * @ingroup pbuf + * Shrink a pbuf chain to a desired length. + * + * @param p pbuf to shrink. + * @param new_len desired new length of pbuf chain + * + * Depending on the desired length, the first few pbufs in a chain might + * be skipped and left unchanged. The new last pbuf in the chain will be + * resized, and any remaining pbufs will be freed. + * + * @note If the pbuf is ROM/REF, only the ->tot_len and ->len fields are adjusted. + * @note May not be called on a packet queue. + * + * @note Despite its name, pbuf_realloc cannot grow the size of a pbuf (chain). 
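+ *
+ * Minimal sketch (illustrative only; produce_data() is a placeholder for
+ * application code, not an lwIP function): trimming a generously sized
+ * PBUF_RAM pbuf down to the bytes actually written:
+ * @code
+ * struct pbuf *p = pbuf_alloc(PBUF_RAW, 1024, PBUF_RAM);
+ * if (p != NULL) {
+ *   u16_t used = produce_data(p->payload, p->len);
+ *   pbuf_realloc(p, used);
+ *   pbuf_free(p);
+ * }
+ * @endcode
+ * Requests with new_len >= p->tot_len are silently ignored, since growing
+ * a pbuf is not supported.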
+ */ +void +pbuf_realloc(struct pbuf *p, u16_t new_len) +{ + struct pbuf *q; + u16_t rem_len; /* remaining length */ + u16_t shrink; + + LWIP_ASSERT("pbuf_realloc: p != NULL", p != NULL); + + /* desired length larger than current length? */ + if (new_len >= p->tot_len) { + /* enlarging not yet supported */ + return; + } + + /* the pbuf chain grows by (new_len - p->tot_len) bytes + * (which may be negative in case of shrinking) */ + shrink = (u16_t)(p->tot_len - new_len); + + /* first, step over any pbufs that should remain in the chain */ + rem_len = new_len; + q = p; + /* should this pbuf be kept? */ + while (rem_len > q->len) { + /* decrease remaining length by pbuf length */ + rem_len = (u16_t)(rem_len - q->len); + /* decrease total length indicator */ + q->tot_len = (u16_t)(q->tot_len - shrink); + /* proceed to next pbuf in chain */ + q = q->next; + LWIP_ASSERT("pbuf_realloc: q != NULL", q != NULL); + } + /* we have now reached the new last pbuf (in q) */ + /* rem_len == desired length for pbuf q */ + + /* shrink allocated memory for PBUF_RAM */ + /* (other types merely adjust their length fields */ + if (pbuf_match_allocsrc(q, PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP) && (rem_len != q->len) +#if LWIP_SUPPORT_CUSTOM_PBUF + && ((q->flags & PBUF_FLAG_IS_CUSTOM) == 0) +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ + ) { + /* reallocate and adjust the length of the pbuf that will be split */ + q = (struct pbuf *)mem_trim(q, (mem_size_t)(((u8_t *)q->payload - (u8_t *)q) + rem_len)); + LWIP_ASSERT("mem_trim returned q == NULL", q != NULL); + } + /* adjust length fields for new last pbuf */ + q->len = rem_len; + q->tot_len = q->len; + + /* any remaining pbufs in chain? */ + if (q->next != NULL) { + /* free remaining pbufs in chain */ + pbuf_free(q->next); + } + /* q is last packet in chain */ + q->next = NULL; + +} + +/** + * Adjusts the payload pointer to reveal headers in the payload. + * @see pbuf_add_header. + * + * @param p pbuf to change the header size. + * @param header_size_increment Number of bytes to increment header size. + * @param force Allow 'header_size_increment > 0' for PBUF_REF/PBUF_ROM types + * + * @return non-zero on failure, zero on success. + * + */ +static u8_t +pbuf_add_header_impl(struct pbuf *p, size_t header_size_increment, u8_t force) +{ + u16_t type_internal; + void *payload; + u16_t increment_magnitude; + + LWIP_ASSERT("p != NULL", p != NULL); + if ((p == NULL) || (header_size_increment > 0xFFFF)) { + return 1; + } + if (header_size_increment == 0) { + return 0; + } + + increment_magnitude = (u16_t)header_size_increment; + /* Do not allow tot_len to wrap as a result. */ + if ((u16_t)(increment_magnitude + p->tot_len) < increment_magnitude) { + return 1; + } + + type_internal = p->type_internal; + + /* pbuf types containing payloads? */ + if (type_internal & PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS) { + /* set new payload pointer */ + payload = (u8_t *)p->payload - header_size_increment; + /* boundary check fails? */ + if ((u8_t *)payload < (u8_t *)p + SIZEOF_STRUCT_PBUF) { + LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, + ("pbuf_add_header: failed as %p < %p (not enough space for new header size)\n", + (void *)payload, (void *)((u8_t *)p + SIZEOF_STRUCT_PBUF))); + /* bail out unsuccessfully */ + return 1; + } + /* pbuf types referring to external payloads? */ + } else { + /* hide a header in the payload? */ + if (force) { + payload = (u8_t *)p->payload - header_size_increment; + } else { + /* cannot expand payload to front (yet!) 
+ * bail out unsuccessfully */ + return 1; + } + } + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_add_header: old %p new %p (%"U16_F")\n", + (void *)p->payload, (void *)payload, increment_magnitude)); + + /* modify pbuf fields */ + p->payload = payload; + p->len = (u16_t)(p->len + increment_magnitude); + p->tot_len = (u16_t)(p->tot_len + increment_magnitude); + + + return 0; +} + +/** + * Adjusts the payload pointer to reveal headers in the payload. + * + * Adjusts the ->payload pointer so that space for a header + * appears in the pbuf payload. + * + * The ->payload, ->tot_len and ->len fields are adjusted. + * + * @param p pbuf to change the header size. + * @param header_size_increment Number of bytes to increment header size which + * increases the size of the pbuf. New space is on the front. + * If header_size_increment is 0, this function does nothing and returns successful. + * + * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so + * the call will fail. A check is made that the increase in header size does + * not move the payload pointer in front of the start of the buffer. + * + * @return non-zero on failure, zero on success. + * + */ +u8_t +pbuf_add_header(struct pbuf *p, size_t header_size_increment) +{ + return pbuf_add_header_impl(p, header_size_increment, 0); +} + +/** + * Same as @ref pbuf_add_header but does not check if 'header_size > 0' is allowed. + * This is used internally only, to allow PBUF_REF for RX. + */ +u8_t +pbuf_add_header_force(struct pbuf *p, size_t header_size_increment) +{ + return pbuf_add_header_impl(p, header_size_increment, 1); +} + +/** + * Adjusts the payload pointer to hide headers in the payload. + * + * Adjusts the ->payload pointer so that space for a header + * disappears in the pbuf payload. + * + * The ->payload, ->tot_len and ->len fields are adjusted. + * + * @param p pbuf to change the header size. + * @param header_size_decrement Number of bytes to decrement header size which + * decreases the size of the pbuf. + * If header_size_decrement is 0, this function does nothing and returns successful. + * @return non-zero on failure, zero on success. 
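+ *
+ * Minimal sketch (illustrative only; MY_HDR_LEN and handle_payload() are
+ * placeholders, not lwIP names): stepping over a fixed-size header before
+ * handing the payload to the next layer, then restoring it:
+ * @code
+ * if (pbuf_remove_header(p, MY_HDR_LEN) == 0) {
+ *   handle_payload(p);
+ *   pbuf_add_header(p, MY_HDR_LEN);
+ * }
+ * @endcode
+ * The call fails (returns non-zero) if the first pbuf holds fewer than
+ * MY_HDR_LEN bytes; restoring with pbuf_add_header() only works for pbufs
+ * that carry their payload internally (PBUF_RAM/PBUF_POOL).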
+ * + */ +u8_t +pbuf_remove_header(struct pbuf *p, size_t header_size_decrement) +{ + void *payload; + u16_t increment_magnitude; + + LWIP_ASSERT("p != NULL", p != NULL); + if ((p == NULL) || (header_size_decrement > 0xFFFF)) { + return 1; + } + if (header_size_decrement == 0) { + return 0; + } + + increment_magnitude = (u16_t)header_size_decrement; + /* Check that we aren't going to move off the end of the pbuf */ + LWIP_ERROR("increment_magnitude <= p->len", (increment_magnitude <= p->len), return 1;); + + /* remember current payload pointer */ + payload = p->payload; + LWIP_UNUSED_ARG(payload); /* only used in LWIP_DEBUGF below */ + + /* increase payload pointer (guarded by length check above) */ + p->payload = (u8_t *)p->payload + header_size_decrement; + /* modify pbuf length fields */ + p->len = (u16_t)(p->len - increment_magnitude); + p->tot_len = (u16_t)(p->tot_len - increment_magnitude); + + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_remove_header: old %p new %p (%"U16_F")\n", + (void *)payload, (void *)p->payload, increment_magnitude)); + + return 0; +} + +static u8_t +pbuf_header_impl(struct pbuf *p, s16_t header_size_increment, u8_t force) +{ + if (header_size_increment < 0) { + return pbuf_remove_header(p, (size_t) - header_size_increment); + } else { + return pbuf_add_header_impl(p, (size_t)header_size_increment, force); + } +} + +/** + * Adjusts the payload pointer to hide or reveal headers in the payload. + * + * Adjusts the ->payload pointer so that space for a header + * (dis)appears in the pbuf payload. + * + * The ->payload, ->tot_len and ->len fields are adjusted. + * + * @param p pbuf to change the header size. + * @param header_size_increment Number of bytes to increment header size which + * increases the size of the pbuf. New space is on the front. + * (Using a negative value decreases the header size.) + * If header_size_increment is 0, this function does nothing and returns successful. + * + * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so + * the call will fail. A check is made that the increase in header size does + * not move the payload pointer in front of the start of the buffer. + * @return non-zero on failure, zero on success. + * + */ +u8_t +pbuf_header(struct pbuf *p, s16_t header_size_increment) +{ + return pbuf_header_impl(p, header_size_increment, 0); +} + +/** + * Same as pbuf_header but does not check if 'header_size > 0' is allowed. + * This is used internally only, to allow PBUF_REF for RX. + */ +u8_t +pbuf_header_force(struct pbuf *p, s16_t header_size_increment) +{ + return pbuf_header_impl(p, header_size_increment, 1); +} + +/** Similar to pbuf_header(-size) but de-refs header pbufs for (size >= p->len) + * + * @param q pbufs to operate on + * @param size The number of bytes to remove from the beginning of the pbuf list. + * While size >= p->len, pbufs are freed. + * ATTENTION: this is the opposite direction as @ref pbuf_header, but + * takes an u16_t not s16_t! + * @return the new head pbuf + */ +struct pbuf * +pbuf_free_header(struct pbuf *q, u16_t size) +{ + struct pbuf *p = q; + u16_t free_left = size; + while (free_left && p) { + if (free_left >= p->len) { + struct pbuf *f = p; + free_left = (u16_t)(free_left - p->len); + p = p->next; + f->next = 0; + pbuf_free(f); + } else { + pbuf_remove_header(p, free_left); + free_left = 0; + } + } + return p; +} + +/** + * @ingroup pbuf + * Dereference a pbuf chain or queue and deallocate any no-longer-used + * pbufs at the head of this chain or queue. 
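+ *
+ * Minimal sketch (illustrative only) of how the reference count governs
+ * deallocation:
+ * @code
+ * pbuf_ref(p);
+ * pbuf_free(p);
+ * pbuf_free(p);
+ * @endcode
+ * After pbuf_ref(), the first pbuf_free() only drops the reference count
+ * back to one; the second call actually releases the pbuf.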
+ * + * Decrements the pbuf reference count. If it reaches zero, the pbuf is + * deallocated. + * + * For a pbuf chain, this is repeated for each pbuf in the chain, + * up to the first pbuf which has a non-zero reference count after + * decrementing. So, when all reference counts are one, the whole + * chain is free'd. + * + * @param p The pbuf (chain) to be dereferenced. + * + * @return the number of pbufs that were de-allocated + * from the head of the chain. + * + * @note MUST NOT be called on a packet queue (Not verified to work yet). + * @note the reference counter of a pbuf equals the number of pointers + * that refer to the pbuf (or into the pbuf). + * + * @internal examples: + * + * Assuming existing chains a->b->c with the following reference + * counts, calling pbuf_free(a) results in: + * + * 1->2->3 becomes ...1->3 + * 3->3->3 becomes 2->3->3 + * 1->1->2 becomes ......1 + * 2->1->1 becomes 1->1->1 + * 1->1->1 becomes ....... + * + */ +u8_t +pbuf_free(struct pbuf *p) +{ + u8_t alloc_src; + struct pbuf *q; + u8_t count; + + if (p == NULL) { + LWIP_ASSERT("p != NULL", p != NULL); + /* if assertions are disabled, proceed with debug output */ + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("pbuf_free(p == NULL) was called.\n")); + return 0; + } + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free(%p)\n", (void *)p)); + + PERF_START; + + count = 0; + /* de-allocate all consecutive pbufs from the head of the chain that + * obtain a zero reference count after decrementing*/ + while (p != NULL) { + LWIP_PBUF_REF_T ref; + SYS_ARCH_DECL_PROTECT(old_level); + /* Since decrementing ref cannot be guaranteed to be a single machine operation + * we must protect it. We put the new ref into a local variable to prevent + * further protection. */ + SYS_ARCH_PROTECT(old_level); + /* all pbufs in a chain are referenced at least once */ + LWIP_ASSERT("pbuf_free: p->ref > 0", p->ref > 0); + /* decrease reference count (number of pointers to pbuf) */ + ref = --(p->ref); + SYS_ARCH_UNPROTECT(old_level); + /* this pbuf is no longer referenced to? */ + if (ref == 0) { + /* remember next pbuf in chain for next iteration */ + q = p->next; + LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: deallocating %p\n", (void *)p)); + alloc_src = pbuf_get_allocsrc(p); +#if LWIP_SUPPORT_CUSTOM_PBUF + /* is this a custom pbuf? */ + if ((p->flags & PBUF_FLAG_IS_CUSTOM) != 0) { + struct pbuf_custom *pc = (struct pbuf_custom *)p; + LWIP_ASSERT("pc->custom_free_function != NULL", pc->custom_free_function != NULL); + pc->custom_free_function(p); + } else +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ + { + /* is this a pbuf from the pool? */ + if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF_POOL) { + memp_free(MEMP_PBUF_POOL, p); + /* is this a ROM or RAM referencing pbuf? 
*/ + } else if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF) { + memp_free(MEMP_PBUF, p); + /* type == PBUF_RAM */ + } else if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP) { + mem_free(p); + } else { + /* @todo: support freeing other types */ + LWIP_ASSERT("invalid pbuf type", 0); + } + } + count++; + /* proceed to next pbuf */ + p = q; + /* p->ref > 0, this pbuf is still referenced to */ + /* (and so the remaining pbufs in chain as well) */ + } else { + LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: %p has ref %"U16_F", ending here.\n", (void *)p, (u16_t)ref)); + /* stop walking through the chain */ + p = NULL; + } + } + PERF_STOP("pbuf_free"); + /* return number of de-allocated pbufs */ + return count; +} + +/** + * Count number of pbufs in a chain + * + * @param p first pbuf of chain + * @return the number of pbufs in a chain + */ +u16_t +pbuf_clen(const struct pbuf *p) +{ + u16_t len; + + len = 0; + while (p != NULL) { + ++len; + p = p->next; + } + return len; +} + +/** + * @ingroup pbuf + * Increment the reference count of the pbuf. + * + * @param p pbuf to increase reference counter of + * + */ +void +pbuf_ref(struct pbuf *p) +{ + /* pbuf given? */ + if (p != NULL) { + SYS_ARCH_SET(p->ref, (LWIP_PBUF_REF_T)(p->ref + 1)); + LWIP_ASSERT("pbuf ref overflow", p->ref > 0); + } +} + +/** + * @ingroup pbuf + * Concatenate two pbufs (each may be a pbuf chain) and take over + * the caller's reference of the tail pbuf. + * + * @note The caller MAY NOT reference the tail pbuf afterwards. + * Use pbuf_chain() for that purpose. + * + * This function explicitly does not check for tot_len overflow to prevent + * failing to queue too long pbufs. This can produce invalid pbufs, so + * handle with care! + * + * @see pbuf_chain() + */ +void +pbuf_cat(struct pbuf *h, struct pbuf *t) +{ + struct pbuf *p; + + LWIP_ERROR("(h != NULL) && (t != NULL) (programmer violates API)", + ((h != NULL) && (t != NULL)), return;); + + /* proceed to last pbuf of chain */ + for (p = h; p->next != NULL; p = p->next) { + /* add total length of second chain to all totals of first chain */ + p->tot_len = (u16_t)(p->tot_len + t->tot_len); + } + /* { p is last pbuf of first h chain, p->next == NULL } */ + LWIP_ASSERT("p->tot_len == p->len (of last pbuf in chain)", p->tot_len == p->len); + LWIP_ASSERT("p->next == NULL", p->next == NULL); + /* add total length of second chain to last pbuf total of first chain */ + p->tot_len = (u16_t)(p->tot_len + t->tot_len); + /* chain last pbuf of head (p) with first of tail (t) */ + p->next = t; + /* p->next now references t, but the caller will drop its reference to t, + * so netto there is no change to the reference count of t. + */ +} + +/** + * @ingroup pbuf + * Chain two pbufs (or pbuf chains) together. + * + * The caller MUST call pbuf_free(t) once it has stopped + * using it. Use pbuf_cat() instead if you no longer use t. + * + * @param h head pbuf (chain) + * @param t tail pbuf (chain) + * @note The pbufs MUST belong to the same packet. + * @note MAY NOT be called on a packet queue. + * + * The ->tot_len fields of all pbufs of the head chain are adjusted. + * The ->next field of the last pbuf of the head chain is adjusted. + * The ->ref field of the first pbuf of the tail chain is adjusted. 
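+ *
+ * Minimal sketch (illustrative only; error handling is trimmed) of the
+ * difference from pbuf_cat(): after pbuf_chain() the caller still holds a
+ * reference to the tail and must release it separately:
+ * @code
+ * struct pbuf *h = pbuf_alloc(PBUF_RAW, 16, PBUF_RAM);
+ * struct pbuf *t = pbuf_alloc(PBUF_RAW, 16, PBUF_RAM);
+ * if ((h != NULL) && (t != NULL)) {
+ *   pbuf_chain(h, t);
+ *   pbuf_free(t);
+ *   pbuf_free(h);
+ * }
+ * @endcode
+ * With pbuf_cat(h, t) the extra pbuf_free(t) would be wrong, because
+ * pbuf_cat() takes over the caller's reference to the tail.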
+ * + */ +void +pbuf_chain(struct pbuf *h, struct pbuf *t) +{ + pbuf_cat(h, t); + /* t is now referenced by h */ + pbuf_ref(t); + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_chain: %p references %p\n", (void *)h, (void *)t)); +} + +/** + * Dechains the first pbuf from its succeeding pbufs in the chain. + * + * Makes p->tot_len field equal to p->len. + * @param p pbuf to dechain + * @return remainder of the pbuf chain, or NULL if it was de-allocated. + * @note May not be called on a packet queue. + */ +struct pbuf * +pbuf_dechain(struct pbuf *p) +{ + struct pbuf *q; + u8_t tail_gone = 1; + /* tail */ + q = p->next; + /* pbuf has successor in chain? */ + if (q != NULL) { + /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0) */ + LWIP_ASSERT("p->tot_len == p->len + q->tot_len", q->tot_len == p->tot_len - p->len); + /* enforce invariant if assertion is disabled */ + q->tot_len = (u16_t)(p->tot_len - p->len); + /* decouple pbuf from remainder */ + p->next = NULL; + /* total length of pbuf p is its own length only */ + p->tot_len = p->len; + /* q is no longer referenced by p, free it */ + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_dechain: unreferencing %p\n", (void *)q)); + tail_gone = pbuf_free(q); + if (tail_gone > 0) { + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, + ("pbuf_dechain: deallocated %p (as it is no longer referenced)\n", (void *)q)); + } + /* return remaining tail or NULL if deallocated */ + } + /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0) */ + LWIP_ASSERT("p->tot_len == p->len", p->tot_len == p->len); + return ((tail_gone > 0) ? NULL : q); +} + +/** + * @ingroup pbuf + * Create PBUF_RAM copies of pbufs. + * + * Used to queue packets on behalf of the lwIP stack, such as + * ARP based queueing. + * + * @note You MUST explicitly use p = pbuf_take(p); + * + * @note Only one packet is copied, no packet queue! + * + * @param p_to pbuf destination of the copy + * @param p_from pbuf source of the copy + * + * @return ERR_OK if pbuf was copied + * ERR_ARG if one of the pbufs is NULL or p_to is not big + * enough to hold p_from + */ +err_t +pbuf_copy(struct pbuf *p_to, const struct pbuf *p_from) +{ + size_t offset_to = 0, offset_from = 0, len; + + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy(%p, %p)\n", + (const void *)p_to, (const void *)p_from)); + + /* is the target big enough to hold the source? 
*/ + LWIP_ERROR("pbuf_copy: target not big enough to hold source", ((p_to != NULL) && + (p_from != NULL) && (p_to->tot_len >= p_from->tot_len)), return ERR_ARG;); + + /* iterate through pbuf chain */ + do { + /* copy one part of the original chain */ + if ((p_to->len - offset_to) >= (p_from->len - offset_from)) { + /* complete current p_from fits into current p_to */ + len = p_from->len - offset_from; + } else { + /* current p_from does not fit into current p_to */ + len = p_to->len - offset_to; + } + MEMCPY((u8_t *)p_to->payload + offset_to, (u8_t *)p_from->payload + offset_from, len); + offset_to += len; + offset_from += len; + LWIP_ASSERT("offset_to <= p_to->len", offset_to <= p_to->len); + LWIP_ASSERT("offset_from <= p_from->len", offset_from <= p_from->len); + if (offset_from >= p_from->len) { + /* on to next p_from (if any) */ + offset_from = 0; + p_from = p_from->next; + } + if (offset_to == p_to->len) { + /* on to next p_to (if any) */ + offset_to = 0; + p_to = p_to->next; + LWIP_ERROR("p_to != NULL", (p_to != NULL) || (p_from == NULL), return ERR_ARG;); + } + + if ((p_from != NULL) && (p_from->len == p_from->tot_len)) { + /* don't copy more than one packet! */ + LWIP_ERROR("pbuf_copy() does not allow packet queues!", + (p_from->next == NULL), return ERR_VAL;); + } + if ((p_to != NULL) && (p_to->len == p_to->tot_len)) { + /* don't copy more than one packet! */ + LWIP_ERROR("pbuf_copy() does not allow packet queues!", + (p_to->next == NULL), return ERR_VAL;); + } + } while (p_from); + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy: end of chain reached.\n")); + return ERR_OK; +} + +/** + * @ingroup pbuf + * Copy (part of) the contents of a packet buffer + * to an application supplied buffer. + * + * @param buf the pbuf from which to copy data + * @param dataptr the application supplied buffer + * @param len length of data to copy (dataptr must be big enough). No more + * than buf->tot_len will be copied, irrespective of len + * @param offset offset into the packet buffer from where to begin copying len bytes + * @return the number of bytes copied, or 0 on failure + */ +u16_t +pbuf_copy_partial(const struct pbuf *buf, void *dataptr, u16_t len, u16_t offset) +{ + const struct pbuf *p; + u16_t left = 0; + u16_t buf_copy_len; + u16_t copied_total = 0; + + LWIP_ERROR("pbuf_copy_partial: invalid buf", (buf != NULL), return 0;); + LWIP_ERROR("pbuf_copy_partial: invalid dataptr", (dataptr != NULL), return 0;); + + /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */ + for (p = buf; len != 0 && p != NULL; p = p->next) { + if ((offset != 0) && (offset >= p->len)) { + /* don't copy from this buffer -> on to the next */ + offset = (u16_t)(offset - p->len); + } else { + /* copy from this buffer. maybe only partially. */ + buf_copy_len = (u16_t)(p->len - offset); + if (buf_copy_len > len) { + buf_copy_len = len; + } + /* copy the necessary parts of the buffer */ + MEMCPY(&((char *)dataptr)[left], &((char *)p->payload)[offset], buf_copy_len); + copied_total = (u16_t)(copied_total + buf_copy_len); + left = (u16_t)(left + buf_copy_len); + len = (u16_t)(len - buf_copy_len); + offset = 0; + } + } + return copied_total; +} + +/** + * @ingroup pbuf + * Get part of a pbuf's payload as contiguous memory. The returned memory is + * either a pointer into the pbuf's payload or, if split over multiple pbufs, + * a copy into the user-supplied buffer. 
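+ *
+ * Minimal sketch (illustrative only; parse_field() is a placeholder, not an
+ * lwIP function): reading an 8-byte field at offset 10 that may or may not
+ * be split across pbufs:
+ * @code
+ * u8_t tmp[8];
+ * const u8_t *field = (const u8_t *)pbuf_get_contiguous(p, tmp, sizeof(tmp), 8, 10);
+ * if (field != NULL) {
+ *   parse_field(field);
+ * }
+ * @endcode
+ * 'field' points either directly into the pbuf payload or into 'tmp', so it
+ * must not be used after the pbuf has been freed.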
+ * + * @param p the pbuf from which to copy data + * @param buffer the application supplied buffer + * @param bufsize size of the application supplied buffer + * @param len length of data to copy (dataptr must be big enough). No more + * than buf->tot_len will be copied, irrespective of len + * @param offset offset into the packet buffer from where to begin copying len bytes + * @return the number of bytes copied, or 0 on failure + */ +void * +pbuf_get_contiguous(const struct pbuf *p, void *buffer, size_t bufsize, u16_t len, u16_t offset) +{ + const struct pbuf *q; + u16_t out_offset; + + LWIP_ERROR("pbuf_get_contiguous: invalid buf", (p != NULL), return NULL;); + LWIP_ERROR("pbuf_get_contiguous: invalid dataptr", (buffer != NULL), return NULL;); + LWIP_ERROR("pbuf_get_contiguous: invalid dataptr", (bufsize >= len), return NULL;); + + q = pbuf_skip_const(p, offset, &out_offset); + if (q != NULL) { + if (q->len >= (out_offset + len)) { + /* all data in this pbuf, return zero-copy */ + return (u8_t *)q->payload + out_offset; + } + /* need to copy */ + if (pbuf_copy_partial(q, buffer, len, out_offset) != len) { + /* copying failed: pbuf is too short */ + return NULL; + } + return buffer; + } + /* pbuf is too short (offset does not fit in) */ + return NULL; +} + +#if LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE +/** + * This method modifies a 'pbuf chain', so that its total length is + * smaller than 64K. The remainder of the original pbuf chain is stored + * in *rest. + * This function never creates new pbufs, but splits an existing chain + * in two parts. The tot_len of the modified packet queue will likely be + * smaller than 64K. + * 'packet queues' are not supported by this function. + * + * @param p the pbuf queue to be split + * @param rest pointer to store the remainder (after the first 64K) + */ +void pbuf_split_64k(struct pbuf *p, struct pbuf **rest) +{ + *rest = NULL; + if ((p != NULL) && (p->next != NULL)) { + u16_t tot_len_front = p->len; + struct pbuf *i = p; + struct pbuf *r = p->next; + + /* continue until the total length (summed up as u16_t) overflows */ + while ((r != NULL) && ((u16_t)(tot_len_front + r->len) >= tot_len_front)) { + tot_len_front = (u16_t)(tot_len_front + r->len); + i = r; + r = r->next; + } + /* i now points to last packet of the first segment. Set next + pointer to NULL */ + i->next = NULL; + + if (r != NULL) { + /* Update the tot_len field in the first part */ + for (i = p; i != NULL; i = i->next) { + i->tot_len = (u16_t)(i->tot_len - r->tot_len); + LWIP_ASSERT("tot_len/len mismatch in last pbuf", + (i->next != NULL) || (i->tot_len == i->len)); + } + if (p->flags & PBUF_FLAG_TCP_FIN) { + r->flags |= PBUF_FLAG_TCP_FIN; + } + + /* tot_len field in rest does not need modifications */ + /* reference counters do not need modifications */ + *rest = r; + } + } +} +#endif /* LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + +/* Actual implementation of pbuf_skip() but returning const pointer... 
*/ +static const struct pbuf * +pbuf_skip_const(const struct pbuf *in, u16_t in_offset, u16_t *out_offset) +{ + u16_t offset_left = in_offset; + const struct pbuf *q = in; + + /* get the correct pbuf */ + while ((q != NULL) && (q->len <= offset_left)) { + offset_left = (u16_t)(offset_left - q->len); + q = q->next; + } + if (out_offset != NULL) { + *out_offset = offset_left; + } + return q; +} + +/** + * @ingroup pbuf + * Skip a number of bytes at the start of a pbuf + * + * @param in input pbuf + * @param in_offset offset to skip + * @param out_offset resulting offset in the returned pbuf + * @return the pbuf in the queue where the offset is + */ +struct pbuf * +pbuf_skip(struct pbuf *in, u16_t in_offset, u16_t *out_offset) +{ + const struct pbuf *out = pbuf_skip_const(in, in_offset, out_offset); + return LWIP_CONST_CAST(struct pbuf *, out); +} + +/** + * @ingroup pbuf + * Copy application supplied data into a pbuf. + * This function can only be used to copy the equivalent of buf->tot_len data. + * + * @param buf pbuf to fill with data + * @param dataptr application supplied data buffer + * @param len length of the application supplied data buffer + * + * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough + */ +err_t +pbuf_take(struct pbuf *buf, const void *dataptr, u16_t len) +{ + struct pbuf *p; + size_t buf_copy_len; + size_t total_copy_len = len; + size_t copied_total = 0; + + LWIP_ERROR("pbuf_take: invalid buf", (buf != NULL), return ERR_ARG;); + LWIP_ERROR("pbuf_take: invalid dataptr", (dataptr != NULL), return ERR_ARG;); + LWIP_ERROR("pbuf_take: buf not large enough", (buf->tot_len >= len), return ERR_MEM;); + + if ((buf == NULL) || (dataptr == NULL) || (buf->tot_len < len)) { + return ERR_ARG; + } + + /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. 
*/ + for (p = buf; total_copy_len != 0; p = p->next) { + LWIP_ASSERT("pbuf_take: invalid pbuf", p != NULL); + buf_copy_len = total_copy_len; + if (buf_copy_len > p->len) { + /* this pbuf cannot hold all remaining data */ + buf_copy_len = p->len; + } + /* copy the necessary parts of the buffer */ + MEMCPY(p->payload, &((const char *)dataptr)[copied_total], buf_copy_len); + total_copy_len -= buf_copy_len; + copied_total += buf_copy_len; + } + LWIP_ASSERT("did not copy all data", total_copy_len == 0 && copied_total == len); + return ERR_OK; +} + +/** + * @ingroup pbuf + * Same as pbuf_take() but puts data at an offset + * + * @param buf pbuf to fill with data + * @param dataptr application supplied data buffer + * @param len length of the application supplied data buffer + * @param offset offset in pbuf where to copy dataptr to + * + * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough + */ +err_t +pbuf_take_at(struct pbuf *buf, const void *dataptr, u16_t len, u16_t offset) +{ + u16_t target_offset; + struct pbuf *q = pbuf_skip(buf, offset, &target_offset); + + /* return requested data if pbuf is OK */ + if ((q != NULL) && (q->tot_len >= target_offset + len)) { + u16_t remaining_len = len; + const u8_t *src_ptr = (const u8_t *)dataptr; + /* copy the part that goes into the first pbuf */ + u16_t first_copy_len; + LWIP_ASSERT("check pbuf_skip result", target_offset < q->len); + first_copy_len = (u16_t)LWIP_MIN(q->len - target_offset, len); + MEMCPY(((u8_t *)q->payload) + target_offset, dataptr, first_copy_len); + remaining_len = (u16_t)(remaining_len - first_copy_len); + src_ptr += first_copy_len; + if (remaining_len > 0) { + return pbuf_take(q->next, src_ptr, remaining_len); + } + return ERR_OK; + } + return ERR_MEM; +} + +/** + * @ingroup pbuf + * Creates a single pbuf out of a queue of pbufs. + * + * @remark: Either the source pbuf 'p' is freed by this function or the original + * pbuf 'p' is returned, therefore the caller has to check the result! + * + * @param p the source pbuf + * @param layer pbuf_layer of the new pbuf + * + * @return a new, single pbuf (p->next is NULL) + * or the old pbuf if allocation fails + */ +struct pbuf * +pbuf_coalesce(struct pbuf *p, pbuf_layer layer) +{ + struct pbuf *q; + if (p->next == NULL) { + return p; + } + q = pbuf_clone(layer, PBUF_RAM, p); + if (q == NULL) { + /* @todo: what do we do now? */ + return p; + } + pbuf_free(p); + return q; +} + +/** + * @ingroup pbuf + * Allocates a new pbuf of same length (via pbuf_alloc()) and copies the source + * pbuf into this new pbuf (using pbuf_copy()). + * + * @param layer pbuf_layer of the new pbuf + * @param type this parameter decides how and where the pbuf should be allocated + * (@see pbuf_alloc()) + * @param p the source pbuf + * + * @return a new pbuf or NULL if allocation fails + */ +struct pbuf * +pbuf_clone(pbuf_layer layer, pbuf_type type, struct pbuf *p) +{ + struct pbuf *q; + err_t err; + q = pbuf_alloc(layer, p->tot_len, type); + if (q == NULL) { + return NULL; + } + err = pbuf_copy(q, p); + LWIP_UNUSED_ARG(err); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("pbuf_copy failed", err == ERR_OK); + return q; +} + +#if LWIP_CHECKSUM_ON_COPY +/** + * Copies data into a single pbuf (*not* into a pbuf queue!) 
and updates + * the checksum while copying + * + * @param p the pbuf to copy data into + * @param start_offset offset of p->payload where to copy the data to + * @param dataptr data to copy into the pbuf + * @param len length of data to copy into the pbuf + * @param chksum pointer to the checksum which is updated + * @return ERR_OK if successful, another error if the data does not fit + * within the (first) pbuf (no pbuf queues!) + */ +err_t +pbuf_fill_chksum(struct pbuf *p, u16_t start_offset, const void *dataptr, + u16_t len, u16_t *chksum) +{ + u32_t acc; + u16_t copy_chksum; + char *dst_ptr; + LWIP_ASSERT("p != NULL", p != NULL); + LWIP_ASSERT("dataptr != NULL", dataptr != NULL); + LWIP_ASSERT("chksum != NULL", chksum != NULL); + LWIP_ASSERT("len != 0", len != 0); + + if ((start_offset >= p->len) || (start_offset + len > p->len)) { + return ERR_ARG; + } + + dst_ptr = ((char *)p->payload) + start_offset; + copy_chksum = LWIP_CHKSUM_COPY(dst_ptr, dataptr, len); + if ((start_offset & 1) != 0) { + copy_chksum = SWAP_BYTES_IN_WORD(copy_chksum); + } + acc = *chksum; + acc += copy_chksum; + *chksum = FOLD_U32T(acc); + return ERR_OK; +} +#endif /* LWIP_CHECKSUM_ON_COPY */ + +/** + * @ingroup pbuf + * Get one byte from the specified position in a pbuf + * WARNING: returns zero for offset >= p->tot_len + * + * @param p pbuf to parse + * @param offset offset into p of the byte to return + * @return byte at an offset into p OR ZERO IF 'offset' >= p->tot_len + */ +u8_t +pbuf_get_at(const struct pbuf *p, u16_t offset) +{ + int ret = pbuf_try_get_at(p, offset); + if (ret >= 0) { + return (u8_t)ret; + } + return 0; +} + +/** + * @ingroup pbuf + * Get one byte from the specified position in a pbuf + * + * @param p pbuf to parse + * @param offset offset into p of the byte to return + * @return byte at an offset into p [0..0xFF] OR negative if 'offset' >= p->tot_len + */ +int +pbuf_try_get_at(const struct pbuf *p, u16_t offset) +{ + u16_t q_idx; + const struct pbuf *q = pbuf_skip_const(p, offset, &q_idx); + + /* return requested data if pbuf is OK */ + if ((q != NULL) && (q->len > q_idx)) { + return ((u8_t *)q->payload)[q_idx]; + } + return -1; +} + +/** + * @ingroup pbuf + * Put one byte to the specified position in a pbuf + * WARNING: silently ignores offset >= p->tot_len + * + * @param p pbuf to fill + * @param offset offset into p of the byte to write + * @param data byte to write at an offset into p + */ +void +pbuf_put_at(struct pbuf *p, u16_t offset, u8_t data) +{ + u16_t q_idx; + struct pbuf *q = pbuf_skip(p, offset, &q_idx); + + /* write requested data if pbuf is OK */ + if ((q != NULL) && (q->len > q_idx)) { + ((u8_t *)q->payload)[q_idx] = data; + } +} + +/** + * @ingroup pbuf + * Compare pbuf contents at specified offset with memory s2, both of length n + * + * @param p pbuf to compare + * @param offset offset into p at which to start comparing + * @param s2 buffer to compare + * @param n length of buffer to compare + * @return zero if equal, nonzero otherwise + * (0xffff if p is too short, diffoffset+1 otherwise) + */ +u16_t +pbuf_memcmp(const struct pbuf *p, u16_t offset, const void *s2, u16_t n) +{ + u16_t start = offset; + const struct pbuf *q = p; + u16_t i; + + /* pbuf long enough to perform check? */ + if (p->tot_len < (offset + n)) { + return 0xffff; + } + + /* get the correct pbuf from chain. We know it succeeds because of p->tot_len check above. 
*/ + while ((q != NULL) && (q->len <= start)) { + start = (u16_t)(start - q->len); + q = q->next; + } + + /* return requested data if pbuf is OK */ + for (i = 0; i < n; i++) { + /* We know pbuf_get_at() succeeds because of p->tot_len check above. */ + u8_t a = pbuf_get_at(q, (u16_t)(start + i)); + u8_t b = ((const u8_t *)s2)[i]; + if (a != b) { + return (u16_t)LWIP_MIN(i + 1, 0xFFFF); + } + } + return 0; +} + +/** + * @ingroup pbuf + * Find occurrence of mem (with length mem_len) in pbuf p, starting at offset + * start_offset. + * + * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as + * return value 'not found' + * @param mem search for the contents of this buffer + * @param mem_len length of 'mem' + * @param start_offset offset into p at which to start searching + * @return 0xFFFF if substr was not found in p or the index where it was found + */ +u16_t +pbuf_memfind(const struct pbuf *p, const void *mem, u16_t mem_len, u16_t start_offset) +{ + u16_t i; + u16_t max_cmp_start = (u16_t)(p->tot_len - mem_len); + if (p->tot_len >= mem_len + start_offset) { + for (i = start_offset; i <= max_cmp_start; i++) { + u16_t plus = pbuf_memcmp(p, i, mem, mem_len); + if (plus == 0) { + return i; + } + } + } + return 0xFFFF; +} + +/** + * Find occurrence of substr with length substr_len in pbuf p, start at offset + * start_offset + * WARNING: in contrast to strstr(), this one does not stop at the first \0 in + * the pbuf/source string! + * + * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as + * return value 'not found' + * @param substr string to search for in p, maximum length is 0xFFFE + * @return 0xFFFF if substr was not found in p or the index where it was found + */ +u16_t +pbuf_strstr(const struct pbuf *p, const char *substr) +{ + size_t substr_len; + if ((substr == NULL) || (substr[0] == 0) || (p->tot_len == 0xFFFF)) { + return 0xFFFF; + } + substr_len = strlen(substr); + if (substr_len >= 0xFFFF) { + return 0xFFFF; + } + return pbuf_memfind(p, substr, (u16_t)substr_len, 0); +} diff --git a/Middlewares/Third_Party/LwIP/src/core/raw.c b/Middlewares/Third_Party/LwIP/src/core/raw.c new file mode 100644 index 0000000..3b34544 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/raw.c @@ -0,0 +1,671 @@ +/** + * @file + * Implementation of raw protocol PCBs for low-level handling of + * different types of protocols besides (or overriding) those + * already available in lwIP.\n + * See also @ref raw_raw + * + * @defgroup raw_raw RAW + * @ingroup callbackstyle_api + * Implementation of raw protocol PCBs for low-level handling of + * different types of protocols besides (or overriding) those + * already available in lwIP.\n + * @see @ref api + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_RAW /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/memp.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/raw.h" +#include "lwip/priv/raw_priv.h" +#include "lwip/stats.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/inet_chksum.h" + +#include + +/** The list of RAW PCBs */ +static struct raw_pcb *raw_pcbs; + +static u8_t +raw_input_local_match(struct raw_pcb *pcb, u8_t broadcast) +{ + LWIP_UNUSED_ARG(broadcast); /* in IPv6 only case */ + + /* check if PCB is bound to specific netif */ + if ((pcb->netif_idx != NETIF_NO_INDEX) && + (pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) { + return 0; + } + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: PCBs listening to any IP type also listen to any IP address */ + if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { +#if IP_SOF_BROADCAST_RECV + if ((broadcast != 0) && !ip_get_option(pcb, SOF_BROADCAST)) { + return 0; + } +#endif /* IP_SOF_BROADCAST_RECV */ + return 1; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + /* Only need to check PCB if incoming IP version matches PCB IP version */ + if (IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ip_current_dest_addr())) { +#if LWIP_IPV4 + /* Special case: IPv4 broadcast: receive all broadcasts + * Note: broadcast variable can only be 1 if it is an IPv4 broadcast */ + if (broadcast != 0) { +#if IP_SOF_BROADCAST_RECV + if (ip_get_option(pcb, SOF_BROADCAST)) +#endif /* IP_SOF_BROADCAST_RECV */ + { + if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip))) { + return 1; + } + } + } else +#endif /* LWIP_IPV4 */ + /* Handle IPv4 and IPv6: catch all or exact match */ + if (ip_addr_isany(&pcb->local_ip) || + ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { + return 1; + } + } + + return 0; +} + +/** + * Determine if in incoming IP packet is covered by a RAW PCB + * and if so, pass it to a user-provided receive callback function. + * + * Given an incoming IP datagram (as a chain of pbufs) this function + * finds a corresponding RAW PCB and calls the corresponding receive + * callback function. + * + * @param p pbuf to be demultiplexed to a RAW PCB. + * @param inp network interface on which the datagram was received. + * @return - 1 if the packet has been eaten by a RAW PCB receive + * callback function. The caller MAY NOT not reference the + * packet any longer, and MAY NOT call pbuf_free(). + * @return - 0 if packet is not eaten (pbuf is still referenced by the + * caller). 
+ * + */ +raw_input_state_t +raw_input(struct pbuf *p, struct netif *inp) +{ + struct raw_pcb *pcb, *prev; + s16_t proto; + raw_input_state_t ret = RAW_INPUT_NONE; + u8_t broadcast = ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif()); + + LWIP_UNUSED_ARG(inp); + +#if LWIP_IPV6 +#if LWIP_IPV4 + if (IP_HDR_GET_VERSION(p->payload) == 6) +#endif /* LWIP_IPV4 */ + { + struct ip6_hdr *ip6hdr = (struct ip6_hdr *)p->payload; + proto = IP6H_NEXTH(ip6hdr); + } +#if LWIP_IPV4 + else +#endif /* LWIP_IPV4 */ +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + { + proto = IPH_PROTO((struct ip_hdr *)p->payload); + } +#endif /* LWIP_IPV4 */ + + prev = NULL; + pcb = raw_pcbs; + /* loop through all raw pcbs until the packet is eaten by one */ + /* this allows multiple pcbs to match against the packet by design */ + while (pcb != NULL) { + if ((pcb->protocol == proto) && raw_input_local_match(pcb, broadcast) && + (((pcb->flags & RAW_FLAGS_CONNECTED) == 0) || + ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()))) { + /* receive callback function available? */ + if (pcb->recv != NULL) { + u8_t eaten; +#ifndef LWIP_NOASSERT + void *old_payload = p->payload; +#endif + ret = RAW_INPUT_DELIVERED; + /* the receive callback function did not eat the packet? */ + eaten = pcb->recv(pcb->recv_arg, pcb, p, ip_current_src_addr()); + if (eaten != 0) { + /* receive function ate the packet */ + p = NULL; + if (prev != NULL) { + /* move the pcb to the front of raw_pcbs so that is + found faster next time */ + prev->next = pcb->next; + pcb->next = raw_pcbs; + raw_pcbs = pcb; + } + return RAW_INPUT_EATEN; + } else { + /* sanity-check that the receive callback did not alter the pbuf */ + LWIP_ASSERT("raw pcb recv callback altered pbuf payload pointer without eating packet", + p->payload == old_payload); + } + } + /* no receive callback function was set for this raw PCB */ + } + /* drop the packet */ + prev = pcb; + pcb = pcb->next; + } + return ret; +} + +/** + * @ingroup raw_raw + * Bind a RAW PCB. + * + * @param pcb RAW PCB to be bound with a local address ipaddr. + * @param ipaddr local IP address to bind with. Use IP4_ADDR_ANY to + * bind to all local interfaces. + * + * @return lwIP error code. + * - ERR_OK. Successful. No error occurred. + * - ERR_USE. The specified IP address is already bound to by + * another RAW PCB. + * + * @see raw_disconnect() + */ +err_t +raw_bind(struct raw_pcb *pcb, const ip_addr_t *ipaddr) +{ + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb == NULL) || (ipaddr == NULL)) { + return ERR_VAL; + } + ip_addr_set_ipaddr(&pcb->local_ip, ipaddr); +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now. + * This is legacy support: scope-aware callers should always provide properly + * zoned source addresses. */ + if (IP_IS_V6(&pcb->local_ip) && + ip6_addr_lacks_zone(ip_2_ip6(&pcb->local_ip), IP6_UNKNOWN)) { + ip6_addr_select_zone(ip_2_ip6(&pcb->local_ip), ip_2_ip6(&pcb->local_ip)); + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + return ERR_OK; +} + +/** + * @ingroup raw_raw + * Bind an RAW PCB to a specific netif. + * After calling this function, all packets received via this PCB + * are guaranteed to have come in via the specified netif, and all + * outgoing packets will go out via the specified netif. + * + * @param pcb RAW PCB to be bound with netif. + * @param netif netif to bind to. Can be NULL. 
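+ *
+ * Minimal sketch (illustrative only): restricting an ICMP PCB to the default
+ * network interface:
+ * @code
+ * struct raw_pcb *pcb = raw_new(IP_PROTO_ICMP);
+ * if (pcb != NULL) {
+ *   raw_bind_netif(pcb, netif_default);
+ * }
+ * @endcode
+ * Passing NULL as the netif removes the binding again.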
+ * + * @see raw_disconnect() + */ +void +raw_bind_netif(struct raw_pcb *pcb, const struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (netif != NULL) { + pcb->netif_idx = netif_get_index(netif); + } else { + pcb->netif_idx = NETIF_NO_INDEX; + } +} + +/** + * @ingroup raw_raw + * Connect an RAW PCB. This function is required by upper layers + * of lwip. Using the raw api you could use raw_sendto() instead + * + * This will associate the RAW PCB with the remote address. + * + * @param pcb RAW PCB to be connected with remote address ipaddr and port. + * @param ipaddr remote IP address to connect with. + * + * @return lwIP error code + * + * @see raw_disconnect() and raw_sendto() + */ +err_t +raw_connect(struct raw_pcb *pcb, const ip_addr_t *ipaddr) +{ + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb == NULL) || (ipaddr == NULL)) { + return ERR_VAL; + } + ip_addr_set_ipaddr(&pcb->remote_ip, ipaddr); +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now, + * using the bound address to make a more informed decision when possible. */ + if (IP_IS_V6(&pcb->remote_ip) && + ip6_addr_lacks_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNKNOWN)) { + ip6_addr_select_zone(ip_2_ip6(&pcb->remote_ip), ip_2_ip6(&pcb->local_ip)); + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + raw_set_flags(pcb, RAW_FLAGS_CONNECTED); + return ERR_OK; +} + +/** + * @ingroup raw_raw + * Disconnect a RAW PCB. + * + * @param pcb the raw pcb to disconnect. + */ +void +raw_disconnect(struct raw_pcb *pcb) +{ + LWIP_ASSERT_CORE_LOCKED(); + /* reset remote address association */ +#if LWIP_IPV4 && LWIP_IPV6 + if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { + ip_addr_copy(pcb->remote_ip, *IP_ANY_TYPE); + } else { +#endif + ip_addr_set_any(IP_IS_V6_VAL(pcb->remote_ip), &pcb->remote_ip); +#if LWIP_IPV4 && LWIP_IPV6 + } +#endif + pcb->netif_idx = NETIF_NO_INDEX; + /* mark PCB as unconnected */ + raw_clear_flags(pcb, RAW_FLAGS_CONNECTED); +} + +/** + * @ingroup raw_raw + * Set the callback function for received packets that match the + * raw PCB's protocol and binding. + * + * The callback function MUST either + * - eat the packet by calling pbuf_free() and returning non-zero. The + * packet will not be passed to other raw PCBs or other protocol layers. + * - not free the packet, and return zero. The packet will be matched + * against further PCBs and/or forwarded to another protocol layers. + */ +void +raw_recv(struct raw_pcb *pcb, raw_recv_fn recv, void *recv_arg) +{ + LWIP_ASSERT_CORE_LOCKED(); + /* remember recv() callback and user data */ + pcb->recv = recv; + pcb->recv_arg = recv_arg; +} + +/** + * @ingroup raw_raw + * Send the raw IP packet to the given address. An IP header will be prepended + * to the packet, unless the RAW_FLAGS_HDRINCL flag is set on the PCB. In that + * case, the packet must include an IP header, which will then be sent as is. 
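+ *
+ * Minimal ping-style sketch (illustrative only; fill_icmp_echo() is a
+ * placeholder, error paths are trimmed, and a real receive handler would
+ * check the ICMP type before eating the packet). Like all raw API calls,
+ * this must run in the tcpip thread context:
+ * @code
+ * static u8_t ping_recv(void *arg, struct raw_pcb *pcb, struct pbuf *p,
+ *                       const ip_addr_t *addr)
+ * {
+ *   LWIP_UNUSED_ARG(arg);
+ *   LWIP_UNUSED_ARG(pcb);
+ *   LWIP_UNUSED_ARG(addr);
+ *   pbuf_free(p);
+ *   return 1;
+ * }
+ *
+ * void ping_send(const ip_addr_t *dst)
+ * {
+ *   struct raw_pcb *pcb = raw_new(IP_PROTO_ICMP);
+ *   struct pbuf *p = pbuf_alloc(PBUF_IP, 8, PBUF_RAM);
+ *   if ((pcb != NULL) && (p != NULL)) {
+ *     raw_recv(pcb, ping_recv, NULL);
+ *     fill_icmp_echo(p->payload, p->len);
+ *     raw_sendto(pcb, p, dst);
+ *   }
+ *   if (p != NULL) {
+ *     pbuf_free(p);
+ *   }
+ * }
+ * @endcode
+ * Returning 1 from the receive callback means the packet was eaten, so the
+ * callback itself must free it; the sender keeps its own reference to p and
+ * therefore still frees it after raw_sendto().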
+ * + * @param pcb the raw pcb which to send + * @param p the IP payload to send + * @param ipaddr the destination address of the IP packet + * + */ +err_t +raw_sendto(struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *ipaddr) +{ + struct netif *netif; + const ip_addr_t *src_ip; + + if ((pcb == NULL) || (ipaddr == NULL) || !IP_ADDR_PCB_VERSION_MATCH(pcb, ipaddr)) { + return ERR_VAL; + } + + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE, ("raw_sendto\n")); + + if (pcb->netif_idx != NETIF_NO_INDEX) { + netif = netif_get_by_index(pcb->netif_idx); + } else { +#if LWIP_MULTICAST_TX_OPTIONS + netif = NULL; + if (ip_addr_ismulticast(ipaddr)) { + /* For multicast-destined packets, use the user-provided interface index to + * determine the outgoing interface, if an interface index is set and a + * matching netif can be found. Otherwise, fall back to regular routing. */ + netif = netif_get_by_index(pcb->mcast_ifindex); + } + + if (netif == NULL) +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + { + netif = ip_route(&pcb->local_ip, ipaddr); + } + } + + if (netif == NULL) { + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ("raw_sendto: No route to ")); + ip_addr_debug_print(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ipaddr); + return ERR_RTE; + } + + if (ip_addr_isany(&pcb->local_ip) || ip_addr_ismulticast(&pcb->local_ip)) { + /* use outgoing network interface IP address as source address */ + src_ip = ip_netif_get_local_ip(netif, ipaddr); +#if LWIP_IPV6 + if (src_ip == NULL) { + return ERR_RTE; + } +#endif /* LWIP_IPV6 */ + } else { + /* use RAW PCB local IP address as source address */ + src_ip = &pcb->local_ip; + } + + return raw_sendto_if_src(pcb, p, ipaddr, netif, src_ip); +} + +/** + * @ingroup raw_raw + * Send the raw IP packet to the given address, using a particular outgoing + * netif and source IP address. An IP header will be prepended to the packet, + * unless the RAW_FLAGS_HDRINCL flag is set on the PCB. In that case, the + * packet must include an IP header, which will then be sent as is. + * + * @param pcb RAW PCB used to send the data + * @param p chain of pbufs to be sent + * @param dst_ip destination IP address + * @param netif the netif used for sending + * @param src_ip source IP address + */ +err_t +raw_sendto_if_src(struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, + struct netif *netif, const ip_addr_t *src_ip) +{ + err_t err; + struct pbuf *q; /* q will be sent down the stack */ + u16_t header_size; + u8_t ttl; + + LWIP_ASSERT_CORE_LOCKED(); + + if ((pcb == NULL) || (dst_ip == NULL) || (netif == NULL) || (src_ip == NULL) || + !IP_ADDR_PCB_VERSION_MATCH(pcb, src_ip) || !IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { + return ERR_VAL; + } + + header_size = ( +#if LWIP_IPV4 && LWIP_IPV6 + IP_IS_V6(dst_ip) ? IP6_HLEN : IP_HLEN); +#elif LWIP_IPV4 + IP_HLEN); +#else + IP6_HLEN); +#endif + + /* Handle the HDRINCL option as an exception: none of the code below applies + * to this case, and sending the packet needs to be done differently too. */ + if (pcb->flags & RAW_FLAGS_HDRINCL) { + /* A full header *must* be present in the first pbuf of the chain, as the + * output routines may access its fields directly. */ + if (p->len < header_size) { + return ERR_VAL; + } + /* @todo multicast loop support, if at all desired for this scenario.. */ + NETIF_SET_HINTS(netif, &pcb->netif_hints); + err = ip_output_if_hdrincl(p, src_ip, dst_ip, netif); + NETIF_RESET_HINTS(netif); + return err; + } + + /* packet too large to add an IP header without causing an overflow? 
*/ + if ((u16_t)(p->tot_len + header_size) < p->tot_len) { + return ERR_MEM; + } + /* not enough space to add an IP header to first pbuf in given p chain? */ + if (pbuf_add_header(p, header_size)) { + /* allocate header in new pbuf */ + q = pbuf_alloc(PBUF_IP, 0, PBUF_RAM); + /* new header pbuf could not be allocated? */ + if (q == NULL) { + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("raw_sendto: could not allocate header\n")); + return ERR_MEM; + } + if (p->tot_len != 0) { + /* chain header q in front of given pbuf p */ + pbuf_chain(q, p); + } + /* { first pbuf q points to header pbuf } */ + LWIP_DEBUGF(RAW_DEBUG, ("raw_sendto: added header pbuf %p before given pbuf %p\n", (void *)q, (void *)p)); + } else { + /* first pbuf q equals given pbuf */ + q = p; + if (pbuf_remove_header(q, header_size)) { + LWIP_ASSERT("Can't restore header we just removed!", 0); + return ERR_MEM; + } + } + +#if IP_SOF_BROADCAST + if (IP_IS_V4(dst_ip)) { + /* broadcast filter? */ + if (!ip_get_option(pcb, SOF_BROADCAST) && ip_addr_isbroadcast(dst_ip, netif)) { + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ("raw_sendto: SOF_BROADCAST not enabled on pcb %p\n", (void *)pcb)); + /* free any temporary header pbuf allocated by pbuf_header() */ + if (q != p) { + pbuf_free(q); + } + return ERR_VAL; + } + } +#endif /* IP_SOF_BROADCAST */ + + /* Multicast Loop? */ +#if LWIP_MULTICAST_TX_OPTIONS + if (((pcb->flags & RAW_FLAGS_MULTICAST_LOOP) != 0) && ip_addr_ismulticast(dst_ip)) { + q->flags |= PBUF_FLAG_MCASTLOOP; + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#if LWIP_IPV6 + /* If requested, based on the IPV6_CHECKSUM socket option per RFC3542, + compute the checksum and update the checksum in the payload. */ + if (IP_IS_V6(dst_ip) && pcb->chksum_reqd) { + u16_t chksum = ip6_chksum_pseudo(p, pcb->protocol, p->tot_len, ip_2_ip6(src_ip), ip_2_ip6(dst_ip)); + LWIP_ASSERT("Checksum must fit into first pbuf", p->len >= (pcb->chksum_offset + 2)); + SMEMCPY(((u8_t *)p->payload) + pcb->chksum_offset, &chksum, sizeof(u16_t)); + } +#endif + + /* Determine TTL to use */ +#if LWIP_MULTICAST_TX_OPTIONS + ttl = (ip_addr_ismulticast(dst_ip) ? raw_get_multicast_ttl(pcb) : pcb->ttl); +#else /* LWIP_MULTICAST_TX_OPTIONS */ + ttl = pcb->ttl; +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + NETIF_SET_HINTS(netif, &pcb->netif_hints); + err = ip_output_if(q, src_ip, dst_ip, ttl, pcb->tos, pcb->protocol, netif); + NETIF_RESET_HINTS(netif); + + /* did we chain a header earlier? */ + if (q != p) { + /* free the header */ + pbuf_free(q); + } + return err; +} + +/** + * @ingroup raw_raw + * Send the raw IP packet to the address given by raw_connect() + * + * @param pcb the raw pcb which to send + * @param p the IP payload to send + * + */ +err_t +raw_send(struct raw_pcb *pcb, struct pbuf *p) +{ + return raw_sendto(pcb, p, &pcb->remote_ip); +} + +/** + * @ingroup raw_raw + * Remove an RAW PCB. + * + * @param pcb RAW PCB to be removed. The PCB is removed from the list of + * RAW PCB's and the data structure is freed from memory. + * + * @see raw_new() + */ +void +raw_remove(struct raw_pcb *pcb) +{ + struct raw_pcb *pcb2; + LWIP_ASSERT_CORE_LOCKED(); + /* pcb to be removed is first in list? 
*/ + if (raw_pcbs == pcb) { + /* make list start at 2nd pcb */ + raw_pcbs = raw_pcbs->next; + /* pcb not 1st in list */ + } else { + for (pcb2 = raw_pcbs; pcb2 != NULL; pcb2 = pcb2->next) { + /* find pcb in raw_pcbs list */ + if (pcb2->next != NULL && pcb2->next == pcb) { + /* remove pcb from list */ + pcb2->next = pcb->next; + break; + } + } + } + memp_free(MEMP_RAW_PCB, pcb); +} + +/** + * @ingroup raw_raw + * Create a RAW PCB. + * + * @return The RAW PCB which was created. NULL if the PCB data structure + * could not be allocated. + * + * @param proto the protocol number of the IPs payload (e.g. IP_PROTO_ICMP) + * + * @see raw_remove() + */ +struct raw_pcb * +raw_new(u8_t proto) +{ + struct raw_pcb *pcb; + + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE, ("raw_new\n")); + LWIP_ASSERT_CORE_LOCKED(); + + pcb = (struct raw_pcb *)memp_malloc(MEMP_RAW_PCB); + /* could allocate RAW PCB? */ + if (pcb != NULL) { + /* initialize PCB to all zeroes */ + memset(pcb, 0, sizeof(struct raw_pcb)); + pcb->protocol = proto; + pcb->ttl = RAW_TTL; +#if LWIP_MULTICAST_TX_OPTIONS + raw_set_multicast_ttl(pcb, RAW_TTL); +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + pcb->next = raw_pcbs; + raw_pcbs = pcb; + } + return pcb; +} + +/** + * @ingroup raw_raw + * Create a RAW PCB for specific IP type. + * + * @return The RAW PCB which was created. NULL if the PCB data structure + * could not be allocated. + * + * @param type IP address type, see @ref lwip_ip_addr_type definitions. + * If you want to listen to IPv4 and IPv6 (dual-stack) packets, + * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE. + * @param proto the protocol number (next header) of the IPv6 packet payload + * (e.g. IP6_NEXTH_ICMP6) + * + * @see raw_remove() + */ +struct raw_pcb * +raw_new_ip_type(u8_t type, u8_t proto) +{ + struct raw_pcb *pcb; + LWIP_ASSERT_CORE_LOCKED(); + pcb = raw_new(proto); +#if LWIP_IPV4 && LWIP_IPV6 + if (pcb != NULL) { + IP_SET_TYPE_VAL(pcb->local_ip, type); + IP_SET_TYPE_VAL(pcb->remote_ip, type); + } +#else /* LWIP_IPV4 && LWIP_IPV6 */ + LWIP_UNUSED_ARG(type); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + return pcb; +} + +/** This function is called from netif.c when address is changed + * + * @param old_addr IP address of the netif before change + * @param new_addr IP address of the netif after change + */ +void raw_netif_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr) +{ + struct raw_pcb *rpcb; + + if (!ip_addr_isany(old_addr) && !ip_addr_isany(new_addr)) { + for (rpcb = raw_pcbs; rpcb != NULL; rpcb = rpcb->next) { + /* PCB bound to current local interface address? */ + if (ip_addr_cmp(&rpcb->local_ip, old_addr)) { + /* The PCB is bound to the old ipaddr and + * is set to bound to the new one instead */ + ip_addr_copy(rpcb->local_ip, *new_addr); + } + } + } +} + +#endif /* LWIP_RAW */ diff --git a/Middlewares/Third_Party/LwIP/src/core/stats.c b/Middlewares/Third_Party/LwIP/src/core/stats.c new file mode 100644 index 0000000..34e9b27 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/stats.c @@ -0,0 +1,169 @@ +/** + * @file + * Statistics module + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_STATS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/stats.h" +#include "lwip/mem.h" +#include "lwip/debug.h" + +#include + +struct stats_ lwip_stats; + +void +stats_init(void) +{ +#ifdef LWIP_DEBUG +#if MEM_STATS + lwip_stats.mem.name = "MEM"; +#endif /* MEM_STATS */ +#endif /* LWIP_DEBUG */ +} + +#if LWIP_STATS_DISPLAY +void +stats_display_proto(struct stats_proto *proto, const char *name) +{ + LWIP_PLATFORM_DIAG(("\n%s\n\t", name)); + LWIP_PLATFORM_DIAG(("xmit: %"STAT_COUNTER_F"\n\t", proto->xmit)); + LWIP_PLATFORM_DIAG(("recv: %"STAT_COUNTER_F"\n\t", proto->recv)); + LWIP_PLATFORM_DIAG(("fw: %"STAT_COUNTER_F"\n\t", proto->fw)); + LWIP_PLATFORM_DIAG(("drop: %"STAT_COUNTER_F"\n\t", proto->drop)); + LWIP_PLATFORM_DIAG(("chkerr: %"STAT_COUNTER_F"\n\t", proto->chkerr)); + LWIP_PLATFORM_DIAG(("lenerr: %"STAT_COUNTER_F"\n\t", proto->lenerr)); + LWIP_PLATFORM_DIAG(("memerr: %"STAT_COUNTER_F"\n\t", proto->memerr)); + LWIP_PLATFORM_DIAG(("rterr: %"STAT_COUNTER_F"\n\t", proto->rterr)); + LWIP_PLATFORM_DIAG(("proterr: %"STAT_COUNTER_F"\n\t", proto->proterr)); + LWIP_PLATFORM_DIAG(("opterr: %"STAT_COUNTER_F"\n\t", proto->opterr)); + LWIP_PLATFORM_DIAG(("err: %"STAT_COUNTER_F"\n\t", proto->err)); + LWIP_PLATFORM_DIAG(("cachehit: %"STAT_COUNTER_F"\n", proto->cachehit)); +} + +#if IGMP_STATS || MLD6_STATS +void +stats_display_igmp(struct stats_igmp *igmp, const char *name) +{ + LWIP_PLATFORM_DIAG(("\n%s\n\t", name)); + LWIP_PLATFORM_DIAG(("xmit: %"STAT_COUNTER_F"\n\t", igmp->xmit)); + LWIP_PLATFORM_DIAG(("recv: %"STAT_COUNTER_F"\n\t", igmp->recv)); + LWIP_PLATFORM_DIAG(("drop: %"STAT_COUNTER_F"\n\t", igmp->drop)); + LWIP_PLATFORM_DIAG(("chkerr: %"STAT_COUNTER_F"\n\t", igmp->chkerr)); + LWIP_PLATFORM_DIAG(("lenerr: %"STAT_COUNTER_F"\n\t", igmp->lenerr)); + LWIP_PLATFORM_DIAG(("memerr: %"STAT_COUNTER_F"\n\t", igmp->memerr)); + LWIP_PLATFORM_DIAG(("proterr: %"STAT_COUNTER_F"\n\t", igmp->proterr)); + LWIP_PLATFORM_DIAG(("rx_v1: %"STAT_COUNTER_F"\n\t", igmp->rx_v1)); + LWIP_PLATFORM_DIAG(("rx_group: %"STAT_COUNTER_F"\n\t", igmp->rx_group)); + LWIP_PLATFORM_DIAG(("rx_general: %"STAT_COUNTER_F"\n\t", igmp->rx_general)); + LWIP_PLATFORM_DIAG(("rx_report: %"STAT_COUNTER_F"\n\t", igmp->rx_report)); + LWIP_PLATFORM_DIAG(("tx_join: 
%"STAT_COUNTER_F"\n\t", igmp->tx_join)); + LWIP_PLATFORM_DIAG(("tx_leave: %"STAT_COUNTER_F"\n\t", igmp->tx_leave)); + LWIP_PLATFORM_DIAG(("tx_report: %"STAT_COUNTER_F"\n", igmp->tx_report)); +} +#endif /* IGMP_STATS || MLD6_STATS */ + +#if MEM_STATS || MEMP_STATS +void +stats_display_mem(struct stats_mem *mem, const char *name) +{ + LWIP_PLATFORM_DIAG(("\nMEM %s\n\t", name)); + LWIP_PLATFORM_DIAG(("avail: %"MEM_SIZE_F"\n\t", mem->avail)); + LWIP_PLATFORM_DIAG(("used: %"MEM_SIZE_F"\n\t", mem->used)); + LWIP_PLATFORM_DIAG(("max: %"MEM_SIZE_F"\n\t", mem->max)); + LWIP_PLATFORM_DIAG(("err: %"STAT_COUNTER_F"\n", mem->err)); +} + +#if MEMP_STATS +void +stats_display_memp(struct stats_mem *mem, int idx) +{ + if (idx < MEMP_MAX) { + stats_display_mem(mem, mem->name); + } +} +#endif /* MEMP_STATS */ +#endif /* MEM_STATS || MEMP_STATS */ + +#if SYS_STATS +void +stats_display_sys(struct stats_sys *sys) +{ + LWIP_PLATFORM_DIAG(("\nSYS\n\t")); + LWIP_PLATFORM_DIAG(("sem.used: %"STAT_COUNTER_F"\n\t", sys->sem.used)); + LWIP_PLATFORM_DIAG(("sem.max: %"STAT_COUNTER_F"\n\t", sys->sem.max)); + LWIP_PLATFORM_DIAG(("sem.err: %"STAT_COUNTER_F"\n\t", sys->sem.err)); + LWIP_PLATFORM_DIAG(("mutex.used: %"STAT_COUNTER_F"\n\t", sys->mutex.used)); + LWIP_PLATFORM_DIAG(("mutex.max: %"STAT_COUNTER_F"\n\t", sys->mutex.max)); + LWIP_PLATFORM_DIAG(("mutex.err: %"STAT_COUNTER_F"\n\t", sys->mutex.err)); + LWIP_PLATFORM_DIAG(("mbox.used: %"STAT_COUNTER_F"\n\t", sys->mbox.used)); + LWIP_PLATFORM_DIAG(("mbox.max: %"STAT_COUNTER_F"\n\t", sys->mbox.max)); + LWIP_PLATFORM_DIAG(("mbox.err: %"STAT_COUNTER_F"\n", sys->mbox.err)); +} +#endif /* SYS_STATS */ + +void +stats_display(void) +{ + s16_t i; + + LINK_STATS_DISPLAY(); + ETHARP_STATS_DISPLAY(); + IPFRAG_STATS_DISPLAY(); + IP6_FRAG_STATS_DISPLAY(); + IP_STATS_DISPLAY(); + ND6_STATS_DISPLAY(); + IP6_STATS_DISPLAY(); + IGMP_STATS_DISPLAY(); + MLD6_STATS_DISPLAY(); + ICMP_STATS_DISPLAY(); + ICMP6_STATS_DISPLAY(); + UDP_STATS_DISPLAY(); + TCP_STATS_DISPLAY(); + MEM_STATS_DISPLAY(); + for (i = 0; i < MEMP_MAX; i++) { + MEMP_STATS_DISPLAY(i); + } + SYS_STATS_DISPLAY(); +} +#endif /* LWIP_STATS_DISPLAY */ + +#endif /* LWIP_STATS */ + diff --git a/Middlewares/Third_Party/LwIP/src/core/sys.c b/Middlewares/Third_Party/LwIP/src/core/sys.c new file mode 100644 index 0000000..5f08352 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/sys.c @@ -0,0 +1,148 @@ +/** + * @file + * lwIP Operating System abstraction + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/** + * @defgroup sys_layer Porting (system abstraction layer) + * @ingroup lwip + * + * @defgroup sys_os OS abstraction layer + * @ingroup sys_layer + * No need to implement functions in this section in NO_SYS mode. + * The OS-specific code should be implemented in arch/sys_arch.h + * and sys_arch.c of your port. + * + * The operating system emulation layer provides a common interface + * between the lwIP code and the underlying operating system kernel. The + * general idea is that porting lwIP to new architectures requires only + * small changes to a few header files and a new sys_arch + * implementation. It is also possible to do a sys_arch implementation + * that does not rely on any underlying operating system. + * + * The sys_arch provides semaphores, mailboxes and mutexes to lwIP. For the full + * lwIP functionality, multiple threads support can be implemented in the + * sys_arch, but this is not required for the basic lwIP + * functionality. Timer scheduling is implemented in lwIP, but can be implemented + * by the sys_arch port (LWIP_TIMERS_CUSTOM==1). + * + * In addition to the source file providing the functionality of sys_arch, + * the OS emulation layer must provide several header files defining + * macros used throughout lwip. The files required and the macros they + * must define are listed below the sys_arch description. + * + * Since lwIP 1.4.0, semaphore, mutexes and mailbox functions are prototyped in a way that + * allows both using pointers or actual OS structures to be used. This way, memory + * required for such types can be either allocated in place (globally or on the + * stack) or on the heap (allocated internally in the "*_new()" functions). + * + * Note: + * ----- + * Be careful with using mem_malloc() in sys_arch. When malloc() refers to + * mem_malloc() you can run into a circular function call problem. In mem.c + * mem_init() tries to allocate a semaphore using mem_malloc, which of course + * can't be performed when sys_arch uses mem_malloc. + * + * @defgroup sys_sem Semaphores + * @ingroup sys_os + * Semaphores can be either counting or binary - lwIP works with both + * kinds. + * Semaphores are represented by the type "sys_sem_t" which is typedef'd + * in the sys_arch.h file. Mailboxes are equivalently represented by the + * type "sys_mbox_t". Mutexes are represented by the type "sys_mutex_t". + * lwIP does not place any restrictions on how these types are represented + * internally. + * + * @defgroup sys_mutex Mutexes + * @ingroup sys_os + * Mutexes are recommended to correctly handle priority inversion, + * especially if you use LWIP_CORE_LOCKING . + * + * @defgroup sys_mbox Mailboxes + * @ingroup sys_os + * Mailboxes should be implemented as a queue which allows multiple messages + * to be posted (implementing as a rendez-vous point where only one message can be + * posted at a time can have a highly negative impact on performance). 
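Below is a minimal sketch of the calling pattern lwIP expects from these port primitives, assuming a port where sys_mbox_t is already implemented; demo_mbox_roundtrip and item are invented names and the code is illustrative only, not part of this file.

#include "lwip/sys.h"
#include "lwip/err.h"

static void
demo_mbox_roundtrip(void)
{
  sys_mbox_t mbox;
  static int item = 42;
  void *msg = NULL;

  if (sys_mbox_new(&mbox, 8) != ERR_OK) {     /* queue with room for 8 pointers */
    return;
  }
  sys_mbox_post(&mbox, &item);                /* a posted message is just a pointer */

  /* block for up to 100 ms; SYS_ARCH_TIMEOUT means nothing arrived in time */
  if (sys_arch_mbox_fetch(&mbox, &msg, 100) != SYS_ARCH_TIMEOUT) {
    /* handle *(int *)msg here */
  }
  sys_mbox_free(&mbox);
}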
A message + * in a mailbox is just a pointer, nothing more. + * + * @defgroup sys_time Time + * @ingroup sys_layer + * + * @defgroup sys_prot Critical sections + * @ingroup sys_layer + * Used to protect short regions of code against concurrent access. + * - Your system is a bare-metal system (probably with an RTOS) + * and interrupts are under your control: + * Implement this as LockInterrupts() / UnlockInterrupts() + * - Your system uses an RTOS with deferred interrupt handling from a + * worker thread: Implement as a global mutex or lock/unlock scheduler + * - Your system uses a high-level OS with e.g. POSIX signals: + * Implement as a global mutex + * + * @defgroup sys_misc Misc + * @ingroup sys_os + */ + +#include "lwip/opt.h" + +#include "lwip/sys.h" + +/* Most of the functions defined in sys.h must be implemented in the + * architecture-dependent file sys_arch.c */ + +#if !NO_SYS + +#ifndef sys_msleep +/** + * Sleep for some ms. Timeouts are NOT processed while sleeping. + * + * @param ms number of milliseconds to sleep + */ +void +sys_msleep(u32_t ms) +{ + if (ms > 0) { + sys_sem_t delaysem; + err_t err = sys_sem_new(&delaysem, 0); + if (err == ERR_OK) { + sys_arch_sem_wait(&delaysem, ms); + sys_sem_free(&delaysem); + } + } +} +#endif /* sys_msleep */ + +#endif /* !NO_SYS */ diff --git a/Middlewares/Third_Party/LwIP/src/core/tcp.c b/Middlewares/Third_Party/LwIP/src/core/tcp.c new file mode 100644 index 0000000..bd7d64e --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/tcp.c @@ -0,0 +1,2686 @@ +/** + * @file + * Transmission Control Protocol for IP + * See also @ref tcp_raw + * + * @defgroup tcp_raw TCP + * @ingroup callbackstyle_api + * Transmission Control Protocol for IP\n + * @see @ref api + * + * Common functions for the TCP implementation, such as functions + * for manipulating the data structures and the TCP timer functions. TCP functions + * related to input and output is found in tcp_in.c and tcp_out.c respectively.\n + * + * TCP connection setup + * -------------------- + * The functions used for setting up connections is similar to that of + * the sequential API and of the BSD socket API. A new TCP connection + * identifier (i.e., a protocol control block - PCB) is created with the + * tcp_new() function. This PCB can then be either set to listen for new + * incoming connections or be explicitly connected to another host. + * - tcp_new() + * - tcp_bind() + * - tcp_listen() and tcp_listen_with_backlog() + * - tcp_accept() + * - tcp_connect() + * + * Sending TCP data + * ---------------- + * TCP data is sent by enqueueing the data with a call to tcp_write() and + * triggering to send by calling tcp_output(). When the data is successfully + * transmitted to the remote host, the application will be notified with a + * call to a specified callback function. + * - tcp_write() + * - tcp_output() + * - tcp_sent() + * + * Receiving TCP data + * ------------------ + * TCP data reception is callback based - an application specified + * callback function is called when new data arrives. When the + * application has taken the data, it has to call the tcp_recved() + * function to indicate that TCP can advertise increase the receive + * window. + * - tcp_recv() + * - tcp_recved() + * + * Application polling + * ------------------- + * When a connection is idle (i.e., no data is either transmitted or + * received), lwIP will repeatedly poll the application by calling a + * specified callback function. 
This can be used either as a watchdog + * timer for killing connections that have stayed idle for too long, or + * as a method of waiting for memory to become available. For instance, + * if a call to tcp_write() has failed because memory wasn't available, + * the application may use the polling functionality to call tcp_write() + * again when the connection has been idle for a while. + * - tcp_poll() + * + * Closing and aborting connections + * -------------------------------- + * - tcp_close() + * - tcp_abort() + * - tcp_err() + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/tcp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/debug.h" +#include "lwip/stats.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/nd6.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +#ifndef TCP_LOCAL_PORT_RANGE_START +/* From http://www.iana.org/assignments/port-numbers: + "The Dynamic and/or Private Ports are those from 49152 through 65535" */ +#define TCP_LOCAL_PORT_RANGE_START 0xc000 +#define TCP_LOCAL_PORT_RANGE_END 0xffff +#define TCP_ENSURE_LOCAL_PORT_RANGE(port) ((u16_t)(((port) & (u16_t)~TCP_LOCAL_PORT_RANGE_START) + TCP_LOCAL_PORT_RANGE_START)) +#endif + +#if LWIP_TCP_KEEPALIVE +#define TCP_KEEP_DUR(pcb) ((pcb)->keep_cnt * (pcb)->keep_intvl) +#define TCP_KEEP_INTVL(pcb) ((pcb)->keep_intvl) +#else /* LWIP_TCP_KEEPALIVE */ +#define TCP_KEEP_DUR(pcb) TCP_MAXIDLE +#define TCP_KEEP_INTVL(pcb) TCP_KEEPINTVL_DEFAULT +#endif /* LWIP_TCP_KEEPALIVE */ + +/* As initial send MSS, we use TCP_MSS but limit it to 536. 
*/ +#if TCP_MSS > 536 +#define INITIAL_MSS 536 +#else +#define INITIAL_MSS TCP_MSS +#endif + +static const char *const tcp_state_str[] = { + "CLOSED", + "LISTEN", + "SYN_SENT", + "SYN_RCVD", + "ESTABLISHED", + "FIN_WAIT_1", + "FIN_WAIT_2", + "CLOSE_WAIT", + "CLOSING", + "LAST_ACK", + "TIME_WAIT" +}; + +/* last local TCP port */ +static u16_t tcp_port = TCP_LOCAL_PORT_RANGE_START; + +/* Incremented every coarse grained timer shot (typically every 500 ms). */ +u32_t tcp_ticks; +static const u8_t tcp_backoff[13] = +{ 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7}; +/* Times per slowtmr hits */ +static const u8_t tcp_persist_backoff[7] = { 3, 6, 12, 24, 48, 96, 120 }; + +/* The TCP PCB lists. */ + +/** List of all TCP PCBs bound but not yet (connected || listening) */ +struct tcp_pcb *tcp_bound_pcbs; +/** List of all TCP PCBs in LISTEN state */ +union tcp_listen_pcbs_t tcp_listen_pcbs; +/** List of all TCP PCBs that are in a state in which + * they accept or send data. */ +struct tcp_pcb *tcp_active_pcbs; +/** List of all TCP PCBs in TIME-WAIT state */ +struct tcp_pcb *tcp_tw_pcbs; + +/** An array with all (non-temporary) PCB lists, mainly used for smaller code size */ +struct tcp_pcb **const tcp_pcb_lists[] = {&tcp_listen_pcbs.pcbs, &tcp_bound_pcbs, + &tcp_active_pcbs, &tcp_tw_pcbs +}; + +u8_t tcp_active_pcbs_changed; + +/** Timer counter to handle calling slow-timer from tcp_tmr() */ +static u8_t tcp_timer; +static u8_t tcp_timer_ctr; +static u16_t tcp_new_port(void); + +static err_t tcp_close_shutdown_fin(struct tcp_pcb *pcb); +#if LWIP_TCP_PCB_NUM_EXT_ARGS +static void tcp_ext_arg_invoke_callbacks_destroyed(struct tcp_pcb_ext_args *ext_args); +#endif + +/** + * Initialize this module. + */ +void +tcp_init(void) +{ +#ifdef LWIP_RAND + tcp_port = TCP_ENSURE_LOCAL_PORT_RANGE(LWIP_RAND()); +#endif /* LWIP_RAND */ +} + +/** Free a tcp pcb */ +void +tcp_free(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_free: LISTEN", pcb->state != LISTEN); +#if LWIP_TCP_PCB_NUM_EXT_ARGS + tcp_ext_arg_invoke_callbacks_destroyed(pcb->ext_args); +#endif + memp_free(MEMP_TCP_PCB, pcb); +} + +/** Free a tcp listen pcb */ +static void +tcp_free_listen(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_free_listen: !LISTEN", pcb->state != LISTEN); +#if LWIP_TCP_PCB_NUM_EXT_ARGS + tcp_ext_arg_invoke_callbacks_destroyed(pcb->ext_args); +#endif + memp_free(MEMP_TCP_PCB_LISTEN, pcb); +} + +/** + * Called periodically to dispatch TCP timers. + */ +void +tcp_tmr(void) +{ + /* Call tcp_fasttmr() every 250 ms */ + tcp_fasttmr(); + + if (++tcp_timer & 1) { + /* Call tcp_slowtmr() every 500 ms, i.e., every other timer + tcp_tmr() is called. */ + tcp_slowtmr(); + } +} + +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG +/** Called when a listen pcb is closed. Iterates one pcb list and removes the + * closed listener pcb from pcb->listener if matching. + */ +static void +tcp_remove_listener(struct tcp_pcb *list, struct tcp_pcb_listen *lpcb) +{ + struct tcp_pcb *pcb; + + LWIP_ASSERT("tcp_remove_listener: invalid listener", lpcb != NULL); + + for (pcb = list; pcb != NULL; pcb = pcb->next) { + if (pcb->listener == lpcb) { + pcb->listener = NULL; + } + } +} +#endif + +/** Called when a listen pcb is closed. Iterates all pcb lists and removes the + * closed listener pcb from pcb->listener if matching. 
+ */ +static void +tcp_listen_closed(struct tcp_pcb *pcb) +{ +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG + size_t i; + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT("pcb->state == LISTEN", pcb->state == LISTEN); + for (i = 1; i < LWIP_ARRAYSIZE(tcp_pcb_lists); i++) { + tcp_remove_listener(*tcp_pcb_lists[i], (struct tcp_pcb_listen *)pcb); + } +#endif + LWIP_UNUSED_ARG(pcb); +} + +#if TCP_LISTEN_BACKLOG +/** @ingroup tcp_raw + * Delay accepting a connection in respect to the listen backlog: + * the number of outstanding connections is increased until + * tcp_backlog_accepted() is called. + * + * ATTENTION: the caller is responsible for calling tcp_backlog_accepted() + * or else the backlog feature will get out of sync! + * + * @param pcb the connection pcb which is not fully accepted yet + */ +void +tcp_backlog_delayed(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb->flags & TF_BACKLOGPEND) == 0) { + if (pcb->listener != NULL) { + pcb->listener->accepts_pending++; + LWIP_ASSERT("accepts_pending != 0", pcb->listener->accepts_pending != 0); + tcp_set_flags(pcb, TF_BACKLOGPEND); + } + } +} + +/** @ingroup tcp_raw + * A delayed-accept a connection is accepted (or closed/aborted): decreases + * the number of outstanding connections after calling tcp_backlog_delayed(). + * + * ATTENTION: the caller is responsible for calling tcp_backlog_accepted() + * or else the backlog feature will get out of sync! + * + * @param pcb the connection pcb which is now fully accepted (or closed/aborted) + */ +void +tcp_backlog_accepted(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb->flags & TF_BACKLOGPEND) != 0) { + if (pcb->listener != NULL) { + LWIP_ASSERT("accepts_pending != 0", pcb->listener->accepts_pending != 0); + pcb->listener->accepts_pending--; + tcp_clear_flags(pcb, TF_BACKLOGPEND); + } + } +} +#endif /* TCP_LISTEN_BACKLOG */ + +/** + * Closes the TX side of a connection held by the PCB. + * For tcp_close(), a RST is sent if the application didn't receive all data + * (tcp_recved() not called for all data passed to recv callback). + * + * Listening pcbs are freed and may not be referenced any more. + * Connection pcbs are freed if not yet connected and may not be referenced + * any more. If a connection is established (at least SYN received or in + * a closing state), the connection is closed, and put in a closing state. + * The pcb is then automatically freed in tcp_slowtmr(). It is therefore + * unsafe to reference it. + * + * @param pcb the tcp_pcb to close + * @return ERR_OK if connection has been closed + * another err_t if closing failed and pcb is not freed + */ +static err_t +tcp_close_shutdown(struct tcp_pcb *pcb, u8_t rst_on_unacked_data) +{ + LWIP_ASSERT("tcp_close_shutdown: invalid pcb", pcb != NULL); + + if (rst_on_unacked_data && ((pcb->state == ESTABLISHED) || (pcb->state == CLOSE_WAIT))) { + if ((pcb->refused_data != NULL) || (pcb->rcv_wnd != TCP_WND_MAX(pcb))) { + /* Not all data received by application, send RST to tell the remote + side about this. 
*/ + LWIP_ASSERT("pcb->flags & TF_RXCLOSED", pcb->flags & TF_RXCLOSED); + + /* don't call tcp_abort here: we must not deallocate the pcb since + that might not be expected when calling tcp_close */ + tcp_rst(pcb, pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip, + pcb->local_port, pcb->remote_port); + + tcp_pcb_purge(pcb); + TCP_RMV_ACTIVE(pcb); + /* Deallocate the pcb since we already sent a RST for it */ + if (tcp_input_pcb == pcb) { + /* prevent using a deallocated pcb: free it from tcp_input later */ + tcp_trigger_input_pcb_close(); + } else { + tcp_free(pcb); + } + return ERR_OK; + } + } + + /* - states which free the pcb are handled here, + - states which send FIN and change state are handled in tcp_close_shutdown_fin() */ + switch (pcb->state) { + case CLOSED: + /* Closing a pcb in the CLOSED state might seem erroneous, + * however, it is in this state once allocated and as yet unused + * and the user needs some way to free it should the need arise. + * Calling tcp_close() with a pcb that has already been closed, (i.e. twice) + * or for a pcb that has been used and then entered the CLOSED state + * is erroneous, but this should never happen as the pcb has in those cases + * been freed, and so any remaining handles are bogus. */ + if (pcb->local_port != 0) { + TCP_RMV(&tcp_bound_pcbs, pcb); + } + tcp_free(pcb); + break; + case LISTEN: + tcp_listen_closed(pcb); + tcp_pcb_remove(&tcp_listen_pcbs.pcbs, pcb); + tcp_free_listen(pcb); + break; + case SYN_SENT: + TCP_PCB_REMOVE_ACTIVE(pcb); + tcp_free(pcb); + MIB2_STATS_INC(mib2.tcpattemptfails); + break; + default: + return tcp_close_shutdown_fin(pcb); + } + return ERR_OK; +} + +static err_t +tcp_close_shutdown_fin(struct tcp_pcb *pcb) +{ + err_t err; + LWIP_ASSERT("pcb != NULL", pcb != NULL); + + switch (pcb->state) { + case SYN_RCVD: + err = tcp_send_fin(pcb); + if (err == ERR_OK) { + tcp_backlog_accepted(pcb); + MIB2_STATS_INC(mib2.tcpattemptfails); + pcb->state = FIN_WAIT_1; + } + break; + case ESTABLISHED: + err = tcp_send_fin(pcb); + if (err == ERR_OK) { + MIB2_STATS_INC(mib2.tcpestabresets); + pcb->state = FIN_WAIT_1; + } + break; + case CLOSE_WAIT: + err = tcp_send_fin(pcb); + if (err == ERR_OK) { + MIB2_STATS_INC(mib2.tcpestabresets); + pcb->state = LAST_ACK; + } + break; + default: + /* Has already been closed, do nothing. */ + return ERR_OK; + } + + if (err == ERR_OK) { + /* To ensure all data has been sent when tcp_close returns, we have + to make sure tcp_output doesn't fail. + Since we don't really have to ensure all data has been sent when tcp_close + returns (unsent data is sent from tcp timer functions, also), we don't care + for the return value of tcp_output for now. */ + tcp_output(pcb); + } else if (err == ERR_MEM) { + /* Mark this pcb for closing. Closing is retried from tcp_tmr. */ + tcp_set_flags(pcb, TF_CLOSEPEND); + /* We have to return ERR_OK from here to indicate to the callers that this + pcb should not be used any more as it will be freed soon via tcp_tmr. + This is OK here since sending FIN does not guarantee a time frime for + actually freeing the pcb, either (it is left in closure states for + remote ACK or timeout) */ + return ERR_OK; + } + return err; +} + +/** + * @ingroup tcp_raw + * Closes the connection held by the PCB. + * + * Listening pcbs are freed and may not be referenced any more. + * Connection pcbs are freed if not yet connected and may not be referenced + * any more. 
If a connection is established (at least SYN received or in + * a closing state), the connection is closed, and put in a closing state. + * The pcb is then automatically freed in tcp_slowtmr(). It is therefore + * unsafe to reference it (unless an error is returned). + * + * The function may return ERR_MEM if no memory + * was available for closing the connection. If so, the application + * should wait and try again either by using the acknowledgment + * callback or the polling functionality. If the close succeeds, the + * function returns ERR_OK. + * + * @param pcb the tcp_pcb to close + * @return ERR_OK if connection has been closed + * another err_t if closing failed and pcb is not freed + */ +err_t +tcp_close(struct tcp_pcb *pcb) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_close: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_DEBUGF(TCP_DEBUG, ("tcp_close: closing in ")); + + tcp_debug_print_state(pcb->state); + + if (pcb->state != LISTEN) { + /* Set a flag not to receive any more data... */ + tcp_set_flags(pcb, TF_RXCLOSED); + } + /* ... and close */ + return tcp_close_shutdown(pcb, 1); +} + +/** + * @ingroup tcp_raw + * Causes all or part of a full-duplex connection of this PCB to be shut down. + * This doesn't deallocate the PCB unless shutting down both sides! + * Shutting down both sides is the same as calling tcp_close, so if it succeds + * (i.e. returns ER_OK), the PCB must not be referenced any more! + * + * @param pcb PCB to shutdown + * @param shut_rx shut down receive side if this is != 0 + * @param shut_tx shut down send side if this is != 0 + * @return ERR_OK if shutdown succeeded (or the PCB has already been shut down) + * another err_t on error. + */ +err_t +tcp_shutdown(struct tcp_pcb *pcb, int shut_rx, int shut_tx) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_shutdown: invalid pcb", pcb != NULL, return ERR_ARG); + + if (pcb->state == LISTEN) { + return ERR_CONN; + } + if (shut_rx) { + /* shut down the receive side: set a flag not to receive any more data... */ + tcp_set_flags(pcb, TF_RXCLOSED); + if (shut_tx) { + /* shutting down the tx AND rx side is the same as closing for the raw API */ + return tcp_close_shutdown(pcb, 1); + } + /* ... and free buffered data */ + if (pcb->refused_data != NULL) { + pbuf_free(pcb->refused_data); + pcb->refused_data = NULL; + } + } + if (shut_tx) { + /* This can't happen twice since if it succeeds, the pcb's state is changed. + Only close in these states as the others directly deallocate the PCB */ + switch (pcb->state) { + case SYN_RCVD: + case ESTABLISHED: + case CLOSE_WAIT: + return tcp_close_shutdown(pcb, (u8_t)shut_rx); + default: + /* Not (yet?) connected, cannot shutdown the TX side as that would bring us + into CLOSED state, where the PCB is deallocated. */ + return ERR_CONN; + } + } + return ERR_OK; +} + +/** + * Abandons a connection and optionally sends a RST to the remote + * host. Deletes the local protocol control block. This is done when + * a connection is killed because of shortage of memory. 
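The ERR_MEM advice in the tcp_close() documentation above ("wait and try again ... using the polling functionality") can be followed with a small helper like the sketch below. It is illustrative only, not part of this file; try_close and close_retry_poll are invented names, and the poll interval of 4 corresponds to roughly 2 s (four coarse-timer ticks of 500 ms).

#include "lwip/tcp.h"
#include "lwip/err.h"

static err_t close_retry_poll(void *arg, struct tcp_pcb *tpcb);

/* try to close; if lwIP is out of memory, retry from the poll callback */
static void
try_close(struct tcp_pcb *tpcb)
{
  if (tcp_close(tpcb) == ERR_MEM) {
    tcp_poll(tpcb, close_retry_poll, 4);
  }
}

static err_t
close_retry_poll(void *arg, struct tcp_pcb *tpcb)
{
  LWIP_UNUSED_ARG(arg);
  /* keep retrying until the FIN can be enqueued; once tcp_close()
     succeeds the pcb is owned by the stack and must not be touched again */
  tcp_close(tpcb);
  return ERR_OK;
}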
+ * + * @param pcb the tcp_pcb to abort + * @param reset boolean to indicate whether a reset should be sent + */ +void +tcp_abandon(struct tcp_pcb *pcb, int reset) +{ + u32_t seqno, ackno; +#if LWIP_CALLBACK_API + tcp_err_fn errf; +#endif /* LWIP_CALLBACK_API */ + void *errf_arg; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_abandon: invalid pcb", pcb != NULL, return); + + /* pcb->state LISTEN not allowed here */ + LWIP_ASSERT("don't call tcp_abort/tcp_abandon for listen-pcbs", + pcb->state != LISTEN); + /* Figure out on which TCP PCB list we are, and remove us. If we + are in an active state, call the receive function associated with + the PCB with a NULL argument, and send an RST to the remote end. */ + if (pcb->state == TIME_WAIT) { + tcp_pcb_remove(&tcp_tw_pcbs, pcb); + tcp_free(pcb); + } else { + int send_rst = 0; + u16_t local_port = 0; + enum tcp_state last_state; + seqno = pcb->snd_nxt; + ackno = pcb->rcv_nxt; +#if LWIP_CALLBACK_API + errf = pcb->errf; +#endif /* LWIP_CALLBACK_API */ + errf_arg = pcb->callback_arg; + if (pcb->state == CLOSED) { + if (pcb->local_port != 0) { + /* bound, not yet opened */ + TCP_RMV(&tcp_bound_pcbs, pcb); + } + } else { + send_rst = reset; + local_port = pcb->local_port; + TCP_PCB_REMOVE_ACTIVE(pcb); + } + if (pcb->unacked != NULL) { + tcp_segs_free(pcb->unacked); + } + if (pcb->unsent != NULL) { + tcp_segs_free(pcb->unsent); + } +#if TCP_QUEUE_OOSEQ + if (pcb->ooseq != NULL) { + tcp_segs_free(pcb->ooseq); + } +#endif /* TCP_QUEUE_OOSEQ */ + tcp_backlog_accepted(pcb); + if (send_rst) { + LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_abandon: sending RST\n")); + tcp_rst(pcb, seqno, ackno, &pcb->local_ip, &pcb->remote_ip, local_port, pcb->remote_port); + } + last_state = pcb->state; + tcp_free(pcb); + TCP_EVENT_ERR(last_state, errf, errf_arg, ERR_ABRT); + } +} + +/** + * @ingroup tcp_raw + * Aborts the connection by sending a RST (reset) segment to the remote + * host. The pcb is deallocated. This function never fails. + * + * ATTENTION: When calling this from one of the TCP callbacks, make + * sure you always return ERR_ABRT (and never return ERR_ABRT otherwise + * or you will risk accessing deallocated memory or memory leaks! + * + * @param pcb the tcp pcb to abort + */ +void +tcp_abort(struct tcp_pcb *pcb) +{ + tcp_abandon(pcb, 1); +} + +/** + * @ingroup tcp_raw + * Binds the connection to a local port number and IP address. If the + * IP address is not given (i.e., ipaddr == IP_ANY_TYPE), the connection is + * bound to all local IP addresses. + * If another connection is bound to the same port, the function will + * return ERR_USE, otherwise ERR_OK is returned. + * + * @param pcb the tcp_pcb to bind (no check is done whether this pcb is + * already bound!) 
+ * @param ipaddr the local ip address to bind to (use IPx_ADDR_ANY to bind + * to any local address + * @param port the local port to bind to + * @return ERR_USE if the port is already in use + * ERR_VAL if bind failed because the PCB is not in a valid state + * ERR_OK if bound + */ +err_t +tcp_bind(struct tcp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port) +{ + int i; + int max_pcb_list = NUM_TCP_PCB_LISTS; + struct tcp_pcb *cpcb; +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + ip_addr_t zoned_ipaddr; +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY; + } +#else /* LWIP_IPV4 */ + LWIP_ERROR("tcp_bind: invalid ipaddr", ipaddr != NULL, return ERR_ARG); +#endif /* LWIP_IPV4 */ + + LWIP_ERROR("tcp_bind: invalid pcb", pcb != NULL, return ERR_ARG); + + LWIP_ERROR("tcp_bind: can only bind in state CLOSED", pcb->state == CLOSED, return ERR_VAL); + +#if SO_REUSE + /* Unless the REUSEADDR flag is set, + we have to check the pcbs in TIME-WAIT state, also. + We do not dump TIME_WAIT pcb's; they can still be matched by incoming + packets using both local and remote IP addresses and ports to distinguish. + */ + if (ip_get_option(pcb, SOF_REUSEADDR)) { + max_pcb_list = NUM_TCP_PCB_LISTS_NO_TIME_WAIT; + } +#endif /* SO_REUSE */ + +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now. + * This is legacy support: scope-aware callers should always provide properly + * zoned source addresses. Do the zone selection before the address-in-use + * check below; as such we have to make a temporary copy of the address. */ + if (IP_IS_V6(ipaddr) && ip6_addr_lacks_zone(ip_2_ip6(ipaddr), IP6_UNICAST)) { + ip_addr_copy(zoned_ipaddr, *ipaddr); + ip6_addr_select_zone(ip_2_ip6(&zoned_ipaddr), ip_2_ip6(&zoned_ipaddr)); + ipaddr = &zoned_ipaddr; + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + if (port == 0) { + port = tcp_new_port(); + if (port == 0) { + return ERR_BUF; + } + } else { + /* Check if the address already is in use (on all lists) */ + for (i = 0; i < max_pcb_list; i++) { + for (cpcb = *tcp_pcb_lists[i]; cpcb != NULL; cpcb = cpcb->next) { + if (cpcb->local_port == port) { +#if SO_REUSE + /* Omit checking for the same port if both pcbs have REUSEADDR set. + For SO_REUSEADDR, the duplicate-check for a 5-tuple is done in + tcp_connect. */ + if (!ip_get_option(pcb, SOF_REUSEADDR) || + !ip_get_option(cpcb, SOF_REUSEADDR)) +#endif /* SO_REUSE */ + { + /* @todo: check accept_any_ip_version */ + if ((IP_IS_V6(ipaddr) == IP_IS_V6_VAL(cpcb->local_ip)) && + (ip_addr_isany(&cpcb->local_ip) || + ip_addr_isany(ipaddr) || + ip_addr_cmp(&cpcb->local_ip, ipaddr))) { + return ERR_USE; + } + } + } + } + } + } + + if (!ip_addr_isany(ipaddr) +#if LWIP_IPV4 && LWIP_IPV6 + || (IP_GET_TYPE(ipaddr) != IP_GET_TYPE(&pcb->local_ip)) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + ) { + ip_addr_set(&pcb->local_ip, ipaddr); + } + pcb->local_port = port; + TCP_REG(&tcp_bound_pcbs, pcb); + LWIP_DEBUGF(TCP_DEBUG, ("tcp_bind: bind to port %"U16_F"\n", port)); + return ERR_OK; +} + +/** + * @ingroup tcp_raw + * Binds the connection to a netif and IP address. + * After calling this function, all packets received via this PCB + * are guaranteed to have come in via the specified netif, and all + * outgoing packets will go out via the specified netif. + * + * @param pcb the tcp_pcb to bind. + * @param netif the netif to bind to. Can be NULL. 
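Putting tcp_bind() together with the listen/accept half of the setup sequence from this file's introduction, a server-side sketch could look as follows. It is not part of this source; echo_accept and echo_listen are invented names, an IPv4 build is assumed, and the backlog argument only takes effect when TCP_LISTEN_BACKLOG is enabled in lwipopts.h.

#include "lwip/tcp.h"
#include "lwip/ip_addr.h"

static err_t
echo_accept(void *arg, struct tcp_pcb *newpcb, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  if ((err != ERR_OK) || (newpcb == NULL)) {
    return ERR_VAL;
  }
  /* register per-connection callbacks here (tcp_arg, tcp_recv, tcp_sent, tcp_err) */
  return ERR_OK;
}

static struct tcp_pcb *
echo_listen(void)
{
  struct tcp_pcb *pcb, *lpcb;

  pcb = tcp_new();
  if (pcb == NULL) {
    return NULL;
  }
  if (tcp_bind(pcb, IP_ADDR_ANY, 7) != ERR_OK) {   /* port 7 already in use? */
    tcp_close(pcb);
    return NULL;
  }
  lpcb = tcp_listen_with_backlog(pcb, 1);          /* frees pcb on success */
  if (lpcb == NULL) {
    tcp_close(pcb);                                /* listen failed: pcb is still ours */
    return NULL;
  }
  tcp_accept(lpcb, echo_accept);
  return lpcb;
}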
+ */ +void +tcp_bind_netif(struct tcp_pcb *pcb, const struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (netif != NULL) { + pcb->netif_idx = netif_get_index(netif); + } else { + pcb->netif_idx = NETIF_NO_INDEX; + } +} + +#if LWIP_CALLBACK_API +/** + * Default accept callback if no accept callback is specified by the user. + */ +static err_t +tcp_accept_null(void *arg, struct tcp_pcb *pcb, err_t err) +{ + LWIP_UNUSED_ARG(arg); + LWIP_UNUSED_ARG(err); + + LWIP_ASSERT("tcp_accept_null: invalid pcb", pcb != NULL); + + tcp_abort(pcb); + + return ERR_ABRT; +} +#endif /* LWIP_CALLBACK_API */ + +/** + * @ingroup tcp_raw + * Set the state of the connection to be LISTEN, which means that it + * is able to accept incoming connections. The protocol control block + * is reallocated in order to consume less memory. Setting the + * connection to LISTEN is an irreversible process. + * When an incoming connection is accepted, the function specified with + * the tcp_accept() function will be called. The pcb has to be bound + * to a local port with the tcp_bind() function. + * + * The tcp_listen() function returns a new connection identifier, and + * the one passed as an argument to the function will be + * deallocated. The reason for this behavior is that less memory is + * needed for a connection that is listening, so tcp_listen() will + * reclaim the memory needed for the original connection and allocate a + * new smaller memory block for the listening connection. + * + * tcp_listen() may return NULL if no memory was available for the + * listening connection. If so, the memory associated with the pcb + * passed as an argument to tcp_listen() will not be deallocated. + * + * The backlog limits the number of outstanding connections + * in the listen queue to the value specified by the backlog argument. + * To use it, your need to set TCP_LISTEN_BACKLOG=1 in your lwipopts.h. + * + * @param pcb the original tcp_pcb + * @param backlog the incoming connections queue limit + * @return tcp_pcb used for listening, consumes less memory. + * + * @note The original tcp_pcb is freed. This function therefore has to be + * called like this: + * tpcb = tcp_listen_with_backlog(tpcb, backlog); + */ +struct tcp_pcb * +tcp_listen_with_backlog(struct tcp_pcb *pcb, u8_t backlog) +{ + LWIP_ASSERT_CORE_LOCKED(); + return tcp_listen_with_backlog_and_err(pcb, backlog, NULL); +} + +/** + * @ingroup tcp_raw + * Set the state of the connection to be LISTEN, which means that it + * is able to accept incoming connections. The protocol control block + * is reallocated in order to consume less memory. Setting the + * connection to LISTEN is an irreversible process. + * + * @param pcb the original tcp_pcb + * @param backlog the incoming connections queue limit + * @param err when NULL is returned, this contains the error reason + * @return tcp_pcb used for listening, consumes less memory. + * + * @note The original tcp_pcb is freed. This function therefore has to be + * called like this: + * tpcb = tcp_listen_with_backlog_and_err(tpcb, backlog, &err); + */ +struct tcp_pcb * +tcp_listen_with_backlog_and_err(struct tcp_pcb *pcb, u8_t backlog, err_t *err) +{ + struct tcp_pcb_listen *lpcb = NULL; + err_t res; + + LWIP_UNUSED_ARG(backlog); + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_listen_with_backlog_and_err: invalid pcb", pcb != NULL, res = ERR_ARG; goto done); + LWIP_ERROR("tcp_listen_with_backlog_and_err: pcb already connected", pcb->state == CLOSED, res = ERR_CLSD; goto done); + + /* already listening? 
*/ + if (pcb->state == LISTEN) { + lpcb = (struct tcp_pcb_listen *)pcb; + res = ERR_ALREADY; + goto done; + } +#if SO_REUSE + if (ip_get_option(pcb, SOF_REUSEADDR)) { + /* Since SOF_REUSEADDR allows reusing a local address before the pcb's usage + is declared (listen-/connection-pcb), we have to make sure now that + this port is only used once for every local IP. */ + for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) { + if ((lpcb->local_port == pcb->local_port) && + ip_addr_cmp(&lpcb->local_ip, &pcb->local_ip)) { + /* this address/port is already used */ + lpcb = NULL; + res = ERR_USE; + goto done; + } + } + } +#endif /* SO_REUSE */ + lpcb = (struct tcp_pcb_listen *)memp_malloc(MEMP_TCP_PCB_LISTEN); + if (lpcb == NULL) { + res = ERR_MEM; + goto done; + } + lpcb->callback_arg = pcb->callback_arg; + lpcb->local_port = pcb->local_port; + lpcb->state = LISTEN; + lpcb->prio = pcb->prio; + lpcb->so_options = pcb->so_options; + lpcb->netif_idx = NETIF_NO_INDEX; + lpcb->ttl = pcb->ttl; + lpcb->tos = pcb->tos; +#if LWIP_IPV4 && LWIP_IPV6 + IP_SET_TYPE_VAL(lpcb->remote_ip, pcb->local_ip.type); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + ip_addr_copy(lpcb->local_ip, pcb->local_ip); + if (pcb->local_port != 0) { + TCP_RMV(&tcp_bound_pcbs, pcb); + } +#if LWIP_TCP_PCB_NUM_EXT_ARGS + /* copy over ext_args to listening pcb */ + memcpy(&lpcb->ext_args, &pcb->ext_args, sizeof(pcb->ext_args)); +#endif + tcp_free(pcb); +#if LWIP_CALLBACK_API + lpcb->accept = tcp_accept_null; +#endif /* LWIP_CALLBACK_API */ +#if TCP_LISTEN_BACKLOG + lpcb->accepts_pending = 0; + tcp_backlog_set(lpcb, backlog); +#endif /* TCP_LISTEN_BACKLOG */ + TCP_REG(&tcp_listen_pcbs.pcbs, (struct tcp_pcb *)lpcb); + res = ERR_OK; +done: + if (err != NULL) { + *err = res; + } + return (struct tcp_pcb *)lpcb; +} + +/** + * Update the state that tracks the available window space to advertise. + * + * Returns how much extra window would be advertised if we sent an + * update now. + */ +u32_t +tcp_update_rcv_ann_wnd(struct tcp_pcb *pcb) +{ + u32_t new_right_edge; + + LWIP_ASSERT("tcp_update_rcv_ann_wnd: invalid pcb", pcb != NULL); + new_right_edge = pcb->rcv_nxt + pcb->rcv_wnd; + + if (TCP_SEQ_GEQ(new_right_edge, pcb->rcv_ann_right_edge + LWIP_MIN((TCP_WND / 2), pcb->mss))) { + /* we can advertise more window */ + pcb->rcv_ann_wnd = pcb->rcv_wnd; + return new_right_edge - pcb->rcv_ann_right_edge; + } else { + if (TCP_SEQ_GT(pcb->rcv_nxt, pcb->rcv_ann_right_edge)) { + /* Can happen due to other end sending out of advertised window, + * but within actual available (but not yet advertised) window */ + pcb->rcv_ann_wnd = 0; + } else { + /* keep the right edge of window constant */ + u32_t new_rcv_ann_wnd = pcb->rcv_ann_right_edge - pcb->rcv_nxt; +#if !LWIP_WND_SCALE + LWIP_ASSERT("new_rcv_ann_wnd <= 0xffff", new_rcv_ann_wnd <= 0xffff); +#endif + pcb->rcv_ann_wnd = (tcpwnd_size_t)new_rcv_ann_wnd; + } + return 0; + } +} + +/** + * @ingroup tcp_raw + * This function should be called by the application when it has + * processed the data. The purpose is to advertise a larger window + * when the data has been processed. 
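A typical receive callback that honours this contract might look like the sketch below (not part of this file; echo_recv is an invented name): the data is processed in place, tcp_recved() re-opens the advertised window, and the pbuf is freed by the application.

#include "lwip/tcp.h"
#include "lwip/pbuf.h"

static err_t
echo_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  if (p == NULL) {                  /* NULL pbuf: the remote side closed */
    return tcp_close(tpcb);
  }
  if (err == ERR_OK) {
    /* ... consume the pbuf chain starting at p->payload here ... */
    tcp_recved(tpcb, p->tot_len);   /* data handled: advertise a larger window */
  }
  pbuf_free(p);                     /* the application owns p and must free it */
  return ERR_OK;
}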
+ * + * @param pcb the tcp_pcb for which data is read + * @param len the amount of bytes that have been read by the application + */ +void +tcp_recved(struct tcp_pcb *pcb, u16_t len) +{ + u32_t wnd_inflation; + tcpwnd_size_t rcv_wnd; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_recved: invalid pcb", pcb != NULL, return); + + /* pcb->state LISTEN not allowed here */ + LWIP_ASSERT("don't call tcp_recved for listen-pcbs", + pcb->state != LISTEN); + + rcv_wnd = (tcpwnd_size_t)(pcb->rcv_wnd + len); + if ((rcv_wnd > TCP_WND_MAX(pcb)) || (rcv_wnd < pcb->rcv_wnd)) { + /* window got too big or tcpwnd_size_t overflow */ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_recved: window got too big or tcpwnd_size_t overflow\n")); + pcb->rcv_wnd = TCP_WND_MAX(pcb); + } else { + pcb->rcv_wnd = rcv_wnd; + } + + wnd_inflation = tcp_update_rcv_ann_wnd(pcb); + + /* If the change in the right edge of window is significant (default + * watermark is TCP_WND/4), then send an explicit update now. + * Otherwise wait for a packet to be sent in the normal course of + * events (or more window to be available later) */ + if (wnd_inflation >= TCP_WND_UPDATE_THRESHOLD) { + tcp_ack_now(pcb); + tcp_output(pcb); + } + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_recved: received %"U16_F" bytes, wnd %"TCPWNDSIZE_F" (%"TCPWNDSIZE_F").\n", + len, pcb->rcv_wnd, (u16_t)(TCP_WND_MAX(pcb) - pcb->rcv_wnd))); +} + +/** + * Allocate a new local TCP port. + * + * @return a new (free) local TCP port number + */ +static u16_t +tcp_new_port(void) +{ + u8_t i; + u16_t n = 0; + struct tcp_pcb *pcb; + +again: + tcp_port++; + if (tcp_port == TCP_LOCAL_PORT_RANGE_END) { + tcp_port = TCP_LOCAL_PORT_RANGE_START; + } + /* Check all PCB lists. */ + for (i = 0; i < NUM_TCP_PCB_LISTS; i++) { + for (pcb = *tcp_pcb_lists[i]; pcb != NULL; pcb = pcb->next) { + if (pcb->local_port == tcp_port) { + n++; + if (n > (TCP_LOCAL_PORT_RANGE_END - TCP_LOCAL_PORT_RANGE_START)) { + return 0; + } + goto again; + } + } + } + return tcp_port; +} + +/** + * @ingroup tcp_raw + * Connects to another host. The function given as the "connected" + * argument will be called when the connection has been established. + * Sets up the pcb to connect to the remote host and sends the + * initial SYN segment which opens the connection. + * + * The tcp_connect() function returns immediately; it does not wait for + * the connection to be properly setup. Instead, it will call the + * function specified as the fourth argument (the "connected" argument) + * when the connection is established. If the connection could not be + * properly established, either because the other host refused the + * connection or because the other host didn't answer, the "err" + * callback function of this pcb (registered with tcp_err, see below) + * will be called. + * + * The tcp_connect() function can return ERR_MEM if no memory is + * available for enqueueing the SYN segment. If the SYN indeed was + * enqueued successfully, the tcp_connect() function returns ERR_OK. 
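As a rough client-side sketch of the behaviour described above (not part of this file; on_connected, on_err, open_connection and the peer address are invented, and an IPv4 build is assumed):

#include "lwip/tcp.h"
#include "lwip/ip_addr.h"

static err_t
on_connected(void *arg, struct tcp_pcb *tpcb, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  if (err != ERR_OK) {
    return err;
  }
  /* connection is up: enqueue a greeting and push it out */
  tcp_write(tpcb, "hi", 2, TCP_WRITE_FLAG_COPY);
  return tcp_output(tpcb);
}

static void
on_err(void *arg, err_t err)
{
  /* the pcb is already freed when this runs; only note the failure */
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(err);
}

static err_t
open_connection(void)
{
  ip_addr_t dst;
  struct tcp_pcb *pcb = tcp_new();
  if (pcb == NULL) {
    return ERR_MEM;
  }
  IP_ADDR4(&dst, 192, 168, 1, 10);   /* illustrative peer */
  tcp_err(pcb, on_err);
  return tcp_connect(pcb, &dst, 80, on_connected);
}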
+ * + * @param pcb the tcp_pcb used to establish the connection + * @param ipaddr the remote ip address to connect to + * @param port the remote tcp port to connect to + * @param connected callback function to call when connected (on error, + the err calback will be called) + * @return ERR_VAL if invalid arguments are given + * ERR_OK if connect request has been sent + * other err_t values if connect request couldn't be sent + */ +err_t +tcp_connect(struct tcp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port, + tcp_connected_fn connected) +{ + struct netif *netif = NULL; + err_t ret; + u32_t iss; + u16_t old_local_port; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_connect: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("tcp_connect: invalid ipaddr", ipaddr != NULL, return ERR_ARG); + + LWIP_ERROR("tcp_connect: can only connect from state CLOSED", pcb->state == CLOSED, return ERR_ISCONN); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_connect to port %"U16_F"\n", port)); + ip_addr_set(&pcb->remote_ip, ipaddr); + pcb->remote_port = port; + + if (pcb->netif_idx != NETIF_NO_INDEX) { + netif = netif_get_by_index(pcb->netif_idx); + } else { + /* check if we have a route to the remote host */ + netif = ip_route(&pcb->local_ip, &pcb->remote_ip); + } + if (netif == NULL) { + /* Don't even try to send a SYN packet if we have no route since that will fail. */ + return ERR_RTE; + } + + /* check if local IP has been assigned to pcb, if not, get one */ + if (ip_addr_isany(&pcb->local_ip)) { + const ip_addr_t *local_ip = ip_netif_get_local_ip(netif, ipaddr); + if (local_ip == NULL) { + return ERR_RTE; + } + ip_addr_copy(pcb->local_ip, *local_ip); + } + +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now. + * Given that we already have the target netif, this is easy and cheap. */ + if (IP_IS_V6(&pcb->remote_ip) && + ip6_addr_lacks_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNICAST)) { + ip6_addr_assign_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNICAST, netif); + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + old_local_port = pcb->local_port; + if (pcb->local_port == 0) { + pcb->local_port = tcp_new_port(); + if (pcb->local_port == 0) { + return ERR_BUF; + } + } else { +#if SO_REUSE + if (ip_get_option(pcb, SOF_REUSEADDR)) { + /* Since SOF_REUSEADDR allows reusing a local address, we have to make sure + now that the 5-tuple is unique. */ + struct tcp_pcb *cpcb; + int i; + /* Don't check listen- and bound-PCBs, check active- and TIME-WAIT PCBs. */ + for (i = 2; i < NUM_TCP_PCB_LISTS; i++) { + for (cpcb = *tcp_pcb_lists[i]; cpcb != NULL; cpcb = cpcb->next) { + if ((cpcb->local_port == pcb->local_port) && + (cpcb->remote_port == port) && + ip_addr_cmp(&cpcb->local_ip, &pcb->local_ip) && + ip_addr_cmp(&cpcb->remote_ip, ipaddr)) { + /* linux returns EISCONN here, but ERR_USE should be OK for us */ + return ERR_USE; + } + } + } + } +#endif /* SO_REUSE */ + } + + iss = tcp_next_iss(pcb); + pcb->rcv_nxt = 0; + pcb->snd_nxt = iss; + pcb->lastack = iss - 1; + pcb->snd_wl2 = iss - 1; + pcb->snd_lbb = iss - 1; + /* Start with a window that does not need scaling. When window scaling is + enabled and used, the window is enlarged when both sides agree on scaling. */ + pcb->rcv_wnd = pcb->rcv_ann_wnd = TCPWND_MIN16(TCP_WND); + pcb->rcv_ann_right_edge = pcb->rcv_nxt; + pcb->snd_wnd = TCP_WND; + /* As initial send MSS, we use TCP_MSS but limit it to 536. + The send MSS is updated when an MSS option is received. 
*/ + pcb->mss = INITIAL_MSS; +#if TCP_CALCULATE_EFF_SEND_MSS + pcb->mss = tcp_eff_send_mss_netif(pcb->mss, netif, &pcb->remote_ip); +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + pcb->cwnd = 1; +#if LWIP_CALLBACK_API + pcb->connected = connected; +#else /* LWIP_CALLBACK_API */ + LWIP_UNUSED_ARG(connected); +#endif /* LWIP_CALLBACK_API */ + + /* Send a SYN together with the MSS option. */ + ret = tcp_enqueue_flags(pcb, TCP_SYN); + if (ret == ERR_OK) { + /* SYN segment was enqueued, changed the pcbs state now */ + pcb->state = SYN_SENT; + if (old_local_port != 0) { + TCP_RMV(&tcp_bound_pcbs, pcb); + } + TCP_REG_ACTIVE(pcb); + MIB2_STATS_INC(mib2.tcpactiveopens); + + tcp_output(pcb); + } + return ret; +} + +/** + * Called every 500 ms and implements the retransmission timer and the timer that + * removes PCBs that have been in TIME-WAIT for enough time. It also increments + * various timers such as the inactivity timer in each PCB. + * + * Automatically called from tcp_tmr(). + */ +void +tcp_slowtmr(void) +{ + struct tcp_pcb *pcb, *prev; + tcpwnd_size_t eff_wnd; + u8_t pcb_remove; /* flag if a PCB should be removed */ + u8_t pcb_reset; /* flag if a RST should be sent when removing */ + err_t err; + + err = ERR_OK; + + ++tcp_ticks; + ++tcp_timer_ctr; + +tcp_slowtmr_start: + /* Steps through all of the active PCBs. */ + prev = NULL; + pcb = tcp_active_pcbs; + if (pcb == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: no active pcbs\n")); + } + while (pcb != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: processing active pcb\n")); + LWIP_ASSERT("tcp_slowtmr: active pcb->state != CLOSED\n", pcb->state != CLOSED); + LWIP_ASSERT("tcp_slowtmr: active pcb->state != LISTEN\n", pcb->state != LISTEN); + LWIP_ASSERT("tcp_slowtmr: active pcb->state != TIME-WAIT\n", pcb->state != TIME_WAIT); + if (pcb->last_timer == tcp_timer_ctr) { + /* skip this pcb, we have already processed it */ + prev = pcb; + pcb = pcb->next; + continue; + } + pcb->last_timer = tcp_timer_ctr; + + pcb_remove = 0; + pcb_reset = 0; + + if (pcb->state == SYN_SENT && pcb->nrtx >= TCP_SYNMAXRTX) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: max SYN retries reached\n")); + } else if (pcb->nrtx >= TCP_MAXRTX) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: max DATA retries reached\n")); + } else { + if (pcb->persist_backoff > 0) { + LWIP_ASSERT("tcp_slowtimr: persist ticking with in-flight data", pcb->unacked == NULL); + LWIP_ASSERT("tcp_slowtimr: persist ticking with empty send buffer", pcb->unsent != NULL); + if (pcb->persist_probe >= TCP_MAXRTX) { + ++pcb_remove; /* max probes reached */ + } else { + u8_t backoff_cnt = tcp_persist_backoff[pcb->persist_backoff - 1]; + if (pcb->persist_cnt < backoff_cnt) { + pcb->persist_cnt++; + } + if (pcb->persist_cnt >= backoff_cnt) { + int next_slot = 1; /* increment timer to next slot */ + /* If snd_wnd is zero, send 1 byte probes */ + if (pcb->snd_wnd == 0) { + if (tcp_zero_window_probe(pcb) != ERR_OK) { + next_slot = 0; /* try probe again with current slot */ + } + /* snd_wnd not fully closed, split unsent head and fill window */ + } else { + if (tcp_split_unsent_seg(pcb, (u16_t)pcb->snd_wnd) == ERR_OK) { + if (tcp_output(pcb) == ERR_OK) { + /* sending will cancel persist timer, else retry with current slot */ + next_slot = 0; + } + } + } + if (next_slot) { + pcb->persist_cnt = 0; + if (pcb->persist_backoff < sizeof(tcp_persist_backoff)) { + pcb->persist_backoff++; + } + } + } + } + } else { + /* Increase the retransmission timer if it is running */ + if ((pcb->rtime >= 0) && 
(pcb->rtime < 0x7FFF)) { + ++pcb->rtime; + } + + if (pcb->rtime >= pcb->rto) { + /* Time for a retransmission. */ + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_slowtmr: rtime %"S16_F + " pcb->rto %"S16_F"\n", + pcb->rtime, pcb->rto)); + /* If prepare phase fails but we have unsent data but no unacked data, + still execute the backoff calculations below, as this means we somehow + failed to send segment. */ + if ((tcp_rexmit_rto_prepare(pcb) == ERR_OK) || ((pcb->unacked == NULL) && (pcb->unsent != NULL))) { + /* Double retransmission time-out unless we are trying to + * connect to somebody (i.e., we are in SYN_SENT). */ + if (pcb->state != SYN_SENT) { + u8_t backoff_idx = LWIP_MIN(pcb->nrtx, sizeof(tcp_backoff) - 1); + int calc_rto = ((pcb->sa >> 3) + pcb->sv) << tcp_backoff[backoff_idx]; + pcb->rto = (s16_t)LWIP_MIN(calc_rto, 0x7FFF); + } + + /* Reset the retransmission timer. */ + pcb->rtime = 0; + + /* Reduce congestion window and ssthresh. */ + eff_wnd = LWIP_MIN(pcb->cwnd, pcb->snd_wnd); + pcb->ssthresh = eff_wnd >> 1; + if (pcb->ssthresh < (tcpwnd_size_t)(pcb->mss << 1)) { + pcb->ssthresh = (tcpwnd_size_t)(pcb->mss << 1); + } + pcb->cwnd = pcb->mss; + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_slowtmr: cwnd %"TCPWNDSIZE_F + " ssthresh %"TCPWNDSIZE_F"\n", + pcb->cwnd, pcb->ssthresh)); + pcb->bytes_acked = 0; + + /* The following needs to be called AFTER cwnd is set to one + mss - STJ */ + tcp_rexmit_rto_commit(pcb); + } + } + } + } + /* Check if this PCB has stayed too long in FIN-WAIT-2 */ + if (pcb->state == FIN_WAIT_2) { + /* If this PCB is in FIN_WAIT_2 because of SHUT_WR don't let it time out. */ + if (pcb->flags & TF_RXCLOSED) { + /* PCB was fully closed (either through close() or SHUT_RDWR): + normal FIN-WAIT timeout handling. */ + if ((u32_t)(tcp_ticks - pcb->tmr) > + TCP_FIN_WAIT_TIMEOUT / TCP_SLOW_INTERVAL) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in FIN-WAIT-2\n")); + } + } + } + + /* Check if KEEPALIVE should be sent */ + if (ip_get_option(pcb, SOF_KEEPALIVE) && + ((pcb->state == ESTABLISHED) || + (pcb->state == CLOSE_WAIT))) { + if ((u32_t)(tcp_ticks - pcb->tmr) > + (pcb->keep_idle + TCP_KEEP_DUR(pcb)) / TCP_SLOW_INTERVAL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: KEEPALIVE timeout. Aborting connection to ")); + ip_addr_debug_print_val(TCP_DEBUG, pcb->remote_ip); + LWIP_DEBUGF(TCP_DEBUG, ("\n")); + + ++pcb_remove; + ++pcb_reset; + } else if ((u32_t)(tcp_ticks - pcb->tmr) > + (pcb->keep_idle + pcb->keep_cnt_sent * TCP_KEEP_INTVL(pcb)) + / TCP_SLOW_INTERVAL) { + err = tcp_keepalive(pcb); + if (err == ERR_OK) { + pcb->keep_cnt_sent++; + } + } + } + + /* If this PCB has queued out of sequence data, but has been + inactive for too long, will drop the data (it will eventually + be retransmitted). 
*/ +#if TCP_QUEUE_OOSEQ + if (pcb->ooseq != NULL && + (tcp_ticks - pcb->tmr >= (u32_t)pcb->rto * TCP_OOSEQ_TIMEOUT)) { + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_slowtmr: dropping OOSEQ queued data\n")); + tcp_free_ooseq(pcb); + } +#endif /* TCP_QUEUE_OOSEQ */ + + /* Check if this PCB has stayed too long in SYN-RCVD */ + if (pcb->state == SYN_RCVD) { + if ((u32_t)(tcp_ticks - pcb->tmr) > + TCP_SYN_RCVD_TIMEOUT / TCP_SLOW_INTERVAL) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in SYN-RCVD\n")); + } + } + + /* Check if this PCB has stayed too long in LAST-ACK */ + if (pcb->state == LAST_ACK) { + if ((u32_t)(tcp_ticks - pcb->tmr) > 2 * TCP_MSL / TCP_SLOW_INTERVAL) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in LAST-ACK\n")); + } + } + + /* If the PCB should be removed, do it. */ + if (pcb_remove) { + struct tcp_pcb *pcb2; +#if LWIP_CALLBACK_API + tcp_err_fn err_fn = pcb->errf; +#endif /* LWIP_CALLBACK_API */ + void *err_arg; + enum tcp_state last_state; + tcp_pcb_purge(pcb); + /* Remove PCB from tcp_active_pcbs list. */ + if (prev != NULL) { + LWIP_ASSERT("tcp_slowtmr: middle tcp != tcp_active_pcbs", pcb != tcp_active_pcbs); + prev->next = pcb->next; + } else { + /* This PCB was the first. */ + LWIP_ASSERT("tcp_slowtmr: first pcb == tcp_active_pcbs", tcp_active_pcbs == pcb); + tcp_active_pcbs = pcb->next; + } + + if (pcb_reset) { + tcp_rst(pcb, pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip, + pcb->local_port, pcb->remote_port); + } + + err_arg = pcb->callback_arg; + last_state = pcb->state; + pcb2 = pcb; + pcb = pcb->next; + tcp_free(pcb2); + + tcp_active_pcbs_changed = 0; + TCP_EVENT_ERR(last_state, err_fn, err_arg, ERR_ABRT); + if (tcp_active_pcbs_changed) { + goto tcp_slowtmr_start; + } + } else { + /* get the 'next' element now and work with 'prev' below (in case of abort) */ + prev = pcb; + pcb = pcb->next; + + /* We check if we should poll the connection. */ + ++prev->polltmr; + if (prev->polltmr >= prev->pollinterval) { + prev->polltmr = 0; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: polling application\n")); + tcp_active_pcbs_changed = 0; + TCP_EVENT_POLL(prev, err); + if (tcp_active_pcbs_changed) { + goto tcp_slowtmr_start; + } + /* if err == ERR_ABRT, 'prev' is already deallocated */ + if (err == ERR_OK) { + tcp_output(prev); + } + } + } + } + + + /* Steps through all of the TIME-WAIT PCBs. */ + prev = NULL; + pcb = tcp_tw_pcbs; + while (pcb != NULL) { + LWIP_ASSERT("tcp_slowtmr: TIME-WAIT pcb->state == TIME-WAIT", pcb->state == TIME_WAIT); + pcb_remove = 0; + + /* Check if this PCB has stayed long enough in TIME-WAIT */ + if ((u32_t)(tcp_ticks - pcb->tmr) > 2 * TCP_MSL / TCP_SLOW_INTERVAL) { + ++pcb_remove; + } + + /* If the PCB should be removed, do it. */ + if (pcb_remove) { + struct tcp_pcb *pcb2; + tcp_pcb_purge(pcb); + /* Remove PCB from tcp_tw_pcbs list. */ + if (prev != NULL) { + LWIP_ASSERT("tcp_slowtmr: middle tcp != tcp_tw_pcbs", pcb != tcp_tw_pcbs); + prev->next = pcb->next; + } else { + /* This PCB was the first. */ + LWIP_ASSERT("tcp_slowtmr: first pcb == tcp_tw_pcbs", tcp_tw_pcbs == pcb); + tcp_tw_pcbs = pcb->next; + } + pcb2 = pcb; + pcb = pcb->next; + tcp_free(pcb2); + } else { + prev = pcb; + pcb = pcb->next; + } + } +} + +/** + * Is called every TCP_FAST_INTERVAL (250 ms) and process data previously + * "refused" by upper layer (application) and sends delayed ACKs or pending FINs. + * + * Automatically called from tcp_tmr(). 
+ */ +void +tcp_fasttmr(void) +{ + struct tcp_pcb *pcb; + + ++tcp_timer_ctr; + +tcp_fasttmr_start: + pcb = tcp_active_pcbs; + + while (pcb != NULL) { + if (pcb->last_timer != tcp_timer_ctr) { + struct tcp_pcb *next; + pcb->last_timer = tcp_timer_ctr; + /* send delayed ACKs */ + if (pcb->flags & TF_ACK_DELAY) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_fasttmr: delayed ACK\n")); + tcp_ack_now(pcb); + tcp_output(pcb); + tcp_clear_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + } + /* send pending FIN */ + if (pcb->flags & TF_CLOSEPEND) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_fasttmr: pending FIN\n")); + tcp_clear_flags(pcb, TF_CLOSEPEND); + tcp_close_shutdown_fin(pcb); + } + + next = pcb->next; + + /* If there is data which was previously "refused" by upper layer */ + if (pcb->refused_data != NULL) { + tcp_active_pcbs_changed = 0; + tcp_process_refused_data(pcb); + if (tcp_active_pcbs_changed) { + /* application callback has changed the pcb list: restart the loop */ + goto tcp_fasttmr_start; + } + } + pcb = next; + } else { + pcb = pcb->next; + } + } +} + +/** Call tcp_output for all active pcbs that have TF_NAGLEMEMERR set */ +void +tcp_txnow(void) +{ + struct tcp_pcb *pcb; + + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + if (pcb->flags & TF_NAGLEMEMERR) { + tcp_output(pcb); + } + } +} + +/** Pass pcb->refused_data to the recv callback */ +err_t +tcp_process_refused_data(struct tcp_pcb *pcb) +{ +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + struct pbuf *rest; +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + + LWIP_ERROR("tcp_process_refused_data: invalid pcb", pcb != NULL, return ERR_ARG); + +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + while (pcb->refused_data != NULL) +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + { + err_t err; + u8_t refused_flags = pcb->refused_data->flags; + /* set pcb->refused_data to NULL in case the callback frees it and then + closes the pcb */ + struct pbuf *refused_data = pcb->refused_data; +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + pbuf_split_64k(refused_data, &rest); + pcb->refused_data = rest; +#else /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + pcb->refused_data = NULL; +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + /* Notify again application with data previously received. */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: notify kept packet\n")); + TCP_EVENT_RECV(pcb, refused_data, ERR_OK, err); + if (err == ERR_OK) { + /* did refused_data include a FIN? */ + if ((refused_flags & PBUF_FLAG_TCP_FIN) +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + && (rest == NULL) +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + ) { + /* correct rcv_wnd as the application won't call tcp_recved() + for the FIN's seqno */ + if (pcb->rcv_wnd != TCP_WND_MAX(pcb)) { + pcb->rcv_wnd++; + } + TCP_EVENT_CLOSED(pcb, err); + if (err == ERR_ABRT) { + return ERR_ABRT; + } + } + } else if (err == ERR_ABRT) { + /* if err == ERR_ABRT, 'pcb' is already deallocated */ + /* Drop incoming packets because pcb is "full" (only if the incoming + segment contains data). */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: drop incoming packets, because pcb is \"full\"\n")); + return ERR_ABRT; + } else { + /* data is still refused, pbuf is still valid (go on for ACK-only packets) */ +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + if (rest != NULL) { + pbuf_cat(refused_data, rest); + } +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + pcb->refused_data = refused_data; + return ERR_INPROGRESS; + } + } + return ERR_OK; +} + +/** + * Deallocates a list of TCP segments (tcp_seg structures). 
+ * + * @param seg tcp_seg list of TCP segments to free + */ +void +tcp_segs_free(struct tcp_seg *seg) +{ + while (seg != NULL) { + struct tcp_seg *next = seg->next; + tcp_seg_free(seg); + seg = next; + } +} + +/** + * Frees a TCP segment (tcp_seg structure). + * + * @param seg single tcp_seg to free + */ +void +tcp_seg_free(struct tcp_seg *seg) +{ + if (seg != NULL) { + if (seg->p != NULL) { + pbuf_free(seg->p); +#if TCP_DEBUG + seg->p = NULL; +#endif /* TCP_DEBUG */ + } + memp_free(MEMP_TCP_SEG, seg); + } +} + +/** + * @ingroup tcp + * Sets the priority of a connection. + * + * @param pcb the tcp_pcb to manipulate + * @param prio new priority + */ +void +tcp_setprio(struct tcp_pcb *pcb, u8_t prio) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_setprio: invalid pcb", pcb != NULL, return); + + pcb->prio = prio; +} + +#if TCP_QUEUE_OOSEQ +/** + * Returns a copy of the given TCP segment. + * The pbuf and data are not copied, only the pointers + * + * @param seg the old tcp_seg + * @return a copy of seg + */ +struct tcp_seg * +tcp_seg_copy(struct tcp_seg *seg) +{ + struct tcp_seg *cseg; + + LWIP_ASSERT("tcp_seg_copy: invalid seg", seg != NULL); + + cseg = (struct tcp_seg *)memp_malloc(MEMP_TCP_SEG); + if (cseg == NULL) { + return NULL; + } + SMEMCPY((u8_t *)cseg, (const u8_t *)seg, sizeof(struct tcp_seg)); + pbuf_ref(cseg->p); + return cseg; +} +#endif /* TCP_QUEUE_OOSEQ */ + +#if LWIP_CALLBACK_API +/** + * Default receive callback that is called if the user didn't register + * a recv callback for the pcb. + */ +err_t +tcp_recv_null(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err) +{ + LWIP_UNUSED_ARG(arg); + + LWIP_ERROR("tcp_recv_null: invalid pcb", pcb != NULL, return ERR_ARG); + + if (p != NULL) { + tcp_recved(pcb, p->tot_len); + pbuf_free(p); + } else if (err == ERR_OK) { + return tcp_close(pcb); + } + return ERR_OK; +} +#endif /* LWIP_CALLBACK_API */ + +/** + * Kills the oldest active connection that has a lower priority than 'prio'. + * + * @param prio minimum priority + */ +static void +tcp_kill_prio(u8_t prio) +{ + struct tcp_pcb *pcb, *inactive; + u32_t inactivity; + u8_t mprio; + + mprio = LWIP_MIN(TCP_PRIO_MAX, prio); + + /* We want to kill connections with a lower prio, so bail out if + * supplied prio is 0 - there can never be a lower prio + */ + if (mprio == 0) { + return; + } + + /* We only want kill connections with a lower prio, so decrement prio by one + * and start searching for oldest connection with same or lower priority than mprio. + * We want to find the connections with the lowest possible prio, and among + * these the one with the longest inactivity time. + */ + mprio--; + + inactivity = 0; + inactive = NULL; + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + /* lower prio is always a kill candidate */ + if ((pcb->prio < mprio) || + /* longer inactivity is also a kill candidate */ + ((pcb->prio == mprio) && ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity))) { + inactivity = tcp_ticks - pcb->tmr; + inactive = pcb; + mprio = pcb->prio; + } + } + if (inactive != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_prio: killing oldest PCB %p (%"S32_F")\n", + (void *)inactive, inactivity)); + tcp_abort(inactive); + } +} + +/** + * Kills the oldest connection that is in specific state. + * Called from tcp_alloc() for LAST_ACK and CLOSING if no more connections are available. 
+ */ +static void +tcp_kill_state(enum tcp_state state) +{ + struct tcp_pcb *pcb, *inactive; + u32_t inactivity; + + LWIP_ASSERT("invalid state", (state == CLOSING) || (state == LAST_ACK)); + + inactivity = 0; + inactive = NULL; + /* Go through the list of active pcbs and get the oldest pcb that is in state + CLOSING/LAST_ACK. */ + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + if (pcb->state == state) { + if ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity) { + inactivity = tcp_ticks - pcb->tmr; + inactive = pcb; + } + } + } + if (inactive != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_closing: killing oldest %s PCB %p (%"S32_F")\n", + tcp_state_str[state], (void *)inactive, inactivity)); + /* Don't send a RST, since no data is lost. */ + tcp_abandon(inactive, 0); + } +} + +/** + * Kills the oldest connection that is in TIME_WAIT state. + * Called from tcp_alloc() if no more connections are available. + */ +static void +tcp_kill_timewait(void) +{ + struct tcp_pcb *pcb, *inactive; + u32_t inactivity; + + inactivity = 0; + inactive = NULL; + /* Go through the list of TIME_WAIT pcbs and get the oldest pcb. */ + for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { + if ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity) { + inactivity = tcp_ticks - pcb->tmr; + inactive = pcb; + } + } + if (inactive != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_timewait: killing oldest TIME-WAIT PCB %p (%"S32_F")\n", + (void *)inactive, inactivity)); + tcp_abort(inactive); + } +} + +/* Called when allocating a pcb fails. + * In this case, we want to handle all pcbs that want to close first: if we can + * now send the FIN (which failed before), the pcb might be in a state that is + * OK for us to now free it. + */ +static void +tcp_handle_closepend(void) +{ + struct tcp_pcb *pcb = tcp_active_pcbs; + + while (pcb != NULL) { + struct tcp_pcb *next = pcb->next; + /* send pending FIN */ + if (pcb->flags & TF_CLOSEPEND) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_handle_closepend: pending FIN\n")); + tcp_clear_flags(pcb, TF_CLOSEPEND); + tcp_close_shutdown_fin(pcb); + } + pcb = next; + } +} + +/** + * Allocate a new tcp_pcb structure. + * + * @param prio priority for the new pcb + * @return a new tcp_pcb that initially is in state CLOSED + */ +struct tcp_pcb * +tcp_alloc(u8_t prio) +{ + struct tcp_pcb *pcb; + + LWIP_ASSERT_CORE_LOCKED(); + + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb == NULL) { + /* Try to send FIN for all pcbs stuck in TF_CLOSEPEND first */ + tcp_handle_closepend(); + + /* Try killing oldest connection in TIME-WAIT. */ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest TIME-WAIT connection\n")); + tcp_kill_timewait(); + /* Try to allocate a tcp_pcb again. */ + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb == NULL) { + /* Try killing oldest connection in LAST-ACK (these wouldn't go to TIME-WAIT). */ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest LAST-ACK connection\n")); + tcp_kill_state(LAST_ACK); + /* Try to allocate a tcp_pcb again. */ + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb == NULL) { + /* Try killing oldest connection in CLOSING. */ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest CLOSING connection\n")); + tcp_kill_state(CLOSING); + /* Try to allocate a tcp_pcb again. */ + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb == NULL) { + /* Try killing oldest active connection with lower priority than the new one. 
*/ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing oldest connection with prio lower than %d\n", prio)); + tcp_kill_prio(prio); + /* Try to allocate a tcp_pcb again. */ + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb != NULL) { + /* adjust err stats: memp_malloc failed multiple times before */ + MEMP_STATS_DEC(err, MEMP_TCP_PCB); + } + } + if (pcb != NULL) { + /* adjust err stats: memp_malloc failed multiple times before */ + MEMP_STATS_DEC(err, MEMP_TCP_PCB); + } + } + if (pcb != NULL) { + /* adjust err stats: memp_malloc failed multiple times before */ + MEMP_STATS_DEC(err, MEMP_TCP_PCB); + } + } + if (pcb != NULL) { + /* adjust err stats: memp_malloc failed above */ + MEMP_STATS_DEC(err, MEMP_TCP_PCB); + } + } + if (pcb != NULL) { + /* zero out the whole pcb, so there is no need to initialize members to zero */ + memset(pcb, 0, sizeof(struct tcp_pcb)); + pcb->prio = prio; + pcb->snd_buf = TCP_SND_BUF; + /* Start with a window that does not need scaling. When window scaling is + enabled and used, the window is enlarged when both sides agree on scaling. */ + pcb->rcv_wnd = pcb->rcv_ann_wnd = TCPWND_MIN16(TCP_WND); + pcb->ttl = TCP_TTL; + /* As initial send MSS, we use TCP_MSS but limit it to 536. + The send MSS is updated when an MSS option is received. */ + pcb->mss = INITIAL_MSS; + pcb->rto = 3000 / TCP_SLOW_INTERVAL; + pcb->sv = 3000 / TCP_SLOW_INTERVAL; + pcb->rtime = -1; + pcb->cwnd = 1; + pcb->tmr = tcp_ticks; + pcb->last_timer = tcp_timer_ctr; + + /* RFC 5681 recommends setting ssthresh abritrarily high and gives an example + of using the largest advertised receive window. We've seen complications with + receiving TCPs that use window scaling and/or window auto-tuning where the + initial advertised window is very small and then grows rapidly once the + connection is established. To avoid these complications, we set ssthresh to the + largest effective cwnd (amount of in-flight data) that the sender can have. */ + pcb->ssthresh = TCP_SND_BUF; + +#if LWIP_CALLBACK_API + pcb->recv = tcp_recv_null; +#endif /* LWIP_CALLBACK_API */ + + /* Init KEEPALIVE timer */ + pcb->keep_idle = TCP_KEEPIDLE_DEFAULT; + +#if LWIP_TCP_KEEPALIVE + pcb->keep_intvl = TCP_KEEPINTVL_DEFAULT; + pcb->keep_cnt = TCP_KEEPCNT_DEFAULT; +#endif /* LWIP_TCP_KEEPALIVE */ + } + return pcb; +} + +/** + * @ingroup tcp_raw + * Creates a new TCP protocol control block but doesn't place it on + * any of the TCP PCB lists. + * The pcb is not put on any list until binding using tcp_bind(). + * If memory is not available for creating the new pcb, NULL is returned. + * + * @internal: Maybe there should be a idle TCP PCB list where these + * PCBs are put on. Port reservation using tcp_bind() is implemented but + * allocated pcbs that are not bound can't be killed automatically if wanting + * to allocate a pcb with higher prio (@see tcp_kill_prio()) + * + * @return a new tcp_pcb that initially is in state CLOSED + */ +struct tcp_pcb * +tcp_new(void) +{ + return tcp_alloc(TCP_PRIO_NORMAL); +} + +/** + * @ingroup tcp_raw + * Creates a new TCP protocol control block but doesn't + * place it on any of the TCP PCB lists. + * The pcb is not put on any list until binding using tcp_bind(). + * + * @param type IP address type, see @ref lwip_ip_addr_type definitions. + * If you want to listen to IPv4 and IPv6 (dual-stack) connections, + * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE. 
+ * @return a new tcp_pcb that initially is in state CLOSED + */ +struct tcp_pcb * +tcp_new_ip_type(u8_t type) +{ + struct tcp_pcb *pcb; + pcb = tcp_alloc(TCP_PRIO_NORMAL); +#if LWIP_IPV4 && LWIP_IPV6 + if (pcb != NULL) { + IP_SET_TYPE_VAL(pcb->local_ip, type); + IP_SET_TYPE_VAL(pcb->remote_ip, type); + } +#else + LWIP_UNUSED_ARG(type); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + return pcb; +} + +/** + * @ingroup tcp_raw + * Specifies the program specific state that should be passed to all + * other callback functions. The "pcb" argument is the current TCP + * connection control block, and the "arg" argument is the argument + * that will be passed to the callbacks. + * + * @param pcb tcp_pcb to set the callback argument + * @param arg void pointer argument to pass to callback functions + */ +void +tcp_arg(struct tcp_pcb *pcb, void *arg) +{ + LWIP_ASSERT_CORE_LOCKED(); + /* This function is allowed to be called for both listen pcbs and + connection pcbs. */ + if (pcb != NULL) { + pcb->callback_arg = arg; + } +} +#if LWIP_CALLBACK_API + +/** + * @ingroup tcp_raw + * Sets the callback function that will be called when new data + * arrives. The callback function will be passed a NULL pbuf to + * indicate that the remote host has closed the connection. If the + * callback function returns ERR_OK or ERR_ABRT it must have + * freed the pbuf, otherwise it must not have freed it. + * + * @param pcb tcp_pcb to set the recv callback + * @param recv callback function to call for this pcb when data is received + */ +void +tcp_recv(struct tcp_pcb *pcb, tcp_recv_fn recv) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (pcb != NULL) { + LWIP_ASSERT("invalid socket state for recv callback", pcb->state != LISTEN); + pcb->recv = recv; + } +} + +/** + * @ingroup tcp_raw + * Specifies the callback function that should be called when data has + * successfully been received (i.e., acknowledged) by the remote + * host. The len argument passed to the callback function gives the + * amount bytes that was acknowledged by the last acknowledgment. + * + * @param pcb tcp_pcb to set the sent callback + * @param sent callback function to call for this pcb when data is successfully sent + */ +void +tcp_sent(struct tcp_pcb *pcb, tcp_sent_fn sent) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (pcb != NULL) { + LWIP_ASSERT("invalid socket state for sent callback", pcb->state != LISTEN); + pcb->sent = sent; + } +} + +/** + * @ingroup tcp_raw + * Used to specify the function that should be called when a fatal error + * has occurred on the connection. + * + * If a connection is aborted because of an error, the application is + * alerted of this event by the err callback. Errors that might abort a + * connection are when there is a shortage of memory. The callback + * function to be called is set using the tcp_err() function. + * + * @note The corresponding pcb is already freed when this callback is called! + * + * @param pcb tcp_pcb to set the err callback + * @param err callback function to call for this pcb when a fatal error + * has occurred on the connection + */ +void +tcp_err(struct tcp_pcb *pcb, tcp_err_fn err) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (pcb != NULL) { + LWIP_ASSERT("invalid socket state for err callback", pcb->state != LISTEN); + pcb->errf = err; + } +} + +/** + * @ingroup tcp_raw + * Used for specifying the function that should be called when a + * LISTENing connection has been connected to another host. 
+ * + * @param pcb tcp_pcb to set the accept callback + * @param accept callback function to call for this pcb when LISTENing + * connection has been connected to another host + */ +void +tcp_accept(struct tcp_pcb *pcb, tcp_accept_fn accept) +{ + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb != NULL) && (pcb->state == LISTEN)) { + struct tcp_pcb_listen *lpcb = (struct tcp_pcb_listen *)pcb; + lpcb->accept = accept; + } +} +#endif /* LWIP_CALLBACK_API */ + + +/** + * @ingroup tcp_raw + * Specifies the polling interval and the callback function that should + * be called to poll the application. The interval is specified in + * number of TCP coarse grained timer shots, which typically occurs + * twice a second. An interval of 10 means that the application would + * be polled every 5 seconds. + * + * When a connection is idle (i.e., no data is either transmitted or + * received), lwIP will repeatedly poll the application by calling a + * specified callback function. This can be used either as a watchdog + * timer for killing connections that have stayed idle for too long, or + * as a method of waiting for memory to become available. For instance, + * if a call to tcp_write() has failed because memory wasn't available, + * the application may use the polling functionality to call tcp_write() + * again when the connection has been idle for a while. + */ +void +tcp_poll(struct tcp_pcb *pcb, tcp_poll_fn poll, u8_t interval) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_poll: invalid pcb", pcb != NULL, return); + LWIP_ASSERT("invalid socket state for poll", pcb->state != LISTEN); + +#if LWIP_CALLBACK_API + pcb->poll = poll; +#else /* LWIP_CALLBACK_API */ + LWIP_UNUSED_ARG(poll); +#endif /* LWIP_CALLBACK_API */ + pcb->pollinterval = interval; +} + +/** + * Purges a TCP PCB. Removes any buffered data and frees the buffer memory + * (pcb->ooseq, pcb->unsent and pcb->unacked are freed). + * + * @param pcb tcp_pcb to purge. The pcb itself is not deallocated! + */ +void +tcp_pcb_purge(struct tcp_pcb *pcb) +{ + LWIP_ERROR("tcp_pcb_purge: invalid pcb", pcb != NULL, return); + + if (pcb->state != CLOSED && + pcb->state != TIME_WAIT && + pcb->state != LISTEN) { + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge\n")); + + tcp_backlog_accepted(pcb); + + if (pcb->refused_data != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->refused_data\n")); + pbuf_free(pcb->refused_data); + pcb->refused_data = NULL; + } + if (pcb->unsent != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: not all data sent\n")); + } + if (pcb->unacked != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->unacked\n")); + } +#if TCP_QUEUE_OOSEQ + if (pcb->ooseq != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->ooseq\n")); + tcp_free_ooseq(pcb); + } +#endif /* TCP_QUEUE_OOSEQ */ + + /* Stop the retransmission timer as it will expect data on unacked + queue if it fires */ + pcb->rtime = -1; + + tcp_segs_free(pcb->unsent); + tcp_segs_free(pcb->unacked); + pcb->unacked = pcb->unsent = NULL; +#if TCP_OVERSIZE + pcb->unsent_oversize = 0; +#endif /* TCP_OVERSIZE */ + } +} + +/** + * Purges the PCB and removes it from a PCB list. Any delayed ACKs are sent first. + * + * @param pcblist PCB list to purge. + * @param pcb tcp_pcb to purge. The pcb itself is NOT deallocated! 
+ */ +void +tcp_pcb_remove(struct tcp_pcb **pcblist, struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_pcb_remove: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_pcb_remove: invalid pcblist", pcblist != NULL); + + TCP_RMV(pcblist, pcb); + + tcp_pcb_purge(pcb); + + /* if there is an outstanding delayed ACKs, send it */ + if ((pcb->state != TIME_WAIT) && + (pcb->state != LISTEN) && + (pcb->flags & TF_ACK_DELAY)) { + tcp_ack_now(pcb); + tcp_output(pcb); + } + + if (pcb->state != LISTEN) { + LWIP_ASSERT("unsent segments leaking", pcb->unsent == NULL); + LWIP_ASSERT("unacked segments leaking", pcb->unacked == NULL); +#if TCP_QUEUE_OOSEQ + LWIP_ASSERT("ooseq segments leaking", pcb->ooseq == NULL); +#endif /* TCP_QUEUE_OOSEQ */ + } + + pcb->state = CLOSED; + /* reset the local port to prevent the pcb from being 'bound' */ + pcb->local_port = 0; + + LWIP_ASSERT("tcp_pcb_remove: tcp_pcbs_sane()", tcp_pcbs_sane()); +} + +/** + * Calculates a new initial sequence number for new connections. + * + * @return u32_t pseudo random sequence number + */ +u32_t +tcp_next_iss(struct tcp_pcb *pcb) +{ +#ifdef LWIP_HOOK_TCP_ISN + LWIP_ASSERT("tcp_next_iss: invalid pcb", pcb != NULL); + return LWIP_HOOK_TCP_ISN(&pcb->local_ip, pcb->local_port, &pcb->remote_ip, pcb->remote_port); +#else /* LWIP_HOOK_TCP_ISN */ + static u32_t iss = 6510; + + LWIP_ASSERT("tcp_next_iss: invalid pcb", pcb != NULL); + LWIP_UNUSED_ARG(pcb); + + iss += tcp_ticks; /* XXX */ + return iss; +#endif /* LWIP_HOOK_TCP_ISN */ +} + +#if TCP_CALCULATE_EFF_SEND_MSS +/** + * Calculates the effective send mss that can be used for a specific IP address + * by calculating the minimum of TCP_MSS and the mtu (if set) of the target + * netif (if not NULL). + */ +u16_t +tcp_eff_send_mss_netif(u16_t sendmss, struct netif *outif, const ip_addr_t *dest) +{ + u16_t mss_s; + u16_t mtu; + + LWIP_UNUSED_ARG(dest); /* in case IPv6 is disabled */ + + LWIP_ASSERT("tcp_eff_send_mss_netif: invalid dst_ip", dest != NULL); + +#if LWIP_IPV6 +#if LWIP_IPV4 + if (IP_IS_V6(dest)) +#endif /* LWIP_IPV4 */ + { + /* First look in destination cache, to see if there is a Path MTU. */ + mtu = nd6_get_destination_mtu(ip_2_ip6(dest), outif); + } +#if LWIP_IPV4 + else +#endif /* LWIP_IPV4 */ +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + { + if (outif == NULL) { + return sendmss; + } + mtu = outif->mtu; + } +#endif /* LWIP_IPV4 */ + + if (mtu != 0) { + u16_t offset; +#if LWIP_IPV6 +#if LWIP_IPV4 + if (IP_IS_V6(dest)) +#endif /* LWIP_IPV4 */ + { + offset = IP6_HLEN + TCP_HLEN; + } +#if LWIP_IPV4 + else +#endif /* LWIP_IPV4 */ +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + { + offset = IP_HLEN + TCP_HLEN; + } +#endif /* LWIP_IPV4 */ + mss_s = (mtu > offset) ? (u16_t)(mtu - offset) : 0; + /* RFC 1122, chap 4.2.2.6: + * Eff.snd.MSS = min(SendMSS+20, MMS_S) - TCPhdrsize - IPoptionsize + * We correct for TCP options in tcp_write(), and don't support IP options. + */ + sendmss = LWIP_MIN(sendmss, mss_s); + } + return sendmss; +} +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + +/** Helper function for tcp_netif_ip_addr_changed() that iterates a pcb list */ +static void +tcp_netif_ip_addr_changed_pcblist(const ip_addr_t *old_addr, struct tcp_pcb *pcb_list) +{ + struct tcp_pcb *pcb; + pcb = pcb_list; + + LWIP_ASSERT("tcp_netif_ip_addr_changed_pcblist: invalid old_addr", old_addr != NULL); + + while (pcb != NULL) { + /* PCB bound to current local interface address? */ + if (ip_addr_cmp(&pcb->local_ip, old_addr) +#if LWIP_AUTOIP + /* connections to link-local addresses must persist (RFC3927 ch. 
1.9) */ + && (!IP_IS_V4_VAL(pcb->local_ip) || !ip4_addr_islinklocal(ip_2_ip4(&pcb->local_ip))) +#endif /* LWIP_AUTOIP */ + ) { + /* this connection must be aborted */ + struct tcp_pcb *next = pcb->next; + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_set_ipaddr: aborting TCP pcb %p\n", (void *)pcb)); + tcp_abort(pcb); + pcb = next; + } else { + pcb = pcb->next; + } + } +} + +/** This function is called from netif.c when address is changed or netif is removed + * + * @param old_addr IP address of the netif before change + * @param new_addr IP address of the netif after change or NULL if netif has been removed + */ +void +tcp_netif_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr) +{ + struct tcp_pcb_listen *lpcb; + + if (!ip_addr_isany(old_addr)) { + tcp_netif_ip_addr_changed_pcblist(old_addr, tcp_active_pcbs); + tcp_netif_ip_addr_changed_pcblist(old_addr, tcp_bound_pcbs); + + if (!ip_addr_isany(new_addr)) { + /* PCB bound to current local interface address? */ + for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) { + /* PCB bound to current local interface address? */ + if (ip_addr_cmp(&lpcb->local_ip, old_addr)) { + /* The PCB is listening to the old ipaddr and + * is set to listen to the new one instead */ + ip_addr_copy(lpcb->local_ip, *new_addr); + } + } + } + } +} + +const char * +tcp_debug_state_str(enum tcp_state s) +{ + return tcp_state_str[s]; +} + +err_t +tcp_tcp_get_tcp_addrinfo(struct tcp_pcb *pcb, int local, ip_addr_t *addr, u16_t *port) +{ + if (pcb) { + if (local) { + if (addr) { + *addr = pcb->local_ip; + } + if (port) { + *port = pcb->local_port; + } + } else { + if (addr) { + *addr = pcb->remote_ip; + } + if (port) { + *port = pcb->remote_port; + } + } + return ERR_OK; + } + return ERR_VAL; +} + +#if TCP_QUEUE_OOSEQ +/* Free all ooseq pbufs (and possibly reset SACK state) */ +void +tcp_free_ooseq(struct tcp_pcb *pcb) +{ + if (pcb->ooseq) { + tcp_segs_free(pcb->ooseq); + pcb->ooseq = NULL; +#if LWIP_TCP_SACK_OUT + memset(pcb->rcv_sacks, 0, sizeof(pcb->rcv_sacks)); +#endif /* LWIP_TCP_SACK_OUT */ + } +} +#endif /* TCP_QUEUE_OOSEQ */ + +#if TCP_DEBUG || TCP_INPUT_DEBUG || TCP_OUTPUT_DEBUG +/** + * Print a tcp header for debugging purposes. 
+ * + * @param tcphdr pointer to a struct tcp_hdr + */ +void +tcp_debug_print(struct tcp_hdr *tcphdr) +{ + LWIP_DEBUGF(TCP_DEBUG, ("TCP header:\n")); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| %5"U16_F" | %5"U16_F" | (src port, dest port)\n", + lwip_ntohs(tcphdr->src), lwip_ntohs(tcphdr->dest))); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| %010"U32_F" | (seq no)\n", + lwip_ntohl(tcphdr->seqno))); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| %010"U32_F" | (ack no)\n", + lwip_ntohl(tcphdr->ackno))); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| %2"U16_F" | |%"U16_F"%"U16_F"%"U16_F"%"U16_F"%"U16_F"%"U16_F"| %5"U16_F" | (hdrlen, flags (", + TCPH_HDRLEN(tcphdr), + (u16_t)(TCPH_FLAGS(tcphdr) >> 5 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) >> 4 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) >> 3 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) >> 2 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) >> 1 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) & 1), + lwip_ntohs(tcphdr->wnd))); + tcp_debug_print_flags(TCPH_FLAGS(tcphdr)); + LWIP_DEBUGF(TCP_DEBUG, ("), win)\n")); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| 0x%04"X16_F" | %5"U16_F" | (chksum, urgp)\n", + lwip_ntohs(tcphdr->chksum), lwip_ntohs(tcphdr->urgp))); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); +} + +/** + * Print a tcp state for debugging purposes. + * + * @param s enum tcp_state to print + */ +void +tcp_debug_print_state(enum tcp_state s) +{ + LWIP_DEBUGF(TCP_DEBUG, ("State: %s\n", tcp_state_str[s])); +} + +/** + * Print tcp flags for debugging purposes. + * + * @param flags tcp flags, all active flags are printed + */ +void +tcp_debug_print_flags(u8_t flags) +{ + if (flags & TCP_FIN) { + LWIP_DEBUGF(TCP_DEBUG, ("FIN ")); + } + if (flags & TCP_SYN) { + LWIP_DEBUGF(TCP_DEBUG, ("SYN ")); + } + if (flags & TCP_RST) { + LWIP_DEBUGF(TCP_DEBUG, ("RST ")); + } + if (flags & TCP_PSH) { + LWIP_DEBUGF(TCP_DEBUG, ("PSH ")); + } + if (flags & TCP_ACK) { + LWIP_DEBUGF(TCP_DEBUG, ("ACK ")); + } + if (flags & TCP_URG) { + LWIP_DEBUGF(TCP_DEBUG, ("URG ")); + } + if (flags & TCP_ECE) { + LWIP_DEBUGF(TCP_DEBUG, ("ECE ")); + } + if (flags & TCP_CWR) { + LWIP_DEBUGF(TCP_DEBUG, ("CWR ")); + } + LWIP_DEBUGF(TCP_DEBUG, ("\n")); +} + +/** + * Print all tcp_pcbs in every list for debugging purposes. 
+ */ +void +tcp_debug_print_pcbs(void) +{ + struct tcp_pcb *pcb; + struct tcp_pcb_listen *pcbl; + + LWIP_DEBUGF(TCP_DEBUG, ("Active PCB states:\n")); + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F", foreign port %"U16_F" snd_nxt %"U32_F" rcv_nxt %"U32_F" ", + pcb->local_port, pcb->remote_port, + pcb->snd_nxt, pcb->rcv_nxt)); + tcp_debug_print_state(pcb->state); + } + + LWIP_DEBUGF(TCP_DEBUG, ("Listen PCB states:\n")); + for (pcbl = tcp_listen_pcbs.listen_pcbs; pcbl != NULL; pcbl = pcbl->next) { + LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F" ", pcbl->local_port)); + tcp_debug_print_state(pcbl->state); + } + + LWIP_DEBUGF(TCP_DEBUG, ("TIME-WAIT PCB states:\n")); + for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F", foreign port %"U16_F" snd_nxt %"U32_F" rcv_nxt %"U32_F" ", + pcb->local_port, pcb->remote_port, + pcb->snd_nxt, pcb->rcv_nxt)); + tcp_debug_print_state(pcb->state); + } +} + +/** + * Check state consistency of the tcp_pcb lists. + */ +s16_t +tcp_pcbs_sane(void) +{ + struct tcp_pcb *pcb; + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != CLOSED", pcb->state != CLOSED); + LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != LISTEN", pcb->state != LISTEN); + LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != TIME-WAIT", pcb->state != TIME_WAIT); + } + for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_ASSERT("tcp_pcbs_sane: tw pcb->state == TIME-WAIT", pcb->state == TIME_WAIT); + } + return 1; +} +#endif /* TCP_DEBUG */ + +#if LWIP_TCP_PCB_NUM_EXT_ARGS +/** + * @defgroup tcp_raw_extargs ext arguments + * @ingroup tcp_raw + * Additional data storage per tcp pcb\n + * @see @ref tcp_raw + * + * When LWIP_TCP_PCB_NUM_EXT_ARGS is > 0, every tcp pcb (including listen pcb) + * includes a number of additional argument entries in an array. + * + * To support memory management, in addition to a 'void *', callbacks can be + * provided to manage transition from listening pcbs to connections and to + * deallocate memory when a pcb is deallocated (see struct @ref tcp_ext_arg_callbacks). + * + * After allocating this index, use @ref tcp_ext_arg_set and @ref tcp_ext_arg_get + * to store and load arguments from this index for a given pcb. + */ + +static u8_t tcp_ext_arg_id; + +/** + * @ingroup tcp_raw_extargs + * Allocate an index to store data in ext_args member of struct tcp_pcb. + * Returned value is an index in mentioned array. + * The index is *global* over all pcbs! + * + * When @ref LWIP_TCP_PCB_NUM_EXT_ARGS is > 0, every tcp pcb (including listen pcb) + * includes a number of additional argument entries in an array. + * + * To support memory management, in addition to a 'void *', callbacks can be + * provided to manage transition from listening pcbs to connections and to + * deallocate memory when a pcb is deallocated (see struct @ref tcp_ext_arg_callbacks). + * + * After allocating this index, use @ref tcp_ext_arg_set and @ref tcp_ext_arg_get + * to store and load arguments from this index for a given pcb. 
+ * + * @return a unique index into struct tcp_pcb.ext_args + */ +u8_t +tcp_ext_arg_alloc_id(void) +{ + u8_t result = tcp_ext_arg_id; + tcp_ext_arg_id++; + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_TCP_PCB_NUM_EXT_ARGS >= 255 +#error LWIP_TCP_PCB_NUM_EXT_ARGS +#endif + LWIP_ASSERT("Increase LWIP_TCP_PCB_NUM_EXT_ARGS in lwipopts.h", result < LWIP_TCP_PCB_NUM_EXT_ARGS); + return result; +} + +/** + * @ingroup tcp_raw_extargs + * Set callbacks for a given index of ext_args on the specified pcb. + * + * @param pcb tcp_pcb for which to set the callback + * @param id ext_args index to set (allocated via @ref tcp_ext_arg_alloc_id) + * @param callbacks callback table (const since it is referenced, not copied!) + */ +void +tcp_ext_arg_set_callbacks(struct tcp_pcb *pcb, uint8_t id, const struct tcp_ext_arg_callbacks * const callbacks) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT("id < LWIP_TCP_PCB_NUM_EXT_ARGS", id < LWIP_TCP_PCB_NUM_EXT_ARGS); + LWIP_ASSERT("callbacks != NULL", callbacks != NULL); + + LWIP_ASSERT_CORE_LOCKED(); + + pcb->ext_args[id].callbacks = callbacks; +} + +/** + * @ingroup tcp_raw_extargs + * Set data for a given index of ext_args on the specified pcb. + * + * @param pcb tcp_pcb for which to set the data + * @param id ext_args index to set (allocated via @ref tcp_ext_arg_alloc_id) + * @param arg data pointer to set + */ +void tcp_ext_arg_set(struct tcp_pcb *pcb, uint8_t id, void *arg) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT("id < LWIP_TCP_PCB_NUM_EXT_ARGS", id < LWIP_TCP_PCB_NUM_EXT_ARGS); + + LWIP_ASSERT_CORE_LOCKED(); + + pcb->ext_args[id].data = arg; +} + +/** + * @ingroup tcp_raw_extargs + * Set data for a given index of ext_args on the specified pcb. + * + * @param pcb tcp_pcb for which to set the data + * @param id ext_args index to set (allocated via @ref tcp_ext_arg_alloc_id) + * @return data pointer at the given index + */ +void *tcp_ext_arg_get(const struct tcp_pcb *pcb, uint8_t id) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT("id < LWIP_TCP_PCB_NUM_EXT_ARGS", id < LWIP_TCP_PCB_NUM_EXT_ARGS); + + LWIP_ASSERT_CORE_LOCKED(); + + return pcb->ext_args[id].data; +} + +/** This function calls the "destroy" callback for all ext_args once a pcb is + * freed. + */ +static void +tcp_ext_arg_invoke_callbacks_destroyed(struct tcp_pcb_ext_args *ext_args) +{ + int i; + LWIP_ASSERT("ext_args != NULL", ext_args != NULL); + + for (i = 0; i < LWIP_TCP_PCB_NUM_EXT_ARGS; i++) { + if (ext_args[i].callbacks != NULL) { + if (ext_args[i].callbacks->destroy != NULL) { + ext_args[i].callbacks->destroy((u8_t)i, ext_args[i].data); + } + } + } +} + +/** This function calls the "passive_open" callback for all ext_args if a connection + * is in the process of being accepted. This is called just after the SYN is + * received and before a SYN/ACK is sent, to allow to modify the very first + * segment sent even on passive open. Naturally, the "accepted" callback of the + * pcb has not been called yet! 
+ */ +err_t +tcp_ext_arg_invoke_callbacks_passive_open(struct tcp_pcb_listen *lpcb, struct tcp_pcb *cpcb) +{ + int i; + LWIP_ASSERT("lpcb != NULL", lpcb != NULL); + LWIP_ASSERT("cpcb != NULL", cpcb != NULL); + + for (i = 0; i < LWIP_TCP_PCB_NUM_EXT_ARGS; i++) { + if (lpcb->ext_args[i].callbacks != NULL) { + if (lpcb->ext_args[i].callbacks->passive_open != NULL) { + err_t err = lpcb->ext_args[i].callbacks->passive_open((u8_t)i, lpcb, cpcb); + if (err != ERR_OK) { + return err; + } + } + } + } + return ERR_OK; +} +#endif /* LWIP_TCP_PCB_NUM_EXT_ARGS */ + +#endif /* LWIP_TCP */ diff --git a/Middlewares/Third_Party/LwIP/src/core/tcp_in.c b/Middlewares/Third_Party/LwIP/src/core/tcp_in.c new file mode 100644 index 0000000..428a6f4 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/tcp_in.c @@ -0,0 +1,2178 @@ +/** + * @file + * Transmission Control Protocol, incoming traffic + * + * The input processing functions of the TCP layer. + * + * These functions are generally called in the order (ip_input() ->) + * tcp_input() -> * tcp_process() -> tcp_receive() (-> application). + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/priv/tcp_priv.h" +#include "lwip/def.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/inet_chksum.h" +#include "lwip/stats.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#if LWIP_ND6_TCP_REACHABILITY_HINTS +#include "lwip/nd6.h" +#endif /* LWIP_ND6_TCP_REACHABILITY_HINTS */ + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/** Initial CWND calculation as defined RFC 2581 */ +#define LWIP_TCP_CALC_INITIAL_CWND(mss) ((tcpwnd_size_t)LWIP_MIN((4U * (mss)), LWIP_MAX((2U * (mss)), 4380U))) + +/* These variables are global to all functions involved in the input + processing of TCP segments. 
They are set by the tcp_input() + function. */ +static struct tcp_seg inseg; +static struct tcp_hdr *tcphdr; +static u16_t tcphdr_optlen; +static u16_t tcphdr_opt1len; +static u8_t *tcphdr_opt2; +static u16_t tcp_optidx; +static u32_t seqno, ackno; +static tcpwnd_size_t recv_acked; +static u16_t tcplen; +static u8_t flags; + +static u8_t recv_flags; +static struct pbuf *recv_data; + +struct tcp_pcb *tcp_input_pcb; + +/* Forward declarations. */ +static err_t tcp_process(struct tcp_pcb *pcb); +static void tcp_receive(struct tcp_pcb *pcb); +static void tcp_parseopt(struct tcp_pcb *pcb); + +static void tcp_listen_input(struct tcp_pcb_listen *pcb); +static void tcp_timewait_input(struct tcp_pcb *pcb); + +static int tcp_input_delayed_close(struct tcp_pcb *pcb); + +#if LWIP_TCP_SACK_OUT +static void tcp_add_sack(struct tcp_pcb *pcb, u32_t left, u32_t right); +static void tcp_remove_sacks_lt(struct tcp_pcb *pcb, u32_t seq); +#if defined(TCP_OOSEQ_BYTES_LIMIT) || defined(TCP_OOSEQ_PBUFS_LIMIT) +static void tcp_remove_sacks_gt(struct tcp_pcb *pcb, u32_t seq); +#endif /* TCP_OOSEQ_BYTES_LIMIT || TCP_OOSEQ_PBUFS_LIMIT */ +#endif /* LWIP_TCP_SACK_OUT */ + +/** + * The initial input processing of TCP. It verifies the TCP header, demultiplexes + * the segment between the PCBs and passes it on to tcp_process(), which implements + * the TCP finite state machine. This function is called by the IP layer (in + * ip_input()). + * + * @param p received TCP segment to process (p->payload pointing to the TCP header) + * @param inp network interface on which this segment was received + */ +void +tcp_input(struct pbuf *p, struct netif *inp) +{ + struct tcp_pcb *pcb, *prev; + struct tcp_pcb_listen *lpcb; +#if SO_REUSE + struct tcp_pcb *lpcb_prev = NULL; + struct tcp_pcb_listen *lpcb_any = NULL; +#endif /* SO_REUSE */ + u8_t hdrlen_bytes; + err_t err; + + LWIP_UNUSED_ARG(inp); + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("tcp_input: invalid pbuf", p != NULL); + + PERF_START; + + TCP_STATS_INC(tcp.recv); + MIB2_STATS_INC(mib2.tcpinsegs); + + tcphdr = (struct tcp_hdr *)p->payload; + +#if TCP_INPUT_DEBUG + tcp_debug_print(tcphdr); +#endif + + /* Check that TCP header fits in payload */ + if (p->len < TCP_HLEN) { + /* drop short packets */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: short packet (%"U16_F" bytes) discarded\n", p->tot_len)); + TCP_STATS_INC(tcp.lenerr); + goto dropped; + } + + /* Don't even process incoming broadcasts/multicasts. */ + if (ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif()) || + ip_addr_ismulticast(ip_current_dest_addr())) { + TCP_STATS_INC(tcp.proterr); + goto dropped; + } + +#if CHECKSUM_CHECK_TCP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_TCP) { + /* Verify TCP checksum. */ + u16_t chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len, + ip_current_src_addr(), ip_current_dest_addr()); + if (chksum != 0) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: packet discarded due to failing checksum 0x%04"X16_F"\n", + chksum)); + tcp_debug_print(tcphdr); + TCP_STATS_INC(tcp.chkerr); + goto dropped; + } + } +#endif /* CHECKSUM_CHECK_TCP */ + + /* sanity-check header length */ + hdrlen_bytes = TCPH_HDRLEN_BYTES(tcphdr); + if ((hdrlen_bytes < TCP_HLEN) || (hdrlen_bytes > p->tot_len)) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: invalid header length (%"U16_F")\n", (u16_t)hdrlen_bytes)); + TCP_STATS_INC(tcp.lenerr); + goto dropped; + } + + /* Move the payload pointer in the pbuf so that it points to the + TCP data instead of the TCP header. 
*/ + tcphdr_optlen = (u16_t)(hdrlen_bytes - TCP_HLEN); + tcphdr_opt2 = NULL; + if (p->len >= hdrlen_bytes) { + /* all options are in the first pbuf */ + tcphdr_opt1len = tcphdr_optlen; + pbuf_remove_header(p, hdrlen_bytes); /* cannot fail */ + } else { + u16_t opt2len; + /* TCP header fits into first pbuf, options don't - data is in the next pbuf */ + /* there must be a next pbuf, due to hdrlen_bytes sanity check above */ + LWIP_ASSERT("p->next != NULL", p->next != NULL); + + /* advance over the TCP header (cannot fail) */ + pbuf_remove_header(p, TCP_HLEN); + + /* determine how long the first and second parts of the options are */ + tcphdr_opt1len = p->len; + opt2len = (u16_t)(tcphdr_optlen - tcphdr_opt1len); + + /* options continue in the next pbuf: set p to zero length and hide the + options in the next pbuf (adjusting p->tot_len) */ + pbuf_remove_header(p, tcphdr_opt1len); + + /* check that the options fit in the second pbuf */ + if (opt2len > p->next->len) { + /* drop short packets */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: options overflow second pbuf (%"U16_F" bytes)\n", p->next->len)); + TCP_STATS_INC(tcp.lenerr); + goto dropped; + } + + /* remember the pointer to the second part of the options */ + tcphdr_opt2 = (u8_t *)p->next->payload; + + /* advance p->next to point after the options, and manually + adjust p->tot_len to keep it consistent with the changed p->next */ + pbuf_remove_header(p->next, opt2len); + p->tot_len = (u16_t)(p->tot_len - opt2len); + + LWIP_ASSERT("p->len == 0", p->len == 0); + LWIP_ASSERT("p->tot_len == p->next->tot_len", p->tot_len == p->next->tot_len); + } + + /* Convert fields in TCP header to host byte order. */ + tcphdr->src = lwip_ntohs(tcphdr->src); + tcphdr->dest = lwip_ntohs(tcphdr->dest); + seqno = tcphdr->seqno = lwip_ntohl(tcphdr->seqno); + ackno = tcphdr->ackno = lwip_ntohl(tcphdr->ackno); + tcphdr->wnd = lwip_ntohs(tcphdr->wnd); + + flags = TCPH_FLAGS(tcphdr); + tcplen = p->tot_len; + if (flags & (TCP_FIN | TCP_SYN)) { + tcplen++; + if (tcplen < p->tot_len) { + /* u16_t overflow, cannot handle this */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: length u16_t overflow, cannot handle this\n")); + TCP_STATS_INC(tcp.lenerr); + goto dropped; + } + } + + /* Demultiplex an incoming segment. First, we check if it is destined + for an active connection. */ + prev = NULL; + + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_ASSERT("tcp_input: active pcb->state != CLOSED", pcb->state != CLOSED); + LWIP_ASSERT("tcp_input: active pcb->state != TIME-WAIT", pcb->state != TIME_WAIT); + LWIP_ASSERT("tcp_input: active pcb->state != LISTEN", pcb->state != LISTEN); + + /* check if PCB is bound to specific netif */ + if ((pcb->netif_idx != NETIF_NO_INDEX) && + (pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) { + prev = pcb; + continue; + } + + if (pcb->remote_port == tcphdr->src && + pcb->local_port == tcphdr->dest && + ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()) && + ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { + /* Move this PCB to the front of the list so that subsequent + lookups will be faster (we exploit locality in TCP segment + arrivals). 
*/ + LWIP_ASSERT("tcp_input: pcb->next != pcb (before cache)", pcb->next != pcb); + if (prev != NULL) { + prev->next = pcb->next; + pcb->next = tcp_active_pcbs; + tcp_active_pcbs = pcb; + } else { + TCP_STATS_INC(tcp.cachehit); + } + LWIP_ASSERT("tcp_input: pcb->next != pcb (after cache)", pcb->next != pcb); + break; + } + prev = pcb; + } + + if (pcb == NULL) { + /* If it did not go to an active connection, we check the connections + in the TIME-WAIT state. */ + for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_ASSERT("tcp_input: TIME-WAIT pcb->state == TIME-WAIT", pcb->state == TIME_WAIT); + + /* check if PCB is bound to specific netif */ + if ((pcb->netif_idx != NETIF_NO_INDEX) && + (pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) { + continue; + } + + if (pcb->remote_port == tcphdr->src && + pcb->local_port == tcphdr->dest && + ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()) && + ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { + /* We don't really care enough to move this PCB to the front + of the list since we are not very likely to receive that + many segments for connections in TIME-WAIT. */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: packed for TIME_WAITing connection.\n")); +#ifdef LWIP_HOOK_TCP_INPACKET_PCB + if (LWIP_HOOK_TCP_INPACKET_PCB(pcb, tcphdr, tcphdr_optlen, tcphdr_opt1len, + tcphdr_opt2, p) == ERR_OK) +#endif + { + tcp_timewait_input(pcb); + } + pbuf_free(p); + return; + } + } + + /* Finally, if we still did not get a match, we check all PCBs that + are LISTENing for incoming connections. */ + prev = NULL; + for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) { + /* check if PCB is bound to specific netif */ + if ((lpcb->netif_idx != NETIF_NO_INDEX) && + (lpcb->netif_idx != netif_get_index(ip_data.current_input_netif))) { + prev = (struct tcp_pcb *)lpcb; + continue; + } + + if (lpcb->local_port == tcphdr->dest) { + if (IP_IS_ANY_TYPE_VAL(lpcb->local_ip)) { + /* found an ANY TYPE (IPv4/IPv6) match */ +#if SO_REUSE + lpcb_any = lpcb; + lpcb_prev = prev; +#else /* SO_REUSE */ + break; +#endif /* SO_REUSE */ + } else if (IP_ADDR_PCB_VERSION_MATCH_EXACT(lpcb, ip_current_dest_addr())) { + if (ip_addr_cmp(&lpcb->local_ip, ip_current_dest_addr())) { + /* found an exact match */ + break; + } else if (ip_addr_isany(&lpcb->local_ip)) { + /* found an ANY-match */ +#if SO_REUSE + lpcb_any = lpcb; + lpcb_prev = prev; +#else /* SO_REUSE */ + break; +#endif /* SO_REUSE */ + } + } + } + prev = (struct tcp_pcb *)lpcb; + } +#if SO_REUSE + /* first try specific local IP */ + if (lpcb == NULL) { + /* only pass to ANY if no specific local IP has been found */ + lpcb = lpcb_any; + prev = lpcb_prev; + } +#endif /* SO_REUSE */ + if (lpcb != NULL) { + /* Move this PCB to the front of the list so that subsequent + lookups will be faster (we exploit locality in TCP segment + arrivals). 
*/ + if (prev != NULL) { + ((struct tcp_pcb_listen *)prev)->next = lpcb->next; + /* our successor is the remainder of the listening list */ + lpcb->next = tcp_listen_pcbs.listen_pcbs; + /* put this listening pcb at the head of the listening list */ + tcp_listen_pcbs.listen_pcbs = lpcb; + } else { + TCP_STATS_INC(tcp.cachehit); + } + + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: packed for LISTENing connection.\n")); +#ifdef LWIP_HOOK_TCP_INPACKET_PCB + if (LWIP_HOOK_TCP_INPACKET_PCB((struct tcp_pcb *)lpcb, tcphdr, tcphdr_optlen, + tcphdr_opt1len, tcphdr_opt2, p) == ERR_OK) +#endif + { + tcp_listen_input(lpcb); + } + pbuf_free(p); + return; + } + } + +#if TCP_INPUT_DEBUG + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("+-+-+-+-+-+-+-+-+-+-+-+-+-+- tcp_input: flags ")); + tcp_debug_print_flags(TCPH_FLAGS(tcphdr)); + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n")); +#endif /* TCP_INPUT_DEBUG */ + + +#ifdef LWIP_HOOK_TCP_INPACKET_PCB + if ((pcb != NULL) && LWIP_HOOK_TCP_INPACKET_PCB(pcb, tcphdr, tcphdr_optlen, + tcphdr_opt1len, tcphdr_opt2, p) != ERR_OK) { + pbuf_free(p); + return; + } +#endif + if (pcb != NULL) { + /* The incoming segment belongs to a connection. */ +#if TCP_INPUT_DEBUG + tcp_debug_print_state(pcb->state); +#endif /* TCP_INPUT_DEBUG */ + + /* Set up a tcp_seg structure. */ + inseg.next = NULL; + inseg.len = p->tot_len; + inseg.p = p; + inseg.tcphdr = tcphdr; + + recv_data = NULL; + recv_flags = 0; + recv_acked = 0; + + if (flags & TCP_PSH) { + p->flags |= PBUF_FLAG_PUSH; + } + + /* If there is data which was previously "refused" by upper layer */ + if (pcb->refused_data != NULL) { + if ((tcp_process_refused_data(pcb) == ERR_ABRT) || + ((pcb->refused_data != NULL) && (tcplen > 0))) { + /* pcb has been aborted or refused data is still refused and the new + segment contains data */ + if (pcb->rcv_ann_wnd == 0) { + /* this is a zero-window probe, we respond to it with current RCV.NXT + and drop the data segment */ + tcp_send_empty_ack(pcb); + } + TCP_STATS_INC(tcp.drop); + MIB2_STATS_INC(mib2.tcpinerrs); + goto aborted; + } + } + tcp_input_pcb = pcb; + err = tcp_process(pcb); + /* A return value of ERR_ABRT means that tcp_abort() was called + and that the pcb has been freed. If so, we don't do anything. */ + if (err != ERR_ABRT) { + if (recv_flags & TF_RESET) { + /* TF_RESET means that the connection was reset by the other + end. We then call the error callback to inform the + application that the connection is dead before we + deallocate the PCB. */ + TCP_EVENT_ERR(pcb->state, pcb->errf, pcb->callback_arg, ERR_RST); + tcp_pcb_remove(&tcp_active_pcbs, pcb); + tcp_free(pcb); + } else { + err = ERR_OK; + /* If the application has registered a "sent" function to be + called when new send buffer space is available, we call it + now. */ + if (recv_acked > 0) { + u16_t acked16; +#if LWIP_WND_SCALE + /* recv_acked is u32_t but the sent callback only takes a u16_t, + so we might have to call it multiple times. 
*/ + u32_t acked = recv_acked; + while (acked > 0) { + acked16 = (u16_t)LWIP_MIN(acked, 0xffffu); + acked -= acked16; +#else + { + acked16 = recv_acked; +#endif + TCP_EVENT_SENT(pcb, (u16_t)acked16, err); + if (err == ERR_ABRT) { + goto aborted; + } + } + recv_acked = 0; + } + if (tcp_input_delayed_close(pcb)) { + goto aborted; + } +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + while (recv_data != NULL) { + struct pbuf *rest = NULL; + pbuf_split_64k(recv_data, &rest); +#else /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + if (recv_data != NULL) { +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + + LWIP_ASSERT("pcb->refused_data == NULL", pcb->refused_data == NULL); + if (pcb->flags & TF_RXCLOSED) { + /* received data although already closed -> abort (send RST) to + notify the remote host that not all data has been processed */ + pbuf_free(recv_data); +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + if (rest != NULL) { + pbuf_free(rest); + } +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + tcp_abort(pcb); + goto aborted; + } + + /* Notify application that data has been received. */ + TCP_EVENT_RECV(pcb, recv_data, ERR_OK, err); + if (err == ERR_ABRT) { +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + if (rest != NULL) { + pbuf_free(rest); + } +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + goto aborted; + } + + /* If the upper layer can't receive this data, store it */ + if (err != ERR_OK) { +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + if (rest != NULL) { + pbuf_cat(recv_data, rest); + } +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + pcb->refused_data = recv_data; + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: keep incoming packet, because pcb is \"full\"\n")); +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + break; + } else { + /* Upper layer received the data, go on with the rest if > 64K */ + recv_data = rest; +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + } + } + + /* If a FIN segment was received, we call the callback + function with a NULL buffer to indicate EOF. */ + if (recv_flags & TF_GOT_FIN) { + if (pcb->refused_data != NULL) { + /* Delay this if we have refused data. */ + pcb->refused_data->flags |= PBUF_FLAG_TCP_FIN; + } else { + /* correct rcv_wnd as the application won't call tcp_recved() + for the FIN's seqno */ + if (pcb->rcv_wnd != TCP_WND_MAX(pcb)) { + pcb->rcv_wnd++; + } + TCP_EVENT_CLOSED(pcb, err); + if (err == ERR_ABRT) { + goto aborted; + } + } + } + + tcp_input_pcb = NULL; + if (tcp_input_delayed_close(pcb)) { + goto aborted; + } + /* Try to send something out. */ + tcp_output(pcb); +#if TCP_INPUT_DEBUG +#if TCP_DEBUG + tcp_debug_print_state(pcb->state); +#endif /* TCP_DEBUG */ +#endif /* TCP_INPUT_DEBUG */ + } + } + /* Jump target if pcb has been aborted in a callback (by calling tcp_abort()). + Below this line, 'pcb' may not be dereferenced! */ +aborted: + tcp_input_pcb = NULL; + recv_data = NULL; + + /* give up our reference to inseg.p */ + if (inseg.p != NULL) { + pbuf_free(inseg.p); + inseg.p = NULL; + } + } else { + /* If no matching PCB was found, send a TCP RST (reset) to the + sender. 
*/ + LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_input: no PCB match found, resetting.\n")); + if (!(TCPH_FLAGS(tcphdr) & TCP_RST)) { + TCP_STATS_INC(tcp.proterr); + TCP_STATS_INC(tcp.drop); + tcp_rst(NULL, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + } + pbuf_free(p); + } + + LWIP_ASSERT("tcp_input: tcp_pcbs_sane()", tcp_pcbs_sane()); + PERF_STOP("tcp_input"); + return; +dropped: + TCP_STATS_INC(tcp.drop); + MIB2_STATS_INC(mib2.tcpinerrs); + pbuf_free(p); +} + +/** Called from tcp_input to check for TF_CLOSED flag. This results in closing + * and deallocating a pcb at the correct place to ensure noone references it + * any more. + * @returns 1 if the pcb has been closed and deallocated, 0 otherwise + */ +static int +tcp_input_delayed_close(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_input_delayed_close: invalid pcb", pcb != NULL); + + if (recv_flags & TF_CLOSED) { + /* The connection has been closed and we will deallocate the + PCB. */ + if (!(pcb->flags & TF_RXCLOSED)) { + /* Connection closed although the application has only shut down the + tx side: call the PCB's err callback and indicate the closure to + ensure the application doesn't continue using the PCB. */ + TCP_EVENT_ERR(pcb->state, pcb->errf, pcb->callback_arg, ERR_CLSD); + } + tcp_pcb_remove(&tcp_active_pcbs, pcb); + tcp_free(pcb); + return 1; + } + return 0; +} + +/** + * Called by tcp_input() when a segment arrives for a listening + * connection (from tcp_input()). + * + * @param pcb the tcp_pcb_listen for which a segment arrived + * + * @note the segment which arrived is saved in global variables, therefore only the pcb + * involved is passed as a parameter to this function + */ +static void +tcp_listen_input(struct tcp_pcb_listen *pcb) +{ + struct tcp_pcb *npcb; + u32_t iss; + err_t rc; + + if (flags & TCP_RST) { + /* An incoming RST should be ignored. Return. */ + return; + } + + LWIP_ASSERT("tcp_listen_input: invalid pcb", pcb != NULL); + + /* In the LISTEN state, we check for incoming SYN segments, + creates a new PCB, and responds with a SYN|ACK. */ + if (flags & TCP_ACK) { + /* For incoming segments with the ACK flag set, respond with a + RST. */ + LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_listen_input: ACK in LISTEN, sending reset\n")); + tcp_rst((const struct tcp_pcb *)pcb, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + } else if (flags & TCP_SYN) { + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection request %"U16_F" -> %"U16_F".\n", tcphdr->src, tcphdr->dest)); +#if TCP_LISTEN_BACKLOG + if (pcb->accepts_pending >= pcb->backlog) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_listen_input: listen backlog exceeded for port %"U16_F"\n", tcphdr->dest)); + return; + } +#endif /* TCP_LISTEN_BACKLOG */ + npcb = tcp_alloc(pcb->prio); + /* If a new PCB could not be created (probably due to lack of memory), + we don't do anything, but rely on the sender will retransmit the + SYN at a time when we have more memory available. */ + if (npcb == NULL) { + err_t err; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_listen_input: could not allocate PCB\n")); + TCP_STATS_INC(tcp.memerr); + TCP_EVENT_ACCEPT(pcb, NULL, pcb->callback_arg, ERR_MEM, err); + LWIP_UNUSED_ARG(err); /* err not useful here */ + return; + } +#if TCP_LISTEN_BACKLOG + pcb->accepts_pending++; + tcp_set_flags(npcb, TF_BACKLOGPEND); +#endif /* TCP_LISTEN_BACKLOG */ + /* Set up the new PCB. 
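tcp_listen_input above drops a SYN silently once accepts_pending reaches the configured backlog; no RST is sent, because the client's SYN retransmission gives the application time to drain the accept queue. A minimal sketch of that admission check, using a hypothetical listener struct rather than lwIP's tcp_pcb_listen:

#include <stdint.h>
#include <stdbool.h>

/* Hypothetical listener bookkeeping: connections answered by the stack
 * but not yet accepted by the application count against the backlog. */
struct listener {
    uint16_t backlog;          /* configured limit                  */
    uint16_t accepts_pending;  /* SYNs answered, not yet accepted   */
};

/* Returns true if a new SYN may be answered with SYN|ACK. On overflow
 * the SYN is simply ignored and the peer retransmits it later. */
static bool listener_admit_syn(struct listener *l)
{
    if (l->accepts_pending >= l->backlog) {
        return false;               /* backlog full: drop the SYN   */
    }
    l->accepts_pending++;           /* decremented again on accept  */
    return true;
}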
*/ + ip_addr_copy(npcb->local_ip, *ip_current_dest_addr()); + ip_addr_copy(npcb->remote_ip, *ip_current_src_addr()); + npcb->local_port = pcb->local_port; + npcb->remote_port = tcphdr->src; + npcb->state = SYN_RCVD; + npcb->rcv_nxt = seqno + 1; + npcb->rcv_ann_right_edge = npcb->rcv_nxt; + iss = tcp_next_iss(npcb); + npcb->snd_wl2 = iss; + npcb->snd_nxt = iss; + npcb->lastack = iss; + npcb->snd_lbb = iss; + npcb->snd_wl1 = seqno - 1;/* initialise to seqno-1 to force window update */ + npcb->callback_arg = pcb->callback_arg; +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG + npcb->listener = pcb; +#endif /* LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG */ + /* inherit socket options */ + npcb->so_options = pcb->so_options & SOF_INHERITED; + npcb->netif_idx = pcb->netif_idx; + /* Register the new PCB so that we can begin receiving segments + for it. */ + TCP_REG_ACTIVE(npcb); + + /* Parse any options in the SYN. */ + tcp_parseopt(npcb); + npcb->snd_wnd = tcphdr->wnd; + npcb->snd_wnd_max = npcb->snd_wnd; + +#if TCP_CALCULATE_EFF_SEND_MSS + npcb->mss = tcp_eff_send_mss(npcb->mss, &npcb->local_ip, &npcb->remote_ip); +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + + MIB2_STATS_INC(mib2.tcppassiveopens); + +#if LWIP_TCP_PCB_NUM_EXT_ARGS + if (tcp_ext_arg_invoke_callbacks_passive_open(pcb, npcb) != ERR_OK) { + tcp_abandon(npcb, 0); + return; + } +#endif + + /* Send a SYN|ACK together with the MSS option. */ + rc = tcp_enqueue_flags(npcb, TCP_SYN | TCP_ACK); + if (rc != ERR_OK) { + tcp_abandon(npcb, 0); + return; + } + tcp_output(npcb); + } + return; +} + +/** + * Called by tcp_input() when a segment arrives for a connection in + * TIME_WAIT. + * + * @param pcb the tcp_pcb for which a segment arrived + * + * @note the segment which arrived is saved in global variables, therefore only the pcb + * involved is passed as a parameter to this function + */ +static void +tcp_timewait_input(struct tcp_pcb *pcb) +{ + /* RFC 1337: in TIME_WAIT, ignore RST and ACK FINs + any 'acceptable' segments */ + /* RFC 793 3.9 Event Processing - Segment Arrives: + * - first check sequence number - we skip that one in TIME_WAIT (always + * acceptable since we only send ACKs) + * - second check the RST bit (... return) */ + if (flags & TCP_RST) { + return; + } + + LWIP_ASSERT("tcp_timewait_input: invalid pcb", pcb != NULL); + + /* - fourth, check the SYN bit, */ + if (flags & TCP_SYN) { + /* If an incoming segment is not acceptable, an acknowledgment + should be sent in reply */ + if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, pcb->rcv_nxt + pcb->rcv_wnd)) { + /* If the SYN is in the window it is an error, send a reset */ + tcp_rst(pcb, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + return; + } + } else if (flags & TCP_FIN) { + /* - eighth, check the FIN bit: Remain in the TIME-WAIT state. + Restart the 2 MSL time-wait timeout.*/ + pcb->tmr = tcp_ticks; + } + + if ((tcplen > 0)) { + /* Acknowledge data, FIN or out-of-window SYN */ + tcp_ack_now(pcb); + tcp_output(pcb); + } + return; +} + +/** + * Implements the TCP state machine. Called by tcp_input. In some + * states tcp_receive() is called to receive data. The tcp_seg + * argument will be freed by the caller (tcp_input()) unless the + * recv_data pointer in the pcb is set. 
+ * + * @param pcb the tcp_pcb for which a segment arrived + * + * @note the segment which arrived is saved in global variables, therefore only the pcb + * involved is passed as a parameter to this function + */ +static err_t +tcp_process(struct tcp_pcb *pcb) +{ + struct tcp_seg *rseg; + u8_t acceptable = 0; + err_t err; + + err = ERR_OK; + + LWIP_ASSERT("tcp_process: invalid pcb", pcb != NULL); + + /* Process incoming RST segments. */ + if (flags & TCP_RST) { + /* First, determine if the reset is acceptable. */ + if (pcb->state == SYN_SENT) { + /* "In the SYN-SENT state (a RST received in response to an initial SYN), + the RST is acceptable if the ACK field acknowledges the SYN." */ + if (ackno == pcb->snd_nxt) { + acceptable = 1; + } + } else { + /* "In all states except SYN-SENT, all reset (RST) segments are validated + by checking their SEQ-fields." */ + if (seqno == pcb->rcv_nxt) { + acceptable = 1; + } else if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, + pcb->rcv_nxt + pcb->rcv_wnd)) { + /* If the sequence number is inside the window, we send a challenge ACK + and wait for a re-send with matching sequence number. + This follows RFC 5961 section 3.2 and addresses CVE-2004-0230 + (RST spoofing attack), which is present in RFC 793 RST handling. */ + tcp_ack_now(pcb); + } + } + + if (acceptable) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_process: Connection RESET\n")); + LWIP_ASSERT("tcp_input: pcb->state != CLOSED", pcb->state != CLOSED); + recv_flags |= TF_RESET; + tcp_clear_flags(pcb, TF_ACK_DELAY); + return ERR_RST; + } else { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_process: unacceptable reset seqno %"U32_F" rcv_nxt %"U32_F"\n", + seqno, pcb->rcv_nxt)); + LWIP_DEBUGF(TCP_DEBUG, ("tcp_process: unacceptable reset seqno %"U32_F" rcv_nxt %"U32_F"\n", + seqno, pcb->rcv_nxt)); + return ERR_OK; + } + } + + if ((flags & TCP_SYN) && (pcb->state != SYN_SENT && pcb->state != SYN_RCVD)) { + /* Cope with new connection attempt after remote end crashed */ + tcp_ack_now(pcb); + return ERR_OK; + } + + if ((pcb->flags & TF_RXCLOSED) == 0) { + /* Update the PCB (in)activity timer unless rx is closed (see tcp_shutdown) */ + pcb->tmr = tcp_ticks; + } + pcb->keep_cnt_sent = 0; + pcb->persist_probe = 0; + + tcp_parseopt(pcb); + + /* Do different things depending on the TCP state. */ + switch (pcb->state) { + case SYN_SENT: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("SYN-SENT: ackno %"U32_F" pcb->snd_nxt %"U32_F" unacked %"U32_F"\n", ackno, + pcb->snd_nxt, lwip_ntohl(pcb->unacked->tcphdr->seqno))); + /* received SYN ACK with expected sequence number? 
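The RST validation in tcp_process accepts a reset only when its sequence number equals rcv_nxt exactly; a sequence number that is merely inside the receive window triggers a challenge ACK instead, per RFC 5961 section 3.2. A standalone sketch of that three-way decision (names are illustrative, not lwIP's):

#include <stdint.h>

enum rst_verdict { RST_DROP, RST_ACCEPT, RST_CHALLENGE_ACK };

/* Classify an incoming RST for a synchronized connection, following
 * RFC 5961 section 3.2: exact match -> accept, in-window -> challenge
 * ACK (forces the peer to re-send with the exact sequence number),
 * anything else -> silently drop. Uses wrap-safe modulo-2^32 math. */
static enum rst_verdict classify_rst(uint32_t seqno,
                                     uint32_t rcv_nxt,
                                     uint32_t rcv_wnd)
{
    if (seqno == rcv_nxt) {
        return RST_ACCEPT;
    }
    if ((uint32_t)(seqno - rcv_nxt) < rcv_wnd) {
        return RST_CHALLENGE_ACK;
    }
    return RST_DROP;
}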
*/ + if ((flags & TCP_ACK) && (flags & TCP_SYN) + && (ackno == pcb->lastack + 1)) { + pcb->rcv_nxt = seqno + 1; + pcb->rcv_ann_right_edge = pcb->rcv_nxt; + pcb->lastack = ackno; + pcb->snd_wnd = tcphdr->wnd; + pcb->snd_wnd_max = pcb->snd_wnd; + pcb->snd_wl1 = seqno - 1; /* initialise to seqno - 1 to force window update */ + pcb->state = ESTABLISHED; + +#if TCP_CALCULATE_EFF_SEND_MSS + pcb->mss = tcp_eff_send_mss(pcb->mss, &pcb->local_ip, &pcb->remote_ip); +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + + pcb->cwnd = LWIP_TCP_CALC_INITIAL_CWND(pcb->mss); + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_process (SENT): cwnd %"TCPWNDSIZE_F + " ssthresh %"TCPWNDSIZE_F"\n", + pcb->cwnd, pcb->ssthresh)); + LWIP_ASSERT("pcb->snd_queuelen > 0", (pcb->snd_queuelen > 0)); + --pcb->snd_queuelen; + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_process: SYN-SENT --queuelen %"TCPWNDSIZE_F"\n", (tcpwnd_size_t)pcb->snd_queuelen)); + rseg = pcb->unacked; + if (rseg == NULL) { + /* might happen if tcp_output fails in tcp_rexmit_rto() + in which case the segment is on the unsent list */ + rseg = pcb->unsent; + LWIP_ASSERT("no segment to free", rseg != NULL); + pcb->unsent = rseg->next; + } else { + pcb->unacked = rseg->next; + } + tcp_seg_free(rseg); + + /* If there's nothing left to acknowledge, stop the retransmit + timer, otherwise reset it to start again */ + if (pcb->unacked == NULL) { + pcb->rtime = -1; + } else { + pcb->rtime = 0; + pcb->nrtx = 0; + } + + /* Call the user specified function to call when successfully + * connected. */ + TCP_EVENT_CONNECTED(pcb, ERR_OK, err); + if (err == ERR_ABRT) { + return ERR_ABRT; + } + tcp_ack_now(pcb); + } + /* received ACK? possibly a half-open connection */ + else if (flags & TCP_ACK) { + /* send a RST to bring the other side in a non-synchronized state. */ + tcp_rst(pcb, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + /* Resend SYN immediately (don't wait for rto timeout) to establish + connection faster, but do not send more SYNs than we otherwise would + have, or we might get caught in a loop on loopback interfaces. */ + if (pcb->nrtx < TCP_SYNMAXRTX) { + pcb->rtime = 0; + tcp_rexmit_rto(pcb); + } + } + break; + case SYN_RCVD: + if (flags & TCP_ACK) { + /* expected ACK number? */ + if (TCP_SEQ_BETWEEN(ackno, pcb->lastack + 1, pcb->snd_nxt)) { + pcb->state = ESTABLISHED; + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection established %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG + if (pcb->listener == NULL) { + /* listen pcb might be closed by now */ + err = ERR_VAL; + } else +#endif /* LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG */ + { +#if LWIP_CALLBACK_API + LWIP_ASSERT("pcb->listener->accept != NULL", pcb->listener->accept != NULL); +#endif + tcp_backlog_accepted(pcb); + /* Call the accept function. */ + TCP_EVENT_ACCEPT(pcb->listener, pcb, pcb->callback_arg, ERR_OK, err); + } + if (err != ERR_OK) { + /* If the accept function returns with an error, we abort + * the connection. */ + /* Already aborted? */ + if (err != ERR_ABRT) { + tcp_abort(pcb); + } + return ERR_ABRT; + } + /* If there was any data contained within this ACK, + * we'd better pass it on to the application as well. 
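LWIP_TCP_CALC_INITIAL_CWND seeds the congestion window once the handshake completes. The classic RFC 3390 rule bounds the initial window by min(4*MSS, max(2*MSS, 4380 bytes)); the sketch below shows that formula (it illustrates the RFC, and is assumed rather than guaranteed to match lwIP's exact macro):

#include <stdint.h>
#include <assert.h>

/* RFC 3390 initial congestion window:
 *   IW = min(4 * MSS, max(2 * MSS, 4380 bytes))
 * which yields 4 segments for small MSS values, 3 for a typical
 * 1460-byte MSS, and 2 for large MSS values. */
static uint32_t initial_cwnd(uint32_t mss)
{
    uint32_t two = 2u * mss, four = 4u * mss;
    uint32_t iw = (two > 4380u) ? two : 4380u;
    return (iw < four) ? iw : four;
}

int main(void)
{
    assert(initial_cwnd(536u)  == 2144u);  /* 4 * 536            */
    assert(initial_cwnd(1460u) == 4380u);  /* 3 * 1460           */
    assert(initial_cwnd(2190u) == 4380u);  /* exactly 2 segments */
    return 0;
}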
*/ + tcp_receive(pcb); + + /* Prevent ACK for SYN to generate a sent event */ + if (recv_acked != 0) { + recv_acked--; + } + + pcb->cwnd = LWIP_TCP_CALC_INITIAL_CWND(pcb->mss); + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_process (SYN_RCVD): cwnd %"TCPWNDSIZE_F + " ssthresh %"TCPWNDSIZE_F"\n", + pcb->cwnd, pcb->ssthresh)); + + if (recv_flags & TF_GOT_FIN) { + tcp_ack_now(pcb); + pcb->state = CLOSE_WAIT; + } + } else { + /* incorrect ACK number, send RST */ + tcp_rst(pcb, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + } + } else if ((flags & TCP_SYN) && (seqno == pcb->rcv_nxt - 1)) { + /* Looks like another copy of the SYN - retransmit our SYN-ACK */ + tcp_rexmit(pcb); + } + break; + case CLOSE_WAIT: + /* FALLTHROUGH */ + case ESTABLISHED: + tcp_receive(pcb); + if (recv_flags & TF_GOT_FIN) { /* passive close */ + tcp_ack_now(pcb); + pcb->state = CLOSE_WAIT; + } + break; + case FIN_WAIT_1: + tcp_receive(pcb); + if (recv_flags & TF_GOT_FIN) { + if ((flags & TCP_ACK) && (ackno == pcb->snd_nxt) && + pcb->unsent == NULL) { + LWIP_DEBUGF(TCP_DEBUG, + ("TCP connection closed: FIN_WAIT_1 %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); + tcp_ack_now(pcb); + tcp_pcb_purge(pcb); + TCP_RMV_ACTIVE(pcb); + pcb->state = TIME_WAIT; + TCP_REG(&tcp_tw_pcbs, pcb); + } else { + tcp_ack_now(pcb); + pcb->state = CLOSING; + } + } else if ((flags & TCP_ACK) && (ackno == pcb->snd_nxt) && + pcb->unsent == NULL) { + pcb->state = FIN_WAIT_2; + } + break; + case FIN_WAIT_2: + tcp_receive(pcb); + if (recv_flags & TF_GOT_FIN) { + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: FIN_WAIT_2 %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); + tcp_ack_now(pcb); + tcp_pcb_purge(pcb); + TCP_RMV_ACTIVE(pcb); + pcb->state = TIME_WAIT; + TCP_REG(&tcp_tw_pcbs, pcb); + } + break; + case CLOSING: + tcp_receive(pcb); + if ((flags & TCP_ACK) && ackno == pcb->snd_nxt && pcb->unsent == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: CLOSING %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); + tcp_pcb_purge(pcb); + TCP_RMV_ACTIVE(pcb); + pcb->state = TIME_WAIT; + TCP_REG(&tcp_tw_pcbs, pcb); + } + break; + case LAST_ACK: + tcp_receive(pcb); + if ((flags & TCP_ACK) && ackno == pcb->snd_nxt && pcb->unsent == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: LAST_ACK %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); + /* bugfix #21699: don't set pcb->state to CLOSED here or we risk leaking segments */ + recv_flags |= TF_CLOSED; + } + break; + default: + break; + } + return ERR_OK; +} + +#if TCP_QUEUE_OOSEQ +/** + * Insert segment into the list (segments covered with new one will be deleted) + * + * Called from tcp_receive() + */ +static void +tcp_oos_insert_segment(struct tcp_seg *cseg, struct tcp_seg *next) +{ + struct tcp_seg *old_seg; + + LWIP_ASSERT("tcp_oos_insert_segment: invalid cseg", cseg != NULL); + + if (TCPH_FLAGS(cseg->tcphdr) & TCP_FIN) { + /* received segment overlaps all following segments */ + tcp_segs_free(next); + next = NULL; + } else { + /* delete some following segments + oos queue may have segments with FIN flag */ + while (next && + TCP_SEQ_GEQ((seqno + cseg->len), + (next->tcphdr->seqno + next->len))) { + /* cseg with FIN already processed */ + if (TCPH_FLAGS(next->tcphdr) & TCP_FIN) { + TCPH_SET_FLAG(cseg->tcphdr, TCP_FIN); + } + old_seg = next; + next = next->next; + tcp_seg_free(old_seg); + } + if (next && + TCP_SEQ_GT(seqno + cseg->len, next->tcphdr->seqno)) { + /* We need to trim 
the incoming segment. */ + cseg->len = (u16_t)(next->tcphdr->seqno - seqno); + pbuf_realloc(cseg->p, cseg->len); + } + } + cseg->next = next; +} +#endif /* TCP_QUEUE_OOSEQ */ + +/** Remove segments from a list if the incoming ACK acknowledges them */ +static struct tcp_seg * +tcp_free_acked_segments(struct tcp_pcb *pcb, struct tcp_seg *seg_list, const char *dbg_list_name, + struct tcp_seg *dbg_other_seg_list) +{ + struct tcp_seg *next; + u16_t clen; + + LWIP_UNUSED_ARG(dbg_list_name); + LWIP_UNUSED_ARG(dbg_other_seg_list); + + while (seg_list != NULL && + TCP_SEQ_LEQ(lwip_ntohl(seg_list->tcphdr->seqno) + + TCP_TCPLEN(seg_list), ackno)) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: removing %"U32_F":%"U32_F" from pcb->%s\n", + lwip_ntohl(seg_list->tcphdr->seqno), + lwip_ntohl(seg_list->tcphdr->seqno) + TCP_TCPLEN(seg_list), + dbg_list_name)); + + next = seg_list; + seg_list = seg_list->next; + + clen = pbuf_clen(next->p); + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_receive: queuelen %"TCPWNDSIZE_F" ... ", + (tcpwnd_size_t)pcb->snd_queuelen)); + LWIP_ASSERT("pcb->snd_queuelen >= pbuf_clen(next->p)", (pcb->snd_queuelen >= clen)); + + pcb->snd_queuelen = (u16_t)(pcb->snd_queuelen - clen); + recv_acked = (tcpwnd_size_t)(recv_acked + next->len); + tcp_seg_free(next); + + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("%"TCPWNDSIZE_F" (after freeing %s)\n", + (tcpwnd_size_t)pcb->snd_queuelen, + dbg_list_name)); + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_receive: valid queue length", + seg_list != NULL || dbg_other_seg_list != NULL); + } + } + return seg_list; +} + +/** + * Called by tcp_process. Checks if the given segment is an ACK for outstanding + * data, and if so frees the memory of the buffered data. Next, it places the + * segment on any of the receive queues (pcb->recved or pcb->ooseq). If the segment + * is buffered, the pbuf is referenced by pbuf_ref so that it will not be freed until + * it has been removed from the buffer. + * + * If the incoming segment constitutes an ACK for a segment that was used for RTT + * estimation, the RTT is estimated here as well. + * + * Called from tcp_process(). + */ +static void +tcp_receive(struct tcp_pcb *pcb) +{ + s16_t m; + u32_t right_wnd_edge; + int found_dupack = 0; + + LWIP_ASSERT("tcp_receive: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_receive: wrong state", pcb->state >= ESTABLISHED); + + if (flags & TCP_ACK) { + right_wnd_edge = pcb->snd_wnd + pcb->snd_wl2; + + /* Update window. */ + if (TCP_SEQ_LT(pcb->snd_wl1, seqno) || + (pcb->snd_wl1 == seqno && TCP_SEQ_LT(pcb->snd_wl2, ackno)) || + (pcb->snd_wl2 == ackno && (u32_t)SND_WND_SCALE(pcb, tcphdr->wnd) > pcb->snd_wnd)) { + pcb->snd_wnd = SND_WND_SCALE(pcb, tcphdr->wnd); + /* keep track of the biggest window announced by the remote host to calculate + the maximum segment size */ + if (pcb->snd_wnd_max < pcb->snd_wnd) { + pcb->snd_wnd_max = pcb->snd_wnd; + } + pcb->snd_wl1 = seqno; + pcb->snd_wl2 = ackno; + LWIP_DEBUGF(TCP_WND_DEBUG, ("tcp_receive: window update %"TCPWNDSIZE_F"\n", pcb->snd_wnd)); +#if TCP_WND_DEBUG + } else { + if (pcb->snd_wnd != (tcpwnd_size_t)SND_WND_SCALE(pcb, tcphdr->wnd)) { + LWIP_DEBUGF(TCP_WND_DEBUG, + ("tcp_receive: no window update lastack %"U32_F" ackno %" + U32_F" wl1 %"U32_F" seqno %"U32_F" wl2 %"U32_F"\n", + pcb->lastack, ackno, pcb->snd_wl1, seqno, pcb->snd_wl2)); + } +#endif /* TCP_WND_DEBUG */ + } + + /* (From Stevens TCP/IP Illustrated Vol II, p970.) Its only a + * duplicate ack if: + * 1) It doesn't ACK new data + * 2) length of received packet is zero (i.e. 
no payload) + * 3) the advertised window hasn't changed + * 4) There is outstanding unacknowledged data (retransmission timer running) + * 5) The ACK is == biggest ACK sequence number so far seen (snd_una) + * + * If it passes all five, should process as a dupack: + * a) dupacks < 3: do nothing + * b) dupacks == 3: fast retransmit + * c) dupacks > 3: increase cwnd + * + * If it only passes 1-3, should reset dupack counter (and add to + * stats, which we don't do in lwIP) + * + * If it only passes 1, should reset dupack counter + * + */ + + /* Clause 1 */ + if (TCP_SEQ_LEQ(ackno, pcb->lastack)) { + /* Clause 2 */ + if (tcplen == 0) { + /* Clause 3 */ + if (pcb->snd_wl2 + pcb->snd_wnd == right_wnd_edge) { + /* Clause 4 */ + if (pcb->rtime >= 0) { + /* Clause 5 */ + if (pcb->lastack == ackno) { + found_dupack = 1; + if ((u8_t)(pcb->dupacks + 1) > pcb->dupacks) { + ++pcb->dupacks; + } + if (pcb->dupacks > 3) { + /* Inflate the congestion window */ + TCP_WND_INC(pcb->cwnd, pcb->mss); + } + if (pcb->dupacks >= 3) { + /* Do fast retransmit (checked via TF_INFR, not via dupacks count) */ + tcp_rexmit_fast(pcb); + } + } + } + } + } + /* If Clause (1) or more is true, but not a duplicate ack, reset + * count of consecutive duplicate acks */ + if (!found_dupack) { + pcb->dupacks = 0; + } + } else if (TCP_SEQ_BETWEEN(ackno, pcb->lastack + 1, pcb->snd_nxt)) { + /* We come here when the ACK acknowledges new data. */ + tcpwnd_size_t acked; + + /* Reset the "IN Fast Retransmit" flag, since we are no longer + in fast retransmit. Also reset the congestion window to the + slow start threshold. */ + if (pcb->flags & TF_INFR) { + tcp_clear_flags(pcb, TF_INFR); + pcb->cwnd = pcb->ssthresh; + pcb->bytes_acked = 0; + } + + /* Reset the number of retransmissions. */ + pcb->nrtx = 0; + + /* Reset the retransmission time-out. */ + pcb->rto = (s16_t)((pcb->sa >> 3) + pcb->sv); + + /* Record how much data this ACK acks */ + acked = (tcpwnd_size_t)(ackno - pcb->lastack); + + /* Reset the fast retransmit variables. */ + pcb->dupacks = 0; + pcb->lastack = ackno; + + /* Update the congestion control variables (cwnd and + ssthresh). */ + if (pcb->state >= ESTABLISHED) { + if (pcb->cwnd < pcb->ssthresh) { + tcpwnd_size_t increase; + /* limit to 1 SMSS segment during period following RTO */ + u8_t num_seg = (pcb->flags & TF_RTO) ? 1 : 2; + /* RFC 3465, section 2.2 Slow Start */ + increase = LWIP_MIN(acked, (tcpwnd_size_t)(num_seg * pcb->mss)); + TCP_WND_INC(pcb->cwnd, increase); + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_receive: slow start cwnd %"TCPWNDSIZE_F"\n", pcb->cwnd)); + } else { + /* RFC 3465, section 2.1 Congestion Avoidance */ + TCP_WND_INC(pcb->bytes_acked, acked); + if (pcb->bytes_acked >= pcb->cwnd) { + pcb->bytes_acked = (tcpwnd_size_t)(pcb->bytes_acked - pcb->cwnd); + TCP_WND_INC(pcb->cwnd, pcb->mss); + } + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_receive: congestion avoidance cwnd %"TCPWNDSIZE_F"\n", pcb->cwnd)); + } + } + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: ACK for %"U32_F", unacked->seqno %"U32_F":%"U32_F"\n", + ackno, + pcb->unacked != NULL ? + lwip_ntohl(pcb->unacked->tcphdr->seqno) : 0, + pcb->unacked != NULL ? + lwip_ntohl(pcb->unacked->tcphdr->seqno) + TCP_TCPLEN(pcb->unacked) : 0)); + + /* Remove segment from the unacknowledged list if the incoming + ACK acknowledges them. */ + pcb->unacked = tcp_free_acked_segments(pcb, pcb->unacked, "unacked", pcb->unsent); + /* We go through the ->unsent list to see if any of the segments + on the list are acknowledged by the ACK. 
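The congestion-window growth above follows RFC 3465 (Appropriate Byte Counting): in slow start, cwnd grows by the bytes newly acknowledged, capped at 2 SMSS per ACK (1 SMSS right after an RTO); in congestion avoidance, acknowledged bytes accumulate and cwnd grows by one SMSS per cwnd-worth of data. A compact sketch of that rule with hypothetical field names:

#include <stdint.h>

/* Minimal congestion-control state, illustrating RFC 3465 byte counting. */
struct cc_state {
    uint32_t cwnd;          /* congestion window, bytes              */
    uint32_t ssthresh;      /* slow-start threshold, bytes           */
    uint32_t bytes_acked;   /* accumulator for congestion avoidance  */
    uint32_t mss;           /* sender maximum segment size           */
    int      after_rto;     /* limit growth to 1 SMSS per ACK if set */
};

static void cc_on_ack(struct cc_state *cc, uint32_t acked)
{
    if (cc->cwnd < cc->ssthresh) {
        /* Slow start: cwnd += min(acked, L * SMSS), L = 2 (1 after RTO). */
        uint32_t limit = (cc->after_rto ? 1u : 2u) * cc->mss;
        cc->cwnd += (acked < limit) ? acked : limit;
    } else {
        /* Congestion avoidance: one SMSS per cwnd of acknowledged data. */
        cc->bytes_acked += acked;
        if (cc->bytes_acked >= cc->cwnd) {
            cc->bytes_acked -= cc->cwnd;
            cc->cwnd += cc->mss;
        }
    }
}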
This may seem + strange since an "unsent" segment shouldn't be acked. The + rationale is that lwIP puts all outstanding segments on the + ->unsent list after a retransmission, so these segments may + in fact have been sent once. */ + pcb->unsent = tcp_free_acked_segments(pcb, pcb->unsent, "unsent", pcb->unacked); + + /* If there's nothing left to acknowledge, stop the retransmit + timer, otherwise reset it to start again */ + if (pcb->unacked == NULL) { + pcb->rtime = -1; + } else { + pcb->rtime = 0; + } + + pcb->polltmr = 0; + +#if TCP_OVERSIZE + if (pcb->unsent == NULL) { + pcb->unsent_oversize = 0; + } +#endif /* TCP_OVERSIZE */ + +#if LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS + if (ip_current_is_v6()) { + /* Inform neighbor reachability of forward progress. */ + nd6_reachability_hint(ip6_current_src_addr()); + } +#endif /* LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS*/ + + pcb->snd_buf = (tcpwnd_size_t)(pcb->snd_buf + recv_acked); + /* check if this ACK ends our retransmission of in-flight data */ + if (pcb->flags & TF_RTO) { + /* RTO is done if + 1) both queues are empty or + 2) unacked is empty and unsent head contains data not part of RTO or + 3) unacked head contains data not part of RTO */ + if (pcb->unacked == NULL) { + if ((pcb->unsent == NULL) || + (TCP_SEQ_LEQ(pcb->rto_end, lwip_ntohl(pcb->unsent->tcphdr->seqno)))) { + tcp_clear_flags(pcb, TF_RTO); + } + } else if (TCP_SEQ_LEQ(pcb->rto_end, lwip_ntohl(pcb->unacked->tcphdr->seqno))) { + tcp_clear_flags(pcb, TF_RTO); + } + } + /* End of ACK for new data processing. */ + } else { + /* Out of sequence ACK, didn't really ack anything */ + tcp_send_empty_ack(pcb); + } + + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_receive: pcb->rttest %"U32_F" rtseq %"U32_F" ackno %"U32_F"\n", + pcb->rttest, pcb->rtseq, ackno)); + + /* RTT estimation calculations. This is done by checking if the + incoming segment acknowledges the segment we use to take a + round-trip time measurement. */ + if (pcb->rttest && TCP_SEQ_LT(pcb->rtseq, ackno)) { + /* diff between this shouldn't exceed 32K since this are tcp timer ticks + and a round-trip shouldn't be that long... */ + m = (s16_t)(tcp_ticks - pcb->rttest); + + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_receive: experienced rtt %"U16_F" ticks (%"U16_F" msec).\n", + m, (u16_t)(m * TCP_SLOW_INTERVAL))); + + /* This is taken directly from VJs original code in his paper */ + m = (s16_t)(m - (pcb->sa >> 3)); + pcb->sa = (s16_t)(pcb->sa + m); + if (m < 0) { + m = (s16_t) - m; + } + m = (s16_t)(m - (pcb->sv >> 2)); + pcb->sv = (s16_t)(pcb->sv + m); + pcb->rto = (s16_t)((pcb->sa >> 3) + pcb->sv); + + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_receive: RTO %"U16_F" (%"U16_F" milliseconds)\n", + pcb->rto, (u16_t)(pcb->rto * TCP_SLOW_INTERVAL))); + + pcb->rttest = 0; + } + } + + /* If the incoming segment contains data, we must process it + further unless the pcb already received a FIN. + (RFC 793, chapter 3.9, "SEGMENT ARRIVES" in states CLOSE-WAIT, CLOSING, + LAST-ACK and TIME-WAIT: "Ignore the segment text.") */ + if ((tcplen > 0) && (pcb->state < CLOSE_WAIT)) { + /* This code basically does three things: + + +) If the incoming segment contains data that is the next + in-sequence data, this data is passed to the application. This + might involve trimming the first edge of the data. The rcv_nxt + variable and the advertised window are adjusted. + + +) If the incoming segment has data that is above the next + sequence number expected (->rcv_nxt), the segment is placed on + the ->ooseq queue. 
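The RTT block above is Van Jacobson's scaled-integer estimator: sa holds 8x the smoothed RTT and sv holds 4x the mean deviation, so both the updates and the RTO can be computed with shifts only. A standalone sketch operating on timer ticks (variable names mirror the code above; the starting values in the demo are arbitrary, not the stack's):

#include <stdint.h>
#include <stdio.h>

/* Van Jacobson RTT estimator with scaled integers:
 *   sa = 8 * srtt,  sv = 4 * rttvar,  all in timer ticks.
 *   rto = srtt + 4 * rttvar = (sa >> 3) + sv                   */
struct rtt_est { int16_t sa, sv, rto; };

static void rtt_update(struct rtt_est *e, int16_t measured_ticks)
{
    int16_t m = measured_ticks;
    m -= e->sa >> 3;          /* error against smoothed RTT        */
    e->sa += m;               /* srtt += error / 8 (scaled)        */
    if (m < 0) m = -m;
    m -= e->sv >> 2;          /* error against mean deviation      */
    e->sv += m;               /* rttvar += |error| / 4 (scaled)    */
    e->rto = (e->sa >> 3) + e->sv;
}

int main(void)
{
    struct rtt_est e = { 0, 3, 3 };          /* arbitrary demo start      */
    int16_t samples[] = { 4, 5, 4, 6, 20 };  /* ticks; last is an outlier */
    for (unsigned i = 0; i < sizeof samples / sizeof samples[0]; i++) {
        rtt_update(&e, samples[i]);
        printf("sample %2d -> rto %d ticks\n", samples[i], e.rto);
    }
    return 0;
}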
This is done by finding the appropriate + place in the ->ooseq queue (which is ordered by sequence + number) and trim the segment in both ends if needed. An + immediate ACK is sent to indicate that we received an + out-of-sequence segment. + + +) Finally, we check if the first segment on the ->ooseq queue + now is in sequence (i.e., if rcv_nxt >= ooseq->seqno). If + rcv_nxt > ooseq->seqno, we must trim the first edge of the + segment on ->ooseq before we adjust rcv_nxt. The data in the + segments that are now on sequence are chained onto the + incoming segment so that we only need to call the application + once. + */ + + /* First, we check if we must trim the first edge. We have to do + this if the sequence number of the incoming segment is less + than rcv_nxt, and the sequence number plus the length of the + segment is larger than rcv_nxt. */ + /* if (TCP_SEQ_LT(seqno, pcb->rcv_nxt)) { + if (TCP_SEQ_LT(pcb->rcv_nxt, seqno + tcplen)) {*/ + if (TCP_SEQ_BETWEEN(pcb->rcv_nxt, seqno + 1, seqno + tcplen - 1)) { + /* Trimming the first edge is done by pushing the payload + pointer in the pbuf downwards. This is somewhat tricky since + we do not want to discard the full contents of the pbuf up to + the new starting point of the data since we have to keep the + TCP header which is present in the first pbuf in the chain. + + What is done is really quite a nasty hack: the first pbuf in + the pbuf chain is pointed to by inseg.p. Since we need to be + able to deallocate the whole pbuf, we cannot change this + inseg.p pointer to point to any of the later pbufs in the + chain. Instead, we point the ->payload pointer in the first + pbuf to data in one of the later pbufs. We also set the + inseg.data pointer to point to the right place. This way, the + ->p pointer will still point to the first pbuf, but the + ->p->payload pointer will point to data in another pbuf. + + After we are done with adjusting the pbuf pointers we must + adjust the ->data pointer in the seg and the segment + length.*/ + + struct pbuf *p = inseg.p; + u32_t off32 = pcb->rcv_nxt - seqno; + u16_t new_tot_len, off; + LWIP_ASSERT("inseg.p != NULL", inseg.p); + LWIP_ASSERT("insane offset!", (off32 < 0xffff)); + off = (u16_t)off32; + LWIP_ASSERT("pbuf too short!", (((s32_t)inseg.p->tot_len) >= off)); + inseg.len -= off; + new_tot_len = (u16_t)(inseg.p->tot_len - off); + while (p->len < off) { + off -= p->len; + /* all pbufs up to and including this one have len==0, so tot_len is equal */ + p->tot_len = new_tot_len; + p->len = 0; + p = p->next; + } + /* cannot fail... */ + pbuf_remove_header(p, off); + inseg.tcphdr->seqno = seqno = pcb->rcv_nxt; + } else { + if (TCP_SEQ_LT(seqno, pcb->rcv_nxt)) { + /* the whole segment is < rcv_nxt */ + /* must be a duplicate of a packet that has already been correctly handled */ + + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: duplicate seqno %"U32_F"\n", seqno)); + tcp_ack_now(pcb); + } + } + + /* The sequence number must be within the window (above rcv_nxt + and below rcv_nxt + rcv_wnd) in order to be further + processed. */ + if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, + pcb->rcv_nxt + pcb->rcv_wnd - 1)) { + if (pcb->rcv_nxt == seqno) { + /* The incoming segment is the next in sequence. We check if + we have to trim the end of the segment and update rcv_nxt + and pass the data to the application. 
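All the sequence-number tests above have to be wraparound-safe, since TCP sequence numbers live in modulo-2^32 space. The usual trick is to subtract in unsigned arithmetic and compare the distance against the window, as in this sketch (an illustrative helper, not lwIP's TCP_SEQ_BETWEEN macro):

#include <stdint.h>
#include <assert.h>

/* True if seq lies in [rcv_nxt, rcv_nxt + rcv_wnd), computed modulo
 * 2^32 so the test keeps working when the sequence space wraps. */
static int seq_in_window(uint32_t seq, uint32_t rcv_nxt, uint32_t rcv_wnd)
{
    return (uint32_t)(seq - rcv_nxt) < rcv_wnd;
}

int main(void)
{
    assert( seq_in_window(1000u, 1000u, 500u));        /* exactly rcv_nxt     */
    assert(!seq_in_window( 999u, 1000u, 500u));        /* old duplicate       */
    assert( seq_in_window(5u, 0xFFFFFFF0u, 100u));     /* window spans a wrap */
    return 0;
}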
*/ + tcplen = TCP_TCPLEN(&inseg); + + if (tcplen > pcb->rcv_wnd) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, + ("tcp_receive: other end overran receive window" + "seqno %"U32_F" len %"U16_F" right edge %"U32_F"\n", + seqno, tcplen, pcb->rcv_nxt + pcb->rcv_wnd)); + if (TCPH_FLAGS(inseg.tcphdr) & TCP_FIN) { + /* Must remove the FIN from the header as we're trimming + * that byte of sequence-space from the packet */ + TCPH_FLAGS_SET(inseg.tcphdr, TCPH_FLAGS(inseg.tcphdr) & ~(unsigned int)TCP_FIN); + } + /* Adjust length of segment to fit in the window. */ + TCPWND_CHECK16(pcb->rcv_wnd); + inseg.len = (u16_t)pcb->rcv_wnd; + if (TCPH_FLAGS(inseg.tcphdr) & TCP_SYN) { + inseg.len -= 1; + } + pbuf_realloc(inseg.p, inseg.len); + tcplen = TCP_TCPLEN(&inseg); + LWIP_ASSERT("tcp_receive: segment not trimmed correctly to rcv_wnd\n", + (seqno + tcplen) == (pcb->rcv_nxt + pcb->rcv_wnd)); + } +#if TCP_QUEUE_OOSEQ + /* Received in-sequence data, adjust ooseq data if: + - FIN has been received or + - inseq overlaps with ooseq */ + if (pcb->ooseq != NULL) { + if (TCPH_FLAGS(inseg.tcphdr) & TCP_FIN) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, + ("tcp_receive: received in-order FIN, binning ooseq queue\n")); + /* Received in-order FIN means anything that was received + * out of order must now have been received in-order, so + * bin the ooseq queue */ + while (pcb->ooseq != NULL) { + struct tcp_seg *old_ooseq = pcb->ooseq; + pcb->ooseq = pcb->ooseq->next; + tcp_seg_free(old_ooseq); + } + } else { + struct tcp_seg *next = pcb->ooseq; + /* Remove all segments on ooseq that are covered by inseg already. + * FIN is copied from ooseq to inseg if present. */ + while (next && + TCP_SEQ_GEQ(seqno + tcplen, + next->tcphdr->seqno + next->len)) { + struct tcp_seg *tmp; + /* inseg cannot have FIN here (already processed above) */ + if ((TCPH_FLAGS(next->tcphdr) & TCP_FIN) != 0 && + (TCPH_FLAGS(inseg.tcphdr) & TCP_SYN) == 0) { + TCPH_SET_FLAG(inseg.tcphdr, TCP_FIN); + tcplen = TCP_TCPLEN(&inseg); + } + tmp = next; + next = next->next; + tcp_seg_free(tmp); + } + /* Now trim right side of inseg if it overlaps with the first + * segment on ooseq */ + if (next && + TCP_SEQ_GT(seqno + tcplen, + next->tcphdr->seqno)) { + /* inseg cannot have FIN here (already processed above) */ + inseg.len = (u16_t)(next->tcphdr->seqno - seqno); + if (TCPH_FLAGS(inseg.tcphdr) & TCP_SYN) { + inseg.len -= 1; + } + pbuf_realloc(inseg.p, inseg.len); + tcplen = TCP_TCPLEN(&inseg); + LWIP_ASSERT("tcp_receive: segment not trimmed correctly to ooseq queue\n", + (seqno + tcplen) == next->tcphdr->seqno); + } + pcb->ooseq = next; + } + } +#endif /* TCP_QUEUE_OOSEQ */ + + pcb->rcv_nxt = seqno + tcplen; + + /* Update the receiver's (our) window. */ + LWIP_ASSERT("tcp_receive: tcplen > rcv_wnd\n", pcb->rcv_wnd >= tcplen); + pcb->rcv_wnd -= tcplen; + + tcp_update_rcv_ann_wnd(pcb); + + /* If there is data in the segment, we make preparations to + pass this up to the application. The ->recv_data variable + is used for holding the pbuf that goes to the + application. The code for reassembling out-of-sequence data + chains its data on this pbuf as well. + + If the segment was a FIN, we set the TF_GOT_FIN flag that will + be used to indicate to the application that the remote side has + closed its end of the connection. */ + if (inseg.p->tot_len > 0) { + recv_data = inseg.p; + /* Since this pbuf now is the responsibility of the + application, we delete our reference to it so that we won't + (mistakingly) deallocate it. 
*/ + inseg.p = NULL; + } + if (TCPH_FLAGS(inseg.tcphdr) & TCP_FIN) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: received FIN.\n")); + recv_flags |= TF_GOT_FIN; + } + +#if TCP_QUEUE_OOSEQ + /* We now check if we have segments on the ->ooseq queue that + are now in sequence. */ + while (pcb->ooseq != NULL && + pcb->ooseq->tcphdr->seqno == pcb->rcv_nxt) { + + struct tcp_seg *cseg = pcb->ooseq; + seqno = pcb->ooseq->tcphdr->seqno; + + pcb->rcv_nxt += TCP_TCPLEN(cseg); + LWIP_ASSERT("tcp_receive: ooseq tcplen > rcv_wnd\n", + pcb->rcv_wnd >= TCP_TCPLEN(cseg)); + pcb->rcv_wnd -= TCP_TCPLEN(cseg); + + tcp_update_rcv_ann_wnd(pcb); + + if (cseg->p->tot_len > 0) { + /* Chain this pbuf onto the pbuf that we will pass to + the application. */ + /* With window scaling, this can overflow recv_data->tot_len, but + that's not a problem since we explicitly fix that before passing + recv_data to the application. */ + if (recv_data) { + pbuf_cat(recv_data, cseg->p); + } else { + recv_data = cseg->p; + } + cseg->p = NULL; + } + if (TCPH_FLAGS(cseg->tcphdr) & TCP_FIN) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: dequeued FIN.\n")); + recv_flags |= TF_GOT_FIN; + if (pcb->state == ESTABLISHED) { /* force passive close or we can move to active close */ + pcb->state = CLOSE_WAIT; + } + } + + pcb->ooseq = cseg->next; + tcp_seg_free(cseg); + } +#if LWIP_TCP_SACK_OUT + if (pcb->flags & TF_SACK) { + if (pcb->ooseq != NULL) { + /* Some segments may have been removed from ooseq, let's remove all SACKs that + describe anything before the new beginning of that list. */ + tcp_remove_sacks_lt(pcb, pcb->ooseq->tcphdr->seqno); + } else if (LWIP_TCP_SACK_VALID(pcb, 0)) { + /* ooseq has been cleared. Nothing to SACK */ + memset(pcb->rcv_sacks, 0, sizeof(pcb->rcv_sacks)); + } + } +#endif /* LWIP_TCP_SACK_OUT */ +#endif /* TCP_QUEUE_OOSEQ */ + + + /* Acknowledge the segment(s). */ + tcp_ack(pcb); + +#if LWIP_TCP_SACK_OUT + if (LWIP_TCP_SACK_VALID(pcb, 0)) { + /* Normally the ACK for the data received could be piggy-backed on a data packet, + but lwIP currently does not support including SACKs in data packets. So we force + it to respond with an empty ACK packet (only if there is at least one SACK to be sent). + NOTE: tcp_send_empty_ack() on success clears the ACK flags (set by tcp_ack()) */ + tcp_send_empty_ack(pcb); + } +#endif /* LWIP_TCP_SACK_OUT */ + +#if LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS + if (ip_current_is_v6()) { + /* Inform neighbor reachability of forward progress. */ + nd6_reachability_hint(ip6_current_src_addr()); + } +#endif /* LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS*/ + + } else { + /* We get here if the incoming segment is out-of-sequence. */ + +#if TCP_QUEUE_OOSEQ + /* We queue the segment on the ->ooseq queue. */ + if (pcb->ooseq == NULL) { + pcb->ooseq = tcp_seg_copy(&inseg); +#if LWIP_TCP_SACK_OUT + if (pcb->flags & TF_SACK) { + /* All the SACKs should be invalid, so we can simply store the most recent one: */ + pcb->rcv_sacks[0].left = seqno; + pcb->rcv_sacks[0].right = seqno + inseg.len; + } +#endif /* LWIP_TCP_SACK_OUT */ + } else { + /* If the queue is not empty, we walk through the queue and + try to find a place where the sequence number of the + incoming segment is between the sequence numbers of the + previous and the next segment on the ->ooseq queue. That is + the place where we put the incoming segment. If needed, we + trim the second edges of the previous and the incoming + segment so that it will fit into the sequence. 
+ + If the incoming segment has the same sequence number as a + segment on the ->ooseq queue, we discard the segment that + contains less data. */ + +#if LWIP_TCP_SACK_OUT + /* This is the left edge of the lowest possible SACK range. + It may start before the newly received segment (possibly adjusted below). */ + u32_t sackbeg = TCP_SEQ_LT(seqno, pcb->ooseq->tcphdr->seqno) ? seqno : pcb->ooseq->tcphdr->seqno; +#endif /* LWIP_TCP_SACK_OUT */ + struct tcp_seg *next, *prev = NULL; + for (next = pcb->ooseq; next != NULL; next = next->next) { + if (seqno == next->tcphdr->seqno) { + /* The sequence number of the incoming segment is the + same as the sequence number of the segment on + ->ooseq. We check the lengths to see which one to + discard. */ + if (inseg.len > next->len) { + /* The incoming segment is larger than the old + segment. We replace some segments with the new + one. */ + struct tcp_seg *cseg = tcp_seg_copy(&inseg); + if (cseg != NULL) { + if (prev != NULL) { + prev->next = cseg; + } else { + pcb->ooseq = cseg; + } + tcp_oos_insert_segment(cseg, next); + } + break; + } else { + /* Either the lengths are the same or the incoming + segment was smaller than the old one; in either + case, we ditch the incoming segment. */ + break; + } + } else { + if (prev == NULL) { + if (TCP_SEQ_LT(seqno, next->tcphdr->seqno)) { + /* The sequence number of the incoming segment is lower + than the sequence number of the first segment on the + queue. We put the incoming segment first on the + queue. */ + struct tcp_seg *cseg = tcp_seg_copy(&inseg); + if (cseg != NULL) { + pcb->ooseq = cseg; + tcp_oos_insert_segment(cseg, next); + } + break; + } + } else { + /*if (TCP_SEQ_LT(prev->tcphdr->seqno, seqno) && + TCP_SEQ_LT(seqno, next->tcphdr->seqno)) {*/ + if (TCP_SEQ_BETWEEN(seqno, prev->tcphdr->seqno + 1, next->tcphdr->seqno - 1)) { + /* The sequence number of the incoming segment is in + between the sequence numbers of the previous and + the next segment on ->ooseq. We trim trim the previous + segment, delete next segments that included in received segment + and trim received, if needed. */ + struct tcp_seg *cseg = tcp_seg_copy(&inseg); + if (cseg != NULL) { + if (TCP_SEQ_GT(prev->tcphdr->seqno + prev->len, seqno)) { + /* We need to trim the prev segment. */ + prev->len = (u16_t)(seqno - prev->tcphdr->seqno); + pbuf_realloc(prev->p, prev->len); + } + prev->next = cseg; + tcp_oos_insert_segment(cseg, next); + } + break; + } + } + +#if LWIP_TCP_SACK_OUT + /* The new segment goes after the 'next' one. If there is a "hole" in sequence numbers + between 'prev' and the beginning of 'next', we want to move sackbeg. */ + if (prev != NULL && prev->tcphdr->seqno + prev->len != next->tcphdr->seqno) { + sackbeg = next->tcphdr->seqno; + } +#endif /* LWIP_TCP_SACK_OUT */ + + /* We don't use 'prev' below, so let's set it to current 'next'. + This way even if we break the loop below, 'prev' will be pointing + at the segment right in front of the newly added one. */ + prev = next; + + /* If the "next" segment is the last segment on the + ooseq queue, we add the incoming segment to the end + of the list. */ + if (next->next == NULL && + TCP_SEQ_GT(seqno, next->tcphdr->seqno)) { + if (TCPH_FLAGS(next->tcphdr) & TCP_FIN) { + /* segment "next" already contains all data */ + break; + } + next->next = tcp_seg_copy(&inseg); + if (next->next != NULL) { + if (TCP_SEQ_GT(next->tcphdr->seqno + next->len, seqno)) { + /* We need to trim the last segment. 
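The walk above places an out-of-order segment at its sequence-ordered position in ->ooseq, trimming overlap as it goes. A reduced sketch of the core pattern, ordered insertion into a singly linked list keyed by a wrapping 32-bit sequence number (hypothetical types; the overlap trimming and FIN handling are omitted for brevity):

#include <stdint.h>
#include <stddef.h>

/* Hypothetical out-of-order queue node. */
struct ooseg {
    uint32_t seqno;
    uint32_t len;
    struct ooseg *next;
};

/* seq_before(a, b): true if a precedes b in modulo-2^32 sequence space. */
static int seq_before(uint32_t a, uint32_t b)
{
    return (int32_t)(a - b) < 0;
}

/* Insert 'seg' so the queue stays sorted by seqno; returns the new head.
 * (The real code also merges and trims overlapping segments.) */
static struct ooseg *ooseq_insert(struct ooseg *head, struct ooseg *seg)
{
    struct ooseg *prev = NULL, *next = head;
    while (next != NULL && seq_before(next->seqno, seg->seqno)) {
        prev = next;
        next = next->next;
    }
    seg->next = next;
    if (prev == NULL) {
        return seg;             /* new first element */
    }
    prev->next = seg;
    return head;
}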
*/ + next->len = (u16_t)(seqno - next->tcphdr->seqno); + pbuf_realloc(next->p, next->len); + } + /* check if the remote side overruns our receive window */ + if (TCP_SEQ_GT((u32_t)tcplen + seqno, pcb->rcv_nxt + (u32_t)pcb->rcv_wnd)) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, + ("tcp_receive: other end overran receive window" + "seqno %"U32_F" len %"U16_F" right edge %"U32_F"\n", + seqno, tcplen, pcb->rcv_nxt + pcb->rcv_wnd)); + if (TCPH_FLAGS(next->next->tcphdr) & TCP_FIN) { + /* Must remove the FIN from the header as we're trimming + * that byte of sequence-space from the packet */ + TCPH_FLAGS_SET(next->next->tcphdr, TCPH_FLAGS(next->next->tcphdr) & ~TCP_FIN); + } + /* Adjust length of segment to fit in the window. */ + next->next->len = (u16_t)(pcb->rcv_nxt + pcb->rcv_wnd - seqno); + pbuf_realloc(next->next->p, next->next->len); + tcplen = TCP_TCPLEN(next->next); + LWIP_ASSERT("tcp_receive: segment not trimmed correctly to rcv_wnd\n", + (seqno + tcplen) == (pcb->rcv_nxt + pcb->rcv_wnd)); + } + } + break; + } + } + } + +#if LWIP_TCP_SACK_OUT + if (pcb->flags & TF_SACK) { + if (prev == NULL) { + /* The new segment is at the beginning. sackbeg should already be set properly. + We need to find the right edge. */ + next = pcb->ooseq; + } else if (prev->next != NULL) { + /* The new segment was added after 'prev'. If there is a "hole" between 'prev' and 'prev->next', + we need to move sackbeg. After that we should find the right edge. */ + next = prev->next; + if (prev->tcphdr->seqno + prev->len != next->tcphdr->seqno) { + sackbeg = next->tcphdr->seqno; + } + } else { + next = NULL; + } + if (next != NULL) { + u32_t sackend = next->tcphdr->seqno; + for ( ; (next != NULL) && (sackend == next->tcphdr->seqno); next = next->next) { + sackend += next->len; + } + tcp_add_sack(pcb, sackbeg, sackend); + } + } +#endif /* LWIP_TCP_SACK_OUT */ + } +#if defined(TCP_OOSEQ_BYTES_LIMIT) || defined(TCP_OOSEQ_PBUFS_LIMIT) + { + /* Check that the data on ooseq doesn't exceed one of the limits + and throw away everything above that limit. */ +#ifdef TCP_OOSEQ_BYTES_LIMIT + const u32_t ooseq_max_blen = TCP_OOSEQ_BYTES_LIMIT(pcb); + u32_t ooseq_blen = 0; +#endif +#ifdef TCP_OOSEQ_PBUFS_LIMIT + const u16_t ooseq_max_qlen = TCP_OOSEQ_PBUFS_LIMIT(pcb); + u16_t ooseq_qlen = 0; +#endif + struct tcp_seg *next, *prev = NULL; + for (next = pcb->ooseq; next != NULL; prev = next, next = next->next) { + struct pbuf *p = next->p; + int stop_here = 0; +#ifdef TCP_OOSEQ_BYTES_LIMIT + ooseq_blen += p->tot_len; + if (ooseq_blen > ooseq_max_blen) { + stop_here = 1; + } +#endif +#ifdef TCP_OOSEQ_PBUFS_LIMIT + ooseq_qlen += pbuf_clen(p); + if (ooseq_qlen > ooseq_max_qlen) { + stop_here = 1; + } +#endif + if (stop_here) { +#if LWIP_TCP_SACK_OUT + if (pcb->flags & TF_SACK) { + /* Let's remove all SACKs from next's seqno up. */ + tcp_remove_sacks_gt(pcb, next->tcphdr->seqno); + } +#endif /* LWIP_TCP_SACK_OUT */ + /* too much ooseq data, dump this and everything after it */ + tcp_segs_free(next); + if (prev == NULL) { + /* first ooseq segment is too much, dump the whole queue */ + pcb->ooseq = NULL; + } else { + /* just dump 'next' and everything after it */ + prev->next = NULL; + } + break; + } + } + } +#endif /* TCP_OOSEQ_BYTES_LIMIT || TCP_OOSEQ_PBUFS_LIMIT */ +#endif /* TCP_QUEUE_OOSEQ */ + + /* We send the ACK packet after we've (potentially) dealt with SACKs, + so they can be included in the acknowledgment. */ + tcp_send_empty_ack(pcb); + } + } else { + /* The incoming segment is not within the window. 
*/ + tcp_send_empty_ack(pcb); + } + } else { + /* Segments with length 0 is taken care of here. Segments that + fall out of the window are ACKed. */ + if (!TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, pcb->rcv_nxt + pcb->rcv_wnd - 1)) { + tcp_ack_now(pcb); + } + } +} + +static u8_t +tcp_get_next_optbyte(void) +{ + u16_t optidx = tcp_optidx++; + if ((tcphdr_opt2 == NULL) || (optidx < tcphdr_opt1len)) { + u8_t *opts = (u8_t *)tcphdr + TCP_HLEN; + return opts[optidx]; + } else { + u8_t idx = (u8_t)(optidx - tcphdr_opt1len); + return tcphdr_opt2[idx]; + } +} + +/** + * Parses the options contained in the incoming segment. + * + * Called from tcp_listen_input() and tcp_process(). + * Currently, only the MSS option is supported! + * + * @param pcb the tcp_pcb for which a segment arrived + */ +static void +tcp_parseopt(struct tcp_pcb *pcb) +{ + u8_t data; + u16_t mss; +#if LWIP_TCP_TIMESTAMPS + u32_t tsval; +#endif + + LWIP_ASSERT("tcp_parseopt: invalid pcb", pcb != NULL); + + /* Parse the TCP MSS option, if present. */ + if (tcphdr_optlen != 0) { + for (tcp_optidx = 0; tcp_optidx < tcphdr_optlen; ) { + u8_t opt = tcp_get_next_optbyte(); + switch (opt) { + case LWIP_TCP_OPT_EOL: + /* End of options. */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: EOL\n")); + return; + case LWIP_TCP_OPT_NOP: + /* NOP option. */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: NOP\n")); + break; + case LWIP_TCP_OPT_MSS: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: MSS\n")); + if (tcp_get_next_optbyte() != LWIP_TCP_OPT_LEN_MSS || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_MSS) > tcphdr_optlen) { + /* Bad length */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + return; + } + /* An MSS option with the right option length. */ + mss = (u16_t)(tcp_get_next_optbyte() << 8); + mss |= tcp_get_next_optbyte(); + /* Limit the mss to the configured TCP_MSS and prevent division by zero */ + pcb->mss = ((mss > TCP_MSS) || (mss == 0)) ? TCP_MSS : mss; + break; +#if LWIP_WND_SCALE + case LWIP_TCP_OPT_WS: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: WND_SCALE\n")); + if (tcp_get_next_optbyte() != LWIP_TCP_OPT_LEN_WS || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_WS) > tcphdr_optlen) { + /* Bad length */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + return; + } + /* An WND_SCALE option with the right option length. 
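The option parser above walks the TCP option list one byte at a time: kind 0 (EOL) terminates the list, kind 1 (NOP) is a single-byte pad, and every other option carries a length byte covering kind, length and payload; MSS is kind 2, length 4, with the value in network byte order. A standalone sketch of the same walk over a plain byte buffer (illustrative only; the real code reads bytes through tcp_get_next_optbyte because the options may span two pbufs):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Scan a TCP options block and return the MSS value, or 0 if absent/bad.
 * Kinds: 0 = end of options, 1 = NOP, 2 = MSS (length 4, big-endian);
 * all other options are skipped using their length byte. */
static uint16_t parse_mss(const uint8_t *opts, size_t optlen)
{
    size_t i = 0;
    while (i < optlen) {
        uint8_t kind = opts[i];
        if (kind == 0) {                       /* EOL */
            break;
        }
        if (kind == 1) {                       /* NOP: one byte, no length */
            i++;
            continue;
        }
        if (i + 1 >= optlen) {
            break;                             /* truncated option */
        }
        uint8_t len = opts[i + 1];
        if (len < 2 || i + len > optlen) {
            break;                             /* malformed length */
        }
        if (kind == 2 && len == 4) {           /* MSS */
            return (uint16_t)((opts[i + 2] << 8) | opts[i + 3]);
        }
        i += len;
    }
    return 0;
}

int main(void)
{
    const uint8_t opts[] = { 1, 1, 2, 4, 0x05, 0xB4 };  /* NOP NOP MSS=1460 */
    printf("mss = %u\n", parse_mss(opts, sizeof opts));
    return 0;
}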
*/ + data = tcp_get_next_optbyte(); + /* If syn was received with wnd scale option, + activate wnd scale opt, but only if this is not a retransmission */ + if ((flags & TCP_SYN) && !(pcb->flags & TF_WND_SCALE)) { + pcb->snd_scale = data; + if (pcb->snd_scale > 14U) { + pcb->snd_scale = 14U; + } + pcb->rcv_scale = TCP_RCV_SCALE; + tcp_set_flags(pcb, TF_WND_SCALE); + /* window scaling is enabled, we can use the full receive window */ + LWIP_ASSERT("window not at default value", pcb->rcv_wnd == TCPWND_MIN16(TCP_WND)); + LWIP_ASSERT("window not at default value", pcb->rcv_ann_wnd == TCPWND_MIN16(TCP_WND)); + pcb->rcv_wnd = pcb->rcv_ann_wnd = TCP_WND; + } + break; +#endif /* LWIP_WND_SCALE */ +#if LWIP_TCP_TIMESTAMPS + case LWIP_TCP_OPT_TS: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: TS\n")); + if (tcp_get_next_optbyte() != LWIP_TCP_OPT_LEN_TS || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_TS) > tcphdr_optlen) { + /* Bad length */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + return; + } + /* TCP timestamp option with valid length */ + tsval = tcp_get_next_optbyte(); + tsval |= (tcp_get_next_optbyte() << 8); + tsval |= (tcp_get_next_optbyte() << 16); + tsval |= (tcp_get_next_optbyte() << 24); + if (flags & TCP_SYN) { + pcb->ts_recent = lwip_ntohl(tsval); + /* Enable sending timestamps in every segment now that we know + the remote host supports it. */ + tcp_set_flags(pcb, TF_TIMESTAMP); + } else if (TCP_SEQ_BETWEEN(pcb->ts_lastacksent, seqno, seqno + tcplen)) { + pcb->ts_recent = lwip_ntohl(tsval); + } + /* Advance to next option (6 bytes already read) */ + tcp_optidx += LWIP_TCP_OPT_LEN_TS - 6; + break; +#endif /* LWIP_TCP_TIMESTAMPS */ +#if LWIP_TCP_SACK_OUT + case LWIP_TCP_OPT_SACK_PERM: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: SACK_PERM\n")); + if (tcp_get_next_optbyte() != LWIP_TCP_OPT_LEN_SACK_PERM || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_SACK_PERM) > tcphdr_optlen) { + /* Bad length */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + return; + } + /* TCP SACK_PERM option with valid length */ + if (flags & TCP_SYN) { + /* We only set it if we receive it in a SYN (or SYN+ACK) packet */ + tcp_set_flags(pcb, TF_SACK); + } + break; +#endif /* LWIP_TCP_SACK_OUT */ + default: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: other\n")); + data = tcp_get_next_optbyte(); + if (data < 2) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + /* If the length field is zero, the options are malformed + and we don't process them further. */ + return; + } + /* All other options have a length field, so that we easily + can skip past them. */ + tcp_optidx += data - 2; + } + } + } +} + +void +tcp_trigger_input_pcb_close(void) +{ + recv_flags |= TF_CLOSED; +} + +#if LWIP_TCP_SACK_OUT +/** + * Called by tcp_receive() to add new SACK entry. + * + * The new SACK entry will be placed at the beginning of rcv_sacks[], as the newest one. + * Existing SACK entries will be "pushed back", to preserve their order. + * This is the behavior described in RFC 2018, section 4. 
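The window-scale handling above clamps the peer's shift count at 14, as RFC 7323 requires, and only enables scaling when the option arrives on a SYN. A tiny sketch of applying a negotiated scale factor to the 16-bit window field (illustrative helper, not the stack's macro):

#include <stdint.h>

#define TCP_MAX_WND_SHIFT 14u   /* RFC 7323 section 2.3 upper bound */

/* Expand the 16-bit window field from the TCP header using the shift
 * count negotiated on the SYN, clamped to the RFC 7323 maximum. */
static uint32_t scaled_window(uint16_t hdr_wnd, uint8_t snd_scale)
{
    if (snd_scale > TCP_MAX_WND_SHIFT) {
        snd_scale = TCP_MAX_WND_SHIFT;
    }
    return (uint32_t)hdr_wnd << snd_scale;
}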
+ * + * @param pcb the tcp_pcb for which a segment arrived + * @param left the left side of the SACK (the first sequence number) + * @param right the right side of the SACK (the first sequence number past this SACK) + */ +static void +tcp_add_sack(struct tcp_pcb *pcb, u32_t left, u32_t right) +{ + u8_t i; + u8_t unused_idx; + + if ((pcb->flags & TF_SACK) == 0 || !TCP_SEQ_LT(left, right)) { + return; + } + + /* First, let's remove all SACKs that are no longer needed (because they overlap with the newest one), + while moving all other SACKs forward. + We run this loop for all entries, until we find the first invalid one. + There is no point checking after that. */ + for (i = unused_idx = 0; (i < LWIP_TCP_MAX_SACK_NUM) && LWIP_TCP_SACK_VALID(pcb, i); ++i) { + /* We only want to use SACK at [i] if it doesn't overlap with left:right range. + It does not overlap if its right side is before the newly added SACK, + or if its left side is after the newly added SACK. + NOTE: The equality should not really happen, but it doesn't hurt. */ + if (TCP_SEQ_LEQ(pcb->rcv_sacks[i].right, left) || TCP_SEQ_LEQ(right, pcb->rcv_sacks[i].left)) { + if (unused_idx != i) { + /* We don't need to copy if it's already in the right spot */ + pcb->rcv_sacks[unused_idx] = pcb->rcv_sacks[i]; + } + ++unused_idx; + } + } + + /* Now 'unused_idx' is the index of the first invalid SACK entry, + anywhere between 0 (no valid entries) and LWIP_TCP_MAX_SACK_NUM (all entries are valid). + We want to clear this and all following SACKs. + However, we will be adding another one in the front (and shifting everything else back). + So let's just iterate from the back, and set each entry to the one to the left if it's valid, + or to 0 if it is not. */ + for (i = LWIP_TCP_MAX_SACK_NUM - 1; i > 0; --i) { + /* [i] is the index we are setting, and the value should be at index [i-1], + or 0 if that index is unused (>= unused_idx). */ + if (i - 1 >= unused_idx) { + /* [i-1] is unused. Let's clear [i]. */ + pcb->rcv_sacks[i].left = pcb->rcv_sacks[i].right = 0; + } else { + pcb->rcv_sacks[i] = pcb->rcv_sacks[i - 1]; + } + } + + /* And now we can store the newest SACK */ + pcb->rcv_sacks[0].left = left; + pcb->rcv_sacks[0].right = right; +} + +/** + * Called to remove a range of SACKs. + * + * SACK entries will be removed or adjusted to not acknowledge any sequence + * numbers that are less than 'seq' passed. It not only invalidates entries, + * but also moves all entries that are still valid to the beginning. + * + * @param pcb the tcp_pcb to modify + * @param seq the lowest sequence number to keep in SACK entries + */ +static void +tcp_remove_sacks_lt(struct tcp_pcb *pcb, u32_t seq) +{ + u8_t i; + u8_t unused_idx; + + /* We run this loop for all entries, until we find the first invalid one. + There is no point checking after that. */ + for (i = unused_idx = 0; (i < LWIP_TCP_MAX_SACK_NUM) && LWIP_TCP_SACK_VALID(pcb, i); ++i) { + /* We only want to use SACK at index [i] if its right side is > 'seq'. */ + if (TCP_SEQ_GT(pcb->rcv_sacks[i].right, seq)) { + if (unused_idx != i) { + /* We only copy it if it's not in the right spot already. */ + pcb->rcv_sacks[unused_idx] = pcb->rcv_sacks[i]; + } + /* NOTE: It is possible that its left side is < 'seq', in which case we should adjust it. 
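tcp_add_sack keeps a small fixed array of SACK blocks: blocks overlapping the new range are dropped, the survivors are shifted back one slot, and the newest range goes into slot 0, preserving the RFC 2018 section 4 ordering. A condensed sketch of the same array update, assuming a hypothetical fixed-size table rather than lwIP's rcv_sacks:

#include <stdint.h>
#include <string.h>

#define MAX_SACKS 4

struct sack_range { uint32_t left, right; };  /* [left, right), 0/0 = unused */

static int seq_leq(uint32_t a, uint32_t b) { return (int32_t)(a - b) <= 0; }

/* Insert a new SACK block as the most recent one (slot 0), dropping any
 * existing block that overlaps it and shifting the rest back one slot. */
static void sack_add(struct sack_range s[MAX_SACKS], uint32_t left, uint32_t right)
{
    struct sack_range keep[MAX_SACKS];
    int n = 0;

    /* Keep only the blocks that do not overlap [left, right). */
    for (int i = 0; i < MAX_SACKS && s[i].left != s[i].right; i++) {
        if (seq_leq(s[i].right, left) || seq_leq(right, s[i].left)) {
            keep[n++] = s[i];
        }
    }

    /* Newest block first, then the surviving older ones. */
    memset(s, 0, MAX_SACKS * sizeof s[0]);
    s[0].left = left;
    s[0].right = right;
    for (int i = 0; i < n && i + 1 < MAX_SACKS; i++) {
        s[i + 1] = keep[i];
    }
}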
*/ + if (TCP_SEQ_LT(pcb->rcv_sacks[unused_idx].left, seq)) { + pcb->rcv_sacks[unused_idx].left = seq; + } + ++unused_idx; + } + } + + /* We also need to invalidate everything from 'unused_idx' till the end */ + for (i = unused_idx; i < LWIP_TCP_MAX_SACK_NUM; ++i) { + pcb->rcv_sacks[i].left = pcb->rcv_sacks[i].right = 0; + } +} + +#if defined(TCP_OOSEQ_BYTES_LIMIT) || defined(TCP_OOSEQ_PBUFS_LIMIT) +/** + * Called to remove a range of SACKs. + * + * SACK entries will be removed or adjusted to not acknowledge any sequence + * numbers that are greater than (or equal to) 'seq' passed. It not only invalidates entries, + * but also moves all entries that are still valid to the beginning. + * + * @param pcb the tcp_pcb to modify + * @param seq the highest sequence number to keep in SACK entries + */ +static void +tcp_remove_sacks_gt(struct tcp_pcb *pcb, u32_t seq) +{ + u8_t i; + u8_t unused_idx; + + /* We run this loop for all entries, until we find the first invalid one. + There is no point checking after that. */ + for (i = unused_idx = 0; (i < LWIP_TCP_MAX_SACK_NUM) && LWIP_TCP_SACK_VALID(pcb, i); ++i) { + /* We only want to use SACK at index [i] if its left side is < 'seq'. */ + if (TCP_SEQ_LT(pcb->rcv_sacks[i].left, seq)) { + if (unused_idx != i) { + /* We only copy it if it's not in the right spot already. */ + pcb->rcv_sacks[unused_idx] = pcb->rcv_sacks[i]; + } + /* NOTE: It is possible that its right side is > 'seq', in which case we should adjust it. */ + if (TCP_SEQ_GT(pcb->rcv_sacks[unused_idx].right, seq)) { + pcb->rcv_sacks[unused_idx].right = seq; + } + ++unused_idx; + } + } + + /* We also need to invalidate everything from 'unused_idx' till the end */ + for (i = unused_idx; i < LWIP_TCP_MAX_SACK_NUM; ++i) { + pcb->rcv_sacks[i].left = pcb->rcv_sacks[i].right = 0; + } +} +#endif /* TCP_OOSEQ_BYTES_LIMIT || TCP_OOSEQ_PBUFS_LIMIT */ + +#endif /* LWIP_TCP_SACK_OUT */ + +#endif /* LWIP_TCP */ diff --git a/Middlewares/Third_Party/LwIP/src/core/tcp_out.c b/Middlewares/Third_Party/LwIP/src/core/tcp_out.c new file mode 100644 index 0000000..724df10 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/tcp_out.c @@ -0,0 +1,2190 @@ +/** + * @file + * Transmission Control Protocol, outgoing traffic + * + * The output functions of TCP. + * + * There are two distinct ways for TCP segments to get sent: + * - queued data: these are segments transferring data or segments containing + * SYN or FIN (which both count as one sequence number). They are created as + * struct @ref pbuf together with a struct tcp_seg and enqueue to the + * unsent list of the pcb. They are sent by tcp_output: + * - @ref tcp_write : creates data segments + * - @ref tcp_split_unsent_seg : splits a data segment + * - @ref tcp_enqueue_flags : creates SYN-only or FIN-only segments + * - @ref tcp_output / tcp_output_segment : finalize the tcp header + * (e.g. sequence numbers, options, checksum) and output to IP + * - the various tcp_rexmit functions shuffle around segments between the + * unsent an unacked lists to retransmit them + * - tcp_create_segment and tcp_pbuf_prealloc allocate pbuf and + * segment for these functions + * - direct send: these segments don't contain data but control the connection + * behaviour. 
They are created as pbuf only and sent directly without + * enqueueing them: + * - @ref tcp_send_empty_ack sends an ACK-only segment + * - @ref tcp_rst sends a RST segment + * - @ref tcp_keepalive sends a keepalive segment + * - @ref tcp_zero_window_probe sends a window probe segment + * - tcp_output_alloc_header allocates a header-only pbuf for these functions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/priv/tcp_priv.h" +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/inet_chksum.h" +#include "lwip/stats.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#if LWIP_TCP_TIMESTAMPS +#include "lwip/sys.h" +#endif + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/* Allow to add custom TCP header options by defining this hook */ +#ifdef LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH +#define LWIP_TCP_OPT_LENGTH_SEGMENT(flags, pcb) LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH(pcb, LWIP_TCP_OPT_LENGTH(flags)) +#else +#define LWIP_TCP_OPT_LENGTH_SEGMENT(flags, pcb) LWIP_TCP_OPT_LENGTH(flags) +#endif + +/* Define some copy-macros for checksum-on-copy so that the code looks + nicer by preventing too many ifdef's. 
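+   With TCP_CHECKSUM_ON_COPY enabled they also fold the copied bytes into the
+   segment's running checksum; without it they reduce to a plain MEMCPY.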
*/ +#if TCP_CHECKSUM_ON_COPY +#define TCP_DATA_COPY(dst, src, len, seg) do { \ + tcp_seg_add_chksum(LWIP_CHKSUM_COPY(dst, src, len), \ + len, &seg->chksum, &seg->chksum_swapped); \ + seg->flags |= TF_SEG_DATA_CHECKSUMMED; } while(0) +#define TCP_DATA_COPY2(dst, src, len, chksum, chksum_swapped) \ + tcp_seg_add_chksum(LWIP_CHKSUM_COPY(dst, src, len), len, chksum, chksum_swapped); +#else /* TCP_CHECKSUM_ON_COPY*/ +#define TCP_DATA_COPY(dst, src, len, seg) MEMCPY(dst, src, len) +#define TCP_DATA_COPY2(dst, src, len, chksum, chksum_swapped) MEMCPY(dst, src, len) +#endif /* TCP_CHECKSUM_ON_COPY*/ + +/** Define this to 1 for an extra check that the output checksum is valid + * (usefule when the checksum is generated by the application, not the stack) */ +#ifndef TCP_CHECKSUM_ON_COPY_SANITY_CHECK +#define TCP_CHECKSUM_ON_COPY_SANITY_CHECK 0 +#endif +/* Allow to override the failure of sanity check from warning to e.g. hard failure */ +#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK +#ifndef TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL +#define TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL(msg) LWIP_DEBUGF(TCP_DEBUG | LWIP_DBG_LEVEL_WARNING, msg) +#endif +#endif + +#if TCP_OVERSIZE +/** The size of segment pbufs created when TCP_OVERSIZE is enabled */ +#ifndef TCP_OVERSIZE_CALC_LENGTH +#define TCP_OVERSIZE_CALC_LENGTH(length) ((length) + TCP_OVERSIZE) +#endif +#endif + +/* Forward declarations.*/ +static err_t tcp_output_segment(struct tcp_seg *seg, struct tcp_pcb *pcb, struct netif *netif); + +/* tcp_route: common code that returns a fixed bound netif or calls ip_route */ +static struct netif * +tcp_route(const struct tcp_pcb *pcb, const ip_addr_t *src, const ip_addr_t *dst) +{ + LWIP_UNUSED_ARG(src); /* in case IPv4-only and source-based routing is disabled */ + + if ((pcb != NULL) && (pcb->netif_idx != NETIF_NO_INDEX)) { + return netif_get_by_index(pcb->netif_idx); + } else { + return ip_route(src, dst); + } +} + +/** + * Create a TCP segment with prefilled header. + * + * Called by @ref tcp_write, @ref tcp_enqueue_flags and @ref tcp_split_unsent_seg + * + * @param pcb Protocol control block for the TCP connection. + * @param p pbuf that is used to hold the TCP header. + * @param hdrflags TCP flags for header. + * @param seqno TCP sequence number of this packet + * @param optflags options to include in TCP header + * @return a new tcp_seg pointing to p, or NULL. + * The TCP header is filled in except ackno and wnd. + * p is freed on failure. 
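+ *
+ * A typical call, mirroring @ref tcp_write below (note that p is already
+ * freed when NULL is returned):
+ *   seg = tcp_create_segment(pcb, p, 0, pcb->snd_lbb + pos, optflags);
+ *   if (seg == NULL) { goto memerr; }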
+ */ +static struct tcp_seg * +tcp_create_segment(const struct tcp_pcb *pcb, struct pbuf *p, u8_t hdrflags, u32_t seqno, u8_t optflags) +{ + struct tcp_seg *seg; + u8_t optlen; + + LWIP_ASSERT("tcp_create_segment: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_create_segment: invalid pbuf", p != NULL); + + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb); + + if ((seg = (struct tcp_seg *)memp_malloc(MEMP_TCP_SEG)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_create_segment: no memory.\n")); + pbuf_free(p); + return NULL; + } + seg->flags = optflags; + seg->next = NULL; + seg->p = p; + LWIP_ASSERT("p->tot_len >= optlen", p->tot_len >= optlen); + seg->len = p->tot_len - optlen; +#if TCP_OVERSIZE_DBGCHECK + seg->oversize_left = 0; +#endif /* TCP_OVERSIZE_DBGCHECK */ +#if TCP_CHECKSUM_ON_COPY + seg->chksum = 0; + seg->chksum_swapped = 0; + /* check optflags */ + LWIP_ASSERT("invalid optflags passed: TF_SEG_DATA_CHECKSUMMED", + (optflags & TF_SEG_DATA_CHECKSUMMED) == 0); +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* build TCP header */ + if (pbuf_add_header(p, TCP_HLEN)) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_create_segment: no room for TCP header in pbuf.\n")); + TCP_STATS_INC(tcp.err); + tcp_seg_free(seg); + return NULL; + } + seg->tcphdr = (struct tcp_hdr *)seg->p->payload; + seg->tcphdr->src = lwip_htons(pcb->local_port); + seg->tcphdr->dest = lwip_htons(pcb->remote_port); + seg->tcphdr->seqno = lwip_htonl(seqno); + /* ackno is set in tcp_output */ + TCPH_HDRLEN_FLAGS_SET(seg->tcphdr, (5 + optlen / 4), hdrflags); + /* wnd and chksum are set in tcp_output */ + seg->tcphdr->urgp = 0; + return seg; +} + +/** + * Allocate a PBUF_RAM pbuf, perhaps with extra space at the end. + * + * This function is like pbuf_alloc(layer, length, PBUF_RAM) except + * there may be extra bytes available at the end. + * + * Called by @ref tcp_write + * + * @param layer flag to define header size. + * @param length size of the pbuf's payload. + * @param max_length maximum usable size of payload+oversize. + * @param oversize pointer to a u16_t that will receive the number of usable tail bytes. + * @param pcb The TCP connection that will enqueue the pbuf. + * @param apiflags API flags given to tcp_write. + * @param first_seg true when this pbuf will be used in the first enqueued segment. + */ +#if TCP_OVERSIZE +static struct pbuf * +tcp_pbuf_prealloc(pbuf_layer layer, u16_t length, u16_t max_length, + u16_t *oversize, const struct tcp_pcb *pcb, u8_t apiflags, + u8_t first_seg) +{ + struct pbuf *p; + u16_t alloc = length; + + LWIP_ASSERT("tcp_pbuf_prealloc: invalid oversize", oversize != NULL); + LWIP_ASSERT("tcp_pbuf_prealloc: invalid pcb", pcb != NULL); + +#if LWIP_NETIF_TX_SINGLE_PBUF + LWIP_UNUSED_ARG(max_length); + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(apiflags); + LWIP_UNUSED_ARG(first_seg); + alloc = max_length; +#else /* LWIP_NETIF_TX_SINGLE_PBUF */ + if (length < max_length) { + /* Should we allocate an oversized pbuf, or just the minimum + * length required? If tcp_write is going to be called again + * before this segment is transmitted, we want the oversized + * buffer. If the segment will be transmitted immediately, we can + * save memory by allocating only length. We use a simple + * heuristic based on the following information: + * + * Did the user set TCP_WRITE_FLAG_MORE? + * + * Will the Nagle algorithm defer transmission of this segment? 
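+ *
+ * If either answer is yes, more data is likely to be appended before this
+ * segment leaves, so the oversized allocation is worth the extra memory.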
+ */ + if ((apiflags & TCP_WRITE_FLAG_MORE) || + (!(pcb->flags & TF_NODELAY) && + (!first_seg || + pcb->unsent != NULL || + pcb->unacked != NULL))) { + alloc = LWIP_MIN(max_length, LWIP_MEM_ALIGN_SIZE(TCP_OVERSIZE_CALC_LENGTH(length))); + } + } +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + p = pbuf_alloc(layer, alloc, PBUF_RAM); + if (p == NULL) { + return NULL; + } + LWIP_ASSERT("need unchained pbuf", p->next == NULL); + *oversize = p->len - length; + /* trim p->len to the currently used size */ + p->len = p->tot_len = length; + return p; +} +#else /* TCP_OVERSIZE */ +#define tcp_pbuf_prealloc(layer, length, mx, os, pcb, api, fst) pbuf_alloc((layer), (length), PBUF_RAM) +#endif /* TCP_OVERSIZE */ + +#if TCP_CHECKSUM_ON_COPY +/** Add a checksum of newly added data to the segment. + * + * Called by tcp_write and tcp_split_unsent_seg. + */ +static void +tcp_seg_add_chksum(u16_t chksum, u16_t len, u16_t *seg_chksum, + u8_t *seg_chksum_swapped) +{ + u32_t helper; + /* add chksum to old chksum and fold to u16_t */ + helper = chksum + *seg_chksum; + chksum = FOLD_U32T(helper); + if ((len & 1) != 0) { + *seg_chksum_swapped = 1 - *seg_chksum_swapped; + chksum = SWAP_BYTES_IN_WORD(chksum); + } + *seg_chksum = chksum; +} +#endif /* TCP_CHECKSUM_ON_COPY */ + +/** Checks if tcp_write is allowed or not (checks state, snd_buf and snd_queuelen). + * + * @param pcb the tcp pcb to check for + * @param len length of data to send (checked agains snd_buf) + * @return ERR_OK if tcp_write is allowed to proceed, another err_t otherwise + */ +static err_t +tcp_write_checks(struct tcp_pcb *pcb, u16_t len) +{ + LWIP_ASSERT("tcp_write_checks: invalid pcb", pcb != NULL); + + /* connection is in invalid state for data transmission? */ + if ((pcb->state != ESTABLISHED) && + (pcb->state != CLOSE_WAIT) && + (pcb->state != SYN_SENT) && + (pcb->state != SYN_RCVD)) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_STATE | LWIP_DBG_LEVEL_SEVERE, ("tcp_write() called in invalid state\n")); + return ERR_CONN; + } else if (len == 0) { + return ERR_OK; + } + + /* fail on too much data */ + if (len > pcb->snd_buf) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_write: too much data (len=%"U16_F" > snd_buf=%"TCPWNDSIZE_F")\n", + len, pcb->snd_buf)); + tcp_set_flags(pcb, TF_NAGLEMEMERR); + return ERR_MEM; + } + + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: queuelen: %"TCPWNDSIZE_F"\n", (tcpwnd_size_t)pcb->snd_queuelen)); + + /* If total number of pbufs on the unsent/unacked queues exceeds the + * configured maximum, return an error */ + /* check for configured max queuelen and possible overflow */ + if (pcb->snd_queuelen >= LWIP_MIN(TCP_SND_QUEUELEN, (TCP_SNDQUEUELEN_OVERFLOW + 1))) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_write: too long queue %"U16_F" (max %"U16_F")\n", + pcb->snd_queuelen, (u16_t)TCP_SND_QUEUELEN)); + TCP_STATS_INC(tcp.memerr); + tcp_set_flags(pcb, TF_NAGLEMEMERR); + return ERR_MEM; + } + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_write: pbufs on queue => at least one queue non-empty", + pcb->unacked != NULL || pcb->unsent != NULL); + } else { + LWIP_ASSERT("tcp_write: no pbufs on queue => both queues empty", + pcb->unacked == NULL && pcb->unsent == NULL); + } + return ERR_OK; +} + +/** + * @ingroup tcp_raw + * Write data for sending (but does not send it immediately). + * + * It waits in the expectation of more data being sent soon (as + * it can send them more efficiently by combining them together). 
+ * To prompt the system to send data now, call tcp_output() after + * calling tcp_write(). + * + * This function enqueues the data pointed to by the argument dataptr. The length of + * the data is passed as the len parameter. The apiflags can be one or more of: + * - TCP_WRITE_FLAG_COPY: indicates whether the new memory should be allocated + * for the data to be copied into. If this flag is not given, no new memory + * should be allocated and the data should only be referenced by pointer. This + * also means that the memory behind dataptr must not change until the data is + * ACKed by the remote host + * - TCP_WRITE_FLAG_MORE: indicates that more data follows. If this is omitted, + * the PSH flag is set in the last segment created by this call to tcp_write. + * If this flag is given, the PSH flag is not set. + * + * The tcp_write() function will fail and return ERR_MEM if the length + * of the data exceeds the current send buffer size or if the length of + * the queue of outgoing segment is larger than the upper limit defined + * in lwipopts.h. The number of bytes available in the output queue can + * be retrieved with the tcp_sndbuf() function. + * + * The proper way to use this function is to call the function with at + * most tcp_sndbuf() bytes of data. If the function returns ERR_MEM, + * the application should wait until some of the currently enqueued + * data has been successfully received by the other host and try again. + * + * @param pcb Protocol control block for the TCP connection to enqueue data for. + * @param arg Pointer to the data to be enqueued for sending. + * @param len Data length in bytes + * @param apiflags combination of following flags : + * - TCP_WRITE_FLAG_COPY (0x01) data will be copied into memory belonging to the stack + * - TCP_WRITE_FLAG_MORE (0x02) for TCP connection, PSH flag will not be set on last segment sent, + * @return ERR_OK if enqueued, another err_t on error + */ +err_t +tcp_write(struct tcp_pcb *pcb, const void *arg, u16_t len, u8_t apiflags) +{ + struct pbuf *concat_p = NULL; + struct tcp_seg *last_unsent = NULL, *seg = NULL, *prev_seg = NULL, *queue = NULL; + u16_t pos = 0; /* position in 'arg' data */ + u16_t queuelen; + u8_t optlen; + u8_t optflags = 0; +#if TCP_OVERSIZE + u16_t oversize = 0; + u16_t oversize_used = 0; +#if TCP_OVERSIZE_DBGCHECK + u16_t oversize_add = 0; +#endif /* TCP_OVERSIZE_DBGCHECK*/ +#endif /* TCP_OVERSIZE */ + u16_t extendlen = 0; +#if TCP_CHECKSUM_ON_COPY + u16_t concat_chksum = 0; + u8_t concat_chksum_swapped = 0; + u16_t concat_chksummed = 0; +#endif /* TCP_CHECKSUM_ON_COPY */ + err_t err; + u16_t mss_local; + + LWIP_ERROR("tcp_write: invalid pcb", pcb != NULL, return ERR_ARG); + + /* don't allocate segments bigger than half the maximum window we ever received */ + mss_local = LWIP_MIN(pcb->mss, TCPWND_MIN16(pcb->snd_wnd_max / 2)); + mss_local = mss_local ? 
mss_local : pcb->mss; + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_NETIF_TX_SINGLE_PBUF + /* Always copy to try to create single pbufs for TX */ + apiflags |= TCP_WRITE_FLAG_COPY; +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_write(pcb=%p, data=%p, len=%"U16_F", apiflags=%"U16_F")\n", + (void *)pcb, arg, len, (u16_t)apiflags)); + LWIP_ERROR("tcp_write: arg == NULL (programmer violates API)", + arg != NULL, return ERR_ARG;); + + err = tcp_write_checks(pcb, len); + if (err != ERR_OK) { + return err; + } + queuelen = pcb->snd_queuelen; + +#if LWIP_TCP_TIMESTAMPS + if ((pcb->flags & TF_TIMESTAMP)) { + /* Make sure the timestamp option is only included in data segments if we + agreed about it with the remote host. */ + optflags = TF_SEG_OPTS_TS; + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(TF_SEG_OPTS_TS, pcb); + /* ensure that segments can hold at least one data byte... */ + mss_local = LWIP_MAX(mss_local, LWIP_TCP_OPT_LEN_TS + 1); + } else +#endif /* LWIP_TCP_TIMESTAMPS */ + { + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb); + } + + + /* + * TCP segmentation is done in three phases with increasing complexity: + * + * 1. Copy data directly into an oversized pbuf. + * 2. Chain a new pbuf to the end of pcb->unsent. + * 3. Create new segments. + * + * We may run out of memory at any point. In that case we must + * return ERR_MEM and not change anything in pcb. Therefore, all + * changes are recorded in local variables and committed at the end + * of the function. Some pcb fields are maintained in local copies: + * + * queuelen = pcb->snd_queuelen + * oversize = pcb->unsent_oversize + * + * These variables are set consistently by the phases: + * + * seg points to the last segment tampered with. + * + * pos records progress as data is segmented. + */ + + /* Find the tail of the unsent queue. */ + if (pcb->unsent != NULL) { + u16_t space; + u16_t unsent_optlen; + + /* @todo: this could be sped up by keeping last_unsent in the pcb */ + for (last_unsent = pcb->unsent; last_unsent->next != NULL; + last_unsent = last_unsent->next); + + /* Usable space at the end of the last unsent segment */ + unsent_optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(last_unsent->flags, pcb); + LWIP_ASSERT("mss_local is too small", mss_local >= last_unsent->len + unsent_optlen); + space = mss_local - (last_unsent->len + unsent_optlen); + + /* + * Phase 1: Copy data directly into an oversized pbuf. + * + * The number of bytes copied is recorded in the oversize_used + * variable. The actual copying is done at the bottom of the + * function. + */ +#if TCP_OVERSIZE +#if TCP_OVERSIZE_DBGCHECK + /* check that pcb->unsent_oversize matches last_unsent->oversize_left */ + LWIP_ASSERT("unsent_oversize mismatch (pcb vs. last_unsent)", + pcb->unsent_oversize == last_unsent->oversize_left); +#endif /* TCP_OVERSIZE_DBGCHECK */ + oversize = pcb->unsent_oversize; + if (oversize > 0) { + LWIP_ASSERT("inconsistent oversize vs. space", oversize <= space); + seg = last_unsent; + oversize_used = LWIP_MIN(space, LWIP_MIN(oversize, len)); + pos += oversize_used; + oversize -= oversize_used; + space -= oversize_used; + } + /* now we are either finished or oversize is zero */ + LWIP_ASSERT("inconsistent oversize vs. len", (oversize == 0) || (pos == len)); +#endif /* TCP_OVERSIZE */ + +#if !LWIP_NETIF_TX_SINGLE_PBUF + /* + * Phase 2: Chain a new pbuf to the end of pcb->unsent. 
+ * + * As an exception when NOT copying the data, if the given data buffer + * directly follows the last unsent data buffer in memory, extend the last + * ROM pbuf reference to the buffer, thus saving a ROM pbuf allocation. + * + * We don't extend segments containing SYN/FIN flags or options + * (len==0). The new pbuf is kept in concat_p and pbuf_cat'ed at + * the end. + * + * This phase is skipped for LWIP_NETIF_TX_SINGLE_PBUF as we could only execute + * it after rexmit puts a segment from unacked to unsent and at this point, + * oversize info is lost. + */ + if ((pos < len) && (space > 0) && (last_unsent->len > 0)) { + u16_t seglen = LWIP_MIN(space, len - pos); + seg = last_unsent; + + /* Create a pbuf with a copy or reference to seglen bytes. We + * can use PBUF_RAW here since the data appears in the middle of + * a segment. A header will never be prepended. */ + if (apiflags & TCP_WRITE_FLAG_COPY) { + /* Data is copied */ + if ((concat_p = tcp_pbuf_prealloc(PBUF_RAW, seglen, space, &oversize, pcb, apiflags, 1)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n", + seglen)); + goto memerr; + } +#if TCP_OVERSIZE_DBGCHECK + oversize_add = oversize; +#endif /* TCP_OVERSIZE_DBGCHECK */ + TCP_DATA_COPY2(concat_p->payload, (const u8_t *)arg + pos, seglen, &concat_chksum, &concat_chksum_swapped); +#if TCP_CHECKSUM_ON_COPY + concat_chksummed += seglen; +#endif /* TCP_CHECKSUM_ON_COPY */ + queuelen += pbuf_clen(concat_p); + } else { + /* Data is not copied */ + /* If the last unsent pbuf is of type PBUF_ROM, try to extend it. */ + struct pbuf *p; + for (p = last_unsent->p; p->next != NULL; p = p->next); + if (((p->type_internal & (PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS | PBUF_TYPE_FLAG_DATA_VOLATILE)) == 0) && + (const u8_t *)p->payload + p->len == (const u8_t *)arg) { + LWIP_ASSERT("tcp_write: ROM pbufs cannot be oversized", pos == 0); + extendlen = seglen; + } else { + if ((concat_p = pbuf_alloc(PBUF_RAW, seglen, PBUF_ROM)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_write: could not allocate memory for zero-copy pbuf\n")); + goto memerr; + } + /* reference the non-volatile payload data */ + ((struct pbuf_rom *)concat_p)->payload = (const u8_t *)arg + pos; + queuelen += pbuf_clen(concat_p); + } +#if TCP_CHECKSUM_ON_COPY + /* calculate the checksum of nocopy-data */ + tcp_seg_add_chksum(~inet_chksum((const u8_t *)arg + pos, seglen), seglen, + &concat_chksum, &concat_chksum_swapped); + concat_chksummed += seglen; +#endif /* TCP_CHECKSUM_ON_COPY */ + } + + pos += seglen; + } +#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */ + } else { +#if TCP_OVERSIZE + LWIP_ASSERT("unsent_oversize mismatch (pcb->unsent is NULL)", + pcb->unsent_oversize == 0); +#endif /* TCP_OVERSIZE */ + } + + /* + * Phase 3: Create new segments. + * + * The new segments are chained together in the local 'queue' + * variable, ready to be appended to pcb->unsent. 
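+ * Each iteration of the loop below creates one segment carrying at most
+ * (mss_local - optlen) bytes of payload; nothing is committed to the pcb
+ * until all three phases have succeeded.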
+ */ + while (pos < len) { + struct pbuf *p; + u16_t left = len - pos; + u16_t max_len = mss_local - optlen; + u16_t seglen = LWIP_MIN(left, max_len); +#if TCP_CHECKSUM_ON_COPY + u16_t chksum = 0; + u8_t chksum_swapped = 0; +#endif /* TCP_CHECKSUM_ON_COPY */ + + if (apiflags & TCP_WRITE_FLAG_COPY) { + /* If copy is set, memory should be allocated and data copied + * into pbuf */ + if ((p = tcp_pbuf_prealloc(PBUF_TRANSPORT, seglen + optlen, mss_local, &oversize, pcb, apiflags, queue == NULL)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n", seglen)); + goto memerr; + } + LWIP_ASSERT("tcp_write: check that first pbuf can hold the complete seglen", + (p->len >= seglen)); + TCP_DATA_COPY2((char *)p->payload + optlen, (const u8_t *)arg + pos, seglen, &chksum, &chksum_swapped); + } else { + /* Copy is not set: First allocate a pbuf for holding the data. + * Since the referenced data is available at least until it is + * sent out on the link (as it has to be ACKed by the remote + * party) we can safely use PBUF_ROM instead of PBUF_REF here. + */ + struct pbuf *p2; +#if TCP_OVERSIZE + LWIP_ASSERT("oversize == 0", oversize == 0); +#endif /* TCP_OVERSIZE */ + if ((p2 = pbuf_alloc(PBUF_TRANSPORT, seglen, PBUF_ROM)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: could not allocate memory for zero-copy pbuf\n")); + goto memerr; + } +#if TCP_CHECKSUM_ON_COPY + /* calculate the checksum of nocopy-data */ + chksum = ~inet_chksum((const u8_t *)arg + pos, seglen); + if (seglen & 1) { + chksum_swapped = 1; + chksum = SWAP_BYTES_IN_WORD(chksum); + } +#endif /* TCP_CHECKSUM_ON_COPY */ + /* reference the non-volatile payload data */ + ((struct pbuf_rom *)p2)->payload = (const u8_t *)arg + pos; + + /* Second, allocate a pbuf for the headers. */ + if ((p = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM)) == NULL) { + /* If allocation fails, we have to deallocate the data pbuf as + * well. */ + pbuf_free(p2); + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: could not allocate memory for header pbuf\n")); + goto memerr; + } + /* Concatenate the headers and data pbufs together. */ + pbuf_cat(p/*header*/, p2/*data*/); + } + + queuelen += pbuf_clen(p); + + /* Now that there are more segments queued, we check again if the + * length of the queue exceeds the configured maximum or + * overflows. */ + if (queuelen > LWIP_MIN(TCP_SND_QUEUELEN, TCP_SNDQUEUELEN_OVERFLOW)) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: queue too long %"U16_F" (%d)\n", + queuelen, (int)TCP_SND_QUEUELEN)); + pbuf_free(p); + goto memerr; + } + + if ((seg = tcp_create_segment(pcb, p, 0, pcb->snd_lbb + pos, optflags)) == NULL) { + goto memerr; + } +#if TCP_OVERSIZE_DBGCHECK + seg->oversize_left = oversize; +#endif /* TCP_OVERSIZE_DBGCHECK */ +#if TCP_CHECKSUM_ON_COPY + seg->chksum = chksum; + seg->chksum_swapped = chksum_swapped; + seg->flags |= TF_SEG_DATA_CHECKSUMMED; +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* first segment of to-be-queued data? 
*/ + if (queue == NULL) { + queue = seg; + } else { + /* Attach the segment to the end of the queued segments */ + LWIP_ASSERT("prev_seg != NULL", prev_seg != NULL); + prev_seg->next = seg; + } + /* remember last segment of to-be-queued data for next iteration */ + prev_seg = seg; + + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE, ("tcp_write: queueing %"U32_F":%"U32_F"\n", + lwip_ntohl(seg->tcphdr->seqno), + lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg))); + + pos += seglen; + } + + /* + * All three segmentation phases were successful. We can commit the + * transaction. + */ +#if TCP_OVERSIZE_DBGCHECK + if ((last_unsent != NULL) && (oversize_add != 0)) { + last_unsent->oversize_left += oversize_add; + } +#endif /* TCP_OVERSIZE_DBGCHECK */ + + /* + * Phase 1: If data has been added to the preallocated tail of + * last_unsent, we update the length fields of the pbuf chain. + */ +#if TCP_OVERSIZE + if (oversize_used > 0) { + struct pbuf *p; + /* Bump tot_len of whole chain, len of tail */ + for (p = last_unsent->p; p; p = p->next) { + p->tot_len += oversize_used; + if (p->next == NULL) { + TCP_DATA_COPY((char *)p->payload + p->len, arg, oversize_used, last_unsent); + p->len += oversize_used; + } + } + last_unsent->len += oversize_used; +#if TCP_OVERSIZE_DBGCHECK + LWIP_ASSERT("last_unsent->oversize_left >= oversize_used", + last_unsent->oversize_left >= oversize_used); + last_unsent->oversize_left -= oversize_used; +#endif /* TCP_OVERSIZE_DBGCHECK */ + } + pcb->unsent_oversize = oversize; +#endif /* TCP_OVERSIZE */ + + /* + * Phase 2: concat_p can be concatenated onto last_unsent->p, unless we + * determined that the last ROM pbuf can be extended to include the new data. + */ + if (concat_p != NULL) { + LWIP_ASSERT("tcp_write: cannot concatenate when pcb->unsent is empty", + (last_unsent != NULL)); + pbuf_cat(last_unsent->p, concat_p); + last_unsent->len += concat_p->tot_len; + } else if (extendlen > 0) { + struct pbuf *p; + LWIP_ASSERT("tcp_write: extension of reference requires reference", + last_unsent != NULL && last_unsent->p != NULL); + for (p = last_unsent->p; p->next != NULL; p = p->next) { + p->tot_len += extendlen; + } + p->tot_len += extendlen; + p->len += extendlen; + last_unsent->len += extendlen; + } + +#if TCP_CHECKSUM_ON_COPY + if (concat_chksummed) { + LWIP_ASSERT("tcp_write: concat checksum needs concatenated data", + concat_p != NULL || extendlen > 0); + /*if concat checksumm swapped - swap it back */ + if (concat_chksum_swapped) { + concat_chksum = SWAP_BYTES_IN_WORD(concat_chksum); + } + tcp_seg_add_chksum(concat_chksum, concat_chksummed, &last_unsent->chksum, + &last_unsent->chksum_swapped); + last_unsent->flags |= TF_SEG_DATA_CHECKSUMMED; + } +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* + * Phase 3: Append queue to pcb->unsent. Queue may be NULL, but that + * is harmless + */ + if (last_unsent == NULL) { + pcb->unsent = queue; + } else { + last_unsent->next = queue; + } + + /* + * Finally update the pcb state. + */ + pcb->snd_lbb += len; + pcb->snd_buf -= len; + pcb->snd_queuelen = queuelen; + + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: %"S16_F" (after enqueued)\n", + pcb->snd_queuelen)); + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_write: valid queue length", + pcb->unacked != NULL || pcb->unsent != NULL); + } + + /* Set the PSH flag in the last segment that we enqueued. 
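+     This is skipped when the caller passed TCP_WRITE_FLAG_MORE, since more
+     data is expected to follow shortly.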
*/ + if (seg != NULL && seg->tcphdr != NULL && ((apiflags & TCP_WRITE_FLAG_MORE) == 0)) { + TCPH_SET_FLAG(seg->tcphdr, TCP_PSH); + } + + return ERR_OK; +memerr: + tcp_set_flags(pcb, TF_NAGLEMEMERR); + TCP_STATS_INC(tcp.memerr); + + if (concat_p != NULL) { + pbuf_free(concat_p); + } + if (queue != NULL) { + tcp_segs_free(queue); + } + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_write: valid queue length", pcb->unacked != NULL || + pcb->unsent != NULL); + } + LWIP_DEBUGF(TCP_QLEN_DEBUG | LWIP_DBG_STATE, ("tcp_write: %"S16_F" (with mem err)\n", pcb->snd_queuelen)); + return ERR_MEM; +} + +/** + * Split segment on the head of the unsent queue. If return is not + * ERR_OK, existing head remains intact + * + * The split is accomplished by creating a new TCP segment and pbuf + * which holds the remainder payload after the split. The original + * pbuf is trimmed to new length. This allows splitting of read-only + * pbufs + * + * @param pcb the tcp_pcb for which to split the unsent head + * @param split the amount of payload to remain in the head + */ +err_t +tcp_split_unsent_seg(struct tcp_pcb *pcb, u16_t split) +{ + struct tcp_seg *seg = NULL, *useg = NULL; + struct pbuf *p = NULL; + u8_t optlen; + u8_t optflags; + u8_t split_flags; + u8_t remainder_flags; + u16_t remainder; + u16_t offset; +#if TCP_CHECKSUM_ON_COPY + u16_t chksum = 0; + u8_t chksum_swapped = 0; + struct pbuf *q; +#endif /* TCP_CHECKSUM_ON_COPY */ + + LWIP_ASSERT("tcp_split_unsent_seg: invalid pcb", pcb != NULL); + + useg = pcb->unsent; + if (useg == NULL) { + return ERR_MEM; + } + + if (split == 0) { + LWIP_ASSERT("Can't split segment into length 0", 0); + return ERR_VAL; + } + + if (useg->len <= split) { + return ERR_OK; + } + + LWIP_ASSERT("split <= mss", split <= pcb->mss); + LWIP_ASSERT("useg->len > 0", useg->len > 0); + + /* We should check that we don't exceed TCP_SND_QUEUELEN but we need + * to split this packet so we may actually exceed the max value by + * one! 
+ */ + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue: split_unsent_seg: %u\n", (unsigned int)pcb->snd_queuelen)); + + optflags = useg->flags; +#if TCP_CHECKSUM_ON_COPY + /* Remove since checksum is not stored until after tcp_create_segment() */ + optflags &= ~TF_SEG_DATA_CHECKSUMMED; +#endif /* TCP_CHECKSUM_ON_COPY */ + optlen = LWIP_TCP_OPT_LENGTH(optflags); + remainder = useg->len - split; + + /* Create new pbuf for the remainder of the split */ + p = pbuf_alloc(PBUF_TRANSPORT, remainder + optlen, PBUF_RAM); + if (p == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_split_unsent_seg: could not allocate memory for pbuf remainder %u\n", remainder)); + goto memerr; + } + + /* Offset into the original pbuf is past TCP/IP headers, options, and split amount */ + offset = useg->p->tot_len - useg->len + split; + /* Copy remainder into new pbuf, headers and options will not be filled out */ + if (pbuf_copy_partial(useg->p, (u8_t *)p->payload + optlen, remainder, offset ) != remainder) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_split_unsent_seg: could not copy pbuf remainder %u\n", remainder)); + goto memerr; + } +#if TCP_CHECKSUM_ON_COPY + /* calculate the checksum on remainder data */ + tcp_seg_add_chksum(~inet_chksum((const u8_t *)p->payload + optlen, remainder), remainder, + &chksum, &chksum_swapped); +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* Options are created when calling tcp_output() */ + + /* Migrate flags from original segment */ + split_flags = TCPH_FLAGS(useg->tcphdr); + remainder_flags = 0; /* ACK added in tcp_output() */ + + if (split_flags & TCP_PSH) { + split_flags &= ~TCP_PSH; + remainder_flags |= TCP_PSH; + } + if (split_flags & TCP_FIN) { + split_flags &= ~TCP_FIN; + remainder_flags |= TCP_FIN; + } + /* SYN should be left on split, RST should not be present with data */ + + seg = tcp_create_segment(pcb, p, remainder_flags, lwip_ntohl(useg->tcphdr->seqno) + split, optflags); + if (seg == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_split_unsent_seg: could not create new TCP segment\n")); + goto memerr; + } + +#if TCP_CHECKSUM_ON_COPY + seg->chksum = chksum; + seg->chksum_swapped = chksum_swapped; + seg->flags |= TF_SEG_DATA_CHECKSUMMED; +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* Remove this segment from the queue since trimming it may free pbufs */ + pcb->snd_queuelen -= pbuf_clen(useg->p); + + /* Trim the original pbuf into our split size. At this point our remainder segment must be setup + successfully because we are modifying the original segment */ + pbuf_realloc(useg->p, useg->p->tot_len - remainder); + useg->len -= remainder; + TCPH_SET_FLAG(useg->tcphdr, split_flags); +#if TCP_OVERSIZE_DBGCHECK + /* By trimming, realloc may have actually shrunk the pbuf, so clear oversize_left */ + useg->oversize_left = 0; +#endif /* TCP_OVERSIZE_DBGCHECK */ + + /* Add back to the queue with new trimmed pbuf */ + pcb->snd_queuelen += pbuf_clen(useg->p); + +#if TCP_CHECKSUM_ON_COPY + /* The checksum on the split segment is now incorrect. 
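+     The head segment now covers only the first 'split' bytes of payload,
+     while the stored checksum still covers the original, longer data.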
We need to re-run it over the split */ + useg->chksum = 0; + useg->chksum_swapped = 0; + q = useg->p; + offset = q->tot_len - useg->len; /* Offset due to exposed headers */ + + /* Advance to the pbuf where the offset ends */ + while (q != NULL && offset > q->len) { + offset -= q->len; + q = q->next; + } + LWIP_ASSERT("Found start of payload pbuf", q != NULL); + /* Checksum the first payload pbuf accounting for offset, then other pbufs are all payload */ + for (; q != NULL; offset = 0, q = q->next) { + tcp_seg_add_chksum(~inet_chksum((const u8_t *)q->payload + offset, q->len - offset), q->len - offset, + &useg->chksum, &useg->chksum_swapped); + } +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* Update number of segments on the queues. Note that length now may + * exceed TCP_SND_QUEUELEN! We don't have to touch pcb->snd_buf + * because the total amount of data is constant when packet is split */ + pcb->snd_queuelen += pbuf_clen(seg->p); + + /* Finally insert remainder into queue after split (which stays head) */ + seg->next = useg->next; + useg->next = seg; + +#if TCP_OVERSIZE + /* If remainder is last segment on the unsent, ensure we clear the oversize amount + * because the remainder is always sized to the exact remaining amount */ + if (seg->next == NULL) { + pcb->unsent_oversize = 0; + } +#endif /* TCP_OVERSIZE */ + + return ERR_OK; +memerr: + TCP_STATS_INC(tcp.memerr); + + LWIP_ASSERT("seg == NULL", seg == NULL); + if (p != NULL) { + pbuf_free(p); + } + + return ERR_MEM; +} + +/** + * Called by tcp_close() to send a segment including FIN flag but not data. + * This FIN may be added to an existing segment or a new, otherwise empty + * segment is enqueued. + * + * @param pcb the tcp_pcb over which to send a segment + * @return ERR_OK if sent, another err_t otherwise + */ +err_t +tcp_send_fin(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_send_fin: invalid pcb", pcb != NULL); + + /* first, try to add the fin to the last unsent segment */ + if (pcb->unsent != NULL) { + struct tcp_seg *last_unsent; + for (last_unsent = pcb->unsent; last_unsent->next != NULL; + last_unsent = last_unsent->next); + + if ((TCPH_FLAGS(last_unsent->tcphdr) & (TCP_SYN | TCP_FIN | TCP_RST)) == 0) { + /* no SYN/FIN/RST flag in the header, we can add the FIN flag */ + TCPH_SET_FLAG(last_unsent->tcphdr, TCP_FIN); + tcp_set_flags(pcb, TF_FIN); + return ERR_OK; + } + } + /* no data, no length, flags, copy=1, no optdata */ + return tcp_enqueue_flags(pcb, TCP_FIN); +} + +/** + * Enqueue SYN or FIN for transmission. + * + * Called by @ref tcp_connect, tcp_listen_input, and @ref tcp_close + * (via @ref tcp_send_fin) + * + * @param pcb Protocol control block for the TCP connection. + * @param flags TCP header flags to set in the outgoing segment. + */ +err_t +tcp_enqueue_flags(struct tcp_pcb *pcb, u8_t flags) +{ + struct pbuf *p; + struct tcp_seg *seg; + u8_t optflags = 0; + u8_t optlen = 0; + + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: queuelen: %"U16_F"\n", (u16_t)pcb->snd_queuelen)); + + LWIP_ASSERT("tcp_enqueue_flags: need either TCP_SYN or TCP_FIN in flags (programmer violates API)", + (flags & (TCP_SYN | TCP_FIN)) != 0); + LWIP_ASSERT("tcp_enqueue_flags: invalid pcb", pcb != NULL); + + /* No need to check pcb->snd_queuelen if only SYN or FIN are allowed! */ + + /* Get options for this segment. This is a special case since this is the + only place where a SYN can be sent. 
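+     An MSS option is always offered on a SYN; window scaling, SACK permitted
+     and timestamp options are added only when the checks below allow them.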
*/ + if (flags & TCP_SYN) { + optflags = TF_SEG_OPTS_MSS; +#if LWIP_WND_SCALE + if ((pcb->state != SYN_RCVD) || (pcb->flags & TF_WND_SCALE)) { + /* In a (sent in state SYN_RCVD), the window scale option may only + be sent if we received a window scale option from the remote host. */ + optflags |= TF_SEG_OPTS_WND_SCALE; + } +#endif /* LWIP_WND_SCALE */ +#if LWIP_TCP_SACK_OUT + if ((pcb->state != SYN_RCVD) || (pcb->flags & TF_SACK)) { + /* In a (sent in state SYN_RCVD), the SACK_PERM option may only + be sent if we received a SACK_PERM option from the remote host. */ + optflags |= TF_SEG_OPTS_SACK_PERM; + } +#endif /* LWIP_TCP_SACK_OUT */ + } +#if LWIP_TCP_TIMESTAMPS + if ((pcb->flags & TF_TIMESTAMP) || ((flags & TCP_SYN) && (pcb->state != SYN_RCVD))) { + /* Make sure the timestamp option is only included in data segments if we + agreed about it with the remote host (and in active open SYN segments). */ + optflags |= TF_SEG_OPTS_TS; + } +#endif /* LWIP_TCP_TIMESTAMPS */ + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb); + + /* Allocate pbuf with room for TCP header + options */ + if ((p = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM)) == NULL) { + tcp_set_flags(pcb, TF_NAGLEMEMERR); + TCP_STATS_INC(tcp.memerr); + return ERR_MEM; + } + LWIP_ASSERT("tcp_enqueue_flags: check that first pbuf can hold optlen", + (p->len >= optlen)); + + /* Allocate memory for tcp_seg, and fill in fields. */ + if ((seg = tcp_create_segment(pcb, p, flags, pcb->snd_lbb, optflags)) == NULL) { + tcp_set_flags(pcb, TF_NAGLEMEMERR); + TCP_STATS_INC(tcp.memerr); + return ERR_MEM; + } + LWIP_ASSERT("seg->tcphdr not aligned", ((mem_ptr_t)seg->tcphdr % LWIP_MIN(MEM_ALIGNMENT, 4)) == 0); + LWIP_ASSERT("tcp_enqueue_flags: invalid segment length", seg->len == 0); + + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE, + ("tcp_enqueue_flags: queueing %"U32_F":%"U32_F" (0x%"X16_F")\n", + lwip_ntohl(seg->tcphdr->seqno), + lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg), + (u16_t)flags)); + + /* Now append seg to pcb->unsent queue */ + if (pcb->unsent == NULL) { + pcb->unsent = seg; + } else { + struct tcp_seg *useg; + for (useg = pcb->unsent; useg->next != NULL; useg = useg->next); + useg->next = seg; + } +#if TCP_OVERSIZE + /* The new unsent tail has no space */ + pcb->unsent_oversize = 0; +#endif /* TCP_OVERSIZE */ + + /* SYN and FIN bump the sequence number */ + if ((flags & TCP_SYN) || (flags & TCP_FIN)) { + pcb->snd_lbb++; + /* optlen does not influence snd_buf */ + } + if (flags & TCP_FIN) { + tcp_set_flags(pcb, TF_FIN); + } + + /* update number of segments on the queues */ + pcb->snd_queuelen += pbuf_clen(seg->p); + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: %"S16_F" (after enqueued)\n", pcb->snd_queuelen)); + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_enqueue_flags: invalid queue length", + pcb->unacked != NULL || pcb->unsent != NULL); + } + + return ERR_OK; +} + +#if LWIP_TCP_TIMESTAMPS +/* Build a timestamp option (12 bytes long) at the specified options pointer) + * + * @param pcb tcp_pcb + * @param opts option pointer where to store the timestamp option + */ +static void +tcp_build_timestamp_option(const struct tcp_pcb *pcb, u32_t *opts) +{ + LWIP_ASSERT("tcp_build_timestamp_option: invalid pcb", pcb != NULL); + + /* Pad with two NOP options to make everything nicely aligned */ + opts[0] = PP_HTONL(0x0101080A); + opts[1] = lwip_htonl(sys_now()); + opts[2] = lwip_htonl(pcb->ts_recent); +} +#endif + +#if LWIP_TCP_SACK_OUT +/** + * Calculates the number of SACK entries that should be generated. 
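+ * (With the byte budget used below, at most four ranges fit when no other
+ * options are present, and at most three when the 12-byte timestamp option
+ * is also sent.)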
+ * It takes into account whether TF_SACK flag is set, + * the number of SACK entries in tcp_pcb that are valid, + * as well as the available options size. + * + * @param pcb tcp_pcb + * @param optlen the length of other TCP options (in bytes) + * @return the number of SACK ranges that can be used + */ +static u8_t +tcp_get_num_sacks(const struct tcp_pcb *pcb, u8_t optlen) +{ + u8_t num_sacks = 0; + + LWIP_ASSERT("tcp_get_num_sacks: invalid pcb", pcb != NULL); + + if (pcb->flags & TF_SACK) { + u8_t i; + + /* The first SACK takes up 12 bytes (it includes SACK header and two NOP options), + each additional one - 8 bytes. */ + optlen += 12; + + /* Max options size = 40, number of SACK array entries = LWIP_TCP_MAX_SACK_NUM */ + for (i = 0; (i < LWIP_TCP_MAX_SACK_NUM) && (optlen <= TCP_MAX_OPTION_BYTES) && + LWIP_TCP_SACK_VALID(pcb, i); ++i) { + ++num_sacks; + optlen += 8; + } + } + + return num_sacks; +} + +/** Build a SACK option (12 or more bytes long) at the specified options pointer) + * + * @param pcb tcp_pcb + * @param opts option pointer where to store the SACK option + * @param num_sacks the number of SACKs to store + */ +static void +tcp_build_sack_option(const struct tcp_pcb *pcb, u32_t *opts, u8_t num_sacks) +{ + u8_t i; + + LWIP_ASSERT("tcp_build_sack_option: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_build_sack_option: invalid opts", opts != NULL); + + /* Pad with two NOP options to make everything nicely aligned. + We add the length (of just the SACK option, not the NOPs in front of it), + which is 2B of header, plus 8B for each SACK. */ + *(opts++) = PP_HTONL(0x01010500 + 2 + num_sacks * 8); + + for (i = 0; i < num_sacks; ++i) { + *(opts++) = lwip_htonl(pcb->rcv_sacks[i].left); + *(opts++) = lwip_htonl(pcb->rcv_sacks[i].right); + } +} + +#endif + +#if LWIP_WND_SCALE +/** Build a window scale option (3 bytes long) at the specified options pointer) + * + * @param opts option pointer where to store the window scale option + */ +static void +tcp_build_wnd_scale_option(u32_t *opts) +{ + LWIP_ASSERT("tcp_build_wnd_scale_option: invalid opts", opts != NULL); + + /* Pad with one NOP option to make everything nicely aligned */ + opts[0] = PP_HTONL(0x01030300 | TCP_RCV_SCALE); +} +#endif + +/** + * @ingroup tcp_raw + * Find out what we can send and send it + * + * @param pcb Protocol control block for the TCP connection to send data + * @return ERR_OK if data has been sent or nothing to send + * another err_t on error + */ +err_t +tcp_output(struct tcp_pcb *pcb) +{ + struct tcp_seg *seg, *useg; + u32_t wnd, snd_nxt; + err_t err; + struct netif *netif; +#if TCP_CWND_DEBUG + s16_t i = 0; +#endif /* TCP_CWND_DEBUG */ + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("tcp_output: invalid pcb", pcb != NULL); + /* pcb->state LISTEN not allowed here */ + LWIP_ASSERT("don't call tcp_output for listen-pcbs", + pcb->state != LISTEN); + + /* First, check if we are invoked by the TCP input processing + code. If so, we do not output anything. Instead, we rely on the + input processing code to call us when input processing is done + with. 
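+     (The input path calls tcp_output() again once it has finished handling
+     the incoming segment, so nothing is lost by returning here.)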
*/ + if (tcp_input_pcb == pcb) { + return ERR_OK; + } + + wnd = LWIP_MIN(pcb->snd_wnd, pcb->cwnd); + + seg = pcb->unsent; + + if (seg == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: nothing to send (%p)\n", + (void *)pcb->unsent)); + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"TCPWNDSIZE_F + ", cwnd %"TCPWNDSIZE_F", wnd %"U32_F + ", seg == NULL, ack %"U32_F"\n", + pcb->snd_wnd, pcb->cwnd, wnd, pcb->lastack)); + + /* If the TF_ACK_NOW flag is set and the ->unsent queue is empty, construct + * an empty ACK segment and send it. */ + if (pcb->flags & TF_ACK_NOW) { + return tcp_send_empty_ack(pcb); + } + /* nothing to send: shortcut out of here */ + goto output_done; + } else { + LWIP_DEBUGF(TCP_CWND_DEBUG, + ("tcp_output: snd_wnd %"TCPWNDSIZE_F", cwnd %"TCPWNDSIZE_F", wnd %"U32_F + ", effwnd %"U32_F", seq %"U32_F", ack %"U32_F"\n", + pcb->snd_wnd, pcb->cwnd, wnd, + lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len, + lwip_ntohl(seg->tcphdr->seqno), pcb->lastack)); + } + + netif = tcp_route(pcb, &pcb->local_ip, &pcb->remote_ip); + if (netif == NULL) { + return ERR_RTE; + } + + /* If we don't have a local IP address, we get one from netif */ + if (ip_addr_isany(&pcb->local_ip)) { + const ip_addr_t *local_ip = ip_netif_get_local_ip(netif, &pcb->remote_ip); + if (local_ip == NULL) { + return ERR_RTE; + } + ip_addr_copy(pcb->local_ip, *local_ip); + } + + /* Handle the current segment not fitting within the window */ + if (lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len > wnd) { + /* We need to start the persistent timer when the next unsent segment does not fit + * within the remaining (could be 0) send window and RTO timer is not running (we + * have no in-flight data). If window is still too small after persist timer fires, + * then we split the segment. We don't consider the congestion window since a cwnd + * smaller than 1 SMSS implies in-flight data + */ + if (wnd == pcb->snd_wnd && pcb->unacked == NULL && pcb->persist_backoff == 0) { + pcb->persist_cnt = 0; + pcb->persist_backoff = 1; + pcb->persist_probe = 0; + } + /* We need an ACK, but can't send data now, so send an empty ACK */ + if (pcb->flags & TF_ACK_NOW) { + return tcp_send_empty_ack(pcb); + } + goto output_done; + } + /* Stop persist timer, above conditions are not active */ + pcb->persist_backoff = 0; + + /* useg should point to last segment on unacked queue */ + useg = pcb->unacked; + if (useg != NULL) { + for (; useg->next != NULL; useg = useg->next); + } + /* data available and window allows it to be sent? */ + while (seg != NULL && + lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len <= wnd) { + LWIP_ASSERT("RST not expected here!", + (TCPH_FLAGS(seg->tcphdr) & TCP_RST) == 0); + /* Stop sending if the nagle algorithm would prevent it + * Don't stop: + * - if tcp_write had a memory error before (prevent delayed ACK timeout) or + * - if FIN was already enqueued for this PCB (SYN is always alone in a segment - + * either seg->next != NULL or pcb->unacked == NULL; + * RST is no sent using tcp_write/tcp_output. 
+ */ + if ((tcp_do_output_nagle(pcb) == 0) && + ((pcb->flags & (TF_NAGLEMEMERR | TF_FIN)) == 0)) { + break; + } +#if TCP_CWND_DEBUG + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"TCPWNDSIZE_F", cwnd %"TCPWNDSIZE_F", wnd %"U32_F", effwnd %"U32_F", seq %"U32_F", ack %"U32_F", i %"S16_F"\n", + pcb->snd_wnd, pcb->cwnd, wnd, + lwip_ntohl(seg->tcphdr->seqno) + seg->len - + pcb->lastack, + lwip_ntohl(seg->tcphdr->seqno), pcb->lastack, i)); + ++i; +#endif /* TCP_CWND_DEBUG */ + + if (pcb->state != SYN_SENT) { + TCPH_SET_FLAG(seg->tcphdr, TCP_ACK); + } + + err = tcp_output_segment(seg, pcb, netif); + if (err != ERR_OK) { + /* segment could not be sent, for whatever reason */ + tcp_set_flags(pcb, TF_NAGLEMEMERR); + return err; + } +#if TCP_OVERSIZE_DBGCHECK + seg->oversize_left = 0; +#endif /* TCP_OVERSIZE_DBGCHECK */ + pcb->unsent = seg->next; + if (pcb->state != SYN_SENT) { + tcp_clear_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + } + snd_nxt = lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg); + if (TCP_SEQ_LT(pcb->snd_nxt, snd_nxt)) { + pcb->snd_nxt = snd_nxt; + } + /* put segment on unacknowledged list if length > 0 */ + if (TCP_TCPLEN(seg) > 0) { + seg->next = NULL; + /* unacked list is empty? */ + if (pcb->unacked == NULL) { + pcb->unacked = seg; + useg = seg; + /* unacked list is not empty? */ + } else { + /* In the case of fast retransmit, the packet should not go to the tail + * of the unacked queue, but rather somewhere before it. We need to check for + * this case. -STJ Jul 27, 2004 */ + if (TCP_SEQ_LT(lwip_ntohl(seg->tcphdr->seqno), lwip_ntohl(useg->tcphdr->seqno))) { + /* add segment to before tail of unacked list, keeping the list sorted */ + struct tcp_seg **cur_seg = &(pcb->unacked); + while (*cur_seg && + TCP_SEQ_LT(lwip_ntohl((*cur_seg)->tcphdr->seqno), lwip_ntohl(seg->tcphdr->seqno))) { + cur_seg = &((*cur_seg)->next ); + } + seg->next = (*cur_seg); + (*cur_seg) = seg; + } else { + /* add segment to tail of unacked list */ + useg->next = seg; + useg = useg->next; + } + } + /* do not queue empty segments on the unacked list */ + } else { + tcp_seg_free(seg); + } + seg = pcb->unsent; + } +#if TCP_OVERSIZE + if (pcb->unsent == NULL) { + /* last unsent has been removed, reset unsent_oversize */ + pcb->unsent_oversize = 0; + } +#endif /* TCP_OVERSIZE */ + +output_done: + tcp_clear_flags(pcb, TF_NAGLEMEMERR); + return ERR_OK; +} + +/** Check if a segment's pbufs are used by someone else than TCP. + * This can happen on retransmission if the pbuf of this segment is still + * referenced by the netif driver due to deferred transmission. + * This is the case (only!) if someone down the TX call path called + * pbuf_ref() on one of the pbufs! + * + * @arg seg the tcp segment to check + * @return 1 if ref != 1, 0 if ref == 1 + */ +static int +tcp_output_segment_busy(const struct tcp_seg *seg) +{ + LWIP_ASSERT("tcp_output_segment_busy: invalid seg", seg != NULL); + + /* We only need to check the first pbuf here: + If a pbuf is queued for transmission, a driver calls pbuf_ref(), + which only changes the ref count of the first pbuf */ + if (seg->p->ref != 1) { + /* other reference found */ + return 1; + } + /* no other references found */ + return 0; +} + +/** + * Called by tcp_output() to actually send a TCP segment over IP. 
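+ * It fills in the remaining header fields (ackno and window), appends the
+ * requested options, generates the checksum where enabled and passes the
+ * segment to ip_output_if().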
+ * + * @param seg the tcp_seg to send + * @param pcb the tcp_pcb for the TCP connection used to send the segment + * @param netif the netif used to send the segment + */ +static err_t +tcp_output_segment(struct tcp_seg *seg, struct tcp_pcb *pcb, struct netif *netif) +{ + err_t err; + u16_t len; + u32_t *opts; +#if TCP_CHECKSUM_ON_COPY + int seg_chksum_was_swapped = 0; +#endif + + LWIP_ASSERT("tcp_output_segment: invalid seg", seg != NULL); + LWIP_ASSERT("tcp_output_segment: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_output_segment: invalid netif", netif != NULL); + + if (tcp_output_segment_busy(seg)) { + /* This should not happen: rexmit functions should have checked this. + However, since this function modifies p->len, we must not continue in this case. */ + LWIP_DEBUGF(TCP_RTO_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_output_segment: segment busy\n")); + return ERR_OK; + } + + /* The TCP header has already been constructed, but the ackno and + wnd fields remain. */ + seg->tcphdr->ackno = lwip_htonl(pcb->rcv_nxt); + + /* advertise our receive window size in this TCP segment */ +#if LWIP_WND_SCALE + if (seg->flags & TF_SEG_OPTS_WND_SCALE) { + /* The Window field in a SYN segment itself (the only type where we send + the window scale option) is never scaled. */ + seg->tcphdr->wnd = lwip_htons(TCPWND_MIN16(pcb->rcv_ann_wnd)); + } else +#endif /* LWIP_WND_SCALE */ + { + seg->tcphdr->wnd = lwip_htons(TCPWND_MIN16(RCV_WND_SCALE(pcb, pcb->rcv_ann_wnd))); + } + + pcb->rcv_ann_right_edge = pcb->rcv_nxt + pcb->rcv_ann_wnd; + + /* Add any requested options. NB MSS option is only set on SYN + packets, so ignore it here */ + /* cast through void* to get rid of alignment warnings */ + opts = (u32_t *)(void *)(seg->tcphdr + 1); + if (seg->flags & TF_SEG_OPTS_MSS) { + u16_t mss; +#if TCP_CALCULATE_EFF_SEND_MSS + mss = tcp_eff_send_mss_netif(TCP_MSS, netif, &pcb->remote_ip); +#else /* TCP_CALCULATE_EFF_SEND_MSS */ + mss = TCP_MSS; +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + *opts = TCP_BUILD_MSS_OPTION(mss); + opts += 1; + } +#if LWIP_TCP_TIMESTAMPS + pcb->ts_lastacksent = pcb->rcv_nxt; + + if (seg->flags & TF_SEG_OPTS_TS) { + tcp_build_timestamp_option(pcb, opts); + opts += 3; + } +#endif +#if LWIP_WND_SCALE + if (seg->flags & TF_SEG_OPTS_WND_SCALE) { + tcp_build_wnd_scale_option(opts); + opts += 1; + } +#endif +#if LWIP_TCP_SACK_OUT + if (seg->flags & TF_SEG_OPTS_SACK_PERM) { + /* Pad with two NOP options to make everything nicely aligned + * NOTE: When we send both timestamp and SACK_PERM options, + * we could use the first two NOPs before the timestamp to store SACK_PERM option, + * but that would complicate the code. + */ + *(opts++) = PP_HTONL(0x01010402); + } +#endif + + /* Set retransmission timer running if it is not currently enabled + This must be set before checking the route. */ + if (pcb->rtime < 0) { + pcb->rtime = 0; + } + + if (pcb->rttest == 0) { + pcb->rttest = tcp_ticks; + pcb->rtseq = lwip_ntohl(seg->tcphdr->seqno); + + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_output_segment: rtseq %"U32_F"\n", pcb->rtseq)); + } + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output_segment: %"U32_F":%"U32_F"\n", + lwip_htonl(seg->tcphdr->seqno), lwip_htonl(seg->tcphdr->seqno) + + seg->len)); + + len = (u16_t)((u8_t *)seg->tcphdr - (u8_t *)seg->p->payload); + if (len == 0) { + /** Exclude retransmitted segments from this count. 
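+        A non-zero len means headers from an earlier transmission still
+        precede the TCP header, i.e. the segment is being retransmitted.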
*/ + MIB2_STATS_INC(mib2.tcpoutsegs); + } + + seg->p->len -= len; + seg->p->tot_len -= len; + + seg->p->payload = seg->tcphdr; + + seg->tcphdr->chksum = 0; + +#ifdef LWIP_HOOK_TCP_OUT_ADD_TCPOPTS + opts = LWIP_HOOK_TCP_OUT_ADD_TCPOPTS(seg->p, seg->tcphdr, pcb, opts); +#endif + LWIP_ASSERT("options not filled", (u8_t *)opts == ((u8_t *)(seg->tcphdr + 1)) + LWIP_TCP_OPT_LENGTH_SEGMENT(seg->flags, pcb)); + +#if CHECKSUM_GEN_TCP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) { +#if TCP_CHECKSUM_ON_COPY + u32_t acc; +#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK + u16_t chksum_slow = ip_chksum_pseudo(seg->p, IP_PROTO_TCP, + seg->p->tot_len, &pcb->local_ip, &pcb->remote_ip); +#endif /* TCP_CHECKSUM_ON_COPY_SANITY_CHECK */ + if ((seg->flags & TF_SEG_DATA_CHECKSUMMED) == 0) { + LWIP_ASSERT("data included but not checksummed", + seg->p->tot_len == TCPH_HDRLEN_BYTES(seg->tcphdr)); + } + + /* rebuild TCP header checksum (TCP header changes for retransmissions!) */ + acc = ip_chksum_pseudo_partial(seg->p, IP_PROTO_TCP, + seg->p->tot_len, TCPH_HDRLEN_BYTES(seg->tcphdr), &pcb->local_ip, &pcb->remote_ip); + /* add payload checksum */ + if (seg->chksum_swapped) { + seg_chksum_was_swapped = 1; + seg->chksum = SWAP_BYTES_IN_WORD(seg->chksum); + seg->chksum_swapped = 0; + } + acc = (u16_t)~acc + seg->chksum; + seg->tcphdr->chksum = (u16_t)~FOLD_U32T(acc); +#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK + if (chksum_slow != seg->tcphdr->chksum) { + TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL( + ("tcp_output_segment: calculated checksum is %"X16_F" instead of %"X16_F"\n", + seg->tcphdr->chksum, chksum_slow)); + seg->tcphdr->chksum = chksum_slow; + } +#endif /* TCP_CHECKSUM_ON_COPY_SANITY_CHECK */ +#else /* TCP_CHECKSUM_ON_COPY */ + seg->tcphdr->chksum = ip_chksum_pseudo(seg->p, IP_PROTO_TCP, + seg->p->tot_len, &pcb->local_ip, &pcb->remote_ip); +#endif /* TCP_CHECKSUM_ON_COPY */ + } +#endif /* CHECKSUM_GEN_TCP */ + TCP_STATS_INC(tcp.xmit); + + NETIF_SET_HINTS(netif, &(pcb->netif_hints)); + err = ip_output_if(seg->p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl, + pcb->tos, IP_PROTO_TCP, netif); + NETIF_RESET_HINTS(netif); + +#if TCP_CHECKSUM_ON_COPY + if (seg_chksum_was_swapped) { + /* if data is added to this segment later, chksum needs to be swapped, + so restore this now */ + seg->chksum = SWAP_BYTES_IN_WORD(seg->chksum); + seg->chksum_swapped = 1; + } +#endif + + return err; +} + +/** + * Requeue all unacked segments for retransmission + * + * Called by tcp_slowtmr() for slow retransmission. + * + * @param pcb the tcp_pcb for which to re-enqueue all unacked segments + */ +err_t +tcp_rexmit_rto_prepare(struct tcp_pcb *pcb) +{ + struct tcp_seg *seg; + + LWIP_ASSERT("tcp_rexmit_rto_prepare: invalid pcb", pcb != NULL); + + if (pcb->unacked == NULL) { + return ERR_VAL; + } + + /* Move all unacked segments to the head of the unsent queue. + However, give up if any of the unsent pbufs are still referenced by the + netif driver due to deferred transmission. No point loading the link further + if it is struggling to flush its buffered writes. 
*/ + for (seg = pcb->unacked; seg->next != NULL; seg = seg->next) { + if (tcp_output_segment_busy(seg)) { + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_rexmit_rto: segment busy\n")); + return ERR_VAL; + } + } + if (tcp_output_segment_busy(seg)) { + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_rexmit_rto: segment busy\n")); + return ERR_VAL; + } + /* concatenate unsent queue after unacked queue */ + seg->next = pcb->unsent; +#if TCP_OVERSIZE_DBGCHECK + /* if last unsent changed, we need to update unsent_oversize */ + if (pcb->unsent == NULL) { + pcb->unsent_oversize = seg->oversize_left; + } +#endif /* TCP_OVERSIZE_DBGCHECK */ + /* unsent queue is the concatenated queue (of unacked, unsent) */ + pcb->unsent = pcb->unacked; + /* unacked queue is now empty */ + pcb->unacked = NULL; + + /* Mark RTO in-progress */ + tcp_set_flags(pcb, TF_RTO); + /* Record the next byte following retransmit */ + pcb->rto_end = lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg); + /* Don't take any RTT measurements after retransmitting. */ + pcb->rttest = 0; + + return ERR_OK; +} + +/** + * Requeue all unacked segments for retransmission + * + * Called by tcp_slowtmr() for slow retransmission. + * + * @param pcb the tcp_pcb for which to re-enqueue all unacked segments + */ +void +tcp_rexmit_rto_commit(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_rexmit_rto_commit: invalid pcb", pcb != NULL); + + /* increment number of retransmissions */ + if (pcb->nrtx < 0xFF) { + ++pcb->nrtx; + } + /* Do the actual retransmission */ + tcp_output(pcb); +} + +/** + * Requeue all unacked segments for retransmission + * + * Called by tcp_process() only, tcp_slowtmr() needs to do some things between + * "prepare" and "commit". + * + * @param pcb the tcp_pcb for which to re-enqueue all unacked segments + */ +void +tcp_rexmit_rto(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_rexmit_rto: invalid pcb", pcb != NULL); + + if (tcp_rexmit_rto_prepare(pcb) == ERR_OK) { + tcp_rexmit_rto_commit(pcb); + } +} + +/** + * Requeue the first unacked segment for retransmission + * + * Called by tcp_receive() for fast retransmit. + * + * @param pcb the tcp_pcb for which to retransmit the first unacked segment + */ +err_t +tcp_rexmit(struct tcp_pcb *pcb) +{ + struct tcp_seg *seg; + struct tcp_seg **cur_seg; + + LWIP_ASSERT("tcp_rexmit: invalid pcb", pcb != NULL); + + if (pcb->unacked == NULL) { + return ERR_VAL; + } + + seg = pcb->unacked; + + /* Give up if the segment is still referenced by the netif driver + due to deferred transmission. */ + if (tcp_output_segment_busy(seg)) { + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_rexmit busy\n")); + return ERR_VAL; + } + + /* Move the first unacked segment to the unsent queue */ + /* Keep the unsent queue sorted. */ + pcb->unacked = seg->next; + + cur_seg = &(pcb->unsent); + while (*cur_seg && + TCP_SEQ_LT(lwip_ntohl((*cur_seg)->tcphdr->seqno), lwip_ntohl(seg->tcphdr->seqno))) { + cur_seg = &((*cur_seg)->next ); + } + seg->next = *cur_seg; + *cur_seg = seg; +#if TCP_OVERSIZE + if (seg->next == NULL) { + /* the retransmitted segment is last in unsent, so reset unsent_oversize */ + pcb->unsent_oversize = 0; + } +#endif /* TCP_OVERSIZE */ + + if (pcb->nrtx < 0xFF) { + ++pcb->nrtx; + } + + /* Don't take any rtt measurements after retransmitting. */ + pcb->rttest = 0; + + /* Do the actual retransmission. */ + MIB2_STATS_INC(mib2.tcpretranssegs); + /* No need to call tcp_output: we are always called from tcp_input() + and thus tcp_output directly returns. 
*/ + return ERR_OK; +} + + +/** + * Handle retransmission after three dupacks received + * + * @param pcb the tcp_pcb for which to retransmit the first unacked segment + */ +void +tcp_rexmit_fast(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_rexmit_fast: invalid pcb", pcb != NULL); + + if (pcb->unacked != NULL && !(pcb->flags & TF_INFR)) { + /* This is fast retransmit. Retransmit the first unacked segment. */ + LWIP_DEBUGF(TCP_FR_DEBUG, + ("tcp_receive: dupacks %"U16_F" (%"U32_F + "), fast retransmit %"U32_F"\n", + (u16_t)pcb->dupacks, pcb->lastack, + lwip_ntohl(pcb->unacked->tcphdr->seqno))); + if (tcp_rexmit(pcb) == ERR_OK) { + /* Set ssthresh to half of the minimum of the current + * cwnd and the advertised window */ + pcb->ssthresh = LWIP_MIN(pcb->cwnd, pcb->snd_wnd) / 2; + + /* The minimum value for ssthresh should be 2 MSS */ + if (pcb->ssthresh < (2U * pcb->mss)) { + LWIP_DEBUGF(TCP_FR_DEBUG, + ("tcp_receive: The minimum value for ssthresh %"TCPWNDSIZE_F + " should be min 2 mss %"U16_F"...\n", + pcb->ssthresh, (u16_t)(2 * pcb->mss))); + pcb->ssthresh = 2 * pcb->mss; + } + + pcb->cwnd = pcb->ssthresh + 3 * pcb->mss; + tcp_set_flags(pcb, TF_INFR); + + /* Reset the retransmission timer to prevent immediate rto retransmissions */ + pcb->rtime = 0; + } + } +} + +static struct pbuf * +tcp_output_alloc_header_common(u32_t ackno, u16_t optlen, u16_t datalen, + u32_t seqno_be /* already in network byte order */, + u16_t src_port, u16_t dst_port, u8_t flags, u16_t wnd) +{ + struct tcp_hdr *tcphdr; + struct pbuf *p; + + p = pbuf_alloc(PBUF_IP, TCP_HLEN + optlen + datalen, PBUF_RAM); + if (p != NULL) { + LWIP_ASSERT("check that first pbuf can hold struct tcp_hdr", + (p->len >= TCP_HLEN + optlen)); + tcphdr = (struct tcp_hdr *)p->payload; + tcphdr->src = lwip_htons(src_port); + tcphdr->dest = lwip_htons(dst_port); + tcphdr->seqno = seqno_be; + tcphdr->ackno = lwip_htonl(ackno); + TCPH_HDRLEN_FLAGS_SET(tcphdr, (5 + optlen / 4), flags); + tcphdr->wnd = lwip_htons(wnd); + tcphdr->chksum = 0; + tcphdr->urgp = 0; + } + return p; +} + +/** Allocate a pbuf and create a tcphdr at p->payload, used for output + * functions other than the default tcp_output -> tcp_output_segment + * (e.g. tcp_send_empty_ack, etc.) + * + * @param pcb tcp pcb for which to send a packet (used to initialize tcp_hdr) + * @param optlen length of header-options + * @param datalen length of tcp data to reserve in pbuf + * @param seqno_be seqno in network byte order (big-endian) + * @return pbuf with p->payload being the tcp_hdr + */ +static struct pbuf * +tcp_output_alloc_header(struct tcp_pcb *pcb, u16_t optlen, u16_t datalen, + u32_t seqno_be /* already in network byte order */) +{ + struct pbuf *p; + + LWIP_ASSERT("tcp_output_alloc_header: invalid pcb", pcb != NULL); + + p = tcp_output_alloc_header_common(pcb->rcv_nxt, optlen, datalen, + seqno_be, pcb->local_port, pcb->remote_port, TCP_ACK, + TCPWND_MIN16(RCV_WND_SCALE(pcb, pcb->rcv_ann_wnd))); + if (p != NULL) { + /* If we're sending a packet, update the announced right window edge */ + pcb->rcv_ann_right_edge = pcb->rcv_nxt + pcb->rcv_ann_wnd; + } + return p; +} + +/* Fill in options for control segments */ +static void +tcp_output_fill_options(const struct tcp_pcb *pcb, struct pbuf *p, u8_t optflags, u8_t num_sacks) +{ + struct tcp_hdr *tcphdr; + u32_t *opts; + u16_t sacks_len = 0; + + LWIP_ASSERT("tcp_output_fill_options: invalid pbuf", p != NULL); + + tcphdr = (struct tcp_hdr *)p->payload; + opts = (u32_t *)(void *)(tcphdr + 1); + + /* NB. 
MSS and window scale options are only sent on SYNs, so ignore them here */ + +#if LWIP_TCP_TIMESTAMPS + if (optflags & TF_SEG_OPTS_TS) { + tcp_build_timestamp_option(pcb, opts); + opts += 3; + } +#endif + +#if LWIP_TCP_SACK_OUT + if (pcb && (num_sacks > 0)) { + tcp_build_sack_option(pcb, opts, num_sacks); + /* 1 word for SACKs header (including 2xNOP), and 2 words for each SACK */ + sacks_len = 1 + num_sacks * 2; + opts += sacks_len; + } +#else + LWIP_UNUSED_ARG(num_sacks); +#endif + +#ifdef LWIP_HOOK_TCP_OUT_ADD_TCPOPTS + opts = LWIP_HOOK_TCP_OUT_ADD_TCPOPTS(p, tcphdr, pcb, opts); +#endif + + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(sacks_len); + LWIP_ASSERT("options not filled", (u8_t *)opts == ((u8_t *)(tcphdr + 1)) + sacks_len * 4 + LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb)); + LWIP_UNUSED_ARG(optflags); /* for LWIP_NOASSERT */ + LWIP_UNUSED_ARG(opts); /* for LWIP_NOASSERT */ +} + +/** Output a control segment pbuf to IP. + * + * Called from tcp_rst, tcp_send_empty_ack, tcp_keepalive and tcp_zero_window_probe, + * this function combines selecting a netif for transmission, generating the tcp + * header checksum and calling ip_output_if while handling netif hints and stats. + */ +static err_t +tcp_output_control_segment(const struct tcp_pcb *pcb, struct pbuf *p, + const ip_addr_t *src, const ip_addr_t *dst) +{ + err_t err; + struct netif *netif; + + LWIP_ASSERT("tcp_output_control_segment: invalid pbuf", p != NULL); + + netif = tcp_route(pcb, src, dst); + if (netif == NULL) { + err = ERR_RTE; + } else { + u8_t ttl, tos; +#if CHECKSUM_GEN_TCP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) { + struct tcp_hdr *tcphdr = (struct tcp_hdr *)p->payload; + tcphdr->chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len, + src, dst); + } +#endif + if (pcb != NULL) { + NETIF_SET_HINTS(netif, LWIP_CONST_CAST(struct netif_hint*, &(pcb->netif_hints))); + ttl = pcb->ttl; + tos = pcb->tos; + } else { + /* Send output with hardcoded TTL/HL since we have no access to the pcb */ + ttl = TCP_TTL; + tos = 0; + } + TCP_STATS_INC(tcp.xmit); + err = ip_output_if(p, src, dst, ttl, tos, IP_PROTO_TCP, netif); + NETIF_RESET_HINTS(netif); + } + pbuf_free(p); + return err; +} + +/** + * Send a TCP RESET packet (empty segment with RST flag set) either to + * abort a connection or to show that there is no matching local connection + * for a received segment. + * + * Called by tcp_abort() (to abort a local connection), tcp_input() (if no + * matching local pcb was found), tcp_listen_input() (if incoming segment + * has ACK flag set) and tcp_process() (received segment in the wrong state) + * + * Since a RST segment is in most cases not sent for an active connection, + * tcp_rst() has a number of arguments that are taken from a tcp_pcb for + * most other segment output functions. 
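+ *
+ * Illustrative sketch (hedged): aborting an active connection would invoke
+ * this roughly as follows, with every argument taken from the pcb being
+ * torn down:
+ *
+ *   tcp_rst(pcb, pcb->snd_nxt, pcb->rcv_nxt,
+ *           &pcb->local_ip, &pcb->remote_ip,
+ *           pcb->local_port, pcb->remote_port);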
+ * + * @param pcb TCP pcb (may be NULL if no pcb is available) + * @param seqno the sequence number to use for the outgoing segment + * @param ackno the acknowledge number to use for the outgoing segment + * @param local_ip the local IP address to send the segment from + * @param remote_ip the remote IP address to send the segment to + * @param local_port the local TCP port to send the segment from + * @param remote_port the remote TCP port to send the segment to + */ +void +tcp_rst(const struct tcp_pcb *pcb, u32_t seqno, u32_t ackno, + const ip_addr_t *local_ip, const ip_addr_t *remote_ip, + u16_t local_port, u16_t remote_port) +{ + struct pbuf *p; + u16_t wnd; + u8_t optlen; + + LWIP_ASSERT("tcp_rst: invalid local_ip", local_ip != NULL); + LWIP_ASSERT("tcp_rst: invalid remote_ip", remote_ip != NULL); + + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb); + +#if LWIP_WND_SCALE + wnd = PP_HTONS(((TCP_WND >> TCP_RCV_SCALE) & 0xFFFF)); +#else + wnd = PP_HTONS(TCP_WND); +#endif + + p = tcp_output_alloc_header_common(ackno, optlen, 0, lwip_htonl(seqno), local_port, + remote_port, TCP_RST | TCP_ACK, wnd); + if (p == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_rst: could not allocate memory for pbuf\n")); + return; + } + tcp_output_fill_options(pcb, p, 0, optlen); + + MIB2_STATS_INC(mib2.tcpoutrsts); + + tcp_output_control_segment(pcb, p, local_ip, remote_ip); + LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_rst: seqno %"U32_F" ackno %"U32_F".\n", seqno, ackno)); +} + +/** + * Send an ACK without data. + * + * @param pcb Protocol control block for the TCP connection to send the ACK + */ +err_t +tcp_send_empty_ack(struct tcp_pcb *pcb) +{ + err_t err; + struct pbuf *p; + u8_t optlen, optflags = 0; + u8_t num_sacks = 0; + + LWIP_ASSERT("tcp_send_empty_ack: invalid pcb", pcb != NULL); + +#if LWIP_TCP_TIMESTAMPS + if (pcb->flags & TF_TIMESTAMP) { + optflags = TF_SEG_OPTS_TS; + } +#endif + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb); + +#if LWIP_TCP_SACK_OUT + /* For now, SACKs are only sent with empty ACKs */ + if ((num_sacks = tcp_get_num_sacks(pcb, optlen)) > 0) { + optlen += 4 + num_sacks * 8; /* 4 bytes for header (including 2*NOP), plus 8B for each SACK */ + } +#endif + + p = tcp_output_alloc_header(pcb, optlen, 0, lwip_htonl(pcb->snd_nxt)); + if (p == NULL) { + /* let tcp_fasttmr retry sending this ACK */ + tcp_set_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: (ACK) could not allocate pbuf\n")); + return ERR_BUF; + } + tcp_output_fill_options(pcb, p, optflags, num_sacks); + +#if LWIP_TCP_TIMESTAMPS + pcb->ts_lastacksent = pcb->rcv_nxt; +#endif + + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, + ("tcp_output: sending ACK for %"U32_F"\n", pcb->rcv_nxt)); + err = tcp_output_control_segment(pcb, p, &pcb->local_ip, &pcb->remote_ip); + if (err != ERR_OK) { + /* let tcp_fasttmr retry sending this ACK */ + tcp_set_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + } else { + /* remove ACK flags from the PCB, as we sent an empty ACK now */ + tcp_clear_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + } + + return err; +} + +/** + * Send keepalive packets to keep a connection active although + * no data is sent over it. 
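+ *
+ * Keepalive is an opt-in per pcb; a hedged configuration sketch (the idle
+ * time is in milliseconds, and the finer keep_intvl/keep_cnt tuning fields
+ * additionally require LWIP_TCP_KEEPALIVE to be enabled):
+ *
+ *   ip_set_option(pcb, SOF_KEEPALIVE);  // enable keepalive probing
+ *   pcb->keep_idle = 60000;             // first probe after 60 s of idle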
+ * + * Called by tcp_slowtmr() + * + * @param pcb the tcp_pcb for which to send a keepalive packet + */ +err_t +tcp_keepalive(struct tcp_pcb *pcb) +{ + err_t err; + struct pbuf *p; + u8_t optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb); + + LWIP_ASSERT("tcp_keepalive: invalid pcb", pcb != NULL); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: sending KEEPALIVE probe to ")); + ip_addr_debug_print_val(TCP_DEBUG, pcb->remote_ip); + LWIP_DEBUGF(TCP_DEBUG, ("\n")); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: tcp_ticks %"U32_F" pcb->tmr %"U32_F" pcb->keep_cnt_sent %"U16_F"\n", + tcp_ticks, pcb->tmr, (u16_t)pcb->keep_cnt_sent)); + + p = tcp_output_alloc_header(pcb, optlen, 0, lwip_htonl(pcb->snd_nxt - 1)); + if (p == NULL) { + LWIP_DEBUGF(TCP_DEBUG, + ("tcp_keepalive: could not allocate memory for pbuf\n")); + return ERR_MEM; + } + tcp_output_fill_options(pcb, p, 0, optlen); + err = tcp_output_control_segment(pcb, p, &pcb->local_ip, &pcb->remote_ip); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: seqno %"U32_F" ackno %"U32_F" err %d.\n", + pcb->snd_nxt - 1, pcb->rcv_nxt, (int)err)); + return err; +} + +/** + * Send persist timer zero-window probes to keep a connection active + * when a window update is lost. + * + * Called by tcp_slowtmr() + * + * @param pcb the tcp_pcb for which to send a zero-window probe packet + */ +err_t +tcp_zero_window_probe(struct tcp_pcb *pcb) +{ + err_t err; + struct pbuf *p; + struct tcp_hdr *tcphdr; + struct tcp_seg *seg; + u16_t len; + u8_t is_fin; + u32_t snd_nxt; + u8_t optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb); + + LWIP_ASSERT("tcp_zero_window_probe: invalid pcb", pcb != NULL); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: sending ZERO WINDOW probe to ")); + ip_addr_debug_print_val(TCP_DEBUG, pcb->remote_ip); + LWIP_DEBUGF(TCP_DEBUG, ("\n")); + + LWIP_DEBUGF(TCP_DEBUG, + ("tcp_zero_window_probe: tcp_ticks %"U32_F + " pcb->tmr %"U32_F" pcb->keep_cnt_sent %"U16_F"\n", + tcp_ticks, pcb->tmr, (u16_t)pcb->keep_cnt_sent)); + + /* Only consider unsent, persist timer should be off when there is data in-flight */ + seg = pcb->unsent; + if (seg == NULL) { + /* Not expected, persist timer should be off when the send buffer is empty */ + return ERR_OK; + } + + /* increment probe count. NOTE: we record probe even if it fails + to actually transmit due to an error. This ensures memory exhaustion/ + routing problem doesn't leave a zero-window pcb as an indefinite zombie. + RTO mechanism has similar behavior, see pcb->nrtx */ + if (pcb->persist_probe < 0xFF) { + ++pcb->persist_probe; + } + + is_fin = ((TCPH_FLAGS(seg->tcphdr) & TCP_FIN) != 0) && (seg->len == 0); + /* we want to send one seqno: either FIN or data (no options) */ + len = is_fin ? 0 : 1; + + p = tcp_output_alloc_header(pcb, optlen, len, seg->tcphdr->seqno); + if (p == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: no memory for pbuf\n")); + return ERR_MEM; + } + tcphdr = (struct tcp_hdr *)p->payload; + + if (is_fin) { + /* FIN segment, no data */ + TCPH_FLAGS_SET(tcphdr, TCP_ACK | TCP_FIN); + } else { + /* Data segment, copy in one byte from the head of the unacked queue */ + char *d = ((char *)p->payload + TCP_HLEN); + /* Depending on whether the segment has already been sent (unacked) or not + (unsent), seg->p->payload points to the IP header or TCP header. + Ensure we copy the first TCP data byte: */ + pbuf_copy_partial(seg->p, d, 1, seg->p->tot_len - seg->len); + } + + /* The byte may be acknowledged without the window being opened. 
*/ + snd_nxt = lwip_ntohl(seg->tcphdr->seqno) + 1; + if (TCP_SEQ_LT(pcb->snd_nxt, snd_nxt)) { + pcb->snd_nxt = snd_nxt; + } + tcp_output_fill_options(pcb, p, 0, optlen); + + err = tcp_output_control_segment(pcb, p, &pcb->local_ip, &pcb->remote_ip); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: seqno %"U32_F + " ackno %"U32_F" err %d.\n", + pcb->snd_nxt - 1, pcb->rcv_nxt, (int)err)); + return err; +} +#endif /* LWIP_TCP */ diff --git a/Middlewares/Third_Party/LwIP/src/core/timeouts.c b/Middlewares/Third_Party/LwIP/src/core/timeouts.c new file mode 100644 index 0000000..f37acfe --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/timeouts.c @@ -0,0 +1,451 @@ +/** + * @file + * Stack-internal timers implementation. + * This file includes timer callbacks for stack-internal timers as well as + * functions to set up or stop timers and check for expired timers. + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * Simon Goldschmidt + * + */ + +#include "lwip/opt.h" + +#include "lwip/timeouts.h" +#include "lwip/priv/tcp_priv.h" + +#include "lwip/def.h" +#include "lwip/memp.h" +#include "lwip/priv/tcpip_priv.h" + +#include "lwip/ip4_frag.h" +#include "lwip/etharp.h" +#include "lwip/dhcp.h" +#include "lwip/autoip.h" +#include "lwip/igmp.h" +#include "lwip/dns.h" +#include "lwip/nd6.h" +#include "lwip/ip6_frag.h" +#include "lwip/mld6.h" +#include "lwip/dhcp6.h" +#include "lwip/sys.h" +#include "lwip/pbuf.h" + +#if LWIP_DEBUG_TIMERNAMES +#define HANDLER(x) x, #x +#else /* LWIP_DEBUG_TIMERNAMES */ +#define HANDLER(x) x +#endif /* LWIP_DEBUG_TIMERNAMES */ + +#define LWIP_MAX_TIMEOUT 0x7fffffff + +/* Check if timer's expiry time is greater than time and care about u32_t wraparounds */ +#define TIME_LESS_THAN(t, compare_to) ( (((u32_t)((t)-(compare_to))) > LWIP_MAX_TIMEOUT) ? 1 : 0 ) + +/** This array contains all stack-internal cyclic timers. 
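+ * A port that drives these timers with its own scheduler could walk the
+ * table, for example (illustrative sketch; start_my_timer is a hypothetical
+ * helper, not part of lwIP):
+ *
+ *   int i;
+ *   for (i = 0; i < lwip_num_cyclic_timers; i++) {
+ *     start_my_timer(lwip_cyclic_timers[i].interval_ms,
+ *                    lwip_cyclic_timers[i].handler);
+ *   }
+ *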
To get the number of + * timers, use LWIP_ARRAYSIZE() */ +const struct lwip_cyclic_timer lwip_cyclic_timers[] = { +#if LWIP_TCP + /* The TCP timer is a special case: it does not have to run always and + is triggered to start from TCP using tcp_timer_needed() */ + {TCP_TMR_INTERVAL, HANDLER(tcp_tmr)}, +#endif /* LWIP_TCP */ +#if LWIP_IPV4 +#if IP_REASSEMBLY + {IP_TMR_INTERVAL, HANDLER(ip_reass_tmr)}, +#endif /* IP_REASSEMBLY */ +#if LWIP_ARP + {ARP_TMR_INTERVAL, HANDLER(etharp_tmr)}, +#endif /* LWIP_ARP */ +#if LWIP_DHCP + {DHCP_COARSE_TIMER_MSECS, HANDLER(dhcp_coarse_tmr)}, + {DHCP_FINE_TIMER_MSECS, HANDLER(dhcp_fine_tmr)}, +#endif /* LWIP_DHCP */ +#if LWIP_AUTOIP + {AUTOIP_TMR_INTERVAL, HANDLER(autoip_tmr)}, +#endif /* LWIP_AUTOIP */ +#if LWIP_IGMP + {IGMP_TMR_INTERVAL, HANDLER(igmp_tmr)}, +#endif /* LWIP_IGMP */ +#endif /* LWIP_IPV4 */ +#if LWIP_DNS + {DNS_TMR_INTERVAL, HANDLER(dns_tmr)}, +#endif /* LWIP_DNS */ +#if LWIP_IPV6 + {ND6_TMR_INTERVAL, HANDLER(nd6_tmr)}, +#if LWIP_IPV6_REASS + {IP6_REASS_TMR_INTERVAL, HANDLER(ip6_reass_tmr)}, +#endif /* LWIP_IPV6_REASS */ +#if LWIP_IPV6_MLD + {MLD6_TMR_INTERVAL, HANDLER(mld6_tmr)}, +#endif /* LWIP_IPV6_MLD */ +#if LWIP_IPV6_DHCP6 + {DHCP6_TIMER_MSECS, HANDLER(dhcp6_tmr)}, +#endif /* LWIP_IPV6_DHCP6 */ +#endif /* LWIP_IPV6 */ +}; +const int lwip_num_cyclic_timers = LWIP_ARRAYSIZE(lwip_cyclic_timers); + +#if LWIP_TIMERS && !LWIP_TIMERS_CUSTOM + +/** The one and only timeout list */ +static struct sys_timeo *next_timeout; + +static u32_t current_timeout_due_time; + +#if LWIP_TESTMODE +struct sys_timeo** +sys_timeouts_get_next_timeout(void) +{ + return &next_timeout; +} +#endif + +#if LWIP_TCP +/** global variable that shows if the tcp timer is currently scheduled or not */ +static int tcpip_tcp_timer_active; + +/** + * Timer callback function that calls tcp_tmr() and reschedules itself. + * + * @param arg unused argument + */ +static void +tcpip_tcp_timer(void *arg) +{ + LWIP_UNUSED_ARG(arg); + + /* call TCP timer handler */ + tcp_tmr(); + /* timer still needed? */ + if (tcp_active_pcbs || tcp_tw_pcbs) { + /* restart timer */ + sys_timeout(TCP_TMR_INTERVAL, tcpip_tcp_timer, NULL); + } else { + /* disable timer */ + tcpip_tcp_timer_active = 0; + } +} + +/** + * Called from TCP_REG when registering a new PCB: + * the reason is to have the TCP timer only running when + * there are active (or time-wait) PCBs. + */ +void +tcp_timer_needed(void) +{ + LWIP_ASSERT_CORE_LOCKED(); + + /* timer is off but needed again? 
*/ + if (!tcpip_tcp_timer_active && (tcp_active_pcbs || tcp_tw_pcbs)) { + /* enable and start timer */ + tcpip_tcp_timer_active = 1; + sys_timeout(TCP_TMR_INTERVAL, tcpip_tcp_timer, NULL); + } +} +#endif /* LWIP_TCP */ + +static void +#if LWIP_DEBUG_TIMERNAMES +sys_timeout_abs(u32_t abs_time, sys_timeout_handler handler, void *arg, const char *handler_name) +#else /* LWIP_DEBUG_TIMERNAMES */ +sys_timeout_abs(u32_t abs_time, sys_timeout_handler handler, void *arg) +#endif +{ + struct sys_timeo *timeout, *t; + + timeout = (struct sys_timeo *)memp_malloc(MEMP_SYS_TIMEOUT); + if (timeout == NULL) { + LWIP_ASSERT("sys_timeout: timeout != NULL, pool MEMP_SYS_TIMEOUT is empty", timeout != NULL); + return; + } + + timeout->next = NULL; + timeout->h = handler; + timeout->arg = arg; + timeout->time = abs_time; + +#if LWIP_DEBUG_TIMERNAMES + timeout->handler_name = handler_name; + LWIP_DEBUGF(TIMERS_DEBUG, ("sys_timeout: %p abs_time=%"U32_F" handler=%s arg=%p\n", + (void *)timeout, abs_time, handler_name, (void *)arg)); +#endif /* LWIP_DEBUG_TIMERNAMES */ + + if (next_timeout == NULL) { + next_timeout = timeout; + return; + } + if (TIME_LESS_THAN(timeout->time, next_timeout->time)) { + timeout->next = next_timeout; + next_timeout = timeout; + } else { + for (t = next_timeout; t != NULL; t = t->next) { + if ((t->next == NULL) || TIME_LESS_THAN(timeout->time, t->next->time)) { + timeout->next = t->next; + t->next = timeout; + break; + } + } + } +} + +/** + * Timer callback function that calls cyclic->handler() and reschedules itself. + * + * @param arg unused argument + */ +#if !LWIP_TESTMODE +static +#endif +void +lwip_cyclic_timer(void *arg) +{ + u32_t now; + u32_t next_timeout_time; + const struct lwip_cyclic_timer *cyclic = (const struct lwip_cyclic_timer *)arg; + +#if LWIP_DEBUG_TIMERNAMES + LWIP_DEBUGF(TIMERS_DEBUG, ("tcpip: %s()\n", cyclic->handler_name)); +#endif + cyclic->handler(); + + now = sys_now(); + next_timeout_time = (u32_t)(current_timeout_due_time + cyclic->interval_ms); /* overflow handled by TIME_LESS_THAN macro */ + if (TIME_LESS_THAN(next_timeout_time, now)) { + /* timer would immediately expire again -> "overload" -> restart without any correction */ +#if LWIP_DEBUG_TIMERNAMES + sys_timeout_abs((u32_t)(now + cyclic->interval_ms), lwip_cyclic_timer, arg, cyclic->handler_name); +#else + sys_timeout_abs((u32_t)(now + cyclic->interval_ms), lwip_cyclic_timer, arg); +#endif + + } else { + /* correct cyclic interval with handler execution delay and sys_check_timeouts jitter */ +#if LWIP_DEBUG_TIMERNAMES + sys_timeout_abs(next_timeout_time, lwip_cyclic_timer, arg, cyclic->handler_name); +#else + sys_timeout_abs(next_timeout_time, lwip_cyclic_timer, arg); +#endif + } +} + +/** Initialize this module */ +void sys_timeouts_init(void) +{ + size_t i; + /* tcp_tmr() at index 0 is started on demand */ + for (i = (LWIP_TCP ? 1 : 0); i < LWIP_ARRAYSIZE(lwip_cyclic_timers); i++) { + /* we have to cast via size_t to get rid of const warning + (this is OK as cyclic_timer() casts back to const* */ + sys_timeout(lwip_cyclic_timers[i].interval_ms, lwip_cyclic_timer, LWIP_CONST_CAST(void *, &lwip_cyclic_timers[i])); + } +} + +/** + * Create a one-shot timer (aka timeout). 
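+ * A minimal usage sketch (my_handler and do_periodic_work are illustrative
+ * application names, not lwIP symbols; re-arm inside the handler to get
+ * pseudo-periodic behaviour):
+ *
+ *   static void my_handler(void *arg)
+ *   {
+ *     LWIP_UNUSED_ARG(arg);
+ *     do_periodic_work();                  // application code
+ *     sys_timeout(1000, my_handler, NULL); // re-arm for ~1 s later
+ *   }
+ *
+ *   sys_timeout(1000, my_handler, NULL);   // arm the first shot
+ *   ...
+ *   sys_untimeout(my_handler, NULL);       // cancel if still pending
+ *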
Timeouts are processed in the + * following cases: + * - while waiting for a message using sys_timeouts_mbox_fetch() + * - by calling sys_check_timeouts() (NO_SYS==1 only) + * + * @param msecs time in milliseconds after that the timer should expire + * @param handler callback function to call when msecs have elapsed + * @param arg argument to pass to the callback function + */ +#if LWIP_DEBUG_TIMERNAMES +void +sys_timeout_debug(u32_t msecs, sys_timeout_handler handler, void *arg, const char *handler_name) +#else /* LWIP_DEBUG_TIMERNAMES */ +void +sys_timeout(u32_t msecs, sys_timeout_handler handler, void *arg) +#endif /* LWIP_DEBUG_TIMERNAMES */ +{ + u32_t next_timeout_time; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("Timeout time too long, max is LWIP_UINT32_MAX/4 msecs", msecs <= (LWIP_UINT32_MAX / 4)); + + next_timeout_time = (u32_t)(sys_now() + msecs); /* overflow handled by TIME_LESS_THAN macro */ + +#if LWIP_DEBUG_TIMERNAMES + sys_timeout_abs(next_timeout_time, handler, arg, handler_name); +#else + sys_timeout_abs(next_timeout_time, handler, arg); +#endif +} + +/** + * Go through timeout list (for this task only) and remove the first matching + * entry (subsequent entries remain untouched), even though the timeout has not + * triggered yet. + * + * @param handler callback function that would be called by the timeout + * @param arg callback argument that would be passed to handler +*/ +void +sys_untimeout(sys_timeout_handler handler, void *arg) +{ + struct sys_timeo *prev_t, *t; + + LWIP_ASSERT_CORE_LOCKED(); + + if (next_timeout == NULL) { + return; + } + + for (t = next_timeout, prev_t = NULL; t != NULL; prev_t = t, t = t->next) { + if ((t->h == handler) && (t->arg == arg)) { + /* We have a match */ + /* Unlink from previous in list */ + if (prev_t == NULL) { + next_timeout = t->next; + } else { + prev_t->next = t->next; + } + memp_free(MEMP_SYS_TIMEOUT, t); + return; + } + } + return; +} + +/** + * @ingroup lwip_nosys + * Handle timeouts for NO_SYS==1 (i.e. without using + * tcpip_thread/sys_timeouts_mbox_fetch(). Uses sys_now() to call timeout + * handler functions when timeouts expire. + * + * Must be called periodically from your main loop. + */ +void +sys_check_timeouts(void) +{ + u32_t now; + + LWIP_ASSERT_CORE_LOCKED(); + + /* Process only timers expired at the start of the function. */ + now = sys_now(); + + do { + struct sys_timeo *tmptimeout; + sys_timeout_handler handler; + void *arg; + + PBUF_CHECK_FREE_OOSEQ(); + + tmptimeout = next_timeout; + if (tmptimeout == NULL) { + return; + } + + if (TIME_LESS_THAN(now, tmptimeout->time)) { + return; + } + + /* Timeout has expired */ + next_timeout = tmptimeout->next; + handler = tmptimeout->h; + arg = tmptimeout->arg; + current_timeout_due_time = tmptimeout->time; +#if LWIP_DEBUG_TIMERNAMES + if (handler != NULL) { + LWIP_DEBUGF(TIMERS_DEBUG, ("sct calling h=%s t=%"U32_F" arg=%p\n", + tmptimeout->handler_name, sys_now() - tmptimeout->time, arg)); + } +#endif /* LWIP_DEBUG_TIMERNAMES */ + memp_free(MEMP_SYS_TIMEOUT, tmptimeout); + if (handler != NULL) { + handler(arg); + } + LWIP_TCPIP_THREAD_ALIVE(); + + /* Repeat until all expired timers have been called */ + } while (1); +} + +/** Rebase the timeout times to the current time. + * This is necessary if sys_check_timeouts() hasn't been called for a long + * time (e.g. while saving energy) to prevent all timer functions of that + * period being called. 
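+ *
+ * A hedged wake-up sketch for a NO_SYS style main loop that sleeps between
+ * timeouts (enter_low_power_sleep and the "overslept" condition are
+ * hypothetical application code):
+ *
+ *   u32_t sleep_ms = sys_timeouts_sleeptime();
+ *   enter_low_power_sleep(sleep_ms);   // may sleep much longer than asked
+ *   if (overslept) {
+ *     sys_restart_timeouts();          // rebase instead of firing a burst
+ *   }
+ *   sys_check_timeouts();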
+ */ +void +sys_restart_timeouts(void) +{ + u32_t now; + u32_t base; + struct sys_timeo *t; + + if (next_timeout == NULL) { + return; + } + + now = sys_now(); + base = next_timeout->time; + + for (t = next_timeout; t != NULL; t = t->next) { + t->time = (t->time - base) + now; + } +} + +/** Return the time left before the next timeout is due. If no timeouts are + * enqueued, returns 0xffffffff + */ +u32_t +sys_timeouts_sleeptime(void) +{ + u32_t now; + + LWIP_ASSERT_CORE_LOCKED(); + + if (next_timeout == NULL) { + return SYS_TIMEOUTS_SLEEPTIME_INFINITE; + } + now = sys_now(); + if (TIME_LESS_THAN(next_timeout->time, now)) { + return 0; + } else { + u32_t ret = (u32_t)(next_timeout->time - now); + LWIP_ASSERT("invalid sleeptime", ret <= LWIP_MAX_TIMEOUT); + return ret; + } +} + +#else /* LWIP_TIMERS && !LWIP_TIMERS_CUSTOM */ +/* Satisfy the TCP code which calls this function */ +void +tcp_timer_needed(void) +{ +} +#endif /* LWIP_TIMERS && !LWIP_TIMERS_CUSTOM */ diff --git a/Middlewares/Third_Party/LwIP/src/core/udp.c b/Middlewares/Third_Party/LwIP/src/core/udp.c new file mode 100644 index 0000000..9d2cb4a --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/udp.c @@ -0,0 +1,1314 @@ +/** + * @file + * User Datagram Protocol module\n + * The code for the User Datagram Protocol UDP & UDPLite (RFC 3828).\n + * See also @ref udp_raw + * + * @defgroup udp_raw UDP + * @ingroup callbackstyle_api + * User Datagram Protocol module\n + * @see @ref api + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/* @todo Check the use of '(struct udp_pcb).chksum_len_rx'! 
+ */ + +#include "lwip/opt.h" + +#if LWIP_UDP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/udp.h" +#include "lwip/def.h" +#include "lwip/memp.h" +#include "lwip/inet_chksum.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" +#include "lwip/icmp.h" +#include "lwip/icmp6.h" +#include "lwip/stats.h" +#include "lwip/snmp.h" +#include "lwip/dhcp.h" + +#include + +#ifndef UDP_LOCAL_PORT_RANGE_START +/* From http://www.iana.org/assignments/port-numbers: + "The Dynamic and/or Private Ports are those from 49152 through 65535" */ +#define UDP_LOCAL_PORT_RANGE_START 0xc000 +#define UDP_LOCAL_PORT_RANGE_END 0xffff +#define UDP_ENSURE_LOCAL_PORT_RANGE(port) ((u16_t)(((port) & (u16_t)~UDP_LOCAL_PORT_RANGE_START) + UDP_LOCAL_PORT_RANGE_START)) +#endif + +/* last local UDP port */ +static u16_t udp_port = UDP_LOCAL_PORT_RANGE_START; + +/* The list of UDP PCBs */ +/* exported in udp.h (was static) */ +struct udp_pcb *udp_pcbs; + +/** + * Initialize this module. + */ +void +udp_init(void) +{ +#ifdef LWIP_RAND + udp_port = UDP_ENSURE_LOCAL_PORT_RANGE(LWIP_RAND()); +#endif /* LWIP_RAND */ +} + +/** + * Allocate a new local UDP port. + * + * @return a new (free) local UDP port number + */ +static u16_t +udp_new_port(void) +{ + u16_t n = 0; + struct udp_pcb *pcb; + +again: + if (udp_port++ == UDP_LOCAL_PORT_RANGE_END) { + udp_port = UDP_LOCAL_PORT_RANGE_START; + } + /* Check all PCBs. */ + for (pcb = udp_pcbs; pcb != NULL; pcb = pcb->next) { + if (pcb->local_port == udp_port) { + if (++n > (UDP_LOCAL_PORT_RANGE_END - UDP_LOCAL_PORT_RANGE_START)) { + return 0; + } + goto again; + } + } + return udp_port; +} + +/** Common code to see if the current input packet matches the pcb + * (current input packet is accessed via ip(4/6)_current_* macros) + * + * @param pcb pcb to check + * @param inp network interface on which the datagram was received (only used for IPv4) + * @param broadcast 1 if his is an IPv4 broadcast (global or subnet-only), 0 otherwise (only used for IPv4) + * @return 1 on match, 0 otherwise + */ +static u8_t +udp_input_local_match(struct udp_pcb *pcb, struct netif *inp, u8_t broadcast) +{ + LWIP_UNUSED_ARG(inp); /* in IPv6 only case */ + LWIP_UNUSED_ARG(broadcast); /* in IPv6 only case */ + + LWIP_ASSERT("udp_input_local_match: invalid pcb", pcb != NULL); + LWIP_ASSERT("udp_input_local_match: invalid netif", inp != NULL); + + /* check if PCB is bound to specific netif */ + if ((pcb->netif_idx != NETIF_NO_INDEX) && + (pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) { + return 0; + } + + /* Dual-stack: PCBs listening to any IP type also listen to any IP address */ + if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { +#if LWIP_IPV4 && IP_SOF_BROADCAST_RECV + if ((broadcast != 0) && !ip_get_option(pcb, SOF_BROADCAST)) { + return 0; + } +#endif /* LWIP_IPV4 && IP_SOF_BROADCAST_RECV */ + return 1; + } + + /* Only need to check PCB if incoming IP version matches PCB IP version */ + if (IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ip_current_dest_addr())) { +#if LWIP_IPV4 + /* Special case: IPv4 broadcast: all or broadcasts in my subnet + * Note: broadcast variable can only be 1 if it is an IPv4 broadcast */ + if (broadcast != 0) { +#if IP_SOF_BROADCAST_RECV + if (ip_get_option(pcb, SOF_BROADCAST)) +#endif /* IP_SOF_BROADCAST_RECV */ + { + if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip)) || + ((ip4_current_dest_addr()->addr == IPADDR_BROADCAST)) || + ip4_addr_netcmp(ip_2_ip4(&pcb->local_ip), ip4_current_dest_addr(), 
netif_ip4_netmask(inp))) { + return 1; + } + } + } else +#endif /* LWIP_IPV4 */ + /* Handle IPv4 and IPv6: all or exact match */ + if (ip_addr_isany(&pcb->local_ip) || ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { + return 1; + } + } + + return 0; +} + +/** + * Process an incoming UDP datagram. + * + * Given an incoming UDP datagram (as a chain of pbufs) this function + * finds a corresponding UDP PCB and hands over the pbuf to the pcbs + * recv function. If no pcb is found or the datagram is incorrect, the + * pbuf is freed. + * + * @param p pbuf to be demultiplexed to a UDP PCB (p->payload pointing to the UDP header) + * @param inp network interface on which the datagram was received. + * + */ +void +udp_input(struct pbuf *p, struct netif *inp) +{ + struct udp_hdr *udphdr; + struct udp_pcb *pcb, *prev; + struct udp_pcb *uncon_pcb; + u16_t src, dest; + u8_t broadcast; + u8_t for_us = 0; + + LWIP_UNUSED_ARG(inp); + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("udp_input: invalid pbuf", p != NULL); + LWIP_ASSERT("udp_input: invalid netif", inp != NULL); + + PERF_START; + + UDP_STATS_INC(udp.recv); + + /* Check minimum length (UDP header) */ + if (p->len < UDP_HLEN) { + /* drop short packets */ + LWIP_DEBUGF(UDP_DEBUG, + ("udp_input: short UDP datagram (%"U16_F" bytes) discarded\n", p->tot_len)); + UDP_STATS_INC(udp.lenerr); + UDP_STATS_INC(udp.drop); + MIB2_STATS_INC(mib2.udpinerrors); + pbuf_free(p); + goto end; + } + + udphdr = (struct udp_hdr *)p->payload; + + /* is broadcast packet ? */ + broadcast = ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif()); + + LWIP_DEBUGF(UDP_DEBUG, ("udp_input: received datagram of length %"U16_F"\n", p->tot_len)); + + /* convert src and dest ports to host byte order */ + src = lwip_ntohs(udphdr->src); + dest = lwip_ntohs(udphdr->dest); + + udp_debug_print(udphdr); + + /* print the UDP source and destination */ + LWIP_DEBUGF(UDP_DEBUG, ("udp (")); + ip_addr_debug_print_val(UDP_DEBUG, *ip_current_dest_addr()); + LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F") <-- (", lwip_ntohs(udphdr->dest))); + ip_addr_debug_print_val(UDP_DEBUG, *ip_current_src_addr()); + LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F")\n", lwip_ntohs(udphdr->src))); + + pcb = NULL; + prev = NULL; + uncon_pcb = NULL; + /* Iterate through the UDP pcb list for a matching pcb. + * 'Perfect match' pcbs (connected to the remote port & ip address) are + * preferred. If no perfect match is found, the first unconnected pcb that + * matches the local port and ip address gets the datagram. 
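+ * Illustrative example: with two pcbs bound to local port 5000, one
+ * connected to 192.168.1.2:6000 and one unconnected, a datagram from
+ * 192.168.1.2:6000 is delivered to the connected pcb, while datagrams from
+ * any other source fall through to the unconnected one.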
*/ + for (pcb = udp_pcbs; pcb != NULL; pcb = pcb->next) { + /* print the PCB local and remote address */ + LWIP_DEBUGF(UDP_DEBUG, ("pcb (")); + ip_addr_debug_print_val(UDP_DEBUG, pcb->local_ip); + LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F") <-- (", pcb->local_port)); + ip_addr_debug_print_val(UDP_DEBUG, pcb->remote_ip); + LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F")\n", pcb->remote_port)); + + /* compare PCB local addr+port to UDP destination addr+port */ + if ((pcb->local_port == dest) && + (udp_input_local_match(pcb, inp, broadcast) != 0)) { + if ((pcb->flags & UDP_FLAGS_CONNECTED) == 0) { + if (uncon_pcb == NULL) { + /* the first unconnected matching PCB */ + uncon_pcb = pcb; +#if LWIP_IPV4 + } else if (broadcast && ip4_current_dest_addr()->addr == IPADDR_BROADCAST) { + /* global broadcast address (only valid for IPv4; match was checked before) */ + if (!IP_IS_V4_VAL(uncon_pcb->local_ip) || !ip4_addr_cmp(ip_2_ip4(&uncon_pcb->local_ip), netif_ip4_addr(inp))) { + /* uncon_pcb does not match the input netif, check this pcb */ + if (IP_IS_V4_VAL(pcb->local_ip) && ip4_addr_cmp(ip_2_ip4(&pcb->local_ip), netif_ip4_addr(inp))) { + /* better match */ + uncon_pcb = pcb; + } + } +#endif /* LWIP_IPV4 */ + } +#if SO_REUSE + else if (!ip_addr_isany(&pcb->local_ip)) { + /* prefer specific IPs over catch-all */ + uncon_pcb = pcb; + } +#endif /* SO_REUSE */ + } + + /* compare PCB remote addr+port to UDP source addr+port */ + if ((pcb->remote_port == src) && + (ip_addr_isany_val(pcb->remote_ip) || + ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()))) { + /* the first fully matching PCB */ + if (prev != NULL) { + /* move the pcb to the front of udp_pcbs so that is + found faster next time */ + prev->next = pcb->next; + pcb->next = udp_pcbs; + udp_pcbs = pcb; + } else { + UDP_STATS_INC(udp.cachehit); + } + break; + } + } + + prev = pcb; + } + /* no fully matching pcb found? then look for an unconnected pcb */ + if (pcb == NULL) { + pcb = uncon_pcb; + } + + /* Check checksum if this is a match or if it was directed at us. */ + if (pcb != NULL) { + for_us = 1; + } else { +#if LWIP_IPV6 + if (ip_current_is_v6()) { + for_us = netif_get_ip6_addr_match(inp, ip6_current_dest_addr()) >= 0; + } +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + if (!ip_current_is_v6()) { + for_us = ip4_addr_cmp(netif_ip4_addr(inp), ip4_current_dest_addr()); + } +#endif /* LWIP_IPV4 */ + } + + if (for_us) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_input: calculating checksum\n")); +#if CHECKSUM_CHECK_UDP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_UDP) { +#if LWIP_UDPLITE + if (ip_current_header_proto() == IP_PROTO_UDPLITE) { + /* Do the UDP Lite checksum */ + u16_t chklen = lwip_ntohs(udphdr->len); + if (chklen < sizeof(struct udp_hdr)) { + if (chklen == 0) { + /* For UDP-Lite, checksum length of 0 means checksum + over the complete packet (See RFC 3828 chap. 3.1) */ + chklen = p->tot_len; + } else { + /* At least the UDP-Lite header must be covered by the + checksum! (Again, see RFC 3828 chap. 3.1) */ + goto chkerr; + } + } + if (ip_chksum_pseudo_partial(p, IP_PROTO_UDPLITE, + p->tot_len, chklen, + ip_current_src_addr(), ip_current_dest_addr()) != 0) { + goto chkerr; + } + } else +#endif /* LWIP_UDPLITE */ + { + if (udphdr->chksum != 0) { + if (ip_chksum_pseudo(p, IP_PROTO_UDP, p->tot_len, + ip_current_src_addr(), + ip_current_dest_addr()) != 0) { + goto chkerr; + } + } + } + } +#endif /* CHECKSUM_CHECK_UDP */ + if (pbuf_remove_header(p, UDP_HLEN)) { + /* Can we cope with this failing? 
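+        (It should not: the only way pbuf_remove_header(p, UDP_HLEN) can fail
+        here is if the first pbuf is shorter than UDP_HLEN, and such packets
+        were already dropped by the length check above.)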
Just assert for now */ + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + UDP_STATS_INC(udp.drop); + MIB2_STATS_INC(mib2.udpinerrors); + pbuf_free(p); + goto end; + } + + if (pcb != NULL) { + MIB2_STATS_INC(mib2.udpindatagrams); +#if SO_REUSE && SO_REUSE_RXTOALL + if (ip_get_option(pcb, SOF_REUSEADDR) && + (broadcast || ip_addr_ismulticast(ip_current_dest_addr()))) { + /* pass broadcast- or multicast packets to all multicast pcbs + if SOF_REUSEADDR is set on the first match */ + struct udp_pcb *mpcb; + for (mpcb = udp_pcbs; mpcb != NULL; mpcb = mpcb->next) { + if (mpcb != pcb) { + /* compare PCB local addr+port to UDP destination addr+port */ + if ((mpcb->local_port == dest) && + (udp_input_local_match(mpcb, inp, broadcast) != 0)) { + /* pass a copy of the packet to all local matches */ + if (mpcb->recv != NULL) { + struct pbuf *q; + q = pbuf_clone(PBUF_RAW, PBUF_POOL, p); + if (q != NULL) { + mpcb->recv(mpcb->recv_arg, mpcb, q, ip_current_src_addr(), src); + } + } + } + } + } + } +#endif /* SO_REUSE && SO_REUSE_RXTOALL */ + /* callback */ + if (pcb->recv != NULL) { + /* now the recv function is responsible for freeing p */ + pcb->recv(pcb->recv_arg, pcb, p, ip_current_src_addr(), src); + } else { + /* no recv function registered? then we have to free the pbuf! */ + pbuf_free(p); + goto end; + } + } else { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_input: not for us.\n")); + +#if LWIP_ICMP || LWIP_ICMP6 + /* No match was found, send ICMP destination port unreachable unless + destination address was broadcast/multicast. */ + if (!broadcast && !ip_addr_ismulticast(ip_current_dest_addr())) { + /* move payload pointer back to ip header */ + pbuf_header_force(p, (s16_t)(ip_current_header_tot_len() + UDP_HLEN)); + icmp_port_unreach(ip_current_is_v6(), p); + } +#endif /* LWIP_ICMP || LWIP_ICMP6 */ + UDP_STATS_INC(udp.proterr); + UDP_STATS_INC(udp.drop); + MIB2_STATS_INC(mib2.udpnoports); + pbuf_free(p); + } + } else { + pbuf_free(p); + } +end: + PERF_STOP("udp_input"); + return; +#if CHECKSUM_CHECK_UDP +chkerr: + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("udp_input: UDP (or UDP Lite) datagram discarded due to failing checksum\n")); + UDP_STATS_INC(udp.chkerr); + UDP_STATS_INC(udp.drop); + MIB2_STATS_INC(mib2.udpinerrors); + pbuf_free(p); + PERF_STOP("udp_input"); +#endif /* CHECKSUM_CHECK_UDP */ +} + +/** + * @ingroup udp_raw + * Sends the pbuf p using UDP. The pbuf is not deallocated. + * + * + * @param pcb UDP PCB used to send the data. + * @param p chain of pbuf's to be sent. + * + * The datagram will be sent to the current remote_ip & remote_port + * stored in pcb. If the pcb is not bound to a port, it will + * automatically be bound to a random port. + * + * @return lwIP error code. + * - ERR_OK. Successful. No error occurred. + * - ERR_MEM. Out of memory. + * - ERR_RTE. Could not find route to destination address. + * - ERR_VAL. No PCB or PCB is dual-stack + * - More errors could be returned by lower protocol layers. 
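+ *
+ * A minimal usage sketch (hedged; payload/payload_len stand for application
+ * data and error handling is reduced to the essentials):
+ *
+ *   struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, payload_len, PBUF_RAM);
+ *   if (p != NULL) {
+ *     pbuf_take(p, payload, payload_len); // copy application data into p
+ *     err = udp_send(pcb, p);             // UDP header is prepended by the stack
+ *     pbuf_free(p);                       // caller still owns p afterwards
+ *   }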
+ * + * @see udp_disconnect() udp_sendto() + */ +err_t +udp_send(struct udp_pcb *pcb, struct pbuf *p) +{ + LWIP_ERROR("udp_send: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_send: invalid pbuf", p != NULL, return ERR_ARG); + + if (IP_IS_ANY_TYPE_VAL(pcb->remote_ip)) { + return ERR_VAL; + } + + /* send to the packet using remote ip and port stored in the pcb */ + return udp_sendto(pcb, p, &pcb->remote_ip, pcb->remote_port); +} + +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP +/** @ingroup udp_raw + * Same as udp_send() but with checksum + */ +err_t +udp_send_chksum(struct udp_pcb *pcb, struct pbuf *p, + u8_t have_chksum, u16_t chksum) +{ + LWIP_ERROR("udp_send_chksum: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_send_chksum: invalid pbuf", p != NULL, return ERR_ARG); + + if (IP_IS_ANY_TYPE_VAL(pcb->remote_ip)) { + return ERR_VAL; + } + + /* send to the packet using remote ip and port stored in the pcb */ + return udp_sendto_chksum(pcb, p, &pcb->remote_ip, pcb->remote_port, + have_chksum, chksum); +} +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + +/** + * @ingroup udp_raw + * Send data to a specified address using UDP. + * + * @param pcb UDP PCB used to send the data. + * @param p chain of pbuf's to be sent. + * @param dst_ip Destination IP address. + * @param dst_port Destination UDP port. + * + * dst_ip & dst_port are expected to be in the same byte order as in the pcb. + * + * If the PCB already has a remote address association, it will + * be restored after the data is sent. + * + * @return lwIP error code (@see udp_send for possible error codes) + * + * @see udp_disconnect() udp_send() + */ +err_t +udp_sendto(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port) +{ +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_chksum(pcb, p, dst_ip, dst_port, 0, 0); +} + +/** @ingroup udp_raw + * Same as udp_sendto(), but with checksum */ +err_t +udp_sendto_chksum(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, + u16_t dst_port, u8_t have_chksum, u16_t chksum) +{ +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + struct netif *netif; + + LWIP_ERROR("udp_sendto: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto: invalid pbuf", p != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto: invalid dst_ip", dst_ip != NULL, return ERR_ARG); + + if (!IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { + return ERR_VAL; + } + + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_send\n")); + + if (pcb->netif_idx != NETIF_NO_INDEX) { + netif = netif_get_by_index(pcb->netif_idx); + } else { +#if LWIP_MULTICAST_TX_OPTIONS + netif = NULL; + if (ip_addr_ismulticast(dst_ip)) { + /* For IPv6, the interface to use for packets with a multicast destination + * is specified using an interface index. The same approach may be used for + * IPv4 as well, in which case it overrides the IPv4 multicast override + * address below. Here we have to look up the netif by going through the + * list, but by doing so we skip a route lookup. If the interface index has + * gone stale, we fall through and do the regular route lookup after all. */ + if (pcb->mcast_ifindex != NETIF_NO_INDEX) { + netif = netif_get_by_index(pcb->mcast_ifindex); + } +#if LWIP_IPV4 + else +#if LWIP_IPV6 + if (IP_IS_V4(dst_ip)) +#endif /* LWIP_IPV6 */ + { + /* IPv4 does not use source-based routing by default, so we use an + administratively selected interface for multicast by default. 
+ However, this can be overridden by setting an interface address + in pcb->mcast_ip4 that is used for routing. If this routing lookup + fails, we try regular routing as though no override was set. */ + if (!ip4_addr_isany_val(pcb->mcast_ip4) && + !ip4_addr_cmp(&pcb->mcast_ip4, IP4_ADDR_BROADCAST)) { + netif = ip4_route_src(ip_2_ip4(&pcb->local_ip), &pcb->mcast_ip4); + } + } +#endif /* LWIP_IPV4 */ + } + + if (netif == NULL) +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + { + /* find the outgoing network interface for this packet */ + netif = ip_route(&pcb->local_ip, dst_ip); + } + } + + /* no outgoing network interface could be found? */ + if (netif == NULL) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("udp_send: No route to ")); + ip_addr_debug_print(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, dst_ip); + LWIP_DEBUGF(UDP_DEBUG, ("\n")); + UDP_STATS_INC(udp.rterr); + return ERR_RTE; + } +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_if_chksum(pcb, p, dst_ip, dst_port, netif, have_chksum, chksum); +#else /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + return udp_sendto_if(pcb, p, dst_ip, dst_port, netif); +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ +} + +/** + * @ingroup udp_raw + * Send data to a specified address using UDP. + * The netif used for sending can be specified. + * + * This function exists mainly for DHCP, to be able to send UDP packets + * on a netif that is still down. + * + * @param pcb UDP PCB used to send the data. + * @param p chain of pbuf's to be sent. + * @param dst_ip Destination IP address. + * @param dst_port Destination UDP port. + * @param netif the netif used for sending. + * + * dst_ip & dst_port are expected to be in the same byte order as in the pcb. + * + * @return lwIP error code (@see udp_send for possible error codes) + * + * @see udp_disconnect() udp_send() + */ +err_t +udp_sendto_if(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, struct netif *netif) +{ +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_if_chksum(pcb, p, dst_ip, dst_port, netif, 0, 0); +} + +/** Same as udp_sendto_if(), but with checksum */ +err_t +udp_sendto_if_chksum(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, + u16_t dst_port, struct netif *netif, u8_t have_chksum, + u16_t chksum) +{ +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + const ip_addr_t *src_ip; + + LWIP_ERROR("udp_sendto_if: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if: invalid pbuf", p != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if: invalid dst_ip", dst_ip != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if: invalid netif", netif != NULL, return ERR_ARG); + + if (!IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { + return ERR_VAL; + } + + /* PCB local address is IP_ANY_ADDR or multicast? */ +#if LWIP_IPV6 + if (IP_IS_V6(dst_ip)) { + if (ip6_addr_isany(ip_2_ip6(&pcb->local_ip)) || + ip6_addr_ismulticast(ip_2_ip6(&pcb->local_ip))) { + src_ip = ip6_select_source_address(netif, ip_2_ip6(dst_ip)); + if (src_ip == NULL) { + /* No suitable source address was found. */ + return ERR_RTE; + } + } else { + /* use UDP PCB local IPv6 address as source address, if still valid. */ + if (netif_get_ip6_addr_match(netif, ip_2_ip6(&pcb->local_ip)) < 0) { + /* Address isn't valid anymore. 
*/ + return ERR_RTE; + } + src_ip = &pcb->local_ip; + } + } +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 && LWIP_IPV6 + else +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_IPV4 + if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip)) || + ip4_addr_ismulticast(ip_2_ip4(&pcb->local_ip))) { + /* if the local_ip is any or multicast + * use the outgoing network interface IP address as source address */ + src_ip = netif_ip_addr4(netif); + } else { + /* check if UDP PCB local IP address is correct + * this could be an old address if netif->ip_addr has changed */ + if (!ip4_addr_cmp(ip_2_ip4(&(pcb->local_ip)), netif_ip4_addr(netif))) { + /* local_ip doesn't match, drop the packet */ + return ERR_RTE; + } + /* use UDP PCB local IP address as source address */ + src_ip = &pcb->local_ip; + } +#endif /* LWIP_IPV4 */ +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_if_src_chksum(pcb, p, dst_ip, dst_port, netif, have_chksum, chksum, src_ip); +#else /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + return udp_sendto_if_src(pcb, p, dst_ip, dst_port, netif, src_ip); +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ +} + +/** @ingroup udp_raw + * Same as @ref udp_sendto_if, but with source address */ +err_t +udp_sendto_if_src(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, struct netif *netif, const ip_addr_t *src_ip) +{ +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_if_src_chksum(pcb, p, dst_ip, dst_port, netif, 0, 0, src_ip); +} + +/** Same as udp_sendto_if_src(), but with checksum */ +err_t +udp_sendto_if_src_chksum(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, + u16_t dst_port, struct netif *netif, u8_t have_chksum, + u16_t chksum, const ip_addr_t *src_ip) +{ +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + struct udp_hdr *udphdr; + err_t err; + struct pbuf *q; /* q will be sent down the stack */ + u8_t ip_proto; + u8_t ttl; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_sendto_if_src: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if_src: invalid pbuf", p != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if_src: invalid dst_ip", dst_ip != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if_src: invalid src_ip", src_ip != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if_src: invalid netif", netif != NULL, return ERR_ARG); + + if (!IP_ADDR_PCB_VERSION_MATCH(pcb, src_ip) || + !IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { + return ERR_VAL; + } + +#if LWIP_IPV4 && IP_SOF_BROADCAST + /* broadcast filter? */ + if (!ip_get_option(pcb, SOF_BROADCAST) && +#if LWIP_IPV6 + IP_IS_V4(dst_ip) && +#endif /* LWIP_IPV6 */ + ip_addr_isbroadcast(dst_ip, netif)) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("udp_sendto_if: SOF_BROADCAST not enabled on pcb %p\n", (void *)pcb)); + return ERR_VAL; + } +#endif /* LWIP_IPV4 && IP_SOF_BROADCAST */ + + /* if the PCB is not yet bound to a port, bind it here */ + if (pcb->local_port == 0) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_send: not yet bound to a port, binding now\n")); + err = udp_bind(pcb, &pcb->local_ip, pcb->local_port); + if (err != ERR_OK) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("udp_send: forced port bind failed\n")); + return err; + } + } + + /* packet too large to add a UDP header without causing an overflow? */ + if ((u16_t)(p->tot_len + UDP_HLEN) < p->tot_len) { + return ERR_MEM; + } + /* not enough space to add an UDP header to first pbuf in given p chain? 
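+      (Applications that allocate their pbufs with
+      pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM) reserve room for the
+      transport header up front, so pbuf_add_header() below normally
+      succeeds in place; otherwise a separate header pbuf is allocated and
+      chained in front.)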
*/ + if (pbuf_add_header(p, UDP_HLEN)) { + /* allocate header in a separate new pbuf */ + q = pbuf_alloc(PBUF_IP, UDP_HLEN, PBUF_RAM); + /* new header pbuf could not be allocated? */ + if (q == NULL) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("udp_send: could not allocate header\n")); + return ERR_MEM; + } + if (p->tot_len != 0) { + /* chain header q in front of given pbuf p (only if p contains data) */ + pbuf_chain(q, p); + } + /* first pbuf q points to header pbuf */ + LWIP_DEBUGF(UDP_DEBUG, + ("udp_send: added header pbuf %p before given pbuf %p\n", (void *)q, (void *)p)); + } else { + /* adding space for header within p succeeded */ + /* first pbuf q equals given pbuf */ + q = p; + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: added header in given pbuf %p\n", (void *)p)); + } + LWIP_ASSERT("check that first pbuf can hold struct udp_hdr", + (q->len >= sizeof(struct udp_hdr))); + /* q now represents the packet to be sent */ + udphdr = (struct udp_hdr *)q->payload; + udphdr->src = lwip_htons(pcb->local_port); + udphdr->dest = lwip_htons(dst_port); + /* in UDP, 0 checksum means 'no checksum' */ + udphdr->chksum = 0x0000; + + /* Multicast Loop? */ +#if LWIP_MULTICAST_TX_OPTIONS + if (((pcb->flags & UDP_FLAGS_MULTICAST_LOOP) != 0) && ip_addr_ismulticast(dst_ip)) { + q->flags |= PBUF_FLAG_MCASTLOOP; + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: sending datagram of length %"U16_F"\n", q->tot_len)); + +#if LWIP_UDPLITE + /* UDP Lite protocol? */ + if (pcb->flags & UDP_FLAGS_UDPLITE) { + u16_t chklen, chklen_hdr; + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP LITE packet length %"U16_F"\n", q->tot_len)); + /* set UDP message length in UDP header */ + chklen_hdr = chklen = pcb->chksum_len_tx; + if ((chklen < sizeof(struct udp_hdr)) || (chklen > q->tot_len)) { + if (chklen != 0) { + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP LITE pcb->chksum_len is illegal: %"U16_F"\n", chklen)); + } + /* For UDP-Lite, checksum length of 0 means checksum + over the complete packet. (See RFC 3828 chap. 3.1) + At least the UDP-Lite header must be covered by the + checksum, therefore, if chksum_len has an illegal + value, we generate the checksum over the complete + packet to be safe. */ + chklen_hdr = 0; + chklen = q->tot_len; + } + udphdr->len = lwip_htons(chklen_hdr); + /* calculate checksum */ +#if CHECKSUM_GEN_UDP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_UDP) { +#if LWIP_CHECKSUM_ON_COPY + if (have_chksum) { + chklen = UDP_HLEN; + } +#endif /* LWIP_CHECKSUM_ON_COPY */ + udphdr->chksum = ip_chksum_pseudo_partial(q, IP_PROTO_UDPLITE, + q->tot_len, chklen, src_ip, dst_ip); +#if LWIP_CHECKSUM_ON_COPY + if (have_chksum) { + u32_t acc; + acc = udphdr->chksum + (u16_t)~(chksum); + udphdr->chksum = FOLD_U32T(acc); + } +#endif /* LWIP_CHECKSUM_ON_COPY */ + + /* chksum zero must become 0xffff, as zero means 'no checksum' */ + if (udphdr->chksum == 0x0000) { + udphdr->chksum = 0xffff; + } + } +#endif /* CHECKSUM_GEN_UDP */ + + ip_proto = IP_PROTO_UDPLITE; + } else +#endif /* LWIP_UDPLITE */ + { /* UDP */ + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP packet length %"U16_F"\n", q->tot_len)); + udphdr->len = lwip_htons(q->tot_len); + /* calculate checksum */ +#if CHECKSUM_GEN_UDP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_UDP) { + /* Checksum is mandatory over IPv6. 
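+         For IPv4 it can be disabled per pcb by setting UDP_FLAGS_NOCHKSUM
+         (e.g. via udp_set_flags(pcb, UDP_FLAGS_NOCHKSUM)), in which case a
+         zero checksum field is transmitted, meaning "no checksum".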
*/ + if (IP_IS_V6(dst_ip) || (pcb->flags & UDP_FLAGS_NOCHKSUM) == 0) { + u16_t udpchksum; +#if LWIP_CHECKSUM_ON_COPY + if (have_chksum) { + u32_t acc; + udpchksum = ip_chksum_pseudo_partial(q, IP_PROTO_UDP, + q->tot_len, UDP_HLEN, src_ip, dst_ip); + acc = udpchksum + (u16_t)~(chksum); + udpchksum = FOLD_U32T(acc); + } else +#endif /* LWIP_CHECKSUM_ON_COPY */ + { + udpchksum = ip_chksum_pseudo(q, IP_PROTO_UDP, q->tot_len, + src_ip, dst_ip); + } + + /* chksum zero must become 0xffff, as zero means 'no checksum' */ + if (udpchksum == 0x0000) { + udpchksum = 0xffff; + } + udphdr->chksum = udpchksum; + } + } +#endif /* CHECKSUM_GEN_UDP */ + ip_proto = IP_PROTO_UDP; + } + + /* Determine TTL to use */ +#if LWIP_MULTICAST_TX_OPTIONS + ttl = (ip_addr_ismulticast(dst_ip) ? udp_get_multicast_ttl(pcb) : pcb->ttl); +#else /* LWIP_MULTICAST_TX_OPTIONS */ + ttl = pcb->ttl; +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP checksum 0x%04"X16_F"\n", udphdr->chksum)); + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: ip_output_if (,,,,0x%02"X16_F",)\n", (u16_t)ip_proto)); + /* output to IP */ + NETIF_SET_HINTS(netif, &(pcb->netif_hints)); + err = ip_output_if_src(q, src_ip, dst_ip, ttl, pcb->tos, ip_proto, netif); + NETIF_RESET_HINTS(netif); + + /* @todo: must this be increased even if error occurred? */ + MIB2_STATS_INC(mib2.udpoutdatagrams); + + /* did we chain a separate header pbuf earlier? */ + if (q != p) { + /* free the header pbuf */ + pbuf_free(q); + q = NULL; + /* p is still referenced by the caller, and will live on */ + } + + UDP_STATS_INC(udp.xmit); + return err; +} + +/** + * @ingroup udp_raw + * Bind an UDP PCB. + * + * @param pcb UDP PCB to be bound with a local address ipaddr and port. + * @param ipaddr local IP address to bind with. Use IP_ANY_TYPE to + * bind to all local interfaces. + * @param port local UDP port to bind with. Use 0 to automatically bind + * to a random port between UDP_LOCAL_PORT_RANGE_START and + * UDP_LOCAL_PORT_RANGE_END. + * + * ipaddr & port are expected to be in the same byte order as in the pcb. + * + * @return lwIP error code. + * - ERR_OK. Successful. No error occurred. + * - ERR_USE. The specified ipaddr and port are already bound to by + * another UDP PCB. + * + * @see udp_disconnect() + */ +err_t +udp_bind(struct udp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port) +{ + struct udp_pcb *ipcb; + u8_t rebind; +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + ip_addr_t zoned_ipaddr; +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY; + } +#else /* LWIP_IPV4 */ + LWIP_ERROR("udp_bind: invalid ipaddr", ipaddr != NULL, return ERR_ARG); +#endif /* LWIP_IPV4 */ + + LWIP_ERROR("udp_bind: invalid pcb", pcb != NULL, return ERR_ARG); + + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_bind(ipaddr = ")); + ip_addr_debug_print(UDP_DEBUG | LWIP_DBG_TRACE, ipaddr); + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, (", port = %"U16_F")\n", port)); + + rebind = 0; + /* Check for double bind and rebind of the same pcb */ + for (ipcb = udp_pcbs; ipcb != NULL; ipcb = ipcb->next) { + /* is this UDP PCB already on active list? */ + if (pcb == ipcb) { + rebind = 1; + break; + } + } + +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now. + * This is legacy support: scope-aware callers should always provide properly + * zoned source addresses. 
Do the zone selection before the address-in-use + * check below; as such we have to make a temporary copy of the address. */ + if (IP_IS_V6(ipaddr) && ip6_addr_lacks_zone(ip_2_ip6(ipaddr), IP6_UNKNOWN)) { + ip_addr_copy(zoned_ipaddr, *ipaddr); + ip6_addr_select_zone(ip_2_ip6(&zoned_ipaddr), ip_2_ip6(&zoned_ipaddr)); + ipaddr = &zoned_ipaddr; + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + /* no port specified? */ + if (port == 0) { + port = udp_new_port(); + if (port == 0) { + /* no more ports available in local range */ + LWIP_DEBUGF(UDP_DEBUG, ("udp_bind: out of free UDP ports\n")); + return ERR_USE; + } + } else { + for (ipcb = udp_pcbs; ipcb != NULL; ipcb = ipcb->next) { + if (pcb != ipcb) { + /* By default, we don't allow binding to a port that any other udp + PCB is already bound to, unless *all* PCBs with that port have the + REUSEADDR flag set. */ +#if SO_REUSE + if (!ip_get_option(pcb, SOF_REUSEADDR) || + !ip_get_option(ipcb, SOF_REUSEADDR)) +#endif /* SO_REUSE */ + { + /* port matches that of PCB in list and REUSEADDR not set -> reject */ + if ((ipcb->local_port == port) && + /* IP address matches or any IP used? */ + (ip_addr_cmp(&ipcb->local_ip, ipaddr) || ip_addr_isany(ipaddr) || + ip_addr_isany(&ipcb->local_ip))) { + /* other PCB already binds to this local IP and port */ + LWIP_DEBUGF(UDP_DEBUG, + ("udp_bind: local port %"U16_F" already bound by another pcb\n", port)); + return ERR_USE; + } + } + } + } + } + + ip_addr_set_ipaddr(&pcb->local_ip, ipaddr); + + pcb->local_port = port; + mib2_udp_bind(pcb); + /* pcb not active yet? */ + if (rebind == 0) { + /* place the PCB on the active list if not already there */ + pcb->next = udp_pcbs; + udp_pcbs = pcb; + } + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("udp_bind: bound to ")); + ip_addr_debug_print_val(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, pcb->local_ip); + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, (", port %"U16_F")\n", pcb->local_port)); + return ERR_OK; +} + +/** + * @ingroup udp_raw + * Bind a UDP PCB to a specific netif. + * After calling this function, all packets received via this PCB + * are guaranteed to have come in via the specified netif, and all + * outgoing packets will go out via the specified netif. + * + * @param pcb UDP PCB to be bound. + * @param netif netif to bind udp pcb to. Can be NULL. + * + * @see udp_disconnect() + */ +void +udp_bind_netif(struct udp_pcb *pcb, const struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + if (netif != NULL) { + pcb->netif_idx = netif_get_index(netif); + } else { + pcb->netif_idx = NETIF_NO_INDEX; + } +} + +/** + * @ingroup udp_raw + * Sets the remote end of the pcb. This function does not generate any + * network traffic, but only sets the remote address of the pcb. + * + * @param pcb UDP PCB to be connected with remote address ipaddr and port. + * @param ipaddr remote IP address to connect with. + * @param port remote UDP port to connect with. + * + * @return lwIP error code + * + * ipaddr & port are expected to be in the same byte order as in the pcb. + * + * The udp pcb is bound to a random local port if not already bound.
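+ *
+ * Editor's note: the following usage sketch is an illustration added to this
+ * document and is not part of the upstream lwIP sources; the pcb and address
+ * names are hypothetical and error handling is omitted.
+ * @code{.c}
+ * struct udp_pcb *my_pcb = udp_new();
+ * if (my_pcb != NULL) {
+ *   udp_bind(my_pcb, IP4_ADDR_ANY, 0);     // 0 = let lwIP pick a free local port
+ *   udp_connect(my_pcb, &server_addr, 7);  // only records the remote end
+ *   // a later udp_send(my_pcb, p) now goes to server_addr, port 7
+ * }
+ * @endcode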
+ * + * @see udp_disconnect() + */ +err_t +udp_connect(struct udp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port) +{ + struct udp_pcb *ipcb; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_connect: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_connect: invalid ipaddr", ipaddr != NULL, return ERR_ARG); + + if (pcb->local_port == 0) { + err_t err = udp_bind(pcb, &pcb->local_ip, pcb->local_port); + if (err != ERR_OK) { + return err; + } + } + + ip_addr_set_ipaddr(&pcb->remote_ip, ipaddr); +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now, + * using the bound address to make a more informed decision when possible. */ + if (IP_IS_V6(&pcb->remote_ip) && + ip6_addr_lacks_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNKNOWN)) { + ip6_addr_select_zone(ip_2_ip6(&pcb->remote_ip), ip_2_ip6(&pcb->local_ip)); + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + pcb->remote_port = port; + pcb->flags |= UDP_FLAGS_CONNECTED; + + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("udp_connect: connected to ")); + ip_addr_debug_print_val(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + pcb->remote_ip); + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, (", port %"U16_F")\n", pcb->remote_port)); + + /* Insert UDP PCB into the list of active UDP PCBs. */ + for (ipcb = udp_pcbs; ipcb != NULL; ipcb = ipcb->next) { + if (pcb == ipcb) { + /* already on the list, just return */ + return ERR_OK; + } + } + /* PCB not yet on the list, add PCB now */ + pcb->next = udp_pcbs; + udp_pcbs = pcb; + return ERR_OK; +} + +/** + * @ingroup udp_raw + * Remove the remote end of the pcb. This function does not generate + * any network traffic, but only removes the remote address of the pcb. + * + * @param pcb the udp pcb to disconnect. + */ +void +udp_disconnect(struct udp_pcb *pcb) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_disconnect: invalid pcb", pcb != NULL, return); + + /* reset remote address association */ +#if LWIP_IPV4 && LWIP_IPV6 + if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { + ip_addr_copy(pcb->remote_ip, *IP_ANY_TYPE); + } else { +#endif + ip_addr_set_any(IP_IS_V6_VAL(pcb->remote_ip), &pcb->remote_ip); +#if LWIP_IPV4 && LWIP_IPV6 + } +#endif + pcb->remote_port = 0; + pcb->netif_idx = NETIF_NO_INDEX; + /* mark PCB as unconnected */ + udp_clear_flags(pcb, UDP_FLAGS_CONNECTED); +} + +/** + * @ingroup udp_raw + * Set a receive callback for a UDP PCB. + * This callback will be called when receiving a datagram for the pcb. + * + * @param pcb the pcb for which to set the recv callback + * @param recv function pointer of the callback function + * @param recv_arg additional argument to pass to the callback function + */ +void +udp_recv(struct udp_pcb *pcb, udp_recv_fn recv, void *recv_arg) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_recv: invalid pcb", pcb != NULL, return); + + /* remember recv() callback and user data */ + pcb->recv = recv; + pcb->recv_arg = recv_arg; +} + +/** + * @ingroup udp_raw + * Removes and deallocates the pcb. + * + * @param pcb UDP PCB to be removed. The PCB is removed from the list of + * UDP PCB's and the data structure is freed from memory. + * + * @see udp_new() + */ +void +udp_remove(struct udp_pcb *pcb) +{ + struct udp_pcb *pcb2; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_remove: invalid pcb", pcb != NULL, return); + + mib2_udp_unbind(pcb); + /* pcb to be removed is first in list? 
*/ + if (udp_pcbs == pcb) { + /* make list start at 2nd pcb */ + udp_pcbs = udp_pcbs->next; + /* pcb not 1st in list */ + } else { + for (pcb2 = udp_pcbs; pcb2 != NULL; pcb2 = pcb2->next) { + /* find pcb in udp_pcbs list */ + if (pcb2->next != NULL && pcb2->next == pcb) { + /* remove pcb from list */ + pcb2->next = pcb->next; + break; + } + } + } + memp_free(MEMP_UDP_PCB, pcb); +} + +/** + * @ingroup udp_raw + * Creates a new UDP pcb which can be used for UDP communication. The + * pcb is not active until it has either been bound to a local address + * or connected to a remote address. + * + * @return The UDP PCB which was created. NULL if the PCB data structure + * could not be allocated. + * + * @see udp_remove() + */ +struct udp_pcb * +udp_new(void) +{ + struct udp_pcb *pcb; + + LWIP_ASSERT_CORE_LOCKED(); + + pcb = (struct udp_pcb *)memp_malloc(MEMP_UDP_PCB); + /* could allocate UDP PCB? */ + if (pcb != NULL) { + /* UDP Lite: by initializing to all zeroes, chksum_len is set to 0 + * which means checksum is generated over the whole datagram per default + * (recommended as default by RFC 3828). */ + /* initialize PCB to all zeroes */ + memset(pcb, 0, sizeof(struct udp_pcb)); + pcb->ttl = UDP_TTL; +#if LWIP_MULTICAST_TX_OPTIONS + udp_set_multicast_ttl(pcb, UDP_TTL); +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + } + return pcb; +} + +/** + * @ingroup udp_raw + * Create a UDP PCB for specific IP type. + * The pcb is not active until it has either been bound to a local address + * or connected to a remote address. + * + * @param type IP address type, see @ref lwip_ip_addr_type definitions. + * If you want to listen to IPv4 and IPv6 (dual-stack) packets, + * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE. + * @return The UDP PCB which was created. NULL if the PCB data structure + * could not be allocated. + * + * @see udp_remove() + */ +struct udp_pcb * +udp_new_ip_type(u8_t type) +{ + struct udp_pcb *pcb; + + LWIP_ASSERT_CORE_LOCKED(); + + pcb = udp_new(); +#if LWIP_IPV4 && LWIP_IPV6 + if (pcb != NULL) { + IP_SET_TYPE_VAL(pcb->local_ip, type); + IP_SET_TYPE_VAL(pcb->remote_ip, type); + } +#else + LWIP_UNUSED_ARG(type); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + return pcb; +} + +/** This function is called from netif.c when address is changed + * + * @param old_addr IP address of the netif before change + * @param new_addr IP address of the netif after change + */ +void udp_netif_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr) +{ + struct udp_pcb *upcb; + + if (!ip_addr_isany(old_addr) && !ip_addr_isany(new_addr)) { + for (upcb = udp_pcbs; upcb != NULL; upcb = upcb->next) { + /* PCB bound to current local interface address? */ + if (ip_addr_cmp(&upcb->local_ip, old_addr)) { + /* The PCB is bound to the old ipaddr and + * is set to bound to the new one instead */ + ip_addr_copy(upcb->local_ip, *new_addr); + } + } + } +} + +#if UDP_DEBUG +/** + * Print UDP header information for debug purposes. + * + * @param udphdr pointer to the udp header in memory. 
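+ *
+ * Editor's note: illustration added to this document, not upstream lwIP text.
+ * With UDP_DEBUG enabled in lwipopts.h, a received header can be dumped from a
+ * pbuf whose payload points at the UDP header:
+ * @code{.c}
+ * udp_debug_print((struct udp_hdr *)p->payload);
+ * @endcode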
+ */ +void +udp_debug_print(struct udp_hdr *udphdr) +{ + LWIP_DEBUGF(UDP_DEBUG, ("UDP header:\n")); + LWIP_DEBUGF(UDP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(UDP_DEBUG, ("| %5"U16_F" | %5"U16_F" | (src port, dest port)\n", + lwip_ntohs(udphdr->src), lwip_ntohs(udphdr->dest))); + LWIP_DEBUGF(UDP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(UDP_DEBUG, ("| %5"U16_F" | 0x%04"X16_F" | (len, chksum)\n", + lwip_ntohs(udphdr->len), lwip_ntohs(udphdr->chksum))); + LWIP_DEBUGF(UDP_DEBUG, ("+-------------------------------+\n")); +} +#endif /* UDP_DEBUG */ + +#endif /* LWIP_UDP */ diff --git a/Middlewares/Third_Party/LwIP/src/include/compat/posix/arpa/inet.h b/Middlewares/Third_Party/LwIP/src/include/compat/posix/arpa/inet.h new file mode 100644 index 0000000..0ed9baf --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/compat/posix/arpa/inet.h @@ -0,0 +1,33 @@ +/** + * @file + * This file is a posix wrapper for lwip/sockets.h. + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/sockets.h" diff --git a/Middlewares/Third_Party/LwIP/src/include/compat/posix/net/if.h b/Middlewares/Third_Party/LwIP/src/include/compat/posix/net/if.h new file mode 100644 index 0000000..6b8e63a --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/compat/posix/net/if.h @@ -0,0 +1,36 @@ +/** + * @file + * This file is a posix wrapper for lwip/if_api.h. + */ + +/* + * Copyright (c) 2017 Joel Cunningham, Garmin International, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/if_api.h" diff --git a/Middlewares/Third_Party/LwIP/src/include/compat/posix/netdb.h b/Middlewares/Third_Party/LwIP/src/include/compat/posix/netdb.h new file mode 100644 index 0000000..12d4c7f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/compat/posix/netdb.h @@ -0,0 +1,33 @@ +/** + * @file + * This file is a posix wrapper for lwip/netdb.h. + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/netdb.h" diff --git a/Middlewares/Third_Party/LwIP/src/include/compat/posix/sys/socket.h b/Middlewares/Third_Party/LwIP/src/include/compat/posix/sys/socket.h new file mode 100644 index 0000000..0ed9baf --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/compat/posix/sys/socket.h @@ -0,0 +1,33 @@ +/** + * @file + * This file is a posix wrapper for lwip/sockets.h. + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/sockets.h" diff --git a/Middlewares/Third_Party/LwIP/src/include/compat/stdc/errno.h b/Middlewares/Third_Party/LwIP/src/include/compat/stdc/errno.h new file mode 100644 index 0000000..98a9aec --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/compat/stdc/errno.h @@ -0,0 +1,33 @@ +/** + * @file + * This file is a posix/stdc wrapper for lwip/errno.h. + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/errno.h" diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/altcp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/altcp.h new file mode 100644 index 0000000..97abc54 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/altcp.h @@ -0,0 +1,201 @@ +/** + * @file + * Application layered TCP connection API (to be used from TCPIP thread)\n + * + * This file contains the generic API. + * For more details see @ref altcp_api. 
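+ *
+ * Editor's note: a minimal usage sketch added to this document (not part of
+ * the upstream lwIP header). It assumes hypothetical callbacks my_connected
+ * and my_recv plus a filled-in remote_addr; altcp_tcp_alloc is declared in
+ * lwip/altcp_tcp.h.
+ * @code{.c}
+ * altcp_allocator_t tcp_alloc = { altcp_tcp_alloc, NULL };
+ * struct altcp_pcb *conn = altcp_new(&tcp_alloc);  // plain TCP inner connection
+ * if (conn != NULL) {
+ *   altcp_recv(conn, my_recv);
+ *   altcp_connect(conn, &remote_addr, 80, my_connected);
+ * }
+ * @endcode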
+ */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_H +#define LWIP_HDR_ALTCP_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/tcpbase.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct altcp_pcb; +struct altcp_functions; + +typedef err_t (*altcp_accept_fn)(void *arg, struct altcp_pcb *new_conn, err_t err); +typedef err_t (*altcp_connected_fn)(void *arg, struct altcp_pcb *conn, err_t err); +typedef err_t (*altcp_recv_fn)(void *arg, struct altcp_pcb *conn, struct pbuf *p, err_t err); +typedef err_t (*altcp_sent_fn)(void *arg, struct altcp_pcb *conn, u16_t len); +typedef err_t (*altcp_poll_fn)(void *arg, struct altcp_pcb *conn); +typedef void (*altcp_err_fn)(void *arg, err_t err); + +typedef struct altcp_pcb* (*altcp_new_fn)(void *arg, u8_t ip_type); + +struct altcp_pcb { + const struct altcp_functions *fns; + struct altcp_pcb *inner_conn; + void *arg; + void *state; + /* application callbacks */ + altcp_accept_fn accept; + altcp_connected_fn connected; + altcp_recv_fn recv; + altcp_sent_fn sent; + altcp_poll_fn poll; + altcp_err_fn err; + u8_t pollinterval; +}; + +/** @ingroup altcp */ +typedef struct altcp_allocator_s { + /** Allocator function */ + altcp_new_fn alloc; + /** Argument to allocator function */ + void *arg; +} altcp_allocator_t; + +struct altcp_pcb *altcp_new(altcp_allocator_t *allocator); +struct altcp_pcb *altcp_new_ip6(altcp_allocator_t *allocator); +struct altcp_pcb *altcp_new_ip_type(altcp_allocator_t *allocator, u8_t ip_type); + +void altcp_arg(struct altcp_pcb *conn, void *arg); +void altcp_accept(struct altcp_pcb *conn, altcp_accept_fn accept); +void altcp_recv(struct altcp_pcb *conn, altcp_recv_fn recv); +void altcp_sent(struct altcp_pcb *conn, altcp_sent_fn sent); +void altcp_poll(struct altcp_pcb *conn, altcp_poll_fn poll, u8_t interval); +void altcp_err(struct altcp_pcb *conn, altcp_err_fn 
err); + +void altcp_recved(struct altcp_pcb *conn, u16_t len); +err_t altcp_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port); +err_t altcp_connect(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected); + +/* return conn for source code compatibility to tcp callback API only */ +struct altcp_pcb *altcp_listen_with_backlog_and_err(struct altcp_pcb *conn, u8_t backlog, err_t *err); +#define altcp_listen_with_backlog(conn, backlog) altcp_listen_with_backlog_and_err(conn, backlog, NULL) +/** @ingroup altcp */ +#define altcp_listen(conn) altcp_listen_with_backlog_and_err(conn, TCP_DEFAULT_LISTEN_BACKLOG, NULL) + +void altcp_abort(struct altcp_pcb *conn); +err_t altcp_close(struct altcp_pcb *conn); +err_t altcp_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx); + +err_t altcp_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags); +err_t altcp_output(struct altcp_pcb *conn); + +u16_t altcp_mss(struct altcp_pcb *conn); +u16_t altcp_sndbuf(struct altcp_pcb *conn); +u16_t altcp_sndqueuelen(struct altcp_pcb *conn); +void altcp_nagle_disable(struct altcp_pcb *conn); +void altcp_nagle_enable(struct altcp_pcb *conn); +int altcp_nagle_disabled(struct altcp_pcb *conn); + +void altcp_setprio(struct altcp_pcb *conn, u8_t prio); + +err_t altcp_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port); +ip_addr_t *altcp_get_ip(struct altcp_pcb *conn, int local); +u16_t altcp_get_port(struct altcp_pcb *conn, int local); + +#ifdef LWIP_DEBUG +enum tcp_state altcp_dbg_get_tcp_state(struct altcp_pcb *conn); +#endif + +#ifdef __cplusplus +} +#endif + +#else /* LWIP_ALTCP */ + +/* ALTCP disabled, define everything to link against tcp callback API (e.g. to get a small non-ssl httpd) */ + +#include "lwip/tcp.h" + +#define altcp_accept_fn tcp_accept_fn +#define altcp_connected_fn tcp_connected_fn +#define altcp_recv_fn tcp_recv_fn +#define altcp_sent_fn tcp_sent_fn +#define altcp_poll_fn tcp_poll_fn +#define altcp_err_fn tcp_err_fn + +#define altcp_pcb tcp_pcb +#define altcp_tcp_new_ip_type tcp_new_ip_type +#define altcp_tcp_new tcp_new +#define altcp_tcp_new_ip6 tcp_new_ip6 + +#define altcp_new(allocator) tcp_new() +#define altcp_new_ip6(allocator) tcp_new_ip6() +#define altcp_new_ip_type(allocator, ip_type) tcp_new_ip_type(ip_type) + +#define altcp_arg tcp_arg +#define altcp_accept tcp_accept +#define altcp_recv tcp_recv +#define altcp_sent tcp_sent +#define altcp_poll tcp_poll +#define altcp_err tcp_err + +#define altcp_recved tcp_recved +#define altcp_bind tcp_bind +#define altcp_connect tcp_connect + +#define altcp_listen_with_backlog_and_err tcp_listen_with_backlog_and_err +#define altcp_listen_with_backlog tcp_listen_with_backlog +#define altcp_listen tcp_listen + +#define altcp_abort tcp_abort +#define altcp_close tcp_close +#define altcp_shutdown tcp_shutdown + +#define altcp_write tcp_write +#define altcp_output tcp_output + +#define altcp_mss tcp_mss +#define altcp_sndbuf tcp_sndbuf +#define altcp_sndqueuelen tcp_sndqueuelen +#define altcp_nagle_disable tcp_nagle_disable +#define altcp_nagle_enable tcp_nagle_enable +#define altcp_nagle_disabled tcp_nagle_disabled +#define altcp_setprio tcp_setprio + +#define altcp_get_tcp_addrinfo tcp_get_tcp_addrinfo +#define altcp_get_ip(pcb, local) ((local) ? 
(&(pcb)->local_ip) : (&(pcb)->remote_ip)) + +#ifdef LWIP_DEBUG +#define altcp_dbg_get_tcp_state tcp_dbg_get_tcp_state +#endif + +#endif /* LWIP_ALTCP */ + +#endif /* LWIP_HDR_ALTCP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tcp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tcp.h new file mode 100644 index 0000000..dbde584 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tcp.h @@ -0,0 +1,72 @@ +/** + * @file + * Application layered TCP connection API (to be used from TCPIP thread)\n + * This interface mimics the tcp callback API to the application while preventing + * direct linking (much like virtual functions). + * This way, an application can make use of other application layer protocols + * on top of TCP without knowing the details (e.g. TLS, proxy connection). + * + * This file contains the base implementation calling into tcp. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_TCP_H +#define LWIP_HDR_ALTCP_TCP_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/altcp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct altcp_pcb *altcp_tcp_new_ip_type(u8_t ip_type); + +#define altcp_tcp_new() altcp_tcp_new_ip_type(IPADDR_TYPE_V4) +#define altcp_tcp_new_ip6() altcp_tcp_new_ip_type(IPADDR_TYPE_V6) + +struct altcp_pcb *altcp_tcp_alloc(void *arg, u8_t ip_type); + +struct tcp_pcb; +struct altcp_pcb *altcp_tcp_wrap(struct tcp_pcb *tpcb); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_ALTCP */ + +#endif /* LWIP_HDR_ALTCP_TCP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tls.h b/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tls.h new file mode 100644 index 0000000..7b17c60 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tls.h @@ -0,0 +1,117 @@ +/** + * @file + * Application layered TCP/TLS connection API (to be used from TCPIP thread) + * + * @defgroup altcp_tls TLS layer + * @ingroup altcp + * This file contains function prototypes for a TLS layer. + * A port to ARM mbedtls is provided in the apps/ tree + * (LWIP_ALTCP_TLS_MBEDTLS option). + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_TLS_H +#define LWIP_HDR_ALTCP_TLS_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#if LWIP_ALTCP_TLS + +#include "lwip/altcp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @ingroup altcp_tls + * ALTCP_TLS configuration handle, content depends on port (e.g. 
mbedtls) + */ +struct altcp_tls_config; + +/** @ingroup altcp_tls + * Create an ALTCP_TLS server configuration handle + */ +struct altcp_tls_config *altcp_tls_create_config_server_privkey_cert(const u8_t *privkey, size_t privkey_len, + const u8_t *privkey_pass, size_t privkey_pass_len, + const u8_t *cert, size_t cert_len); + +/** @ingroup altcp_tls + * Create an ALTCP_TLS client configuration handle + */ +struct altcp_tls_config *altcp_tls_create_config_client(const u8_t *cert, size_t cert_len); + +/** @ingroup altcp_tls + * Create an ALTCP_TLS client configuration handle with two-way server/client authentication + */ +struct altcp_tls_config *altcp_tls_create_config_client_2wayauth(const u8_t *ca, size_t ca_len, const u8_t *privkey, size_t privkey_len, + const u8_t *privkey_pass, size_t privkey_pass_len, + const u8_t *cert, size_t cert_len); + +/** @ingroup altcp_tls + * Free an ALTCP_TLS configuration handle + */ +void altcp_tls_free_config(struct altcp_tls_config *conf); + +/** @ingroup altcp_tls + * Create new ALTCP_TLS layer wrapping an existing pcb as inner connection (e.g. TLS over TCP) + */ +struct altcp_pcb *altcp_tls_wrap(struct altcp_tls_config *config, struct altcp_pcb *inner_pcb); + +/** @ingroup altcp_tls + * Create new ALTCP_TLS pcb and its inner tcp pcb + */ +struct altcp_pcb *altcp_tls_new(struct altcp_tls_config *config, u8_t ip_type); + +/** @ingroup altcp_tls + * Create new ALTCP_TLS layer pcb and its inner tcp pcb. + * Same as @ref altcp_tls_new but this allocator function fits to + * @ref altcp_allocator_t / @ref altcp_new.\n + 'arg' must contain a struct altcp_tls_config *. + */ +struct altcp_pcb *altcp_tls_alloc(void *arg, u8_t ip_type); + +/** @ingroup altcp_tls + * Return pointer to internal TLS context so application can tweak it. + * Real type depends on port (e.g. mbedtls) + */ +void *altcp_tls_context(struct altcp_pcb *conn); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_ALTCP_TLS */ +#endif /* LWIP_ALTCP */ +#endif /* LWIP_HDR_ALTCP_TLS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/api.h b/Middlewares/Third_Party/LwIP/src/include/lwip/api.h new file mode 100644 index 0000000..c2afaf2 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/api.h @@ -0,0 +1,431 @@ +/** + * @file + * netconn API (to be used from non-TCPIP threads) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_API_H +#define LWIP_HDR_API_H + +#include "lwip/opt.h" + +#if LWIP_NETCONN || LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ +/* Note: Netconn API is always available when sockets are enabled - + * sockets are implemented on top of them */ + +#include "lwip/arch.h" +#include "lwip/netbuf.h" +#include "lwip/sys.h" +#include "lwip/ip_addr.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Throughout this file, IP addresses and port numbers are expected to be in + * the same byte order as in the corresponding pcb. + */ + +/* Flags for netconn_write (u8_t) */ +#define NETCONN_NOFLAG 0x00 +#define NETCONN_NOCOPY 0x00 /* Only for source code compatibility */ +#define NETCONN_COPY 0x01 +#define NETCONN_MORE 0x02 +#define NETCONN_DONTBLOCK 0x04 +#define NETCONN_NOAUTORCVD 0x08 /* prevent netconn_recv_data_tcp() from updating the tcp window - must be done manually via netconn_tcp_recvd() */ +#define NETCONN_NOFIN 0x10 /* upper layer already received data, leave FIN in queue until called again */ + +/* Flags for struct netconn.flags (u8_t) */ +/** This netconn had an error, don't block on recvmbox/acceptmbox any more */ +#define NETCONN_FLAG_MBOXCLOSED 0x01 +/** Should this netconn avoid blocking? */ +#define NETCONN_FLAG_NON_BLOCKING 0x02 +/** Was the last connect action a non-blocking one? */ +#define NETCONN_FLAG_IN_NONBLOCKING_CONNECT 0x04 +#if LWIP_NETCONN_FULLDUPLEX + /** The mbox of this netconn is being deallocated, don't use it anymore */ +#define NETCONN_FLAG_MBOXINVALID 0x08 +#endif /* LWIP_NETCONN_FULLDUPLEX */ +/** If a nonblocking write has been rejected before, poll_tcp needs to + check if the netconn is writable again */ +#define NETCONN_FLAG_CHECK_WRITESPACE 0x10 +#if LWIP_IPV6 +/** If this flag is set then only IPv6 communication is allowed on the + netconn. As per RFC#3493 this feature defaults to OFF allowing + dual-stack usage by default.
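+   Editor's note (illustration added to this document, not upstream lwIP
+   text): applications normally control this flag through the helper macro
+   defined further below, e.g.
+   @code{.c}
+   netconn_set_ipv6only(conn, 1);  // conn is an existing IPv6 netconn
+   @endcode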
*/ +#define NETCONN_FLAG_IPV6_V6ONLY 0x20 +#endif /* LWIP_IPV6 */ +#if LWIP_NETBUF_RECVINFO +/** Received packet info will be recorded for this netconn */ +#define NETCONN_FLAG_PKTINFO 0x40 +#endif /* LWIP_NETBUF_RECVINFO */ +/** A FIN has been received but not passed to the application yet */ +#define NETCONN_FIN_RX_PENDING 0x80 + +/* Helpers to process several netconn_types by the same code */ +#define NETCONNTYPE_GROUP(t) ((t)&0xF0) +#define NETCONNTYPE_DATAGRAM(t) ((t)&0xE0) +#if LWIP_IPV6 +#define NETCONN_TYPE_IPV6 0x08 +#define NETCONNTYPE_ISIPV6(t) (((t)&NETCONN_TYPE_IPV6) != 0) +#define NETCONNTYPE_ISUDPLITE(t) (((t)&0xF3) == NETCONN_UDPLITE) +#define NETCONNTYPE_ISUDPNOCHKSUM(t) (((t)&0xF3) == NETCONN_UDPNOCHKSUM) +#else /* LWIP_IPV6 */ +#define NETCONNTYPE_ISIPV6(t) (0) +#define NETCONNTYPE_ISUDPLITE(t) ((t) == NETCONN_UDPLITE) +#define NETCONNTYPE_ISUDPNOCHKSUM(t) ((t) == NETCONN_UDPNOCHKSUM) +#endif /* LWIP_IPV6 */ + +/** @ingroup netconn_common + * Protocol family and type of the netconn + */ +enum netconn_type { + NETCONN_INVALID = 0, + /** TCP IPv4 */ + NETCONN_TCP = 0x10, +#if LWIP_IPV6 + /** TCP IPv6 */ + NETCONN_TCP_IPV6 = NETCONN_TCP | NETCONN_TYPE_IPV6 /* 0x18 */, +#endif /* LWIP_IPV6 */ + /** UDP IPv4 */ + NETCONN_UDP = 0x20, + /** UDP IPv4 lite */ + NETCONN_UDPLITE = 0x21, + /** UDP IPv4 no checksum */ + NETCONN_UDPNOCHKSUM = 0x22, + +#if LWIP_IPV6 + /** UDP IPv6 (dual-stack by default, unless you call @ref netconn_set_ipv6only) */ + NETCONN_UDP_IPV6 = NETCONN_UDP | NETCONN_TYPE_IPV6 /* 0x28 */, + /** UDP IPv6 lite (dual-stack by default, unless you call @ref netconn_set_ipv6only) */ + NETCONN_UDPLITE_IPV6 = NETCONN_UDPLITE | NETCONN_TYPE_IPV6 /* 0x29 */, + /** UDP IPv6 no checksum (dual-stack by default, unless you call @ref netconn_set_ipv6only) */ + NETCONN_UDPNOCHKSUM_IPV6 = NETCONN_UDPNOCHKSUM | NETCONN_TYPE_IPV6 /* 0x2a */, +#endif /* LWIP_IPV6 */ + + /** Raw connection IPv4 */ + NETCONN_RAW = 0x40 +#if LWIP_IPV6 + /** Raw connection IPv6 (dual-stack by default, unless you call @ref netconn_set_ipv6only) */ + , NETCONN_RAW_IPV6 = NETCONN_RAW | NETCONN_TYPE_IPV6 /* 0x48 */ +#endif /* LWIP_IPV6 */ +}; + +/** Current state of the netconn. Non-TCP netconns are always + * in state NETCONN_NONE! */ +enum netconn_state { + NETCONN_NONE, + NETCONN_WRITE, + NETCONN_LISTEN, + NETCONN_CONNECT, + NETCONN_CLOSE +}; + +/** Used to inform the callback function about changes + * + * Event explanation: + * + * In the netconn implementation, there are three ways to block a client: + * + * - accept mbox (sys_arch_mbox_fetch(&conn->acceptmbox, &accept_ptr, 0); in netconn_accept()) + * - receive mbox (sys_arch_mbox_fetch(&conn->recvmbox, &buf, 0); in netconn_recv_data()) + * - send queue is full (sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); in lwip_netconn_do_write()) + * + * The events have to be seen as signals about the state of these mboxes/semaphores. For non-blocking + * connections, you need to know in advance whether a call to a netconn function would block or not, + * and these events tell you about that. + * + * RCVPLUS events say: Safe to perform a potentially blocking call once more. + * They are counted in sockets - three RCVPLUS events for accept mbox means you are safe + * to call netconn_accept 3 times without being blocked. + * Same thing for receive mbox. + * + * RCVMINUS events say: Your call to a possibly blocking function is "acknowledged". + * Socket implementation decrements the counter.
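+ *
+ * Editor's note: the sketch below is an illustration added to this document,
+ * not part of the upstream lwIP sources. It shows one way an application
+ * could count RCVPLUS/RCVMINUS events (my_rcv_credit and my_netconn_cb are
+ * hypothetical; the callback is registered via netconn_new_with_callback()):
+ * @code{.c}
+ * static volatile int my_rcv_credit;
+ *
+ * static void my_netconn_cb(struct netconn *conn, enum netconn_evt evt, u16_t len)
+ * {
+ *   LWIP_UNUSED_ARG(conn); LWIP_UNUSED_ARG(len);
+ *   if (evt == NETCONN_EVT_RCVPLUS) {
+ *     my_rcv_credit++;   // one more recv/accept call is safe
+ *   } else if (evt == NETCONN_EVT_RCVMINUS) {
+ *     my_rcv_credit--;   // one pending call has been acknowledged
+ *   }
+ * }
+ * @endcode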
+ * + * For TX, there is no need to count, it's merely a flag. SENDPLUS means you may send something. + * SENDPLUS occurs when enough data was delivered to peer so netconn_send() can be called again. + * A SENDMINUS event occurs when the next call to netconn_send() would be blocking. + */ +enum netconn_evt { + NETCONN_EVT_RCVPLUS, + NETCONN_EVT_RCVMINUS, + NETCONN_EVT_SENDPLUS, + NETCONN_EVT_SENDMINUS, + NETCONN_EVT_ERROR +}; + +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +/** Used for netconn_join_leave_group() */ +enum netconn_igmp { + NETCONN_JOIN, + NETCONN_LEAVE +}; +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ + +#if LWIP_DNS +/* Used for netconn_gethostbyname_addrtype(), these should match the DNS_ADDRTYPE defines in dns.h */ +#define NETCONN_DNS_DEFAULT NETCONN_DNS_IPV4_IPV6 +#define NETCONN_DNS_IPV4 0 +#define NETCONN_DNS_IPV6 1 +#define NETCONN_DNS_IPV4_IPV6 2 /* try to resolve IPv4 first, try IPv6 if IPv4 fails only */ +#define NETCONN_DNS_IPV6_IPV4 3 /* try to resolve IPv6 first, try IPv4 if IPv6 fails only */ +#endif /* LWIP_DNS */ + +/* forward-declare some structs to avoid including their headers */ +struct ip_pcb; +struct tcp_pcb; +struct udp_pcb; +struct raw_pcb; +struct netconn; +struct api_msg; + +/** A callback prototype to inform about events for a netconn */ +typedef void (* netconn_callback)(struct netconn *, enum netconn_evt, u16_t len); + +/** A netconn descriptor */ +struct netconn { + /** type of the netconn (TCP, UDP or RAW) */ + enum netconn_type type; + /** current state of the netconn */ + enum netconn_state state; + /** the lwIP internal protocol control block */ + union { + struct ip_pcb *ip; + struct tcp_pcb *tcp; + struct udp_pcb *udp; + struct raw_pcb *raw; + } pcb; + /** the last asynchronous unreported error this netconn had */ + err_t pending_err; +#if !LWIP_NETCONN_SEM_PER_THREAD + /** sem that is used to synchronously execute functions in the core context */ + sys_sem_t op_completed; +#endif + /** mbox where received packets are stored until they are fetched + by the netconn application thread (can grow quite big) */ + sys_mbox_t recvmbox; +#if LWIP_TCP + /** mbox where new connections are stored until processed + by the application thread */ + sys_mbox_t acceptmbox; +#endif /* LWIP_TCP */ +#if LWIP_NETCONN_FULLDUPLEX + /** number of threads waiting on an mbox. This is required to unblock + all threads when closing while threads are waiting. */ + int mbox_threads_waiting; +#endif + /** only used for socket layer */ +#if LWIP_SOCKET + int socket; +#endif /* LWIP_SOCKET */ +#if LWIP_SO_SNDTIMEO + /** timeout to wait for sending data (which means enqueueing data for sending + in internal buffers) in milliseconds */ + s32_t send_timeout; +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_RCVTIMEO + /** timeout in milliseconds to wait for new data to be received + (or connections to arrive for listening netconns) */ + u32_t recv_timeout; +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF + /** maximum amount of bytes queued in recvmbox + not used for TCP: adjust TCP_WND instead!
*/ + int recv_bufsize; + /** number of bytes currently in recvmbox to be received, + tested against recv_bufsize to limit bytes on recvmbox + for UDP and RAW, used for FIONREAD */ + int recv_avail; +#endif /* LWIP_SO_RCVBUF */ +#if LWIP_SO_LINGER + /** values <0 mean linger is disabled, values > 0 are seconds to linger */ + s16_t linger; +#endif /* LWIP_SO_LINGER */ + /** flags holding more netconn-internal state, see NETCONN_FLAG_* defines */ + u8_t flags; +#if LWIP_TCP + /** TCP: when data passed to netconn_write doesn't fit into the send buffer, + this temporarily stores the message. + Also used during connect and close. */ + struct api_msg *current_msg; +#endif /* LWIP_TCP */ + /** A callback function that is informed about events for this netconn */ + netconn_callback callback; +}; + +/** This vector type is passed to @ref netconn_write_vectors_partly to send + * multiple buffers at once. + * ATTENTION: This type has to directly map struct iovec since one is cast + * into the other! + */ +struct netvector { + /** pointer to the application buffer that contains the data to send */ + const void *ptr; + /** size of the application data to send */ + size_t len; +}; + +/** Register a network connection event */ +#define API_EVENT(c,e,l) if (c->callback) { \ + (*c->callback)(c, e, l); \ + } + +/* Network connection functions: */ + +/** @ingroup netconn_common + * Create new netconn connection + * @param t @ref netconn_type */ +#define netconn_new(t) netconn_new_with_proto_and_callback(t, 0, NULL) +#define netconn_new_with_callback(t, c) netconn_new_with_proto_and_callback(t, 0, c) +struct netconn *netconn_new_with_proto_and_callback(enum netconn_type t, u8_t proto, + netconn_callback callback); +err_t netconn_prepare_delete(struct netconn *conn); +err_t netconn_delete(struct netconn *conn); +/** Get the type of a netconn (as enum netconn_type).
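+ *
+ * Editor's note: a sequential-API client sketch added to this document (not
+ * upstream lwIP text), using the functions declared in this header; the
+ * server_addr variable is hypothetical and error handling is shortened.
+ * @code{.c}
+ * struct netconn *c = netconn_new(NETCONN_TCP);
+ * if (c != NULL) {
+ *   if (netconn_connect(c, &server_addr, 80) == ERR_OK) {
+ *     netconn_write(c, "GET / HTTP/1.0\r\n\r\n", 18, NETCONN_COPY);
+ *     struct netbuf *nb;
+ *     while (netconn_recv(c, &nb) == ERR_OK) {
+ *       // consume netbuf_data(nb, ...) here
+ *       netbuf_delete(nb);
+ *     }
+ *   }
+ *   netconn_delete(c);
+ * }
+ * @endcode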
*/ +#define netconn_type(conn) (conn->type) + +err_t netconn_getaddr(struct netconn *conn, ip_addr_t *addr, + u16_t *port, u8_t local); +/** @ingroup netconn_common */ +#define netconn_peer(c,i,p) netconn_getaddr(c,i,p,0) +/** @ingroup netconn_common */ +#define netconn_addr(c,i,p) netconn_getaddr(c,i,p,1) + +err_t netconn_bind(struct netconn *conn, const ip_addr_t *addr, u16_t port); +err_t netconn_bind_if(struct netconn *conn, u8_t if_idx); +err_t netconn_connect(struct netconn *conn, const ip_addr_t *addr, u16_t port); +err_t netconn_disconnect (struct netconn *conn); +err_t netconn_listen_with_backlog(struct netconn *conn, u8_t backlog); +/** @ingroup netconn_tcp */ +#define netconn_listen(conn) netconn_listen_with_backlog(conn, TCP_DEFAULT_LISTEN_BACKLOG) +err_t netconn_accept(struct netconn *conn, struct netconn **new_conn); +err_t netconn_recv(struct netconn *conn, struct netbuf **new_buf); +err_t netconn_recv_udp_raw_netbuf(struct netconn *conn, struct netbuf **new_buf); +err_t netconn_recv_udp_raw_netbuf_flags(struct netconn *conn, struct netbuf **new_buf, u8_t apiflags); +err_t netconn_recv_tcp_pbuf(struct netconn *conn, struct pbuf **new_buf); +err_t netconn_recv_tcp_pbuf_flags(struct netconn *conn, struct pbuf **new_buf, u8_t apiflags); +err_t netconn_tcp_recvd(struct netconn *conn, size_t len); +err_t netconn_sendto(struct netconn *conn, struct netbuf *buf, + const ip_addr_t *addr, u16_t port); +err_t netconn_send(struct netconn *conn, struct netbuf *buf); +err_t netconn_write_partly(struct netconn *conn, const void *dataptr, size_t size, + u8_t apiflags, size_t *bytes_written); +err_t netconn_write_vectors_partly(struct netconn *conn, struct netvector *vectors, u16_t vectorcnt, + u8_t apiflags, size_t *bytes_written); +/** @ingroup netconn_tcp */ +#define netconn_write(conn, dataptr, size, apiflags) \ + netconn_write_partly(conn, dataptr, size, apiflags, NULL) +err_t netconn_close(struct netconn *conn); +err_t netconn_shutdown(struct netconn *conn, u8_t shut_rx, u8_t shut_tx); + +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +err_t netconn_join_leave_group(struct netconn *conn, const ip_addr_t *multiaddr, + const ip_addr_t *netif_addr, enum netconn_igmp join_or_leave); +err_t netconn_join_leave_group_netif(struct netconn *conn, const ip_addr_t *multiaddr, + u8_t if_idx, enum netconn_igmp join_or_leave); +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ +#if LWIP_DNS +#if LWIP_IPV4 && LWIP_IPV6 +err_t netconn_gethostbyname_addrtype(const char *name, ip_addr_t *addr, u8_t dns_addrtype); +#define netconn_gethostbyname(name, addr) netconn_gethostbyname_addrtype(name, addr, NETCONN_DNS_DEFAULT) +#else /* LWIP_IPV4 && LWIP_IPV6 */ +err_t netconn_gethostbyname(const char *name, ip_addr_t *addr); +#define netconn_gethostbyname_addrtype(name, addr, dns_addrtype) netconn_gethostbyname(name, addr) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#endif /* LWIP_DNS */ + +err_t netconn_err(struct netconn *conn); +#define netconn_recv_bufsize(conn) ((conn)->recv_bufsize) + +#define netconn_set_flags(conn, set_flags) do { (conn)->flags = (u8_t)((conn)->flags | (set_flags)); } while(0) +#define netconn_clear_flags(conn, clr_flags) do { (conn)->flags = (u8_t)((conn)->flags & (u8_t)(~(clr_flags) & 0xff)); } while(0) +#define netconn_is_flag_set(conn, flag) (((conn)->flags & (flag)) != 0) + +/** Set the blocking status of netconn calls (@todo: write/send is missing) */ +#define netconn_set_nonblocking(conn, val) do { if(val) { \ + netconn_set_flags(conn, NETCONN_FLAG_NON_BLOCKING); \ +} else { \ + 
netconn_clear_flags(conn, NETCONN_FLAG_NON_BLOCKING); }} while(0) +/** Get the blocking status of netconn calls (@todo: write/send is missing) */ +#define netconn_is_nonblocking(conn) (((conn)->flags & NETCONN_FLAG_NON_BLOCKING) != 0) + +#if LWIP_IPV6 +/** @ingroup netconn_common + * TCP: Set the IPv6 ONLY status of netconn calls (see NETCONN_FLAG_IPV6_V6ONLY) + */ +#define netconn_set_ipv6only(conn, val) do { if(val) { \ + netconn_set_flags(conn, NETCONN_FLAG_IPV6_V6ONLY); \ +} else { \ + netconn_clear_flags(conn, NETCONN_FLAG_IPV6_V6ONLY); }} while(0) +/** @ingroup netconn_common + * TCP: Get the IPv6 ONLY status of netconn calls (see NETCONN_FLAG_IPV6_V6ONLY) + */ +#define netconn_get_ipv6only(conn) (((conn)->flags & NETCONN_FLAG_IPV6_V6ONLY) != 0) +#endif /* LWIP_IPV6 */ + +#if LWIP_SO_SNDTIMEO +/** Set the send timeout in milliseconds */ +#define netconn_set_sendtimeout(conn, timeout) ((conn)->send_timeout = (timeout)) +/** Get the send timeout in milliseconds */ +#define netconn_get_sendtimeout(conn) ((conn)->send_timeout) +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_RCVTIMEO +/** Set the receive timeout in milliseconds */ +#define netconn_set_recvtimeout(conn, timeout) ((conn)->recv_timeout = (timeout)) +/** Get the receive timeout in milliseconds */ +#define netconn_get_recvtimeout(conn) ((conn)->recv_timeout) +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF +/** Set the receive buffer in bytes */ +#define netconn_set_recvbufsize(conn, recvbufsize) ((conn)->recv_bufsize = (recvbufsize)) +/** Get the receive buffer in bytes */ +#define netconn_get_recvbufsize(conn) ((conn)->recv_bufsize) +#endif /* LWIP_SO_RCVBUF*/ + +#if LWIP_NETCONN_SEM_PER_THREAD +void netconn_thread_init(void); +void netconn_thread_cleanup(void); +#else /* LWIP_NETCONN_SEM_PER_THREAD */ +#define netconn_thread_init() +#define netconn_thread_cleanup() +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_NETCONN || LWIP_SOCKET */ + +#endif /* LWIP_HDR_API_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_proxyconnect.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_proxyconnect.h new file mode 100644 index 0000000..65d3127 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_proxyconnect.h @@ -0,0 +1,79 @@ +/** + * @file + * Application layered TCP connection API that executes a proxy-connect. + * + * This file provides a starting layer that executes a proxy-connect e.g. to + * set up TLS connections through a http proxy. + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#ifndef LWIP_HDR_APPS_ALTCP_PROXYCONNECT_H +#define LWIP_HDR_APPS_ALTCP_PROXYCONNECT_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct altcp_proxyconnect_config { + ip_addr_t proxy_addr; + u16_t proxy_port; +}; + + +struct altcp_pcb *altcp_proxyconnect_new(struct altcp_proxyconnect_config *config, struct altcp_pcb *inner_pcb); +struct altcp_pcb *altcp_proxyconnect_new_tcp(struct altcp_proxyconnect_config *config, u8_t ip_type); + +struct altcp_pcb *altcp_proxyconnect_alloc(void *arg, u8_t ip_type); + +#if LWIP_ALTCP_TLS +struct altcp_proxyconnect_tls_config { + struct altcp_proxyconnect_config proxy; + struct altcp_tls_config *tls_config; +}; + +struct altcp_pcb *altcp_proxyconnect_tls_alloc(void *arg, u8_t ip_type); +#endif /* LWIP_ALTCP_TLS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_ALTCP */ +#endif /* LWIP_HDR_APPS_ALTCP_PROXYCONNECT_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_tls_mbedtls_opts.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_tls_mbedtls_opts.h new file mode 100644 index 0000000..36cddd9 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_tls_mbedtls_opts.h @@ -0,0 +1,67 @@ +/** + * @file + * Application layered TCP/TLS connection API (to be used from TCPIP thread) + * + * This file contains options for an mbedtls port of the TLS layer. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_TLS_OPTS_H +#define LWIP_HDR_ALTCP_TLS_OPTS_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +/** LWIP_ALTCP_TLS_MBEDTLS==1: use mbedTLS for TLS support for altcp API + * mbedtls include directory must be reachable via include search path + */ +#ifndef LWIP_ALTCP_TLS_MBEDTLS +#define LWIP_ALTCP_TLS_MBEDTLS 0 +#endif + +/** Configure debug level of this file */ +#ifndef ALTCP_MBEDTLS_DEBUG +#define ALTCP_MBEDTLS_DEBUG LWIP_DBG_OFF +#endif + +/** Set a session timeout in seconds for the basic session cache + * ATTENTION: Using a session cache can lower security by reusing keys! + */ +#ifndef ALTCP_MBEDTLS_SESSION_CACHE_TIMEOUT_SECONDS +#define ALTCP_MBEDTLS_SESSION_CACHE_TIMEOUT_SECONDS 0 +#endif + +#endif /* LWIP_ALTCP */ + +#endif /* LWIP_HDR_ALTCP_TLS_OPTS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/fs.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/fs.h new file mode 100644 index 0000000..67b9a60 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/fs.h @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
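An illustrative lwipopts.h excerpt wiring the mbedTLS-backed TLS layer into altcp via the options above; LWIP_ALTCP and LWIP_ALTCP_TLS are the core lwIP switches, and the remaining lines simply restate the defaults defined in this header:

/* lwipopts.h (excerpt, illustrative) */
#define LWIP_ALTCP                                   1
#define LWIP_ALTCP_TLS                               1
#define LWIP_ALTCP_TLS_MBEDTLS                       1   /* use the mbedTLS port */
#define ALTCP_MBEDTLS_DEBUG                          LWIP_DBG_OFF
#define ALTCP_MBEDTLS_SESSION_CACHE_TIMEOUT_SECONDS  0   /* session cache off: safer default */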
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_APPS_FS_H +#define LWIP_HDR_APPS_FS_H + +#include "httpd_opts.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define FS_READ_EOF -1 +#define FS_READ_DELAYED -2 + +#if HTTPD_PRECALCULATED_CHECKSUM +struct fsdata_chksum { + u32_t offset; + u16_t chksum; + u16_t len; +}; +#endif /* HTTPD_PRECALCULATED_CHECKSUM */ + +#define FS_FILE_FLAGS_HEADER_INCLUDED 0x01 +#define FS_FILE_FLAGS_HEADER_PERSISTENT 0x02 +#define FS_FILE_FLAGS_HEADER_HTTPVER_1_1 0x04 +#define FS_FILE_FLAGS_SSI 0x08 + +/** Define FS_FILE_EXTENSION_T_DEFINED if you have typedef'ed to your private + * pointer type (defaults to 'void' so the default usage is 'void*') + */ +#ifndef FS_FILE_EXTENSION_T_DEFINED +typedef void fs_file_extension; +#endif + +struct fs_file { + const char *data; + int len; + int index; + /* pextension is free for implementations to hold private (extensional) + arbitrary data, e.g. holding some file state or file system handle */ + fs_file_extension *pextension; +#if HTTPD_PRECALCULATED_CHECKSUM + const struct fsdata_chksum *chksum; + u16_t chksum_count; +#endif /* HTTPD_PRECALCULATED_CHECKSUM */ + u8_t flags; +#if LWIP_HTTPD_CUSTOM_FILES + u8_t is_custom_file; +#endif /* LWIP_HTTPD_CUSTOM_FILES */ +#if LWIP_HTTPD_FILE_STATE + void *state; +#endif /* LWIP_HTTPD_FILE_STATE */ +}; + +#if LWIP_HTTPD_FS_ASYNC_READ +typedef void (*fs_wait_cb)(void *arg); +#endif /* LWIP_HTTPD_FS_ASYNC_READ */ + +err_t fs_open(struct fs_file *file, const char *name); +void fs_close(struct fs_file *file); +#if LWIP_HTTPD_DYNAMIC_FILE_READ +#if LWIP_HTTPD_FS_ASYNC_READ +int fs_read_async(struct fs_file *file, char *buffer, int count, fs_wait_cb callback_fn, void *callback_arg); +#else /* LWIP_HTTPD_FS_ASYNC_READ */ +int fs_read(struct fs_file *file, char *buffer, int count); +#endif /* LWIP_HTTPD_FS_ASYNC_READ */ +#endif /* LWIP_HTTPD_DYNAMIC_FILE_READ */ +#if LWIP_HTTPD_FS_ASYNC_READ +int fs_is_file_ready(struct fs_file *file, fs_wait_cb callback_fn, void *callback_arg); +#endif /* LWIP_HTTPD_FS_ASYNC_READ */ +int fs_bytes_left(struct fs_file *file); + +#if LWIP_HTTPD_FILE_STATE +/** This user-defined function is called when a file is opened. */ +void *fs_state_init(struct fs_file *file, const char *name); +/** This user-defined function is called when a file is closed. */ +void fs_state_free(struct fs_file *file, void *state); +#endif /* #if LWIP_HTTPD_FILE_STATE */ + +struct fsdata_file { + const struct fsdata_file *next; + const unsigned char *name; + const unsigned char *data; + int len; + u8_t flags; +#if HTTPD_PRECALCULATED_CHECKSUM + u16_t chksum_count; + const struct fsdata_chksum *chksum; +#endif /* HTTPD_PRECALCULATED_CHECKSUM */ +}; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_FS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/http_client.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/http_client.h new file mode 100644 index 0000000..8a06308 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/http_client.h @@ -0,0 +1,160 @@ +/** + * @file + * HTTP client + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#ifndef LWIP_HDR_APPS_HTTP_CLIENT_H +#define LWIP_HDR_APPS_HTTP_CLIENT_H + +#include "lwip/opt.h" +#include "lwip/ip_addr.h" +#include "lwip/err.h" +#include "lwip/altcp.h" +#include "lwip/prot/iana.h" +#include "lwip/pbuf.h" + +#if LWIP_TCP && LWIP_CALLBACK_API + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @ingroup httpc + * HTTPC_HAVE_FILE_IO: define this to 1 to have functions dowloading directly + * to disk via fopen/fwrite. + * These functions are example implementations of the interface only. + */ +#ifndef LWIP_HTTPC_HAVE_FILE_IO +#define LWIP_HTTPC_HAVE_FILE_IO 0 +#endif + +/** + * @ingroup httpc + * The default TCP port used for HTTP + */ +#define HTTP_DEFAULT_PORT LWIP_IANA_PORT_HTTP + +/** + * @ingroup httpc + * HTTP client result codes + */ +typedef enum ehttpc_result { + /** File successfully received */ + HTTPC_RESULT_OK = 0, + /** Unknown error */ + HTTPC_RESULT_ERR_UNKNOWN = 1, + /** Connection to server failed */ + HTTPC_RESULT_ERR_CONNECT = 2, + /** Failed to resolve server hostname */ + HTTPC_RESULT_ERR_HOSTNAME = 3, + /** Connection unexpectedly closed by remote server */ + HTTPC_RESULT_ERR_CLOSED = 4, + /** Connection timed out (server didn't respond in time) */ + HTTPC_RESULT_ERR_TIMEOUT = 5, + /** Server responded with an error code */ + HTTPC_RESULT_ERR_SVR_RESP = 6, + /** Local memory error */ + HTTPC_RESULT_ERR_MEM = 7, + /** Local abort */ + HTTPC_RESULT_LOCAL_ABORT = 8, + /** Content length mismatch */ + HTTPC_RESULT_ERR_CONTENT_LEN = 9 +} httpc_result_t; + +typedef struct _httpc_state httpc_state_t; + +/** + * @ingroup httpc + * Prototype of a http client callback function + * + * @param arg argument specified when initiating the request + * @param httpc_result result of the http transfer (see enum httpc_result_t) + * @param rx_content_len number of bytes received (without headers) + * @param srv_res this contains the http status code received (if any) + * @param err an error returned by internal lwip functions, can help to specify + * the source of the error but must not necessarily be != ERR_OK + */ +typedef void (*httpc_result_fn)(void *arg, httpc_result_t httpc_result, u32_t rx_content_len, u32_t srv_res, err_t err); + +/** + * @ingroup httpc + * Prototype of http client callback: called when the headers are received + * + * @param connection http client connection 
+ * @param arg argument specified when initiating the request + * @param hdr header pbuf(s) (may contain data also) + * @param hdr_len length of the heders in 'hdr' + * @param content_len content length as received in the headers (-1 if not received) + * @return if != ERR_OK is returned, the connection is aborted + */ +typedef err_t (*httpc_headers_done_fn)(httpc_state_t *connection, void *arg, struct pbuf *hdr, u16_t hdr_len, u32_t content_len); + +typedef struct _httpc_connection { + ip_addr_t proxy_addr; + u16_t proxy_port; + u8_t use_proxy; + /* @todo: add username:pass? */ + +#if LWIP_ALTCP + altcp_allocator_t *altcp_allocator; +#endif + + /* this callback is called when the transfer is finished (or aborted) */ + httpc_result_fn result_fn; + /* this callback is called after receiving the http headers + It can abort the connection by returning != ERR_OK */ + httpc_headers_done_fn headers_done_fn; +} httpc_connection_t; + +err_t httpc_get_file(const ip_addr_t* server_addr, u16_t port, const char* uri, const httpc_connection_t *settings, + altcp_recv_fn recv_fn, void* callback_arg, httpc_state_t **connection); +err_t httpc_get_file_dns(const char* server_name, u16_t port, const char* uri, const httpc_connection_t *settings, + altcp_recv_fn recv_fn, void* callback_arg, httpc_state_t **connection); + +#if LWIP_HTTPC_HAVE_FILE_IO +err_t httpc_get_file_to_disk(const ip_addr_t* server_addr, u16_t port, const char* uri, const httpc_connection_t *settings, + void* callback_arg, const char* local_file_name, httpc_state_t **connection); +err_t httpc_get_file_dns_to_disk(const char* server_name, u16_t port, const char* uri, const httpc_connection_t *settings, + void* callback_arg, const char* local_file_name, httpc_state_t **connection); +#endif /* LWIP_HTTPC_HAVE_FILE_IO */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_TCP && LWIP_CALLBACK_API */ + +#endif /* LWIP_HDR_APPS_HTTP_CLIENT_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd.h new file mode 100644 index 0000000..e872429 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd.h @@ -0,0 +1,255 @@ +/** + * @file + * HTTP server + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
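Putting the http_client.h declarations above together, a sketch of a plain-HTTP GET by DNS name; the host, URI and log output are illustrative, the body callback only frees each pbuf, and 'settings' is kept static on the assumption that it must stay valid for the duration of the request:

#include <stdio.h>
#include <string.h>
#include "lwip/apps/http_client.h"

static void dl_result(void *arg, httpc_result_t httpc_result, u32_t rx_content_len,
                      u32_t srv_res, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  printf("GET finished: result %d, HTTP status %lu, %lu body bytes, err %d\n",
         (int)httpc_result, (unsigned long)srv_res, (unsigned long)rx_content_len, (int)err);
}

static err_t dl_headers(httpc_state_t *connection, void *arg, struct pbuf *hdr,
                        u16_t hdr_len, u32_t content_len)
{
  LWIP_UNUSED_ARG(connection); LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(hdr); LWIP_UNUSED_ARG(hdr_len);
  printf("headers done, content length %lu\n", (unsigned long)content_len);
  return ERR_OK;                          /* returning != ERR_OK aborts the transfer */
}

static err_t dl_body(void *arg, struct altcp_pcb *conn, struct pbuf *p, err_t err)
{
  LWIP_UNUSED_ARG(arg); LWIP_UNUSED_ARG(conn); LWIP_UNUSED_ARG(err);
  if (p != NULL) {
    /* consume or copy the payload fragment here */
    pbuf_free(p);
  }
  return ERR_OK;
}

static httpc_connection_t dl_settings;
static httpc_state_t *dl_request;

void http_client_example(void)
{
  memset(&dl_settings, 0, sizeof(dl_settings));
  dl_settings.result_fn = dl_result;
  dl_settings.headers_done_fn = dl_headers;
  httpc_get_file_dns("example.com", HTTP_DEFAULT_PORT, "/index.html",
                     &dl_settings, dl_body, NULL, &dl_request);
}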
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + * This version of the file has been modified by Texas Instruments to offer + * simple server-side-include (SSI) and Common Gateway Interface (CGI) + * capability. + */ + +#ifndef LWIP_HDR_APPS_HTTPD_H +#define LWIP_HDR_APPS_HTTPD_H + +#include "httpd_opts.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_HTTPD_CGI + +/** + * @ingroup httpd + * Function pointer for a CGI script handler. + * + * This function is called each time the HTTPD server is asked for a file + * whose name was previously registered as a CGI function using a call to + * http_set_cgi_handlers. The iIndex parameter provides the index of the + * CGI within the cgis array passed to http_set_cgi_handlers. Parameters + * pcParam and pcValue provide access to the parameters provided along with + * the URI. iNumParams provides a count of the entries in the pcParam and + * pcValue arrays. Each entry in the pcParam array contains the name of a + * parameter with the corresponding entry in the pcValue array containing the + * value for that parameter. Note that pcParam may contain multiple elements + * with the same name if, for example, a multi-selection list control is used + * in the form generating the data. + * + * The function should return a pointer to a character string which is the + * path and filename of the response that is to be sent to the connected + * browser, for example "/thanks.htm" or "/response/error.ssi". + * + * The maximum number of parameters that will be passed to this function via + * iNumParams is defined by LWIP_HTTPD_MAX_CGI_PARAMETERS. Any parameters in + * the incoming HTTP request above this number will be discarded. + * + * Requests intended for use by this CGI mechanism must be sent using the GET + * method (which encodes all parameters within the URI rather than in a block + * later in the request). Attempts to use the POST method will result in the + * request being ignored. + * + */ +typedef const char *(*tCGIHandler)(int iIndex, int iNumParams, char *pcParam[], + char *pcValue[]); + +/** + * @ingroup httpd + * Structure defining the base filename (URL) of a CGI and the associated + * function which is to be called when that URL is requested. + */ +typedef struct +{ + const char *pcCGIName; + tCGIHandler pfnCGIHandler; +} tCGI; + +void http_set_cgi_handlers(const tCGI *pCGIs, int iNumHandlers); + +#endif /* LWIP_HTTPD_CGI */ + +#if LWIP_HTTPD_CGI || LWIP_HTTPD_CGI_SSI + +#if LWIP_HTTPD_CGI_SSI +/* we have to prototype this struct here to make it available for the handler */ +struct fs_file; + +/** Define this generic CGI handler in your application. + * It is called once for every URI with parameters. + * The parameters can be stored to the object passed as connection_state, which + * is allocated to file->state via fs_state_init() from fs_open() or fs_open_custom(). 
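A sketch of the old-style CGI registration described above (LWIP_HTTPD_CGI==1); the URI, parameter name and response page are illustrative and would have to exist in the generated fsdata:

#include <string.h>
#include "lwip/apps/httpd.h"
#include "lwip/def.h"

static const char *led_cgi_handler(int iIndex, int iNumParams,
                                   char *pcParam[], char *pcValue[])
{
  int i;
  LWIP_UNUSED_ARG(iIndex);
  for (i = 0; i < iNumParams; i++) {
    if (strcmp(pcParam[i], "state") == 0) {
      /* act on pcValue[i] here, e.g. "on"/"off" */
    }
  }
  return "/thanks.htm";   /* page (from fsdata) sent back to the browser */
}

static const tCGI cgi_table[] = {
  { "/led.cgi", led_cgi_handler },
};

void cgi_example_init(void)
{
  httpd_init();
  http_set_cgi_handlers(cgi_table, LWIP_ARRAYSIZE(cgi_table));
}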
+ * Content creation via SSI or complete dynamic files can retrieve the CGI params from there. + */ +extern void httpd_cgi_handler(struct fs_file *file, const char* uri, int iNumParams, + char **pcParam, char **pcValue +#if defined(LWIP_HTTPD_FILE_STATE) && LWIP_HTTPD_FILE_STATE + , void *connection_state +#endif /* LWIP_HTTPD_FILE_STATE */ + ); +#endif /* LWIP_HTTPD_CGI_SSI */ + +#endif /* LWIP_HTTPD_CGI || LWIP_HTTPD_CGI_SSI */ + +#if LWIP_HTTPD_SSI + +/** + * @ingroup httpd + * Function pointer for the SSI tag handler callback. + * + * This function will be called each time the HTTPD server detects a tag of the + * form in files with extensions mentioned in the g_pcSSIExtensions + * array (currently .shtml, .shtm, .ssi, .xml, .json) where "name" appears as + * one of the tags supplied to http_set_ssi_handler in the tags array. The + * returned insert string, which will be appended after the the string + * "" in file sent back to the client, should be written to pointer + * pcInsert. iInsertLen contains the size of the buffer pointed to by + * pcInsert. The iIndex parameter provides the zero-based index of the tag as + * found in the tags array and identifies the tag that is to be processed. + * + * The handler returns the number of characters written to pcInsert excluding + * any terminating NULL or HTTPD_SSI_TAG_UNKNOWN when tag is not recognized. + * + * Note that the behavior of this SSI mechanism is somewhat different from the + * "normal" SSI processing as found in, for example, the Apache web server. In + * this case, the inserted text is appended following the SSI tag rather than + * replacing the tag entirely. This allows for an implementation that does not + * require significant additional buffering of output data yet which will still + * offer usable SSI functionality. One downside to this approach is when + * attempting to use SSI within JavaScript. The SSI tag is structured to + * resemble an HTML comment but this syntax does not constitute a comment + * within JavaScript and, hence, leaving the tag in place will result in + * problems in these cases. In order to avoid these problems, define + * LWIP_HTTPD_SSI_INCLUDE_TAG as zero in your lwip options file, or use JavaScript + * style block comments in the form / * # name * / (without the spaces). + */ +typedef u16_t (*tSSIHandler)( +#if LWIP_HTTPD_SSI_RAW + const char* ssi_tag_name, +#else /* LWIP_HTTPD_SSI_RAW */ + int iIndex, +#endif /* LWIP_HTTPD_SSI_RAW */ + char *pcInsert, int iInsertLen +#if LWIP_HTTPD_SSI_MULTIPART + , u16_t current_tag_part, u16_t *next_tag_part +#endif /* LWIP_HTTPD_SSI_MULTIPART */ +#if defined(LWIP_HTTPD_FILE_STATE) && LWIP_HTTPD_FILE_STATE + , void *connection_state +#endif /* LWIP_HTTPD_FILE_STATE */ + ); + +/** Set the SSI handler function + * (if LWIP_HTTPD_SSI_RAW==1, only the first argument is used) + */ +void http_set_ssi_handler(tSSIHandler pfnSSIHandler, + const char **ppcTags, int iNumTags); + +/** For LWIP_HTTPD_SSI_RAW==1, return this to indicate the tag is unknown. + * In this case, the webserver writes a warning into the page. + * You can also just return 0 to write nothing for unknown tags. + */ +#define HTTPD_SSI_TAG_UNKNOWN 0xFFFF + +#endif /* LWIP_HTTPD_SSI */ + +#if LWIP_HTTPD_SUPPORT_POST + +/* These functions must be implemented by the application */ + +/** + * @ingroup httpd + * Called when a POST request has been received. The application can decide + * whether to accept it or not. 
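A matching sketch for the SSI handler described above, using the default option set (LWIP_HTTPD_SSI==1, LWIP_HTTPD_SSI_RAW==0, no multipart or file state); the tag names and insert texts are illustrative:

#include <stdio.h>
#include "lwip/apps/httpd.h"
#include "lwip/def.h"
#include "lwip/sys.h"

static const char *ssi_tags[] = { "uptime", "temp" };

static u16_t ssi_handler(int iIndex, char *pcInsert, int iInsertLen)
{
  int written = 0;
  switch (iIndex) {
    case 0: /* <!--#uptime--> */
      written = snprintf(pcInsert, iInsertLen, "%lu ms", (unsigned long)sys_now());
      break;
    case 1: /* <!--#temp--> */
      written = snprintf(pcInsert, iInsertLen, "%d C", 25); /* plug in a real sensor reading here */
      break;
  }
  /* return the number of characters written, excluding the terminating NUL */
  return (written > 0) ? (u16_t)LWIP_MIN(written, iInsertLen - 1) : 0;
}

void ssi_example_init(void)
{
  http_set_ssi_handler(ssi_handler, ssi_tags, LWIP_ARRAYSIZE(ssi_tags));
}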
+ * + * @param connection Unique connection identifier, valid until httpd_post_end + * is called. + * @param uri The HTTP header URI receiving the POST request. + * @param http_request The raw HTTP request (the first packet, normally). + * @param http_request_len Size of 'http_request'. + * @param content_len Content-Length from HTTP header. + * @param response_uri Filename of response file, to be filled when denying the + * request + * @param response_uri_len Size of the 'response_uri' buffer. + * @param post_auto_wnd Set this to 0 to let the callback code handle window + * updates by calling 'httpd_post_data_recved' (to throttle rx speed) + * default is 1 (httpd handles window updates automatically) + * @return ERR_OK: Accept the POST request, data may be passed in + * another err_t: Deny the POST request, send back 'bad request'. + */ +err_t httpd_post_begin(void *connection, const char *uri, const char *http_request, + u16_t http_request_len, int content_len, char *response_uri, + u16_t response_uri_len, u8_t *post_auto_wnd); + +/** + * @ingroup httpd + * Called for each pbuf of data that has been received for a POST. + * ATTENTION: The application is responsible for freeing the pbufs passed in! + * + * @param connection Unique connection identifier. + * @param p Received data. + * @return ERR_OK: Data accepted. + * another err_t: Data denied, http_post_get_response_uri will be called. + */ +err_t httpd_post_receive_data(void *connection, struct pbuf *p); + +/** + * @ingroup httpd + * Called when all data is received or when the connection is closed. + * The application must return the filename/URI of a file to send in response + * to this POST request. If the response_uri buffer is untouched, a 404 + * response is returned. + * + * @param connection Unique connection identifier. + * @param response_uri Filename of response file, to be filled when denying the request + * @param response_uri_len Size of the 'response_uri' buffer. + */ +void httpd_post_finished(void *connection, char *response_uri, u16_t response_uri_len); + +#if LWIP_HTTPD_POST_MANUAL_WND +void httpd_post_data_recved(void *connection, u16_t recved_len); +#endif /* LWIP_HTTPD_POST_MANUAL_WND */ + +#endif /* LWIP_HTTPD_SUPPORT_POST */ + +void httpd_init(void); + +#if HTTPD_ENABLE_HTTPS +struct altcp_tls_config; +void httpd_inits(struct altcp_tls_config *conf); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_HTTPD_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd_opts.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd_opts.h new file mode 100644 index 0000000..8723961 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd_opts.h @@ -0,0 +1,396 @@ +/** + * @file + * HTTP server options list + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
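A sketch of the three application-provided POST hooks declared above (LWIP_HTTPD_SUPPORT_POST==1, automatic window updates); the URI and response pages are illustrative:

#include <stdio.h>
#include <string.h>
#include "lwip/apps/httpd.h"

static void *post_conn;   /* the single POST tracked in this sketch */

err_t httpd_post_begin(void *connection, const char *uri, const char *http_request,
                       u16_t http_request_len, int content_len, char *response_uri,
                       u16_t response_uri_len, u8_t *post_auto_wnd)
{
  LWIP_UNUSED_ARG(http_request); LWIP_UNUSED_ARG(http_request_len);
  LWIP_UNUSED_ARG(content_len);
  *post_auto_wnd = 1;                                 /* let httpd manage the TCP window */
  if ((post_conn == NULL) && (strcmp(uri, "/login") == 0)) {
    post_conn = connection;
    return ERR_OK;                                    /* accept this POST */
  }
  snprintf(response_uri, response_uri_len, "/404.html");
  return ERR_VAL;                                     /* deny everything else */
}

err_t httpd_post_receive_data(void *connection, struct pbuf *p)
{
  if (connection == post_conn) {
    /* parse the urlencoded body fragment in p here */
  }
  pbuf_free(p);                                       /* the application must free received pbufs */
  return ERR_OK;
}

void httpd_post_finished(void *connection, char *response_uri, u16_t response_uri_len)
{
  if (connection == post_conn) {
    snprintf(response_uri, response_uri_len, "/session.html");
    post_conn = NULL;
  }
}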
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + * This version of the file has been modified by Texas Instruments to offer + * simple server-side-include (SSI) and Common Gateway Interface (CGI) + * capability. + */ + +#ifndef LWIP_HDR_APPS_HTTPD_OPTS_H +#define LWIP_HDR_APPS_HTTPD_OPTS_H + +#include "lwip/opt.h" +#include "lwip/prot/iana.h" + +/** + * @defgroup httpd_opts Options + * @ingroup httpd + * @{ + */ + +/** Set this to 1 to support CGI (old style). + * + * This old style CGI support works by registering an array of URLs and + * associated CGI handler functions (@ref http_set_cgi_handlers). + * This list is scanned just before fs_open is called from request handling. + * The handler can return a new URL that is used internally by the httpd to + * load the returned page (passed to fs_open). + * + * Use this CGI type e.g. to execute specific actions and return a page that + * does not depend on the CGI parameters. + */ +#if !defined LWIP_HTTPD_CGI || defined __DOXYGEN__ +#define LWIP_HTTPD_CGI 0 +#endif + +/** Set this to 1 to support CGI (new style). + * + * This new style CGI support works by calling a global function + * (@ref tCGIHandler) for all URLs that are found. fs_open is called first + * and the URL can not be written by the CGI handler. Instead, this handler gets + * passed the http file state, an object where it can store information derived + * from the CGI URL or parameters. This file state is later passed to SSI, so + * the SSI code can return data depending on CGI input. + * + * Use this CGI handler if you want CGI information passed on to SSI. + */ +#if !defined LWIP_HTTPD_CGI_SSI || defined __DOXYGEN__ +#define LWIP_HTTPD_CGI_SSI 0 +#endif + +/** Set this to 1 to support SSI (Server-Side-Includes) + * + * In contrast to other http servers, this only calls a preregistered callback + * function (@see http_set_ssi_handler) for each tag (in the format of + * ) encountered in SSI-enabled pages. + * SSI-enabled pages must have one of the predefined SSI-enabled file extensions. + * All files with one of these extensions are parsed when sent. + * + * A downside of the current SSI implementation is that persistent connections + * don't work, as the file length is not known in advance (and httpd currently + * relies on the Content-Length header for persistent connections). + * + * To save memory, the maximum tag length is limited (@see LWIP_HTTPD_MAX_TAG_NAME_LEN). + * To save memory, the maximum insertion string length is limited (@see + * LWIP_HTTPD_MAX_TAG_INSERT_LEN). If this is not enought, @ref LWIP_HTTPD_SSI_MULTIPART + * can be used. 
+ */ +#if !defined LWIP_HTTPD_SSI || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI 0 +#endif + +/** Set this to 1 to implement an SSI tag handler callback that gets a const char* + * to the tag (instead of an index into a pre-registered array of known tags) + * If this is 0, the SSI handler callback function is only called pre-registered tags. + */ +#if !defined LWIP_HTTPD_SSI_RAW || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI_RAW 0 +#endif + +/** Set this to 0 to prevent parsing the file extension at runtime to decide + * if a file should be scanned for SSI tags or not. + * Default is 1 (file extensions are checked using the g_pcSSIExtensions array) + * Set to 2 to override this runtime test function. + * + * This is enabled by default, but if you only use a newer version of makefsdata + * supporting the "-ssi" option, this info is already present in + */ +#if !defined LWIP_HTTPD_SSI_BY_FILE_EXTENSION || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI_BY_FILE_EXTENSION 1 +#endif + +/** Set this to 1 to support HTTP POST */ +#if !defined LWIP_HTTPD_SUPPORT_POST || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_POST 0 +#endif + +/* The maximum number of parameters that the CGI handler can be sent. */ +#if !defined LWIP_HTTPD_MAX_CGI_PARAMETERS || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_CGI_PARAMETERS 16 +#endif + +/** LWIP_HTTPD_SSI_MULTIPART==1: SSI handler function is called with 2 more + * arguments indicating a counter for insert string that are too long to be + * inserted at once: the SSI handler function must then set 'next_tag_part' + * which will be passed back to it in the next call. */ +#if !defined LWIP_HTTPD_SSI_MULTIPART || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI_MULTIPART 0 +#endif + +/* The maximum length of the string comprising the SSI tag name + * ATTENTION: tags longer than this are ignored, not truncated! + */ +#if !defined LWIP_HTTPD_MAX_TAG_NAME_LEN || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_TAG_NAME_LEN 8 +#endif + +/* The maximum length of string that can be returned to replace any given tag + * If this buffer is not long enough, use LWIP_HTTPD_SSI_MULTIPART. + */ +#if !defined LWIP_HTTPD_MAX_TAG_INSERT_LEN || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_TAG_INSERT_LEN 192 +#endif + +#if !defined LWIP_HTTPD_POST_MANUAL_WND || defined __DOXYGEN__ +#define LWIP_HTTPD_POST_MANUAL_WND 0 +#endif + +/** This string is passed in the HTTP header as "Server: " */ +#if !defined HTTPD_SERVER_AGENT || defined __DOXYGEN__ +#define HTTPD_SERVER_AGENT "lwIP/" LWIP_VERSION_STRING " (http://savannah.nongnu.org/projects/lwip)" +#endif + +/** Set this to 1 if you want to include code that creates HTTP headers + * at runtime. Default is off: HTTP headers are then created statically + * by the makefsdata tool. Static headers mean smaller code size, but + * the (readonly) fsdata will grow a bit as every file includes the HTTP + * header. */ +#if !defined LWIP_HTTPD_DYNAMIC_HEADERS || defined __DOXYGEN__ +#define LWIP_HTTPD_DYNAMIC_HEADERS 0 +#endif + +#if !defined HTTPD_DEBUG || defined __DOXYGEN__ +#define HTTPD_DEBUG LWIP_DBG_OFF +#endif + +/** Set this to 1 to use a memp pool for allocating + * struct http_state instead of the heap. + * If enabled, you'll need to define MEMP_NUM_PARALLEL_HTTPD_CONNS + * (and MEMP_NUM_PARALLEL_HTTPD_SSI_CONNS for SSI) to set the size of + * the pool(s). 
+ */ +#if !defined HTTPD_USE_MEM_POOL || defined __DOXYGEN__ +#define HTTPD_USE_MEM_POOL 0 +#endif + +/** The server port for HTTPD to use */ +#if !defined HTTPD_SERVER_PORT || defined __DOXYGEN__ +#define HTTPD_SERVER_PORT LWIP_IANA_PORT_HTTP +#endif + +/** The https server port for HTTPD to use */ +#if !defined HTTPD_SERVER_PORT_HTTPS || defined __DOXYGEN__ +#define HTTPD_SERVER_PORT_HTTPS LWIP_IANA_PORT_HTTPS +#endif + +/** Enable https support? */ +#if !defined HTTPD_ENABLE_HTTPS || defined __DOXYGEN__ +#define HTTPD_ENABLE_HTTPS 0 +#endif + +/** Maximum retries before the connection is aborted/closed. + * - number of times pcb->poll is called -> default is 4*500ms = 2s; + * - reset when pcb->sent is called + */ +#if !defined HTTPD_MAX_RETRIES || defined __DOXYGEN__ +#define HTTPD_MAX_RETRIES 4 +#endif + +/** The poll delay is X*500ms */ +#if !defined HTTPD_POLL_INTERVAL || defined __DOXYGEN__ +#define HTTPD_POLL_INTERVAL 4 +#endif + +/** Priority for tcp pcbs created by HTTPD (very low by default). + * Lower priorities get killed first when running out of memory. + */ +#if !defined HTTPD_TCP_PRIO || defined __DOXYGEN__ +#define HTTPD_TCP_PRIO TCP_PRIO_MIN +#endif + +/** Set this to 1 to enable timing each file sent */ +#if !defined LWIP_HTTPD_TIMING || defined __DOXYGEN__ +#define LWIP_HTTPD_TIMING 0 +#endif +/** Set this to 1 to enable timing each file sent */ +#if !defined HTTPD_DEBUG_TIMING || defined __DOXYGEN__ +#define HTTPD_DEBUG_TIMING LWIP_DBG_OFF +#endif + +/** Set this to one to show error pages when parsing a request fails instead + of simply closing the connection. */ +#if !defined LWIP_HTTPD_SUPPORT_EXTSTATUS || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_EXTSTATUS 0 +#endif + +/** Set this to 0 to drop support for HTTP/0.9 clients (to save some bytes) */ +#if !defined LWIP_HTTPD_SUPPORT_V09 || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_V09 1 +#endif + +/** Set this to 1 to enable HTTP/1.1 persistent connections. + * ATTENTION: If the generated file system includes HTTP headers, these must + * include the "Connection: keep-alive" header (pass argument "-11" to makefsdata). + */ +#if !defined LWIP_HTTPD_SUPPORT_11_KEEPALIVE || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_11_KEEPALIVE 0 +#endif + +/** Set this to 1 to support HTTP request coming in in multiple packets/pbufs */ +#if !defined LWIP_HTTPD_SUPPORT_REQUESTLIST || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_REQUESTLIST 1 +#endif + +#if LWIP_HTTPD_SUPPORT_REQUESTLIST +/** Number of rx pbufs to enqueue to parse an incoming request (up to the first + newline) */ +#if !defined LWIP_HTTPD_REQ_QUEUELEN || defined __DOXYGEN__ +#define LWIP_HTTPD_REQ_QUEUELEN 5 +#endif + +/** Number of (TCP payload-) bytes (in pbufs) to enqueue to parse and incoming + request (up to the first double-newline) */ +#if !defined LWIP_HTTPD_REQ_BUFSIZE || defined __DOXYGEN__ +#define LWIP_HTTPD_REQ_BUFSIZE LWIP_HTTPD_MAX_REQ_LENGTH +#endif + +/** Defines the maximum length of a HTTP request line (up to the first CRLF, + copied from pbuf into this a global buffer when pbuf- or packet-queues + are received - otherwise the input pbuf is used directly) */ +#if !defined LWIP_HTTPD_MAX_REQ_LENGTH || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_REQ_LENGTH LWIP_MIN(1023, (LWIP_HTTPD_REQ_QUEUELEN * PBUF_POOL_BUFSIZE)) +#endif +#endif /* LWIP_HTTPD_SUPPORT_REQUESTLIST */ + +/** This is the size of a static buffer used when URIs end with '/'. 
+ * In this buffer, the directory requested is concatenated with all the + * configured default file names. + * Set to 0 to disable checking default filenames on non-root directories. + */ +#if !defined LWIP_HTTPD_MAX_REQUEST_URI_LEN || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_REQUEST_URI_LEN 63 +#endif + +/** Maximum length of the filename to send as response to a POST request, + * filled in by the application when a POST is finished. + */ +#if !defined LWIP_HTTPD_POST_MAX_RESPONSE_URI_LEN || defined __DOXYGEN__ +#define LWIP_HTTPD_POST_MAX_RESPONSE_URI_LEN 63 +#endif + +/** Set this to 0 to not send the SSI tag (default is on, so the tag will + * be sent in the HTML page */ +#if !defined LWIP_HTTPD_SSI_INCLUDE_TAG || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI_INCLUDE_TAG 1 +#endif + +/** Set this to 1 to call tcp_abort when tcp_close fails with memory error. + * This can be used to prevent consuming all memory in situations where the + * HTTP server has low priority compared to other communication. */ +#if !defined LWIP_HTTPD_ABORT_ON_CLOSE_MEM_ERROR || defined __DOXYGEN__ +#define LWIP_HTTPD_ABORT_ON_CLOSE_MEM_ERROR 0 +#endif + +/** Set this to 1 to kill the oldest connection when running out of + * memory for 'struct http_state' or 'struct http_ssi_state'. + * ATTENTION: This puts all connections on a linked list, so may be kind of slow. + */ +#if !defined LWIP_HTTPD_KILL_OLD_ON_CONNECTIONS_EXCEEDED || defined __DOXYGEN__ +#define LWIP_HTTPD_KILL_OLD_ON_CONNECTIONS_EXCEEDED 0 +#endif + +/** Set this to 1 to send URIs without extension without headers + * (who uses this at all??) */ +#if !defined LWIP_HTTPD_OMIT_HEADER_FOR_EXTENSIONLESS_URI || defined __DOXYGEN__ +#define LWIP_HTTPD_OMIT_HEADER_FOR_EXTENSIONLESS_URI 0 +#endif + +/** Default: Tags are sent from struct http_state and are therefore volatile */ +#if !defined HTTP_IS_TAG_VOLATILE || defined __DOXYGEN__ +#define HTTP_IS_TAG_VOLATILE(ptr) TCP_WRITE_FLAG_COPY +#endif + +/* By default, the httpd is limited to send 2*pcb->mss to keep resource usage low + when http is not an important protocol in the device. */ +#if !defined HTTPD_LIMIT_SENDING_TO_2MSS || defined __DOXYGEN__ +#define HTTPD_LIMIT_SENDING_TO_2MSS 1 +#endif + +/* Define this to a function that returns the maximum amount of data to enqueue. + The function have this signature: u16_t fn(struct altcp_pcb* pcb); + The best place to define this is the hooks file (@see LWIP_HOOK_FILENAME) */ +#if !defined HTTPD_MAX_WRITE_LEN || defined __DOXYGEN__ +#if HTTPD_LIMIT_SENDING_TO_2MSS +#define HTTPD_MAX_WRITE_LEN(pcb) ((u16_t)(2 * altcp_mss(pcb))) +#endif +#endif + +/*------------------- FS OPTIONS -------------------*/ + +/** Set this to 1 and provide the functions: + * - "int fs_open_custom(struct fs_file *file, const char *name)" + * Called first for every opened file to allow opening files + * that are not included in fsdata(_custom).c + * - "void fs_close_custom(struct fs_file *file)" + * Called to free resources allocated by fs_open_custom(). + */ +#if !defined LWIP_HTTPD_CUSTOM_FILES || defined __DOXYGEN__ +#define LWIP_HTTPD_CUSTOM_FILES 0 +#endif + +/** Set this to 1 to support fs_read() to dynamically read file data. + * Without this (default=off), only one-block files are supported, + * and the contents must be ready after fs_open(). + */ +#if !defined LWIP_HTTPD_DYNAMIC_FILE_READ || defined __DOXYGEN__ +#define LWIP_HTTPD_DYNAMIC_FILE_READ 0 +#endif + +/** Set this to 1 to include an application state argument per file + * that is opened. 
This allows to keep a state per connection/file. + */ +#if !defined LWIP_HTTPD_FILE_STATE || defined __DOXYGEN__ +#define LWIP_HTTPD_FILE_STATE 0 +#endif + +/** HTTPD_PRECALCULATED_CHECKSUM==1: include precompiled checksums for + * predefined (MSS-sized) chunks of the files to prevent having to calculate + * the checksums at runtime. */ +#if !defined HTTPD_PRECALCULATED_CHECKSUM || defined __DOXYGEN__ +#define HTTPD_PRECALCULATED_CHECKSUM 0 +#endif + +/** LWIP_HTTPD_FS_ASYNC_READ==1: support asynchronous read operations + * (fs_read_async returns FS_READ_DELAYED and calls a callback when finished). + */ +#if !defined LWIP_HTTPD_FS_ASYNC_READ || defined __DOXYGEN__ +#define LWIP_HTTPD_FS_ASYNC_READ 0 +#endif + +/** Filename (including path) to use as FS data file */ +#if !defined HTTPD_FSDATA_FILE || defined __DOXYGEN__ +/* HTTPD_USE_CUSTOM_FSDATA: Compatibility with deprecated lwIP option */ +#if defined(HTTPD_USE_CUSTOM_FSDATA) && (HTTPD_USE_CUSTOM_FSDATA != 0) +#define HTTPD_FSDATA_FILE "fsdata_custom.c" +#else +#define HTTPD_FSDATA_FILE "fsdata.c" +#endif +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_HTTPD_OPTS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/lwiperf.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/lwiperf.h new file mode 100644 index 0000000..cc86e7f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/lwiperf.h @@ -0,0 +1,100 @@ +/** + * @file + * lwIP iPerf server implementation + */ + +/* + * Copyright (c) 2014 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
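The custom-file hook described under LWIP_HTTPD_CUSTOM_FILES above might look like this sketch, which serves a RAM-generated /status.json next to the files baked into fsdata.c; the name and JSON content are illustrative, and LWIP_HTTPD_DYNAMIC_HEADERS==1 is assumed since 'data' carries no pre-built HTTP header:

#include <stdio.h>
#include <string.h>
#include "lwip/apps/fs.h"
#include "lwip/sys.h"

static char status_buf[128];

int fs_open_custom(struct fs_file *file, const char *name)
{
  if (strcmp(name, "/status.json") == 0) {
    int len = snprintf(status_buf, sizeof(status_buf),
                       "{\"uptime_ms\":%lu}", (unsigned long)sys_now());
    memset(file, 0, sizeof(struct fs_file));
    file->data  = status_buf;
    file->len   = len;
    file->index = len;        /* whole file already in memory, mirroring the stock fs_open */
    file->flags = 0;          /* no FS_FILE_FLAGS_HEADER_INCLUDED: httpd generates the header */
    return 1;                 /* handled here */
  }
  return 0;                   /* not ours: fall back to fsdata */
}

void fs_close_custom(struct fs_file *file)
{
  LWIP_UNUSED_ARG(file);      /* nothing was allocated per file in this sketch */
}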
+ * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_APPS_LWIPERF_H +#define LWIP_HDR_APPS_LWIPERF_H + +#include "lwip/opt.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define LWIPERF_TCP_PORT_DEFAULT 5001 + +/** lwIPerf test results */ +enum lwiperf_report_type +{ + /** The server side test is done */ + LWIPERF_TCP_DONE_SERVER, + /** The client side test is done */ + LWIPERF_TCP_DONE_CLIENT, + /** Local error lead to test abort */ + LWIPERF_TCP_ABORTED_LOCAL, + /** Data check error lead to test abort */ + LWIPERF_TCP_ABORTED_LOCAL_DATAERROR, + /** Transmit error lead to test abort */ + LWIPERF_TCP_ABORTED_LOCAL_TXERROR, + /** Remote side aborted the test */ + LWIPERF_TCP_ABORTED_REMOTE +}; + +/** Control */ +enum lwiperf_client_type +{ + /** Unidirectional tx only test */ + LWIPERF_CLIENT, + /** Do a bidirectional test simultaneously */ + LWIPERF_DUAL, + /** Do a bidirectional test individually */ + LWIPERF_TRADEOFF +}; + +/** Prototype of a report function that is called when a session is finished. + This report function can show the test results. + @param report_type contains the test result */ +typedef void (*lwiperf_report_fn)(void *arg, enum lwiperf_report_type report_type, + const ip_addr_t* local_addr, u16_t local_port, const ip_addr_t* remote_addr, u16_t remote_port, + u32_t bytes_transferred, u32_t ms_duration, u32_t bandwidth_kbitpsec); + +void* lwiperf_start_tcp_server(const ip_addr_t* local_addr, u16_t local_port, + lwiperf_report_fn report_fn, void* report_arg); +void* lwiperf_start_tcp_server_default(lwiperf_report_fn report_fn, void* report_arg); +void* lwiperf_start_tcp_client(const ip_addr_t* remote_addr, u16_t remote_port, + enum lwiperf_client_type type, + lwiperf_report_fn report_fn, void* report_arg); +void* lwiperf_start_tcp_client_default(const ip_addr_t* remote_addr, + lwiperf_report_fn report_fn, void* report_arg); + +void lwiperf_abort(void* lwiperf_session); + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_LWIPERF_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns.h new file mode 100644 index 0000000..20d7ee2 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns.h @@ -0,0 +1,105 @@ +/** + * @file + * MDNS responder + */ + + /* + * Copyright (c) 2015 Verisure Innovation AB + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
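A sketch of starting the iPerf TCP server with the lwiperf API above (to be called in tcpip-thread context, e.g. via tcpip_callback()); the log format is illustrative:

#include <stdio.h>
#include "lwip/apps/lwiperf.h"

static void iperf_report(void *arg, enum lwiperf_report_type report_type,
                         const ip_addr_t *local_addr, u16_t local_port,
                         const ip_addr_t *remote_addr, u16_t remote_port,
                         u32_t bytes_transferred, u32_t ms_duration, u32_t bandwidth_kbitpsec)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(local_addr);  LWIP_UNUSED_ARG(local_port);
  LWIP_UNUSED_ARG(remote_addr); LWIP_UNUSED_ARG(remote_port);
  printf("iperf session done (type %d): %lu bytes in %lu ms -> %lu kbit/s\n",
         (int)report_type, (unsigned long)bytes_transferred,
         (unsigned long)ms_duration, (unsigned long)bandwidth_kbitpsec);
}

void iperf_example_init(void)
{
  void *session = lwiperf_start_tcp_server_default(iperf_report, NULL);
  if (session == NULL) {
    printf("failed to start lwiperf server on port %d\n", LWIPERF_TCP_PORT_DEFAULT);
  }
}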
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Erik Ekman + * + */ + +#ifndef LWIP_HDR_APPS_MDNS_H +#define LWIP_HDR_APPS_MDNS_H + +#include "lwip/apps/mdns_opts.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_MDNS_RESPONDER + +enum mdns_sd_proto { + DNSSD_PROTO_UDP = 0, + DNSSD_PROTO_TCP = 1 +}; + +#define MDNS_PROBING_CONFLICT 0 +#define MDNS_PROBING_SUCCESSFUL 1 + +#define MDNS_LABEL_MAXLEN 63 + +struct mdns_host; +struct mdns_service; + +/** Callback function to add text to a reply, called when generating the reply */ +typedef void (*service_get_txt_fn_t)(struct mdns_service *service, void *txt_userdata); + +/** Callback function to let application know the result of probing network for name + * uniqueness, called with result MDNS_PROBING_SUCCESSFUL if no other node claimed + * use for the name for the netif or a service and is safe to use, or MDNS_PROBING_CONFLICT + * if another node is already using it and mdns is disabled on this interface */ +typedef void (*mdns_name_result_cb_t)(struct netif* netif, u8_t result); + +void mdns_resp_init(void); + +void mdns_resp_register_name_result_cb(mdns_name_result_cb_t cb); + +err_t mdns_resp_add_netif(struct netif *netif, const char *hostname, u32_t dns_ttl); +err_t mdns_resp_remove_netif(struct netif *netif); +err_t mdns_resp_rename_netif(struct netif *netif, const char *hostname); + +s8_t mdns_resp_add_service(struct netif *netif, const char *name, const char *service, enum mdns_sd_proto proto, u16_t port, u32_t dns_ttl, service_get_txt_fn_t txt_fn, void *txt_userdata); +err_t mdns_resp_del_service(struct netif *netif, s8_t slot); +err_t mdns_resp_rename_service(struct netif *netif, s8_t slot, const char *name); + +err_t mdns_resp_add_service_txtitem(struct mdns_service *service, const char *txt, u8_t txt_len); + +void mdns_resp_restart(struct netif *netif); +void mdns_resp_announce(struct netif *netif); + +/** + * @ingroup mdns + * Announce IP settings have changed on netif. + * Call this in your callback registered by netif_set_status_callback(). + * No need to call this function when LWIP_NETIF_EXT_STATUS_CALLBACK==1, + * this handled automatically for you. + * @param netif The network interface where settings have changed. + */ +#define mdns_resp_netif_settings_changed(netif) mdns_resp_announce(netif) + +#endif /* LWIP_MDNS_RESPONDER */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_MDNS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_opts.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_opts.h new file mode 100644 index 0000000..45f2c50 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_opts.h @@ -0,0 +1,81 @@ +/** + * @file + * MDNS responder + */ + + /* + * Copyright (c) 2015 Verisure Innovation AB + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Erik Ekman + * + */ + +#ifndef LWIP_HDR_APPS_MDNS_OPTS_H +#define LWIP_HDR_APPS_MDNS_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup mdns_opts Options + * @ingroup mdns + * @{ + */ + +/** + * LWIP_MDNS_RESPONDER==1: Turn on multicast DNS module. UDP must be available for MDNS + * transport. IGMP is needed for IPv4 multicast. + */ +#ifndef LWIP_MDNS_RESPONDER +#define LWIP_MDNS_RESPONDER 0 +#endif /* LWIP_MDNS_RESPONDER */ + +/** The maximum number of services per netif */ +#ifndef MDNS_MAX_SERVICES +#define MDNS_MAX_SERVICES 1 +#endif + +/** MDNS_RESP_USENETIF_EXTCALLBACK==1: register an ext_callback on the netif + * to automatically restart probing/announcing on status or address change. + */ +#ifndef MDNS_RESP_USENETIF_EXTCALLBACK +#define MDNS_RESP_USENETIF_EXTCALLBACK LWIP_NETIF_EXT_STATUS_CALLBACK +#endif + +/** + * MDNS_DEBUG: Enable debugging for multicast DNS. + */ +#ifndef MDNS_DEBUG +#define MDNS_DEBUG LWIP_DBG_OFF +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_MDNS_OPTS_H */ + diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_priv.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_priv.h new file mode 100644 index 0000000..9635b5b --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_priv.h @@ -0,0 +1,74 @@ +/** + * @file + * MDNS responder private definitions + */ + + /* + * Copyright (c) 2015 Verisure Innovation AB + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
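With the mDNS responder API and options above (LWIP_MDNS_RESPONDER==1, plus IGMP for IPv4 multicast), a minimal announcement sketch; the hostname, service instance name and TXT record are illustrative:

#include "lwip/apps/mdns.h"

static void http_srv_txt(struct mdns_service *service, void *txt_userdata)
{
  LWIP_UNUSED_ARG(txt_userdata);
  mdns_resp_add_service_txtitem(service, "path=/", 6);
}

void mdns_example_init(struct netif *netif)
{
  mdns_resp_init();
  mdns_resp_add_netif(netif, "stm32", 3600);                  /* announces stm32.local */
  mdns_resp_add_service(netif, "stm32-web", "_http", DNSSD_PROTO_TCP,
                        80, 3600, http_srv_txt, NULL);
}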
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Erik Ekman + * + */ +#ifndef LWIP_HDR_MDNS_PRIV_H +#define LWIP_HDR_MDNS_PRIV_H + +#include "lwip/apps/mdns_opts.h" +#include "lwip/pbuf.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_MDNS_RESPONDER + +/* Domain struct and methods - visible for unit tests */ + +#define MDNS_DOMAIN_MAXLEN 256 +#define MDNS_READNAME_ERROR 0xFFFF + +struct mdns_domain { + /* Encoded domain name */ + u8_t name[MDNS_DOMAIN_MAXLEN]; + /* Total length of domain name, including zero */ + u16_t length; + /* Set if compression of this domain is not allowed */ + u8_t skip_compression; +}; + +err_t mdns_domain_add_label(struct mdns_domain *domain, const char *label, u8_t len); +u16_t mdns_readname(struct pbuf *p, u16_t offset, struct mdns_domain *domain); +int mdns_domain_eq(struct mdns_domain *a, struct mdns_domain *b); +u16_t mdns_compress_domain(struct pbuf *pbuf, u16_t *offset, struct mdns_domain *domain); + +#endif /* LWIP_MDNS_RESPONDER */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_MDNS_PRIV_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt.h new file mode 100644 index 0000000..c2bb228 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt.h @@ -0,0 +1,205 @@ +/** + * @file + * MQTT client + */ + +/* + * Copyright (c) 2016 Erik Andersson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Erik Andersson + * + */ +#ifndef LWIP_HDR_APPS_MQTT_CLIENT_H +#define LWIP_HDR_APPS_MQTT_CLIENT_H + +#include "lwip/apps/mqtt_opts.h" +#include "lwip/err.h" +#include "lwip/ip_addr.h" +#include "lwip/prot/iana.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct mqtt_client_s mqtt_client_t; + +#if LWIP_ALTCP && LWIP_ALTCP_TLS +struct altcp_tls_config; +#endif + +/** @ingroup mqtt + * Default MQTT port (non-TLS) */ +#define MQTT_PORT LWIP_IANA_PORT_MQTT +/** @ingroup mqtt + * Default MQTT TLS port */ +#define MQTT_TLS_PORT LWIP_IANA_PORT_SECURE_MQTT + +/*---------------------------------------------------------------------------------------------- */ +/* Connection with server */ + +/** + * @ingroup mqtt + * Client information and connection parameters */ +struct mqtt_connect_client_info_t { + /** Client identifier, must be set by caller */ + const char *client_id; + /** User name, set to NULL if not used */ + const char* client_user; + /** Password, set to NULL if not used */ + const char* client_pass; + /** keep alive time in seconds, 0 to disable keep alive functionality*/ + u16_t keep_alive; + /** will topic, set to NULL if will is not to be used, + will_msg, will_qos and will retain are then ignored */ + const char* will_topic; + /** will_msg, see will_topic */ + const char* will_msg; + /** will_qos, see will_topic */ + u8_t will_qos; + /** will_retain, see will_topic */ + u8_t will_retain; +#if LWIP_ALTCP && LWIP_ALTCP_TLS + /** TLS configuration for secure connections */ + struct altcp_tls_config *tls_config; +#endif +}; + +/** + * @ingroup mqtt + * Connection status codes */ +typedef enum +{ + /** Accepted */ + MQTT_CONNECT_ACCEPTED = 0, + /** Refused protocol version */ + MQTT_CONNECT_REFUSED_PROTOCOL_VERSION = 1, + /** Refused identifier */ + MQTT_CONNECT_REFUSED_IDENTIFIER = 2, + /** Refused server */ + MQTT_CONNECT_REFUSED_SERVER = 3, + /** Refused user credentials */ + MQTT_CONNECT_REFUSED_USERNAME_PASS = 4, + /** Refused not authorized */ + MQTT_CONNECT_REFUSED_NOT_AUTHORIZED_ = 5, + /** Disconnected */ + MQTT_CONNECT_DISCONNECTED = 256, + /** Timeout */ + MQTT_CONNECT_TIMEOUT = 257 +} mqtt_connection_status_t; + +/** + * @ingroup mqtt + * Function prototype for mqtt connection status callback. 
Called when + * client has connected to the server after initiating a mqtt connection attempt by + * calling mqtt_client_connect() or when connection is closed by server or an error + * + * @param client MQTT client itself + * @param arg Additional argument to pass to the callback function + * @param status Connect result code or disconnection notification @see mqtt_connection_status_t + * + */ +typedef void (*mqtt_connection_cb_t)(mqtt_client_t *client, void *arg, mqtt_connection_status_t status); + + +/** + * @ingroup mqtt + * Data callback flags */ +enum { + /** Flag set when last fragment of data arrives in data callback */ + MQTT_DATA_FLAG_LAST = 1 +}; + +/** + * @ingroup mqtt + * Function prototype for MQTT incoming publish data callback function. Called when data + * arrives to a subscribed topic @see mqtt_subscribe + * + * @param arg Additional argument to pass to the callback function + * @param data User data, pointed object, data may not be referenced after callback return, + NULL is passed when all publish data are delivered + * @param len Length of publish data fragment + * @param flags MQTT_DATA_FLAG_LAST set when this call contains the last part of data from publish message + * + */ +typedef void (*mqtt_incoming_data_cb_t)(void *arg, const u8_t *data, u16_t len, u8_t flags); + + +/** + * @ingroup mqtt + * Function prototype for MQTT incoming publish function. Called when an incoming publish + * arrives to a subscribed topic @see mqtt_subscribe + * + * @param arg Additional argument to pass to the callback function + * @param topic Zero terminated Topic text string, topic may not be referenced after callback return + * @param tot_len Total length of publish data, if set to 0 (no publish payload) data callback will not be invoked + */ +typedef void (*mqtt_incoming_publish_cb_t)(void *arg, const char *topic, u32_t tot_len); + + +/** + * @ingroup mqtt + * Function prototype for mqtt request callback. 
Called when a subscribe, unsubscribe + * or publish request has completed + * @param arg Pointer to user data supplied when invoking request + * @param err ERR_OK on success + * ERR_TIMEOUT if no response was received within timeout, + * ERR_ABRT if (un)subscribe was denied + */ +typedef void (*mqtt_request_cb_t)(void *arg, err_t err); + + +err_t mqtt_client_connect(mqtt_client_t *client, const ip_addr_t *ipaddr, u16_t port, mqtt_connection_cb_t cb, void *arg, + const struct mqtt_connect_client_info_t *client_info); + +void mqtt_disconnect(mqtt_client_t *client); + +mqtt_client_t *mqtt_client_new(void); +void mqtt_client_free(mqtt_client_t* client); + +u8_t mqtt_client_is_connected(mqtt_client_t *client); + +void mqtt_set_inpub_callback(mqtt_client_t *client, mqtt_incoming_publish_cb_t, + mqtt_incoming_data_cb_t data_cb, void *arg); + +err_t mqtt_sub_unsub(mqtt_client_t *client, const char *topic, u8_t qos, mqtt_request_cb_t cb, void *arg, u8_t sub); + +/** @ingroup mqtt + *Subscribe to topic */ +#define mqtt_subscribe(client, topic, qos, cb, arg) mqtt_sub_unsub(client, topic, qos, cb, arg, 1) +/** @ingroup mqtt + * Unsubscribe to topic */ +#define mqtt_unsubscribe(client, topic, cb, arg) mqtt_sub_unsub(client, topic, 0, cb, arg, 0) + +err_t mqtt_publish(mqtt_client_t *client, const char *topic, const void *payload, u16_t payload_length, u8_t qos, u8_t retain, + mqtt_request_cb_t cb, void *arg); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_MQTT_CLIENT_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_opts.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_opts.h new file mode 100644 index 0000000..4226d21 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_opts.h @@ -0,0 +1,103 @@ +/** + * @file + * MQTT client options + */ + +/* + * Copyright (c) 2016 Erik Andersson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Erik Andersson + * + */ +#ifndef LWIP_HDR_APPS_MQTT_OPTS_H +#define LWIP_HDR_APPS_MQTT_OPTS_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup mqtt_opts Options + * @ingroup mqtt + * @{ + */ + +/** + * Output ring-buffer size, must be able to fit largest outgoing publish message topic+payloads + */ +#ifndef MQTT_OUTPUT_RINGBUF_SIZE +#define MQTT_OUTPUT_RINGBUF_SIZE 256 +#endif + +/** + * Number of bytes in receive buffer, must be at least the size of the longest incoming topic + 8 + * If one wants to avoid fragmented incoming publish, set length to max incoming topic length + max payload length + 8 + */ +#ifndef MQTT_VAR_HEADER_BUFFER_LEN +#define MQTT_VAR_HEADER_BUFFER_LEN 128 +#endif + +/** + * Maximum number of pending subscribe, unsubscribe and publish requests to server . + */ +#ifndef MQTT_REQ_MAX_IN_FLIGHT +#define MQTT_REQ_MAX_IN_FLIGHT 4 +#endif + +/** + * Seconds between each cyclic timer call. + */ +#ifndef MQTT_CYCLIC_TIMER_INTERVAL +#define MQTT_CYCLIC_TIMER_INTERVAL 5 +#endif + +/** + * Publish, subscribe and unsubscribe request timeout in seconds. + */ +#ifndef MQTT_REQ_TIMEOUT +#define MQTT_REQ_TIMEOUT 30 +#endif + +/** + * Seconds for MQTT connect response timeout after sending connect request + */ +#ifndef MQTT_CONNECT_TIMOUT +#define MQTT_CONNECT_TIMOUT 100 +#endif + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_MQTT_OPTS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_priv.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_priv.h new file mode 100644 index 0000000..b775913 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_priv.h @@ -0,0 +1,104 @@ +/** + * @file + * MQTT client (private interface) + */ + +/* + * Copyright (c) 2016 Erik Andersson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
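
The mqtt_opts.h defaults above are compile-time settings; a project would normally override them from lwipopts.h before the header is pulled in. A sketch with purely illustrative values:

#define MQTT_OUTPUT_RINGBUF_SIZE   512   /* must fit the largest outgoing topic + payload */
#define MQTT_VAR_HEADER_BUFFER_LEN 256   /* >= longest incoming topic + 8; add max payload to avoid fragmented publishes */
#define MQTT_REQ_MAX_IN_FLIGHT     8     /* pending subscribe/unsubscribe/publish requests */
#define MQTT_CYCLIC_TIMER_INTERVAL 5     /* seconds between cyclic timer calls */
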
+ * + * Author: Erik Andersson + * + */ +#ifndef LWIP_HDR_APPS_MQTT_PRIV_H +#define LWIP_HDR_APPS_MQTT_PRIV_H + +#include "lwip/apps/mqtt.h" +#include "lwip/altcp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** Pending request item, binds application callback to pending server requests */ +struct mqtt_request_t +{ + /** Next item in list, NULL means this is the last in chain, + next pointing at itself means request is unallocated */ + struct mqtt_request_t *next; + /** Callback to upper layer */ + mqtt_request_cb_t cb; + void *arg; + /** MQTT packet identifier */ + u16_t pkt_id; + /** Expire time relative to element before this */ + u16_t timeout_diff; +}; + +/** Ring buffer */ +struct mqtt_ringbuf_t { + u16_t put; + u16_t get; + u8_t buf[MQTT_OUTPUT_RINGBUF_SIZE]; +}; + +/** MQTT client */ +struct mqtt_client_s +{ + /** Timers and timeouts */ + u16_t cyclic_tick; + u16_t keep_alive; + u16_t server_watchdog; + /** Packet identifier generator*/ + u16_t pkt_id_seq; + /** Packet identifier of pending incoming publish */ + u16_t inpub_pkt_id; + /** Connection state */ + u8_t conn_state; + struct altcp_pcb *conn; + /** Connection callback */ + void *connect_arg; + mqtt_connection_cb_t connect_cb; + /** Pending requests to server */ + struct mqtt_request_t *pend_req_queue; + struct mqtt_request_t req_list[MQTT_REQ_MAX_IN_FLIGHT]; + void *inpub_arg; + /** Incoming data callback */ + mqtt_incoming_data_cb_t data_cb; + mqtt_incoming_publish_cb_t pub_cb; + /** Input */ + u32_t msg_idx; + u8_t rx_buffer[MQTT_VAR_HEADER_BUFFER_LEN]; + /** Output ring-buffer */ + struct mqtt_ringbuf_t output; +}; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_MQTT_PRIV_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns.h new file mode 100644 index 0000000..a326d33 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns.h @@ -0,0 +1,51 @@ +/** + * @file + * NETBIOS name service responder + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + */ +#ifndef LWIP_HDR_APPS_NETBIOS_H +#define LWIP_HDR_APPS_NETBIOS_H + +#include "lwip/apps/netbiosns_opts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void netbiosns_init(void); +#ifndef NETBIOS_LWIP_NAME +void netbiosns_set_name(const char* hostname); +#endif +void netbiosns_stop(void); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_NETBIOS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns_opts.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns_opts.h new file mode 100644 index 0000000..1f51ab0 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns_opts.h @@ -0,0 +1,66 @@ +/** + * @file + * NETBIOS name service responder options + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ +#ifndef LWIP_HDR_APPS_NETBIOS_OPTS_H +#define LWIP_HDR_APPS_NETBIOS_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup netbiosns_opts Options + * @ingroup netbiosns + * @{ + */ + +/** NetBIOS name of lwip device + * This must be uppercase until NETBIOS_STRCMP() is defined to a string + * comparision function that is case insensitive. + * If you want to use the netif's hostname, use this (with LWIP_NETIF_HOSTNAME): + * (ip_current_netif() != NULL ? ip_current_netif()->hostname != NULL ? ip_current_netif()->hostname : "" : "") + * + * If this is not defined, netbiosns_set_name() can be called at runtime to change the name. 
+ */ +#ifdef __DOXYGEN__ +#define NETBIOS_LWIP_NAME "NETBIOSLWIPDEV" +#endif + +/** Respond to NetBIOS name queries + * Default is disabled + */ +#if !defined LWIP_NETBIOS_RESPOND_NAME_QUERY || defined __DOXYGEN__ +#define LWIP_NETBIOS_RESPOND_NAME_QUERY 0 +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_NETBIOS_OPTS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp.h new file mode 100644 index 0000000..fb4a4a7 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp.h @@ -0,0 +1,128 @@ +#ifndef LWIP_HDR_APPS_SMTP_H +#define LWIP_HDR_APPS_SMTP_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "lwip/apps/smtp_opts.h" +#include "lwip/err.h" +#include "lwip/prot/iana.h" + +/** The default TCP port used for SMTP */ +#define SMTP_DEFAULT_PORT LWIP_IANA_PORT_SMTP +/** The default TCP port used for SMTPS */ +#define SMTPS_DEFAULT_PORT LWIP_IANA_PORT_SMTPS + +/** Email successfully sent */ +#define SMTP_RESULT_OK 0 +/** Unknown error */ +#define SMTP_RESULT_ERR_UNKNOWN 1 +/** Connection to server failed */ +#define SMTP_RESULT_ERR_CONNECT 2 +/** Failed to resolve server hostname */ +#define SMTP_RESULT_ERR_HOSTNAME 3 +/** Connection unexpectedly closed by remote server */ +#define SMTP_RESULT_ERR_CLOSED 4 +/** Connection timed out (server didn't respond in time) */ +#define SMTP_RESULT_ERR_TIMEOUT 5 +/** Server responded with an unknown response code */ +#define SMTP_RESULT_ERR_SVR_RESP 6 +/** Out of resources locally */ +#define SMTP_RESULT_ERR_MEM 7 + +/** Prototype of an smtp callback function + * + * @param arg argument specified when initiating the email + * @param smtp_result result of the mail transfer (see defines SMTP_RESULT_*) + * @param srv_err if aborted by the server, this contains the error code received + * @param err an error returned by internal lwip functions, can help to specify + * the source of the error but must not necessarily be != ERR_OK + */ +typedef void (*smtp_result_fn)(void *arg, u8_t smtp_result, u16_t srv_err, err_t err); + +/** This structure is used as argument for smtp_send_mail_int(), + * which in turn can be used with tcpip_callback() to send mail + * from interrupt context, e.g. like this: + * struct smtp_send_request *req; (to be filled) + * tcpip_try_callback(smtp_send_mail_int, (void*)req); + * + * For member description, see parameter description of smtp_send_mail(). + * When using with tcpip_callback, this structure has to stay allocated + * (e.g. using mem_malloc/mem_free) until its 'callback_fn' is called. + */ +struct smtp_send_request { + const char *from; + const char* to; + const char* subject; + const char* body; + smtp_result_fn callback_fn; + void* callback_arg; + /** If this is != 0, data is *not* copied into an extra buffer + * but used from the pointers supplied in this struct. + * This means less memory usage, but data must stay untouched until + * the callback function is called. 
*/ + u8_t static_data; +}; + + +#if SMTP_BODYDH + +#ifndef SMTP_BODYDH_BUFFER_SIZE +#define SMTP_BODYDH_BUFFER_SIZE 256 +#endif /* SMTP_BODYDH_BUFFER_SIZE */ + +struct smtp_bodydh { + u16_t state; + u16_t length; /* Length of content in buffer */ + char buffer[SMTP_BODYDH_BUFFER_SIZE]; /* buffer for generated content */ +#ifdef SMTP_BODYDH_USER_SIZE + u8_t user[SMTP_BODYDH_USER_SIZE]; +#endif /* SMTP_BODYDH_USER_SIZE */ +}; + +enum bdh_retvals_e { + BDH_DONE = 0, + BDH_WORKING +}; + +/** Prototype of an smtp body callback function + * It receives a struct smtp_bodydh, and a buffer to write data, + * must return BDH_WORKING to be called again and BDH_DONE when + * it has finished processing. This one tries to fill one TCP buffer with + * data, your function will be repeatedly called until that happens; so if you + * know you'll be taking too long to serve your request, pause once in a while + * by writing length=0 to avoid hogging system resources + * + * @param arg argument specified when initiating the email + * @param smtp_bodydh state handling + buffer structure + */ +typedef int (*smtp_bodycback_fn)(void *arg, struct smtp_bodydh *bodydh); + +err_t smtp_send_mail_bodycback(const char *from, const char* to, const char* subject, + smtp_bodycback_fn bodycback_fn, smtp_result_fn callback_fn, void* callback_arg); + +#endif /* SMTP_BODYDH */ + + +err_t smtp_set_server_addr(const char* server); +void smtp_set_server_port(u16_t port); +#if LWIP_ALTCP && LWIP_ALTCP_TLS +struct altcp_tls_config; +void smtp_set_tls_config(struct altcp_tls_config *tls_config); +#endif +err_t smtp_set_auth(const char* username, const char* pass); +err_t smtp_send_mail(const char *from, const char* to, const char* subject, const char* body, + smtp_result_fn callback_fn, void* callback_arg); +err_t smtp_send_mail_static(const char *from, const char* to, const char* subject, const char* body, + smtp_result_fn callback_fn, void* callback_arg); +void smtp_send_mail_int(void *arg); +#ifdef LWIP_DEBUG +const char* smtp_result_str(u8_t smtp_result); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SMTP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp_opts.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp_opts.h new file mode 100644 index 0000000..bc743f6 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp_opts.h @@ -0,0 +1,81 @@ +#ifndef LWIP_HDR_APPS_SMTP_OPTS_H +#define LWIP_HDR_APPS_SMTP_OPTS_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup smtp_opts Options + * @ingroup smtp + * + * @{ + */ + +/** Set this to 1 to enable data handler callback on BODY */ +#ifndef SMTP_BODYDH +#define SMTP_BODYDH 0 +#endif + +/** SMTP_DEBUG: Enable debugging for SNTP. */ +#ifndef SMTP_DEBUG +#define SMTP_DEBUG LWIP_DBG_OFF +#endif + +/** Maximum length reserved for server name including terminating 0 byte */ +#ifndef SMTP_MAX_SERVERNAME_LEN +#define SMTP_MAX_SERVERNAME_LEN 256 +#endif + +/** Maximum length reserved for username */ +#ifndef SMTP_MAX_USERNAME_LEN +#define SMTP_MAX_USERNAME_LEN 32 +#endif + +/** Maximum length reserved for password */ +#ifndef SMTP_MAX_PASS_LEN +#define SMTP_MAX_PASS_LEN 32 +#endif + +/** Set this to 0 if you know the authentication data will not change + * during the smtp session, which saves some heap space. 
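
The smtp.h API above boils down to configuring the server, optionally setting AUTH credentials, and calling smtp_send_mail() with a result callback. A minimal sketch; the server name, credentials and mail addresses are placeholders:

#include "lwip/apps/smtp.h"

static void example_smtp_done(void *arg, u8_t smtp_result, u16_t srv_err, err_t err)
{
  LWIP_UNUSED_ARG(arg); LWIP_UNUSED_ARG(srv_err); LWIP_UNUSED_ARG(err);
  if (smtp_result == SMTP_RESULT_OK) {
    /* mail handed over to the server */
  }
}

static void example_send_mail(void)
{
  smtp_set_server_addr("mail.example.com");   /* hostname is resolved, see SMTP_RESULT_ERR_HOSTNAME */
  smtp_set_server_port(SMTP_DEFAULT_PORT);
  smtp_set_auth("user", "pass");              /* only needed if the server requires AUTH */
  smtp_send_mail("board@example.com", "ops@example.com",
                 "status report", "All systems nominal.\r\n",
                 example_smtp_done, NULL);
}
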
*/ +#ifndef SMTP_COPY_AUTHDATA +#define SMTP_COPY_AUTHDATA 1 +#endif + +/** Set this to 0 to save some code space if you know for sure that all data + * passed to this module conforms to the requirements in the SMTP RFC. + * WARNING: use this with care! + */ +#ifndef SMTP_CHECK_DATA +#define SMTP_CHECK_DATA 1 +#endif + +/** Set this to 1 to enable AUTH PLAIN support */ +#ifndef SMTP_SUPPORT_AUTH_PLAIN +#define SMTP_SUPPORT_AUTH_PLAIN 1 +#endif + +/** Set this to 1 to enable AUTH LOGIN support */ +#ifndef SMTP_SUPPORT_AUTH_LOGIN +#define SMTP_SUPPORT_AUTH_LOGIN 1 +#endif + +/* Memory allocation/deallocation can be overridden... */ +#ifndef SMTP_STATE_MALLOC +#define SMTP_STATE_MALLOC(size) mem_malloc(size) +#define SMTP_STATE_FREE(ptr) mem_free(ptr) +#endif + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* SMTP_OPTS_H */ + diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp.h new file mode 100644 index 0000000..a3f8eb1 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp.h @@ -0,0 +1,135 @@ +/** + * @file + * SNMP server main API - start and basic configuration + */ + +/* + * Copyright (c) 2001, 2002 Leon Woestenberg + * Copyright (c) 2001, 2002 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Leon Woestenberg + * Martin Hentschel + * + */ +#ifndef LWIP_HDR_APPS_SNMP_H +#define LWIP_HDR_APPS_SNMP_H + +#include "lwip/apps/snmp_opts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/err.h" +#include "lwip/apps/snmp_core.h" + +/** SNMP variable binding descriptor (publically needed for traps) */ +struct snmp_varbind +{ + /** pointer to next varbind, NULL for last in list */ + struct snmp_varbind *next; + /** pointer to previous varbind, NULL for first in list */ + struct snmp_varbind *prev; + + /** object identifier */ + struct snmp_obj_id oid; + + /** value ASN1 type */ + u8_t type; + /** object value length */ + u16_t value_len; + /** object value */ + void *value; +}; + +/** + * @ingroup snmp_core + * Agent setup, start listening to port 161. + */ +void snmp_init(void); +void snmp_set_mibs(const struct snmp_mib **mibs, u8_t num_mibs); + +void snmp_set_device_enterprise_oid(const struct snmp_obj_id* device_enterprise_oid); +const struct snmp_obj_id* snmp_get_device_enterprise_oid(void); + +void snmp_trap_dst_enable(u8_t dst_idx, u8_t enable); +void snmp_trap_dst_ip_set(u8_t dst_idx, const ip_addr_t *dst); + +/** Generic trap: cold start */ +#define SNMP_GENTRAP_COLDSTART 0 +/** Generic trap: warm start */ +#define SNMP_GENTRAP_WARMSTART 1 +/** Generic trap: link down */ +#define SNMP_GENTRAP_LINKDOWN 2 +/** Generic trap: link up */ +#define SNMP_GENTRAP_LINKUP 3 +/** Generic trap: authentication failure */ +#define SNMP_GENTRAP_AUTH_FAILURE 4 +/** Generic trap: EGP neighbor lost */ +#define SNMP_GENTRAP_EGP_NEIGHBOR_LOSS 5 +/** Generic trap: enterprise specific */ +#define SNMP_GENTRAP_ENTERPRISE_SPECIFIC 6 + +err_t snmp_send_trap_generic(s32_t generic_trap); +err_t snmp_send_trap_specific(s32_t specific_trap, struct snmp_varbind *varbinds); +err_t snmp_send_trap(const struct snmp_obj_id* oid, s32_t generic_trap, s32_t specific_trap, struct snmp_varbind *varbinds); + +#define SNMP_AUTH_TRAPS_DISABLED 0 +#define SNMP_AUTH_TRAPS_ENABLED 1 +void snmp_set_auth_traps_enabled(u8_t enable); +u8_t snmp_get_auth_traps_enabled(void); + +u8_t snmp_v1_enabled(void); +u8_t snmp_v2c_enabled(void); +u8_t snmp_v3_enabled(void); +void snmp_v1_enable(u8_t enable); +void snmp_v2c_enable(u8_t enable); +void snmp_v3_enable(u8_t enable); + +const char * snmp_get_community(void); +const char * snmp_get_community_write(void); +const char * snmp_get_community_trap(void); +void snmp_set_community(const char * const community); +void snmp_set_community_write(const char * const community); +void snmp_set_community_trap(const char * const community); + +void snmp_coldstart_trap(void); +void snmp_authfail_trap(void); + +typedef void (*snmp_write_callback_fct)(const u32_t* oid, u8_t oid_len, void* callback_arg); +void snmp_set_write_callback(snmp_write_callback_fct write_callback, void* callback_arg); + +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_core.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_core.h new file mode 100644 index 0000000..6021c72 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_core.h @@ -0,0 +1,377 @@ +/** + * @file + * SNMP core API for implementing MIBs + */ + +/* + * Copyright (c) 2006 Axon Digital Design B.V., The Netherlands. + * All rights reserved. 
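
The agent API declared in snmp.h above is typically brought up once at startup: register the MIBs, set the community strings, start the agent, and optionally configure a trap destination. A sketch assuming LWIP_SNMP and the stack's MIB2 implementation are enabled; the trap receiver address and community are placeholders:

#include "lwip/apps/snmp.h"
#include "lwip/apps/snmp_mib2.h"
#include "lwip/ip_addr.h"

static const struct snmp_mib *example_mibs[] = { &mib2 };

static void example_snmp_start(void)
{
  ip_addr_t trap_dst;

  snmp_set_mibs(example_mibs, (u8_t)LWIP_ARRAYSIZE(example_mibs));
  snmp_set_community("public");                 /* read community */
  snmp_init();                                  /* agent starts listening on port 161 */

  ipaddr_aton("192.168.1.1", &trap_dst);        /* placeholder trap receiver */
  snmp_trap_dst_ip_set(0, &trap_dst);
  snmp_trap_dst_enable(0, 1);
  snmp_coldstart_trap();
}
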
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Christiaan Simons + * Martin Hentschel + */ + +#ifndef LWIP_HDR_APPS_SNMP_CORE_H +#define LWIP_HDR_APPS_SNMP_CORE_H + +#include "lwip/apps/snmp_opts.h" + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip_addr.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* basic ASN1 defines */ +#define SNMP_ASN1_CLASS_UNIVERSAL 0x00 +#define SNMP_ASN1_CLASS_APPLICATION 0x40 +#define SNMP_ASN1_CLASS_CONTEXT 0x80 +#define SNMP_ASN1_CLASS_PRIVATE 0xC0 + +#define SNMP_ASN1_CONTENTTYPE_PRIMITIVE 0x00 +#define SNMP_ASN1_CONTENTTYPE_CONSTRUCTED 0x20 + +/* universal tags (from ASN.1 spec.) 
*/ +#define SNMP_ASN1_UNIVERSAL_END_OF_CONTENT 0 +#define SNMP_ASN1_UNIVERSAL_INTEGER 2 +#define SNMP_ASN1_UNIVERSAL_OCTET_STRING 4 +#define SNMP_ASN1_UNIVERSAL_NULL 5 +#define SNMP_ASN1_UNIVERSAL_OBJECT_ID 6 +#define SNMP_ASN1_UNIVERSAL_SEQUENCE_OF 16 + +/* application specific (SNMP) tags (from SNMPv2-SMI) */ +#define SNMP_ASN1_APPLICATION_IPADDR 0 /* [APPLICATION 0] IMPLICIT OCTET STRING (SIZE (4)) */ +#define SNMP_ASN1_APPLICATION_COUNTER 1 /* [APPLICATION 1] IMPLICIT INTEGER (0..4294967295) => u32_t */ +#define SNMP_ASN1_APPLICATION_GAUGE 2 /* [APPLICATION 2] IMPLICIT INTEGER (0..4294967295) => u32_t */ +#define SNMP_ASN1_APPLICATION_TIMETICKS 3 /* [APPLICATION 3] IMPLICIT INTEGER (0..4294967295) => u32_t */ +#define SNMP_ASN1_APPLICATION_OPAQUE 4 /* [APPLICATION 4] IMPLICIT OCTET STRING */ +#define SNMP_ASN1_APPLICATION_COUNTER64 6 /* [APPLICATION 6] IMPLICIT INTEGER (0..18446744073709551615) */ + +/* context specific (SNMP) tags (from RFC 1905) */ +#define SNMP_ASN1_CONTEXT_VARBIND_NO_SUCH_INSTANCE 1 + +/* full ASN1 type defines */ +#define SNMP_ASN1_TYPE_END_OF_CONTENT (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_END_OF_CONTENT) +#define SNMP_ASN1_TYPE_INTEGER (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_INTEGER) +#define SNMP_ASN1_TYPE_OCTET_STRING (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_OCTET_STRING) +#define SNMP_ASN1_TYPE_NULL (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_NULL) +#define SNMP_ASN1_TYPE_OBJECT_ID (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_OBJECT_ID) +#define SNMP_ASN1_TYPE_SEQUENCE (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_CONSTRUCTED | SNMP_ASN1_UNIVERSAL_SEQUENCE_OF) +#define SNMP_ASN1_TYPE_IPADDR (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_IPADDR) +#define SNMP_ASN1_TYPE_IPADDRESS SNMP_ASN1_TYPE_IPADDR +#define SNMP_ASN1_TYPE_COUNTER (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_COUNTER) +#define SNMP_ASN1_TYPE_COUNTER32 SNMP_ASN1_TYPE_COUNTER +#define SNMP_ASN1_TYPE_GAUGE (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_GAUGE) +#define SNMP_ASN1_TYPE_GAUGE32 SNMP_ASN1_TYPE_GAUGE +#define SNMP_ASN1_TYPE_UNSIGNED32 SNMP_ASN1_TYPE_GAUGE +#define SNMP_ASN1_TYPE_TIMETICKS (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_TIMETICKS) +#define SNMP_ASN1_TYPE_OPAQUE (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_OPAQUE) +#if LWIP_HAVE_INT64 +#define SNMP_ASN1_TYPE_COUNTER64 (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_COUNTER64) +#endif + +#define SNMP_VARBIND_EXCEPTION_OFFSET 0xF0 +#define SNMP_VARBIND_EXCEPTION_MASK 0x0F + +/** error codes predefined by SNMP prot. */ +typedef enum { + SNMP_ERR_NOERROR = 0, +/* +outdated v1 error codes. do not use anmore! 
+#define SNMP_ERR_NOSUCHNAME 2 use SNMP_ERR_NOSUCHINSTANCE instead +#define SNMP_ERR_BADVALUE 3 use SNMP_ERR_WRONGTYPE,SNMP_ERR_WRONGLENGTH,SNMP_ERR_WRONGENCODING or SNMP_ERR_WRONGVALUE instead +#define SNMP_ERR_READONLY 4 use SNMP_ERR_NOTWRITABLE instead +*/ + SNMP_ERR_GENERROR = 5, + SNMP_ERR_NOACCESS = 6, + SNMP_ERR_WRONGTYPE = 7, + SNMP_ERR_WRONGLENGTH = 8, + SNMP_ERR_WRONGENCODING = 9, + SNMP_ERR_WRONGVALUE = 10, + SNMP_ERR_NOCREATION = 11, + SNMP_ERR_INCONSISTENTVALUE = 12, + SNMP_ERR_RESOURCEUNAVAILABLE = 13, + SNMP_ERR_COMMITFAILED = 14, + SNMP_ERR_UNDOFAILED = 15, + SNMP_ERR_NOTWRITABLE = 17, + SNMP_ERR_INCONSISTENTNAME = 18, + + SNMP_ERR_NOSUCHINSTANCE = SNMP_VARBIND_EXCEPTION_OFFSET + SNMP_ASN1_CONTEXT_VARBIND_NO_SUCH_INSTANCE +} snmp_err_t; + +/** internal object identifier representation */ +struct snmp_obj_id +{ + u8_t len; + u32_t id[SNMP_MAX_OBJ_ID_LEN]; +}; + +struct snmp_obj_id_const_ref +{ + u8_t len; + const u32_t* id; +}; + +extern const struct snmp_obj_id_const_ref snmp_zero_dot_zero; /* administrative identifier from SNMPv2-SMI */ + +/** SNMP variant value, used as reference in struct snmp_node_instance and table implementation */ +union snmp_variant_value +{ + void* ptr; + const void* const_ptr; + u32_t u32; + s32_t s32; +#if LWIP_HAVE_INT64 + u64_t u64; +#endif +}; + + +/** +SNMP MIB node types + tree node is the only node the stack can process in order to walk the tree, + all other nodes are assumed to be leaf nodes. + This cannot be an enum because users may want to define their own node types. +*/ +#define SNMP_NODE_TREE 0x00 +/* predefined leaf node types */ +#define SNMP_NODE_SCALAR 0x01 +#define SNMP_NODE_SCALAR_ARRAY 0x02 +#define SNMP_NODE_TABLE 0x03 +#define SNMP_NODE_THREADSYNC 0x04 + +/** node "base class" layout, the mandatory fields for a node */ +struct snmp_node +{ + /** one out of SNMP_NODE_TREE or any leaf node type (like SNMP_NODE_SCALAR) */ + u8_t node_type; + /** the number assigned to this node which used as part of the full OID */ + u32_t oid; +}; + +/** SNMP node instance access types */ +typedef enum { + SNMP_NODE_INSTANCE_ACCESS_READ = 1, + SNMP_NODE_INSTANCE_ACCESS_WRITE = 2, + SNMP_NODE_INSTANCE_READ_ONLY = SNMP_NODE_INSTANCE_ACCESS_READ, + SNMP_NODE_INSTANCE_READ_WRITE = (SNMP_NODE_INSTANCE_ACCESS_READ | SNMP_NODE_INSTANCE_ACCESS_WRITE), + SNMP_NODE_INSTANCE_WRITE_ONLY = SNMP_NODE_INSTANCE_ACCESS_WRITE, + SNMP_NODE_INSTANCE_NOT_ACCESSIBLE = 0 +} snmp_access_t; + +struct snmp_node_instance; + +typedef s16_t (*node_instance_get_value_method)(struct snmp_node_instance*, void*); +typedef snmp_err_t (*node_instance_set_test_method)(struct snmp_node_instance*, u16_t, void*); +typedef snmp_err_t (*node_instance_set_value_method)(struct snmp_node_instance*, u16_t, void*); +typedef void (*node_instance_release_method)(struct snmp_node_instance*); + +#define SNMP_GET_VALUE_RAW_DATA 0x4000 /* do not use 0x8000 because return value of node_instance_get_value_method is signed16 and 0x8000 would be the signed bit */ + +/** SNMP node instance */ +struct snmp_node_instance +{ + /** prefilled with the node, get_instance() is called on; may be changed by user to any value to pass an arbitrary node between calls to get_instance() and get_value/test_value/set_value */ + const struct snmp_node* node; + /** prefilled with the instance id requested; for get_instance() this is the exact oid requested; for get_next_instance() this is the relative starting point, stack expects relative oid of next node here */ + struct snmp_obj_id instance_oid; + + /** ASN type 
for this object (see snmp_asn1.h for definitions) */ + u8_t asn1_type; + /** one out of instance access types defined above (SNMP_NODE_INSTANCE_READ_ONLY,...) */ + snmp_access_t access; + + /** returns object value for the given object identifier. Return values <0 to indicate an error */ + node_instance_get_value_method get_value; + /** tests length and/or range BEFORE setting */ + node_instance_set_test_method set_test; + /** sets object value, only called when set_test() was successful */ + node_instance_set_value_method set_value; + /** called in any case when the instance is not required anymore by stack (useful for freeing memory allocated in get_instance/get_next_instance methods) */ + node_instance_release_method release_instance; + + /** reference to pass arbitrary value between calls to get_instance() and get_value/test_value/set_value */ + union snmp_variant_value reference; + /** see reference (if reference is a pointer, the length of underlying data may be stored here or anything else) */ + u32_t reference_len; +}; + + +/** SNMP tree node */ +struct snmp_tree_node +{ + /** inherited "base class" members */ + struct snmp_node node; + u16_t subnode_count; + const struct snmp_node* const *subnodes; +}; + +#define SNMP_CREATE_TREE_NODE(oid, subnodes) \ + {{ SNMP_NODE_TREE, (oid) }, \ + (u16_t)LWIP_ARRAYSIZE(subnodes), (subnodes) } + +#define SNMP_CREATE_EMPTY_TREE_NODE(oid) \ + {{ SNMP_NODE_TREE, (oid) }, \ + 0, NULL } + +/** SNMP leaf node */ +struct snmp_leaf_node +{ + /** inherited "base class" members */ + struct snmp_node node; + snmp_err_t (*get_instance)(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + snmp_err_t (*get_next_instance)(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +}; + +/** represents a single mib with its base oid and root node */ +struct snmp_mib +{ + const u32_t *base_oid; + u8_t base_oid_len; + const struct snmp_node *root_node; +}; + +#define SNMP_MIB_CREATE(oid_list, root_node) { (oid_list), (u8_t)LWIP_ARRAYSIZE(oid_list), root_node } + +/** OID range structure */ +struct snmp_oid_range +{ + u32_t min; + u32_t max; +}; + +/** checks if incoming OID length and values are in allowed ranges */ +u8_t snmp_oid_in_range(const u32_t *oid_in, u8_t oid_len, const struct snmp_oid_range *oid_ranges, u8_t oid_ranges_len); + +typedef enum { + SNMP_NEXT_OID_STATUS_SUCCESS, + SNMP_NEXT_OID_STATUS_NO_MATCH, + SNMP_NEXT_OID_STATUS_BUF_TO_SMALL +} snmp_next_oid_status_t; + +/** state for next_oid_init / next_oid_check functions */ +struct snmp_next_oid_state +{ + const u32_t* start_oid; + u8_t start_oid_len; + + u32_t* next_oid; + u8_t next_oid_len; + u8_t next_oid_max_len; + + snmp_next_oid_status_t status; + void* reference; +}; + +void snmp_next_oid_init(struct snmp_next_oid_state *state, + const u32_t *start_oid, u8_t start_oid_len, + u32_t *next_oid_buf, u8_t next_oid_max_len); +u8_t snmp_next_oid_precheck(struct snmp_next_oid_state *state, const u32_t *oid, u8_t oid_len); +u8_t snmp_next_oid_check(struct snmp_next_oid_state *state, const u32_t *oid, u8_t oid_len, void* reference); + +void snmp_oid_assign(struct snmp_obj_id* target, const u32_t *oid, u8_t oid_len); +void snmp_oid_combine(struct snmp_obj_id* target, const u32_t *oid1, u8_t oid1_len, const u32_t *oid2, u8_t oid2_len); +void snmp_oid_prefix(struct snmp_obj_id* target, const u32_t *oid, u8_t oid_len); +void snmp_oid_append(struct snmp_obj_id* target, const u32_t *oid, u8_t oid_len); +u8_t snmp_oid_equal(const u32_t *oid1, u8_t oid1_len, 
const u32_t *oid2, u8_t oid2_len); +s8_t snmp_oid_compare(const u32_t *oid1, u8_t oid1_len, const u32_t *oid2, u8_t oid2_len); + +#if LWIP_IPV4 +u8_t snmp_oid_to_ip4(const u32_t *oid, ip4_addr_t *ip); +void snmp_ip4_to_oid(const ip4_addr_t *ip, u32_t *oid); +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 +u8_t snmp_oid_to_ip6(const u32_t *oid, ip6_addr_t *ip); +void snmp_ip6_to_oid(const ip6_addr_t *ip, u32_t *oid); +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 || LWIP_IPV6 +u8_t snmp_ip_to_oid(const ip_addr_t *ip, u32_t *oid); +u8_t snmp_ip_port_to_oid(const ip_addr_t *ip, u16_t port, u32_t *oid); + +u8_t snmp_oid_to_ip(const u32_t *oid, u8_t oid_len, ip_addr_t *ip); +u8_t snmp_oid_to_ip_port(const u32_t *oid, u8_t oid_len, ip_addr_t *ip, u16_t *port); +#endif /* LWIP_IPV4 || LWIP_IPV6 */ + +struct netif; +u8_t netif_to_num(const struct netif *netif); + +snmp_err_t snmp_set_test_ok(struct snmp_node_instance* instance, u16_t value_len, void* value); /* generic function which can be used if test is always successful */ + +err_t snmp_decode_bits(const u8_t *buf, u32_t buf_len, u32_t *bit_value); +err_t snmp_decode_truthvalue(const s32_t *asn1_value, u8_t *bool_value); +u8_t snmp_encode_bits(u8_t *buf, u32_t buf_len, u32_t bit_value, u8_t bit_count); +u8_t snmp_encode_truthvalue(s32_t *asn1_value, u32_t bool_value); + +struct snmp_statistics +{ + u32_t inpkts; + u32_t outpkts; + u32_t inbadversions; + u32_t inbadcommunitynames; + u32_t inbadcommunityuses; + u32_t inasnparseerrs; + u32_t intoobigs; + u32_t innosuchnames; + u32_t inbadvalues; + u32_t inreadonlys; + u32_t ingenerrs; + u32_t intotalreqvars; + u32_t intotalsetvars; + u32_t ingetrequests; + u32_t ingetnexts; + u32_t insetrequests; + u32_t ingetresponses; + u32_t intraps; + u32_t outtoobigs; + u32_t outnosuchnames; + u32_t outbadvalues; + u32_t outgenerrs; + u32_t outgetrequests; + u32_t outgetnexts; + u32_t outsetrequests; + u32_t outgetresponses; + u32_t outtraps; +#if LWIP_SNMP_V3 + u32_t unsupportedseclevels; + u32_t notintimewindows; + u32_t unknownusernames; + u32_t unknownengineids; + u32_t wrongdigests; + u32_t decryptionerrors; +#endif +}; + +extern struct snmp_statistics snmp_stats; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_SNMP */ + +#endif /* LWIP_HDR_APPS_SNMP_CORE_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_mib2.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_mib2.h new file mode 100644 index 0000000..2f4a689 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_mib2.h @@ -0,0 +1,78 @@ +/** + * @file + * SNMP MIB2 API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ +#ifndef LWIP_HDR_APPS_SNMP_MIB2_H +#define LWIP_HDR_APPS_SNMP_MIB2_H + +#include "lwip/apps/snmp_opts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ +#if SNMP_LWIP_MIB2 + +#include "lwip/apps/snmp_core.h" + +extern const struct snmp_mib mib2; + +#if SNMP_USE_NETCONN +#include "lwip/apps/snmp_threadsync.h" +void snmp_mib2_lwip_synchronizer(snmp_threadsync_called_fn fn, void* arg); +extern struct snmp_threadsync_instance snmp_mib2_lwip_locks; +#endif + +#ifndef SNMP_SYSSERVICES +#define SNMP_SYSSERVICES ((1 << 6) | (1 << 3) | ((IP_FORWARD) << 2)) +#endif + +void snmp_mib2_set_sysdescr(const u8_t* str, const u16_t* len); /* read-only be defintion */ +void snmp_mib2_set_syscontact(u8_t *ocstr, u16_t *ocstrlen, u16_t bufsize); +void snmp_mib2_set_syscontact_readonly(const u8_t *ocstr, const u16_t *ocstrlen); +void snmp_mib2_set_sysname(u8_t *ocstr, u16_t *ocstrlen, u16_t bufsize); +void snmp_mib2_set_sysname_readonly(const u8_t *ocstr, const u16_t *ocstrlen); +void snmp_mib2_set_syslocation(u8_t *ocstr, u16_t *ocstrlen, u16_t bufsize); +void snmp_mib2_set_syslocation_readonly(const u8_t *ocstr, const u16_t *ocstrlen); + +#endif /* SNMP_LWIP_MIB2 */ +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_MIB2_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_opts.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_opts.h new file mode 100644 index 0000000..c892d22 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_opts.h @@ -0,0 +1,297 @@ +/** + * @file + * SNMP server options list + */ + +/* + * Copyright (c) 2015 Dirk Ziegelmeier + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ +#ifndef LWIP_HDR_SNMP_OPTS_H +#define LWIP_HDR_SNMP_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup snmp_opts Options + * @ingroup snmp + * @{ + */ + +/** + * LWIP_SNMP==1: This enables the lwIP SNMP agent. UDP must be available + * for SNMP transport. + * If you want to use your own SNMP agent, leave this disabled. + * To integrate MIB2 of an external agent, you need to enable + * LWIP_MIB2_CALLBACKS and MIB2_STATS. This will give you the callbacks + * and statistics counters you need to get MIB2 working. + */ +#if !defined LWIP_SNMP || defined __DOXYGEN__ +#define LWIP_SNMP 0 +#endif + +/** + * SNMP_USE_NETCONN: Use netconn API instead of raw API. + * Makes SNMP agent run in a worker thread, so blocking operations + * can be done in MIB calls. + */ +#if !defined SNMP_USE_NETCONN || defined __DOXYGEN__ +#define SNMP_USE_NETCONN 0 +#endif + +/** + * SNMP_USE_RAW: Use raw API. + * SNMP agent does not run in a worker thread, so blocking operations + * should not be done in MIB calls. + */ +#if !defined SNMP_USE_RAW || defined __DOXYGEN__ +#define SNMP_USE_RAW 1 +#endif + +#if SNMP_USE_NETCONN && SNMP_USE_RAW +#error SNMP stack can use only one of the APIs {raw, netconn} +#endif + +#if LWIP_SNMP && !SNMP_USE_NETCONN && !SNMP_USE_RAW +#error SNMP stack needs a receive API and UDP {raw, netconn} +#endif + +#if SNMP_USE_NETCONN +/** + * SNMP_STACK_SIZE: Stack size of SNMP netconn worker thread + */ +#if !defined SNMP_STACK_SIZE || defined __DOXYGEN__ +#define SNMP_STACK_SIZE DEFAULT_THREAD_STACKSIZE +#endif + +/** + * SNMP_THREAD_PRIO: SNMP netconn worker thread priority + */ +#if !defined SNMP_THREAD_PRIO || defined __DOXYGEN__ +#define SNMP_THREAD_PRIO DEFAULT_THREAD_PRIO +#endif +#endif /* SNMP_USE_NETCONN */ + +/** + * SNMP_TRAP_DESTINATIONS: Number of trap destinations. At least one trap + * destination is required + */ +#if !defined SNMP_TRAP_DESTINATIONS || defined __DOXYGEN__ +#define SNMP_TRAP_DESTINATIONS 1 +#endif + +/** + * Only allow SNMP write actions that are 'safe' (e.g. disabling netifs is not + * a safe action and disabled when SNMP_SAFE_REQUESTS = 1). + * Unsafe requests are disabled by default! + */ +#if !defined SNMP_SAFE_REQUESTS || defined __DOXYGEN__ +#define SNMP_SAFE_REQUESTS 1 +#endif + +/** + * The maximum length of strings used. + */ +#if !defined SNMP_MAX_OCTET_STRING_LEN || defined __DOXYGEN__ +#define SNMP_MAX_OCTET_STRING_LEN 127 +#endif + +/** + * The maximum number of Sub ID's inside an object identifier. + * Indirectly this also limits the maximum depth of SNMP tree. + */ +#if !defined SNMP_MAX_OBJ_ID_LEN || defined __DOXYGEN__ +#define SNMP_MAX_OBJ_ID_LEN 50 +#endif + +#if !defined SNMP_MAX_VALUE_SIZE || defined __DOXYGEN__ +/** + * The minimum size of a value. + */ +#define SNMP_MIN_VALUE_SIZE (2 * sizeof(u32_t*)) /* size required to store the basic types (8 bytes for counter64) */ +/** + * The maximum size of a value. 
+ */ +#define SNMP_MAX_VALUE_SIZE LWIP_MAX(LWIP_MAX((SNMP_MAX_OCTET_STRING_LEN), sizeof(u32_t)*(SNMP_MAX_OBJ_ID_LEN)), SNMP_MIN_VALUE_SIZE) +#endif + +/** + * The snmp read-access community. Used for write-access and traps, too + * unless SNMP_COMMUNITY_WRITE or SNMP_COMMUNITY_TRAP are enabled, respectively. + */ +#if !defined SNMP_COMMUNITY || defined __DOXYGEN__ +#define SNMP_COMMUNITY "public" +#endif + +/** + * The snmp write-access community. + * Set this community to "" in order to disallow any write access. + */ +#if !defined SNMP_COMMUNITY_WRITE || defined __DOXYGEN__ +#define SNMP_COMMUNITY_WRITE "private" +#endif + +/** + * The snmp community used for sending traps. + */ +#if !defined SNMP_COMMUNITY_TRAP || defined __DOXYGEN__ +#define SNMP_COMMUNITY_TRAP "public" +#endif + +/** + * The maximum length of community string. + * If community names shall be adjusted at runtime via snmp_set_community() calls, + * enter here the possible maximum length (+1 for terminating null character). + */ +#if !defined SNMP_MAX_COMMUNITY_STR_LEN || defined __DOXYGEN__ +#define SNMP_MAX_COMMUNITY_STR_LEN LWIP_MAX(LWIP_MAX(sizeof(SNMP_COMMUNITY), sizeof(SNMP_COMMUNITY_WRITE)), sizeof(SNMP_COMMUNITY_TRAP)) +#endif + +/** + * The OID identifiying the device. This may be the enterprise OID itself or any OID located below it in tree. + */ +#if !defined SNMP_DEVICE_ENTERPRISE_OID || defined __DOXYGEN__ +#define SNMP_LWIP_ENTERPRISE_OID 26381 +/** + * IANA assigned enterprise ID for lwIP is 26381 + * @see http://www.iana.org/assignments/enterprise-numbers + * + * @note this enterprise ID is assigned to the lwIP project, + * all object identifiers living under this ID are assigned + * by the lwIP maintainers! + * @note don't change this define, use snmp_set_device_enterprise_oid() + * + * If you need to create your own private MIB you'll need + * to apply for your own enterprise ID with IANA: + * http://www.iana.org/numbers.html + */ +#define SNMP_DEVICE_ENTERPRISE_OID {1, 3, 6, 1, 4, 1, SNMP_LWIP_ENTERPRISE_OID} +/** + * Length of SNMP_DEVICE_ENTERPRISE_OID + */ +#define SNMP_DEVICE_ENTERPRISE_OID_LEN 7 +#endif + +/** + * SNMP_DEBUG: Enable debugging for SNMP messages. + */ +#if !defined SNMP_DEBUG || defined __DOXYGEN__ +#define SNMP_DEBUG LWIP_DBG_OFF +#endif + +/** + * SNMP_MIB_DEBUG: Enable debugging for SNMP MIBs. + */ +#if !defined SNMP_MIB_DEBUG || defined __DOXYGEN__ +#define SNMP_MIB_DEBUG LWIP_DBG_OFF +#endif + +/** + * Indicates if the MIB2 implementation of LWIP SNMP stack is used. + */ +#if !defined SNMP_LWIP_MIB2 || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2 LWIP_SNMP +#endif + +/** + * Value return for sysDesc field of MIB2. + */ +#if !defined SNMP_LWIP_MIB2_SYSDESC || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2_SYSDESC "lwIP" +#endif + +/** + * Value return for sysName field of MIB2. + * To make sysName field settable, call snmp_mib2_set_sysname() to provide the necessary buffers. + */ +#if !defined SNMP_LWIP_MIB2_SYSNAME || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2_SYSNAME "FQDN-unk" +#endif + +/** + * Value return for sysContact field of MIB2. + * To make sysContact field settable, call snmp_mib2_set_syscontact() to provide the necessary buffers. + */ +#if !defined SNMP_LWIP_MIB2_SYSCONTACT || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2_SYSCONTACT "" +#endif + +/** + * Value return for sysLocation field of MIB2. + * To make sysLocation field settable, call snmp_mib2_set_syslocation() to provide the necessary buffers. 
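
Like the MQTT options, the SNMP settings in snmp_opts.h are meant to be overridden from lwipopts.h. An illustrative fragment (values are examples, not recommendations):

#define LWIP_SNMP               1
#define SNMP_USE_NETCONN        1          /* agent runs in its own worker thread */
#define SNMP_USE_RAW            0          /* exactly one of the two APIs may be selected */
#define SNMP_COMMUNITY          "public"
#define SNMP_COMMUNITY_WRITE    ""         /* empty string disallows write access */
#define SNMP_LWIP_MIB2_SYSNAME  "my-board"
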
+ */ +#if !defined SNMP_LWIP_MIB2_SYSLOCATION || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2_SYSLOCATION "" +#endif + +/** + * This value is used to limit the repetitions processed in GetBulk requests (value == 0 means no limitation). + * This may be useful to limit the load for a single request. + * According to SNMP RFC 1905 it is allowed to not return all requested variables from a GetBulk request if system load would be too high. + * so the effect is that the client will do more requests to gather all data. + * For the stack this could be useful in case that SNMP processing is done in TCP/IP thread. In this situation a request with many + * repetitions could block the thread for a longer time. Setting limit here will keep the stack more responsive. + */ +#if !defined SNMP_LWIP_GETBULK_MAX_REPETITIONS || defined __DOXYGEN__ +#define SNMP_LWIP_GETBULK_MAX_REPETITIONS 0 +#endif + +/** + * @} + */ + +/* + ------------------------------------ + ---------- SNMPv3 options ---------- + ------------------------------------ +*/ + +/** + * LWIP_SNMP_V3==1: This enables EXPERIMENTAL SNMPv3 support. LWIP_SNMP must + * also be enabled. + * THIS IS UNDER DEVELOPMENT AND SHOULD NOT BE ENABLED IN PRODUCTS. + */ +#ifndef LWIP_SNMP_V3 +#define LWIP_SNMP_V3 0 +#endif + +#ifndef LWIP_SNMP_V3_MBEDTLS +#define LWIP_SNMP_V3_MBEDTLS LWIP_SNMP_V3 +#endif + +#ifndef LWIP_SNMP_V3_CRYPTO +#define LWIP_SNMP_V3_CRYPTO LWIP_SNMP_V3_MBEDTLS +#endif + +#ifndef LWIP_SNMP_CONFIGURE_VERSIONS +#define LWIP_SNMP_CONFIGURE_VERSIONS 0 +#endif + +#endif /* LWIP_HDR_SNMP_OPTS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_scalar.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_scalar.h new file mode 100644 index 0000000..40a060c --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_scalar.h @@ -0,0 +1,113 @@ +/** + * @file + * SNMP server MIB API to implement scalar nodes + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Martin Hentschel + * + */ + +#ifndef LWIP_HDR_APPS_SNMP_SCALAR_H +#define LWIP_HDR_APPS_SNMP_SCALAR_H + +#include "lwip/apps/snmp_opts.h" +#include "lwip/apps/snmp_core.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +/** basic scalar node */ +struct snmp_scalar_node +{ + /** inherited "base class" members */ + struct snmp_leaf_node node; + u8_t asn1_type; + snmp_access_t access; + node_instance_get_value_method get_value; + node_instance_set_test_method set_test; + node_instance_set_value_method set_value; +}; + + +snmp_err_t snmp_scalar_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_scalar_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +#define SNMP_SCALAR_CREATE_NODE(oid, access, asn1_type, get_value_method, set_test_method, set_value_method) \ + {{{ SNMP_NODE_SCALAR, (oid) }, \ + snmp_scalar_get_instance, \ + snmp_scalar_get_next_instance }, \ + (asn1_type), (access), (get_value_method), (set_test_method), (set_value_method) } + +#define SNMP_SCALAR_CREATE_NODE_READONLY(oid, asn1_type, get_value_method) SNMP_SCALAR_CREATE_NODE(oid, SNMP_NODE_INSTANCE_READ_ONLY, asn1_type, get_value_method, NULL, NULL) + +/** scalar array node - a tree node which contains scalars only as children */ +struct snmp_scalar_array_node_def +{ + u32_t oid; + u8_t asn1_type; + snmp_access_t access; +}; + +typedef s16_t (*snmp_scalar_array_get_value_method)(const struct snmp_scalar_array_node_def*, void*); +typedef snmp_err_t (*snmp_scalar_array_set_test_method)(const struct snmp_scalar_array_node_def*, u16_t, void*); +typedef snmp_err_t (*snmp_scalar_array_set_value_method)(const struct snmp_scalar_array_node_def*, u16_t, void*); + +/** basic scalar array node */ +struct snmp_scalar_array_node +{ + /** inherited "base class" members */ + struct snmp_leaf_node node; + u16_t array_node_count; + const struct snmp_scalar_array_node_def* array_nodes; + snmp_scalar_array_get_value_method get_value; + snmp_scalar_array_set_test_method set_test; + snmp_scalar_array_set_value_method set_value; +}; + +snmp_err_t snmp_scalar_array_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_scalar_array_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +#define SNMP_SCALAR_CREATE_ARRAY_NODE(oid, array_nodes, get_value_method, set_test_method, set_value_method) \ + {{{ SNMP_NODE_SCALAR_ARRAY, (oid) }, \ + snmp_scalar_array_get_instance, \ + snmp_scalar_array_get_next_instance }, \ + (u16_t)LWIP_ARRAYSIZE(array_nodes), (array_nodes), (get_value_method), (set_test_method), (set_value_method) } + +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_SCALAR_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_framework.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_framework.h new file mode 100644 index 0000000..47409cc --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_framework.h @@ -0,0 +1,32 @@ +/* +Generated by LwipMibCompiler +*/ + +#ifndef LWIP_HDR_APPS_SNMP_FRAMEWORK_MIB_H +#define LWIP_HDR_APPS_SNMP_FRAMEWORK_MIB_H + +#include "lwip/apps/snmp_opts.h" +#if LWIP_SNMP + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "lwip/apps/snmp_core.h" + +extern const struct snmp_obj_id 
usmNoAuthProtocol; +extern const struct snmp_obj_id usmHMACMD5AuthProtocol; +extern const struct snmp_obj_id usmHMACSHAAuthProtocol; + +extern const struct snmp_obj_id usmNoPrivProtocol; +extern const struct snmp_obj_id usmDESPrivProtocol; +extern const struct snmp_obj_id usmAESPrivProtocol; + +extern const struct snmp_mib snmpframeworkmib; + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* LWIP_SNMP */ +#endif /* LWIP_HDR_APPS_SNMP_FRAMEWORK_MIB_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_usm.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_usm.h new file mode 100644 index 0000000..88cfcd8 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_usm.h @@ -0,0 +1,24 @@ +/* +Generated by LwipMibCompiler +*/ + +#ifndef LWIP_HDR_APPS_SNMP_USER_BASED_SM_MIB_H +#define LWIP_HDR_APPS_SNMP_USER_BASED_SM_MIB_H + +#include "lwip/apps/snmp_opts.h" +#if LWIP_SNMP + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "lwip/apps/snmp_core.h" + +extern const struct snmp_mib snmpusmmib; + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* LWIP_SNMP */ +#endif /* LWIP_HDR_APPS_SNMP_USER_BASED_SM_MIB_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_table.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_table.h new file mode 100644 index 0000000..4988b51 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_table.h @@ -0,0 +1,134 @@ +/** + * @file + * SNMP server MIB API to implement table nodes + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Martin Hentschel + * + */ + +#ifndef LWIP_HDR_APPS_SNMP_TABLE_H +#define LWIP_HDR_APPS_SNMP_TABLE_H + +#include "lwip/apps/snmp_opts.h" +#include "lwip/apps/snmp_core.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +/** default (customizable) read/write table */ +struct snmp_table_col_def +{ + u32_t index; + u8_t asn1_type; + snmp_access_t access; +}; + +/** table node */ +struct snmp_table_node +{ + /** inherited "base class" members */ + struct snmp_leaf_node node; + u16_t column_count; + const struct snmp_table_col_def* columns; + snmp_err_t (*get_cell_instance)(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len, struct snmp_node_instance* cell_instance); + snmp_err_t (*get_next_cell_instance)(const u32_t* column, struct snmp_obj_id* row_oid, struct snmp_node_instance* cell_instance); + /** returns object value for the given object identifier */ + node_instance_get_value_method get_value; + /** tests length and/or range BEFORE setting */ + node_instance_set_test_method set_test; + /** sets object value, only called when set_test() was successful */ + node_instance_set_value_method set_value; +}; + +snmp_err_t snmp_table_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_table_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +#define SNMP_TABLE_CREATE(oid, columns, get_cell_instance_method, get_next_cell_instance_method, get_value_method, set_test_method, set_value_method) \ + {{{ SNMP_NODE_TABLE, (oid) }, \ + snmp_table_get_instance, \ + snmp_table_get_next_instance }, \ + (u16_t)LWIP_ARRAYSIZE(columns), (columns), \ + (get_cell_instance_method), (get_next_cell_instance_method), \ + (get_value_method), (set_test_method), (set_value_method)} + +#define SNMP_TABLE_GET_COLUMN_FROM_OID(oid) ((oid)[1]) /* first array value is (fixed) row entry (fixed to 1) and 2nd value is column, follow3ed by instance */ + + +/** simple read-only table */ +typedef enum { + SNMP_VARIANT_VALUE_TYPE_U32, + SNMP_VARIANT_VALUE_TYPE_S32, + SNMP_VARIANT_VALUE_TYPE_PTR, + SNMP_VARIANT_VALUE_TYPE_CONST_PTR +} snmp_table_column_data_type_t; + +struct snmp_table_simple_col_def +{ + u32_t index; + u8_t asn1_type; + snmp_table_column_data_type_t data_type; /* depending of what union member is used to store the value*/ +}; + +/** simple read-only table node */ +struct snmp_table_simple_node +{ + /* inherited "base class" members */ + struct snmp_leaf_node node; + u16_t column_count; + const struct snmp_table_simple_col_def* columns; + snmp_err_t (*get_cell_value)(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len, union snmp_variant_value* value, u32_t* value_len); + snmp_err_t (*get_next_cell_instance_and_value)(const u32_t* column, struct snmp_obj_id* row_oid, union snmp_variant_value* value, u32_t* value_len); +}; + +snmp_err_t snmp_table_simple_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_table_simple_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +#define SNMP_TABLE_CREATE_SIMPLE(oid, columns, get_cell_value_method, get_next_cell_instance_and_value_method) \ + {{{ SNMP_NODE_TABLE, (oid) }, \ + snmp_table_simple_get_instance, \ + snmp_table_simple_get_next_instance }, \ + (u16_t)LWIP_ARRAYSIZE(columns), (columns), (get_cell_value_method), (get_next_cell_instance_and_value_method) } + 
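+/* Editorial usage sketch (not part of upstream lwIP): one way an application might
+ * declare a simple read-only table with SNMP_TABLE_CREATE_SIMPLE. The column layout,
+ * the OID sub-identifier 2 and the my_table_* names are hypothetical; only the macro
+ * and the snmp_* types come from this header and snmp_core.h. Kept inside "#if 0" so
+ * the header itself is unchanged when compiled. */
+#if 0
+static const struct snmp_table_simple_col_def my_table_columns[] = {
+  { 1, SNMP_ASN1_TYPE_INTEGER,      SNMP_VARIANT_VALUE_TYPE_S32 },      /* column 1: integer value */
+  { 2, SNMP_ASN1_TYPE_OCTET_STRING, SNMP_VARIANT_VALUE_TYPE_CONST_PTR } /* column 2: string value  */
+};
+static snmp_err_t my_table_get_cell_value(const u32_t *column, const u32_t *row_oid, u8_t row_oid_len,
+                                          union snmp_variant_value *value, u32_t *value_len);
+static snmp_err_t my_table_get_next_cell(const u32_t *column, struct snmp_obj_id *row_oid,
+                                         union snmp_variant_value *value, u32_t *value_len);
+/* table node registered under OID sub-identifier 2 of its parent tree node */
+static const struct snmp_table_simple_node my_table =
+  SNMP_TABLE_CREATE_SIMPLE(2, my_table_columns, my_table_get_cell_value, my_table_get_next_cell);
+#endif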
+s16_t snmp_table_extract_value_from_s32ref(struct snmp_node_instance* instance, void* value); +s16_t snmp_table_extract_value_from_u32ref(struct snmp_node_instance* instance, void* value); +s16_t snmp_table_extract_value_from_refconstptr(struct snmp_node_instance* instance, void* value); + +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_TABLE_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_threadsync.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_threadsync.h new file mode 100644 index 0000000..a25dbf2 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_threadsync.h @@ -0,0 +1,114 @@ +/** + * @file + * SNMP server MIB API to implement thread synchronization + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ + +#ifndef LWIP_HDR_APPS_SNMP_THREADSYNC_H +#define LWIP_HDR_APPS_SNMP_THREADSYNC_H + +#include "lwip/apps/snmp_opts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/apps/snmp_core.h" +#include "lwip/sys.h" + +typedef void (*snmp_threadsync_called_fn)(void* arg); +typedef void (*snmp_threadsync_synchronizer_fn)(snmp_threadsync_called_fn fn, void* arg); + + +/** Thread sync runtime data. For internal usage only. */ +struct threadsync_data +{ + union { + snmp_err_t err; + s16_t s16; + } retval; + union { + const u32_t *root_oid; + void *value; + } arg1; + union { + u8_t root_oid_len; + u16_t len; + } arg2; + const struct snmp_threadsync_node *threadsync_node; + struct snmp_node_instance proxy_instance; +}; + +/** Thread sync instance. Needed EXCATLY once for every thread to be synced into. 
*/ +struct snmp_threadsync_instance +{ + sys_sem_t sem; + sys_mutex_t sem_usage_mutex; + snmp_threadsync_synchronizer_fn sync_fn; + struct threadsync_data data; +}; + +/** SNMP thread sync proxy leaf node */ +struct snmp_threadsync_node +{ + /* inherited "base class" members */ + struct snmp_leaf_node node; + + const struct snmp_leaf_node *target; + struct snmp_threadsync_instance *instance; +}; + +snmp_err_t snmp_threadsync_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_threadsync_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +/** Create thread sync proxy node */ +#define SNMP_CREATE_THREAD_SYNC_NODE(oid, target_leaf_node, threadsync_instance) \ + {{{ SNMP_NODE_THREADSYNC, (oid) }, \ + snmp_threadsync_get_instance, \ + snmp_threadsync_get_next_instance }, \ + (target_leaf_node), \ + (threadsync_instance) } + +/** Create thread sync instance data */ +void snmp_threadsync_init(struct snmp_threadsync_instance *instance, snmp_threadsync_synchronizer_fn sync_fn); + +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_THREADSYNC_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmpv3.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmpv3.h new file mode 100644 index 0000000..e0dda64 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmpv3.h @@ -0,0 +1,114 @@ +/** + * @file + * Additional SNMPv3 functionality RFC3414 and RFC3826. + */ + +/* + * Copyright (c) 2016 Elias Oenal. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. 
+ * + * Author: Elias Oenal + */ + +#ifndef LWIP_HDR_APPS_SNMP_V3_H +#define LWIP_HDR_APPS_SNMP_V3_H + +#include "lwip/apps/snmp_opts.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP && LWIP_SNMP_V3 + +typedef enum +{ + SNMP_V3_AUTH_ALGO_INVAL = 0, + SNMP_V3_AUTH_ALGO_MD5 = 1, + SNMP_V3_AUTH_ALGO_SHA = 2 +} snmpv3_auth_algo_t; + +typedef enum +{ + SNMP_V3_PRIV_ALGO_INVAL = 0, + SNMP_V3_PRIV_ALGO_DES = 1, + SNMP_V3_PRIV_ALGO_AES = 2 +} snmpv3_priv_algo_t; + +typedef enum +{ + SNMP_V3_USER_STORAGETYPE_OTHER = 1, + SNMP_V3_USER_STORAGETYPE_VOLATILE = 2, + SNMP_V3_USER_STORAGETYPE_NONVOLATILE = 3, + SNMP_V3_USER_STORAGETYPE_PERMANENT = 4, + SNMP_V3_USER_STORAGETYPE_READONLY = 5 +} snmpv3_user_storagetype_t; + +/* + * The following callback functions must be implemented by the application. + * There is a dummy implementation in snmpv3_dummy.c. + */ + +void snmpv3_get_engine_id(const char **id, u8_t *len); +err_t snmpv3_set_engine_id(const char* id, u8_t len); + +u32_t snmpv3_get_engine_boots(void); +void snmpv3_set_engine_boots(u32_t boots); + +u32_t snmpv3_get_engine_time(void); +void snmpv3_reset_engine_time(void); + +err_t snmpv3_get_user(const char* username, snmpv3_auth_algo_t *auth_algo, u8_t *auth_key, snmpv3_priv_algo_t *priv_algo, u8_t *priv_key); +u8_t snmpv3_get_amount_of_users(void); +err_t snmpv3_get_user_storagetype(const char *username, snmpv3_user_storagetype_t *storagetype); +err_t snmpv3_get_username(char *username, u8_t index); + +/* The following functions are provided by the SNMPv3 agent */ + +void snmpv3_engine_id_changed(void); +s32_t snmpv3_get_engine_time_internal(void); + +void snmpv3_password_to_key_md5( + const u8_t *password, /* IN */ + size_t passwordlen, /* IN */ + const u8_t *engineID, /* IN - pointer to snmpEngineID */ + u8_t engineLength, /* IN - length of snmpEngineID */ + u8_t *key); /* OUT - pointer to caller 16-octet buffer */ + +void snmpv3_password_to_key_sha( + const u8_t *password, /* IN */ + size_t passwordlen, /* IN */ + const u8_t *engineID, /* IN - pointer to snmpEngineID */ + u8_t engineLength, /* IN - length of snmpEngineID */ + u8_t *key); /* OUT - pointer to caller 20-octet buffer */ + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_V3_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp.h new file mode 100644 index 0000000..3c0f95f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp.h @@ -0,0 +1,80 @@ +/** + * @file + * SNTP client API + */ + +/* + * Copyright (c) 2007-2009 Frédéric Bernon, Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Frédéric Bernon, Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_APPS_SNTP_H +#define LWIP_HDR_APPS_SNTP_H + +#include "lwip/apps/sntp_opts.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* SNTP operating modes: default is to poll using unicast. + The mode has to be set before calling sntp_init(). */ +#define SNTP_OPMODE_POLL 0 +#define SNTP_OPMODE_LISTENONLY 1 +void sntp_setoperatingmode(u8_t operating_mode); +u8_t sntp_getoperatingmode(void); + +void sntp_init(void); +void sntp_stop(void); +u8_t sntp_enabled(void); + +void sntp_setserver(u8_t idx, const ip_addr_t *addr); +const ip_addr_t* sntp_getserver(u8_t idx); + +#if SNTP_MONITOR_SERVER_REACHABILITY +u8_t sntp_getreachability(u8_t idx); +#endif /* SNTP_MONITOR_SERVER_REACHABILITY */ + +#if SNTP_SERVER_DNS +void sntp_setservername(u8_t idx, const char *server); +const char *sntp_getservername(u8_t idx); +#endif /* SNTP_SERVER_DNS */ + +#if SNTP_GET_SERVERS_FROM_DHCP +void sntp_servermode_dhcp(int set_servers_from_dhcp); +#else /* SNTP_GET_SERVERS_FROM_DHCP */ +#define sntp_servermode_dhcp(x) +#endif /* SNTP_GET_SERVERS_FROM_DHCP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNTP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp_opts.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp_opts.h new file mode 100644 index 0000000..ed98040 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp_opts.h @@ -0,0 +1,209 @@ +/** + * @file + * SNTP client options list + */ + +/* + * Copyright (c) 2007-2009 Frédéric Bernon, Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Frédéric Bernon, Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_APPS_SNTP_OPTS_H +#define LWIP_HDR_APPS_SNTP_OPTS_H + +#include "lwip/opt.h" +#include "lwip/prot/iana.h" + +/** + * @defgroup sntp_opts Options + * @ingroup sntp + * @{ + */ + +/** SNTP macro to change system time in seconds + * Define SNTP_SET_SYSTEM_TIME_US(sec, us) to set the time in microseconds + * instead of this one if you need the additional precision. Alternatively, + * define SNTP_SET_SYSTEM_TIME_NTP(sec, frac) in order to work with native + * NTP timestamps instead. + */ +#if !defined SNTP_SET_SYSTEM_TIME || defined __DOXYGEN__ +#define SNTP_SET_SYSTEM_TIME(sec) LWIP_UNUSED_ARG(sec) +#endif + +/** The maximum number of SNTP servers that can be set */ +#if !defined SNTP_MAX_SERVERS || defined __DOXYGEN__ +#define SNTP_MAX_SERVERS LWIP_DHCP_MAX_NTP_SERVERS +#endif + +/** Set this to 1 to implement the callback function called by dhcp when + * NTP servers are received. */ +#if !defined SNTP_GET_SERVERS_FROM_DHCP || defined __DOXYGEN__ +#define SNTP_GET_SERVERS_FROM_DHCP LWIP_DHCP_GET_NTP_SRV +#endif + +/** Set this to 1 to support DNS names (or IP address strings) to set sntp servers + * One server address/name can be defined as default if SNTP_SERVER_DNS == 1: + * \#define SNTP_SERVER_ADDRESS "pool.ntp.org" + */ +#if !defined SNTP_SERVER_DNS || defined __DOXYGEN__ +#define SNTP_SERVER_DNS 0 +#endif + +/** + * SNTP_DEBUG: Enable debugging for SNTP. + */ +#if !defined SNTP_DEBUG || defined __DOXYGEN__ +#define SNTP_DEBUG LWIP_DBG_OFF +#endif + +/** SNTP server port */ +#if !defined SNTP_PORT || defined __DOXYGEN__ +#define SNTP_PORT LWIP_IANA_PORT_SNTP +#endif + +/** Sanity check: + * Define this to + * - 0 to turn off sanity checks (default; smaller code) + * - >= 1 to check address and port of the response packet to ensure the + * response comes from the server we sent the request to. + * - >= 2 to check returned Originate Timestamp against Transmit Timestamp + * sent to the server (to ensure response to older request). + * - >= 3 @todo: discard reply if any of the VN, Stratum, or Transmit Timestamp + * fields is 0 or the Mode field is not 4 (unicast) or 5 (broadcast). + * - >= 4 @todo: to check that the Root Delay and Root Dispersion fields are each + * greater than or equal to 0 and less than infinity, where infinity is + * currently a cozy number like one second. This check avoids using a + * server whose synchronization source has expired for a very long time. + */ +#if !defined SNTP_CHECK_RESPONSE || defined __DOXYGEN__ +#define SNTP_CHECK_RESPONSE 0 +#endif + +/** Enable round-trip delay compensation. + * Compensate for the round-trip delay by calculating the clock offset from + * the originate, receive, transmit and destination timestamps, as per RFC. + * + * The calculation requires compiler support for 64-bit integers. 
Also, either + * SNTP_SET_SYSTEM_TIME_US or SNTP_SET_SYSTEM_TIME_NTP has to be implemented + * for setting the system clock with sub-second precision. Likewise, either + * SNTP_GET_SYSTEM_TIME or SNTP_GET_SYSTEM_TIME_NTP needs to be implemented + * with sub-second precision. + * + * Although not strictly required, it makes sense to combine this option with + * SNTP_CHECK_RESPONSE >= 2 for sanity-checking of the received timestamps. + * Also, in order for the round-trip calculation to work, the difference + * between the local clock and the NTP server clock must not be larger than + * about 34 years. If that limit is exceeded, the implementation will fall back + * to setting the clock without compensation. In order to ensure that the local + * clock is always within the permitted range for compensation, even at first + * try, it may be necessary to store at least the current year in non-volatile + * memory. + */ +#if !defined SNTP_COMP_ROUNDTRIP || defined __DOXYGEN__ +#define SNTP_COMP_ROUNDTRIP 0 +#endif + +/** According to the RFC, this shall be a random delay + * between 1 and 5 minutes (in milliseconds) to prevent load peaks. + * This can be defined to a random generation function, + * which must return the delay in milliseconds as u32_t. + * Turned off by default. + */ +#if !defined SNTP_STARTUP_DELAY || defined __DOXYGEN__ +#ifdef LWIP_RAND +#define SNTP_STARTUP_DELAY 1 +#else +#define SNTP_STARTUP_DELAY 0 +#endif +#endif + +/** If you want the startup delay to be a function, define this + * to a function (including the brackets) and define SNTP_STARTUP_DELAY to 1. + */ +#if !defined SNTP_STARTUP_DELAY_FUNC || defined __DOXYGEN__ +#define SNTP_STARTUP_DELAY_FUNC (LWIP_RAND() % 5000) +#endif + +/** SNTP receive timeout - in milliseconds + * Also used as retry timeout - this shouldn't be too low. + * Default is 15 seconds. Must not be beolw 15 seconds by specification (i.e. 15000) + */ +#if !defined SNTP_RECV_TIMEOUT || defined __DOXYGEN__ +#define SNTP_RECV_TIMEOUT 15000 +#endif + +/** SNTP update delay - in milliseconds + * Default is 1 hour. Must not be beolw 60 seconds by specification (i.e. 60000) + */ +#if !defined SNTP_UPDATE_DELAY || defined __DOXYGEN__ +#define SNTP_UPDATE_DELAY 3600000 +#endif + +/** SNTP macro to get system time, used with SNTP_CHECK_RESPONSE >= 2 + * to send in request and compare in response. Also used for round-trip + * delay compensation if SNTP_COMP_ROUNDTRIP != 0. + * Alternatively, define SNTP_GET_SYSTEM_TIME_NTP(sec, frac) in order to + * work with native NTP timestamps instead. + */ +#if !defined SNTP_GET_SYSTEM_TIME || defined __DOXYGEN__ +#define SNTP_GET_SYSTEM_TIME(sec, us) do { (sec) = 0; (us) = 0; } while(0) +#endif + +/** Default retry timeout (in milliseconds) if the response + * received is invalid. + * This is doubled with each retry until SNTP_RETRY_TIMEOUT_MAX is reached. + */ +#if !defined SNTP_RETRY_TIMEOUT || defined __DOXYGEN__ +#define SNTP_RETRY_TIMEOUT SNTP_RECV_TIMEOUT +#endif + +/** Maximum retry timeout (in milliseconds). */ +#if !defined SNTP_RETRY_TIMEOUT_MAX || defined __DOXYGEN__ +#define SNTP_RETRY_TIMEOUT_MAX (SNTP_RETRY_TIMEOUT * 10) +#endif + +/** Increase retry timeout with every retry sent + * Default is on to conform to RFC. + */ +#if !defined SNTP_RETRY_TIMEOUT_EXP || defined __DOXYGEN__ +#define SNTP_RETRY_TIMEOUT_EXP 1 +#endif + +/** Keep a reachability shift register per server + * Default is on to conform to RFC. 
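+ *
+ * Editorial usage sketch (not part of upstream lwIP): with this option enabled, an
+ * application can configure the client through the API in sntp.h and later inspect
+ * the reachability register. Server index 0 and the DNS name are examples only, and
+ * the name-based call requires SNTP_SERVER_DNS == 1:
+ *
+ *   sntp_setoperatingmode(SNTP_OPMODE_POLL);
+ *   sntp_setservername(0, "pool.ntp.org");
+ *   sntp_init();
+ *   ...
+ *   u8_t reach = sntp_getreachability(0);   (non-zero when recent requests were answered)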
+ */ +#if !defined SNTP_MONITOR_SERVER_REACHABILITY || defined __DOXYGEN__ +#define SNTP_MONITOR_SERVER_REACHABILITY 1 +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_SNTP_OPTS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_opts.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_opts.h new file mode 100644 index 0000000..198f632 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_opts.h @@ -0,0 +1,106 @@ +/** + * + * @file tftp_opts.h + * + * @author Logan Gunthorpe + * + * @brief Trivial File Transfer Protocol (RFC 1350) implementation options + * + * Copyright (c) Deltatee Enterprises Ltd. 2013 + * All rights reserved. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without + * modification,are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Logan Gunthorpe + * + */ + +#ifndef LWIP_HDR_APPS_TFTP_OPTS_H +#define LWIP_HDR_APPS_TFTP_OPTS_H + +#include "lwip/opt.h" +#include "lwip/prot/iana.h" + +/** + * @defgroup tftp_opts Options + * @ingroup tftp + * @{ + */ + +/** + * Enable TFTP debug messages + */ +#if !defined TFTP_DEBUG || defined __DOXYGEN__ +#define TFTP_DEBUG LWIP_DBG_OFF +#endif + +/** + * TFTP server port + */ +#if !defined TFTP_PORT || defined __DOXYGEN__ +#define TFTP_PORT LWIP_IANA_PORT_TFTP +#endif + +/** + * TFTP timeout + */ +#if !defined TFTP_TIMEOUT_MSECS || defined __DOXYGEN__ +#define TFTP_TIMEOUT_MSECS 10000 +#endif + +/** + * Max. number of retries when a file is read from server + */ +#if !defined TFTP_MAX_RETRIES || defined __DOXYGEN__ +#define TFTP_MAX_RETRIES 5 +#endif + +/** + * TFTP timer cyclic interval + */ +#if !defined TFTP_TIMER_MSECS || defined __DOXYGEN__ +#define TFTP_TIMER_MSECS (TFTP_TIMEOUT_MSECS / 10) +#endif + +/** + * Max. length of TFTP filename + */ +#if !defined TFTP_MAX_FILENAME_LEN || defined __DOXYGEN__ +#define TFTP_MAX_FILENAME_LEN 20 +#endif + +/** + * Max. 
length of TFTP mode + */ +#if !defined TFTP_MAX_MODE_LEN || defined __DOXYGEN__ +#define TFTP_MAX_MODE_LEN 7 +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_TFTP_OPTS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_server.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_server.h new file mode 100644 index 0000000..0a7fbee --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_server.h @@ -0,0 +1,95 @@ +/** + * + * @file tftp_server.h + * + * @author Logan Gunthorpe + * + * @brief Trivial File Transfer Protocol (RFC 1350) + * + * Copyright (c) Deltatee Enterprises Ltd. 2013 + * All rights reserved. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without + * modification,are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Logan Gunthorpe + * + */ + +#ifndef LWIP_HDR_APPS_TFTP_SERVER_H +#define LWIP_HDR_APPS_TFTP_SERVER_H + +#include "lwip/apps/tftp_opts.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @ingroup tftp + * TFTP context containing callback functions for TFTP transfers + */ +struct tftp_context { + /** + * Open file for read/write. + * @param fname Filename + * @param mode Mode string from TFTP RFC 1350 (netascii, octet, mail) + * @param write Flag indicating read (0) or write (!= 0) access + * @returns File handle supplied to other functions + */ + void* (*open)(const char* fname, const char* mode, u8_t write); + /** + * Close file handle + * @param handle File handle returned by open() + */ + void (*close)(void* handle); + /** + * Read from file + * @param handle File handle returned by open() + * @param buf Target buffer to copy read data to + * @param bytes Number of bytes to copy to buf + * @returns >= 0: Success; < 0: Error + */ + int (*read)(void* handle, void* buf, int bytes); + /** + * Write to file + * @param handle File handle returned by open() + * @param pbuf PBUF adjusted such that payload pointer points + * to the beginning of write data. In other words, + * TFTP headers are stripped off. 
+ * @returns >= 0: Success; < 0: Error + */ + int (*write)(void* handle, struct pbuf* p); +}; + +err_t tftp_init(const struct tftp_context* ctx); +void tftp_cleanup(void); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_TFTP_SERVER_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/arch.h b/Middlewares/Third_Party/LwIP/src/include/lwip/arch.h new file mode 100644 index 0000000..58dae33 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/arch.h @@ -0,0 +1,393 @@ +/** + * @file + * Support for different processor and compiler architectures + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_ARCH_H +#define LWIP_HDR_ARCH_H + +#ifndef LITTLE_ENDIAN +#define LITTLE_ENDIAN 1234 +#endif + +#ifndef BIG_ENDIAN +#define BIG_ENDIAN 4321 +#endif + +#include "arch/cc.h" + +/** + * @defgroup compiler_abstraction Compiler/platform abstraction + * @ingroup sys_layer + * All defines related to this section must not be placed in lwipopts.h, + * but in arch/cc.h! + * If the compiler does not provide memset() this file must include a + * definition of it, or include a file which defines it. + * These options cannot be \#defined in lwipopts.h since they are not options + * of lwIP itself, but options of the lwIP port to your system. + * @{ + */ + +/** Define the byte order of the system. + * Needed for conversion of network data to host byte order. + * Allowed values: LITTLE_ENDIAN and BIG_ENDIAN + */ +#ifndef BYTE_ORDER +#define BYTE_ORDER LITTLE_ENDIAN +#endif + +/** Define random number generator function of your system */ +#ifdef __DOXYGEN__ +#define LWIP_RAND() ((u32_t)rand()) +#endif + +/** Platform specific diagnostic output.\n + * Note the default implementation pulls in printf, which may + * in turn pull in a lot of standard libary code. In resource-constrained + * systems, this should be defined to something less resource-consuming. 
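+ *
+ * Editorial illustration (not part of upstream lwIP): a port can supply a leaner
+ * definition from its arch/cc.h, for example by routing output to an existing
+ * lightweight logger; my_uart_printf is a hypothetical application function with a
+ * printf-style signature:
+ *
+ *   #define LWIP_PLATFORM_DIAG(x) do { my_uart_printf x; } while(0)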
+ */ +#ifndef LWIP_PLATFORM_DIAG +#define LWIP_PLATFORM_DIAG(x) do {printf x;} while(0) +#include <stdio.h> +#include <stdlib.h> +#endif + +/** Platform specific assertion handling.\n + * Note the default implementation pulls in printf, fflush and abort, which may + * in turn pull in a lot of standard library code. In resource-constrained + * systems, this should be defined to something less resource-consuming. + */ +#ifndef LWIP_PLATFORM_ASSERT +#define LWIP_PLATFORM_ASSERT(x) do {printf("Assertion \"%s\" failed at line %d in %s\n", \ + x, __LINE__, __FILE__); fflush(NULL); abort();} while(0) +#include <stdio.h> +#include <stdlib.h> +#endif + +/** Define this to 1 in arch/cc.h of your port if you do not want to + * include stddef.h header to get size_t. You need to typedef size_t + * by yourself in this case. + */ +#ifndef LWIP_NO_STDDEF_H +#define LWIP_NO_STDDEF_H 0 +#endif + +#if !LWIP_NO_STDDEF_H +#include <stddef.h> /* for size_t */ +#endif + +/** Define this to 1 in arch/cc.h of your port if your compiler does not provide + * the stdint.h header. You need to typedef the generic types listed in + * lwip/arch.h yourself in this case (u8_t, u16_t...). + */ +#ifndef LWIP_NO_STDINT_H +#define LWIP_NO_STDINT_H 0 +#endif + +/* Define generic types used in lwIP */ +#if !LWIP_NO_STDINT_H +#include <stdint.h> +/* stdint.h is C99 which should also provide support for 64-bit integers */ +#if !defined(LWIP_HAVE_INT64) && defined(UINT64_MAX) +#define LWIP_HAVE_INT64 1 +#endif +typedef uint8_t u8_t; +typedef int8_t s8_t; +typedef uint16_t u16_t; +typedef int16_t s16_t; +typedef uint32_t u32_t; +typedef int32_t s32_t; +#if LWIP_HAVE_INT64 +typedef uint64_t u64_t; +typedef int64_t s64_t; +#endif +typedef uintptr_t mem_ptr_t; +#endif + +/** Define this to 1 in arch/cc.h of your port if your compiler does not provide + * the inttypes.h header. You need to define the format strings listed in + * lwip/arch.h yourself in this case (X8_F, U16_F...). + */ +#ifndef LWIP_NO_INTTYPES_H +#define LWIP_NO_INTTYPES_H 0 +#endif + +/* Define (sn)printf formatters for these lwIP types */ +#if !LWIP_NO_INTTYPES_H +#include <inttypes.h> +#ifndef X8_F +#define X8_F "02" PRIx8 +#endif +#ifndef U16_F +#define U16_F PRIu16 +#endif +#ifndef S16_F +#define S16_F PRId16 +#endif +#ifndef X16_F +#define X16_F PRIx16 +#endif +#ifndef U32_F +#define U32_F PRIu32 +#endif +#ifndef S32_F +#define S32_F PRId32 +#endif +#ifndef X32_F +#define X32_F PRIx32 +#endif +#ifndef SZT_F +#define SZT_F PRIuPTR +#endif +#endif + +/** Define this to 1 in arch/cc.h of your port if your compiler does not provide + * the limits.h header. You need to define the type limits yourself in this case + * (e.g. INT_MAX, SSIZE_MAX). + */ +#ifndef LWIP_NO_LIMITS_H +#define LWIP_NO_LIMITS_H 0 +#endif + +/* Include limits.h? */ +#if !LWIP_NO_LIMITS_H +#include <limits.h> +#endif + +/* Do we need to define ssize_t? This is a compatibility hack: + * Unfortunately, this type seems to be unavailable on some systems (even if + * sys/types or unistd.h are available). + * Being like that, we define it to 'int' if SSIZE_MAX is not defined. + */ +#ifdef SSIZE_MAX +/* If SSIZE_MAX is defined, unistd.h should provide the type as well */ +#ifndef LWIP_NO_UNISTD_H +#define LWIP_NO_UNISTD_H 0 +#endif +#if !LWIP_NO_UNISTD_H +#include <unistd.h> +#endif +#else /* SSIZE_MAX */ +typedef int ssize_t; +#define SSIZE_MAX INT_MAX +#endif /* SSIZE_MAX */ + +/* some maximum values needed in lwip code */ +#define LWIP_UINT32_MAX 0xffffffff + +/** Define this to 1 in arch/cc.h of your port if your compiler does not provide + * the ctype.h header.
If ctype.h is available, a few character functions + * are mapped to the appropriate functions (lwip_islower, lwip_isdigit...), if + * not, a private implementation is provided. + */ +#ifndef LWIP_NO_CTYPE_H +#define LWIP_NO_CTYPE_H 0 +#endif + +#if LWIP_NO_CTYPE_H +#define lwip_in_range(c, lo, up) ((u8_t)(c) >= (lo) && (u8_t)(c) <= (up)) +#define lwip_isdigit(c) lwip_in_range((c), '0', '9') +#define lwip_isxdigit(c) (lwip_isdigit(c) || lwip_in_range((c), 'a', 'f') || lwip_in_range((c), 'A', 'F')) +#define lwip_islower(c) lwip_in_range((c), 'a', 'z') +#define lwip_isspace(c) ((c) == ' ' || (c) == '\f' || (c) == '\n' || (c) == '\r' || (c) == '\t' || (c) == '\v') +#define lwip_isupper(c) lwip_in_range((c), 'A', 'Z') +#define lwip_tolower(c) (lwip_isupper(c) ? (c) - 'A' + 'a' : c) +#define lwip_toupper(c) (lwip_islower(c) ? (c) - 'a' + 'A' : c) +#else +#include <ctype.h> +#define lwip_isdigit(c) isdigit((unsigned char)(c)) +#define lwip_isxdigit(c) isxdigit((unsigned char)(c)) +#define lwip_islower(c) islower((unsigned char)(c)) +#define lwip_isspace(c) isspace((unsigned char)(c)) +#define lwip_isupper(c) isupper((unsigned char)(c)) +#define lwip_tolower(c) tolower((unsigned char)(c)) +#define lwip_toupper(c) toupper((unsigned char)(c)) +#endif + +/** C++ const_cast<target_type>(val) equivalent to remove constness from a value (GCC -Wcast-qual) */ +#ifndef LWIP_CONST_CAST +#define LWIP_CONST_CAST(target_type, val) ((target_type)((ptrdiff_t)val)) +#endif + +/** Get rid of alignment cast warnings (GCC -Wcast-align) */ +#ifndef LWIP_ALIGNMENT_CAST +#define LWIP_ALIGNMENT_CAST(target_type, val) LWIP_CONST_CAST(target_type, val) +#endif + +/** Get rid of warnings related to pointer-to-numeric and vice-versa casts, + * e.g. "conversion from 'u8_t' to 'void *' of greater size" + */ +#ifndef LWIP_PTR_NUMERIC_CAST +#define LWIP_PTR_NUMERIC_CAST(target_type, val) LWIP_CONST_CAST(target_type, val) +#endif + +/** Avoid warnings/errors related to implicitly casting away packed attributes by doing an explicit cast */ +#ifndef LWIP_PACKED_CAST +#define LWIP_PACKED_CAST(target_type, val) LWIP_CONST_CAST(target_type, val) +#endif + +/** Allocates a memory buffer of specified size that is of sufficient size to align + * its start address using LWIP_MEM_ALIGN. + * You can declare your own version here e.g. to enforce alignment without adding + * trailing padding bytes (see LWIP_MEM_ALIGN_BUFFER) or your own section placement + * requirements.\n + * e.g. if you use gcc and need 32 bit alignment:\n + * \#define LWIP_DECLARE_MEMORY_ALIGNED(variable_name, size) u8_t variable_name[size] \_\_attribute\_\_((aligned(4)))\n + * or more portable:\n + * \#define LWIP_DECLARE_MEMORY_ALIGNED(variable_name, size) u32_t variable_name[(size + sizeof(u32_t) - 1) / sizeof(u32_t)] + */ +#ifndef LWIP_DECLARE_MEMORY_ALIGNED +#define LWIP_DECLARE_MEMORY_ALIGNED(variable_name, size) u8_t variable_name[LWIP_MEM_ALIGN_BUFFER(size)] +#endif + +/** Calculate memory size for an aligned buffer - returns the next highest + * multiple of MEM_ALIGNMENT (e.g. LWIP_MEM_ALIGN_SIZE(3) and + * LWIP_MEM_ALIGN_SIZE(4) will both yield 4 for MEM_ALIGNMENT == 4). + */ +#ifndef LWIP_MEM_ALIGN_SIZE +#define LWIP_MEM_ALIGN_SIZE(size) (((size) + MEM_ALIGNMENT - 1U) & ~(MEM_ALIGNMENT-1U)) +#endif + +/** Calculate safe memory size for an aligned buffer when using an unaligned + * type as storage. This includes a safety-margin on (MEM_ALIGNMENT - 1) at the + * start (e.g.
if buffer is u8_t[] and actual data will be u32_t*) + */ +#ifndef LWIP_MEM_ALIGN_BUFFER +#define LWIP_MEM_ALIGN_BUFFER(size) (((size) + MEM_ALIGNMENT - 1U)) +#endif + +/** Align a memory pointer to the alignment defined by MEM_ALIGNMENT + * so that ADDR % MEM_ALIGNMENT == 0 + */ +#ifndef LWIP_MEM_ALIGN +#define LWIP_MEM_ALIGN(addr) ((void *)(((mem_ptr_t)(addr) + MEM_ALIGNMENT - 1) & ~(mem_ptr_t)(MEM_ALIGNMENT-1))) +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** Packed structs support. + * Placed BEFORE declaration of a packed struct.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_BEGIN +#define PACK_STRUCT_BEGIN +#endif /* PACK_STRUCT_BEGIN */ + +/** Packed structs support. + * Placed AFTER declaration of a packed struct.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_END +#define PACK_STRUCT_END +#endif /* PACK_STRUCT_END */ + +/** Packed structs support. + * Placed between end of declaration of a packed struct and trailing semicolon.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_STRUCT +#if defined(__GNUC__) || defined(__clang__) +#define PACK_STRUCT_STRUCT __attribute__((packed)) +#else +#define PACK_STRUCT_STRUCT +#endif +#endif /* PACK_STRUCT_STRUCT */ + +/** Packed structs support. + * Wraps u32_t and u16_t members.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_FIELD +#define PACK_STRUCT_FIELD(x) x +#endif /* PACK_STRUCT_FIELD */ + +/** Packed structs support. + * Wraps u8_t members, where some compilers warn that packing is not necessary.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_FLD_8 +#define PACK_STRUCT_FLD_8(x) PACK_STRUCT_FIELD(x) +#endif /* PACK_STRUCT_FLD_8 */ + +/** Packed structs support. + * Wraps members that are packed structs themselves, where some compilers warn that packing is not necessary.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_FLD_S +#define PACK_STRUCT_FLD_S(x) PACK_STRUCT_FIELD(x) +#endif /* PACK_STRUCT_FLD_S */ + +/** PACK_STRUCT_USE_INCLUDES==1: Packed structs support using \#include files before and after struct to be packed.\n + * The file included BEFORE the struct is "arch/bpstruct.h".\n + * The file included AFTER the struct is "arch/epstruct.h".\n + * This can be used to implement struct packing on MS Visual C compilers, see + * the Win32 port in the lwIP contrib repository for reference. + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. 
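+ *
+ * Editorial illustration (not part of upstream lwIP): the protocol headers under
+ * include/lwip/prot/ combine these macros in the following pattern; example_hdr and
+ * its fields are made up for illustration:
+ *
+ *   #ifdef PACK_STRUCT_USE_INCLUDES
+ *   #include "arch/bpstruct.h"
+ *   #endif
+ *   PACK_STRUCT_BEGIN
+ *   struct example_hdr {
+ *     PACK_STRUCT_FLD_8(u8_t  type);
+ *     PACK_STRUCT_FIELD(u16_t length);
+ *   } PACK_STRUCT_STRUCT;
+ *   PACK_STRUCT_END
+ *   #ifdef PACK_STRUCT_USE_INCLUDES
+ *   #include "arch/epstruct.h"
+ *   #endif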
+ */ +#ifdef __DOXYGEN__ +#define PACK_STRUCT_USE_INCLUDES +#endif + +/** Eliminates compiler warning about unused arguments (GCC -Wextra -Wunused). */ +#ifndef LWIP_UNUSED_ARG +#define LWIP_UNUSED_ARG(x) (void)x +#endif /* LWIP_UNUSED_ARG */ + +/** LWIP_PROVIDE_ERRNO==1: Let lwIP provide ERRNO values and the 'errno' variable. + * If this is disabled, cc.h must either define 'errno', include <errno.h>, + * define LWIP_ERRNO_STDINCLUDE to get <errno.h> included or + * define LWIP_ERRNO_INCLUDE to <errno.h> or equivalent. + */ +#if defined __DOXYGEN__ +#define LWIP_PROVIDE_ERRNO +#endif + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_ARCH_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/autoip.h b/Middlewares/Third_Party/LwIP/src/include/lwip/autoip.h new file mode 100644 index 0000000..1d85bcc --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/autoip.h @@ -0,0 +1,99 @@ +/** + * @file + * + * AutoIP Automatic LinkLocal IP Configuration + */ + +/* + * + * Copyright (c) 2007 Dominik Spies + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Dominik Spies + * + * This is an AutoIP implementation for the lwIP TCP/IP stack. It aims to conform + * with RFC 3927.
+ * + */ + +#ifndef LWIP_HDR_AUTOIP_H +#define LWIP_HDR_AUTOIP_H + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_AUTOIP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/netif.h" +/* #include "lwip/udp.h" */ +#include "lwip/etharp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** AutoIP Timing */ +#define AUTOIP_TMR_INTERVAL 100 +#define AUTOIP_TICKS_PER_SECOND (1000 / AUTOIP_TMR_INTERVAL) + +/** AutoIP state information per netif */ +struct autoip +{ + /** the currently selected, probed, announced or used LL IP-Address */ + ip4_addr_t llipaddr; + /** current AutoIP state machine state */ + u8_t state; + /** sent number of probes or announces, dependent on state */ + u8_t sent_num; + /** ticks to wait, tick is AUTOIP_TMR_INTERVAL long */ + u16_t ttw; + /** ticks until a conflict can be solved by defending */ + u8_t lastconflict; + /** total number of probed/used Link Local IP-Addresses */ + u8_t tried_llipaddr; +}; + + +void autoip_set_struct(struct netif *netif, struct autoip *autoip); +/** Remove a struct autoip previously set to the netif using autoip_set_struct() */ +#define autoip_remove_struct(netif) do { (netif)->autoip = NULL; } while (0) +err_t autoip_start(struct netif *netif); +err_t autoip_stop(struct netif *netif); +void autoip_arp_reply(struct netif *netif, struct etharp_hdr *hdr); +void autoip_tmr(void); +void autoip_network_changed(struct netif *netif); +u8_t autoip_supplied_address(const struct netif *netif); + +/* for lwIP internal use by ip4.c */ +u8_t autoip_accept_packet(struct netif *netif, const ip4_addr_t *addr); + +#define netif_autoip_data(netif) ((struct autoip*)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 && LWIP_AUTOIP */ + +#endif /* LWIP_HDR_AUTOIP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/debug.h b/Middlewares/Third_Party/LwIP/src/include/lwip/debug.h new file mode 100644 index 0000000..baa6a40 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/debug.h @@ -0,0 +1,161 @@ +/** + * @file + * Debug messages infrastructure + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_DEBUG_H +#define LWIP_HDR_DEBUG_H + +#include "lwip/arch.h" +#include "lwip/opt.h" + +/** + * @defgroup debugging_levels LWIP_DBG_MIN_LEVEL and LWIP_DBG_TYPES_ON values + * @ingroup lwip_opts_debugmsg + * @{ + */ + +/** @name Debug level (LWIP_DBG_MIN_LEVEL) + * @{ + */ +/** Debug level: ALL messages*/ +#define LWIP_DBG_LEVEL_ALL 0x00 +/** Debug level: Warnings. bad checksums, dropped packets, ... */ +#define LWIP_DBG_LEVEL_WARNING 0x01 +/** Debug level: Serious. memory allocation failures, ... */ +#define LWIP_DBG_LEVEL_SERIOUS 0x02 +/** Debug level: Severe */ +#define LWIP_DBG_LEVEL_SEVERE 0x03 +/** + * @} + */ + +#define LWIP_DBG_MASK_LEVEL 0x03 +/* compatibility define only */ +#define LWIP_DBG_LEVEL_OFF LWIP_DBG_LEVEL_ALL + +/** @name Enable/disable debug messages completely (LWIP_DBG_TYPES_ON) + * @{ + */ +/** flag for LWIP_DEBUGF to enable that debug message */ +#define LWIP_DBG_ON 0x80U +/** flag for LWIP_DEBUGF to disable that debug message */ +#define LWIP_DBG_OFF 0x00U +/** + * @} + */ + +/** @name Debug message types (LWIP_DBG_TYPES_ON) + * @{ + */ +/** flag for LWIP_DEBUGF indicating a tracing message (to follow program flow) */ +#define LWIP_DBG_TRACE 0x40U +/** flag for LWIP_DEBUGF indicating a state debug message (to follow module states) */ +#define LWIP_DBG_STATE 0x20U +/** flag for LWIP_DEBUGF indicating newly added code, not thoroughly tested yet */ +#define LWIP_DBG_FRESH 0x10U +/** flag for LWIP_DEBUGF to halt after printing this debug message */ +#define LWIP_DBG_HALT 0x08U +/** + * @} + */ + +/** + * @} + */ + +/** + * @defgroup lwip_assertions Assertion handling + * @ingroup lwip_opts_debug + * @{ + */ +/** + * LWIP_NOASSERT: Disable LWIP_ASSERT checks: + * To disable assertions define LWIP_NOASSERT in arch/cc.h. + */ +#ifdef __DOXYGEN__ +#define LWIP_NOASSERT +#undef LWIP_NOASSERT +#endif +/** + * @} + */ + +#ifndef LWIP_NOASSERT +#define LWIP_ASSERT(message, assertion) do { if (!(assertion)) { \ + LWIP_PLATFORM_ASSERT(message); }} while(0) +#else /* LWIP_NOASSERT */ +#define LWIP_ASSERT(message, assertion) +#endif /* LWIP_NOASSERT */ + +#ifndef LWIP_ERROR +#ifndef LWIP_NOASSERT +#define LWIP_PLATFORM_ERROR(message) LWIP_PLATFORM_ASSERT(message) +#elif defined LWIP_DEBUG +#define LWIP_PLATFORM_ERROR(message) LWIP_PLATFORM_DIAG((message)) +#else +#define LWIP_PLATFORM_ERROR(message) +#endif + +/* if "expression" isn't true, then print "message" and execute "handler" expression */ +#define LWIP_ERROR(message, expression, handler) do { if (!(expression)) { \ + LWIP_PLATFORM_ERROR(message); handler;}} while(0) +#endif /* LWIP_ERROR */ + +/** Enable debug message printing, but only if debug message type is enabled + * AND is of correct type AND is at least LWIP_DBG_LEVEL. 
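+ *
+ * Editorial usage sketch (not part of upstream lwIP): a module defines its debug
+ * switch (typically in lwipopts.h) and passes it to LWIP_DEBUGF together with a
+ * message type flag; MY_APP_DEBUG and the message text are examples only:
+ *
+ *   #define MY_APP_DEBUG LWIP_DBG_ON
+ *   ...
+ *   LWIP_DEBUGF(MY_APP_DEBUG | LWIP_DBG_TRACE, ("entering state %d\n", (int)new_state));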
+ */ +#ifdef __DOXYGEN__ +#define LWIP_DEBUG +#undef LWIP_DEBUG +#endif + +#ifdef LWIP_DEBUG +#define LWIP_DEBUGF(debug, message) do { \ + if ( \ + ((debug) & LWIP_DBG_ON) && \ + ((debug) & LWIP_DBG_TYPES_ON) && \ + ((s16_t)((debug) & LWIP_DBG_MASK_LEVEL) >= LWIP_DBG_MIN_LEVEL)) { \ + LWIP_PLATFORM_DIAG(message); \ + if ((debug) & LWIP_DBG_HALT) { \ + while(1); \ + } \ + } \ + } while(0) + +#else /* LWIP_DEBUG */ +#define LWIP_DEBUGF(debug, message) +#endif /* LWIP_DEBUG */ + +#endif /* LWIP_HDR_DEBUG_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/def.h b/Middlewares/Third_Party/LwIP/src/include/lwip/def.h new file mode 100644 index 0000000..dfb266d --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/def.h @@ -0,0 +1,152 @@ +/** + * @file + * various utility macros + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/** + * @defgroup perf Performance measurement + * @ingroup sys_layer + * All defines related to this section must not be placed in lwipopts.h, + * but in arch/perf.h! + * Measurement calls made throughout lwip, these can be defined to nothing. + * - PERF_START: start measuring something. + * - PERF_STOP(x): stop measuring something, and record the result. + */ + +#ifndef LWIP_HDR_DEF_H +#define LWIP_HDR_DEF_H + +/* arch.h might define NULL already */ +#include "lwip/arch.h" +#include "lwip/opt.h" +#if LWIP_PERF +#include "arch/perf.h" +#else /* LWIP_PERF */ +#define PERF_START /* null definition */ +#define PERF_STOP(x) /* null definition */ +#endif /* LWIP_PERF */ + +#ifdef __cplusplus +extern "C" { +#endif + +#define LWIP_MAX(x , y) (((x) > (y)) ? (x) : (y)) +#define LWIP_MIN(x , y) (((x) < (y)) ? (x) : (y)) + +/* Get the number of entries in an array ('x' must NOT be a pointer!) 
*/ +#define LWIP_ARRAYSIZE(x) (sizeof(x)/sizeof((x)[0])) + +/** Create u32_t value from bytes */ +#define LWIP_MAKEU32(a,b,c,d) (((u32_t)((a) & 0xff) << 24) | \ + ((u32_t)((b) & 0xff) << 16) | \ + ((u32_t)((c) & 0xff) << 8) | \ + (u32_t)((d) & 0xff)) + +#ifndef NULL +#ifdef __cplusplus +#define NULL 0 +#else +#define NULL ((void *)0) +#endif +#endif + +#if BYTE_ORDER == BIG_ENDIAN +#define lwip_htons(x) ((u16_t)(x)) +#define lwip_ntohs(x) ((u16_t)(x)) +#define lwip_htonl(x) ((u32_t)(x)) +#define lwip_ntohl(x) ((u32_t)(x)) +#define PP_HTONS(x) ((u16_t)(x)) +#define PP_NTOHS(x) ((u16_t)(x)) +#define PP_HTONL(x) ((u32_t)(x)) +#define PP_NTOHL(x) ((u32_t)(x)) +#else /* BYTE_ORDER != BIG_ENDIAN */ +#ifndef lwip_htons +u16_t lwip_htons(u16_t x); +#endif +#define lwip_ntohs(x) lwip_htons(x) + +#ifndef lwip_htonl +u32_t lwip_htonl(u32_t x); +#endif +#define lwip_ntohl(x) lwip_htonl(x) + +/* These macros should be calculated by the preprocessor and are used + with compile-time constants only (so that there is no little-endian + overhead at runtime). */ +#define PP_HTONS(x) ((u16_t)((((x) & (u16_t)0x00ffU) << 8) | (((x) & (u16_t)0xff00U) >> 8))) +#define PP_NTOHS(x) PP_HTONS(x) +#define PP_HTONL(x) ((((x) & (u32_t)0x000000ffUL) << 24) | \ + (((x) & (u32_t)0x0000ff00UL) << 8) | \ + (((x) & (u32_t)0x00ff0000UL) >> 8) | \ + (((x) & (u32_t)0xff000000UL) >> 24)) +#define PP_NTOHL(x) PP_HTONL(x) +#endif /* BYTE_ORDER == BIG_ENDIAN */ + +/* Provide usual function names as macros for users, but this can be turned off */ +#ifndef LWIP_DONT_PROVIDE_BYTEORDER_FUNCTIONS +#define htons(x) lwip_htons(x) +#define ntohs(x) lwip_ntohs(x) +#define htonl(x) lwip_htonl(x) +#define ntohl(x) lwip_ntohl(x) +#endif + +/* Functions that are not available as standard implementations. + * In cc.h, you can #define these to implementations available on + * your platform to save some code bytes if you use these functions + * in your application, too. + */ + +#ifndef lwip_itoa +/* This can be #defined to itoa() or snprintf(result, bufsize, "%d", number) depending on your platform */ +void lwip_itoa(char* result, size_t bufsize, int number); +#endif +#ifndef lwip_strnicmp +/* This can be #defined to strnicmp() or strncasecmp() depending on your platform */ +int lwip_strnicmp(const char* str1, const char* str2, size_t len); +#endif +#ifndef lwip_stricmp +/* This can be #defined to stricmp() or strcasecmp() depending on your platform */ +int lwip_stricmp(const char* str1, const char* str2); +#endif +#ifndef lwip_strnstr +/* This can be #defined to strnstr() depending on your platform */ +char* lwip_strnstr(const char* buffer, const char* token, size_t n); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_DEF_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp.h new file mode 100644 index 0000000..c78aa0b --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp.h @@ -0,0 +1,139 @@ +/** + * @file + * DHCP client API + */ + +/* + * Copyright (c) 2001-2004 Leon Woestenberg + * Copyright (c) 2001-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Leon Woestenberg + * + */ +#ifndef LWIP_HDR_DHCP_H +#define LWIP_HDR_DHCP_H + +#include "lwip/opt.h" + +#if LWIP_DHCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/netif.h" +#include "lwip/udp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** period (in seconds) of the application calling dhcp_coarse_tmr() */ +#define DHCP_COARSE_TIMER_SECS 60 +/** period (in milliseconds) of the application calling dhcp_coarse_tmr() */ +#define DHCP_COARSE_TIMER_MSECS (DHCP_COARSE_TIMER_SECS * 1000UL) +/** period (in milliseconds) of the application calling dhcp_fine_tmr() */ +#define DHCP_FINE_TIMER_MSECS 500 + +#define DHCP_BOOT_FILE_LEN 128U + +/* AutoIP cooperation flags (struct dhcp.autoip_coop_state) */ +typedef enum { + DHCP_AUTOIP_COOP_STATE_OFF = 0, + DHCP_AUTOIP_COOP_STATE_ON = 1 +} dhcp_autoip_coop_state_enum_t; + +struct dhcp +{ + /** transaction identifier of last sent request */ + u32_t xid; + /** track PCB allocation state */ + u8_t pcb_allocated; + /** current DHCP state machine state */ + u8_t state; + /** retries of current request */ + u8_t tries; +#if LWIP_DHCP_AUTOIP_COOP + u8_t autoip_coop_state; +#endif + u8_t subnet_mask_given; + + u16_t request_timeout; /* #ticks with period DHCP_FINE_TIMER_SECS for request timeout */ + u16_t t1_timeout; /* #ticks with period DHCP_COARSE_TIMER_SECS for renewal time */ + u16_t t2_timeout; /* #ticks with period DHCP_COARSE_TIMER_SECS for rebind time */ + u16_t t1_renew_time; /* #ticks with period DHCP_COARSE_TIMER_SECS until next renew try */ + u16_t t2_rebind_time; /* #ticks with period DHCP_COARSE_TIMER_SECS until next rebind try */ + u16_t lease_used; /* #ticks with period DHCP_COARSE_TIMER_SECS since last received DHCP ack */ + u16_t t0_timeout; /* #ticks with period DHCP_COARSE_TIMER_SECS for lease time */ + ip_addr_t server_ip_addr; /* dhcp server address that offered this lease (ip_addr_t because passed to UDP) */ + ip4_addr_t offered_ip_addr; + ip4_addr_t offered_sn_mask; + ip4_addr_t offered_gw_addr; + + u32_t offered_t0_lease; /* lease period (in seconds) */ + u32_t offered_t1_renew; /* recommended renew time (usually 50% of lease period) */ + u32_t offered_t2_rebind; /* recommended rebind time (usually 87.5 of lease period) */ +#if LWIP_DHCP_BOOTP_FILE + ip4_addr_t offered_si_addr; + char boot_file_name[DHCP_BOOT_FILE_LEN]; +#endif /* 
LWIP_DHCP_BOOTPFILE */ +}; + + +void dhcp_set_struct(struct netif *netif, struct dhcp *dhcp); +/** Remove a struct dhcp previously set to the netif using dhcp_set_struct() */ +#define dhcp_remove_struct(netif) netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, NULL) +void dhcp_cleanup(struct netif *netif); +err_t dhcp_start(struct netif *netif); +err_t dhcp_renew(struct netif *netif); +err_t dhcp_release(struct netif *netif); +void dhcp_stop(struct netif *netif); +void dhcp_release_and_stop(struct netif *netif); +void dhcp_inform(struct netif *netif); +void dhcp_network_changed(struct netif *netif); +#if DHCP_DOES_ARP_CHECK +void dhcp_arp_reply(struct netif *netif, const ip4_addr_t *addr); +#endif +u8_t dhcp_supplied_address(const struct netif *netif); +/* to be called every minute */ +void dhcp_coarse_tmr(void); +/* to be called every half second */ +void dhcp_fine_tmr(void); + +#if LWIP_DHCP_GET_NTP_SRV +/** This function must exist, in other to add offered NTP servers to + * the NTP (or SNTP) engine. + * See LWIP_DHCP_MAX_NTP_SERVERS */ +extern void dhcp_set_ntp_servers(u8_t num_ntp_servers, const ip4_addr_t* ntp_server_addrs); +#endif /* LWIP_DHCP_GET_NTP_SRV */ + +#define netif_dhcp_data(netif) ((struct dhcp*)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_DHCP */ + +#endif /*LWIP_HDR_DHCP_H*/ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp6.h b/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp6.h new file mode 100644 index 0000000..5cc4a01 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp6.h @@ -0,0 +1,104 @@ +/** + * @file + * + * DHCPv6 client: IPv6 address autoconfiguration as per + * RFC 3315 (stateful DHCPv6) and + * RFC 3736 (stateless DHCPv6). + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
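(Usage sketch for the DHCPv4 client API declared above, not part of the imported file; app_start_dhcp and app_poll_dhcp are hypothetical helper names, and the timer note only applies when the stack's timeouts module is not being used.)

#include "lwip/dhcp.h"
#include "lwip/netif.h"

void app_start_dhcp(struct netif *netif)
{
  dhcp_start(netif);                 /* kicks off the DISCOVER/REQUEST state machine */
}

void app_poll_dhcp(struct netif *netif)
{
  if (dhcp_supplied_address(netif)) {
    /* a lease has been bound: netif_ip4_addr(netif) now holds the offered address */
  }
}

/* without the sys_timeout machinery, the application itself must call
 * dhcp_coarse_tmr() every DHCP_COARSE_TIMER_MSECS and dhcp_fine_tmr()
 * every DHCP_FINE_TIMER_MSECS, as noted above */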
+ * + * Author: Simon Goldschmidt + */ + +#ifndef LWIP_HDR_IP6_DHCP6_H +#define LWIP_HDR_IP6_DHCP6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6_DHCP6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/err.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** period (in milliseconds) of the application calling dhcp6_tmr() */ +#define DHCP6_TIMER_MSECS 500 + +struct dhcp6 +{ + /** transaction identifier of last sent request */ + u32_t xid; + /** track PCB allocation state */ + u8_t pcb_allocated; + /** current DHCPv6 state machine state */ + u8_t state; + /** retries of current request */ + u8_t tries; + /** if request config is triggered while another action is active, this keeps track of it */ + u8_t request_config_pending; + /** #ticks with period DHCP6_TIMER_MSECS for request timeout */ + u16_t request_timeout; +#if LWIP_IPV6_DHCP6_STATEFUL + /* @todo: add more members here to keep track of stateful DHCPv6 data, like lease times */ +#endif /* LWIP_IPV6_DHCP6_STATEFUL */ +}; + +void dhcp6_set_struct(struct netif *netif, struct dhcp6 *dhcp6); +/** Remove a struct dhcp6 previously set to the netif using dhcp6_set_struct() */ +#define dhcp6_remove_struct(netif) netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, NULL) +void dhcp6_cleanup(struct netif *netif); + +err_t dhcp6_enable_stateful(struct netif *netif); +err_t dhcp6_enable_stateless(struct netif *netif); +void dhcp6_disable(struct netif *netif); + +void dhcp6_tmr(void); + +void dhcp6_nd6_ra_trigger(struct netif *netif, u8_t managed_addr_config, u8_t other_config); + +#if LWIP_DHCP6_GET_NTP_SRV +/** This function must exist, in other to add offered NTP servers to + * the NTP (or SNTP) engine. + * See LWIP_DHCP6_MAX_NTP_SERVERS */ +extern void dhcp6_set_ntp_servers(u8_t num_ntp_servers, const ip_addr_t* ntp_server_addrs); +#endif /* LWIP_DHCP6_GET_NTP_SRV */ + +#define netif_dhcp6_data(netif) ((struct dhcp6*)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6_DHCP6 */ + +#endif /* LWIP_HDR_IP6_DHCP6_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/dns.h b/Middlewares/Third_Party/LwIP/src/include/lwip/dns.h new file mode 100644 index 0000000..0913415 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/dns.h @@ -0,0 +1,131 @@ +/** + * @file + * DNS API + */ + +/** + * lwip DNS resolver header file. + + * Author: Jim Pettinato + * April 2007 + + * ported from uIP resolv.c Copyright (c) 2002-2003, Adam Dunkels. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef LWIP_HDR_DNS_H +#define LWIP_HDR_DNS_H + +#include "lwip/opt.h" + +#if LWIP_DNS + +#include "lwip/ip_addr.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** DNS timer period */ +#define DNS_TMR_INTERVAL 1000 + +/* DNS resolve types: */ +#define LWIP_DNS_ADDRTYPE_IPV4 0 +#define LWIP_DNS_ADDRTYPE_IPV6 1 +#define LWIP_DNS_ADDRTYPE_IPV4_IPV6 2 /* try to resolve IPv4 first, try IPv6 if IPv4 fails only */ +#define LWIP_DNS_ADDRTYPE_IPV6_IPV4 3 /* try to resolve IPv6 first, try IPv4 if IPv6 fails only */ +#if LWIP_IPV4 && LWIP_IPV6 +#ifndef LWIP_DNS_ADDRTYPE_DEFAULT +#define LWIP_DNS_ADDRTYPE_DEFAULT LWIP_DNS_ADDRTYPE_IPV4_IPV6 +#endif +#elif LWIP_IPV4 +#define LWIP_DNS_ADDRTYPE_DEFAULT LWIP_DNS_ADDRTYPE_IPV4 +#else +#define LWIP_DNS_ADDRTYPE_DEFAULT LWIP_DNS_ADDRTYPE_IPV6 +#endif + +#if DNS_LOCAL_HOSTLIST +/** struct used for local host-list */ +struct local_hostlist_entry { + /** static hostname */ + const char *name; + /** static host address in network byteorder */ + ip_addr_t addr; + struct local_hostlist_entry *next; +}; +#define DNS_LOCAL_HOSTLIST_ELEM(name, addr_init) {name, addr_init, NULL} +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC +#ifndef DNS_LOCAL_HOSTLIST_MAX_NAMELEN +#define DNS_LOCAL_HOSTLIST_MAX_NAMELEN DNS_MAX_NAME_LENGTH +#endif +#define LOCALHOSTLIST_ELEM_SIZE ((sizeof(struct local_hostlist_entry) + DNS_LOCAL_HOSTLIST_MAX_NAMELEN + 1)) +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ +#endif /* DNS_LOCAL_HOSTLIST */ + +#if LWIP_IPV4 +extern const ip_addr_t dns_mquery_v4group; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 +extern const ip_addr_t dns_mquery_v6group; +#endif /* LWIP_IPV6 */ + +/** Callback which is invoked when a hostname is found. + * A function of this type must be implemented by the application using the DNS resolver. + * @param name pointer to the name that was looked up. + * @param ipaddr pointer to an ip_addr_t containing the IP address of the hostname, + * or NULL if the name could not be found (or on any other error). 
+ * @param callback_arg a user-specified callback argument passed to dns_gethostbyname +*/ +typedef void (*dns_found_callback)(const char *name, const ip_addr_t *ipaddr, void *callback_arg); + +void dns_init(void); +void dns_tmr(void); +void dns_setserver(u8_t numdns, const ip_addr_t *dnsserver); +const ip_addr_t* dns_getserver(u8_t numdns); +err_t dns_gethostbyname(const char *hostname, ip_addr_t *addr, + dns_found_callback found, void *callback_arg); +err_t dns_gethostbyname_addrtype(const char *hostname, ip_addr_t *addr, + dns_found_callback found, void *callback_arg, + u8_t dns_addrtype); + + +#if DNS_LOCAL_HOSTLIST +size_t dns_local_iterate(dns_found_callback iterator_fn, void *iterator_arg); +err_t dns_local_lookup(const char *hostname, ip_addr_t *addr, u8_t dns_addrtype); +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC +int dns_local_removehost(const char *hostname, const ip_addr_t *addr); +err_t dns_local_addhost(const char *hostname, const ip_addr_t *addr); +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ +#endif /* DNS_LOCAL_HOSTLIST */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_DNS */ + +#endif /* LWIP_HDR_DNS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/err.h b/Middlewares/Third_Party/LwIP/src/include/lwip/err.h new file mode 100644 index 0000000..887d9b3 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/err.h @@ -0,0 +1,117 @@ +/** + * @file + * lwIP Error codes + */ +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_ERR_H +#define LWIP_HDR_ERR_H + +#include "lwip/opt.h" +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup infrastructure_errors Error codes + * @ingroup infrastructure + * @{ + */ + +/** Definitions for error constants. */ +typedef enum { +/** No error, everything OK. */ + ERR_OK = 0, +/** Out of memory error. */ + ERR_MEM = -1, +/** Buffer error. */ + ERR_BUF = -2, +/** Timeout. */ + ERR_TIMEOUT = -3, +/** Routing problem. 
*/ + ERR_RTE = -4, +/** Operation in progress */ + ERR_INPROGRESS = -5, +/** Illegal value. */ + ERR_VAL = -6, +/** Operation would block. */ + ERR_WOULDBLOCK = -7, +/** Address in use. */ + ERR_USE = -8, +/** Already connecting. */ + ERR_ALREADY = -9, +/** Conn already established.*/ + ERR_ISCONN = -10, +/** Not connected. */ + ERR_CONN = -11, +/** Low-level netif error */ + ERR_IF = -12, + +/** Connection aborted. */ + ERR_ABRT = -13, +/** Connection reset. */ + ERR_RST = -14, +/** Connection closed. */ + ERR_CLSD = -15, +/** Illegal argument. */ + ERR_ARG = -16 +} err_enum_t; + +/** Define LWIP_ERR_T in cc.h if you want to use + * a different type for your platform (must be signed). */ +#ifdef LWIP_ERR_T +typedef LWIP_ERR_T err_t; +#else /* LWIP_ERR_T */ +typedef s8_t err_t; +#endif /* LWIP_ERR_T*/ + +/** + * @} + */ + +#ifdef LWIP_DEBUG +extern const char *lwip_strerr(err_t err); +#else +#define lwip_strerr(x) "" +#endif /* LWIP_DEBUG */ + +#if !NO_SYS +int err_to_errno(err_t err); +#endif /* !NO_SYS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_ERR_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/errno.h b/Middlewares/Third_Party/LwIP/src/include/lwip/errno.h new file mode 100644 index 0000000..48d6b53 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/errno.h @@ -0,0 +1,198 @@ +/** + * @file + * Posix Errno defines + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
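(Usage sketch for the err_t codes declared above, not part of the imported file; udp_bind is just one example of a raw-API call that returns err_t, and app_bind_echo is a hypothetical helper.)

#include "lwip/udp.h"
#include "lwip/err.h"

static void app_bind_echo(struct udp_pcb *pcb)
{
  err_t err = udp_bind(pcb, IP_ADDR_ANY, 7);
  if (err != ERR_OK) {
    /* ERR_USE means the port is already bound; lwip_strerr() gives a readable
     * string when LWIP_DEBUG is defined and expands to "" otherwise */
    LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("udp_bind failed: %s\n", lwip_strerr(err)));
  }
}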
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_ERRNO_H +#define LWIP_HDR_ERRNO_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef LWIP_PROVIDE_ERRNO + +#define EPERM 1 /* Operation not permitted */ +#define ENOENT 2 /* No such file or directory */ +#define ESRCH 3 /* No such process */ +#define EINTR 4 /* Interrupted system call */ +#define EIO 5 /* I/O error */ +#define ENXIO 6 /* No such device or address */ +#define E2BIG 7 /* Arg list too long */ +#define ENOEXEC 8 /* Exec format error */ +#define EBADF 9 /* Bad file number */ +#define ECHILD 10 /* No child processes */ +#define EAGAIN 11 /* Try again */ +#define ENOMEM 12 /* Out of memory */ +#define EACCES 13 /* Permission denied */ +#define EFAULT 14 /* Bad address */ +#define ENOTBLK 15 /* Block device required */ +#define EBUSY 16 /* Device or resource busy */ +#define EEXIST 17 /* File exists */ +#define EXDEV 18 /* Cross-device link */ +#define ENODEV 19 /* No such device */ +#define ENOTDIR 20 /* Not a directory */ +#define EISDIR 21 /* Is a directory */ +#define EINVAL 22 /* Invalid argument */ +#define ENFILE 23 /* File table overflow */ +#define EMFILE 24 /* Too many open files */ +#define ENOTTY 25 /* Not a typewriter */ +#define ETXTBSY 26 /* Text file busy */ +#define EFBIG 27 /* File too large */ +#define ENOSPC 28 /* No space left on device */ +#define ESPIPE 29 /* Illegal seek */ +#define EROFS 30 /* Read-only file system */ +#define EMLINK 31 /* Too many links */ +#define EPIPE 32 /* Broken pipe */ +#define EDOM 33 /* Math argument out of domain of func */ +#define ERANGE 34 /* Math result not representable */ +#define EDEADLK 35 /* Resource deadlock would occur */ +#define ENAMETOOLONG 36 /* File name too long */ +#define ENOLCK 37 /* No record locks available */ +#define ENOSYS 38 /* Function not implemented */ +#define ENOTEMPTY 39 /* Directory not empty */ +#define ELOOP 40 /* Too many symbolic links encountered */ +#define EWOULDBLOCK EAGAIN /* Operation would block */ +#define ENOMSG 42 /* No message of desired type */ +#define EIDRM 43 /* Identifier removed */ +#define ECHRNG 44 /* Channel number out of range */ +#define EL2NSYNC 45 /* Level 2 not synchronized */ +#define EL3HLT 46 /* Level 3 halted */ +#define EL3RST 47 /* Level 3 reset */ +#define ELNRNG 48 /* Link number out of range */ +#define EUNATCH 49 /* Protocol driver not attached */ +#define ENOCSI 50 /* No CSI structure available */ +#define EL2HLT 51 /* Level 2 halted */ +#define EBADE 52 /* Invalid exchange */ +#define EBADR 53 /* Invalid request descriptor */ +#define EXFULL 54 /* Exchange full */ +#define ENOANO 55 /* No anode */ +#define EBADRQC 56 /* Invalid request code */ +#define EBADSLT 57 /* Invalid slot */ + +#define EDEADLOCK EDEADLK + +#define EBFONT 59 /* Bad font file format */ +#define ENOSTR 60 /* Device not a stream */ +#define ENODATA 61 /* No data available */ +#define ETIME 62 /* Timer expired */ +#define ENOSR 63 /* Out of streams resources */ +#define ENONET 64 /* Machine is not on the network */ +#define ENOPKG 65 /* Package not installed */ +#define EREMOTE 66 /* Object is remote */ +#define ENOLINK 67 /* Link has been severed */ +#define EADV 68 /* Advertise error */ +#define ESRMNT 69 /* Srmount error */ +#define ECOMM 70 /* Communication error on send */ +#define EPROTO 71 /* Protocol error */ +#define EMULTIHOP 72 /* Multihop attempted */ +#define EDOTDOT 73 /* RFS specific error */ +#define EBADMSG 74 /* Not a data message */ +#define EOVERFLOW 75 /* Value too large for defined 
data type */ +#define ENOTUNIQ 76 /* Name not unique on network */ +#define EBADFD 77 /* File descriptor in bad state */ +#define EREMCHG 78 /* Remote address changed */ +#define ELIBACC 79 /* Can not access a needed shared library */ +#define ELIBBAD 80 /* Accessing a corrupted shared library */ +#define ELIBSCN 81 /* .lib section in a.out corrupted */ +#define ELIBMAX 82 /* Attempting to link in too many shared libraries */ +#define ELIBEXEC 83 /* Cannot exec a shared library directly */ +#define EILSEQ 84 /* Illegal byte sequence */ +#define ERESTART 85 /* Interrupted system call should be restarted */ +#define ESTRPIPE 86 /* Streams pipe error */ +#define EUSERS 87 /* Too many users */ +#define ENOTSOCK 88 /* Socket operation on non-socket */ +#define EDESTADDRREQ 89 /* Destination address required */ +#define EMSGSIZE 90 /* Message too long */ +#define EPROTOTYPE 91 /* Protocol wrong type for socket */ +#define ENOPROTOOPT 92 /* Protocol not available */ +#define EPROTONOSUPPORT 93 /* Protocol not supported */ +#define ESOCKTNOSUPPORT 94 /* Socket type not supported */ +#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */ +#define EPFNOSUPPORT 96 /* Protocol family not supported */ +#define EAFNOSUPPORT 97 /* Address family not supported by protocol */ +#define EADDRINUSE 98 /* Address already in use */ +#define EADDRNOTAVAIL 99 /* Cannot assign requested address */ +#define ENETDOWN 100 /* Network is down */ +#define ENETUNREACH 101 /* Network is unreachable */ +#define ENETRESET 102 /* Network dropped connection because of reset */ +#define ECONNABORTED 103 /* Software caused connection abort */ +#define ECONNRESET 104 /* Connection reset by peer */ +#define ENOBUFS 105 /* No buffer space available */ +#define EISCONN 106 /* Transport endpoint is already connected */ +#define ENOTCONN 107 /* Transport endpoint is not connected */ +#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */ +#define ETOOMANYREFS 109 /* Too many references: cannot splice */ +#define ETIMEDOUT 110 /* Connection timed out */ +#define ECONNREFUSED 111 /* Connection refused */ +#define EHOSTDOWN 112 /* Host is down */ +#define EHOSTUNREACH 113 /* No route to host */ +#define EALREADY 114 /* Operation already in progress */ +#define EINPROGRESS 115 /* Operation now in progress */ +#define ESTALE 116 /* Stale NFS file handle */ +#define EUCLEAN 117 /* Structure needs cleaning */ +#define ENOTNAM 118 /* Not a XENIX named type file */ +#define ENAVAIL 119 /* No XENIX semaphores available */ +#define EISNAM 120 /* Is a named type file */ +#define EREMOTEIO 121 /* Remote I/O error */ +#define EDQUOT 122 /* Quota exceeded */ + +#define ENOMEDIUM 123 /* No medium found */ +#define EMEDIUMTYPE 124 /* Wrong medium type */ + +#ifndef errno +extern int errno; +#endif + +#else /* LWIP_PROVIDE_ERRNO */ + +/* Define LWIP_ERRNO_STDINCLUDE if you want to include here */ +#ifdef LWIP_ERRNO_STDINCLUDE +#include +#else /* LWIP_ERRNO_STDINCLUDE */ +/* Define LWIP_ERRNO_INCLUDE to an equivalent of to include the error defines here */ +#ifdef LWIP_ERRNO_INCLUDE +#include LWIP_ERRNO_INCLUDE +#endif /* LWIP_ERRNO_INCLUDE */ +#endif /* LWIP_ERRNO_STDINCLUDE */ + +#endif /* LWIP_PROVIDE_ERRNO */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_ERRNO_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/etharp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/etharp.h new file mode 100644 index 0000000..2036b24 --- /dev/null +++ 
b/Middlewares/Third_Party/LwIP/src/include/lwip/etharp.h @@ -0,0 +1,105 @@ +/** + * @file + * Ethernet output function - handles OUTGOING ethernet level traffic, implements + * ARP resolving. + * To be used in most low-level netif implementations + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * Copyright (c) 2003-2004 Leon Woestenberg + * Copyright (c) 2003-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_NETIF_ETHARP_H +#define LWIP_HDR_NETIF_ETHARP_H + +#include "lwip/opt.h" + +#if LWIP_ARP || LWIP_ETHERNET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/ip4_addr.h" +#include "lwip/netif.h" +#include "lwip/ip4.h" +#include "lwip/prot/ethernet.h" + +#if LWIP_IPV4 && LWIP_ARP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/prot/etharp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** 1 seconds period */ +#define ARP_TMR_INTERVAL 1000 + +#if ARP_QUEUEING +/** struct for queueing outgoing packets for unknown address + * defined here to be accessed by memp.h + */ +struct etharp_q_entry { + struct etharp_q_entry *next; + struct pbuf *p; +}; +#endif /* ARP_QUEUEING */ + +#define etharp_init() /* Compatibility define, no init needed. */ +void etharp_tmr(void); +ssize_t etharp_find_addr(struct netif *netif, const ip4_addr_t *ipaddr, + struct eth_addr **eth_ret, const ip4_addr_t **ip_ret); +int etharp_get_entry(size_t i, ip4_addr_t **ipaddr, struct netif **netif, struct eth_addr **eth_ret); +err_t etharp_output(struct netif *netif, struct pbuf *q, const ip4_addr_t *ipaddr); +err_t etharp_query(struct netif *netif, const ip4_addr_t *ipaddr, struct pbuf *q); +err_t etharp_request(struct netif *netif, const ip4_addr_t *ipaddr); +/** For Ethernet network interfaces, we might want to send "gratuitous ARP"; + * this is an ARP packet sent by a node in order to spontaneously cause other + * nodes to update an entry in their ARP cache. 
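(A sketch of how a low-level driver typically wires these functions into a netif; ethernetif_init and low_level_output follow the generic ethernetif template and are assumptions, not declarations made by this header.)

#include "lwip/etharp.h"
#include "lwip/netif.h"

err_t low_level_output(struct netif *netif, struct pbuf *p);  /* MAC transmit, provided by the driver */

err_t ethernetif_init(struct netif *netif)
{
  netif->output     = etharp_output;     /* IPv4 output is routed through ARP resolution */
  netif->linkoutput = low_level_output;  /* resolved Ethernet frames go to the MAC driver */
  netif->flags     |= NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP;
  return ERR_OK;
}

/* after the interface address changes, etharp_gratuitous(netif) re-announces it */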
+ * From RFC 3220 "IP Mobility Support for IPv4" section 4.6. */ +#define etharp_gratuitous(netif) etharp_request((netif), netif_ip4_addr(netif)) +void etharp_cleanup_netif(struct netif *netif); + +#if ETHARP_SUPPORT_STATIC_ENTRIES +err_t etharp_add_static_entry(const ip4_addr_t *ipaddr, struct eth_addr *ethaddr); +err_t etharp_remove_static_entry(const ip4_addr_t *ipaddr); +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + +void etharp_input(struct pbuf *p, struct netif *netif); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 && LWIP_ARP */ +#endif /* LWIP_ARP || LWIP_ETHERNET */ + +#endif /* LWIP_HDR_NETIF_ETHARP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/ethip6.h b/Middlewares/Third_Party/LwIP/src/include/lwip/ethip6.h new file mode 100644 index 0000000..5e88dff --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/ethip6.h @@ -0,0 +1,68 @@ +/** + * @file + * + * Ethernet output for IPv6. Uses ND tables for link-layer addressing. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_ETHIP6_H +#define LWIP_HDR_ETHIP6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_ETHERNET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" + + +#ifdef __cplusplus +extern "C" { +#endif + + +err_t ethip6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 && LWIP_ETHERNET */ + +#endif /* LWIP_HDR_ETHIP6_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/icmp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/icmp.h new file mode 100644 index 0000000..f5a31fd --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/icmp.h @@ -0,0 +1,110 @@ +/** + * @file + * ICMP API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_ICMP_H +#define LWIP_HDR_ICMP_H + +#include "lwip/opt.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/prot/icmp.h" + +#if LWIP_IPV6 && LWIP_ICMP6 +#include "lwip/icmp6.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** ICMP destination unreachable codes */ +enum icmp_dur_type { + /** net unreachable */ + ICMP_DUR_NET = 0, + /** host unreachable */ + ICMP_DUR_HOST = 1, + /** protocol unreachable */ + ICMP_DUR_PROTO = 2, + /** port unreachable */ + ICMP_DUR_PORT = 3, + /** fragmentation needed and DF set */ + ICMP_DUR_FRAG = 4, + /** source route failed */ + ICMP_DUR_SR = 5 +}; + +/** ICMP time exceeded codes */ +enum icmp_te_type { + /** time to live exceeded in transit */ + ICMP_TE_TTL = 0, + /** fragment reassembly time exceeded */ + ICMP_TE_FRAG = 1 +}; + +#if LWIP_IPV4 && LWIP_ICMP /* don't build if not configured for use in lwipopts.h */ + +void icmp_input(struct pbuf *p, struct netif *inp); +void icmp_dest_unreach(struct pbuf *p, enum icmp_dur_type t); +void icmp_time_exceeded(struct pbuf *p, enum icmp_te_type t); + +#endif /* LWIP_IPV4 && LWIP_ICMP */ + +#if LWIP_IPV4 && LWIP_IPV6 +#if LWIP_ICMP && LWIP_ICMP6 +#define icmp_port_unreach(isipv6, pbuf) ((isipv6) ? 
\ + icmp6_dest_unreach(pbuf, ICMP6_DUR_PORT) : \ + icmp_dest_unreach(pbuf, ICMP_DUR_PORT)) +#elif LWIP_ICMP +#define icmp_port_unreach(isipv6, pbuf) do{ if(!(isipv6)) { icmp_dest_unreach(pbuf, ICMP_DUR_PORT);}}while(0) +#elif LWIP_ICMP6 +#define icmp_port_unreach(isipv6, pbuf) do{ if(isipv6) { icmp6_dest_unreach(pbuf, ICMP6_DUR_PORT);}}while(0) +#else +#define icmp_port_unreach(isipv6, pbuf) +#endif +#elif LWIP_IPV6 && LWIP_ICMP6 +#define icmp_port_unreach(isipv6, pbuf) icmp6_dest_unreach(pbuf, ICMP6_DUR_PORT) +#elif LWIP_IPV4 && LWIP_ICMP +#define icmp_port_unreach(isipv6, pbuf) icmp_dest_unreach(pbuf, ICMP_DUR_PORT) +#else /* (LWIP_IPV6 && LWIP_ICMP6) || (LWIP_IPV4 && LWIP_ICMP) */ +#define icmp_port_unreach(isipv6, pbuf) +#endif /* (LWIP_IPV6 && LWIP_ICMP6) || (LWIP_IPV4 && LWIP_ICMP) LWIP_IPV4*/ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_ICMP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/icmp6.h b/Middlewares/Third_Party/LwIP/src/include/lwip/icmp6.h new file mode 100644 index 0000000..0ccb789 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/icmp6.h @@ -0,0 +1,72 @@ +/** + * @file + * + * IPv6 version of ICMP, as per RFC 4443. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ +#ifndef LWIP_HDR_ICMP6_H +#define LWIP_HDR_ICMP6_H + +#include "lwip/opt.h" +#include "lwip/pbuf.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" +#include "lwip/prot/icmp6.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_ICMP6 && LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +void icmp6_input(struct pbuf *p, struct netif *inp); +void icmp6_dest_unreach(struct pbuf *p, enum icmp6_dur_code c); +void icmp6_packet_too_big(struct pbuf *p, u32_t mtu); +void icmp6_time_exceeded(struct pbuf *p, enum icmp6_te_code c); +void icmp6_time_exceeded_with_addrs(struct pbuf *p, enum icmp6_te_code c, + const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr); +void icmp6_param_problem(struct pbuf *p, enum icmp6_pp_code c, const void *pointer); + +#endif /* LWIP_ICMP6 && LWIP_IPV6 */ + + +#ifdef __cplusplus +} +#endif + + +#endif /* LWIP_HDR_ICMP6_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/if_api.h b/Middlewares/Third_Party/LwIP/src/include/lwip/if_api.h new file mode 100644 index 0000000..39017ab --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/if_api.h @@ -0,0 +1,68 @@ +/** + * @file + * Interface Identification APIs from: + * RFC 3493: Basic Socket Interface Extensions for IPv6 + * Section 4: Interface Identification + */ + +/* + * Copyright (c) 2017 Joel Cunningham, Garmin International, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Joel Cunningham + * + */ +#ifndef LWIP_HDR_IF_H +#define LWIP_HDR_IF_H + +#include "lwip/opt.h" + +#if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define IF_NAMESIZE NETIF_NAMESIZE + +char * lwip_if_indextoname(unsigned int ifindex, char *ifname); +unsigned int lwip_if_nametoindex(const char *ifname); + +#if LWIP_COMPAT_SOCKETS +#define if_indextoname(ifindex, ifname) lwip_if_indextoname(ifindex,ifname) +#define if_nametoindex(ifname) lwip_if_nametoindex(ifname) +#endif /* LWIP_COMPAT_SOCKETS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_SOCKET */ + +#endif /* LWIP_HDR_IF_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/igmp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/igmp.h new file mode 100644 index 0000000..ffd80e6 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/igmp.h @@ -0,0 +1,115 @@ +/** + * @file + * IGMP API + */ + +/* + * Copyright (c) 2002 CITEL Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of CITEL Technologies Ltd nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY CITEL TECHNOLOGIES AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL CITEL TECHNOLOGIES OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is a contribution to the lwIP TCP/IP stack. + * The Swedish Institute of Computer Science and Adam Dunkels + * are specifically granted permission to redistribute this + * source code. 
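(Usage sketch for the group-management API declared below, not part of the imported file; the 239.0.0.1 group address is an arbitrary example and app_join_group is a hypothetical helper. The netif must have NETIF_FLAG_IGMP set and LWIP_IGMP enabled for this to work.)

#include "lwip/igmp.h"
#include "lwip/ip4_addr.h"

void app_join_group(struct netif *netif)
{
  ip4_addr_t group;
  IP4_ADDR(&group, 239, 0, 0, 1);
  igmp_joingroup_netif(netif, &group);   /* or igmp_joingroup(ifaddr, &group) to pick the interface by address */
}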
+*/ + +#ifndef LWIP_HDR_IGMP_H +#define LWIP_HDR_IGMP_H + +#include "lwip/opt.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/pbuf.h" + +#if LWIP_IPV4 && LWIP_IGMP /* don't build if not configured for use in lwipopts.h */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* IGMP timer */ +#define IGMP_TMR_INTERVAL 100 /* Milliseconds */ +#define IGMP_V1_DELAYING_MEMBER_TMR (1000/IGMP_TMR_INTERVAL) +#define IGMP_JOIN_DELAYING_MEMBER_TMR (500 /IGMP_TMR_INTERVAL) + +/* Compatibility defines (don't use for new code) */ +#define IGMP_DEL_MAC_FILTER NETIF_DEL_MAC_FILTER +#define IGMP_ADD_MAC_FILTER NETIF_ADD_MAC_FILTER + +/** + * igmp group structure - there is + * a list of groups for each interface + * these should really be linked from the interface, but + * if we keep them separate we will not affect the lwip original code + * too much + * + * There will be a group for the all systems group address but this + * will not run the state machine as it is used to kick off reports + * from all the other groups + */ +struct igmp_group { + /** next link */ + struct igmp_group *next; + /** multicast address */ + ip4_addr_t group_address; + /** signifies we were the last person to report */ + u8_t last_reporter_flag; + /** current state of the group */ + u8_t group_state; + /** timer for reporting, negative is OFF */ + u16_t timer; + /** counter of simultaneous uses */ + u8_t use; +}; + +/* Prototypes */ +void igmp_init(void); +err_t igmp_start(struct netif *netif); +err_t igmp_stop(struct netif *netif); +void igmp_report_groups(struct netif *netif); +struct igmp_group *igmp_lookfor_group(struct netif *ifp, const ip4_addr_t *addr); +void igmp_input(struct pbuf *p, struct netif *inp, const ip4_addr_t *dest); +err_t igmp_joingroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr); +err_t igmp_joingroup_netif(struct netif *netif, const ip4_addr_t *groupaddr); +err_t igmp_leavegroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr); +err_t igmp_leavegroup_netif(struct netif *netif, const ip4_addr_t *groupaddr); +void igmp_tmr(void); + +/** @ingroup igmp + * Get list head of IGMP groups for netif. + * Note: The allsystems group IP is contained in the list as first entry. + * @see @ref netif_set_igmp_mac_filter() + */ +#define netif_igmp_data(netif) ((struct igmp_group *)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_IGMP)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 && LWIP_IGMP */ + +#endif /* LWIP_HDR_IGMP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/inet.h b/Middlewares/Third_Party/LwIP/src/include/lwip/inet.h new file mode 100644 index 0000000..2982a0f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/inet.h @@ -0,0 +1,169 @@ +/** + * @file + * This file (together with sockets.h) aims to provide structs and functions from + * - arpa/inet.h + * - netinet/in.h + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_INET_H +#define LWIP_HDR_INET_H + +#include "lwip/opt.h" +#include "lwip/def.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* If your port already typedef's in_addr_t, define IN_ADDR_T_DEFINED + to prevent this code from redefining it. */ +#if !defined(in_addr_t) && !defined(IN_ADDR_T_DEFINED) +typedef u32_t in_addr_t; +#endif + +struct in_addr { + in_addr_t s_addr; +}; + +struct in6_addr { + union { + u32_t u32_addr[4]; + u8_t u8_addr[16]; + } un; +#define s6_addr un.u8_addr +}; + +/** 255.255.255.255 */ +#define INADDR_NONE IPADDR_NONE +/** 127.0.0.1 */ +#define INADDR_LOOPBACK IPADDR_LOOPBACK +/** 0.0.0.0 */ +#define INADDR_ANY IPADDR_ANY +/** 255.255.255.255 */ +#define INADDR_BROADCAST IPADDR_BROADCAST + +/** This macro can be used to initialize a variable of type struct in6_addr + to the IPv6 wildcard address. */ +#define IN6ADDR_ANY_INIT {{{0,0,0,0}}} +/** This macro can be used to initialize a variable of type struct in6_addr + to the IPv6 loopback address. */ +#define IN6ADDR_LOOPBACK_INIT {{{0,0,0,PP_HTONL(1)}}} +/** This variable is initialized by the system to contain the wildcard IPv6 address. */ +extern const struct in6_addr in6addr_any; + +/* Definitions of the bits in an (IPv4) Internet address integer. + + On subnets, host and network parts are found according to + the subnet mask, not these masks. */ +#define IN_CLASSA(a) IP_CLASSA(a) +#define IN_CLASSA_NET IP_CLASSA_NET +#define IN_CLASSA_NSHIFT IP_CLASSA_NSHIFT +#define IN_CLASSA_HOST IP_CLASSA_HOST +#define IN_CLASSA_MAX IP_CLASSA_MAX + +#define IN_CLASSB(b) IP_CLASSB(b) +#define IN_CLASSB_NET IP_CLASSB_NET +#define IN_CLASSB_NSHIFT IP_CLASSB_NSHIFT +#define IN_CLASSB_HOST IP_CLASSB_HOST +#define IN_CLASSB_MAX IP_CLASSB_MAX + +#define IN_CLASSC(c) IP_CLASSC(c) +#define IN_CLASSC_NET IP_CLASSC_NET +#define IN_CLASSC_NSHIFT IP_CLASSC_NSHIFT +#define IN_CLASSC_HOST IP_CLASSC_HOST +#define IN_CLASSC_MAX IP_CLASSC_MAX + +#define IN_CLASSD(d) IP_CLASSD(d) +#define IN_CLASSD_NET IP_CLASSD_NET /* These ones aren't really */ +#define IN_CLASSD_NSHIFT IP_CLASSD_NSHIFT /* net and host fields, but */ +#define IN_CLASSD_HOST IP_CLASSD_HOST /* routing needn't know. 
*/ +#define IN_CLASSD_MAX IP_CLASSD_MAX + +#define IN_MULTICAST(a) IP_MULTICAST(a) + +#define IN_EXPERIMENTAL(a) IP_EXPERIMENTAL(a) +#define IN_BADCLASS(a) IP_BADCLASS(a) + +#define IN_LOOPBACKNET IP_LOOPBACKNET + + +#ifndef INET_ADDRSTRLEN +#define INET_ADDRSTRLEN IP4ADDR_STRLEN_MAX +#endif +#if LWIP_IPV6 +#ifndef INET6_ADDRSTRLEN +#define INET6_ADDRSTRLEN IP6ADDR_STRLEN_MAX +#endif +#endif + +#if LWIP_IPV4 + +#define inet_addr_from_ip4addr(target_inaddr, source_ipaddr) ((target_inaddr)->s_addr = ip4_addr_get_u32(source_ipaddr)) +#define inet_addr_to_ip4addr(target_ipaddr, source_inaddr) (ip4_addr_set_u32(target_ipaddr, (source_inaddr)->s_addr)) + +/* directly map this to the lwip internal functions */ +#define inet_addr(cp) ipaddr_addr(cp) +#define inet_aton(cp, addr) ip4addr_aton(cp, (ip4_addr_t*)addr) +#define inet_ntoa(addr) ip4addr_ntoa((const ip4_addr_t*)&(addr)) +#define inet_ntoa_r(addr, buf, buflen) ip4addr_ntoa_r((const ip4_addr_t*)&(addr), buf, buflen) + +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +#define inet6_addr_from_ip6addr(target_in6addr, source_ip6addr) {(target_in6addr)->un.u32_addr[0] = (source_ip6addr)->addr[0]; \ + (target_in6addr)->un.u32_addr[1] = (source_ip6addr)->addr[1]; \ + (target_in6addr)->un.u32_addr[2] = (source_ip6addr)->addr[2]; \ + (target_in6addr)->un.u32_addr[3] = (source_ip6addr)->addr[3];} +#define inet6_addr_to_ip6addr(target_ip6addr, source_in6addr) {(target_ip6addr)->addr[0] = (source_in6addr)->un.u32_addr[0]; \ + (target_ip6addr)->addr[1] = (source_in6addr)->un.u32_addr[1]; \ + (target_ip6addr)->addr[2] = (source_in6addr)->un.u32_addr[2]; \ + (target_ip6addr)->addr[3] = (source_in6addr)->un.u32_addr[3]; \ + ip6_addr_clear_zone(target_ip6addr);} + +/* directly map this to the lwip internal functions */ +#define inet6_aton(cp, addr) ip6addr_aton(cp, (ip6_addr_t*)addr) +#define inet6_ntoa(addr) ip6addr_ntoa((const ip6_addr_t*)&(addr)) +#define inet6_ntoa_r(addr, buf, buflen) ip6addr_ntoa_r((const ip6_addr_t*)&(addr), buf, buflen) + +#endif /* LWIP_IPV6 */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_INET_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/inet_chksum.h b/Middlewares/Third_Party/LwIP/src/include/lwip/inet_chksum.h new file mode 100644 index 0000000..76893ef --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/inet_chksum.h @@ -0,0 +1,105 @@ +/** + * @file + * IP checksum calculation functions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
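/* --- Editorial usage sketch (not part of the committed lwIP sources) ---
 * The inet.h hunk above maps the BSD-style inet_aton()/inet_ntoa() names onto
 * lwIP's own ip4addr_aton()/ip4addr_ntoa(). A minimal illustration, assuming
 * LWIP_IPV4 is enabled in lwipopts.h; the helper name is made up: */
#include <stdio.h>
#include "lwip/inet.h"

static void inet_wrapper_demo(void)
{
  struct in_addr ia;
  char buf[INET_ADDRSTRLEN];

  if (inet_aton("192.168.1.10", &ia)) {
    /* inet_ntoa() returns a shared static buffer (not reentrant); the _r
     * variant writes into a caller-supplied buffer instead. */
    printf("parsed: %s\n", inet_ntoa_r(ia, buf, sizeof(buf)));
  }
}
/* --- end of editorial sketch --- */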
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_INET_CHKSUM_H +#define LWIP_HDR_INET_CHKSUM_H + +#include "lwip/opt.h" + +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" + +/** Swap the bytes in an u16_t: much like lwip_htons() for little-endian */ +#ifndef SWAP_BYTES_IN_WORD +#define SWAP_BYTES_IN_WORD(w) (((w) & 0xff) << 8) | (((w) & 0xff00) >> 8) +#endif /* SWAP_BYTES_IN_WORD */ + +/** Split an u32_t in two u16_ts and add them up */ +#ifndef FOLD_U32T +#define FOLD_U32T(u) ((u32_t)(((u) >> 16) + ((u) & 0x0000ffffUL))) +#endif + +#if LWIP_CHECKSUM_ON_COPY +/** Function-like macro: same as MEMCPY but returns the checksum of copied data + as u16_t */ +# ifndef LWIP_CHKSUM_COPY +# define LWIP_CHKSUM_COPY(dst, src, len) lwip_chksum_copy(dst, src, len) +# ifndef LWIP_CHKSUM_COPY_ALGORITHM +# define LWIP_CHKSUM_COPY_ALGORITHM 1 +# endif /* LWIP_CHKSUM_COPY_ALGORITHM */ +# else /* LWIP_CHKSUM_COPY */ +# define LWIP_CHKSUM_COPY_ALGORITHM 0 +# endif /* LWIP_CHKSUM_COPY */ +#else /* LWIP_CHECKSUM_ON_COPY */ +# define LWIP_CHKSUM_COPY_ALGORITHM 0 +#endif /* LWIP_CHECKSUM_ON_COPY */ + +#ifdef __cplusplus +extern "C" { +#endif + +u16_t inet_chksum(const void *dataptr, u16_t len); +u16_t inet_chksum_pbuf(struct pbuf *p); +#if LWIP_CHKSUM_COPY_ALGORITHM +u16_t lwip_chksum_copy(void *dst, const void *src, u16_t len); +#endif /* LWIP_CHKSUM_COPY_ALGORITHM */ + +#if LWIP_IPV4 +u16_t inet_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip4_addr_t *src, const ip4_addr_t *dest); +u16_t inet_chksum_pseudo_partial(struct pbuf *p, u8_t proto, + u16_t proto_len, u16_t chksum_len, const ip4_addr_t *src, const ip4_addr_t *dest); +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +u16_t ip6_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip6_addr_t *src, const ip6_addr_t *dest); +u16_t ip6_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip6_addr_t *src, const ip6_addr_t *dest); +#endif /* LWIP_IPV6 */ + + +u16_t ip_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip_addr_t *src, const ip_addr_t *dest); +u16_t ip_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip_addr_t *src, const ip_addr_t *dest); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_INET_H */ + diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/init.h b/Middlewares/Third_Party/LwIP/src/include/lwip/init.h new file mode 100644 index 0000000..a149be1 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/init.h @@ -0,0 +1,100 @@ +/** + * @file + * lwIP initialization API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_INIT_H +#define LWIP_HDR_INIT_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup lwip_version Version + * @ingroup lwip + * @{ + */ + +/** X.x.x: Major version of the stack */ +#define LWIP_VERSION_MAJOR 2 +/** x.X.x: Minor version of the stack */ +#define LWIP_VERSION_MINOR 1 +/** x.x.X: Revision of the stack */ +#define LWIP_VERSION_REVISION 2 +/** For release candidates, this is set to 1..254 + * For official releases, this is set to 255 (LWIP_RC_RELEASE) + * For development versions (Git), this is set to 0 (LWIP_RC_DEVELOPMENT) */ +#define LWIP_VERSION_RC LWIP_RC_RELEASE + +/** LWIP_VERSION_RC is set to LWIP_RC_RELEASE for official releases */ +#define LWIP_RC_RELEASE 255 +/** LWIP_VERSION_RC is set to LWIP_RC_DEVELOPMENT for Git versions */ +#define LWIP_RC_DEVELOPMENT 0 + +#define LWIP_VERSION_IS_RELEASE (LWIP_VERSION_RC == LWIP_RC_RELEASE) +#define LWIP_VERSION_IS_DEVELOPMENT (LWIP_VERSION_RC == LWIP_RC_DEVELOPMENT) +#define LWIP_VERSION_IS_RC ((LWIP_VERSION_RC != LWIP_RC_RELEASE) && (LWIP_VERSION_RC != LWIP_RC_DEVELOPMENT)) + +/* Some helper defines to get a version string */ +#define LWIP_VERSTR2(x) #x +#define LWIP_VERSTR(x) LWIP_VERSTR2(x) +#if LWIP_VERSION_IS_RELEASE +#define LWIP_VERSION_STRING_SUFFIX "" +#elif LWIP_VERSION_IS_DEVELOPMENT +#define LWIP_VERSION_STRING_SUFFIX "d" +#else +#define LWIP_VERSION_STRING_SUFFIX "rc" LWIP_VERSTR(LWIP_VERSION_RC) +#endif + +/** Provides the version of the stack */ +#define LWIP_VERSION ((LWIP_VERSION_MAJOR) << 24 | (LWIP_VERSION_MINOR) << 16 | \ + (LWIP_VERSION_REVISION) << 8 | (LWIP_VERSION_RC)) +/** Provides the version of the stack as string */ +#define LWIP_VERSION_STRING LWIP_VERSTR(LWIP_VERSION_MAJOR) "." LWIP_VERSTR(LWIP_VERSION_MINOR) "." 
LWIP_VERSTR(LWIP_VERSION_REVISION) LWIP_VERSION_STRING_SUFFIX + +/** + * @} + */ + +/* Modules initialization */ +void lwip_init(void); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_INIT_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/ip.h b/Middlewares/Third_Party/LwIP/src/include/lwip/ip.h new file mode 100644 index 0000000..653c3b2 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/ip.h @@ -0,0 +1,330 @@ +/** + * @file + * IP API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_IP_H +#define LWIP_HDR_IP_H + +#include "lwip/opt.h" + +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/err.h" +#include "lwip/netif.h" +#include "lwip/ip4.h" +#include "lwip/ip6.h" +#include "lwip/prot/ip.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* This is passed as the destination address to ip_output_if (not + to ip_output), meaning that an IP header already is constructed + in the pbuf. This is used when TCP retransmits. */ +#define LWIP_IP_HDRINCL NULL + +/** pbufs passed to IP must have a ref-count of 1 as their payload pointer + gets altered as the packet is passed down the stack */ +#ifndef LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX +#define LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p) LWIP_ASSERT("p->ref == 1", (p)->ref == 1) +#endif + +#if LWIP_NETIF_USE_HINTS +#define IP_PCB_NETIFHINT ;struct netif_hint netif_hints +#else /* LWIP_NETIF_USE_HINTS */ +#define IP_PCB_NETIFHINT +#endif /* LWIP_NETIF_USE_HINTS */ + +/** This is the common part of all PCB types. It needs to be at the + beginning of a PCB type definition. It is located here so that + changes to this common part are made in one location instead of + having to change all PCB structs. 
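/* --- Editorial sketch (not part of the committed lwIP sources) ---
 * The IP_PCB macro defined just below is placed at the start of every protocol
 * control block (lwIP's own udp_pcb, tcp_pcb and raw_pcb all begin with it), so
 * the generic ip_get_option()/ip_set_option() macros further down work on any
 * of them. A hypothetical PCB type, purely for illustration: */
struct my_proto_pcb {
  IP_PCB;             /* local_ip, remote_ip, netif_idx, so_options, tos, ttl */
  u16_t local_port;   /* protocol-specific members follow the common part */
};
/* --- end of editorial sketch --- */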
*/ +#define IP_PCB \ + /* ip addresses in network byte order */ \ + ip_addr_t local_ip; \ + ip_addr_t remote_ip; \ + /* Bound netif index */ \ + u8_t netif_idx; \ + /* Socket options */ \ + u8_t so_options; \ + /* Type Of Service */ \ + u8_t tos; \ + /* Time To Live */ \ + u8_t ttl \ + /* link layer address resolution hint */ \ + IP_PCB_NETIFHINT + +struct ip_pcb { + /* Common members of all PCB types */ + IP_PCB; +}; + +/* + * Option flags per-socket. These are the same like SO_XXX in sockets.h + */ +#define SOF_REUSEADDR 0x04U /* allow local address reuse */ +#define SOF_KEEPALIVE 0x08U /* keep connections alive */ +#define SOF_BROADCAST 0x20U /* permit to send and to receive broadcast messages (see IP_SOF_BROADCAST option) */ + +/* These flags are inherited (e.g. from a listen-pcb to a connection-pcb): */ +#define SOF_INHERITED (SOF_REUSEADDR|SOF_KEEPALIVE) + +/** Global variables of this module, kept in a struct for efficient access using base+index. */ +struct ip_globals +{ + /** The interface that accepted the packet for the current callback invocation. */ + struct netif *current_netif; + /** The interface that received the packet for the current callback invocation. */ + struct netif *current_input_netif; +#if LWIP_IPV4 + /** Header of the input packet currently being processed. */ + const struct ip_hdr *current_ip4_header; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + /** Header of the input IPv6 packet currently being processed. */ + struct ip6_hdr *current_ip6_header; +#endif /* LWIP_IPV6 */ + /** Total header length of current_ip4/6_header (i.e. after this, the UDP/TCP header starts) */ + u16_t current_ip_header_tot_len; + /** Source IP address of current_header */ + ip_addr_t current_iphdr_src; + /** Destination IP address of current_header */ + ip_addr_t current_iphdr_dest; +}; +extern struct ip_globals ip_data; + + +/** Get the interface that accepted the current packet. + * This may or may not be the receiving netif, depending on your netif/network setup. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip_current_netif() (ip_data.current_netif) +/** Get the interface that received the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip_current_input_netif() (ip_data.current_input_netif) +/** Total header length of ip(6)_current_header() (i.e. after this, the UDP/TCP header starts) */ +#define ip_current_header_tot_len() (ip_data.current_ip_header_tot_len) +/** Source IP address of current_header */ +#define ip_current_src_addr() (&ip_data.current_iphdr_src) +/** Destination IP address of current_header */ +#define ip_current_dest_addr() (&ip_data.current_iphdr_dest) + +#if LWIP_IPV4 && LWIP_IPV6 +/** Get the IPv4 header of the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip4_current_header() ip_data.current_ip4_header +/** Get the IPv6 header of the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. 
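/* --- Editorial usage sketch (not part of the committed lwIP sources) ---
 * The ip_current_*() macros above are only meaningful while lwIP is executing a
 * receive callback. A minimal UDP example, assuming "lwip/udp.h" from the same
 * stack; the callback name is made up and would typically be registered with
 * udp_recv(pcb, my_udp_recv, NULL): */
#include "lwip/udp.h"
#include "lwip/ip.h"

static void my_udp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p,
                        const ip_addr_t *addr, u16_t port)
{
  /* Valid only here, during input processing: */
  struct netif *in_netif = ip_current_input_netif();
  const ip_addr_t *dst   = ip_current_dest_addr(); /* handy on multi-homed devices */

  (void)arg; (void)pcb; (void)addr; (void)port;
  (void)in_netif; (void)dst;

  pbuf_free(p);   /* the receive callback owns the pbuf */
}
/* --- end of editorial sketch --- */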
*/ +#define ip6_current_header() ((const struct ip6_hdr*)(ip_data.current_ip6_header)) +/** Returns TRUE if the current IP input packet is IPv6, FALSE if it is IPv4 */ +#define ip_current_is_v6() (ip6_current_header() != NULL) +/** Source IPv6 address of current_header */ +#define ip6_current_src_addr() (ip_2_ip6(&ip_data.current_iphdr_src)) +/** Destination IPv6 address of current_header */ +#define ip6_current_dest_addr() (ip_2_ip6(&ip_data.current_iphdr_dest)) +/** Get the transport layer protocol */ +#define ip_current_header_proto() (ip_current_is_v6() ? \ + IP6H_NEXTH(ip6_current_header()) :\ + IPH_PROTO(ip4_current_header())) +/** Get the transport layer header */ +#define ip_next_header_ptr() ((const void*)((ip_current_is_v6() ? \ + (const u8_t*)ip6_current_header() : (const u8_t*)ip4_current_header()) + ip_current_header_tot_len())) + +/** Source IP4 address of current_header */ +#define ip4_current_src_addr() (ip_2_ip4(&ip_data.current_iphdr_src)) +/** Destination IP4 address of current_header */ +#define ip4_current_dest_addr() (ip_2_ip4(&ip_data.current_iphdr_dest)) + +#elif LWIP_IPV4 /* LWIP_IPV4 && LWIP_IPV6 */ + +/** Get the IPv4 header of the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip4_current_header() ip_data.current_ip4_header +/** Always returns FALSE when only supporting IPv4 only */ +#define ip_current_is_v6() 0 +/** Get the transport layer protocol */ +#define ip_current_header_proto() IPH_PROTO(ip4_current_header()) +/** Get the transport layer header */ +#define ip_next_header_ptr() ((const void*)((const u8_t*)ip4_current_header() + ip_current_header_tot_len())) +/** Source IP4 address of current_header */ +#define ip4_current_src_addr() (&ip_data.current_iphdr_src) +/** Destination IP4 address of current_header */ +#define ip4_current_dest_addr() (&ip_data.current_iphdr_dest) + +#elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */ + +/** Get the IPv6 header of the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. 
*/ +#define ip6_current_header() ((const struct ip6_hdr*)(ip_data.current_ip6_header)) +/** Always returns TRUE when only supporting IPv6 only */ +#define ip_current_is_v6() 1 +/** Get the transport layer protocol */ +#define ip_current_header_proto() IP6H_NEXTH(ip6_current_header()) +/** Get the transport layer header */ +#define ip_next_header_ptr() ((const void*)(((const u8_t*)ip6_current_header()) + ip_current_header_tot_len())) +/** Source IP6 address of current_header */ +#define ip6_current_src_addr() (&ip_data.current_iphdr_src) +/** Destination IP6 address of current_header */ +#define ip6_current_dest_addr() (&ip_data.current_iphdr_dest) + +#endif /* LWIP_IPV6 */ + +/** Union source address of current_header */ +#define ip_current_src_addr() (&ip_data.current_iphdr_src) +/** Union destination address of current_header */ +#define ip_current_dest_addr() (&ip_data.current_iphdr_dest) + +/** Gets an IP pcb option (SOF_* flags) */ +#define ip_get_option(pcb, opt) ((pcb)->so_options & (opt)) +/** Sets an IP pcb option (SOF_* flags) */ +#define ip_set_option(pcb, opt) ((pcb)->so_options = (u8_t)((pcb)->so_options | (opt))) +/** Resets an IP pcb option (SOF_* flags) */ +#define ip_reset_option(pcb, opt) ((pcb)->so_options = (u8_t)((pcb)->so_options & ~(opt))) + +#if LWIP_IPV4 && LWIP_IPV6 +/** + * @ingroup ip + * Output IP packet, netif is selected by source address + */ +#define ip_output(p, src, dest, ttl, tos, proto) \ + (IP_IS_V6(dest) ? \ + ip6_output(p, ip_2_ip6(src), ip_2_ip6(dest), ttl, tos, proto) : \ + ip4_output(p, ip_2_ip4(src), ip_2_ip4(dest), ttl, tos, proto)) +/** + * @ingroup ip + * Output IP packet to specified interface + */ +#define ip_output_if(p, src, dest, ttl, tos, proto, netif) \ + (IP_IS_V6(dest) ? \ + ip6_output_if(p, ip_2_ip6(src), ip_2_ip6(dest), ttl, tos, proto, netif) : \ + ip4_output_if(p, ip_2_ip4(src), ip_2_ip4(dest), ttl, tos, proto, netif)) +/** + * @ingroup ip + * Output IP packet to interface specifying source address + */ +#define ip_output_if_src(p, src, dest, ttl, tos, proto, netif) \ + (IP_IS_V6(dest) ? \ + ip6_output_if_src(p, ip_2_ip6(src), ip_2_ip6(dest), ttl, tos, proto, netif) : \ + ip4_output_if_src(p, ip_2_ip4(src), ip_2_ip4(dest), ttl, tos, proto, netif)) +/** Output IP packet that already includes an IP header. */ +#define ip_output_if_hdrincl(p, src, dest, netif) \ + (IP_IS_V6(dest) ? \ + ip6_output_if(p, ip_2_ip6(src), LWIP_IP_HDRINCL, 0, 0, 0, netif) : \ + ip4_output_if(p, ip_2_ip4(src), LWIP_IP_HDRINCL, 0, 0, 0, netif)) +/** Output IP packet with netif_hint */ +#define ip_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) \ + (IP_IS_V6(dest) ? \ + ip6_output_hinted(p, ip_2_ip6(src), ip_2_ip6(dest), ttl, tos, proto, netif_hint) : \ + ip4_output_hinted(p, ip_2_ip4(src), ip_2_ip4(dest), ttl, tos, proto, netif_hint)) +/** + * @ingroup ip + * Get netif for address combination. See \ref ip6_route and \ref ip4_route + */ +#define ip_route(src, dest) \ + (IP_IS_V6(dest) ? \ + ip6_route(ip_2_ip6(src), ip_2_ip6(dest)) : \ + ip4_route_src(ip_2_ip4(src), ip_2_ip4(dest))) +/** + * @ingroup ip + * Get netif for IP. + */ +#define ip_netif_get_local_ip(netif, dest) (IP_IS_V6(dest) ? \ + ip6_netif_get_local_ip(netif, ip_2_ip6(dest)) : \ + ip4_netif_get_local_ip(netif)) +#define ip_debug_print(is_ipv6, p) ((is_ipv6) ? 
ip6_debug_print(p) : ip4_debug_print(p)) + +err_t ip_input(struct pbuf *p, struct netif *inp); + +#elif LWIP_IPV4 /* LWIP_IPV4 && LWIP_IPV6 */ + +#define ip_output(p, src, dest, ttl, tos, proto) \ + ip4_output(p, src, dest, ttl, tos, proto) +#define ip_output_if(p, src, dest, ttl, tos, proto, netif) \ + ip4_output_if(p, src, dest, ttl, tos, proto, netif) +#define ip_output_if_src(p, src, dest, ttl, tos, proto, netif) \ + ip4_output_if_src(p, src, dest, ttl, tos, proto, netif) +#define ip_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) \ + ip4_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) +#define ip_output_if_hdrincl(p, src, dest, netif) \ + ip4_output_if(p, src, LWIP_IP_HDRINCL, 0, 0, 0, netif) +#define ip_route(src, dest) \ + ip4_route_src(src, dest) +#define ip_netif_get_local_ip(netif, dest) \ + ip4_netif_get_local_ip(netif) +#define ip_debug_print(is_ipv6, p) ip4_debug_print(p) + +#define ip_input ip4_input + +#elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */ + +#define ip_output(p, src, dest, ttl, tos, proto) \ + ip6_output(p, src, dest, ttl, tos, proto) +#define ip_output_if(p, src, dest, ttl, tos, proto, netif) \ + ip6_output_if(p, src, dest, ttl, tos, proto, netif) +#define ip_output_if_src(p, src, dest, ttl, tos, proto, netif) \ + ip6_output_if_src(p, src, dest, ttl, tos, proto, netif) +#define ip_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) \ + ip6_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) +#define ip_output_if_hdrincl(p, src, dest, netif) \ + ip6_output_if(p, src, LWIP_IP_HDRINCL, 0, 0, 0, netif) +#define ip_route(src, dest) \ + ip6_route(src, dest) +#define ip_netif_get_local_ip(netif, dest) \ + ip6_netif_get_local_ip(netif, dest) +#define ip_debug_print(is_ipv6, p) ip6_debug_print(p) + +#define ip_input ip6_input + +#endif /* LWIP_IPV6 */ + +#define ip_route_get_local_ip(src, dest, netif, ipaddr) do { \ + (netif) = ip_route(src, dest); \ + (ipaddr) = ip_netif_get_local_ip(netif, dest); \ +}while(0) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_IP_H */ + + diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/ip4.h b/Middlewares/Third_Party/LwIP/src/include/lwip/ip4.h new file mode 100644 index 0000000..fd35a33 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/ip4.h @@ -0,0 +1,111 @@ +/** + * @file + * IPv4 API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
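/* --- Editorial usage sketch (not part of the committed lwIP sources) ---
 * The SOF_* flags and ip_set_option()/ip_get_option() macros in the ip.h hunk
 * above act on the so_options field carried by every PCB. For example, with
 * IP_SOF_BROADCAST enabled in lwipopts.h, a UDP PCB has to carry SOF_BROADCAST
 * before it may send to a broadcast address (a sketch, not a drop-in driver): */
#include "lwip/udp.h"
#include "lwip/ip.h"

static struct udp_pcb *make_broadcast_pcb(void)
{
  struct udp_pcb *pcb = udp_new();
  if (pcb != NULL) {
    ip_set_option(pcb, SOF_BROADCAST);  /* permit sends to 255.255.255.255 etc. */
  }
  return pcb;
}
/* --- end of editorial sketch --- */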
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_IP4_H +#define LWIP_HDR_IP4_H + +#include "lwip/opt.h" + +#if LWIP_IPV4 + +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/ip4_addr.h" +#include "lwip/err.h" +#include "lwip/netif.h" +#include "lwip/prot/ip4.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef LWIP_HOOK_IP4_ROUTE_SRC +#define LWIP_IPV4_SRC_ROUTING 1 +#else +#define LWIP_IPV4_SRC_ROUTING 0 +#endif + +/** Currently, the function ip_output_if_opt() is only used with IGMP */ +#define IP_OPTIONS_SEND (LWIP_IPV4 && LWIP_IGMP) + +#define ip_init() /* Compatibility define, no init needed. */ +struct netif *ip4_route(const ip4_addr_t *dest); +#if LWIP_IPV4_SRC_ROUTING +struct netif *ip4_route_src(const ip4_addr_t *src, const ip4_addr_t *dest); +#else /* LWIP_IPV4_SRC_ROUTING */ +#define ip4_route_src(src, dest) ip4_route(dest) +#endif /* LWIP_IPV4_SRC_ROUTING */ +err_t ip4_input(struct pbuf *p, struct netif *inp); +err_t ip4_output(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto); +err_t ip4_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif); +err_t ip4_output_if_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif); +#if LWIP_NETIF_USE_HINTS +err_t ip4_output_hinted(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif_hint *netif_hint); +#endif /* LWIP_NETIF_USE_HINTS */ +#if IP_OPTIONS_SEND +err_t ip4_output_if_opt(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, + u16_t optlen); +err_t ip4_output_if_opt_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, + u16_t optlen); +#endif /* IP_OPTIONS_SEND */ + +#if LWIP_MULTICAST_TX_OPTIONS +void ip4_set_default_multicast_netif(struct netif* default_multicast_netif); +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#define ip4_netif_get_local_ip(netif) (((netif) != NULL) ? netif_ip_addr4(netif) : NULL) + +#if IP_DEBUG +void ip4_debug_print(struct pbuf *p); +#else +#define ip4_debug_print(p) +#endif /* IP_DEBUG */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 */ + +#endif /* LWIP_HDR_IP_H */ + + diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_addr.h b/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_addr.h new file mode 100644 index 0000000..f244c4f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_addr.h @@ -0,0 +1,216 @@ +/** + * @file + * IPv4 address API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. 
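/* --- Editorial usage sketch (not part of the committed lwIP sources) ---
 * ip4_route() from the ip4.h hunk above selects the outgoing netif for a
 * destination, and ip4_netif_get_local_ip() yields the address lwIP would use
 * as source. A small sketch, assuming LWIP_IPV4 and that it runs in the
 * lwIP/tcpip context; the helper name is made up: */
#include "lwip/ip4.h"
#include "lwip/ip_addr.h"

static const ip_addr_t *source_addr_for(const ip4_addr_t *dest)
{
  struct netif *out = ip4_route(dest);
  return (out != NULL) ? ip4_netif_get_local_ip(out) : NULL;
}
/* --- end of editorial sketch --- */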
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_IP4_ADDR_H +#define LWIP_HDR_IP4_ADDR_H + +#include "lwip/opt.h" +#include "lwip/def.h" + +#if LWIP_IPV4 + +#ifdef __cplusplus +extern "C" { +#endif + +/** This is the aligned version of ip4_addr_t, + used as local variable, on the stack, etc. */ +struct ip4_addr { + u32_t addr; +}; + +/** ip4_addr_t uses a struct for convenience only, so that the same defines can + * operate both on ip4_addr_t as well as on ip4_addr_p_t. */ +typedef struct ip4_addr ip4_addr_t; + +/* Forward declaration to not include netif.h */ +struct netif; + +/** 255.255.255.255 */ +#define IPADDR_NONE ((u32_t)0xffffffffUL) +/** 127.0.0.1 */ +#define IPADDR_LOOPBACK ((u32_t)0x7f000001UL) +/** 0.0.0.0 */ +#define IPADDR_ANY ((u32_t)0x00000000UL) +/** 255.255.255.255 */ +#define IPADDR_BROADCAST ((u32_t)0xffffffffUL) + +/* Definitions of the bits in an Internet address integer. + + On subnets, host and network parts are found according to + the subnet mask, not these masks. */ +#define IP_CLASSA(a) ((((u32_t)(a)) & 0x80000000UL) == 0) +#define IP_CLASSA_NET 0xff000000 +#define IP_CLASSA_NSHIFT 24 +#define IP_CLASSA_HOST (0xffffffff & ~IP_CLASSA_NET) +#define IP_CLASSA_MAX 128 + +#define IP_CLASSB(a) ((((u32_t)(a)) & 0xc0000000UL) == 0x80000000UL) +#define IP_CLASSB_NET 0xffff0000 +#define IP_CLASSB_NSHIFT 16 +#define IP_CLASSB_HOST (0xffffffff & ~IP_CLASSB_NET) +#define IP_CLASSB_MAX 65536 + +#define IP_CLASSC(a) ((((u32_t)(a)) & 0xe0000000UL) == 0xc0000000UL) +#define IP_CLASSC_NET 0xffffff00 +#define IP_CLASSC_NSHIFT 8 +#define IP_CLASSC_HOST (0xffffffff & ~IP_CLASSC_NET) + +#define IP_CLASSD(a) (((u32_t)(a) & 0xf0000000UL) == 0xe0000000UL) +#define IP_CLASSD_NET 0xf0000000 /* These ones aren't really */ +#define IP_CLASSD_NSHIFT 28 /* net and host fields, but */ +#define IP_CLASSD_HOST 0x0fffffff /* routing needn't know. 
*/ +#define IP_MULTICAST(a) IP_CLASSD(a) + +#define IP_EXPERIMENTAL(a) (((u32_t)(a) & 0xf0000000UL) == 0xf0000000UL) +#define IP_BADCLASS(a) (((u32_t)(a) & 0xf0000000UL) == 0xf0000000UL) + +#define IP_LOOPBACKNET 127 /* official! */ + +/** Set an IP address given by the four byte-parts */ +#define IP4_ADDR(ipaddr, a,b,c,d) (ipaddr)->addr = PP_HTONL(LWIP_MAKEU32(a,b,c,d)) + +/** Copy IP address - faster than ip4_addr_set: no NULL check */ +#define ip4_addr_copy(dest, src) ((dest).addr = (src).addr) +/** Safely copy one IP address to another (src may be NULL) */ +#define ip4_addr_set(dest, src) ((dest)->addr = \ + ((src) == NULL ? 0 : \ + (src)->addr)) +/** Set complete address to zero */ +#define ip4_addr_set_zero(ipaddr) ((ipaddr)->addr = 0) +/** Set address to IPADDR_ANY (no need for lwip_htonl()) */ +#define ip4_addr_set_any(ipaddr) ((ipaddr)->addr = IPADDR_ANY) +/** Set address to loopback address */ +#define ip4_addr_set_loopback(ipaddr) ((ipaddr)->addr = PP_HTONL(IPADDR_LOOPBACK)) +/** Check if an address is in the loopback region */ +#define ip4_addr_isloopback(ipaddr) (((ipaddr)->addr & PP_HTONL(IP_CLASSA_NET)) == PP_HTONL(((u32_t)IP_LOOPBACKNET) << 24)) +/** Safely copy one IP address to another and change byte order + * from host- to network-order. */ +#define ip4_addr_set_hton(dest, src) ((dest)->addr = \ + ((src) == NULL ? 0:\ + lwip_htonl((src)->addr))) +/** IPv4 only: set the IP address given as an u32_t */ +#define ip4_addr_set_u32(dest_ipaddr, src_u32) ((dest_ipaddr)->addr = (src_u32)) +/** IPv4 only: get the IP address as an u32_t */ +#define ip4_addr_get_u32(src_ipaddr) ((src_ipaddr)->addr) + +/** Get the network address by combining host address with netmask */ +#define ip4_addr_get_network(target, host, netmask) do { ((target)->addr = ((host)->addr) & ((netmask)->addr)); } while(0) + +/** + * Determine if two address are on the same network. + * + * @arg addr1 IP address 1 + * @arg addr2 IP address 2 + * @arg mask network identifier mask + * @return !0 if the network identifiers of both address match + */ +#define ip4_addr_netcmp(addr1, addr2, mask) (((addr1)->addr & \ + (mask)->addr) == \ + ((addr2)->addr & \ + (mask)->addr)) +#define ip4_addr_cmp(addr1, addr2) ((addr1)->addr == (addr2)->addr) + +#define ip4_addr_isany_val(addr1) ((addr1).addr == IPADDR_ANY) +#define ip4_addr_isany(addr1) ((addr1) == NULL || ip4_addr_isany_val(*(addr1))) + +#define ip4_addr_isbroadcast(addr1, netif) ip4_addr_isbroadcast_u32((addr1)->addr, netif) +u8_t ip4_addr_isbroadcast_u32(u32_t addr, const struct netif *netif); + +#define ip_addr_netmask_valid(netmask) ip4_addr_netmask_valid((netmask)->addr) +u8_t ip4_addr_netmask_valid(u32_t netmask); + +#define ip4_addr_ismulticast(addr1) (((addr1)->addr & PP_HTONL(0xf0000000UL)) == PP_HTONL(0xe0000000UL)) + +#define ip4_addr_islinklocal(addr1) (((addr1)->addr & PP_HTONL(0xffff0000UL)) == PP_HTONL(0xa9fe0000UL)) + +#define ip4_addr_debug_print_parts(debug, a, b, c, d) \ + LWIP_DEBUGF(debug, ("%" U16_F ".%" U16_F ".%" U16_F ".%" U16_F, a, b, c, d)) +#define ip4_addr_debug_print(debug, ipaddr) \ + ip4_addr_debug_print_parts(debug, \ + (u16_t)((ipaddr) != NULL ? ip4_addr1_16(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? ip4_addr2_16(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? ip4_addr3_16(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? 
ip4_addr4_16(ipaddr) : 0)) +#define ip4_addr_debug_print_val(debug, ipaddr) \ + ip4_addr_debug_print_parts(debug, \ + ip4_addr1_16_val(ipaddr), \ + ip4_addr2_16_val(ipaddr), \ + ip4_addr3_16_val(ipaddr), \ + ip4_addr4_16_val(ipaddr)) + +/* Get one byte from the 4-byte address */ +#define ip4_addr_get_byte(ipaddr, idx) (((const u8_t*)(&(ipaddr)->addr))[idx]) +#define ip4_addr1(ipaddr) ip4_addr_get_byte(ipaddr, 0) +#define ip4_addr2(ipaddr) ip4_addr_get_byte(ipaddr, 1) +#define ip4_addr3(ipaddr) ip4_addr_get_byte(ipaddr, 2) +#define ip4_addr4(ipaddr) ip4_addr_get_byte(ipaddr, 3) +/* Get one byte from the 4-byte address, but argument is 'ip4_addr_t', + * not a pointer */ +#define ip4_addr_get_byte_val(ipaddr, idx) ((u8_t)(((ipaddr).addr >> (idx * 8)) & 0xff)) +#define ip4_addr1_val(ipaddr) ip4_addr_get_byte_val(ipaddr, 0) +#define ip4_addr2_val(ipaddr) ip4_addr_get_byte_val(ipaddr, 1) +#define ip4_addr3_val(ipaddr) ip4_addr_get_byte_val(ipaddr, 2) +#define ip4_addr4_val(ipaddr) ip4_addr_get_byte_val(ipaddr, 3) +/* These are cast to u16_t, with the intent that they are often arguments + * to printf using the U16_F format from cc.h. */ +#define ip4_addr1_16(ipaddr) ((u16_t)ip4_addr1(ipaddr)) +#define ip4_addr2_16(ipaddr) ((u16_t)ip4_addr2(ipaddr)) +#define ip4_addr3_16(ipaddr) ((u16_t)ip4_addr3(ipaddr)) +#define ip4_addr4_16(ipaddr) ((u16_t)ip4_addr4(ipaddr)) +#define ip4_addr1_16_val(ipaddr) ((u16_t)ip4_addr1_val(ipaddr)) +#define ip4_addr2_16_val(ipaddr) ((u16_t)ip4_addr2_val(ipaddr)) +#define ip4_addr3_16_val(ipaddr) ((u16_t)ip4_addr3_val(ipaddr)) +#define ip4_addr4_16_val(ipaddr) ((u16_t)ip4_addr4_val(ipaddr)) + +#define IP4ADDR_STRLEN_MAX 16 + +/** For backwards compatibility */ +#define ip_ntoa(ipaddr) ipaddr_ntoa(ipaddr) + +u32_t ipaddr_addr(const char *cp); +int ip4addr_aton(const char *cp, ip4_addr_t *addr); +/** returns ptr to static buffer; not reentrant! */ +char *ip4addr_ntoa(const ip4_addr_t *addr); +char *ip4addr_ntoa_r(const ip4_addr_t *addr, char *buf, int buflen); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 */ + +#endif /* LWIP_HDR_IP_ADDR_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_frag.h b/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_frag.h new file mode 100644 index 0000000..ed5bf14 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_frag.h @@ -0,0 +1,100 @@ +/** + * @file + * IP fragmentation/reassembly + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
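/* --- Editorial usage sketch (not part of the committed lwIP sources) ---
 * Building IPv4 addresses with IP4_ADDR() from the ip4_addr.h hunk above, and
 * printing one with the reentrant ip4addr_ntoa_r() (ip4addr_ntoa() returns a
 * shared static buffer). The address values are placeholders: */
#include <stdio.h>
#include "lwip/ip4_addr.h"

static void addr_demo(void)
{
  ip4_addr_t ip, mask, gw;
  char buf[IP4ADDR_STRLEN_MAX];

  IP4_ADDR(&ip,   192, 168,   1,  50);
  IP4_ADDR(&mask, 255, 255, 255,   0);
  IP4_ADDR(&gw,   192, 168,   1,   1);

  /* ip and gw share the same network under this netmask */
  if (ip4_addr_netcmp(&ip, &gw, &mask)) {
    printf("local gateway: %s\n", ip4addr_ntoa_r(&gw, buf, sizeof(buf)));
  }
}
/* --- end of editorial sketch --- */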
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Jani Monoses + * + */ + +#ifndef LWIP_HDR_IP4_FRAG_H +#define LWIP_HDR_IP4_FRAG_H + +#include "lwip/opt.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/ip_addr.h" +#include "lwip/ip.h" + +#if LWIP_IPV4 + +#ifdef __cplusplus +extern "C" { +#endif + +#if IP_REASSEMBLY +/* The IP reassembly timer interval in milliseconds. */ +#define IP_TMR_INTERVAL 1000 + +/** IP reassembly helper struct. + * This is exported because memp needs to know the size. + */ +struct ip_reassdata { + struct ip_reassdata *next; + struct pbuf *p; + struct ip_hdr iphdr; + u16_t datagram_len; + u8_t flags; + u8_t timer; +}; + +void ip_reass_init(void); +void ip_reass_tmr(void); +struct pbuf * ip4_reass(struct pbuf *p); +#endif /* IP_REASSEMBLY */ + +#if IP_FRAG +#if !LWIP_NETIF_TX_SINGLE_PBUF +#ifndef LWIP_PBUF_CUSTOM_REF_DEFINED +#define LWIP_PBUF_CUSTOM_REF_DEFINED +/** A custom pbuf that holds a reference to another pbuf, which is freed + * when this custom pbuf is freed. This is used to create a custom PBUF_REF + * that points into the original pbuf. */ +struct pbuf_custom_ref { + /** 'base class' */ + struct pbuf_custom pc; + /** pointer to the original pbuf that is referenced */ + struct pbuf *original; +}; +#endif /* LWIP_PBUF_CUSTOM_REF_DEFINED */ +#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */ + +err_t ip4_frag(struct pbuf *p, struct netif *netif, const ip4_addr_t *dest); +#endif /* IP_FRAG */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 */ + +#endif /* LWIP_HDR_IP4_FRAG_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/ip6.h b/Middlewares/Third_Party/LwIP/src/include/lwip/ip6.h new file mode 100644 index 0000000..f894e06 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/ip6.h @@ -0,0 +1,93 @@ +/** + * @file + * + * IPv6 layer. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
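/* --- Editorial note with sketch (not part of the committed lwIP sources) ---
 * ip_reass_tmr() from the ip4_frag.h hunk above must fire every IP_TMR_INTERVAL
 * (1000 ms). With the timeouts module enabled, lwIP registers this and the other
 * cyclic timers itself; a bare-metal NO_SYS=1 super-loop only needs to call
 * sys_check_timeouts() regularly. A sketch, with the driver poll left abstract: */
#include "lwip/init.h"
#include "lwip/timeouts.h"

static void net_main_loop(void)   /* hypothetical bare-metal main loop */
{
  lwip_init();
  /* ... bring up the netif here (port-specific) ... */
  for (;;) {
    /* poll the MAC driver for received frames (port-specific) */
    sys_check_timeouts();         /* dispatches ip_reass_tmr() and friends */
  }
}
/* --- end of editorial sketch --- */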
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ +#ifndef LWIP_HDR_IP6_H +#define LWIP_HDR_IP6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip6_addr.h" +#include "lwip/prot/ip6.h" +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" + +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct netif *ip6_route(const ip6_addr_t *src, const ip6_addr_t *dest); +const ip_addr_t *ip6_select_source_address(struct netif *netif, const ip6_addr_t * dest); +err_t ip6_input(struct pbuf *p, struct netif *inp); +err_t ip6_output(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth); +err_t ip6_output_if(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth, struct netif *netif); +err_t ip6_output_if_src(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth, struct netif *netif); +#if LWIP_NETIF_USE_HINTS +err_t ip6_output_hinted(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth, struct netif_hint *netif_hint); +#endif /* LWIP_NETIF_USE_HINTS */ +#if LWIP_IPV6_MLD +err_t ip6_options_add_hbh_ra(struct pbuf * p, u8_t nexth, u8_t value); +#endif /* LWIP_IPV6_MLD */ + +#define ip6_netif_get_local_ip(netif, dest) (((netif) != NULL) ? \ + ip6_select_source_address(netif, dest) : NULL) + +#if IP6_DEBUG +void ip6_debug_print(struct pbuf *p); +#else +#define ip6_debug_print(p) +#endif /* IP6_DEBUG */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_IP6_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_addr.h b/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_addr.h new file mode 100644 index 0000000..29c2a34 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_addr.h @@ -0,0 +1,352 @@ +/** + * @file + * + * IPv6 addresses. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * Structs and macros for handling IPv6 addresses. + * + * Please coordinate changes and requests with Ivan Delamer + * + */ +#ifndef LWIP_HDR_IP6_ADDR_H +#define LWIP_HDR_IP6_ADDR_H + +#include "lwip/opt.h" +#include "def.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip6_zone.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +/** This is the aligned version of ip6_addr_t, + used as local variable, on the stack, etc. */ +struct ip6_addr { + u32_t addr[4]; +#if LWIP_IPV6_SCOPES + u8_t zone; +#endif /* LWIP_IPV6_SCOPES */ +}; + +/** IPv6 address */ +typedef struct ip6_addr ip6_addr_t; + +/** Set an IPv6 partial address given by byte-parts */ +#define IP6_ADDR_PART(ip6addr, index, a,b,c,d) \ + (ip6addr)->addr[index] = PP_HTONL(LWIP_MAKEU32(a,b,c,d)) + +/** Set a full IPv6 address by passing the 4 u32_t indices in network byte order + (use PP_HTONL() for constants) */ +#define IP6_ADDR(ip6addr, idx0, idx1, idx2, idx3) do { \ + (ip6addr)->addr[0] = idx0; \ + (ip6addr)->addr[1] = idx1; \ + (ip6addr)->addr[2] = idx2; \ + (ip6addr)->addr[3] = idx3; \ + ip6_addr_clear_zone(ip6addr); } while(0) + +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK1(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[0]) >> 16) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK2(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[0])) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK3(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[1]) >> 16) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK4(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[1])) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK5(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[2]) >> 16) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK6(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[2])) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK7(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[3]) >> 16) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK8(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[3])) & 0xffff)) + +/** Copy IPv6 address - faster than ip6_addr_set: no NULL check */ +#define ip6_addr_copy(dest, src) do{(dest).addr[0] = (src).addr[0]; \ + (dest).addr[1] = (src).addr[1]; \ + (dest).addr[2] = (src).addr[2]; \ + (dest).addr[3] = (src).addr[3]; \ + ip6_addr_copy_zone((dest), (src)); }while(0) +/** Safely copy one IPv6 address to another (src may be NULL) */ +#define ip6_addr_set(dest, src) do{(dest)->addr[0] = (src) == NULL ? 0 : (src)->addr[0]; \ + (dest)->addr[1] = (src) == NULL ? 
0 : (src)->addr[1]; \ + (dest)->addr[2] = (src) == NULL ? 0 : (src)->addr[2]; \ + (dest)->addr[3] = (src) == NULL ? 0 : (src)->addr[3]; \ + ip6_addr_set_zone((dest), (src) == NULL ? IP6_NO_ZONE : ip6_addr_zone(src)); }while(0) + +/** Copy packed IPv6 address to unpacked IPv6 address; zone is not set */ +#define ip6_addr_copy_from_packed(dest, src) do{(dest).addr[0] = (src).addr[0]; \ + (dest).addr[1] = (src).addr[1]; \ + (dest).addr[2] = (src).addr[2]; \ + (dest).addr[3] = (src).addr[3]; \ + ip6_addr_clear_zone(&dest); }while(0) + +/** Copy unpacked IPv6 address to packed IPv6 address; zone is lost */ +#define ip6_addr_copy_to_packed(dest, src) do{(dest).addr[0] = (src).addr[0]; \ + (dest).addr[1] = (src).addr[1]; \ + (dest).addr[2] = (src).addr[2]; \ + (dest).addr[3] = (src).addr[3]; }while(0) + +/** Set complete address to zero */ +#define ip6_addr_set_zero(ip6addr) do{(ip6addr)->addr[0] = 0; \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = 0; \ + (ip6addr)->addr[3] = 0; \ + ip6_addr_clear_zone(ip6addr);}while(0) + +/** Set address to ipv6 'any' (no need for lwip_htonl()) */ +#define ip6_addr_set_any(ip6addr) ip6_addr_set_zero(ip6addr) +/** Set address to ipv6 loopback address */ +#define ip6_addr_set_loopback(ip6addr) do{(ip6addr)->addr[0] = 0; \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = 0; \ + (ip6addr)->addr[3] = PP_HTONL(0x00000001UL); \ + ip6_addr_clear_zone(ip6addr);}while(0) +/** Safely copy one IPv6 address to another and change byte order + * from host- to network-order. */ +#define ip6_addr_set_hton(dest, src) do{(dest)->addr[0] = (src) == NULL ? 0 : lwip_htonl((src)->addr[0]); \ + (dest)->addr[1] = (src) == NULL ? 0 : lwip_htonl((src)->addr[1]); \ + (dest)->addr[2] = (src) == NULL ? 0 : lwip_htonl((src)->addr[2]); \ + (dest)->addr[3] = (src) == NULL ? 0 : lwip_htonl((src)->addr[3]); \ + ip6_addr_set_zone((dest), (src) == NULL ? IP6_NO_ZONE : ip6_addr_zone(src));}while(0) + + +/** Compare IPv6 networks, ignoring zone information. To be used sparingly! */ +#define ip6_addr_netcmp_zoneless(addr1, addr2) (((addr1)->addr[0] == (addr2)->addr[0]) && \ + ((addr1)->addr[1] == (addr2)->addr[1])) + +/** + * Determine if two IPv6 address are on the same network. + * + * @param addr1 IPv6 address 1 + * @param addr2 IPv6 address 2 + * @return 1 if the network identifiers of both address match, 0 if not + */ +#define ip6_addr_netcmp(addr1, addr2) (ip6_addr_netcmp_zoneless((addr1), (addr2)) && \ + ip6_addr_cmp_zone((addr1), (addr2))) + +/* Exact-host comparison *after* ip6_addr_netcmp() succeeded, for efficiency. */ +#define ip6_addr_nethostcmp(addr1, addr2) (((addr1)->addr[2] == (addr2)->addr[2]) && \ + ((addr1)->addr[3] == (addr2)->addr[3])) + +/** Compare IPv6 addresses, ignoring zone information. To be used sparingly! */ +#define ip6_addr_cmp_zoneless(addr1, addr2) (((addr1)->addr[0] == (addr2)->addr[0]) && \ + ((addr1)->addr[1] == (addr2)->addr[1]) && \ + ((addr1)->addr[2] == (addr2)->addr[2]) && \ + ((addr1)->addr[3] == (addr2)->addr[3])) +/** + * Determine if two IPv6 addresses are the same. In particular, the address + * part of both must be the same, and the zone must be compatible. 
+ * + * @param addr1 IPv6 address 1 + * @param addr2 IPv6 address 2 + * @return 1 if the addresses are considered equal, 0 if not + */ +#define ip6_addr_cmp(addr1, addr2) (ip6_addr_cmp_zoneless((addr1), (addr2)) && \ + ip6_addr_cmp_zone((addr1), (addr2))) + +/** Compare IPv6 address to packed address and zone */ +#define ip6_addr_cmp_packed(ip6addr, paddr, zone_idx) (((ip6addr)->addr[0] == (paddr)->addr[0]) && \ + ((ip6addr)->addr[1] == (paddr)->addr[1]) && \ + ((ip6addr)->addr[2] == (paddr)->addr[2]) && \ + ((ip6addr)->addr[3] == (paddr)->addr[3]) && \ + ip6_addr_equals_zone((ip6addr), (zone_idx))) + +#define ip6_get_subnet_id(ip6addr) (lwip_htonl((ip6addr)->addr[2]) & 0x0000ffffUL) + +#define ip6_addr_isany_val(ip6addr) (((ip6addr).addr[0] == 0) && \ + ((ip6addr).addr[1] == 0) && \ + ((ip6addr).addr[2] == 0) && \ + ((ip6addr).addr[3] == 0)) +#define ip6_addr_isany(ip6addr) (((ip6addr) == NULL) || ip6_addr_isany_val(*(ip6addr))) + +#define ip6_addr_isloopback(ip6addr) (((ip6addr)->addr[0] == 0UL) && \ + ((ip6addr)->addr[1] == 0UL) && \ + ((ip6addr)->addr[2] == 0UL) && \ + ((ip6addr)->addr[3] == PP_HTONL(0x00000001UL))) + +#define ip6_addr_isglobal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xe0000000UL)) == PP_HTONL(0x20000000UL)) + +#define ip6_addr_islinklocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xffc00000UL)) == PP_HTONL(0xfe800000UL)) + +#define ip6_addr_issitelocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xffc00000UL)) == PP_HTONL(0xfec00000UL)) + +#define ip6_addr_isuniquelocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xfe000000UL)) == PP_HTONL(0xfc000000UL)) + +#define ip6_addr_isipv4mappedipv6(ip6addr) (((ip6addr)->addr[0] == 0) && ((ip6addr)->addr[1] == 0) && (((ip6addr)->addr[2]) == PP_HTONL(0x0000FFFFUL))) + +#define ip6_addr_ismulticast(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff000000UL)) == PP_HTONL(0xff000000UL)) +#define ip6_addr_multicast_transient_flag(ip6addr) ((ip6addr)->addr[0] & PP_HTONL(0x00100000UL)) +#define ip6_addr_multicast_prefix_flag(ip6addr) ((ip6addr)->addr[0] & PP_HTONL(0x00200000UL)) +#define ip6_addr_multicast_rendezvous_flag(ip6addr) ((ip6addr)->addr[0] & PP_HTONL(0x00400000UL)) +#define ip6_addr_multicast_scope(ip6addr) ((lwip_htonl((ip6addr)->addr[0]) >> 16) & 0xf) +#define IP6_MULTICAST_SCOPE_RESERVED 0x0 +#define IP6_MULTICAST_SCOPE_RESERVED0 0x0 +#define IP6_MULTICAST_SCOPE_INTERFACE_LOCAL 0x1 +#define IP6_MULTICAST_SCOPE_LINK_LOCAL 0x2 +#define IP6_MULTICAST_SCOPE_RESERVED3 0x3 +#define IP6_MULTICAST_SCOPE_ADMIN_LOCAL 0x4 +#define IP6_MULTICAST_SCOPE_SITE_LOCAL 0x5 +#define IP6_MULTICAST_SCOPE_ORGANIZATION_LOCAL 0x8 +#define IP6_MULTICAST_SCOPE_GLOBAL 0xe +#define IP6_MULTICAST_SCOPE_RESERVEDF 0xf +#define ip6_addr_ismulticast_iflocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff010000UL)) +#define ip6_addr_ismulticast_linklocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff020000UL)) +#define ip6_addr_ismulticast_adminlocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff040000UL)) +#define ip6_addr_ismulticast_sitelocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff050000UL)) +#define ip6_addr_ismulticast_orglocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff080000UL)) +#define ip6_addr_ismulticast_global(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff0e0000UL)) + +/* Scoping note: while interface-local and link-local multicast addresses do + * have a scope (i.e., they are meaningful 
only in the context of a particular + * interface), the following functions are not assigning or comparing zone + * indices. The reason for this is backward compatibility. Any call site that + * produces a non-global multicast address must assign a multicast address as + * appropriate itself. */ + +#define ip6_addr_isallnodes_iflocal(ip6addr) (((ip6addr)->addr[0] == PP_HTONL(0xff010000UL)) && \ + ((ip6addr)->addr[1] == 0UL) && \ + ((ip6addr)->addr[2] == 0UL) && \ + ((ip6addr)->addr[3] == PP_HTONL(0x00000001UL))) + +#define ip6_addr_isallnodes_linklocal(ip6addr) (((ip6addr)->addr[0] == PP_HTONL(0xff020000UL)) && \ + ((ip6addr)->addr[1] == 0UL) && \ + ((ip6addr)->addr[2] == 0UL) && \ + ((ip6addr)->addr[3] == PP_HTONL(0x00000001UL))) +#define ip6_addr_set_allnodes_linklocal(ip6addr) do{(ip6addr)->addr[0] = PP_HTONL(0xff020000UL); \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = 0; \ + (ip6addr)->addr[3] = PP_HTONL(0x00000001UL); \ + ip6_addr_clear_zone(ip6addr); }while(0) + +#define ip6_addr_isallrouters_linklocal(ip6addr) (((ip6addr)->addr[0] == PP_HTONL(0xff020000UL)) && \ + ((ip6addr)->addr[1] == 0UL) && \ + ((ip6addr)->addr[2] == 0UL) && \ + ((ip6addr)->addr[3] == PP_HTONL(0x00000002UL))) +#define ip6_addr_set_allrouters_linklocal(ip6addr) do{(ip6addr)->addr[0] = PP_HTONL(0xff020000UL); \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = 0; \ + (ip6addr)->addr[3] = PP_HTONL(0x00000002UL); \ + ip6_addr_clear_zone(ip6addr); }while(0) + +#define ip6_addr_issolicitednode(ip6addr) ( ((ip6addr)->addr[0] == PP_HTONL(0xff020000UL)) && \ + ((ip6addr)->addr[2] == PP_HTONL(0x00000001UL)) && \ + (((ip6addr)->addr[3] & PP_HTONL(0xff000000UL)) == PP_HTONL(0xff000000UL)) ) + +#define ip6_addr_set_solicitednode(ip6addr, if_id) do{(ip6addr)->addr[0] = PP_HTONL(0xff020000UL); \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = PP_HTONL(0x00000001UL); \ + (ip6addr)->addr[3] = (PP_HTONL(0xff000000UL) | (if_id)); \ + ip6_addr_clear_zone(ip6addr); }while(0) + +#define ip6_addr_cmp_solicitednode(ip6addr, sn_addr) (((ip6addr)->addr[0] == PP_HTONL(0xff020000UL)) && \ + ((ip6addr)->addr[1] == 0) && \ + ((ip6addr)->addr[2] == PP_HTONL(0x00000001UL)) && \ + ((ip6addr)->addr[3] == (PP_HTONL(0xff000000UL) | (sn_addr)->addr[3]))) + +/* IPv6 address states. */ +#define IP6_ADDR_INVALID 0x00 +#define IP6_ADDR_TENTATIVE 0x08 +#define IP6_ADDR_TENTATIVE_1 0x09 /* 1 probe sent */ +#define IP6_ADDR_TENTATIVE_2 0x0a /* 2 probes sent */ +#define IP6_ADDR_TENTATIVE_3 0x0b /* 3 probes sent */ +#define IP6_ADDR_TENTATIVE_4 0x0c /* 4 probes sent */ +#define IP6_ADDR_TENTATIVE_5 0x0d /* 5 probes sent */ +#define IP6_ADDR_TENTATIVE_6 0x0e /* 6 probes sent */ +#define IP6_ADDR_TENTATIVE_7 0x0f /* 7 probes sent */ +#define IP6_ADDR_VALID 0x10 /* This bit marks an address as valid (preferred or deprecated) */ +#define IP6_ADDR_PREFERRED 0x30 +#define IP6_ADDR_DEPRECATED 0x10 /* Same as VALID (valid but not preferred) */ +#define IP6_ADDR_DUPLICATED 0x40 /* Failed DAD test, not valid */ + +#define IP6_ADDR_TENTATIVE_COUNT_MASK 0x07 /* 1-7 probes sent */ + +#define ip6_addr_isinvalid(addr_state) (addr_state == IP6_ADDR_INVALID) +#define ip6_addr_istentative(addr_state) (addr_state & IP6_ADDR_TENTATIVE) +#define ip6_addr_isvalid(addr_state) (addr_state & IP6_ADDR_VALID) /* Include valid, preferred, and deprecated. 
*/ +#define ip6_addr_ispreferred(addr_state) (addr_state == IP6_ADDR_PREFERRED) +#define ip6_addr_isdeprecated(addr_state) (addr_state == IP6_ADDR_DEPRECATED) +#define ip6_addr_isduplicated(addr_state) (addr_state == IP6_ADDR_DUPLICATED) + +#if LWIP_IPV6_ADDRESS_LIFETIMES +#define IP6_ADDR_LIFE_STATIC (0) +#define IP6_ADDR_LIFE_INFINITE (0xffffffffUL) +#define ip6_addr_life_isstatic(addr_life) ((addr_life) == IP6_ADDR_LIFE_STATIC) +#define ip6_addr_life_isinfinite(addr_life) ((addr_life) == IP6_ADDR_LIFE_INFINITE) +#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */ + +#define ip6_addr_debug_print_parts(debug, a, b, c, d, e, f, g, h) \ + LWIP_DEBUGF(debug, ("%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F, \ + a, b, c, d, e, f, g, h)) +#define ip6_addr_debug_print(debug, ipaddr) \ + ip6_addr_debug_print_parts(debug, \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK1(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK2(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK3(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK4(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK5(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK6(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK7(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK8(ipaddr) : 0)) +#define ip6_addr_debug_print_val(debug, ipaddr) \ + ip6_addr_debug_print_parts(debug, \ + IP6_ADDR_BLOCK1(&(ipaddr)), \ + IP6_ADDR_BLOCK2(&(ipaddr)), \ + IP6_ADDR_BLOCK3(&(ipaddr)), \ + IP6_ADDR_BLOCK4(&(ipaddr)), \ + IP6_ADDR_BLOCK5(&(ipaddr)), \ + IP6_ADDR_BLOCK6(&(ipaddr)), \ + IP6_ADDR_BLOCK7(&(ipaddr)), \ + IP6_ADDR_BLOCK8(&(ipaddr))) + +#define IP6ADDR_STRLEN_MAX 46 + +int ip6addr_aton(const char *cp, ip6_addr_t *addr); +/** returns ptr to static buffer; not reentrant! */ +char *ip6addr_ntoa(const ip6_addr_t *addr); +char *ip6addr_ntoa_r(const ip6_addr_t *addr, char *buf, int buflen); + + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_IP6_ADDR_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_frag.h b/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_frag.h new file mode 100644 index 0000000..87e0e86 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_frag.h @@ -0,0 +1,144 @@ +/** + * @file + * + * IPv6 fragmentation and reassembly. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ +#ifndef LWIP_HDR_IP6_FRAG_H +#define LWIP_HDR_IP6_FRAG_H + +#include "lwip/opt.h" +#include "lwip/pbuf.h" +#include "lwip/ip6_addr.h" +#include "lwip/ip6.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +#if LWIP_IPV6 && LWIP_IPV6_REASS /* don't build if not configured for use in lwipopts.h */ + +/** The IPv6 reassembly timer interval in milliseconds. */ +#define IP6_REASS_TMR_INTERVAL 1000 + +/** IP6_FRAG_COPYHEADER==1: for platforms where sizeof(void*) > 4, "struct + * ip6_reass_helper" is too large to be stored in the IPv6 fragment header, and + * will bleed into the header before it, which may be the IPv6 header or an + * extension header. This means that for each first fragment packet, we need to + * 1) make a copy of some IPv6 header fields (src+dest) that we need later on, + * just in case we do overwrite part of the IPv6 header, and 2) make a copy of + * the header data that we overwrote, so that we can restore it before either + * completing reassembly or sending an ICMPv6 reply. The last part is true even + * if this setting is disabled, but if it is enabled, we need to save a bit + * more data (up to the size of a pointer) because we overwrite more. */ +#ifndef IPV6_FRAG_COPYHEADER +#define IPV6_FRAG_COPYHEADER 0 +#endif + +/* With IPV6_FRAG_COPYHEADER==1, a helper structure may (or, depending on the + * presence of extensions, may not) overwrite part of the IP header. Therefore, + * we copy the fields that we need from the IP header for as long as the helper + * structure may still be in place. This is easier than temporarily restoring + * those fields in the IP header each time we need to perform checks on them. */ +#if IPV6_FRAG_COPYHEADER +#define IPV6_FRAG_SRC(ipr) ((ipr)->src) +#define IPV6_FRAG_DEST(ipr) ((ipr)->dest) +#else /* IPV6_FRAG_COPYHEADER */ +#define IPV6_FRAG_SRC(ipr) ((ipr)->iphdr->src) +#define IPV6_FRAG_DEST(ipr) ((ipr)->iphdr->dest) +#endif /* IPV6_FRAG_COPYHEADER */ + +/** IPv6 reassembly helper struct. + * This is exported because memp needs to know the size. + */ +struct ip6_reassdata { + struct ip6_reassdata *next; + struct pbuf *p; + struct ip6_hdr *iphdr; /* pointer to the first (original) IPv6 header */ +#if IPV6_FRAG_COPYHEADER + ip6_addr_p_t src; /* copy of the source address in the IP header */ + ip6_addr_p_t dest; /* copy of the destination address in the IP header */ + /* This buffer (for the part of the original header that we overwrite) will + * be slightly oversized, but we cannot compute the exact size from here. */ + u8_t orig_hdr[sizeof(struct ip6_frag_hdr) + sizeof(void*)]; +#else /* IPV6_FRAG_COPYHEADER */ + /* In this case we still need the buffer, for sending ICMPv6 replies. 
*/
+ u8_t orig_hdr[sizeof(struct ip6_frag_hdr)];
+#endif /* IPV6_FRAG_COPYHEADER */
+ u32_t identification;
+ u16_t datagram_len;
+ u8_t nexth;
+ u8_t timer;
+#if LWIP_IPV6_SCOPES
+ u8_t src_zone; /* zone of original packet's source address */
+ u8_t dest_zone; /* zone of original packet's destination address */
+#endif /* LWIP_IPV6_SCOPES */
+};
+
+#define ip6_reass_init() /* Compatibility define */
+void ip6_reass_tmr(void);
+struct pbuf *ip6_reass(struct pbuf *p);
+
+#endif /* LWIP_IPV6 && LWIP_IPV6_REASS */
+
+#if LWIP_IPV6 && LWIP_IPV6_FRAG /* don't build if not configured for use in lwipopts.h */
+
+#ifndef LWIP_PBUF_CUSTOM_REF_DEFINED
+#define LWIP_PBUF_CUSTOM_REF_DEFINED
+/** A custom pbuf that holds a reference to another pbuf, which is freed
+ * when this custom pbuf is freed. This is used to create a custom PBUF_REF
+ * that points into the original pbuf. */
+struct pbuf_custom_ref {
+ /** 'base class' */
+ struct pbuf_custom pc;
+ /** pointer to the original pbuf that is referenced */
+ struct pbuf *original;
+};
+#endif /* LWIP_PBUF_CUSTOM_REF_DEFINED */
+
+err_t ip6_frag(struct pbuf *p, struct netif *netif, const ip6_addr_t *dest);
+
+#endif /* LWIP_IPV6 && LWIP_IPV6_FRAG */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* LWIP_HDR_IP6_FRAG_H */
diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_zone.h b/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_zone.h
new file mode 100644
index 0000000..525790e
--- /dev/null
+++ b/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_zone.h
@@ -0,0 +1,304 @@
+/**
+ * @file
+ *
+ * IPv6 address scopes, zones, and scoping policy.
+ *
+ * This header provides the means to implement support for IPv6 address scopes,
+ * as per RFC 4007. An address scope can be either global or more constrained.
+ * In lwIP, we say that an address "has a scope" or "is scoped" when its scope
+ * is constrained, in which case the address is meaningful only in a specific
+ * "zone." For unicast addresses, only link-local addresses have a scope; in
+ * that case, the scope is the link. For multicast addresses, there are various
+ * scopes defined by RFC 4007 and others. For any constrained scope, a system
+ * must establish a (potentially one-to-many) mapping between zones and local
+ * interfaces. For example, a link-local address is valid on only one link (its
+ * zone). That link may be attached to one or more local interfaces. The
+ * decisions on which scopes are constrained and the mapping between zones and
+ * interfaces are together what we refer to as the "scoping policy" - more on
+ * this in a bit.
+ *
+ * In lwIP, each IPv6 address has an associated zone index. This zone index may
+ * be set to "no zone" (IP6_NO_ZONE, 0) or an actual zone. We say that an
+ * address "has a zone" or "is zoned" when its zone index is *not* set to "no
+ * zone." In lwIP, in principle, each address should be "properly zoned," which
+ * means that the address has a zone if and only if it has a scope. As such, it
+ * is a rule that an unscoped (e.g., global) address must never have a zone.
+ * Even though one could argue that there is always one zone even for global
+ * scopes, this rule exists for implementation simplicity. Violation of the
+ * rule will trigger assertions or otherwise result in undesired behavior.
+ *
+ * Backward compatibility prevents us from requiring that applications always
+ * provide properly zoned addresses. We do enforce the rule that in the
+ * lwIP link layer (everything below netif->output_ip6() and in particular ND6)
+ * *all* addresses are properly zoned. Thus, on the output paths down the
+ * stack, various places deal with the case of addresses that lack a zone.
+ * Some of them are best-effort for efficiency (e.g. the PCB bind and connect
+ * API calls' attempts to add missing zones); ultimately the IPv6 output
+ * handler (@ref ip6_output_if_src) will set a zone if necessary.
+ *
+ * Aside from dealing with scoped addresses lacking a zone, a proper IPv6
+ * implementation must also ensure that a packet with a scoped source and/or
+ * destination address does not leave its zone. This is currently implemented
+ * in the input and forward functions. However, for output, these checks are
+ * deliberately omitted in order to keep the implementation lightweight. The
+ * routing algorithm in @ref ip6_route will take decisions such that it will
+ * not cause zone violations unless the application sets bad addresses, though.
+ *
+ * In terms of scoping policy, lwIP implements the default policy from RFC 4007
+ * using macros in this file. This policy considers link-local unicast
+ * addresses and (only) interface-local and link-local multicast addresses as
+ * having a scope. For all these addresses, the zone is equal to the interface.
+ * As shown below in this file, it is possible to implement a custom policy.
+ */
+
+/*
+ * Copyright (c) 2017 The MINIX 3 Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: David van Moolenbroek
+ *
+ */
+#ifndef LWIP_HDR_IP6_ZONE_H
+#define LWIP_HDR_IP6_ZONE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ip6_zones IPv6 Zones
+ * @ingroup ip6
+ * @{
+ */
+
+#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */
+
+/** Identifier for "no zone". */
+#define IP6_NO_ZONE 0
+
+#if LWIP_IPV6_SCOPES
+
+/** Zone initializer for static IPv6 address initialization, including comma.
*/ +#define IPADDR6_ZONE_INIT , IP6_NO_ZONE + +/** Return the zone index of the given IPv6 address; possibly "no zone". */ +#define ip6_addr_zone(ip6addr) ((ip6addr)->zone) + +/** Does the given IPv6 address have a zone set? (0/1) */ +#define ip6_addr_has_zone(ip6addr) (ip6_addr_zone(ip6addr) != IP6_NO_ZONE) + +/** Set the zone field of an IPv6 address to a particular value. */ +#define ip6_addr_set_zone(ip6addr, zone_idx) ((ip6addr)->zone = (zone_idx)) + +/** Clear the zone field of an IPv6 address, setting it to "no zone". */ +#define ip6_addr_clear_zone(ip6addr) ((ip6addr)->zone = IP6_NO_ZONE) + +/** Copy the zone field from the second IPv6 address to the first one. */ +#define ip6_addr_copy_zone(ip6addr1, ip6addr2) ((ip6addr1).zone = (ip6addr2).zone) + +/** Is the zone field of the given IPv6 address equal to the given zone index? (0/1) */ +#define ip6_addr_equals_zone(ip6addr, zone_idx) ((ip6addr)->zone == (zone_idx)) + +/** Are the zone fields of the given IPv6 addresses equal? (0/1) + * This macro must only be used on IPv6 addresses of the same scope. */ +#define ip6_addr_cmp_zone(ip6addr1, ip6addr2) ((ip6addr1)->zone == (ip6addr2)->zone) + +/** Symbolic constants for the 'type' parameters in some of the macros. + * These exist for efficiency only, allowing the macros to avoid certain tests + * when the address is known not to be of a certain type. Dead code elimination + * will do the rest. IP6_MULTICAST is supported but currently not optimized. + * @see ip6_addr_has_scope, ip6_addr_assign_zone, ip6_addr_lacks_zone. + */ +enum lwip_ipv6_scope_type +{ + /** Unknown */ + IP6_UNKNOWN = 0, + /** Unicast */ + IP6_UNICAST = 1, + /** Multicast */ + IP6_MULTICAST = 2 +}; + +/** IPV6_CUSTOM_SCOPES: together, the following three macro definitions, + * @ref ip6_addr_has_scope, @ref ip6_addr_assign_zone, and + * @ref ip6_addr_test_zone, completely define the lwIP scoping policy. + * The definitions below implement the default policy from RFC 4007 Sec. 6. + * Should an implementation desire to implement a different policy, it can + * define IPV6_CUSTOM_SCOPES to 1 and supply its own definitions for the three + * macros instead. + */ +#ifndef IPV6_CUSTOM_SCOPES +#define IPV6_CUSTOM_SCOPES 0 +#endif /* !IPV6_CUSTOM_SCOPES */ + +#if !IPV6_CUSTOM_SCOPES + +/** + * Determine whether an IPv6 address has a constrained scope, and as such is + * meaningful only if accompanied by a zone index to identify the scope's zone. + * The given address type may be used to eliminate at compile time certain + * checks that will evaluate to false at run time anyway. + * + * This default implementation follows the default model of RFC 4007, where + * only interface-local and link-local scopes are defined. + * + * Even though the unicast loopback address does have an implied link-local + * scope, in this implementation it does not have an explicitly assigned zone + * index. As such it should not be tested for in this macro. + * + * @param ip6addr the IPv6 address (const); only its address part is examined. + * @param type address type; see @ref lwip_ipv6_scope_type. + * @return 1 if the address has a constrained scope, 0 if it does not. + */ +#define ip6_addr_has_scope(ip6addr, type) \ + (ip6_addr_islinklocal(ip6addr) || (((type) != IP6_UNICAST) && \ + (ip6_addr_ismulticast_iflocal(ip6addr) || \ + ip6_addr_ismulticast_linklocal(ip6addr)))) + +/** + * Assign a zone index to an IPv6 address, based on a network interface. 
If the + * given address has a scope, the assigned zone index is that scope's zone of + * the given netif; otherwise, the assigned zone index is "no zone". + * + * This default implementation follows the default model of RFC 4007, where + * only interface-local and link-local scopes are defined, and the zone index + * of both of those scopes always equals the index of the network interface. + * As such, this default implementation need not distinguish between different + * constrained scopes when assigning the zone. + * + * @param ip6addr the IPv6 address; its address part is examined, and its zone + * index is assigned. + * @param type address type; see @ref lwip_ipv6_scope_type. + * @param netif the network interface (const). + */ +#define ip6_addr_assign_zone(ip6addr, type, netif) \ + (ip6_addr_set_zone((ip6addr), \ + ip6_addr_has_scope((ip6addr), (type)) ? netif_get_index(netif) : 0)) + +/** + * Test whether an IPv6 address is "zone-compatible" with a network interface. + * That is, test whether the network interface is part of the zone associated + * with the address. For efficiency, this macro is only ever called if the + * given address is either scoped or zoned, and thus, it need not test this. + * If an address is scoped but not zoned, or zoned and not scoped, it is + * considered not zone-compatible with any netif. + * + * This default implementation follows the default model of RFC 4007, where + * only interface-local and link-local scopes are defined, and the zone index + * of both of those scopes always equals the index of the network interface. + * As such, there is always only one matching netif for a specific zone index, + * but all call sites of this macro currently support multiple matching netifs + * as well (at no additional expense in the common case). + * + * @param ip6addr the IPv6 address (const). + * @param netif the network interface (const). + * @return 1 if the address is scope-compatible with the netif, 0 if not. + */ +#define ip6_addr_test_zone(ip6addr, netif) \ + (ip6_addr_equals_zone((ip6addr), netif_get_index(netif))) + +#endif /* !IPV6_CUSTOM_SCOPES */ + +/** Does the given IPv6 address have a scope, and as such should also have a + * zone to be meaningful, but does not actually have a zone? (0/1) */ +#define ip6_addr_lacks_zone(ip6addr, type) \ + (!ip6_addr_has_zone(ip6addr) && ip6_addr_has_scope((ip6addr), (type))) + +/** + * Try to select a zone for a scoped address that does not yet have a zone. + * Called from PCB bind and connect routines, for two reasons: 1) to save on + * this (relatively expensive) selection for every individual packet route + * operation and 2) to allow the application to obtain the selected zone from + * the PCB as is customary for e.g. getsockname/getpeername BSD socket calls. + * + * Ideally, callers would always supply a properly zoned address, in which case + * this function would not be needed. It exists both for compatibility with the + * BSD socket API (which accepts zoneless destination addresses) and for + * backward compatibility with pre-scoping lwIP code. + * + * It may be impossible to select a zone, e.g. if there are no netifs. In that + * case, the address's zone field will be left as is. + * + * @param dest the IPv6 address for which to select and set a zone. + * @param src source IPv6 address (const); may be equal to dest. 
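 *
 * Usage sketch (illustrative only; assumes "src" and "dest" are ip6_addr_t
 * variables already filled in by the caller, as in the PCB bind/connect
 * paths described above):
 *
 *   if (ip6_addr_lacks_zone(&dest, IP6_UNKNOWN)) {
 *     ip6_addr_select_zone(&dest, &src);
 *   }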
+ */ +#define ip6_addr_select_zone(dest, src) do { struct netif *selected_netif; \ + selected_netif = ip6_route((src), (dest)); \ + if (selected_netif != NULL) { \ + ip6_addr_assign_zone((dest), IP6_UNKNOWN, selected_netif); \ + } } while (0) + +/** + * @} + */ + +#else /* LWIP_IPV6_SCOPES */ + +#define IPADDR6_ZONE_INIT +#define ip6_addr_zone(ip6addr) (IP6_NO_ZONE) +#define ip6_addr_has_zone(ip6addr) (0) +#define ip6_addr_set_zone(ip6addr, zone_idx) +#define ip6_addr_clear_zone(ip6addr) +#define ip6_addr_copy_zone(ip6addr1, ip6addr2) +#define ip6_addr_equals_zone(ip6addr, zone_idx) (1) +#define ip6_addr_cmp_zone(ip6addr1, ip6addr2) (1) +#define IPV6_CUSTOM_SCOPES 0 +#define ip6_addr_has_scope(ip6addr, type) (0) +#define ip6_addr_assign_zone(ip6addr, type, netif) +#define ip6_addr_test_zone(ip6addr, netif) (1) +#define ip6_addr_lacks_zone(ip6addr, type) (0) +#define ip6_addr_select_zone(ip6addr, src) + +#endif /* LWIP_IPV6_SCOPES */ + +#if LWIP_IPV6_SCOPES && LWIP_IPV6_SCOPES_DEBUG + +/** Verify that the given IPv6 address is properly zoned. */ +#define IP6_ADDR_ZONECHECK(ip6addr) LWIP_ASSERT("IPv6 zone check failed", \ + ip6_addr_has_scope(ip6addr, IP6_UNKNOWN) == ip6_addr_has_zone(ip6addr)) + +/** Verify that the given IPv6 address is properly zoned for the given netif. */ +#define IP6_ADDR_ZONECHECK_NETIF(ip6addr, netif) LWIP_ASSERT("IPv6 netif zone check failed", \ + ip6_addr_has_scope(ip6addr, IP6_UNKNOWN) ? \ + (ip6_addr_has_zone(ip6addr) && \ + (((netif) == NULL) || ip6_addr_test_zone((ip6addr), (netif)))) : \ + !ip6_addr_has_zone(ip6addr)) + +#else /* LWIP_IPV6_SCOPES && LWIP_IPV6_SCOPES_DEBUG */ + +#define IP6_ADDR_ZONECHECK(ip6addr) +#define IP6_ADDR_ZONECHECK_NETIF(ip6addr, netif) + +#endif /* LWIP_IPV6_SCOPES && LWIP_IPV6_SCOPES_DEBUG */ + +#endif /* LWIP_IPV6 */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_IP6_ZONE_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/ip_addr.h b/Middlewares/Third_Party/LwIP/src/include/lwip/ip_addr.h new file mode 100644 index 0000000..2f97770 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/ip_addr.h @@ -0,0 +1,438 @@ +/** + * @file + * IP address API (common IPv4 and IPv6) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_IP_ADDR_H +#define LWIP_HDR_IP_ADDR_H + +#include "lwip/opt.h" +#include "lwip/def.h" + +#include "lwip/ip4_addr.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @ingroup ipaddr + * IP address types for use in ip_addr_t.type member. + * @see tcp_new_ip_type(), udp_new_ip_type(), raw_new_ip_type(). + */ +enum lwip_ip_addr_type { + /** IPv4 */ + IPADDR_TYPE_V4 = 0U, + /** IPv6 */ + IPADDR_TYPE_V6 = 6U, + /** IPv4+IPv6 ("dual-stack") */ + IPADDR_TYPE_ANY = 46U +}; + +#if LWIP_IPV4 && LWIP_IPV6 +/** + * @ingroup ipaddr + * A union struct for both IP version's addresses. + * ATTENTION: watch out for its size when adding IPv6 address scope! + */ +typedef struct ip_addr { + union { + ip6_addr_t ip6; + ip4_addr_t ip4; + } u_addr; + /** @ref lwip_ip_addr_type */ + u8_t type; +} ip_addr_t; + +extern const ip_addr_t ip_addr_any_type; + +/** @ingroup ip4addr */ +#define IPADDR4_INIT(u32val) { { { { u32val, 0ul, 0ul, 0ul } IPADDR6_ZONE_INIT } }, IPADDR_TYPE_V4 } +/** @ingroup ip4addr */ +#define IPADDR4_INIT_BYTES(a,b,c,d) IPADDR4_INIT(PP_HTONL(LWIP_MAKEU32(a,b,c,d))) + +/** @ingroup ip6addr */ +#define IPADDR6_INIT(a, b, c, d) { { { { a, b, c, d } IPADDR6_ZONE_INIT } }, IPADDR_TYPE_V6 } +/** @ingroup ip6addr */ +#define IPADDR6_INIT_HOST(a, b, c, d) { { { { PP_HTONL(a), PP_HTONL(b), PP_HTONL(c), PP_HTONL(d) } IPADDR6_ZONE_INIT } }, IPADDR_TYPE_V6 } + +/** @ingroup ipaddr */ +#define IP_IS_ANY_TYPE_VAL(ipaddr) (IP_GET_TYPE(&ipaddr) == IPADDR_TYPE_ANY) +/** @ingroup ipaddr */ +#define IPADDR_ANY_TYPE_INIT { { { { 0ul, 0ul, 0ul, 0ul } IPADDR6_ZONE_INIT } }, IPADDR_TYPE_ANY } + +/** @ingroup ip4addr */ +#define IP_IS_V4_VAL(ipaddr) (IP_GET_TYPE(&ipaddr) == IPADDR_TYPE_V4) +/** @ingroup ip6addr */ +#define IP_IS_V6_VAL(ipaddr) (IP_GET_TYPE(&ipaddr) == IPADDR_TYPE_V6) +/** @ingroup ip4addr */ +#define IP_IS_V4(ipaddr) (((ipaddr) == NULL) || IP_IS_V4_VAL(*(ipaddr))) +/** @ingroup ip6addr */ +#define IP_IS_V6(ipaddr) (((ipaddr) != NULL) && IP_IS_V6_VAL(*(ipaddr))) + +#define IP_SET_TYPE_VAL(ipaddr, iptype) do { (ipaddr).type = (iptype); }while(0) +#define IP_SET_TYPE(ipaddr, iptype) do { if((ipaddr) != NULL) { IP_SET_TYPE_VAL(*(ipaddr), iptype); }}while(0) +#define IP_GET_TYPE(ipaddr) ((ipaddr)->type) + +#define IP_ADDR_RAW_SIZE(ipaddr) (IP_GET_TYPE(&ipaddr) == IPADDR_TYPE_V4 ? 
sizeof(ip4_addr_t) : sizeof(ip6_addr_t)) + +#define IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ipaddr) (IP_GET_TYPE(&pcb->local_ip) == IP_GET_TYPE(ipaddr)) +#define IP_ADDR_PCB_VERSION_MATCH(pcb, ipaddr) (IP_IS_ANY_TYPE_VAL(pcb->local_ip) || IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ipaddr)) + +/** @ingroup ip6addr + * Convert generic ip address to specific protocol version + */ +#define ip_2_ip6(ipaddr) (&((ipaddr)->u_addr.ip6)) +/** @ingroup ip4addr + * Convert generic ip address to specific protocol version + */ +#define ip_2_ip4(ipaddr) (&((ipaddr)->u_addr.ip4)) + +/** @ingroup ip4addr */ +#define IP_ADDR4(ipaddr,a,b,c,d) do { IP4_ADDR(ip_2_ip4(ipaddr),a,b,c,d); \ + IP_SET_TYPE_VAL(*(ipaddr), IPADDR_TYPE_V4); } while(0) +/** @ingroup ip6addr */ +#define IP_ADDR6(ipaddr,i0,i1,i2,i3) do { IP6_ADDR(ip_2_ip6(ipaddr),i0,i1,i2,i3); \ + IP_SET_TYPE_VAL(*(ipaddr), IPADDR_TYPE_V6); } while(0) +/** @ingroup ip6addr */ +#define IP_ADDR6_HOST(ipaddr,i0,i1,i2,i3) IP_ADDR6(ipaddr,PP_HTONL(i0),PP_HTONL(i1),PP_HTONL(i2),PP_HTONL(i3)) + +#define ip_clear_no4(ipaddr) do { ip_2_ip6(ipaddr)->addr[1] = \ + ip_2_ip6(ipaddr)->addr[2] = \ + ip_2_ip6(ipaddr)->addr[3] = 0; \ + ip6_addr_clear_zone(ip_2_ip6(ipaddr)); }while(0) + +/** @ingroup ipaddr */ +#define ip_addr_copy(dest, src) do{ IP_SET_TYPE_VAL(dest, IP_GET_TYPE(&src)); if(IP_IS_V6_VAL(src)){ \ + ip6_addr_copy(*ip_2_ip6(&(dest)), *ip_2_ip6(&(src))); }else{ \ + ip4_addr_copy(*ip_2_ip4(&(dest)), *ip_2_ip4(&(src))); ip_clear_no4(&dest); }}while(0) +/** @ingroup ip6addr */ +#define ip_addr_copy_from_ip6(dest, src) do{ \ + ip6_addr_copy(*ip_2_ip6(&(dest)), src); IP_SET_TYPE_VAL(dest, IPADDR_TYPE_V6); }while(0) +/** @ingroup ip6addr */ +#define ip_addr_copy_from_ip6_packed(dest, src) do{ \ + ip6_addr_copy_from_packed(*ip_2_ip6(&(dest)), src); IP_SET_TYPE_VAL(dest, IPADDR_TYPE_V6); }while(0) +/** @ingroup ip4addr */ +#define ip_addr_copy_from_ip4(dest, src) do{ \ + ip4_addr_copy(*ip_2_ip4(&(dest)), src); IP_SET_TYPE_VAL(dest, IPADDR_TYPE_V4); ip_clear_no4(&dest); }while(0) +/** @ingroup ip4addr */ +#define ip_addr_set_ip4_u32(ipaddr, val) do{if(ipaddr){ip4_addr_set_u32(ip_2_ip4(ipaddr), val); \ + IP_SET_TYPE(ipaddr, IPADDR_TYPE_V4); ip_clear_no4(ipaddr); }}while(0) +/** @ingroup ip4addr */ +#define ip_addr_set_ip4_u32_val(ipaddr, val) do{ ip4_addr_set_u32(ip_2_ip4(&(ipaddr)), val); \ + IP_SET_TYPE_VAL(ipaddr, IPADDR_TYPE_V4); ip_clear_no4(&ipaddr); }while(0) +/** @ingroup ip4addr */ +#define ip_addr_get_ip4_u32(ipaddr) (((ipaddr) && IP_IS_V4(ipaddr)) ? 
\ + ip4_addr_get_u32(ip_2_ip4(ipaddr)) : 0)
+/** @ingroup ipaddr */
+#define ip_addr_set(dest, src) do{ IP_SET_TYPE(dest, IP_GET_TYPE(src)); if(IP_IS_V6(src)){ \
+ ip6_addr_set(ip_2_ip6(dest), ip_2_ip6(src)); }else{ \
+ ip4_addr_set(ip_2_ip4(dest), ip_2_ip4(src)); ip_clear_no4(dest); }}while(0)
+/** @ingroup ipaddr */
+#define ip_addr_set_ipaddr(dest, src) ip_addr_set(dest, src)
+/** @ingroup ipaddr */
+#define ip_addr_set_zero(ipaddr) do{ \
+ ip6_addr_set_zero(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, 0); }while(0)
+/** @ingroup ip4addr */
+#define ip_addr_set_zero_ip4(ipaddr) do{ \
+ ip6_addr_set_zero(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V4); }while(0)
+/** @ingroup ip6addr */
+#define ip_addr_set_zero_ip6(ipaddr) do{ \
+ ip6_addr_set_zero(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V6); }while(0)
+/** @ingroup ipaddr */
+#define ip_addr_set_any(is_ipv6, ipaddr) do{if(is_ipv6){ \
+ ip6_addr_set_any(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V6); }else{ \
+ ip4_addr_set_any(ip_2_ip4(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V4); ip_clear_no4(ipaddr); }}while(0)
+/** @ingroup ipaddr */
+#define ip_addr_set_any_val(is_ipv6, ipaddr) do{if(is_ipv6){ \
+ ip6_addr_set_any(ip_2_ip6(&(ipaddr))); IP_SET_TYPE_VAL(ipaddr, IPADDR_TYPE_V6); }else{ \
+ ip4_addr_set_any(ip_2_ip4(&(ipaddr))); IP_SET_TYPE_VAL(ipaddr, IPADDR_TYPE_V4); ip_clear_no4(&ipaddr); }}while(0)
+/** @ingroup ipaddr */
+#define ip_addr_set_loopback(is_ipv6, ipaddr) do{if(is_ipv6){ \
+ ip6_addr_set_loopback(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V6); }else{ \
+ ip4_addr_set_loopback(ip_2_ip4(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V4); ip_clear_no4(ipaddr); }}while(0)
+/** @ingroup ipaddr */
+#define ip_addr_set_loopback_val(is_ipv6, ipaddr) do{if(is_ipv6){ \
+ ip6_addr_set_loopback(ip_2_ip6(&(ipaddr))); IP_SET_TYPE_VAL(ipaddr, IPADDR_TYPE_V6); }else{ \
+ ip4_addr_set_loopback(ip_2_ip4(&(ipaddr))); IP_SET_TYPE_VAL(ipaddr, IPADDR_TYPE_V4); ip_clear_no4(&ipaddr); }}while(0)
+/** @ingroup ipaddr */
+#define ip_addr_set_hton(dest, src) do{if(IP_IS_V6(src)){ \
+ ip6_addr_set_hton(ip_2_ip6(dest), ip_2_ip6(src)); IP_SET_TYPE(dest, IPADDR_TYPE_V6); }else{ \
+ ip4_addr_set_hton(ip_2_ip4(dest), ip_2_ip4(src)); IP_SET_TYPE(dest, IPADDR_TYPE_V4); ip_clear_no4(dest); }}while(0)
+/** @ingroup ipaddr */
+#define ip_addr_get_network(target, host, netmask) do{if(IP_IS_V6(host)){ \
+ ip4_addr_set_zero(ip_2_ip4(target)); IP_SET_TYPE(target, IPADDR_TYPE_V6); } else { \
+ ip4_addr_get_network(ip_2_ip4(target), ip_2_ip4(host), ip_2_ip4(netmask)); IP_SET_TYPE(target, IPADDR_TYPE_V4); }}while(0)
+/** @ingroup ipaddr */
+#define ip_addr_netcmp(addr1, addr2, mask) ((IP_IS_V6(addr1) && IP_IS_V6(addr2)) ? \
+ 0 : \
+ ip4_addr_netcmp(ip_2_ip4(addr1), ip_2_ip4(addr2), mask))
+/** @ingroup ipaddr */
+#define ip_addr_cmp(addr1, addr2) ((IP_GET_TYPE(addr1) != IP_GET_TYPE(addr2)) ? 0 : (IP_IS_V6_VAL(*(addr1)) ? \
+ ip6_addr_cmp(ip_2_ip6(addr1), ip_2_ip6(addr2)) : \
+ ip4_addr_cmp(ip_2_ip4(addr1), ip_2_ip4(addr2))))
+/** @ingroup ipaddr */
+#define ip_addr_cmp_zoneless(addr1, addr2) ((IP_GET_TYPE(addr1) != IP_GET_TYPE(addr2)) ? 0 : (IP_IS_V6_VAL(*(addr1)) ? \
+ ip6_addr_cmp_zoneless(ip_2_ip6(addr1), ip_2_ip6(addr2)) : \
+ ip4_addr_cmp(ip_2_ip4(addr1), ip_2_ip4(addr2))))
+/** @ingroup ipaddr */
+#define ip_addr_isany(ipaddr) (((ipaddr) == NULL) ? 1 : ((IP_IS_V6(ipaddr)) ?
\ + ip6_addr_isany(ip_2_ip6(ipaddr)) : \ + ip4_addr_isany(ip_2_ip4(ipaddr)))) +/** @ingroup ipaddr */ +#define ip_addr_isany_val(ipaddr) ((IP_IS_V6_VAL(ipaddr)) ? \ + ip6_addr_isany_val(*ip_2_ip6(&(ipaddr))) : \ + ip4_addr_isany_val(*ip_2_ip4(&(ipaddr)))) +/** @ingroup ipaddr */ +#define ip_addr_isbroadcast(ipaddr, netif) ((IP_IS_V6(ipaddr)) ? \ + 0 : \ + ip4_addr_isbroadcast(ip_2_ip4(ipaddr), netif)) +/** @ingroup ipaddr */ +#define ip_addr_ismulticast(ipaddr) ((IP_IS_V6(ipaddr)) ? \ + ip6_addr_ismulticast(ip_2_ip6(ipaddr)) : \ + ip4_addr_ismulticast(ip_2_ip4(ipaddr))) +/** @ingroup ipaddr */ +#define ip_addr_isloopback(ipaddr) ((IP_IS_V6(ipaddr)) ? \ + ip6_addr_isloopback(ip_2_ip6(ipaddr)) : \ + ip4_addr_isloopback(ip_2_ip4(ipaddr))) +/** @ingroup ipaddr */ +#define ip_addr_islinklocal(ipaddr) ((IP_IS_V6(ipaddr)) ? \ + ip6_addr_islinklocal(ip_2_ip6(ipaddr)) : \ + ip4_addr_islinklocal(ip_2_ip4(ipaddr))) +#define ip_addr_debug_print(debug, ipaddr) do { if(IP_IS_V6(ipaddr)) { \ + ip6_addr_debug_print(debug, ip_2_ip6(ipaddr)); } else { \ + ip4_addr_debug_print(debug, ip_2_ip4(ipaddr)); }}while(0) +#define ip_addr_debug_print_val(debug, ipaddr) do { if(IP_IS_V6_VAL(ipaddr)) { \ + ip6_addr_debug_print_val(debug, *ip_2_ip6(&(ipaddr))); } else { \ + ip4_addr_debug_print_val(debug, *ip_2_ip4(&(ipaddr))); }}while(0) +char *ipaddr_ntoa(const ip_addr_t *addr); +char *ipaddr_ntoa_r(const ip_addr_t *addr, char *buf, int buflen); +int ipaddr_aton(const char *cp, ip_addr_t *addr); + +/** @ingroup ipaddr */ +#define IPADDR_STRLEN_MAX IP6ADDR_STRLEN_MAX + +/** @ingroup ipaddr */ +#define ip4_2_ipv4_mapped_ipv6(ip6addr, ip4addr) do { \ + (ip6addr)->addr[3] = (ip4addr)->addr; \ + (ip6addr)->addr[2] = PP_HTONL(0x0000FFFFUL); \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[0] = 0; \ + ip6_addr_clear_zone(ip6addr); } while(0); + +/** @ingroup ipaddr */ +#define unmap_ipv4_mapped_ipv6(ip4addr, ip6addr) \ + (ip4addr)->addr = (ip6addr)->addr[3]; + +#define IP46_ADDR_ANY(type) (((type) == IPADDR_TYPE_V6)? 
IP6_ADDR_ANY : IP4_ADDR_ANY) + +#else /* LWIP_IPV4 && LWIP_IPV6 */ + +#define IP_ADDR_PCB_VERSION_MATCH(addr, pcb) 1 +#define IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ipaddr) 1 + +#define ip_addr_set_any_val(is_ipv6, ipaddr) ip_addr_set_any(is_ipv6, &(ipaddr)) +#define ip_addr_set_loopback_val(is_ipv6, ipaddr) ip_addr_set_loopback(is_ipv6, &(ipaddr)) + +#if LWIP_IPV4 + +typedef ip4_addr_t ip_addr_t; +#define IPADDR4_INIT(u32val) { u32val } +#define IPADDR4_INIT_BYTES(a,b,c,d) IPADDR4_INIT(PP_HTONL(LWIP_MAKEU32(a,b,c,d))) +#define IP_IS_V4_VAL(ipaddr) 1 +#define IP_IS_V6_VAL(ipaddr) 0 +#define IP_IS_V4(ipaddr) 1 +#define IP_IS_V6(ipaddr) 0 +#define IP_IS_ANY_TYPE_VAL(ipaddr) 0 +#define IP_SET_TYPE_VAL(ipaddr, iptype) +#define IP_SET_TYPE(ipaddr, iptype) +#define IP_GET_TYPE(ipaddr) IPADDR_TYPE_V4 +#define IP_ADDR_RAW_SIZE(ipaddr) sizeof(ip4_addr_t) +#define ip_2_ip4(ipaddr) (ipaddr) +#define IP_ADDR4(ipaddr,a,b,c,d) IP4_ADDR(ipaddr,a,b,c,d) + +#define ip_addr_copy(dest, src) ip4_addr_copy(dest, src) +#define ip_addr_copy_from_ip4(dest, src) ip4_addr_copy(dest, src) +#define ip_addr_set_ip4_u32(ipaddr, val) ip4_addr_set_u32(ip_2_ip4(ipaddr), val) +#define ip_addr_set_ip4_u32_val(ipaddr, val) ip_addr_set_ip4_u32(&(ipaddr), val) +#define ip_addr_get_ip4_u32(ipaddr) ip4_addr_get_u32(ip_2_ip4(ipaddr)) +#define ip_addr_set(dest, src) ip4_addr_set(dest, src) +#define ip_addr_set_ipaddr(dest, src) ip4_addr_set(dest, src) +#define ip_addr_set_zero(ipaddr) ip4_addr_set_zero(ipaddr) +#define ip_addr_set_zero_ip4(ipaddr) ip4_addr_set_zero(ipaddr) +#define ip_addr_set_any(is_ipv6, ipaddr) ip4_addr_set_any(ipaddr) +#define ip_addr_set_loopback(is_ipv6, ipaddr) ip4_addr_set_loopback(ipaddr) +#define ip_addr_set_hton(dest, src) ip4_addr_set_hton(dest, src) +#define ip_addr_get_network(target, host, mask) ip4_addr_get_network(target, host, mask) +#define ip_addr_netcmp(addr1, addr2, mask) ip4_addr_netcmp(addr1, addr2, mask) +#define ip_addr_cmp(addr1, addr2) ip4_addr_cmp(addr1, addr2) +#define ip_addr_isany(ipaddr) ip4_addr_isany(ipaddr) +#define ip_addr_isany_val(ipaddr) ip4_addr_isany_val(ipaddr) +#define ip_addr_isloopback(ipaddr) ip4_addr_isloopback(ipaddr) +#define ip_addr_islinklocal(ipaddr) ip4_addr_islinklocal(ipaddr) +#define ip_addr_isbroadcast(addr, netif) ip4_addr_isbroadcast(addr, netif) +#define ip_addr_ismulticast(ipaddr) ip4_addr_ismulticast(ipaddr) +#define ip_addr_debug_print(debug, ipaddr) ip4_addr_debug_print(debug, ipaddr) +#define ip_addr_debug_print_val(debug, ipaddr) ip4_addr_debug_print_val(debug, ipaddr) +#define ipaddr_ntoa(ipaddr) ip4addr_ntoa(ipaddr) +#define ipaddr_ntoa_r(ipaddr, buf, buflen) ip4addr_ntoa_r(ipaddr, buf, buflen) +#define ipaddr_aton(cp, addr) ip4addr_aton(cp, addr) + +#define IPADDR_STRLEN_MAX IP4ADDR_STRLEN_MAX + +#define IP46_ADDR_ANY(type) (IP4_ADDR_ANY) + +#else /* LWIP_IPV4 */ + +typedef ip6_addr_t ip_addr_t; +#define IPADDR6_INIT(a, b, c, d) { { a, b, c, d } IPADDR6_ZONE_INIT } +#define IPADDR6_INIT_HOST(a, b, c, d) { { PP_HTONL(a), PP_HTONL(b), PP_HTONL(c), PP_HTONL(d) } IPADDR6_ZONE_INIT } +#define IP_IS_V4_VAL(ipaddr) 0 +#define IP_IS_V6_VAL(ipaddr) 1 +#define IP_IS_V4(ipaddr) 0 +#define IP_IS_V6(ipaddr) 1 +#define IP_IS_ANY_TYPE_VAL(ipaddr) 0 +#define IP_SET_TYPE_VAL(ipaddr, iptype) +#define IP_SET_TYPE(ipaddr, iptype) +#define IP_GET_TYPE(ipaddr) IPADDR_TYPE_V6 +#define IP_ADDR_RAW_SIZE(ipaddr) sizeof(ip6_addr_t) +#define ip_2_ip6(ipaddr) (ipaddr) +#define IP_ADDR6(ipaddr,i0,i1,i2,i3) IP6_ADDR(ipaddr,i0,i1,i2,i3) +#define IP_ADDR6_HOST(ipaddr,i0,i1,i2,i3) 
IP_ADDR6(ipaddr,PP_HTONL(i0),PP_HTONL(i1),PP_HTONL(i2),PP_HTONL(i3)) + +#define ip_addr_copy(dest, src) ip6_addr_copy(dest, src) +#define ip_addr_copy_from_ip6(dest, src) ip6_addr_copy(dest, src) +#define ip_addr_copy_from_ip6_packed(dest, src) ip6_addr_copy_from_packed(dest, src) +#define ip_addr_set(dest, src) ip6_addr_set(dest, src) +#define ip_addr_set_ipaddr(dest, src) ip6_addr_set(dest, src) +#define ip_addr_set_zero(ipaddr) ip6_addr_set_zero(ipaddr) +#define ip_addr_set_zero_ip6(ipaddr) ip6_addr_set_zero(ipaddr) +#define ip_addr_set_any(is_ipv6, ipaddr) ip6_addr_set_any(ipaddr) +#define ip_addr_set_loopback(is_ipv6, ipaddr) ip6_addr_set_loopback(ipaddr) +#define ip_addr_set_hton(dest, src) ip6_addr_set_hton(dest, src) +#define ip_addr_get_network(target, host, mask) ip6_addr_set_zero(target) +#define ip_addr_netcmp(addr1, addr2, mask) 0 +#define ip_addr_cmp(addr1, addr2) ip6_addr_cmp(addr1, addr2) +#define ip_addr_cmp_zoneless(addr1, addr2) ip6_addr_cmp_zoneless(addr1, addr2) +#define ip_addr_isany(ipaddr) ip6_addr_isany(ipaddr) +#define ip_addr_isany_val(ipaddr) ip6_addr_isany_val(ipaddr) +#define ip_addr_isloopback(ipaddr) ip6_addr_isloopback(ipaddr) +#define ip_addr_islinklocal(ipaddr) ip6_addr_islinklocal(ipaddr) +#define ip_addr_isbroadcast(addr, netif) 0 +#define ip_addr_ismulticast(ipaddr) ip6_addr_ismulticast(ipaddr) +#define ip_addr_debug_print(debug, ipaddr) ip6_addr_debug_print(debug, ipaddr) +#define ip_addr_debug_print_val(debug, ipaddr) ip6_addr_debug_print_val(debug, ipaddr) +#define ipaddr_ntoa(ipaddr) ip6addr_ntoa(ipaddr) +#define ipaddr_ntoa_r(ipaddr, buf, buflen) ip6addr_ntoa_r(ipaddr, buf, buflen) +#define ipaddr_aton(cp, addr) ip6addr_aton(cp, addr) + +#define IPADDR_STRLEN_MAX IP6ADDR_STRLEN_MAX + +#define IP46_ADDR_ANY(type) (IP6_ADDR_ANY) + +#endif /* LWIP_IPV4 */ +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#if LWIP_IPV4 + +extern const ip_addr_t ip_addr_any; +extern const ip_addr_t ip_addr_broadcast; + +/** + * @ingroup ip4addr + * Can be used as a fixed/const ip_addr_t + * for the IP wildcard. + * Defined to @ref IP4_ADDR_ANY when IPv4 is enabled. + * Defined to @ref IP6_ADDR_ANY in IPv6 only systems. + * Use this if you can handle IPv4 _AND_ IPv6 addresses. + * Use @ref IP4_ADDR_ANY or @ref IP6_ADDR_ANY when the IP + * type matters. 
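 *
 * Illustrative use (assumes a UDP PCB obtained elsewhere via udp_new()):
 *
 *   udp_bind(pcb, IP_ADDR_ANY, 7);
 *
 * which binds the PCB to UDP port 7 on the wildcard address.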
+ */ +#define IP_ADDR_ANY IP4_ADDR_ANY +/** + * @ingroup ip4addr + * Can be used as a fixed/const ip_addr_t + * for the IPv4 wildcard and the broadcast address + */ +#define IP4_ADDR_ANY (&ip_addr_any) +/** + * @ingroup ip4addr + * Can be used as a fixed/const ip4_addr_t + * for the wildcard and the broadcast address + */ +#define IP4_ADDR_ANY4 (ip_2_ip4(&ip_addr_any)) + +/** @ingroup ip4addr */ +#define IP_ADDR_BROADCAST (&ip_addr_broadcast) +/** @ingroup ip4addr */ +#define IP4_ADDR_BROADCAST (ip_2_ip4(&ip_addr_broadcast)) + +#endif /* LWIP_IPV4*/ + +#if LWIP_IPV6 + +extern const ip_addr_t ip6_addr_any; + +/** + * @ingroup ip6addr + * IP6_ADDR_ANY can be used as a fixed ip_addr_t + * for the IPv6 wildcard address + */ +#define IP6_ADDR_ANY (&ip6_addr_any) +/** + * @ingroup ip6addr + * IP6_ADDR_ANY6 can be used as a fixed ip6_addr_t + * for the IPv6 wildcard address + */ +#define IP6_ADDR_ANY6 (ip_2_ip6(&ip6_addr_any)) + +#if !LWIP_IPV4 +/** IPv6-only configurations */ +#define IP_ADDR_ANY IP6_ADDR_ANY +#endif /* !LWIP_IPV4 */ + +#endif + +#if LWIP_IPV4 && LWIP_IPV6 +/** @ingroup ipaddr */ +#define IP_ANY_TYPE (&ip_addr_any_type) +#else +#define IP_ANY_TYPE IP_ADDR_ANY +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_IP_ADDR_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/mem.h b/Middlewares/Third_Party/LwIP/src/include/lwip/mem.h new file mode 100644 index 0000000..ff208d2 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/mem.h @@ -0,0 +1,82 @@ +/** + * @file + * Heap API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_MEM_H +#define LWIP_HDR_MEM_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if MEM_LIBC_MALLOC + +#include "lwip/arch.h" + +typedef size_t mem_size_t; +#define MEM_SIZE_F SZT_F + +#elif MEM_USE_POOLS + +typedef u16_t mem_size_t; +#define MEM_SIZE_F U16_F + +#else + +/* MEM_SIZE would have to be aligned, but using 64000 here instead of + * 65535 leaves some room for alignment... + */ +#if MEM_SIZE > 64000L +typedef u32_t mem_size_t; +#define MEM_SIZE_F U32_F +#else +typedef u16_t mem_size_t; +#define MEM_SIZE_F U16_F +#endif /* MEM_SIZE > 64000 */ +#endif + +void mem_init(void); +void *mem_trim(void *mem, mem_size_t size); +void *mem_malloc(mem_size_t size); +void *mem_calloc(mem_size_t count, mem_size_t size); +void mem_free(void *mem); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_MEM_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/memp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/memp.h new file mode 100644 index 0000000..1630b26 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/memp.h @@ -0,0 +1,155 @@ +/** + * @file + * Memory pool API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_MEMP_H +#define LWIP_HDR_MEMP_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* run once with empty definition to handle all custom includes in lwippools.h */ +#define LWIP_MEMPOOL(name,num,size,desc) +#include "lwip/priv/memp_std.h" + +/** Create the list of all memory pools managed by memp. 
MEMP_MAX represents a NULL pool at the end */ +typedef enum { +#define LWIP_MEMPOOL(name,num,size,desc) MEMP_##name, +#include "lwip/priv/memp_std.h" + MEMP_MAX +} memp_t; + +#include "lwip/priv/memp_priv.h" +#include "lwip/stats.h" + +extern const struct memp_desc* const memp_pools[MEMP_MAX]; + +/** + * @ingroup mempool + * Declare prototype for private memory pool if it is used in multiple files + */ +#define LWIP_MEMPOOL_PROTOTYPE(name) extern const struct memp_desc memp_ ## name + +#if MEMP_MEM_MALLOC + +#define LWIP_MEMPOOL_DECLARE(name,num,size,desc) \ + LWIP_MEMPOOL_DECLARE_STATS_INSTANCE(memp_stats_ ## name) \ + const struct memp_desc memp_ ## name = { \ + DECLARE_LWIP_MEMPOOL_DESC(desc) \ + LWIP_MEMPOOL_DECLARE_STATS_REFERENCE(memp_stats_ ## name) \ + LWIP_MEM_ALIGN_SIZE(size) \ + }; + +#else /* MEMP_MEM_MALLOC */ + +/** + * @ingroup mempool + * Declare a private memory pool + * Private mempools example: + * .h: only when pool is used in multiple .c files: LWIP_MEMPOOL_PROTOTYPE(my_private_pool); + * .c: + * - in global variables section: LWIP_MEMPOOL_DECLARE(my_private_pool, 10, sizeof(foo), "Some description") + * - call ONCE before using pool (e.g. in some init() function): LWIP_MEMPOOL_INIT(my_private_pool); + * - allocate: void* my_new_mem = LWIP_MEMPOOL_ALLOC(my_private_pool); + * - free: LWIP_MEMPOOL_FREE(my_private_pool, my_new_mem); + * + * To relocate a pool, declare it as extern in cc.h. Example for GCC: + * extern u8_t \_\_attribute\_\_((section(".onchip_mem"))) memp_memory_my_private_pool_base[]; + */ +#define LWIP_MEMPOOL_DECLARE(name,num,size,desc) \ + LWIP_DECLARE_MEMORY_ALIGNED(memp_memory_ ## name ## _base, ((num) * (MEMP_SIZE + MEMP_ALIGN_SIZE(size)))); \ + \ + LWIP_MEMPOOL_DECLARE_STATS_INSTANCE(memp_stats_ ## name) \ + \ + static struct memp *memp_tab_ ## name; \ + \ + const struct memp_desc memp_ ## name = { \ + DECLARE_LWIP_MEMPOOL_DESC(desc) \ + LWIP_MEMPOOL_DECLARE_STATS_REFERENCE(memp_stats_ ## name) \ + LWIP_MEM_ALIGN_SIZE(size), \ + (num), \ + memp_memory_ ## name ## _base, \ + &memp_tab_ ## name \ + }; + +#endif /* MEMP_MEM_MALLOC */ + +/** + * @ingroup mempool + * Initialize a private memory pool + */ +#define LWIP_MEMPOOL_INIT(name) memp_init_pool(&memp_ ## name) +/** + * @ingroup mempool + * Allocate from a private memory pool + */ +#define LWIP_MEMPOOL_ALLOC(name) memp_malloc_pool(&memp_ ## name) +/** + * @ingroup mempool + * Free element from a private memory pool + */ +#define LWIP_MEMPOOL_FREE(name, x) memp_free_pool(&memp_ ## name, (x)) + +#if MEM_USE_POOLS +/** This structure is used to save the pool one element came from. + * This has to be defined here as it is required for pool size calculation. 
*/ +struct memp_malloc_helper +{ + memp_t poolnr; +#if MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) + u16_t size; +#endif /* MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) */ +}; +#endif /* MEM_USE_POOLS */ + +void memp_init(void); + +#if MEMP_OVERFLOW_CHECK +void *memp_malloc_fn(memp_t type, const char* file, const int line); +#define memp_malloc(t) memp_malloc_fn((t), __FILE__, __LINE__) +#else +void *memp_malloc(memp_t type); +#endif +void memp_free(memp_t type, void *mem); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_MEMP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/mld6.h b/Middlewares/Third_Party/LwIP/src/include/lwip/mld6.h new file mode 100644 index 0000000..7fa0797 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/mld6.h @@ -0,0 +1,99 @@ +/** + * @file + * + * Multicast listener discovery for IPv6. Aims to be compliant with RFC 2710. + * No support for MLDv2. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_MLD6_H +#define LWIP_HDR_MLD6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6_MLD && LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** MLD group */ +struct mld_group { + /** next link */ + struct mld_group *next; + /** multicast address */ + ip6_addr_t group_address; + /** signifies we were the last person to report */ + u8_t last_reporter_flag; + /** current state of the group */ + u8_t group_state; + /** timer for reporting */ + u16_t timer; + /** counter of simultaneous uses */ + u8_t use; +}; + +#define MLD6_TMR_INTERVAL 100 /* Milliseconds */ + +err_t mld6_stop(struct netif *netif); +void mld6_report_groups(struct netif *netif); +void mld6_tmr(void); +struct mld_group *mld6_lookfor_group(struct netif *ifp, const ip6_addr_t *addr); +void mld6_input(struct pbuf *p, struct netif *inp); +err_t mld6_joingroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr); +err_t mld6_joingroup_netif(struct netif *netif, const ip6_addr_t *groupaddr); +err_t mld6_leavegroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr); +err_t mld6_leavegroup_netif(struct netif *netif, const ip6_addr_t *groupaddr); + +/** @ingroup mld6 + * Get list head of MLD6 groups for netif. + * Note: The allnodes group IP is NOT in the list, since it must always + * be received for correct IPv6 operation. + * @see @ref netif_set_mld_mac_filter() + */ +#define netif_mld6_data(netif) ((struct mld_group *)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6_MLD && LWIP_IPV6 */ + +#endif /* LWIP_HDR_MLD6_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/nd6.h b/Middlewares/Third_Party/LwIP/src/include/lwip/nd6.h new file mode 100644 index 0000000..c30e624 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/nd6.h @@ -0,0 +1,90 @@ +/** + * @file + * + * Neighbor discovery and stateless address autoconfiguration for IPv6. + * Aims to be compliant with RFC 4861 (Neighbor discovery) and RFC 4862 + * (Address autoconfiguration). + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_ND6_H +#define LWIP_HDR_ND6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip6_addr.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** 1 second period */ +#define ND6_TMR_INTERVAL 1000 + +/** Router solicitations are sent in 4 second intervals (see RFC 4861, ch. 6.3.7) */ +#ifndef ND6_RTR_SOLICITATION_INTERVAL +#define ND6_RTR_SOLICITATION_INTERVAL 4000 +#endif + +struct pbuf; +struct netif; + +void nd6_tmr(void); +void nd6_input(struct pbuf *p, struct netif *inp); +void nd6_clear_destination_cache(void); +struct netif *nd6_find_route(const ip6_addr_t *ip6addr); +err_t nd6_get_next_hop_addr_or_queue(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr, const u8_t **hwaddrp); +u16_t nd6_get_destination_mtu(const ip6_addr_t *ip6addr, struct netif *netif); +#if LWIP_ND6_TCP_REACHABILITY_HINTS +void nd6_reachability_hint(const ip6_addr_t *ip6addr); +#endif /* LWIP_ND6_TCP_REACHABILITY_HINTS */ +void nd6_cleanup_netif(struct netif *netif); +#if LWIP_IPV6_MLD +void nd6_adjust_mld_membership(struct netif *netif, s8_t addr_idx, u8_t new_state); +#endif /* LWIP_IPV6_MLD */ +void nd6_restart_netif(struct netif *netif); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_ND6_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/netbuf.h b/Middlewares/Third_Party/LwIP/src/include/lwip/netbuf.h new file mode 100644 index 0000000..42a911b --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/netbuf.h @@ -0,0 +1,116 @@ +/** + * @file + * netbuf API (for netconn API) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_NETBUF_H +#define LWIP_HDR_NETBUF_H + +#include "lwip/opt.h" + +#if LWIP_NETCONN || LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ +/* Note: Netconn API is always available when sockets are enabled - + * sockets are implemented on top of them */ + +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** This netbuf has dest-addr/port set */ +#define NETBUF_FLAG_DESTADDR 0x01 +/** This netbuf includes a checksum */ +#define NETBUF_FLAG_CHKSUM 0x02 + +/** "Network buffer" - contains data and addressing info */ +struct netbuf { + struct pbuf *p, *ptr; + ip_addr_t addr; + u16_t port; +#if LWIP_NETBUF_RECVINFO || LWIP_CHECKSUM_ON_COPY + u8_t flags; + u16_t toport_chksum; +#if LWIP_NETBUF_RECVINFO + ip_addr_t toaddr; +#endif /* LWIP_NETBUF_RECVINFO */ +#endif /* LWIP_NETBUF_RECVINFO || LWIP_CHECKSUM_ON_COPY */ +}; + +/* Network buffer functions: */ +struct netbuf * netbuf_new (void); +void netbuf_delete (struct netbuf *buf); +void * netbuf_alloc (struct netbuf *buf, u16_t size); +void netbuf_free (struct netbuf *buf); +err_t netbuf_ref (struct netbuf *buf, + const void *dataptr, u16_t size); +void netbuf_chain (struct netbuf *head, struct netbuf *tail); + +err_t netbuf_data (struct netbuf *buf, + void **dataptr, u16_t *len); +s8_t netbuf_next (struct netbuf *buf); +void netbuf_first (struct netbuf *buf); + + +#define netbuf_copy_partial(buf, dataptr, len, offset) \ + pbuf_copy_partial((buf)->p, (dataptr), (len), (offset)) +#define netbuf_copy(buf,dataptr,len) netbuf_copy_partial(buf, dataptr, len, 0) +#define netbuf_take(buf, dataptr, len) pbuf_take((buf)->p, dataptr, len) +#define netbuf_len(buf) ((buf)->p->tot_len) +#define netbuf_fromaddr(buf) (&((buf)->addr)) +#define netbuf_set_fromaddr(buf, fromaddr) ip_addr_set(&((buf)->addr), fromaddr) +#define netbuf_fromport(buf) ((buf)->port) +#if LWIP_NETBUF_RECVINFO +#define netbuf_destaddr(buf) (&((buf)->toaddr)) +#define netbuf_set_destaddr(buf, destaddr) ip_addr_set(&((buf)->toaddr), destaddr) +#if LWIP_CHECKSUM_ON_COPY +#define netbuf_destport(buf) (((buf)->flags & NETBUF_FLAG_DESTADDR) ? 
(buf)->toport_chksum : 0) +#else /* LWIP_CHECKSUM_ON_COPY */ +#define netbuf_destport(buf) ((buf)->toport_chksum) +#endif /* LWIP_CHECKSUM_ON_COPY */ +#endif /* LWIP_NETBUF_RECVINFO */ +#if LWIP_CHECKSUM_ON_COPY +#define netbuf_set_chksum(buf, chksum) do { (buf)->flags = NETBUF_FLAG_CHKSUM; \ + (buf)->toport_chksum = chksum; } while(0) +#endif /* LWIP_CHECKSUM_ON_COPY */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_NETCONN || LWIP_SOCKET */ + +#endif /* LWIP_HDR_NETBUF_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/netdb.h b/Middlewares/Third_Party/LwIP/src/include/lwip/netdb.h new file mode 100644 index 0000000..d3d15df --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/netdb.h @@ -0,0 +1,150 @@ +/** + * @file + * NETDB API (sockets) + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
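Illustrative usage sketch (not from the lwIP sources): a minimal example of the netbuf API declared in netbuf.h above, wrapping application-owned data for sending and walking the pbuf chain of a received netbuf. The function name and its arguments are hypothetical.

#include "lwip/netbuf.h"

static err_t netbuf_sketch(const void *payload, u16_t payload_len, struct netbuf *rx)
{
  struct netbuf *tx;
  void *data;
  u16_t len;

  /* Reference (zero-copy) existing application data for sending. */
  tx = netbuf_new();
  if (tx == NULL) {
    return ERR_MEM;
  }
  if (netbuf_ref(tx, payload, payload_len) != ERR_OK) {
    netbuf_delete(tx);
    return ERR_MEM;
  }
  /* ... hand 'tx' to netconn_send()/netconn_sendto() here ... */
  netbuf_delete(tx);

  /* Iterate over all fragments of a received netbuf. */
  do {
    netbuf_data(rx, &data, &len);
    /* process 'len' bytes starting at 'data' */
  } while (netbuf_next(rx) >= 0);
  netbuf_first(rx);  /* rewind to the first fragment */
  return ERR_OK;
}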
+ * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_NETDB_H +#define LWIP_HDR_NETDB_H + +#include "lwip/opt.h" + +#if LWIP_DNS && LWIP_SOCKET + +#include "lwip/arch.h" +#include "lwip/inet.h" +#include "lwip/sockets.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* some rarely used options */ +#ifndef LWIP_DNS_API_DECLARE_H_ERRNO +#define LWIP_DNS_API_DECLARE_H_ERRNO 1 +#endif + +#ifndef LWIP_DNS_API_DEFINE_ERRORS +#define LWIP_DNS_API_DEFINE_ERRORS 1 +#endif + +#ifndef LWIP_DNS_API_DEFINE_FLAGS +#define LWIP_DNS_API_DEFINE_FLAGS 1 +#endif + +#ifndef LWIP_DNS_API_DECLARE_STRUCTS +#define LWIP_DNS_API_DECLARE_STRUCTS 1 +#endif + +#if LWIP_DNS_API_DEFINE_ERRORS +/** Errors used by the DNS API functions, h_errno can be one of them */ +#define EAI_NONAME 200 +#define EAI_SERVICE 201 +#define EAI_FAIL 202 +#define EAI_MEMORY 203 +#define EAI_FAMILY 204 + +#define HOST_NOT_FOUND 210 +#define NO_DATA 211 +#define NO_RECOVERY 212 +#define TRY_AGAIN 213 +#endif /* LWIP_DNS_API_DEFINE_ERRORS */ + +#if LWIP_DNS_API_DEFINE_FLAGS +/* input flags for struct addrinfo */ +#define AI_PASSIVE 0x01 +#define AI_CANONNAME 0x02 +#define AI_NUMERICHOST 0x04 +#define AI_NUMERICSERV 0x08 +#define AI_V4MAPPED 0x10 +#define AI_ALL 0x20 +#define AI_ADDRCONFIG 0x40 +#endif /* LWIP_DNS_API_DEFINE_FLAGS */ + +#if LWIP_DNS_API_DECLARE_STRUCTS +struct hostent { + char *h_name; /* Official name of the host. */ + char **h_aliases; /* A pointer to an array of pointers to alternative host names, + terminated by a null pointer. */ + int h_addrtype; /* Address type. */ + int h_length; /* The length, in bytes, of the address. */ + char **h_addr_list; /* A pointer to an array of pointers to network addresses (in + network byte order) for the host, terminated by a null pointer. */ +#define h_addr h_addr_list[0] /* for backward compatibility */ +}; + +struct addrinfo { + int ai_flags; /* Input flags. */ + int ai_family; /* Address family of socket. */ + int ai_socktype; /* Socket type. */ + int ai_protocol; /* Protocol of socket. */ + socklen_t ai_addrlen; /* Length of socket address. */ + struct sockaddr *ai_addr; /* Socket address of socket. */ + char *ai_canonname; /* Canonical name of service location. */ + struct addrinfo *ai_next; /* Pointer to next in list. 
*/ +}; +#endif /* LWIP_DNS_API_DECLARE_STRUCTS */ + +#define NETDB_ELEM_SIZE (sizeof(struct addrinfo) + sizeof(struct sockaddr_storage) + DNS_MAX_NAME_LENGTH + 1) + +#if LWIP_DNS_API_DECLARE_H_ERRNO +/* application accessible error code set by the DNS API functions */ +extern int h_errno; +#endif /* LWIP_DNS_API_DECLARE_H_ERRNO*/ + +struct hostent *lwip_gethostbyname(const char *name); +int lwip_gethostbyname_r(const char *name, struct hostent *ret, char *buf, + size_t buflen, struct hostent **result, int *h_errnop); +void lwip_freeaddrinfo(struct addrinfo *ai); +int lwip_getaddrinfo(const char *nodename, + const char *servname, + const struct addrinfo *hints, + struct addrinfo **res); + +#if LWIP_COMPAT_SOCKETS +/** @ingroup netdbapi */ +#define gethostbyname(name) lwip_gethostbyname(name) +/** @ingroup netdbapi */ +#define gethostbyname_r(name, ret, buf, buflen, result, h_errnop) \ + lwip_gethostbyname_r(name, ret, buf, buflen, result, h_errnop) +/** @ingroup netdbapi */ +#define freeaddrinfo(addrinfo) lwip_freeaddrinfo(addrinfo) +/** @ingroup netdbapi */ +#define getaddrinfo(nodname, servname, hints, res) \ + lwip_getaddrinfo(nodname, servname, hints, res) +#endif /* LWIP_COMPAT_SOCKETS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_DNS && LWIP_SOCKET */ + +#endif /* LWIP_HDR_NETDB_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/netif.h b/Middlewares/Third_Party/LwIP/src/include/lwip/netif.h new file mode 100644 index 0000000..911196a --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/netif.h @@ -0,0 +1,669 @@ +/** + * @file + * netif API (to be used from TCPIP thread) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
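Illustrative usage sketch (not from the lwIP sources): resolving a host name with lwip_getaddrinfo() from netdb.h above and connecting a socket to the first result. Requires LWIP_DNS and LWIP_SOCKET; the function name and its arguments are hypothetical.

#include <string.h>
#include "lwip/netdb.h"
#include "lwip/sockets.h"

/* Returns a connected socket descriptor, or -1 on failure. */
static int connect_by_name(const char *host, const char *port)
{
  struct addrinfo hints, *res;
  int s;

  memset(&hints, 0, sizeof(hints));
  hints.ai_family = AF_UNSPEC;
  hints.ai_socktype = SOCK_STREAM;

  if (lwip_getaddrinfo(host, port, &hints, &res) != 0 || res == NULL) {
    return -1;
  }
  s = lwip_socket(res->ai_family, res->ai_socktype, res->ai_protocol);
  if (s >= 0 && lwip_connect(s, res->ai_addr, res->ai_addrlen) != 0) {
    lwip_close(s);
    s = -1;
  }
  lwip_freeaddrinfo(res);
  return s;
}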
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_NETIF_H +#define LWIP_HDR_NETIF_H + +#include "lwip/opt.h" + +#define ENABLE_LOOPBACK (LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF) + +#include "lwip/err.h" + +#include "lwip/ip_addr.h" + +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/stats.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Throughout this file, IP addresses are expected to be in + * the same byte order as in IP_PCB. */ + +/** Must be the maximum of all used hardware address lengths + across all types of interfaces in use. + This does not have to be changed, normally. */ +#ifndef NETIF_MAX_HWADDR_LEN +#define NETIF_MAX_HWADDR_LEN 6U +#endif + +/** The size of a fully constructed netif name which the + * netif can be identified by in APIs. Composed of + * 2 chars, 3 (max) digits, and 1 \0 + */ +#define NETIF_NAMESIZE 6 + +/** + * @defgroup netif_flags Flags + * @ingroup netif + * @{ + */ + +/** Whether the network interface is 'up'. This is + * a software flag used to control whether this network + * interface is enabled and processes traffic. + * It must be set by the startup code before this netif can be used + * (also for dhcp/autoip). + */ +#define NETIF_FLAG_UP 0x01U +/** If set, the netif has broadcast capability. + * Set by the netif driver in its init function. */ +#define NETIF_FLAG_BROADCAST 0x02U +/** If set, the interface has an active link + * (set by the network interface driver). + * Either set by the netif driver in its init function (if the link + * is up at that time) or at a later point once the link comes up + * (if link detection is supported by the hardware). */ +#define NETIF_FLAG_LINK_UP 0x04U +/** If set, the netif is an ethernet device using ARP. + * Set by the netif driver in its init function. + * Used to check input packet types and use of DHCP. */ +#define NETIF_FLAG_ETHARP 0x08U +/** If set, the netif is an ethernet device. It might not use + * ARP or TCP/IP if it is used for PPPoE only. + */ +#define NETIF_FLAG_ETHERNET 0x10U +/** If set, the netif has IGMP capability. + * Set by the netif driver in its init function. */ +#define NETIF_FLAG_IGMP 0x20U +/** If set, the netif has MLD6 capability. + * Set by the netif driver in its init function. */ +#define NETIF_FLAG_MLD6 0x40U + +/** + * @} + */ + +enum lwip_internal_netif_client_data_index +{ +#if LWIP_IPV4 +#if LWIP_DHCP + LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, +#endif +#if LWIP_AUTOIP + LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP, +#endif +#if LWIP_IGMP + LWIP_NETIF_CLIENT_DATA_INDEX_IGMP, +#endif +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 +#if LWIP_IPV6_DHCP6 + LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, +#endif +#if LWIP_IPV6_MLD + LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, +#endif +#endif /* LWIP_IPV6 */ + LWIP_NETIF_CLIENT_DATA_INDEX_MAX +}; + +#if LWIP_CHECKSUM_CTRL_PER_NETIF +#define NETIF_CHECKSUM_GEN_IP 0x0001 +#define NETIF_CHECKSUM_GEN_UDP 0x0002 +#define NETIF_CHECKSUM_GEN_TCP 0x0004 +#define NETIF_CHECKSUM_GEN_ICMP 0x0008 +#define NETIF_CHECKSUM_GEN_ICMP6 0x0010 +#define NETIF_CHECKSUM_CHECK_IP 0x0100 +#define NETIF_CHECKSUM_CHECK_UDP 0x0200 +#define NETIF_CHECKSUM_CHECK_TCP 0x0400 +#define NETIF_CHECKSUM_CHECK_ICMP 0x0800 +#define NETIF_CHECKSUM_CHECK_ICMP6 0x1000 +#define NETIF_CHECKSUM_ENABLE_ALL 0xFFFF +#define NETIF_CHECKSUM_DISABLE_ALL 0x0000 +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */ + +struct netif; + +/** MAC Filter Actions, these are passed to a netif's igmp_mac_filter or + * mld_mac_filter callback function. 
*/ +enum netif_mac_filter_action { + /** Delete a filter entry */ + NETIF_DEL_MAC_FILTER = 0, + /** Add a filter entry */ + NETIF_ADD_MAC_FILTER = 1 +}; + +/** Function prototype for netif init functions. Set up flags and output/linkoutput + * callback functions in this function. + * + * @param netif The netif to initialize + */ +typedef err_t (*netif_init_fn)(struct netif *netif); +/** Function prototype for netif->input functions. This function is saved as 'input' + * callback function in the netif struct. Call it when a packet has been received. + * + * @param p The received packet, copied into a pbuf + * @param inp The netif which received the packet + * @return ERR_OK if the packet was handled + * != ERR_OK is the packet was NOT handled, in this case, the caller has + * to free the pbuf + */ +typedef err_t (*netif_input_fn)(struct pbuf *p, struct netif *inp); + +#if LWIP_IPV4 +/** Function prototype for netif->output functions. Called by lwIP when a packet + * shall be sent. For ethernet netif, set this to 'etharp_output' and set + * 'linkoutput'. + * + * @param netif The netif which shall send a packet + * @param p The packet to send (p->payload points to IP header) + * @param ipaddr The IP address to which the packet shall be sent + */ +typedef err_t (*netif_output_fn)(struct netif *netif, struct pbuf *p, + const ip4_addr_t *ipaddr); +#endif /* LWIP_IPV4*/ + +#if LWIP_IPV6 +/** Function prototype for netif->output_ip6 functions. Called by lwIP when a packet + * shall be sent. For ethernet netif, set this to 'ethip6_output' and set + * 'linkoutput'. + * + * @param netif The netif which shall send a packet + * @param p The packet to send (p->payload points to IP header) + * @param ipaddr The IPv6 address to which the packet shall be sent + */ +typedef err_t (*netif_output_ip6_fn)(struct netif *netif, struct pbuf *p, + const ip6_addr_t *ipaddr); +#endif /* LWIP_IPV6 */ + +/** Function prototype for netif->linkoutput functions. Only used for ethernet + * netifs. This function is called by ARP when a packet shall be sent. + * + * @param netif The netif which shall send a packet + * @param p The packet to send (raw ethernet packet) + */ +typedef err_t (*netif_linkoutput_fn)(struct netif *netif, struct pbuf *p); +/** Function prototype for netif status- or link-callback functions. */ +typedef void (*netif_status_callback_fn)(struct netif *netif); +#if LWIP_IPV4 && LWIP_IGMP +/** Function prototype for netif igmp_mac_filter functions */ +typedef err_t (*netif_igmp_mac_filter_fn)(struct netif *netif, + const ip4_addr_t *group, enum netif_mac_filter_action action); +#endif /* LWIP_IPV4 && LWIP_IGMP */ +#if LWIP_IPV6 && LWIP_IPV6_MLD +/** Function prototype for netif mld_mac_filter functions */ +typedef err_t (*netif_mld_mac_filter_fn)(struct netif *netif, + const ip6_addr_t *group, enum netif_mac_filter_action action); +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + +#if LWIP_DHCP || LWIP_AUTOIP || LWIP_IGMP || LWIP_IPV6_MLD || LWIP_IPV6_DHCP6 || (LWIP_NUM_NETIF_CLIENT_DATA > 0) +#if LWIP_NUM_NETIF_CLIENT_DATA > 0 +u8_t netif_alloc_client_data_id(void); +#endif +/** @ingroup netif_cd + * Set client data. Obtain ID from netif_alloc_client_data_id(). + */ +#define netif_set_client_data(netif, id, data) netif_get_client_data(netif, id) = (data) +/** @ingroup netif_cd + * Get client data. Obtain ID from netif_alloc_client_data_id(). 
+ */ +#define netif_get_client_data(netif, id) (netif)->client_data[(id)] +#endif + +#if (LWIP_IPV4 && LWIP_ARP && (ARP_TABLE_SIZE > 0x7f)) || (LWIP_IPV6 && (LWIP_ND6_NUM_DESTINATIONS > 0x7f)) +typedef u16_t netif_addr_idx_t; +#define NETIF_ADDR_IDX_MAX 0x7FFF +#else +typedef u8_t netif_addr_idx_t; +#define NETIF_ADDR_IDX_MAX 0x7F +#endif + +#if LWIP_NETIF_HWADDRHINT +#define LWIP_NETIF_USE_HINTS 1 +struct netif_hint { + netif_addr_idx_t addr_hint; +}; +#else /* LWIP_NETIF_HWADDRHINT */ +#define LWIP_NETIF_USE_HINTS 0 +#endif /* LWIP_NETIF_HWADDRHINT */ + +/** Generic data structure used for all lwIP network interfaces. + * The following fields should be filled in by the initialization + * function for the device driver: hwaddr_len, hwaddr[], mtu, flags */ +struct netif { +#if !LWIP_SINGLE_NETIF + /** pointer to next in linked list */ + struct netif *next; +#endif + +#if LWIP_IPV4 + /** IP address configuration in network byte order */ + ip_addr_t ip_addr; + ip_addr_t netmask; + ip_addr_t gw; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + /** Array of IPv6 addresses for this netif. */ + ip_addr_t ip6_addr[LWIP_IPV6_NUM_ADDRESSES]; + /** The state of each IPv6 address (Tentative, Preferred, etc). + * @see ip6_addr.h */ + u8_t ip6_addr_state[LWIP_IPV6_NUM_ADDRESSES]; +#if LWIP_IPV6_ADDRESS_LIFETIMES + /** Remaining valid and preferred lifetime of each IPv6 address, in seconds. + * For valid lifetimes, the special value of IP6_ADDR_LIFE_STATIC (0) + * indicates the address is static and has no lifetimes. */ + u32_t ip6_addr_valid_life[LWIP_IPV6_NUM_ADDRESSES]; + u32_t ip6_addr_pref_life[LWIP_IPV6_NUM_ADDRESSES]; +#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */ +#endif /* LWIP_IPV6 */ + /** This function is called by the network device driver + * to pass a packet up the TCP/IP stack. */ + netif_input_fn input; +#if LWIP_IPV4 + /** This function is called by the IP module when it wants + * to send a packet on the interface. This function typically + * first resolves the hardware address, then sends the packet. + * For ethernet physical layer, this is usually etharp_output() */ + netif_output_fn output; +#endif /* LWIP_IPV4 */ + /** This function is called by ethernet_output() when it wants + * to send a packet on the interface. This function outputs + * the pbuf as-is on the link medium. */ + netif_linkoutput_fn linkoutput; +#if LWIP_IPV6 + /** This function is called by the IPv6 module when it wants + * to send a packet on the interface. This function typically + * first resolves the hardware address, then sends the packet. + * For ethernet physical layer, this is usually ethip6_output() */ + netif_output_ip6_fn output_ip6; +#endif /* LWIP_IPV6 */ +#if LWIP_NETIF_STATUS_CALLBACK + /** This function is called when the netif state is set to up or down + */ + netif_status_callback_fn status_callback; +#endif /* LWIP_NETIF_STATUS_CALLBACK */ +#if LWIP_NETIF_LINK_CALLBACK + /** This function is called when the netif link is set to up or down + */ + netif_status_callback_fn link_callback; +#endif /* LWIP_NETIF_LINK_CALLBACK */ +#if LWIP_NETIF_REMOVE_CALLBACK + /** This function is called when the netif has been removed */ + netif_status_callback_fn remove_callback; +#endif /* LWIP_NETIF_REMOVE_CALLBACK */ + /** This field can be set by the device driver and could point + * to state information for the device. 
*/ + void *state; +#ifdef netif_get_client_data + void* client_data[LWIP_NETIF_CLIENT_DATA_INDEX_MAX + LWIP_NUM_NETIF_CLIENT_DATA]; +#endif +#if LWIP_NETIF_HOSTNAME + /* the hostname for this netif, NULL is a valid value */ + const char* hostname; +#endif /* LWIP_NETIF_HOSTNAME */ +#if LWIP_CHECKSUM_CTRL_PER_NETIF + u16_t chksum_flags; +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF*/ + /** maximum transfer unit (in bytes) */ + u16_t mtu; +#if LWIP_IPV6 && LWIP_ND6_ALLOW_RA_UPDATES + /** maximum transfer unit (in bytes), updated by RA */ + u16_t mtu6; +#endif /* LWIP_IPV6 && LWIP_ND6_ALLOW_RA_UPDATES */ + /** link level hardware address of this interface */ + u8_t hwaddr[NETIF_MAX_HWADDR_LEN]; + /** number of bytes used in hwaddr */ + u8_t hwaddr_len; + /** flags (@see @ref netif_flags) */ + u8_t flags; + /** descriptive abbreviation */ + char name[2]; + /** number of this interface. Used for @ref if_api and @ref netifapi_netif, + * as well as for IPv6 zones */ + u8_t num; +#if LWIP_IPV6_AUTOCONFIG + /** is this netif enabled for IPv6 autoconfiguration */ + u8_t ip6_autoconfig_enabled; +#endif /* LWIP_IPV6_AUTOCONFIG */ +#if LWIP_IPV6_SEND_ROUTER_SOLICIT + /** Number of Router Solicitation messages that remain to be sent. */ + u8_t rs_count; +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ +#if MIB2_STATS + /** link type (from "snmp_ifType" enum from snmp_mib2.h) */ + u8_t link_type; + /** (estimate) link speed */ + u32_t link_speed; + /** timestamp at last change made (up/down) */ + u32_t ts; + /** counters */ + struct stats_mib2_netif_ctrs mib2_counters; +#endif /* MIB2_STATS */ +#if LWIP_IPV4 && LWIP_IGMP + /** This function could be called to add or delete an entry in the multicast + filter table of the ethernet MAC.*/ + netif_igmp_mac_filter_fn igmp_mac_filter; +#endif /* LWIP_IPV4 && LWIP_IGMP */ +#if LWIP_IPV6 && LWIP_IPV6_MLD + /** This function could be called to add or delete an entry in the IPv6 multicast + filter table of the ethernet MAC. */ + netif_mld_mac_filter_fn mld_mac_filter; +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ +#if LWIP_NETIF_USE_HINTS + struct netif_hint *hints; +#endif /* LWIP_NETIF_USE_HINTS */ +#if ENABLE_LOOPBACK + /* List of packets to be queued for ourselves. */ + struct pbuf *loop_first; + struct pbuf *loop_last; +#if LWIP_LOOPBACK_MAX_PBUFS + u16_t loop_cnt_current; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ +#endif /* ENABLE_LOOPBACK */ +}; + +#if LWIP_CHECKSUM_CTRL_PER_NETIF +#define NETIF_SET_CHECKSUM_CTRL(netif, chksumflags) do { \ + (netif)->chksum_flags = chksumflags; } while(0) +#define IF__NETIF_CHECKSUM_ENABLED(netif, chksumflag) if (((netif) == NULL) || (((netif)->chksum_flags & (chksumflag)) != 0)) +#else /* LWIP_CHECKSUM_CTRL_PER_NETIF */ +#define NETIF_SET_CHECKSUM_CTRL(netif, chksumflags) +#define IF__NETIF_CHECKSUM_ENABLED(netif, chksumflag) +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */ + +#if LWIP_SINGLE_NETIF +#define NETIF_FOREACH(netif) if (((netif) = netif_default) != NULL) +#else /* LWIP_SINGLE_NETIF */ +/** The list of network interfaces. */ +extern struct netif *netif_list; +#define NETIF_FOREACH(netif) for ((netif) = netif_list; (netif) != NULL; (netif) = (netif)->next) +#endif /* LWIP_SINGLE_NETIF */ +/** The default network interface. 
*/ +extern struct netif *netif_default; + +void netif_init(void); + +struct netif *netif_add_noaddr(struct netif *netif, void *state, netif_init_fn init, netif_input_fn input); + +#if LWIP_IPV4 +struct netif *netif_add(struct netif *netif, + const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw, + void *state, netif_init_fn init, netif_input_fn input); +void netif_set_addr(struct netif *netif, const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, + const ip4_addr_t *gw); +#else /* LWIP_IPV4 */ +struct netif *netif_add(struct netif *netif, void *state, netif_init_fn init, netif_input_fn input); +#endif /* LWIP_IPV4 */ +void netif_remove(struct netif * netif); + +/* Returns a network interface given its name. The name is of the form + "et0", where the first two letters are the "name" field in the + netif structure, and the digit is in the num field in the same + structure. */ +struct netif *netif_find(const char *name); + +void netif_set_default(struct netif *netif); + +#if LWIP_IPV4 +void netif_set_ipaddr(struct netif *netif, const ip4_addr_t *ipaddr); +void netif_set_netmask(struct netif *netif, const ip4_addr_t *netmask); +void netif_set_gw(struct netif *netif, const ip4_addr_t *gw); +/** @ingroup netif_ip4 */ +#define netif_ip4_addr(netif) ((const ip4_addr_t*)ip_2_ip4(&((netif)->ip_addr))) +/** @ingroup netif_ip4 */ +#define netif_ip4_netmask(netif) ((const ip4_addr_t*)ip_2_ip4(&((netif)->netmask))) +/** @ingroup netif_ip4 */ +#define netif_ip4_gw(netif) ((const ip4_addr_t*)ip_2_ip4(&((netif)->gw))) +/** @ingroup netif_ip4 */ +#define netif_ip_addr4(netif) ((const ip_addr_t*)&((netif)->ip_addr)) +/** @ingroup netif_ip4 */ +#define netif_ip_netmask4(netif) ((const ip_addr_t*)&((netif)->netmask)) +/** @ingroup netif_ip4 */ +#define netif_ip_gw4(netif) ((const ip_addr_t*)&((netif)->gw)) +#endif /* LWIP_IPV4 */ + +#define netif_set_flags(netif, set_flags) do { (netif)->flags = (u8_t)((netif)->flags | (set_flags)); } while(0) +#define netif_clear_flags(netif, clr_flags) do { (netif)->flags = (u8_t)((netif)->flags & (u8_t)(~(clr_flags) & 0xff)); } while(0) +#define netif_is_flag_set(nefif, flag) (((netif)->flags & (flag)) != 0) + +void netif_set_up(struct netif *netif); +void netif_set_down(struct netif *netif); +/** @ingroup netif + * Ask if an interface is up + */ +#define netif_is_up(netif) (((netif)->flags & NETIF_FLAG_UP) ? (u8_t)1 : (u8_t)0) + +#if LWIP_NETIF_STATUS_CALLBACK +void netif_set_status_callback(struct netif *netif, netif_status_callback_fn status_callback); +#endif /* LWIP_NETIF_STATUS_CALLBACK */ +#if LWIP_NETIF_REMOVE_CALLBACK +void netif_set_remove_callback(struct netif *netif, netif_status_callback_fn remove_callback); +#endif /* LWIP_NETIF_REMOVE_CALLBACK */ + +void netif_set_link_up(struct netif *netif); +void netif_set_link_down(struct netif *netif); +/** Ask if a link is up */ +#define netif_is_link_up(netif) (((netif)->flags & NETIF_FLAG_LINK_UP) ? (u8_t)1 : (u8_t)0) + +#if LWIP_NETIF_LINK_CALLBACK +void netif_set_link_callback(struct netif *netif, netif_status_callback_fn link_callback); +#endif /* LWIP_NETIF_LINK_CALLBACK */ + +#if LWIP_NETIF_HOSTNAME +/** @ingroup netif */ +#define netif_set_hostname(netif, name) do { if((netif) != NULL) { (netif)->hostname = name; }}while(0) +/** @ingroup netif */ +#define netif_get_hostname(netif) (((netif) != NULL) ? 
((netif)->hostname) : NULL) +#endif /* LWIP_NETIF_HOSTNAME */ + +#if LWIP_IGMP +/** @ingroup netif */ +#define netif_set_igmp_mac_filter(netif, function) do { if((netif) != NULL) { (netif)->igmp_mac_filter = function; }}while(0) +#define netif_get_igmp_mac_filter(netif) (((netif) != NULL) ? ((netif)->igmp_mac_filter) : NULL) +#endif /* LWIP_IGMP */ + +#if LWIP_IPV6 && LWIP_IPV6_MLD +/** @ingroup netif */ +#define netif_set_mld_mac_filter(netif, function) do { if((netif) != NULL) { (netif)->mld_mac_filter = function; }}while(0) +#define netif_get_mld_mac_filter(netif) (((netif) != NULL) ? ((netif)->mld_mac_filter) : NULL) +#define netif_mld_mac_filter(netif, addr, action) do { if((netif) && (netif)->mld_mac_filter) { (netif)->mld_mac_filter((netif), (addr), (action)); }}while(0) +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + +#if ENABLE_LOOPBACK +err_t netif_loop_output(struct netif *netif, struct pbuf *p); +void netif_poll(struct netif *netif); +#if !LWIP_NETIF_LOOPBACK_MULTITHREADING +void netif_poll_all(void); +#endif /* !LWIP_NETIF_LOOPBACK_MULTITHREADING */ +#endif /* ENABLE_LOOPBACK */ + +err_t netif_input(struct pbuf *p, struct netif *inp); + +#if LWIP_IPV6 +/** @ingroup netif_ip6 */ +#define netif_ip_addr6(netif, i) ((const ip_addr_t*)(&((netif)->ip6_addr[i]))) +/** @ingroup netif_ip6 */ +#define netif_ip6_addr(netif, i) ((const ip6_addr_t*)ip_2_ip6(&((netif)->ip6_addr[i]))) +void netif_ip6_addr_set(struct netif *netif, s8_t addr_idx, const ip6_addr_t *addr6); +void netif_ip6_addr_set_parts(struct netif *netif, s8_t addr_idx, u32_t i0, u32_t i1, u32_t i2, u32_t i3); +#define netif_ip6_addr_state(netif, i) ((netif)->ip6_addr_state[i]) +void netif_ip6_addr_set_state(struct netif* netif, s8_t addr_idx, u8_t state); +s8_t netif_get_ip6_addr_match(struct netif *netif, const ip6_addr_t *ip6addr); +void netif_create_ip6_linklocal_address(struct netif *netif, u8_t from_mac_48bit); +err_t netif_add_ip6_address(struct netif *netif, const ip6_addr_t *ip6addr, s8_t *chosen_idx); +#define netif_set_ip6_autoconfig_enabled(netif, action) do { if(netif) { (netif)->ip6_autoconfig_enabled = (action); }}while(0) +#if LWIP_IPV6_ADDRESS_LIFETIMES +#define netif_ip6_addr_valid_life(netif, i) \ + (((netif) != NULL) ? ((netif)->ip6_addr_valid_life[i]) : IP6_ADDR_LIFE_STATIC) +#define netif_ip6_addr_set_valid_life(netif, i, secs) \ + do { if (netif != NULL) { (netif)->ip6_addr_valid_life[i] = (secs); }} while (0) +#define netif_ip6_addr_pref_life(netif, i) \ + (((netif) != NULL) ? 
((netif)->ip6_addr_pref_life[i]) : IP6_ADDR_LIFE_STATIC) +#define netif_ip6_addr_set_pref_life(netif, i, secs) \ + do { if (netif != NULL) { (netif)->ip6_addr_pref_life[i] = (secs); }} while (0) +#define netif_ip6_addr_isstatic(netif, i) \ + (netif_ip6_addr_valid_life((netif), (i)) == IP6_ADDR_LIFE_STATIC) +#else /* !LWIP_IPV6_ADDRESS_LIFETIMES */ +#define netif_ip6_addr_isstatic(netif, i) (1) /* all addresses are static */ +#endif /* !LWIP_IPV6_ADDRESS_LIFETIMES */ +#if LWIP_ND6_ALLOW_RA_UPDATES +#define netif_mtu6(netif) ((netif)->mtu6) +#else /* LWIP_ND6_ALLOW_RA_UPDATES */ +#define netif_mtu6(netif) ((netif)->mtu) +#endif /* LWIP_ND6_ALLOW_RA_UPDATES */ +#endif /* LWIP_IPV6 */ + +#if LWIP_NETIF_USE_HINTS +#define NETIF_SET_HINTS(netif, netifhint) (netif)->hints = (netifhint) +#define NETIF_RESET_HINTS(netif) (netif)->hints = NULL +#else /* LWIP_NETIF_USE_HINTS */ +#define NETIF_SET_HINTS(netif, netifhint) +#define NETIF_RESET_HINTS(netif) +#endif /* LWIP_NETIF_USE_HINTS */ + +u8_t netif_name_to_index(const char *name); +char * netif_index_to_name(u8_t idx, char *name); +struct netif* netif_get_by_index(u8_t idx); + +/* Interface indexes always start at 1 per RFC 3493, section 4, num starts at 0 (internal index is 0..254)*/ +#define netif_get_index(netif) ((u8_t)((netif)->num + 1)) +#define NETIF_NO_INDEX (0) + +/** + * @ingroup netif + * Extended netif status callback (NSC) reasons flags. + * May be extended in the future! + */ +typedef u16_t netif_nsc_reason_t; + +/* used for initialization only */ +#define LWIP_NSC_NONE 0x0000 +/** netif was added. arg: NULL. Called AFTER netif was added. */ +#define LWIP_NSC_NETIF_ADDED 0x0001 +/** netif was removed. arg: NULL. Called BEFORE netif is removed. */ +#define LWIP_NSC_NETIF_REMOVED 0x0002 +/** link changed */ +#define LWIP_NSC_LINK_CHANGED 0x0004 +/** netif administrative status changed.\n + * up is called AFTER netif is set up.\n + * down is called BEFORE the netif is actually set down. */ +#define LWIP_NSC_STATUS_CHANGED 0x0008 +/** IPv4 address has changed */ +#define LWIP_NSC_IPV4_ADDRESS_CHANGED 0x0010 +/** IPv4 gateway has changed */ +#define LWIP_NSC_IPV4_GATEWAY_CHANGED 0x0020 +/** IPv4 netmask has changed */ +#define LWIP_NSC_IPV4_NETMASK_CHANGED 0x0040 +/** called AFTER IPv4 address/gateway/netmask changes have been applied */ +#define LWIP_NSC_IPV4_SETTINGS_CHANGED 0x0080 +/** IPv6 address was added */ +#define LWIP_NSC_IPV6_SET 0x0100 +/** IPv6 address state has changed */ +#define LWIP_NSC_IPV6_ADDR_STATE_CHANGED 0x0200 + +/** @ingroup netif + * Argument supplied to netif_ext_callback_fn. 
+ */ +typedef union +{ + /** Args to LWIP_NSC_LINK_CHANGED callback */ + struct link_changed_s + { + /** 1: up; 0: down */ + u8_t state; + } link_changed; + /** Args to LWIP_NSC_STATUS_CHANGED callback */ + struct status_changed_s + { + /** 1: up; 0: down */ + u8_t state; + } status_changed; + /** Args to LWIP_NSC_IPV4_ADDRESS_CHANGED|LWIP_NSC_IPV4_GATEWAY_CHANGED|LWIP_NSC_IPV4_NETMASK_CHANGED|LWIP_NSC_IPV4_SETTINGS_CHANGED callback */ + struct ipv4_changed_s + { + /** Old IPv4 address */ + const ip_addr_t* old_address; + const ip_addr_t* old_netmask; + const ip_addr_t* old_gw; + } ipv4_changed; + /** Args to LWIP_NSC_IPV6_SET callback */ + struct ipv6_set_s + { + /** Index of changed IPv6 address */ + s8_t addr_index; + /** Old IPv6 address */ + const ip_addr_t* old_address; + } ipv6_set; + /** Args to LWIP_NSC_IPV6_ADDR_STATE_CHANGED callback */ + struct ipv6_addr_state_changed_s + { + /** Index of affected IPv6 address */ + s8_t addr_index; + /** Old IPv6 address state */ + u8_t old_state; + /** Affected IPv6 address */ + const ip_addr_t* address; + } ipv6_addr_state_changed; +} netif_ext_callback_args_t; + +/** + * @ingroup netif + * Function used for extended netif status callbacks + * Note: When parsing reason argument, keep in mind that more reasons may be added in the future! + * @param netif netif that is affected by change + * @param reason change reason + * @param args depends on reason, see reason description + */ +typedef void (*netif_ext_callback_fn)(struct netif* netif, netif_nsc_reason_t reason, const netif_ext_callback_args_t* args); + +#if LWIP_NETIF_EXT_STATUS_CALLBACK +struct netif_ext_callback; +typedef struct netif_ext_callback +{ + netif_ext_callback_fn callback_fn; + struct netif_ext_callback* next; +} netif_ext_callback_t; + +#define NETIF_DECLARE_EXT_CALLBACK(name) static netif_ext_callback_t name; +void netif_add_ext_callback(netif_ext_callback_t* callback, netif_ext_callback_fn fn); +void netif_remove_ext_callback(netif_ext_callback_t* callback); +void netif_invoke_ext_callback(struct netif* netif, netif_nsc_reason_t reason, const netif_ext_callback_args_t* args); +#else +#define NETIF_DECLARE_EXT_CALLBACK(name) +#define netif_add_ext_callback(callback, fn) +#define netif_remove_ext_callback(callback) +#define netif_invoke_ext_callback(netif, reason, args) +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_NETIF_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/netifapi.h b/Middlewares/Third_Party/LwIP/src/include/lwip/netifapi.h new file mode 100644 index 0000000..e063179 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/netifapi.h @@ -0,0 +1,161 @@ +/** + * @file + * netif API (to be used from non-TCPIP threads) + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
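Illustrative usage sketch (not from the lwIP sources): bringing up an IPv4 ethernet interface with the netif API declared in netif.h above, assuming a threaded build (tcpip_input as the input function). my_ethernetif_init(), low_level_output() and the addresses are hypothetical placeholders; a real driver fills in the MAC address and MTU from the hardware.

#include "lwip/netif.h"
#include "lwip/etharp.h"
#include "lwip/tcpip.h"

/* Hypothetical link-level transmit routine provided by the MAC driver. */
extern err_t low_level_output(struct netif *netif, struct pbuf *p);

static err_t my_ethernetif_init(struct netif *netif)
{
  netif->name[0] = 'e';
  netif->name[1] = 'n';
  netif->mtu = 1500;
  netif->hwaddr_len = 6;
  /* netif->hwaddr[0..5] = MAC address read from the hardware */
  netif->output = etharp_output;        /* IPv4 output resolves addresses via ARP */
  netif->linkoutput = low_level_output; /* raw ethernet frames */
  netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP;
  return ERR_OK;
}

static struct netif my_netif;

static void add_my_interface(void)
{
  ip4_addr_t ip, mask, gw;
  IP4_ADDR(&ip,   192, 168,   1, 10);
  IP4_ADDR(&mask, 255, 255, 255,  0);
  IP4_ADDR(&gw,   192, 168,   1,  1);
  netif_add(&my_netif, &ip, &mask, &gw, NULL, my_ethernetif_init, tcpip_input);
  netif_set_default(&my_netif);
  netif_set_up(&my_netif);
}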
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ +#ifndef LWIP_HDR_NETIFAPI_H +#define LWIP_HDR_NETIFAPI_H + +#include "lwip/opt.h" + +#if LWIP_NETIF_API /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/sys.h" +#include "lwip/netif.h" +#include "lwip/dhcp.h" +#include "lwip/autoip.h" +#include "lwip/priv/tcpip_priv.h" +#include "lwip/priv/api_msg.h" +#include "lwip/prot/ethernet.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* API for application */ +#if LWIP_ARP && LWIP_IPV4 +/* Used for netfiapi_arp_* APIs */ +enum netifapi_arp_entry { + NETIFAPI_ARP_PERM /* Permanent entry */ + /* Other entry types can be added here */ +}; + +/** @ingroup netifapi_arp */ +err_t netifapi_arp_add(const ip4_addr_t *ipaddr, struct eth_addr *ethaddr, enum netifapi_arp_entry type); +/** @ingroup netifapi_arp */ +err_t netifapi_arp_remove(const ip4_addr_t *ipaddr, enum netifapi_arp_entry type); +#endif /* LWIP_ARP && LWIP_IPV4 */ + +err_t netifapi_netif_add(struct netif *netif, +#if LWIP_IPV4 + const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw, +#endif /* LWIP_IPV4 */ + void *state, netif_init_fn init, netif_input_fn input); + +#if LWIP_IPV4 +err_t netifapi_netif_set_addr(struct netif *netif, const ip4_addr_t *ipaddr, + const ip4_addr_t *netmask, const ip4_addr_t *gw); +#endif /* LWIP_IPV4*/ + +err_t netifapi_netif_common(struct netif *netif, netifapi_void_fn voidfunc, + netifapi_errt_fn errtfunc); + +/** @ingroup netifapi_netif */ +err_t netifapi_netif_name_to_index(const char *name, u8_t *index); +/** @ingroup netifapi_netif */ +err_t netifapi_netif_index_to_name(u8_t index, char *name); + +/** @ingroup netifapi_netif + * @see netif_remove() + */ +#define netifapi_netif_remove(n) netifapi_netif_common(n, netif_remove, NULL) +/** @ingroup netifapi_netif + * @see netif_set_up() + */ +#define netifapi_netif_set_up(n) netifapi_netif_common(n, netif_set_up, NULL) +/** @ingroup netifapi_netif + * @see netif_set_down() + */ +#define netifapi_netif_set_down(n) netifapi_netif_common(n, netif_set_down, NULL) +/** @ingroup netifapi_netif + * @see netif_set_default() + */ +#define netifapi_netif_set_default(n) netifapi_netif_common(n, netif_set_default, NULL) +/** @ingroup netifapi_netif + * @see netif_set_link_up() + */ +#define netifapi_netif_set_link_up(n) netifapi_netif_common(n, netif_set_link_up, NULL) +/** @ingroup netifapi_netif + * @see netif_set_link_down() + */ +#define netifapi_netif_set_link_down(n) netifapi_netif_common(n, netif_set_link_down, NULL) + +/** + * @defgroup netifapi_dhcp4 DHCPv4 + * @ingroup netifapi + * To be called from non-TCPIP threads + */ +/** @ingroup netifapi_dhcp4 + * @see dhcp_start() + */ +#define netifapi_dhcp_start(n) netifapi_netif_common(n, NULL, dhcp_start) +/** + * @ingroup 
netifapi_dhcp4 + * @deprecated Use netifapi_dhcp_release_and_stop() instead. + */ +#define netifapi_dhcp_stop(n) netifapi_netif_common(n, dhcp_stop, NULL) +/** @ingroup netifapi_dhcp4 + * @see dhcp_inform() + */ +#define netifapi_dhcp_inform(n) netifapi_netif_common(n, dhcp_inform, NULL) +/** @ingroup netifapi_dhcp4 + * @see dhcp_renew() + */ +#define netifapi_dhcp_renew(n) netifapi_netif_common(n, NULL, dhcp_renew) +/** + * @ingroup netifapi_dhcp4 + * @deprecated Use netifapi_dhcp_release_and_stop() instead. + */ +#define netifapi_dhcp_release(n) netifapi_netif_common(n, NULL, dhcp_release) +/** @ingroup netifapi_dhcp4 + * @see dhcp_release_and_stop() + */ +#define netifapi_dhcp_release_and_stop(n) netifapi_netif_common(n, dhcp_release_and_stop, NULL) + +/** + * @defgroup netifapi_autoip AUTOIP + * @ingroup netifapi + * To be called from non-TCPIP threads + */ +/** @ingroup netifapi_autoip + * @see autoip_start() + */ +#define netifapi_autoip_start(n) netifapi_netif_common(n, NULL, autoip_start) +/** @ingroup netifapi_autoip + * @see autoip_stop() + */ +#define netifapi_autoip_stop(n) netifapi_netif_common(n, NULL, autoip_stop) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_NETIF_API */ + +#endif /* LWIP_HDR_NETIFAPI_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/opt.h b/Middlewares/Third_Party/LwIP/src/include/lwip/opt.h new file mode 100644 index 0000000..82c420c --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/opt.h @@ -0,0 +1,3519 @@ +/** + * @file + * + * lwIP Options Configuration + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/* + * NOTE: || defined __DOXYGEN__ is a workaround for doxygen bug - + * without this, doxygen does not see the actual #define + */ + +#if !defined LWIP_HDR_OPT_H +#define LWIP_HDR_OPT_H + +/* + * Include user defined options first. Anything not defined in these files + * will be set to standard values. Override anything you don't like! 
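Illustrative usage sketch (not from the lwIP sources): the netifapi_* wrappers above marshal each call into the TCPIP thread, so they can be used from ordinary application threads. A minimal sketch, assuming LWIP_NETIF_API and LWIP_DHCP are enabled and the netif has already been added.

#include "lwip/netifapi.h"

static void dhcp_from_app_thread(struct netif *netif)
{
  netifapi_netif_set_up(netif);
  netifapi_dhcp_start(netif);

  /* ... use the interface ... */

  netifapi_dhcp_release_and_stop(netif);
  netifapi_netif_set_down(netif);
}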
+ */ +#include "lwipopts.h" +#include "lwip/debug.h" + +/** + * @defgroup lwip_opts Options (lwipopts.h) + * @ingroup lwip + * + * @defgroup lwip_opts_debug Debugging + * @ingroup lwip_opts + * + * @defgroup lwip_opts_infrastructure Infrastructure + * @ingroup lwip_opts + * + * @defgroup lwip_opts_callback Callback-style APIs + * @ingroup lwip_opts + * + * @defgroup lwip_opts_threadsafe_apis Thread-safe APIs + * @ingroup lwip_opts + */ + + /* + ------------------------------------ + -------------- NO SYS -------------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_nosys NO_SYS + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * NO_SYS==1: Use lwIP without OS-awareness (no thread, semaphores, mutexes or + * mboxes). This means threaded APIs cannot be used (socket, netconn, + * i.e. everything in the 'api' folder), only the callback-style raw API is + * available (and you have to watch out for yourself that you don't access + * lwIP functions/structures from more than one context at a time!) + */ +#if !defined NO_SYS || defined __DOXYGEN__ +#define NO_SYS 0 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_timers Timers + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * LWIP_TIMERS==0: Drop support for sys_timeout and lwip-internal cyclic timers. + * (the array of lwip-internal cyclic timers is still provided) + * (check NO_SYS_NO_TIMERS for compatibility to old versions) + */ +#if !defined LWIP_TIMERS || defined __DOXYGEN__ +#ifdef NO_SYS_NO_TIMERS +#define LWIP_TIMERS (!NO_SYS || (NO_SYS && !NO_SYS_NO_TIMERS)) +#else +#define LWIP_TIMERS 1 +#endif +#endif + +/** + * LWIP_TIMERS_CUSTOM==1: Provide your own timer implementation. + * Function prototypes in timeouts.h and the array of lwip-internal cyclic timers + * are still included, but the implementation is not. The following functions + * will be required: sys_timeouts_init(), sys_timeout(), sys_untimeout(), + * sys_timeouts_mbox_fetch() + */ +#if !defined LWIP_TIMERS_CUSTOM || defined __DOXYGEN__ +#define LWIP_TIMERS_CUSTOM 0 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_memcpy memcpy + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * MEMCPY: override this if you have a faster implementation at hand than the + * one included in your C library + */ +#if !defined MEMCPY || defined __DOXYGEN__ +#define MEMCPY(dst,src,len) memcpy(dst,src,len) +#endif + +/** + * SMEMCPY: override this with care! Some compilers (e.g. gcc) can inline a + * call to memcpy() if the length is known at compile time and is small. + */ +#if !defined SMEMCPY || defined __DOXYGEN__ +#define SMEMCPY(dst,src,len) memcpy(dst,src,len) +#endif + +/** + * MEMMOVE: override this if you have a faster implementation at hand than the + * one included in your C library. lwIP currently uses MEMMOVE only when IPv6 + * fragmentation support is enabled. 
+ */ +#if !defined MEMMOVE || defined __DOXYGEN__ +#define MEMMOVE(dst,src,len) memmove(dst,src,len) +#endif +/** + * @} + */ + +/* + ------------------------------------ + ----------- Core locking ----------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_lock Core locking and MPU + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * LWIP_MPU_COMPATIBLE: enables special memory management mechanism + * which makes lwip able to work on MPU (Memory Protection Unit) system + * by not passing stack-pointers to other threads + * (this decreases performance as memory is allocated from pools instead + * of keeping it on the stack) + */ +#if !defined LWIP_MPU_COMPATIBLE || defined __DOXYGEN__ +#define LWIP_MPU_COMPATIBLE 0 +#endif + +/** + * LWIP_TCPIP_CORE_LOCKING + * Creates a global mutex that is held during TCPIP thread operations. + * Can be locked by client code to perform lwIP operations without changing + * into TCPIP thread using callbacks. See LOCK_TCPIP_CORE() and + * UNLOCK_TCPIP_CORE(). + * Your system should provide mutexes supporting priority inversion to use this. + */ +#if !defined LWIP_TCPIP_CORE_LOCKING || defined __DOXYGEN__ +#define LWIP_TCPIP_CORE_LOCKING 1 +#endif + +/** + * LWIP_TCPIP_CORE_LOCKING_INPUT: when LWIP_TCPIP_CORE_LOCKING is enabled, + * this lets tcpip_input() grab the mutex for input packets as well, + * instead of allocating a message and passing it to tcpip_thread. + * + * ATTENTION: this does not work when tcpip_input() is called from + * interrupt context! + */ +#if !defined LWIP_TCPIP_CORE_LOCKING_INPUT || defined __DOXYGEN__ +#define LWIP_TCPIP_CORE_LOCKING_INPUT 0 +#endif + +/** + * SYS_LIGHTWEIGHT_PROT==1: enable inter-task protection (and task-vs-interrupt + * protection) for certain critical regions during buffer allocation, deallocation + * and memory allocation and deallocation. + * ATTENTION: This is required when using lwIP from more than one context! If + * you disable this, you must be sure what you are doing! + */ +#if !defined SYS_LIGHTWEIGHT_PROT || defined __DOXYGEN__ +#define SYS_LIGHTWEIGHT_PROT 1 +#endif + +/** + * Macro/function to check whether lwIP's threading/locking + * requirements are satisfied during current function call. + * This macro usually calls a function that is implemented in the OS-dependent + * sys layer and performs the following checks: + * - Not in ISR (this should be checked for NO_SYS==1, too!) + * - If @ref LWIP_TCPIP_CORE_LOCKING = 1: TCPIP core lock is held + * - If @ref LWIP_TCPIP_CORE_LOCKING = 0: function is called from TCPIP thread + * @see @ref multithreading + */ +#if !defined LWIP_ASSERT_CORE_LOCKED || defined __DOXYGEN__ +#define LWIP_ASSERT_CORE_LOCKED() +#endif + +/** + * Called as first thing in the lwIP TCPIP thread. Can be used in conjunction + * with @ref LWIP_ASSERT_CORE_LOCKED to check core locking. + * @see @ref multithreading + */ +#if !defined LWIP_MARK_TCPIP_THREAD || defined __DOXYGEN__ +#define LWIP_MARK_TCPIP_THREAD() +#endif +/** + * @} + */ + +/* + ------------------------------------ + ---------- Memory options ---------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_mem Heap and memory pools + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * MEM_LIBC_MALLOC==1: Use malloc/free/realloc provided by your C-library + * instead of the lwip internal allocator. Can save code size if you + * already use it. 
+ */ +#if !defined MEM_LIBC_MALLOC || defined __DOXYGEN__ +#define MEM_LIBC_MALLOC 0 +#endif + +/** + * MEMP_MEM_MALLOC==1: Use mem_malloc/mem_free instead of the lwip pool allocator. + * Especially useful with MEM_LIBC_MALLOC but handle with care regarding execution + * speed (heap alloc can be much slower than pool alloc) and usage from interrupts + * (especially if your netif driver allocates PBUF_POOL pbufs for received frames + * from interrupt)! + * ATTENTION: Currently, this uses the heap for ALL pools (also for private pools, + * not only for internal pools defined in memp_std.h)! + */ +#if !defined MEMP_MEM_MALLOC || defined __DOXYGEN__ +#define MEMP_MEM_MALLOC 0 +#endif + +/** + * MEMP_MEM_INIT==1: Force use of memset to initialize pool memory. + * Useful if pool are moved in uninitialized section of memory. This will ensure + * default values in pcbs struct are well initialized in all conditions. + */ +#if !defined MEMP_MEM_INIT || defined __DOXYGEN__ +#define MEMP_MEM_INIT 0 +#endif + +/** + * MEM_ALIGNMENT: should be set to the alignment of the CPU + * 4 byte alignment -> \#define MEM_ALIGNMENT 4 + * 2 byte alignment -> \#define MEM_ALIGNMENT 2 + */ +#if !defined MEM_ALIGNMENT || defined __DOXYGEN__ +#define MEM_ALIGNMENT 1 +#endif + +/** + * MEM_SIZE: the size of the heap memory. If the application will send + * a lot of data that needs to be copied, this should be set high. + */ +#if !defined MEM_SIZE || defined __DOXYGEN__ +#define MEM_SIZE 1600 +#endif + +/** + * MEMP_OVERFLOW_CHECK: memp overflow protection reserves a configurable + * amount of bytes before and after each memp element in every pool and fills + * it with a prominent default value. + * MEMP_OVERFLOW_CHECK == 0 no checking + * MEMP_OVERFLOW_CHECK == 1 checks each element when it is freed + * MEMP_OVERFLOW_CHECK >= 2 checks each element in every pool every time + * memp_malloc() or memp_free() is called (useful but slow!) + */ +#if !defined MEMP_OVERFLOW_CHECK || defined __DOXYGEN__ +#define MEMP_OVERFLOW_CHECK 0 +#endif + +/** + * MEMP_SANITY_CHECK==1: run a sanity check after each memp_free() to make + * sure that there are no cycles in the linked lists. + */ +#if !defined MEMP_SANITY_CHECK || defined __DOXYGEN__ +#define MEMP_SANITY_CHECK 0 +#endif + +/** + * MEM_OVERFLOW_CHECK: mem overflow protection reserves a configurable + * amount of bytes before and after each heap allocation chunk and fills + * it with a prominent default value. + * MEM_OVERFLOW_CHECK == 0 no checking + * MEM_OVERFLOW_CHECK == 1 checks each element when it is freed + * MEM_OVERFLOW_CHECK >= 2 checks all heap elements every time + * mem_malloc() or mem_free() is called (useful but slow!) + */ +#if !defined MEM_OVERFLOW_CHECK || defined __DOXYGEN__ +#define MEM_OVERFLOW_CHECK 0 +#endif + +/** + * MEM_SANITY_CHECK==1: run a sanity check after each mem_free() to make + * sure that the linked list of heap elements is not corrupted. + */ +#if !defined MEM_SANITY_CHECK || defined __DOXYGEN__ +#define MEM_SANITY_CHECK 0 +#endif + +/** + * MEM_USE_POOLS==1: Use an alternative to malloc() by allocating from a set + * of memory pools of various sizes. When mem_malloc is called, an element of + * the smallest pool that can provide the length needed is returned. + * To use this, MEMP_USE_CUSTOM_POOLS also has to be enabled. 
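Illustrative usage sketch (not from the lwIP sources): every option in opt.h is meant to be overridden from the project's lwipopts.h rather than edited here. A small fragment with example values only (not recommendations):

/* lwipopts.h (fragment) */
#define NO_SYS                0
#define SYS_LIGHTWEIGHT_PROT  1
#define MEM_ALIGNMENT         4
#define MEM_SIZE              (16 * 1024)
#define MEMP_NUM_TCP_PCB      8
#define PBUF_POOL_SIZE        24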
+ */ +#if !defined MEM_USE_POOLS || defined __DOXYGEN__ +#define MEM_USE_POOLS 0 +#endif + +/** + * MEM_USE_POOLS_TRY_BIGGER_POOL==1: if one malloc-pool is empty, try the next + * bigger pool - WARNING: THIS MIGHT WASTE MEMORY but it can make a system more + * reliable. */ +#if !defined MEM_USE_POOLS_TRY_BIGGER_POOL || defined __DOXYGEN__ +#define MEM_USE_POOLS_TRY_BIGGER_POOL 0 +#endif + +/** + * MEMP_USE_CUSTOM_POOLS==1: whether to include a user file lwippools.h + * that defines additional pools beyond the "standard" ones required + * by lwIP. If you set this to 1, you must have lwippools.h in your + * include path somewhere. + */ +#if !defined MEMP_USE_CUSTOM_POOLS || defined __DOXYGEN__ +#define MEMP_USE_CUSTOM_POOLS 0 +#endif + +/** + * Set this to 1 if you want to free PBUF_RAM pbufs (or call mem_free()) from + * interrupt context (or another context that doesn't allow waiting for a + * semaphore). + * If set to 1, mem_malloc will be protected by a semaphore and SYS_ARCH_PROTECT, + * while mem_free will only use SYS_ARCH_PROTECT. mem_malloc SYS_ARCH_UNPROTECTs + * with each loop so that mem_free can run. + * + * ATTENTION: As you can see from the above description, this leads to dis-/ + * enabling interrupts often, which can be slow! Also, on low memory, mem_malloc + * can need longer. + * + * If you don't want that, at least for NO_SYS=0, you can still use the following + * functions to enqueue a deallocation call which then runs in the tcpip_thread + * context: + * - pbuf_free_callback(p); + * - mem_free_callback(m); + */ +#if !defined LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT || defined __DOXYGEN__ +#define LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT 0 +#endif +/** + * @} + */ + +/* + ------------------------------------------------ + ---------- Internal Memory Pool Sizes ---------- + ------------------------------------------------ +*/ +/** + * @defgroup lwip_opts_memp Internal memory pools + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * MEMP_NUM_PBUF: the number of memp struct pbufs (used for PBUF_ROM and PBUF_REF). + * If the application sends a lot of data out of ROM (or other static memory), + * this should be set high. + */ +#if !defined MEMP_NUM_PBUF || defined __DOXYGEN__ +#define MEMP_NUM_PBUF 16 +#endif + +/** + * MEMP_NUM_RAW_PCB: Number of raw connection PCBs + * (requires the LWIP_RAW option) + */ +#if !defined MEMP_NUM_RAW_PCB || defined __DOXYGEN__ +#define MEMP_NUM_RAW_PCB 4 +#endif + +/** + * MEMP_NUM_UDP_PCB: the number of UDP protocol control blocks. One + * per active UDP "connection". + * (requires the LWIP_UDP option) + */ +#if !defined MEMP_NUM_UDP_PCB || defined __DOXYGEN__ +#define MEMP_NUM_UDP_PCB 4 +#endif + +/** + * MEMP_NUM_TCP_PCB: the number of simultaneously active TCP connections. + * (requires the LWIP_TCP option) + */ +#if !defined MEMP_NUM_TCP_PCB || defined __DOXYGEN__ +#define MEMP_NUM_TCP_PCB 5 +#endif + +/** + * MEMP_NUM_TCP_PCB_LISTEN: the number of listening TCP connections. + * (requires the LWIP_TCP option) + */ +#if !defined MEMP_NUM_TCP_PCB_LISTEN || defined __DOXYGEN__ +#define MEMP_NUM_TCP_PCB_LISTEN 8 +#endif + +/** + * MEMP_NUM_TCP_SEG: the number of simultaneously queued TCP segments. + * (requires the LWIP_TCP option) + */ +#if !defined MEMP_NUM_TCP_SEG || defined __DOXYGEN__ +#define MEMP_NUM_TCP_SEG 16 +#endif + +/** + * MEMP_NUM_ALTCP_PCB: the number of simultaneously active altcp layer pcbs. + * (requires the LWIP_ALTCP option) + * Connections with multiple layers require more than one altcp_pcb (e.g. 
TLS + * over TCP requires 2 altcp_pcbs, one for TLS and one for TCP). + */ +#if !defined MEMP_NUM_ALTCP_PCB || defined __DOXYGEN__ +#define MEMP_NUM_ALTCP_PCB MEMP_NUM_TCP_PCB +#endif + +/** + * MEMP_NUM_REASSDATA: the number of IP packets simultaneously queued for + * reassembly (whole packets, not fragments!) + */ +#if !defined MEMP_NUM_REASSDATA || defined __DOXYGEN__ +#define MEMP_NUM_REASSDATA 5 +#endif + +/** + * MEMP_NUM_FRAG_PBUF: the number of IP fragments simultaneously sent + * (fragments, not whole packets!). + * This is only used with LWIP_NETIF_TX_SINGLE_PBUF==0 and only has to be > 1 + * with DMA-enabled MACs where the packet is not yet sent when netif->output + * returns. + */ +#if !defined MEMP_NUM_FRAG_PBUF || defined __DOXYGEN__ +#define MEMP_NUM_FRAG_PBUF 15 +#endif + +/** + * MEMP_NUM_ARP_QUEUE: the number of simultaneously queued outgoing + * packets (pbufs) that are waiting for an ARP request (to resolve + * their destination address) to finish. + * (requires the ARP_QUEUEING option) + */ +#if !defined MEMP_NUM_ARP_QUEUE || defined __DOXYGEN__ +#define MEMP_NUM_ARP_QUEUE 30 +#endif + +/** + * MEMP_NUM_IGMP_GROUP: The number of multicast groups whose network interfaces + * can be members at the same time (one per netif - allsystems group -, plus one + * per netif membership). + * (requires the LWIP_IGMP option) + */ +#if !defined MEMP_NUM_IGMP_GROUP || defined __DOXYGEN__ +#define MEMP_NUM_IGMP_GROUP 8 +#endif + +/** + * The number of sys timeouts used by the core stack (not apps) + * The default number of timeouts is calculated here for all enabled modules. + */ +#define LWIP_NUM_SYS_TIMEOUT_INTERNAL (LWIP_TCP + IP_REASSEMBLY + LWIP_ARP + (2*LWIP_DHCP) + LWIP_AUTOIP + LWIP_IGMP + LWIP_DNS + PPP_NUM_TIMEOUTS + (LWIP_IPV6 * (1 + LWIP_IPV6_REASS + LWIP_IPV6_MLD))) + +/** + * MEMP_NUM_SYS_TIMEOUT: the number of simultaneously active timeouts. + * The default number of timeouts is calculated here for all enabled modules. + * The formula expects settings to be either '0' or '1'. + */ +#if !defined MEMP_NUM_SYS_TIMEOUT || defined __DOXYGEN__ +#define MEMP_NUM_SYS_TIMEOUT LWIP_NUM_SYS_TIMEOUT_INTERNAL +#endif + +/** + * MEMP_NUM_NETBUF: the number of struct netbufs. + * (only needed if you use the sequential API, like api_lib.c) + */ +#if !defined MEMP_NUM_NETBUF || defined __DOXYGEN__ +#define MEMP_NUM_NETBUF 2 +#endif + +/** + * MEMP_NUM_NETCONN: the number of struct netconns. + * (only needed if you use the sequential API, like api_lib.c) + */ +#if !defined MEMP_NUM_NETCONN || defined __DOXYGEN__ +#define MEMP_NUM_NETCONN 4 +#endif + +/** + * MEMP_NUM_SELECT_CB: the number of struct lwip_select_cb. + * (Only needed if you have LWIP_MPU_COMPATIBLE==1 and use the socket API. + * In that case, you need one per thread calling lwip_select.) + */ +#if !defined MEMP_NUM_SELECT_CB || defined __DOXYGEN__ +#define MEMP_NUM_SELECT_CB 4 +#endif + +/** + * MEMP_NUM_TCPIP_MSG_API: the number of struct tcpip_msg, which are used + * for callback/timeout API communication. + * (only needed if you use tcpip.c) + */ +#if !defined MEMP_NUM_TCPIP_MSG_API || defined __DOXYGEN__ +#define MEMP_NUM_TCPIP_MSG_API 8 +#endif + +/** + * MEMP_NUM_TCPIP_MSG_INPKT: the number of struct tcpip_msg, which are used + * for incoming packets. 
+ * (only needed if you use tcpip.c) + */ +#if !defined MEMP_NUM_TCPIP_MSG_INPKT || defined __DOXYGEN__ +#define MEMP_NUM_TCPIP_MSG_INPKT 8 +#endif + +/** + * MEMP_NUM_NETDB: the number of concurrently running lwip_addrinfo() calls + * (before freeing the corresponding memory using lwip_freeaddrinfo()). + */ +#if !defined MEMP_NUM_NETDB || defined __DOXYGEN__ +#define MEMP_NUM_NETDB 1 +#endif + +/** + * MEMP_NUM_LOCALHOSTLIST: the number of host entries in the local host list + * if DNS_LOCAL_HOSTLIST_IS_DYNAMIC==1. + */ +#if !defined MEMP_NUM_LOCALHOSTLIST || defined __DOXYGEN__ +#define MEMP_NUM_LOCALHOSTLIST 1 +#endif + +/** + * PBUF_POOL_SIZE: the number of buffers in the pbuf pool. + */ +#if !defined PBUF_POOL_SIZE || defined __DOXYGEN__ +#define PBUF_POOL_SIZE 16 +#endif + +/** MEMP_NUM_API_MSG: the number of concurrently active calls to various + * socket, netconn, and tcpip functions + */ +#if !defined MEMP_NUM_API_MSG || defined __DOXYGEN__ +#define MEMP_NUM_API_MSG MEMP_NUM_TCPIP_MSG_API +#endif + +/** MEMP_NUM_DNS_API_MSG: the number of concurrently active calls to netconn_gethostbyname + */ +#if !defined MEMP_NUM_DNS_API_MSG || defined __DOXYGEN__ +#define MEMP_NUM_DNS_API_MSG MEMP_NUM_TCPIP_MSG_API +#endif + +/** MEMP_NUM_SOCKET_SETGETSOCKOPT_DATA: the number of concurrently active calls + * to getsockopt/setsockopt + */ +#if !defined MEMP_NUM_SOCKET_SETGETSOCKOPT_DATA || defined __DOXYGEN__ +#define MEMP_NUM_SOCKET_SETGETSOCKOPT_DATA MEMP_NUM_TCPIP_MSG_API +#endif + +/** MEMP_NUM_NETIFAPI_MSG: the number of concurrently active calls to the + * netifapi functions + */ +#if !defined MEMP_NUM_NETIFAPI_MSG || defined __DOXYGEN__ +#define MEMP_NUM_NETIFAPI_MSG MEMP_NUM_TCPIP_MSG_API +#endif +/** + * @} + */ + +/* + --------------------------------- + ---------- ARP options ---------- + --------------------------------- +*/ +/** + * @defgroup lwip_opts_arp ARP + * @ingroup lwip_opts_ipv4 + * @{ + */ +/** + * LWIP_ARP==1: Enable ARP functionality. + */ +#if !defined LWIP_ARP || defined __DOXYGEN__ +#define LWIP_ARP 1 +#endif + +/** + * ARP_TABLE_SIZE: Number of active MAC-IP address pairs cached. + */ +#if !defined ARP_TABLE_SIZE || defined __DOXYGEN__ +#define ARP_TABLE_SIZE 10 +#endif + +/** the time an ARP entry stays valid after its last update, + * for ARP_TMR_INTERVAL = 1000, this is + * (60 * 5) seconds = 5 minutes. + */ +#if !defined ARP_MAXAGE || defined __DOXYGEN__ +#define ARP_MAXAGE 300 +#endif + +/** + * ARP_QUEUEING==1: Multiple outgoing packets are queued during hardware address + * resolution. By default, only the most recent packet is queued per IP address. + * This is sufficient for most protocols and mainly reduces TCP connection + * startup time. Set this to 1 if you know your application sends more than one + * packet in a row to an IP address that is not in the ARP cache. + */ +#if !defined ARP_QUEUEING || defined __DOXYGEN__ +#define ARP_QUEUEING 0 +#endif + +/** The maximum number of packets which may be queued for each + * unresolved address by other network layers. Defaults to 3, 0 means disabled. + * Old packets are dropped, new packets are queued. + */ +#if !defined ARP_QUEUE_LEN || defined __DOXYGEN__ +#define ARP_QUEUE_LEN 3 +#endif + +/** + * ETHARP_SUPPORT_VLAN==1: support receiving and sending ethernet packets with + * VLAN header. See the description of LWIP_HOOK_VLAN_CHECK and + * LWIP_HOOK_VLAN_SET hooks to check/set VLAN headers. + * Additionally, you can define ETHARP_VLAN_CHECK to an u16_t VLAN ID to check. 
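+ * For example (illustrative lwipopts.h snippet, the VLAN ID 100 is arbitrary):
+ *   #define ETHARP_SUPPORT_VLAN  1
+ *   #define ETHARP_VLAN_CHECK    100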
+ * If ETHARP_VLAN_CHECK is defined, only VLAN-traffic for this VLAN is accepted. + * If ETHARP_VLAN_CHECK is not defined, all traffic is accepted. + * Alternatively, define a function/define ETHARP_VLAN_CHECK_FN(eth_hdr, vlan) + * that returns 1 to accept a packet or 0 to drop a packet. + */ +#if !defined ETHARP_SUPPORT_VLAN || defined __DOXYGEN__ +#define ETHARP_SUPPORT_VLAN 0 +#endif + +/** LWIP_ETHERNET==1: enable ethernet support even though ARP might be disabled + */ +#if !defined LWIP_ETHERNET || defined __DOXYGEN__ +#define LWIP_ETHERNET LWIP_ARP +#endif + +/** ETH_PAD_SIZE: number of bytes added before the ethernet header to ensure + * alignment of payload after that header. Since the header is 14 bytes long, + * without this padding e.g. addresses in the IP header will not be aligned + * on a 32-bit boundary, so setting this to 2 can speed up 32-bit-platforms. + */ +#if !defined ETH_PAD_SIZE || defined __DOXYGEN__ +#define ETH_PAD_SIZE 0 +#endif + +/** ETHARP_SUPPORT_STATIC_ENTRIES==1: enable code to support static ARP table + * entries (using etharp_add_static_entry/etharp_remove_static_entry). + */ +#if !defined ETHARP_SUPPORT_STATIC_ENTRIES || defined __DOXYGEN__ +#define ETHARP_SUPPORT_STATIC_ENTRIES 0 +#endif + +/** ETHARP_TABLE_MATCH_NETIF==1: Match netif for ARP table entries. + * If disabled, duplicate IP address on multiple netifs are not supported + * (but this should only occur for AutoIP). + */ +#if !defined ETHARP_TABLE_MATCH_NETIF || defined __DOXYGEN__ +#define ETHARP_TABLE_MATCH_NETIF !LWIP_SINGLE_NETIF +#endif +/** + * @} + */ + +/* + -------------------------------- + ---------- IP options ---------- + -------------------------------- +*/ +/** + * @defgroup lwip_opts_ipv4 IPv4 + * @ingroup lwip_opts + * @{ + */ +/** + * LWIP_IPV4==1: Enable IPv4 + */ +#if !defined LWIP_IPV4 || defined __DOXYGEN__ +#define LWIP_IPV4 1 +#endif + +/** + * IP_FORWARD==1: Enables the ability to forward IP packets across network + * interfaces. If you are going to run lwIP on a device with only one network + * interface, define this to 0. + */ +#if !defined IP_FORWARD || defined __DOXYGEN__ +#define IP_FORWARD 0 +#endif + +/** + * IP_REASSEMBLY==1: Reassemble incoming fragmented IP packets. Note that + * this option does not affect outgoing packet sizes, which can be controlled + * via IP_FRAG. + */ +#if !defined IP_REASSEMBLY || defined __DOXYGEN__ +#define IP_REASSEMBLY 1 +#endif + +/** + * IP_FRAG==1: Fragment outgoing IP packets if their size exceeds MTU. Note + * that this option does not affect incoming packet sizes, which can be + * controlled via IP_REASSEMBLY. + */ +#if !defined IP_FRAG || defined __DOXYGEN__ +#define IP_FRAG 1 +#endif + +#if !LWIP_IPV4 +/* disable IPv4 extensions when IPv4 is disabled */ +#undef IP_FORWARD +#define IP_FORWARD 0 +#undef IP_REASSEMBLY +#define IP_REASSEMBLY 0 +#undef IP_FRAG +#define IP_FRAG 0 +#endif /* !LWIP_IPV4 */ + +/** + * IP_OPTIONS_ALLOWED: Defines the behavior for IP options. + * IP_OPTIONS_ALLOWED==0: All packets with IP options are dropped. + * IP_OPTIONS_ALLOWED==1: IP options are allowed (but not parsed). + */ +#if !defined IP_OPTIONS_ALLOWED || defined __DOXYGEN__ +#define IP_OPTIONS_ALLOWED 1 +#endif + +/** + * IP_REASS_MAXAGE: Maximum time (in multiples of IP_TMR_INTERVAL - so seconds, normally) + * a fragmented IP packet waits for all fragments to arrive. If not all fragments arrived + * in this time, the whole packet is discarded. 
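+ * With the usual one-second IP_TMR_INTERVAL, an override such as
+ *   #define IP_REASS_MAXAGE 3
+ * in lwipopts.h would discard incomplete datagrams after roughly 3 seconds
+ * (illustrative value only).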
+ */ +#if !defined IP_REASS_MAXAGE || defined __DOXYGEN__ +#define IP_REASS_MAXAGE 15 +#endif + +/** + * IP_REASS_MAX_PBUFS: Total maximum amount of pbufs waiting to be reassembled. + * Since the received pbufs are enqueued, be sure to configure + * PBUF_POOL_SIZE > IP_REASS_MAX_PBUFS so that the stack is still able to receive + * packets even if the maximum amount of fragments is enqueued for reassembly! + * When IPv4 *and* IPv6 are enabled, this even changes to + * (PBUF_POOL_SIZE > 2 * IP_REASS_MAX_PBUFS)! + */ +#if !defined IP_REASS_MAX_PBUFS || defined __DOXYGEN__ +#define IP_REASS_MAX_PBUFS 10 +#endif + +/** + * IP_DEFAULT_TTL: Default value for Time-To-Live used by transport layers. + */ +#if !defined IP_DEFAULT_TTL || defined __DOXYGEN__ +#define IP_DEFAULT_TTL 255 +#endif + +/** + * IP_SOF_BROADCAST=1: Use the SOF_BROADCAST field to enable broadcast + * filter per pcb on udp and raw send operations. To enable broadcast filter + * on recv operations, you also have to set IP_SOF_BROADCAST_RECV=1. + */ +#if !defined IP_SOF_BROADCAST || defined __DOXYGEN__ +#define IP_SOF_BROADCAST 0 +#endif + +/** + * IP_SOF_BROADCAST_RECV (requires IP_SOF_BROADCAST=1) enable the broadcast + * filter on recv operations. + */ +#if !defined IP_SOF_BROADCAST_RECV || defined __DOXYGEN__ +#define IP_SOF_BROADCAST_RECV 0 +#endif + +/** + * IP_FORWARD_ALLOW_TX_ON_RX_NETIF==1: allow ip_forward() to send packets back + * out on the netif where it was received. This should only be used for + * wireless networks. + * ATTENTION: When this is 1, make sure your netif driver correctly marks incoming + * link-layer-broadcast/multicast packets as such using the corresponding pbuf flags! + */ +#if !defined IP_FORWARD_ALLOW_TX_ON_RX_NETIF || defined __DOXYGEN__ +#define IP_FORWARD_ALLOW_TX_ON_RX_NETIF 0 +#endif +/** + * @} + */ + +/* + ---------------------------------- + ---------- ICMP options ---------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_icmp ICMP + * @ingroup lwip_opts_ipv4 + * @{ + */ +/** + * LWIP_ICMP==1: Enable ICMP module inside the IP stack. + * Be careful, disable that make your product non-compliant to RFC1122 + */ +#if !defined LWIP_ICMP || defined __DOXYGEN__ +#define LWIP_ICMP 1 +#endif + +/** + * ICMP_TTL: Default value for Time-To-Live used by ICMP packets. + */ +#if !defined ICMP_TTL || defined __DOXYGEN__ +#define ICMP_TTL IP_DEFAULT_TTL +#endif + +/** + * LWIP_BROADCAST_PING==1: respond to broadcast pings (default is unicast only) + */ +#if !defined LWIP_BROADCAST_PING || defined __DOXYGEN__ +#define LWIP_BROADCAST_PING 0 +#endif + +/** + * LWIP_MULTICAST_PING==1: respond to multicast pings (default is unicast only) + */ +#if !defined LWIP_MULTICAST_PING || defined __DOXYGEN__ +#define LWIP_MULTICAST_PING 0 +#endif +/** + * @} + */ + +/* + --------------------------------- + ---------- RAW options ---------- + --------------------------------- +*/ +/** + * @defgroup lwip_opts_raw RAW + * @ingroup lwip_opts_callback + * @{ + */ +/** + * LWIP_RAW==1: Enable application layer to hook into the IP layer itself. + */ +#if !defined LWIP_RAW || defined __DOXYGEN__ +#define LWIP_RAW 0 +#endif + +/** + * LWIP_RAW==1: Enable application layer to hook into the IP layer itself. 
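+ * Minimal usage sketch (my_recv_fn is an application-supplied raw_recv_fn):
+ *   struct raw_pcb *pcb = raw_new(IP_PROTO_ICMP);
+ *   raw_recv(pcb, my_recv_fn, NULL);
+ *   raw_bind(pcb, IP_ADDR_ANY);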
+ */ +#if !defined RAW_TTL || defined __DOXYGEN__ +#define RAW_TTL IP_DEFAULT_TTL +#endif +/** + * @} + */ + +/* + ---------------------------------- + ---------- DHCP options ---------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_dhcp DHCP + * @ingroup lwip_opts_ipv4 + * @{ + */ +/** + * LWIP_DHCP==1: Enable DHCP module. + */ +#if !defined LWIP_DHCP || defined __DOXYGEN__ +#define LWIP_DHCP 0 +#endif +#if !LWIP_IPV4 +/* disable DHCP when IPv4 is disabled */ +#undef LWIP_DHCP +#define LWIP_DHCP 0 +#endif /* !LWIP_IPV4 */ + +/** + * DHCP_DOES_ARP_CHECK==1: Do an ARP check on the offered address. + */ +#if !defined DHCP_DOES_ARP_CHECK || defined __DOXYGEN__ +#define DHCP_DOES_ARP_CHECK (LWIP_DHCP && LWIP_ARP) +#endif + +/** + * LWIP_DHCP_BOOTP_FILE==1: Store offered_si_addr and boot_file_name. + */ +#if !defined LWIP_DHCP_BOOTP_FILE || defined __DOXYGEN__ +#define LWIP_DHCP_BOOTP_FILE 0 +#endif + +/** + * LWIP_DHCP_GETS_NTP==1: Request NTP servers with discover/select. For each + * response packet, an callback is called, which has to be provided by the port: + * void dhcp_set_ntp_servers(u8_t num_ntp_servers, ip_addr_t* ntp_server_addrs); +*/ +#if !defined LWIP_DHCP_GET_NTP_SRV || defined __DOXYGEN__ +#define LWIP_DHCP_GET_NTP_SRV 0 +#endif + +/** + * The maximum of NTP servers requested + */ +#if !defined LWIP_DHCP_MAX_NTP_SERVERS || defined __DOXYGEN__ +#define LWIP_DHCP_MAX_NTP_SERVERS 1 +#endif + +/** + * LWIP_DHCP_MAX_DNS_SERVERS > 0: Request DNS servers with discover/select. + * DNS servers received in the response are passed to DNS via @ref dns_setserver() + * (up to the maximum limit defined here). + */ +#if !defined LWIP_DHCP_MAX_DNS_SERVERS || defined __DOXYGEN__ +#define LWIP_DHCP_MAX_DNS_SERVERS DNS_MAX_SERVERS +#endif +/** + * @} + */ + +/* + ------------------------------------ + ---------- AUTOIP options ---------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_autoip AUTOIP + * @ingroup lwip_opts_ipv4 + * @{ + */ +/** + * LWIP_AUTOIP==1: Enable AUTOIP module. + */ +#if !defined LWIP_AUTOIP || defined __DOXYGEN__ +#define LWIP_AUTOIP 0 +#endif +#if !LWIP_IPV4 +/* disable AUTOIP when IPv4 is disabled */ +#undef LWIP_AUTOIP +#define LWIP_AUTOIP 0 +#endif /* !LWIP_IPV4 */ + +/** + * LWIP_DHCP_AUTOIP_COOP==1: Allow DHCP and AUTOIP to be both enabled on + * the same interface at the same time. + */ +#if !defined LWIP_DHCP_AUTOIP_COOP || defined __DOXYGEN__ +#define LWIP_DHCP_AUTOIP_COOP 0 +#endif + +/** + * LWIP_DHCP_AUTOIP_COOP_TRIES: Set to the number of DHCP DISCOVER probes + * that should be sent before falling back on AUTOIP (the DHCP client keeps + * running in this case). This can be set as low as 1 to get an AutoIP address + * very quickly, but you should be prepared to handle a changing IP address + * when DHCP overrides AutoIP. + */ +#if !defined LWIP_DHCP_AUTOIP_COOP_TRIES || defined __DOXYGEN__ +#define LWIP_DHCP_AUTOIP_COOP_TRIES 9 +#endif +/** + * @} + */ + +/* + ---------------------------------- + ----- SNMP MIB2 support ----- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_mib2 SNMP MIB2 callbacks + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * LWIP_MIB2_CALLBACKS==1: Turn on SNMP MIB2 callbacks. + * Turn this on to get callbacks needed to implement MIB2. + * Usually MIB2_STATS should be enabled, too. 
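+ * A typical SNMP agent build therefore enables both (illustrative):
+ *   #define LWIP_MIB2_CALLBACKS  1
+ *   #define MIB2_STATS           1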
+ */ +#if !defined LWIP_MIB2_CALLBACKS || defined __DOXYGEN__ +#define LWIP_MIB2_CALLBACKS 0 +#endif +/** + * @} + */ + +/* + ---------------------------------- + -------- Multicast options ------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_multicast Multicast + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * LWIP_MULTICAST_TX_OPTIONS==1: Enable multicast TX support like the socket options + * IP_MULTICAST_TTL/IP_MULTICAST_IF/IP_MULTICAST_LOOP, as well as (currently only) + * core support for the corresponding IPv6 options. + */ +#if !defined LWIP_MULTICAST_TX_OPTIONS || defined __DOXYGEN__ +#define LWIP_MULTICAST_TX_OPTIONS ((LWIP_IGMP || LWIP_IPV6_MLD) && (LWIP_UDP || LWIP_RAW)) +#endif +/** + * @} + */ + +/* + ---------------------------------- + ---------- IGMP options ---------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_igmp IGMP + * @ingroup lwip_opts_ipv4 + * @{ + */ +/** + * LWIP_IGMP==1: Turn on IGMP module. + */ +#if !defined LWIP_IGMP || defined __DOXYGEN__ +#define LWIP_IGMP 0 +#endif +#if !LWIP_IPV4 +#undef LWIP_IGMP +#define LWIP_IGMP 0 +#endif +/** + * @} + */ + +/* + ---------------------------------- + ---------- DNS options ----------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_dns DNS + * @ingroup lwip_opts_callback + * @{ + */ +/** + * LWIP_DNS==1: Turn on DNS module. UDP must be available for DNS + * transport. + */ +#if !defined LWIP_DNS || defined __DOXYGEN__ +#define LWIP_DNS 0 +#endif + +/** DNS maximum number of entries to maintain locally. */ +#if !defined DNS_TABLE_SIZE || defined __DOXYGEN__ +#define DNS_TABLE_SIZE 4 +#endif + +/** DNS maximum host name length supported in the name table. */ +#if !defined DNS_MAX_NAME_LENGTH || defined __DOXYGEN__ +#define DNS_MAX_NAME_LENGTH 256 +#endif + +/** The maximum of DNS servers + * The first server can be initialized automatically by defining + * DNS_SERVER_ADDRESS(ipaddr), where 'ipaddr' is an 'ip_addr_t*' + */ +#if !defined DNS_MAX_SERVERS || defined __DOXYGEN__ +#define DNS_MAX_SERVERS 2 +#endif + +/** DNS maximum number of retries when asking for a name, before "timeout". */ +#if !defined DNS_MAX_RETRIES || defined __DOXYGEN__ +#define DNS_MAX_RETRIES 4 +#endif + +/** DNS do a name checking between the query and the response. */ +#if !defined DNS_DOES_NAME_CHECK || defined __DOXYGEN__ +#define DNS_DOES_NAME_CHECK 1 +#endif + +/** LWIP_DNS_SECURE: controls the security level of the DNS implementation + * Use all DNS security features by default. + * This is overridable but should only be needed by very small targets + * or when using against non standard DNS servers. */ +#if !defined LWIP_DNS_SECURE || defined __DOXYGEN__ +#define LWIP_DNS_SECURE (LWIP_DNS_SECURE_RAND_XID | LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING | LWIP_DNS_SECURE_RAND_SRC_PORT) +#endif + +/* A list of DNS security features follows */ +#define LWIP_DNS_SECURE_RAND_XID 1 +#define LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING 2 +#define LWIP_DNS_SECURE_RAND_SRC_PORT 4 + +/** DNS_LOCAL_HOSTLIST: Implements a local host-to-address list. 
If enabled, you have to define an initializer: + * \#define DNS_LOCAL_HOSTLIST_INIT {DNS_LOCAL_HOSTLIST_ELEM("host_ip4", IPADDR4_INIT_BYTES(1,2,3,4)), \ + * DNS_LOCAL_HOSTLIST_ELEM("host_ip6", IPADDR6_INIT_HOST(123, 234, 345, 456)} + * + * Instead, you can also use an external function: + * \#define DNS_LOOKUP_LOCAL_EXTERN(x) extern err_t my_lookup_function(const char *name, ip_addr_t *addr, u8_t dns_addrtype) + * that looks up the IP address and returns ERR_OK if found (LWIP_DNS_ADDRTYPE_xxx is passed in dns_addrtype). + */ +#if !defined DNS_LOCAL_HOSTLIST || defined __DOXYGEN__ +#define DNS_LOCAL_HOSTLIST 0 +#endif /* DNS_LOCAL_HOSTLIST */ + +/** If this is turned on, the local host-list can be dynamically changed + * at runtime. */ +#if !defined DNS_LOCAL_HOSTLIST_IS_DYNAMIC || defined __DOXYGEN__ +#define DNS_LOCAL_HOSTLIST_IS_DYNAMIC 0 +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + +/** Set this to 1 to enable querying ".local" names via mDNS + * using a One-Shot Multicast DNS Query */ +#if !defined LWIP_DNS_SUPPORT_MDNS_QUERIES || defined __DOXYGEN__ +#define LWIP_DNS_SUPPORT_MDNS_QUERIES 0 +#endif +/** + * @} + */ + +/* + --------------------------------- + ---------- UDP options ---------- + --------------------------------- +*/ +/** + * @defgroup lwip_opts_udp UDP + * @ingroup lwip_opts_callback + * @{ + */ +/** + * LWIP_UDP==1: Turn on UDP. + */ +#if !defined LWIP_UDP || defined __DOXYGEN__ +#define LWIP_UDP 1 +#endif + +/** + * LWIP_UDPLITE==1: Turn on UDP-Lite. (Requires LWIP_UDP) + */ +#if !defined LWIP_UDPLITE || defined __DOXYGEN__ +#define LWIP_UDPLITE 0 +#endif + +/** + * UDP_TTL: Default Time-To-Live value. + */ +#if !defined UDP_TTL || defined __DOXYGEN__ +#define UDP_TTL IP_DEFAULT_TTL +#endif + +/** + * LWIP_NETBUF_RECVINFO==1: append destination addr and port to every netbuf. + */ +#if !defined LWIP_NETBUF_RECVINFO || defined __DOXYGEN__ +#define LWIP_NETBUF_RECVINFO 0 +#endif +/** + * @} + */ + +/* + --------------------------------- + ---------- TCP options ---------- + --------------------------------- +*/ +/** + * @defgroup lwip_opts_tcp TCP + * @ingroup lwip_opts_callback + * @{ + */ +/** + * LWIP_TCP==1: Turn on TCP. + */ +#if !defined LWIP_TCP || defined __DOXYGEN__ +#define LWIP_TCP 1 +#endif + +/** + * TCP_TTL: Default Time-To-Live value. + */ +#if !defined TCP_TTL || defined __DOXYGEN__ +#define TCP_TTL IP_DEFAULT_TTL +#endif + +/** + * TCP_WND: The size of a TCP window. This must be at least + * (2 * TCP_MSS) for things to work well. + * ATTENTION: when using TCP_RCV_SCALE, TCP_WND is the total size + * with scaling applied. Maximum window value in the TCP header + * will be TCP_WND >> TCP_RCV_SCALE + */ +#if !defined TCP_WND || defined __DOXYGEN__ +#define TCP_WND (4 * TCP_MSS) +#endif + +/** + * TCP_MAXRTX: Maximum number of retransmissions of data segments. + */ +#if !defined TCP_MAXRTX || defined __DOXYGEN__ +#define TCP_MAXRTX 12 +#endif + +/** + * TCP_SYNMAXRTX: Maximum number of retransmissions of SYN segments. + */ +#if !defined TCP_SYNMAXRTX || defined __DOXYGEN__ +#define TCP_SYNMAXRTX 6 +#endif + +/** + * TCP_QUEUE_OOSEQ==1: TCP will queue segments that arrive out of order. + * Define to 0 if your device is low on memory. + */ +#if !defined TCP_QUEUE_OOSEQ || defined __DOXYGEN__ +#define TCP_QUEUE_OOSEQ LWIP_TCP +#endif + +/** + * LWIP_TCP_SACK_OUT==1: TCP will support sending selective acknowledgements (SACKs). 
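+ * Illustrative lwipopts.h snippet enabling SACK generation (values are
+ * examples only; LWIP_TCP_MAX_SACK_NUM is described below):
+ *   #define LWIP_TCP_SACK_OUT      1
+ *   #define LWIP_TCP_MAX_SACK_NUM  4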
+ */ +#if !defined LWIP_TCP_SACK_OUT || defined __DOXYGEN__ +#define LWIP_TCP_SACK_OUT 0 +#endif + +/** + * LWIP_TCP_MAX_SACK_NUM: The maximum number of SACK values to include in TCP segments. + * Must be at least 1, but is only used if LWIP_TCP_SACK_OUT is enabled. + * NOTE: Even though we never send more than 3 or 4 SACK ranges in a single segment + * (depending on other options), setting this option to values greater than 4 is not pointless. + * This is basically the max number of SACK ranges we want to keep track of. + * As new data is delivered, some of the SACK ranges may be removed or merged. + * In that case some of those older SACK ranges may be used again. + * The amount of memory used to store SACK ranges is LWIP_TCP_MAX_SACK_NUM * 8 bytes for each TCP PCB. + */ +#if !defined LWIP_TCP_MAX_SACK_NUM || defined __DOXYGEN__ +#define LWIP_TCP_MAX_SACK_NUM 4 +#endif + +/** + * TCP_MSS: TCP Maximum segment size. (default is 536, a conservative default, + * you might want to increase this.) + * For the receive side, this MSS is advertised to the remote side + * when opening a connection. For the transmit size, this MSS sets + * an upper limit on the MSS advertised by the remote host. + */ +#if !defined TCP_MSS || defined __DOXYGEN__ +#define TCP_MSS 536 +#endif + +/** + * TCP_CALCULATE_EFF_SEND_MSS: "The maximum size of a segment that TCP really + * sends, the 'effective send MSS,' MUST be the smaller of the send MSS (which + * reflects the available reassembly buffer size at the remote host) and the + * largest size permitted by the IP layer" (RFC 1122) + * Setting this to 1 enables code that checks TCP_MSS against the MTU of the + * netif used for a connection and limits the MSS if it would be too big otherwise. + */ +#if !defined TCP_CALCULATE_EFF_SEND_MSS || defined __DOXYGEN__ +#define TCP_CALCULATE_EFF_SEND_MSS 1 +#endif + + +/** + * TCP_SND_BUF: TCP sender buffer space (bytes). + * To achieve good performance, this should be at least 2 * TCP_MSS. + */ +#if !defined TCP_SND_BUF || defined __DOXYGEN__ +#define TCP_SND_BUF (2 * TCP_MSS) +#endif + +/** + * TCP_SND_QUEUELEN: TCP sender buffer space (pbufs). This must be at least + * as much as (2 * TCP_SND_BUF/TCP_MSS) for things to work. + */ +#if !defined TCP_SND_QUEUELEN || defined __DOXYGEN__ +#define TCP_SND_QUEUELEN ((4 * (TCP_SND_BUF) + (TCP_MSS - 1))/(TCP_MSS)) +#endif + +/** + * TCP_SNDLOWAT: TCP writable space (bytes). This must be less than + * TCP_SND_BUF. It is the amount of space which must be available in the + * TCP snd_buf for select to return writable (combined with TCP_SNDQUEUELOWAT). + */ +#if !defined TCP_SNDLOWAT || defined __DOXYGEN__ +#define TCP_SNDLOWAT LWIP_MIN(LWIP_MAX(((TCP_SND_BUF)/2), (2 * TCP_MSS) + 1), (TCP_SND_BUF) - 1) +#endif + +/** + * TCP_SNDQUEUELOWAT: TCP writable bufs (pbuf count). This must be less + * than TCP_SND_QUEUELEN. If the number of pbufs queued on a pcb drops below + * this number, select returns writable (combined with TCP_SNDLOWAT). + */ +#if !defined TCP_SNDQUEUELOWAT || defined __DOXYGEN__ +#define TCP_SNDQUEUELOWAT LWIP_MAX(((TCP_SND_QUEUELEN)/2), 5) +#endif + +/** + * TCP_OOSEQ_MAX_BYTES: The default maximum number of bytes queued on ooseq per + * pcb if TCP_OOSEQ_BYTES_LIMIT is not defined. Default is 0 (no limit). + * Only valid for TCP_QUEUE_OOSEQ==1. + */ +#if !defined TCP_OOSEQ_MAX_BYTES || defined __DOXYGEN__ +#define TCP_OOSEQ_MAX_BYTES 0 +#endif + +/** + * TCP_OOSEQ_BYTES_LIMIT(pcb): Return the maximum number of bytes to be queued + * on ooseq per pcb, given the pcb. 
Only valid for TCP_QUEUE_OOSEQ==1 && + * TCP_OOSEQ_MAX_BYTES==1. + * Use this to override TCP_OOSEQ_MAX_BYTES to a dynamic value per pcb. + */ +#if !defined TCP_OOSEQ_BYTES_LIMIT +#if TCP_OOSEQ_MAX_BYTES +#define TCP_OOSEQ_BYTES_LIMIT(pcb) TCP_OOSEQ_MAX_BYTES +#elif defined __DOXYGEN__ +#define TCP_OOSEQ_BYTES_LIMIT(pcb) +#endif +#endif + +/** + * TCP_OOSEQ_MAX_PBUFS: The default maximum number of pbufs queued on ooseq per + * pcb if TCP_OOSEQ_BYTES_LIMIT is not defined. Default is 0 (no limit). + * Only valid for TCP_QUEUE_OOSEQ==1. + */ +#if !defined TCP_OOSEQ_MAX_PBUFS || defined __DOXYGEN__ +#define TCP_OOSEQ_MAX_PBUFS 0 +#endif + +/** + * TCP_OOSEQ_PBUFS_LIMIT(pcb): Return the maximum number of pbufs to be queued + * on ooseq per pcb, given the pcb. Only valid for TCP_QUEUE_OOSEQ==1 && + * TCP_OOSEQ_MAX_PBUFS==1. + * Use this to override TCP_OOSEQ_MAX_PBUFS to a dynamic value per pcb. + */ +#if !defined TCP_OOSEQ_PBUFS_LIMIT +#if TCP_OOSEQ_MAX_PBUFS +#define TCP_OOSEQ_PBUFS_LIMIT(pcb) TCP_OOSEQ_MAX_PBUFS +#elif defined __DOXYGEN__ +#define TCP_OOSEQ_PBUFS_LIMIT(pcb) +#endif +#endif + +/** + * TCP_LISTEN_BACKLOG: Enable the backlog option for tcp listen pcb. + */ +#if !defined TCP_LISTEN_BACKLOG || defined __DOXYGEN__ +#define TCP_LISTEN_BACKLOG 0 +#endif + +/** + * The maximum allowed backlog for TCP listen netconns. + * This backlog is used unless another is explicitly specified. + * 0xff is the maximum (u8_t). + */ +#if !defined TCP_DEFAULT_LISTEN_BACKLOG || defined __DOXYGEN__ +#define TCP_DEFAULT_LISTEN_BACKLOG 0xff +#endif + +/** + * TCP_OVERSIZE: The maximum number of bytes that tcp_write may + * allocate ahead of time in an attempt to create shorter pbuf chains + * for transmission. The meaningful range is 0 to TCP_MSS. Some + * suggested values are: + * + * 0: Disable oversized allocation. Each tcp_write() allocates a new + pbuf (old behaviour). + * 1: Allocate size-aligned pbufs with minimal excess. Use this if your + * scatter-gather DMA requires aligned fragments. + * 128: Limit the pbuf/memory overhead to 20%. + * TCP_MSS: Try to create unfragmented TCP packets. + * TCP_MSS/4: Try to create 4 fragments or less per TCP packet. + */ +#if !defined TCP_OVERSIZE || defined __DOXYGEN__ +#define TCP_OVERSIZE TCP_MSS +#endif + +/** + * LWIP_TCP_TIMESTAMPS==1: support the TCP timestamp option. + * The timestamp option is currently only used to help remote hosts, it is not + * really used locally. Therefore, it is only enabled when a TS option is + * received in the initial SYN packet from a remote host. + */ +#if !defined LWIP_TCP_TIMESTAMPS || defined __DOXYGEN__ +#define LWIP_TCP_TIMESTAMPS 0 +#endif + +/** + * TCP_WND_UPDATE_THRESHOLD: difference in window to trigger an + * explicit window update + */ +#if !defined TCP_WND_UPDATE_THRESHOLD || defined __DOXYGEN__ +#define TCP_WND_UPDATE_THRESHOLD LWIP_MIN((TCP_WND / 4), (TCP_MSS * 4)) +#endif + +/** + * LWIP_EVENT_API and LWIP_CALLBACK_API: Only one of these should be set to 1. + * LWIP_EVENT_API==1: The user defines lwip_tcp_event() to receive all + * events (accept, sent, etc) that happen in the system. + * LWIP_CALLBACK_API==1: The PCB callback function is called directly + * for the event. This is the default. 
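+ *
+ * With the callback API, handlers are registered per pcb, e.g. (sketch;
+ * my_recv_cb/my_sent_cb are application functions matching tcp_recv_fn and
+ * tcp_sent_fn):
+ *   tcp_arg(pcb, my_state);
+ *   tcp_recv(pcb, my_recv_cb);
+ *   tcp_sent(pcb, my_sent_cb);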
+ */ +#if !defined(LWIP_EVENT_API) && !defined(LWIP_CALLBACK_API) || defined __DOXYGEN__ +#define LWIP_EVENT_API 0 +#define LWIP_CALLBACK_API 1 +#else +#ifndef LWIP_EVENT_API +#define LWIP_EVENT_API 0 +#endif +#ifndef LWIP_CALLBACK_API +#define LWIP_CALLBACK_API 0 +#endif +#endif + +/** + * LWIP_WND_SCALE and TCP_RCV_SCALE: + * Set LWIP_WND_SCALE to 1 to enable window scaling. + * Set TCP_RCV_SCALE to the desired scaling factor (shift count in the + * range of [0..14]). + * When LWIP_WND_SCALE is enabled but TCP_RCV_SCALE is 0, we can use a large + * send window while having a small receive window only. + */ +#if !defined LWIP_WND_SCALE || defined __DOXYGEN__ +#define LWIP_WND_SCALE 0 +#define TCP_RCV_SCALE 0 +#endif + +/** + * LWIP_TCP_PCB_NUM_EXT_ARGS: + * When this is > 0, every tcp pcb (including listen pcb) includes a number of + * additional argument entries in an array (see tcp_ext_arg_alloc_id) + */ +#if !defined LWIP_TCP_PCB_NUM_EXT_ARGS || defined __DOXYGEN__ +#define LWIP_TCP_PCB_NUM_EXT_ARGS 0 +#endif + +/** LWIP_ALTCP==1: enable the altcp API. + * altcp is an abstraction layer that prevents applications linking against the + * tcp.h functions but provides the same functionality. It is used to e.g. add + * SSL/TLS or proxy-connect support to an application written for the tcp callback + * API without that application knowing the protocol details. + * + * With LWIP_ALTCP==0, applications written against the altcp API can still be + * compiled but are directly linked against the tcp.h callback API and then + * cannot use layered protocols. + * + * See @ref altcp_api + */ +#if !defined LWIP_ALTCP || defined __DOXYGEN__ +#define LWIP_ALTCP 0 +#endif + +/** LWIP_ALTCP_TLS==1: enable TLS support for altcp API. + * This needs a port of the functions in altcp_tls.h to a TLS library. + * A port to ARM mbedtls is provided with lwIP, see apps/altcp_tls/ directory + * and LWIP_ALTCP_TLS_MBEDTLS option. + */ +#if !defined LWIP_ALTCP_TLS || defined __DOXYGEN__ +#define LWIP_ALTCP_TLS 0 +#endif + +/** + * @} + */ + +/* + ---------------------------------- + ---------- Pbuf options ---------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_pbuf PBUF + * @ingroup lwip_opts + * @{ + */ +/** + * PBUF_LINK_HLEN: the number of bytes that should be allocated for a + * link level header. The default is 14, the standard value for + * Ethernet. + */ +#if !defined PBUF_LINK_HLEN || defined __DOXYGEN__ +#if defined LWIP_HOOK_VLAN_SET && !defined __DOXYGEN__ +#define PBUF_LINK_HLEN (18 + ETH_PAD_SIZE) +#else /* LWIP_HOOK_VLAN_SET */ +#define PBUF_LINK_HLEN (14 + ETH_PAD_SIZE) +#endif /* LWIP_HOOK_VLAN_SET */ +#endif + +/** + * PBUF_LINK_ENCAPSULATION_HLEN: the number of bytes that should be allocated + * for an additional encapsulation header before ethernet headers (e.g. 802.11) + */ +#if !defined PBUF_LINK_ENCAPSULATION_HLEN || defined __DOXYGEN__ +#define PBUF_LINK_ENCAPSULATION_HLEN 0 +#endif + +/** + * PBUF_POOL_BUFSIZE: the size of each pbuf in the pbuf pool. The default is + * designed to accommodate single full size TCP frame in one pbuf, including + * TCP_MSS, IP header, and link header. + */ +#if !defined PBUF_POOL_BUFSIZE || defined __DOXYGEN__ +#define PBUF_POOL_BUFSIZE LWIP_MEM_ALIGN_SIZE(TCP_MSS+40+PBUF_LINK_ENCAPSULATION_HLEN+PBUF_LINK_HLEN) +#endif + +/** + * LWIP_PBUF_REF_T: Refcount type in pbuf. + * Default width of u8_t can be increased if 255 refs are not enough for you. 
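+ * For example, an application holding many long-lived references to the same
+ * pbuf could widen the counter in lwipopts.h (illustrative):
+ *   #define LWIP_PBUF_REF_T u16_t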
+ */ +#if !defined LWIP_PBUF_REF_T || defined __DOXYGEN__ +#define LWIP_PBUF_REF_T u8_t +#endif +/** + * @} + */ + +/* + ------------------------------------------------ + ---------- Network Interfaces options ---------- + ------------------------------------------------ +*/ +/** + * @defgroup lwip_opts_netif NETIF + * @ingroup lwip_opts + * @{ + */ +/** + * LWIP_SINGLE_NETIF==1: use a single netif only. This is the common case for + * small real-life targets. Some code like routing etc. can be left out. + */ +#if !defined LWIP_SINGLE_NETIF || defined __DOXYGEN__ +#define LWIP_SINGLE_NETIF 0 +#endif + +/** + * LWIP_NETIF_HOSTNAME==1: use DHCP_OPTION_HOSTNAME with netif's hostname + * field. + */ +#if !defined LWIP_NETIF_HOSTNAME || defined __DOXYGEN__ +#define LWIP_NETIF_HOSTNAME 0 +#endif + +/** + * LWIP_NETIF_API==1: Support netif api (in netifapi.c) + */ +#if !defined LWIP_NETIF_API || defined __DOXYGEN__ +#define LWIP_NETIF_API 0 +#endif + +/** + * LWIP_NETIF_STATUS_CALLBACK==1: Support a callback function whenever an interface + * changes its up/down status (i.e., due to DHCP IP acquisition) + */ +#if !defined LWIP_NETIF_STATUS_CALLBACK || defined __DOXYGEN__ +#define LWIP_NETIF_STATUS_CALLBACK 0 +#endif + +/** + * LWIP_NETIF_EXT_STATUS_CALLBACK==1: Support an extended callback function + * for several netif related event that supports multiple subscribers. + * @see netif_ext_status_callback + */ +#if !defined LWIP_NETIF_EXT_STATUS_CALLBACK || defined __DOXYGEN__ +#define LWIP_NETIF_EXT_STATUS_CALLBACK 0 +#endif + +/** + * LWIP_NETIF_LINK_CALLBACK==1: Support a callback function from an interface + * whenever the link changes (i.e., link down) + */ +#if !defined LWIP_NETIF_LINK_CALLBACK || defined __DOXYGEN__ +#define LWIP_NETIF_LINK_CALLBACK 0 +#endif + +/** + * LWIP_NETIF_REMOVE_CALLBACK==1: Support a callback function that is called + * when a netif has been removed + */ +#if !defined LWIP_NETIF_REMOVE_CALLBACK || defined __DOXYGEN__ +#define LWIP_NETIF_REMOVE_CALLBACK 0 +#endif + +/** + * LWIP_NETIF_HWADDRHINT==1: Cache link-layer-address hints (e.g. table + * indices) in struct netif. TCP and UDP can make use of this to prevent + * scanning the ARP table for every sent packet. While this is faster for big + * ARP tables or many concurrent connections, it might be counterproductive + * if you have a tiny ARP table or if there never are concurrent connections. + */ +#if !defined LWIP_NETIF_HWADDRHINT || defined __DOXYGEN__ +#define LWIP_NETIF_HWADDRHINT 0 +#endif + +/** + * LWIP_NETIF_TX_SINGLE_PBUF: if this is set to 1, lwIP *tries* to put all data + * to be sent into one single pbuf. This is for compatibility with DMA-enabled + * MACs that do not support scatter-gather. + * Beware that this might involve CPU-memcpy before transmitting that would not + * be needed without this flag! Use this only if you need to! + * + * ATTENTION: a driver should *NOT* rely on getting single pbufs but check TX + * pbufs for being in one piece. If not, @ref pbuf_clone can be used to get + * a single pbuf: + * if (p->next != NULL) { + * struct pbuf *q = pbuf_clone(PBUF_RAW, PBUF_RAM, p); + * if (q == NULL) { + * return ERR_MEM; + * } + * p = q; ATTENTION: do NOT free the old 'p' as the ref belongs to the caller! + * } + */ +#if !defined LWIP_NETIF_TX_SINGLE_PBUF || defined __DOXYGEN__ +#define LWIP_NETIF_TX_SINGLE_PBUF 0 +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + +/** + * LWIP_NUM_NETIF_CLIENT_DATA: Number of clients that may store + * data in client_data member array of struct netif (max. 256). 
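+ * Usage sketch, assuming this option is set > 0 ('my_data' is an
+ * application pointer):
+ *   u8_t id = netif_alloc_client_data_id();
+ *   netif_set_client_data(netif, id, my_data);
+ *   my_data = netif_get_client_data(netif, id);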
+ */ +#if !defined LWIP_NUM_NETIF_CLIENT_DATA || defined __DOXYGEN__ +#define LWIP_NUM_NETIF_CLIENT_DATA 0 +#endif +/** + * @} + */ + +/* + ------------------------------------ + ---------- LOOPIF options ---------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_loop Loopback interface + * @ingroup lwip_opts_netif + * @{ + */ +/** + * LWIP_HAVE_LOOPIF==1: Support loop interface (127.0.0.1). + * This is only needed when no real netifs are available. If at least one other + * netif is available, loopback traffic uses this netif. + */ +#if !defined LWIP_HAVE_LOOPIF || defined __DOXYGEN__ +#define LWIP_HAVE_LOOPIF (LWIP_NETIF_LOOPBACK && !LWIP_SINGLE_NETIF) +#endif + +/** + * LWIP_LOOPIF_MULTICAST==1: Support multicast/IGMP on loop interface (127.0.0.1). + */ +#if !defined LWIP_LOOPIF_MULTICAST || defined __DOXYGEN__ +#define LWIP_LOOPIF_MULTICAST 0 +#endif + +/** + * LWIP_NETIF_LOOPBACK==1: Support sending packets with a destination IP + * address equal to the netif IP address, looping them back up the stack. + */ +#if !defined LWIP_NETIF_LOOPBACK || defined __DOXYGEN__ +#define LWIP_NETIF_LOOPBACK 0 +#endif + +/** + * LWIP_LOOPBACK_MAX_PBUFS: Maximum number of pbufs on queue for loopback + * sending for each netif (0 = disabled) + */ +#if !defined LWIP_LOOPBACK_MAX_PBUFS || defined __DOXYGEN__ +#define LWIP_LOOPBACK_MAX_PBUFS 0 +#endif + +/** + * LWIP_NETIF_LOOPBACK_MULTITHREADING: Indicates whether threading is enabled in + * the system, as netifs must change how they behave depending on this setting + * for the LWIP_NETIF_LOOPBACK option to work. + * Setting this is needed to avoid reentering non-reentrant functions like + * tcp_input(). + * LWIP_NETIF_LOOPBACK_MULTITHREADING==1: Indicates that the user is using a + * multithreaded environment like tcpip.c. In this case, netif->input() + * is called directly. + * LWIP_NETIF_LOOPBACK_MULTITHREADING==0: Indicates a polling (or NO_SYS) setup. + * The packets are put on a list and netif_poll() must be called in + * the main application loop. + */ +#if !defined LWIP_NETIF_LOOPBACK_MULTITHREADING || defined __DOXYGEN__ +#define LWIP_NETIF_LOOPBACK_MULTITHREADING (!NO_SYS) +#endif +/** + * @} + */ + +/* + ------------------------------------ + ---------- Thread options ---------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_thread Threading + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * TCPIP_THREAD_NAME: The name assigned to the main tcpip thread. + */ +#if !defined TCPIP_THREAD_NAME || defined __DOXYGEN__ +#define TCPIP_THREAD_NAME "tcpip_thread" +#endif + +/** + * TCPIP_THREAD_STACKSIZE: The stack size used by the main tcpip thread. + * The stack size value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined TCPIP_THREAD_STACKSIZE || defined __DOXYGEN__ +#define TCPIP_THREAD_STACKSIZE 0 +#endif + +/** + * TCPIP_THREAD_PRIO: The priority assigned to the main tcpip thread. + * The priority value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined TCPIP_THREAD_PRIO || defined __DOXYGEN__ +#define TCPIP_THREAD_PRIO 1 +#endif + +/** + * TCPIP_MBOX_SIZE: The mailbox size for the tcpip thread messages + * The queue size value itself is platform-dependent, but is passed to + * sys_mbox_new() when tcpip_init is called. 
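+ * Illustrative RTOS-port values (the right numbers depend entirely on the
+ * port and application):
+ *   #define TCPIP_THREAD_STACKSIZE  1024
+ *   #define TCPIP_THREAD_PRIO       3
+ *   #define TCPIP_MBOX_SIZE         8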
+ */ +#if !defined TCPIP_MBOX_SIZE || defined __DOXYGEN__ +#define TCPIP_MBOX_SIZE 0 +#endif + +/** + * Define this to something that triggers a watchdog. This is called from + * tcpip_thread after processing a message. + */ +#if !defined LWIP_TCPIP_THREAD_ALIVE || defined __DOXYGEN__ +#define LWIP_TCPIP_THREAD_ALIVE() +#endif + +/** + * SLIPIF_THREAD_NAME: The name assigned to the slipif_loop thread. + */ +#if !defined SLIPIF_THREAD_NAME || defined __DOXYGEN__ +#define SLIPIF_THREAD_NAME "slipif_loop" +#endif + +/** + * SLIP_THREAD_STACKSIZE: The stack size used by the slipif_loop thread. + * The stack size value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined SLIPIF_THREAD_STACKSIZE || defined __DOXYGEN__ +#define SLIPIF_THREAD_STACKSIZE 0 +#endif + +/** + * SLIPIF_THREAD_PRIO: The priority assigned to the slipif_loop thread. + * The priority value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined SLIPIF_THREAD_PRIO || defined __DOXYGEN__ +#define SLIPIF_THREAD_PRIO 1 +#endif + +/** + * DEFAULT_THREAD_NAME: The name assigned to any other lwIP thread. + */ +#if !defined DEFAULT_THREAD_NAME || defined __DOXYGEN__ +#define DEFAULT_THREAD_NAME "lwIP" +#endif + +/** + * DEFAULT_THREAD_STACKSIZE: The stack size used by any other lwIP thread. + * The stack size value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined DEFAULT_THREAD_STACKSIZE || defined __DOXYGEN__ +#define DEFAULT_THREAD_STACKSIZE 0 +#endif + +/** + * DEFAULT_THREAD_PRIO: The priority assigned to any other lwIP thread. + * The priority value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined DEFAULT_THREAD_PRIO || defined __DOXYGEN__ +#define DEFAULT_THREAD_PRIO 1 +#endif + +/** + * DEFAULT_RAW_RECVMBOX_SIZE: The mailbox size for the incoming packets on a + * NETCONN_RAW. The queue size value itself is platform-dependent, but is passed + * to sys_mbox_new() when the recvmbox is created. + */ +#if !defined DEFAULT_RAW_RECVMBOX_SIZE || defined __DOXYGEN__ +#define DEFAULT_RAW_RECVMBOX_SIZE 0 +#endif + +/** + * DEFAULT_UDP_RECVMBOX_SIZE: The mailbox size for the incoming packets on a + * NETCONN_UDP. The queue size value itself is platform-dependent, but is passed + * to sys_mbox_new() when the recvmbox is created. + */ +#if !defined DEFAULT_UDP_RECVMBOX_SIZE || defined __DOXYGEN__ +#define DEFAULT_UDP_RECVMBOX_SIZE 0 +#endif + +/** + * DEFAULT_TCP_RECVMBOX_SIZE: The mailbox size for the incoming packets on a + * NETCONN_TCP. The queue size value itself is platform-dependent, but is passed + * to sys_mbox_new() when the recvmbox is created. + */ +#if !defined DEFAULT_TCP_RECVMBOX_SIZE || defined __DOXYGEN__ +#define DEFAULT_TCP_RECVMBOX_SIZE 0 +#endif + +/** + * DEFAULT_ACCEPTMBOX_SIZE: The mailbox size for the incoming connections. + * The queue size value itself is platform-dependent, but is passed to + * sys_mbox_new() when the acceptmbox is created. 
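+ * Ports whose sys_mbox_new() honours the size argument typically set all of
+ * these mailbox sizes to small non-zero values, for example (illustrative):
+ *   #define DEFAULT_UDP_RECVMBOX_SIZE  8
+ *   #define DEFAULT_TCP_RECVMBOX_SIZE  8
+ *   #define DEFAULT_ACCEPTMBOX_SIZE    4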
+ */ +#if !defined DEFAULT_ACCEPTMBOX_SIZE || defined __DOXYGEN__ +#define DEFAULT_ACCEPTMBOX_SIZE 0 +#endif +/** + * @} + */ + +/* + ---------------------------------------------- + ---------- Sequential layer options ---------- + ---------------------------------------------- +*/ +/** + * @defgroup lwip_opts_netconn Netconn + * @ingroup lwip_opts_threadsafe_apis + * @{ + */ +/** + * LWIP_NETCONN==1: Enable Netconn API (require to use api_lib.c) + */ +#if !defined LWIP_NETCONN || defined __DOXYGEN__ +#define LWIP_NETCONN 1 +#endif + +/** LWIP_TCPIP_TIMEOUT==1: Enable tcpip_timeout/tcpip_untimeout to create + * timers running in tcpip_thread from another thread. + */ +#if !defined LWIP_TCPIP_TIMEOUT || defined __DOXYGEN__ +#define LWIP_TCPIP_TIMEOUT 0 +#endif + +/** LWIP_NETCONN_SEM_PER_THREAD==1: Use one (thread-local) semaphore per + * thread calling socket/netconn functions instead of allocating one + * semaphore per netconn (and per select etc.) + * ATTENTION: a thread-local semaphore for API calls is needed: + * - LWIP_NETCONN_THREAD_SEM_GET() returning a sys_sem_t* + * - LWIP_NETCONN_THREAD_SEM_ALLOC() creating the semaphore + * - LWIP_NETCONN_THREAD_SEM_FREE() freeing the semaphore + * The latter 2 can be invoked up by calling netconn_thread_init()/netconn_thread_cleanup(). + * Ports may call these for threads created with sys_thread_new(). + */ +#if !defined LWIP_NETCONN_SEM_PER_THREAD || defined __DOXYGEN__ +#define LWIP_NETCONN_SEM_PER_THREAD 0 +#endif + +/** LWIP_NETCONN_FULLDUPLEX==1: Enable code that allows reading from one thread, + * writing from a 2nd thread and closing from a 3rd thread at the same time. + * ATTENTION: This is currently really alpha! Some requirements: + * - LWIP_NETCONN_SEM_PER_THREAD==1 is required to use one socket/netconn from + * multiple threads at once + * - sys_mbox_free() has to unblock receive tasks waiting on recvmbox/acceptmbox + * and prevent a task pending on this during/after deletion + */ +#if !defined LWIP_NETCONN_FULLDUPLEX || defined __DOXYGEN__ +#define LWIP_NETCONN_FULLDUPLEX 0 +#endif +/** + * @} + */ + +/* + ------------------------------------ + ---------- Socket options ---------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_socket Sockets + * @ingroup lwip_opts_threadsafe_apis + * @{ + */ +/** + * LWIP_SOCKET==1: Enable Socket API (require to use sockets.c) + */ +#if !defined LWIP_SOCKET || defined __DOXYGEN__ +#define LWIP_SOCKET 1 +#endif + +/** + * LWIP_COMPAT_SOCKETS==1: Enable BSD-style sockets functions names through defines. + * LWIP_COMPAT_SOCKETS==2: Same as ==1 but correctly named functions are created. + * While this helps code completion, it might conflict with existing libraries. + * (only used if you use sockets.c) + */ +#if !defined LWIP_COMPAT_SOCKETS || defined __DOXYGEN__ +#define LWIP_COMPAT_SOCKETS 1 +#endif + +/** + * LWIP_POSIX_SOCKETS_IO_NAMES==1: Enable POSIX-style sockets functions names. + * Disable this option if you use a POSIX operating system that uses the same + * names (read, write & close). (only used if you use sockets.c) + */ +#if !defined LWIP_POSIX_SOCKETS_IO_NAMES || defined __DOXYGEN__ +#define LWIP_POSIX_SOCKETS_IO_NAMES 1 +#endif + +/** + * LWIP_SOCKET_OFFSET==n: Increases the file descriptor number created by LwIP with n. + * This can be useful when there are multiple APIs which create file descriptors. 
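+ * For instance, a port that reserves descriptors 0..2 for a libc could start
+ * lwIP sockets at 3 (illustrative):
+ *   #define LWIP_SOCKET_OFFSET 3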
+ * When they all start with a different offset and you won't make them overlap you can + * re implement read/write/close/ioctl/fnctl to send the requested action to the right + * library (sharing select will need more work though). + */ +#if !defined LWIP_SOCKET_OFFSET || defined __DOXYGEN__ +#define LWIP_SOCKET_OFFSET 0 +#endif + +/** + * LWIP_TCP_KEEPALIVE==1: Enable TCP_KEEPIDLE, TCP_KEEPINTVL and TCP_KEEPCNT + * options processing. Note that TCP_KEEPIDLE and TCP_KEEPINTVL have to be set + * in seconds. (does not require sockets.c, and will affect tcp.c) + */ +#if !defined LWIP_TCP_KEEPALIVE || defined __DOXYGEN__ +#define LWIP_TCP_KEEPALIVE 0 +#endif + +/** + * LWIP_SO_SNDTIMEO==1: Enable send timeout for sockets/netconns and + * SO_SNDTIMEO processing. + */ +#if !defined LWIP_SO_SNDTIMEO || defined __DOXYGEN__ +#define LWIP_SO_SNDTIMEO 0 +#endif + +/** + * LWIP_SO_RCVTIMEO==1: Enable receive timeout for sockets/netconns and + * SO_RCVTIMEO processing. + */ +#if !defined LWIP_SO_RCVTIMEO || defined __DOXYGEN__ +#define LWIP_SO_RCVTIMEO 0 +#endif + +/** + * LWIP_SO_SNDRCVTIMEO_NONSTANDARD==1: SO_RCVTIMEO/SO_SNDTIMEO take an int + * (milliseconds, much like winsock does) instead of a struct timeval (default). + */ +#if !defined LWIP_SO_SNDRCVTIMEO_NONSTANDARD || defined __DOXYGEN__ +#define LWIP_SO_SNDRCVTIMEO_NONSTANDARD 0 +#endif + +/** + * LWIP_SO_RCVBUF==1: Enable SO_RCVBUF processing. + */ +#if !defined LWIP_SO_RCVBUF || defined __DOXYGEN__ +#define LWIP_SO_RCVBUF 0 +#endif + +/** + * LWIP_SO_LINGER==1: Enable SO_LINGER processing. + */ +#if !defined LWIP_SO_LINGER || defined __DOXYGEN__ +#define LWIP_SO_LINGER 0 +#endif + +/** + * If LWIP_SO_RCVBUF is used, this is the default value for recv_bufsize. + */ +#if !defined RECV_BUFSIZE_DEFAULT || defined __DOXYGEN__ +#define RECV_BUFSIZE_DEFAULT INT_MAX +#endif + +/** + * By default, TCP socket/netconn close waits 20 seconds max to send the FIN + */ +#if !defined LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT || defined __DOXYGEN__ +#define LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT 20000 +#endif + +/** + * SO_REUSE==1: Enable SO_REUSEADDR option. + */ +#if !defined SO_REUSE || defined __DOXYGEN__ +#define SO_REUSE 0 +#endif + +/** + * SO_REUSE_RXTOALL==1: Pass a copy of incoming broadcast/multicast packets + * to all local matches if SO_REUSEADDR is turned on. + * WARNING: Adds a memcpy for every packet if passing to more than one pcb! + */ +#if !defined SO_REUSE_RXTOALL || defined __DOXYGEN__ +#define SO_REUSE_RXTOALL 0 +#endif + +/** + * LWIP_FIONREAD_LINUXMODE==0 (default): ioctl/FIONREAD returns the amount of + * pending data in the network buffer. This is the way windows does it. It's + * the default for lwIP since it is smaller. + * LWIP_FIONREAD_LINUXMODE==1: ioctl/FIONREAD returns the size of the next + * pending datagram in bytes. This is the way linux does it. This code is only + * here for compatibility. + */ +#if !defined LWIP_FIONREAD_LINUXMODE || defined __DOXYGEN__ +#define LWIP_FIONREAD_LINUXMODE 0 +#endif + +/** + * LWIP_SOCKET_SELECT==1 (default): enable select() for sockets (uses a netconn + * callback to keep track of events). + * This saves RAM (counters per socket) and code (netconn event callback), which + * should improve performance a bit). 
+ */ +#if !defined LWIP_SOCKET_SELECT || defined __DOXYGEN__ +#define LWIP_SOCKET_SELECT 1 +#endif + +/** + * LWIP_SOCKET_POLL==1 (default): enable poll() for sockets (including + * struct pollfd, nfds_t, and constants) + */ +#if !defined LWIP_SOCKET_POLL || defined __DOXYGEN__ +#define LWIP_SOCKET_POLL 1 +#endif +/** + * @} + */ + +/* + ---------------------------------------- + ---------- Statistics options ---------- + ---------------------------------------- +*/ +/** + * @defgroup lwip_opts_stats Statistics + * @ingroup lwip_opts_debug + * @{ + */ +/** + * LWIP_STATS==1: Enable statistics collection in lwip_stats. + */ +#if !defined LWIP_STATS || defined __DOXYGEN__ +#define LWIP_STATS 1 +#endif + +#if LWIP_STATS + +/** + * LWIP_STATS_DISPLAY==1: Compile in the statistics output functions. + */ +#if !defined LWIP_STATS_DISPLAY || defined __DOXYGEN__ +#define LWIP_STATS_DISPLAY 0 +#endif + +/** + * LINK_STATS==1: Enable link stats. + */ +#if !defined LINK_STATS || defined __DOXYGEN__ +#define LINK_STATS 1 +#endif + +/** + * ETHARP_STATS==1: Enable etharp stats. + */ +#if !defined ETHARP_STATS || defined __DOXYGEN__ +#define ETHARP_STATS (LWIP_ARP) +#endif + +/** + * IP_STATS==1: Enable IP stats. + */ +#if !defined IP_STATS || defined __DOXYGEN__ +#define IP_STATS 1 +#endif + +/** + * IPFRAG_STATS==1: Enable IP fragmentation stats. Default is + * on if using either frag or reass. + */ +#if !defined IPFRAG_STATS || defined __DOXYGEN__ +#define IPFRAG_STATS (IP_REASSEMBLY || IP_FRAG) +#endif + +/** + * ICMP_STATS==1: Enable ICMP stats. + */ +#if !defined ICMP_STATS || defined __DOXYGEN__ +#define ICMP_STATS 1 +#endif + +/** + * IGMP_STATS==1: Enable IGMP stats. + */ +#if !defined IGMP_STATS || defined __DOXYGEN__ +#define IGMP_STATS (LWIP_IGMP) +#endif + +/** + * UDP_STATS==1: Enable UDP stats. Default is on if + * UDP enabled, otherwise off. + */ +#if !defined UDP_STATS || defined __DOXYGEN__ +#define UDP_STATS (LWIP_UDP) +#endif + +/** + * TCP_STATS==1: Enable TCP stats. Default is on if TCP + * enabled, otherwise off. + */ +#if !defined TCP_STATS || defined __DOXYGEN__ +#define TCP_STATS (LWIP_TCP) +#endif + +/** + * MEM_STATS==1: Enable mem.c stats. + */ +#if !defined MEM_STATS || defined __DOXYGEN__ +#define MEM_STATS ((MEM_LIBC_MALLOC == 0) && (MEM_USE_POOLS == 0)) +#endif + +/** + * MEMP_STATS==1: Enable memp.c pool stats. + */ +#if !defined MEMP_STATS || defined __DOXYGEN__ +#define MEMP_STATS (MEMP_MEM_MALLOC == 0) +#endif + +/** + * SYS_STATS==1: Enable system stats (sem and mbox counts, etc). + */ +#if !defined SYS_STATS || defined __DOXYGEN__ +#define SYS_STATS (NO_SYS == 0) +#endif + +/** + * IP6_STATS==1: Enable IPv6 stats. + */ +#if !defined IP6_STATS || defined __DOXYGEN__ +#define IP6_STATS (LWIP_IPV6) +#endif + +/** + * ICMP6_STATS==1: Enable ICMP for IPv6 stats. + */ +#if !defined ICMP6_STATS || defined __DOXYGEN__ +#define ICMP6_STATS (LWIP_IPV6 && LWIP_ICMP6) +#endif + +/** + * IP6_FRAG_STATS==1: Enable IPv6 fragmentation stats. + */ +#if !defined IP6_FRAG_STATS || defined __DOXYGEN__ +#define IP6_FRAG_STATS (LWIP_IPV6 && (LWIP_IPV6_FRAG || LWIP_IPV6_REASS)) +#endif + +/** + * MLD6_STATS==1: Enable MLD for IPv6 stats. + */ +#if !defined MLD6_STATS || defined __DOXYGEN__ +#define MLD6_STATS (LWIP_IPV6 && LWIP_IPV6_MLD) +#endif + +/** + * ND6_STATS==1: Enable Neighbor discovery for IPv6 stats. + */ +#if !defined ND6_STATS || defined __DOXYGEN__ +#define ND6_STATS (LWIP_IPV6) +#endif + +/** + * MIB2_STATS==1: Stats for SNMP MIB2. 
+ */ +#if !defined MIB2_STATS || defined __DOXYGEN__ +#define MIB2_STATS 0 +#endif + +#else + +#define LINK_STATS 0 +#define ETHARP_STATS 0 +#define IP_STATS 0 +#define IPFRAG_STATS 0 +#define ICMP_STATS 0 +#define IGMP_STATS 0 +#define UDP_STATS 0 +#define TCP_STATS 0 +#define MEM_STATS 0 +#define MEMP_STATS 0 +#define SYS_STATS 0 +#define LWIP_STATS_DISPLAY 0 +#define IP6_STATS 0 +#define ICMP6_STATS 0 +#define IP6_FRAG_STATS 0 +#define MLD6_STATS 0 +#define ND6_STATS 0 +#define MIB2_STATS 0 + +#endif /* LWIP_STATS */ +/** + * @} + */ + +/* + -------------------------------------- + ---------- Checksum options ---------- + -------------------------------------- +*/ +/** + * @defgroup lwip_opts_checksum Checksum + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * LWIP_CHECKSUM_CTRL_PER_NETIF==1: Checksum generation/check can be enabled/disabled + * per netif. + * ATTENTION: if enabled, the CHECKSUM_GEN_* and CHECKSUM_CHECK_* defines must be enabled! + */ +#if !defined LWIP_CHECKSUM_CTRL_PER_NETIF || defined __DOXYGEN__ +#define LWIP_CHECKSUM_CTRL_PER_NETIF 0 +#endif + +/** + * CHECKSUM_GEN_IP==1: Generate checksums in software for outgoing IP packets. + */ +#if !defined CHECKSUM_GEN_IP || defined __DOXYGEN__ +#define CHECKSUM_GEN_IP 1 +#endif + +/** + * CHECKSUM_GEN_UDP==1: Generate checksums in software for outgoing UDP packets. + */ +#if !defined CHECKSUM_GEN_UDP || defined __DOXYGEN__ +#define CHECKSUM_GEN_UDP 1 +#endif + +/** + * CHECKSUM_GEN_TCP==1: Generate checksums in software for outgoing TCP packets. + */ +#if !defined CHECKSUM_GEN_TCP || defined __DOXYGEN__ +#define CHECKSUM_GEN_TCP 1 +#endif + +/** + * CHECKSUM_GEN_ICMP==1: Generate checksums in software for outgoing ICMP packets. + */ +#if !defined CHECKSUM_GEN_ICMP || defined __DOXYGEN__ +#define CHECKSUM_GEN_ICMP 1 +#endif + +/** + * CHECKSUM_GEN_ICMP6==1: Generate checksums in software for outgoing ICMP6 packets. + */ +#if !defined CHECKSUM_GEN_ICMP6 || defined __DOXYGEN__ +#define CHECKSUM_GEN_ICMP6 1 +#endif + +/** + * CHECKSUM_CHECK_IP==1: Check checksums in software for incoming IP packets. + */ +#if !defined CHECKSUM_CHECK_IP || defined __DOXYGEN__ +#define CHECKSUM_CHECK_IP 1 +#endif + +/** + * CHECKSUM_CHECK_UDP==1: Check checksums in software for incoming UDP packets. + */ +#if !defined CHECKSUM_CHECK_UDP || defined __DOXYGEN__ +#define CHECKSUM_CHECK_UDP 1 +#endif + +/** + * CHECKSUM_CHECK_TCP==1: Check checksums in software for incoming TCP packets. + */ +#if !defined CHECKSUM_CHECK_TCP || defined __DOXYGEN__ +#define CHECKSUM_CHECK_TCP 1 +#endif + +/** + * CHECKSUM_CHECK_ICMP==1: Check checksums in software for incoming ICMP packets. + */ +#if !defined CHECKSUM_CHECK_ICMP || defined __DOXYGEN__ +#define CHECKSUM_CHECK_ICMP 1 +#endif + +/** + * CHECKSUM_CHECK_ICMP6==1: Check checksums in software for incoming ICMPv6 packets + */ +#if !defined CHECKSUM_CHECK_ICMP6 || defined __DOXYGEN__ +#define CHECKSUM_CHECK_ICMP6 1 +#endif + +/** + * LWIP_CHECKSUM_ON_COPY==1: Calculate checksum when copying data from + * application buffers to pbufs. 
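+ * Checksumming during the copy typically only pays off when CHECKSUM_GEN_TCP/
+ * CHECKSUM_GEN_UDP are computed in software rather than offloaded to the MAC;
+ * illustrative override:
+ *   #define LWIP_CHECKSUM_ON_COPY 1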
+ */ +#if !defined LWIP_CHECKSUM_ON_COPY || defined __DOXYGEN__ +#define LWIP_CHECKSUM_ON_COPY 0 +#endif +/** + * @} + */ + +/* + --------------------------------------- + ---------- IPv6 options --------------- + --------------------------------------- +*/ +/** + * @defgroup lwip_opts_ipv6 IPv6 + * @ingroup lwip_opts + * @{ + */ +/** + * LWIP_IPV6==1: Enable IPv6 + */ +#if !defined LWIP_IPV6 || defined __DOXYGEN__ +#define LWIP_IPV6 0 +#endif + +/** + * IPV6_REASS_MAXAGE: Maximum time (in multiples of IP6_REASS_TMR_INTERVAL - so seconds, normally) + * a fragmented IP packet waits for all fragments to arrive. If not all fragments arrived + * in this time, the whole packet is discarded. + */ +#if !defined IPV6_REASS_MAXAGE || defined __DOXYGEN__ +#define IPV6_REASS_MAXAGE 60 +#endif + +/** + * LWIP_IPV6_SCOPES==1: Enable support for IPv6 address scopes, ensuring that + * e.g. link-local addresses are really treated as link-local. Disable this + * setting only for single-interface configurations. + * All addresses that have a scope according to the default policy (link-local + * unicast addresses, interface-local and link-local multicast addresses) should + * now have a zone set on them before being passed to the core API, although + * lwIP will currently attempt to select a zone on the caller's behalf when + * necessary. Applications that directly assign IPv6 addresses to interfaces + * (which is NOT recommended) must now ensure that link-local addresses carry + * the netif's zone. See the new ip6_zone.h header file for more information and + * relevant macros. For now it is still possible to turn off scopes support + * through the new LWIP_IPV6_SCOPES option. When upgrading an implementation that + * uses the core API directly, it is highly recommended to enable + * LWIP_IPV6_SCOPES_DEBUG at least for a while, to ensure e.g. proper address + * initialization. + */ +#if !defined LWIP_IPV6_SCOPES || defined __DOXYGEN__ +#define LWIP_IPV6_SCOPES (LWIP_IPV6 && !LWIP_SINGLE_NETIF) +#endif + +/** + * LWIP_IPV6_SCOPES_DEBUG==1: Perform run-time checks to verify that addresses + * are properly zoned (see ip6_zone.h on what that means) where it matters. + * Enabling this setting is highly recommended when upgrading from an existing + * installation that is not yet scope-aware; otherwise it may be too expensive. + */ +#if !defined LWIP_IPV6_SCOPES_DEBUG || defined __DOXYGEN__ +#define LWIP_IPV6_SCOPES_DEBUG 0 +#endif + +/** + * LWIP_IPV6_NUM_ADDRESSES: Number of IPv6 addresses per netif. + */ +#if !defined LWIP_IPV6_NUM_ADDRESSES || defined __DOXYGEN__ +#define LWIP_IPV6_NUM_ADDRESSES 3 +#endif + +/** + * LWIP_IPV6_FORWARD==1: Forward IPv6 packets across netifs + */ +#if !defined LWIP_IPV6_FORWARD || defined __DOXYGEN__ +#define LWIP_IPV6_FORWARD 0 +#endif + +/** + * LWIP_IPV6_FRAG==1: Fragment outgoing IPv6 packets that are too big. + */ +#if !defined LWIP_IPV6_FRAG || defined __DOXYGEN__ +#define LWIP_IPV6_FRAG 1 +#endif + +/** + * LWIP_IPV6_REASS==1: reassemble incoming IPv6 packets that fragmented + */ +#if !defined LWIP_IPV6_REASS || defined __DOXYGEN__ +#define LWIP_IPV6_REASS LWIP_IPV6 +#endif + +/** + * LWIP_IPV6_SEND_ROUTER_SOLICIT==1: Send router solicitation messages during + * network startup. + */ +#if !defined LWIP_IPV6_SEND_ROUTER_SOLICIT || defined __DOXYGEN__ +#define LWIP_IPV6_SEND_ROUTER_SOLICIT 1 +#endif + +/** + * LWIP_IPV6_AUTOCONFIG==1: Enable stateless address autoconfiguration as per RFC 4862. 
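+ * A minimal IPv6-enabled configuration might combine this with (sketch; the
+ * link-local address is normally created by the port or application via
+ * netif_create_ip6_linklocal_address()):
+ *   #define LWIP_IPV6             1
+ *   #define LWIP_IPV6_AUTOCONFIG  1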
+ */ +#if !defined LWIP_IPV6_AUTOCONFIG || defined __DOXYGEN__ +#define LWIP_IPV6_AUTOCONFIG LWIP_IPV6 +#endif + +/** + * LWIP_IPV6_ADDRESS_LIFETIMES==1: Keep valid and preferred lifetimes for each + * IPv6 address. Required for LWIP_IPV6_AUTOCONFIG. May still be enabled + * otherwise, in which case the application may assign address lifetimes with + * the appropriate macros. Addresses with no lifetime are assumed to be static. + * If this option is disabled, all addresses are assumed to be static. + */ +#if !defined LWIP_IPV6_ADDRESS_LIFETIMES || defined __DOXYGEN__ +#define LWIP_IPV6_ADDRESS_LIFETIMES LWIP_IPV6_AUTOCONFIG +#endif + +/** + * LWIP_IPV6_DUP_DETECT_ATTEMPTS=[0..7]: Number of duplicate address detection attempts. + */ +#if !defined LWIP_IPV6_DUP_DETECT_ATTEMPTS || defined __DOXYGEN__ +#define LWIP_IPV6_DUP_DETECT_ATTEMPTS 1 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_icmp6 ICMP6 + * @ingroup lwip_opts_ipv6 + * @{ + */ +/** + * LWIP_ICMP6==1: Enable ICMPv6 (mandatory per RFC) + */ +#if !defined LWIP_ICMP6 || defined __DOXYGEN__ +#define LWIP_ICMP6 LWIP_IPV6 +#endif + +/** + * LWIP_ICMP6_DATASIZE: bytes from original packet to send back in + * ICMPv6 error messages. + */ +#if !defined LWIP_ICMP6_DATASIZE || defined __DOXYGEN__ +#define LWIP_ICMP6_DATASIZE 8 +#endif + +/** + * LWIP_ICMP6_HL: default hop limit for ICMPv6 messages + */ +#if !defined LWIP_ICMP6_HL || defined __DOXYGEN__ +#define LWIP_ICMP6_HL 255 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_mld6 Multicast listener discovery + * @ingroup lwip_opts_ipv6 + * @{ + */ +/** + * LWIP_IPV6_MLD==1: Enable multicast listener discovery protocol. + * If LWIP_IPV6 is enabled but this setting is disabled, the MAC layer must + * indiscriminately pass all inbound IPv6 multicast traffic to lwIP. + */ +#if !defined LWIP_IPV6_MLD || defined __DOXYGEN__ +#define LWIP_IPV6_MLD LWIP_IPV6 +#endif + +/** + * MEMP_NUM_MLD6_GROUP: Max number of IPv6 multicast groups that can be joined. + * There must be enough groups so that each netif can join the solicited-node + * multicast group for each of its local addresses, plus one for MDNS if + * applicable, plus any number of groups to be joined on UDP sockets. + */ +#if !defined MEMP_NUM_MLD6_GROUP || defined __DOXYGEN__ +#define MEMP_NUM_MLD6_GROUP 4 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_nd6 Neighbor discovery + * @ingroup lwip_opts_ipv6 + * @{ + */ +/** + * LWIP_ND6_QUEUEING==1: queue outgoing IPv6 packets while MAC address + * is being resolved. + */ +#if !defined LWIP_ND6_QUEUEING || defined __DOXYGEN__ +#define LWIP_ND6_QUEUEING LWIP_IPV6 +#endif + +/** + * MEMP_NUM_ND6_QUEUE: Max number of IPv6 packets to queue during MAC resolution. 
+ */ +#if !defined MEMP_NUM_ND6_QUEUE || defined __DOXYGEN__ +#define MEMP_NUM_ND6_QUEUE 20 +#endif + +/** + * LWIP_ND6_NUM_NEIGHBORS: Number of entries in IPv6 neighbor cache + */ +#if !defined LWIP_ND6_NUM_NEIGHBORS || defined __DOXYGEN__ +#define LWIP_ND6_NUM_NEIGHBORS 10 +#endif + +/** + * LWIP_ND6_NUM_DESTINATIONS: number of entries in IPv6 destination cache + */ +#if !defined LWIP_ND6_NUM_DESTINATIONS || defined __DOXYGEN__ +#define LWIP_ND6_NUM_DESTINATIONS 10 +#endif + +/** + * LWIP_ND6_NUM_PREFIXES: number of entries in IPv6 on-link prefixes cache + */ +#if !defined LWIP_ND6_NUM_PREFIXES || defined __DOXYGEN__ +#define LWIP_ND6_NUM_PREFIXES 5 +#endif + +/** + * LWIP_ND6_NUM_ROUTERS: number of entries in IPv6 default router cache + */ +#if !defined LWIP_ND6_NUM_ROUTERS || defined __DOXYGEN__ +#define LWIP_ND6_NUM_ROUTERS 3 +#endif + +/** + * LWIP_ND6_MAX_MULTICAST_SOLICIT: max number of multicast solicit messages to send + * (neighbor solicit and router solicit) + */ +#if !defined LWIP_ND6_MAX_MULTICAST_SOLICIT || defined __DOXYGEN__ +#define LWIP_ND6_MAX_MULTICAST_SOLICIT 3 +#endif + +/** + * LWIP_ND6_MAX_UNICAST_SOLICIT: max number of unicast neighbor solicitation messages + * to send during neighbor reachability detection. + */ +#if !defined LWIP_ND6_MAX_UNICAST_SOLICIT || defined __DOXYGEN__ +#define LWIP_ND6_MAX_UNICAST_SOLICIT 3 +#endif + +/** + * Unused: See ND RFC (time in milliseconds). + */ +#if !defined LWIP_ND6_MAX_ANYCAST_DELAY_TIME || defined __DOXYGEN__ +#define LWIP_ND6_MAX_ANYCAST_DELAY_TIME 1000 +#endif + +/** + * Unused: See ND RFC + */ +#if !defined LWIP_ND6_MAX_NEIGHBOR_ADVERTISEMENT || defined __DOXYGEN__ +#define LWIP_ND6_MAX_NEIGHBOR_ADVERTISEMENT 3 +#endif + +/** + * LWIP_ND6_REACHABLE_TIME: default neighbor reachable time (in milliseconds). + * May be updated by router advertisement messages. + */ +#if !defined LWIP_ND6_REACHABLE_TIME || defined __DOXYGEN__ +#define LWIP_ND6_REACHABLE_TIME 30000 +#endif + +/** + * LWIP_ND6_RETRANS_TIMER: default retransmission timer for solicitation messages + */ +#if !defined LWIP_ND6_RETRANS_TIMER || defined __DOXYGEN__ +#define LWIP_ND6_RETRANS_TIMER 1000 +#endif + +/** + * LWIP_ND6_DELAY_FIRST_PROBE_TIME: Delay before first unicast neighbor solicitation + * message is sent, during neighbor reachability detection. + */ +#if !defined LWIP_ND6_DELAY_FIRST_PROBE_TIME || defined __DOXYGEN__ +#define LWIP_ND6_DELAY_FIRST_PROBE_TIME 5000 +#endif + +/** + * LWIP_ND6_ALLOW_RA_UPDATES==1: Allow Router Advertisement messages to update + * Reachable time and retransmission timers, and netif MTU. + */ +#if !defined LWIP_ND6_ALLOW_RA_UPDATES || defined __DOXYGEN__ +#define LWIP_ND6_ALLOW_RA_UPDATES 1 +#endif + +/** + * LWIP_ND6_TCP_REACHABILITY_HINTS==1: Allow TCP to provide Neighbor Discovery + * with reachability hints for connected destinations. This helps avoid sending + * unicast neighbor solicitation messages. + */ +#if !defined LWIP_ND6_TCP_REACHABILITY_HINTS || defined __DOXYGEN__ +#define LWIP_ND6_TCP_REACHABILITY_HINTS 1 +#endif + +/** + * LWIP_ND6_RDNSS_MAX_DNS_SERVERS > 0: Use IPv6 Router Advertisement Recursive + * DNS Server Option (as per RFC 6106) to copy a defined maximum number of DNS + * servers to the DNS module. 
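The ND6 caches above are statically sized, so every entry costs RAM whether it is used or not. A constrained node sitting behind a single router could shrink them roughly as in the sketch below; the numbers are examples, not recommendations.

/* Hypothetical lwipopts.h overrides for a small IPv6 node behind one router. */
#define LWIP_ND6_NUM_NEIGHBORS     4
#define LWIP_ND6_NUM_DESTINATIONS  4
#define LWIP_ND6_NUM_PREFIXES      2
#define LWIP_ND6_NUM_ROUTERS       1
#define MEMP_NUM_ND6_QUEUE         8   /* packets buffered while an address resolves */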
+ */ +#if !defined LWIP_ND6_RDNSS_MAX_DNS_SERVERS || defined __DOXYGEN__ +#define LWIP_ND6_RDNSS_MAX_DNS_SERVERS 0 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_dhcpv6 DHCPv6 + * @ingroup lwip_opts_ipv6 + * @{ + */ +/** + * LWIP_IPV6_DHCP6==1: enable DHCPv6 stateful/stateless address autoconfiguration. + */ +#if !defined LWIP_IPV6_DHCP6 || defined __DOXYGEN__ +#define LWIP_IPV6_DHCP6 0 +#endif + +/** + * LWIP_IPV6_DHCP6_STATEFUL==1: enable DHCPv6 stateful address autoconfiguration. + * (not supported, yet!) + */ +#if !defined LWIP_IPV6_DHCP6_STATEFUL || defined __DOXYGEN__ +#define LWIP_IPV6_DHCP6_STATEFUL 0 +#endif + +/** + * LWIP_IPV6_DHCP6_STATELESS==1: enable DHCPv6 stateless address autoconfiguration. + */ +#if !defined LWIP_IPV6_DHCP6_STATELESS || defined __DOXYGEN__ +#define LWIP_IPV6_DHCP6_STATELESS LWIP_IPV6_DHCP6 +#endif + +/** + * LWIP_DHCP6_GETS_NTP==1: Request NTP servers via DHCPv6. For each + * response packet, a callback is called, which has to be provided by the port: + * void dhcp6_set_ntp_servers(u8_t num_ntp_servers, ip_addr_t* ntp_server_addrs); +*/ +#if !defined LWIP_DHCP6_GET_NTP_SRV || defined __DOXYGEN__ +#define LWIP_DHCP6_GET_NTP_SRV 0 +#endif + +/** + * The maximum of NTP servers requested + */ +#if !defined LWIP_DHCP6_MAX_NTP_SERVERS || defined __DOXYGEN__ +#define LWIP_DHCP6_MAX_NTP_SERVERS 1 +#endif + +/** + * LWIP_DHCP6_MAX_DNS_SERVERS > 0: Request DNS servers via DHCPv6. + * DNS servers received in the response are passed to DNS via @ref dns_setserver() + * (up to the maximum limit defined here). + */ +#if !defined LWIP_DHCP6_MAX_DNS_SERVERS || defined __DOXYGEN__ +#define LWIP_DHCP6_MAX_DNS_SERVERS DNS_MAX_SERVERS +#endif +/** + * @} + */ + +/* + --------------------------------------- + ---------- Hook options --------------- + --------------------------------------- +*/ + +/** + * @defgroup lwip_opts_hooks Hooks + * @ingroup lwip_opts_infrastructure + * Hooks are undefined by default, define them to a function if you need them. + * @{ + */ + +/** + * LWIP_HOOK_FILENAME: Custom filename to \#include in files that provide hooks. + * Declare your hook function prototypes in there, you may also \#include all headers + * providing data types that are need in this file. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_FILENAME "path/to/my/lwip_hooks.h" +#endif + +/** + * LWIP_HOOK_TCP_ISN: + * Hook for generation of the Initial Sequence Number (ISN) for a new TCP + * connection. The default lwIP ISN generation algorithm is very basic and may + * allow for TCP spoofing attacks. This hook provides the means to implement + * the standardized ISN generation algorithm from RFC 6528 (see contrib/adons/tcp_isn), + * or any other desired algorithm as a replacement. 
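When LWIP_DHCP6_GET_NTP_SRV is enabled, the port must supply the dhcp6_set_ntp_servers() callback named above. A minimal sketch follows; it only logs the offered servers (a real port would hand them to its SNTP client) and assumes the const-qualified prototype declared in lwip/dhcp6.h.

#include "lwip/opt.h"
#include "lwip/debug.h"
#include "lwip/ip_addr.h"

/* Hypothetical port-side callback for LWIP_DHCP6_GET_NTP_SRV==1. */
void dhcp6_set_ntp_servers(u8_t num_ntp_servers, const ip_addr_t *ntp_server_addrs)
{
  u8_t i;
  for (i = 0; i < num_ntp_servers; i++) {
    LWIP_DEBUGF(DHCP6_DEBUG, ("DHCPv6 offered NTP server %u: %s\n",
                              i, ipaddr_ntoa(&ntp_server_addrs[i])));
  }
}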
+ * Called from tcp_connect() and tcp_listen_input() when an ISN is needed for + * a new TCP connection, if TCP support (@ref LWIP_TCP) is enabled.\n + * Signature:\code{.c} + * u32_t my_hook_tcp_isn(const ip_addr_t* local_ip, u16_t local_port, const ip_addr_t* remote_ip, u16_t remote_port); + * \endcode + * - it may be necessary to use "struct ip_addr" (ip4_addr, ip6_addr) instead of "ip_addr_t" in function declarations\n + * Arguments: + * - local_ip: pointer to the local IP address of the connection + * - local_port: local port number of the connection (host-byte order) + * - remote_ip: pointer to the remote IP address of the connection + * - remote_port: remote port number of the connection (host-byte order)\n + * Return value: + * - the 32-bit Initial Sequence Number to use for the new TCP connection. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_TCP_ISN(local_ip, local_port, remote_ip, remote_port) +#endif + +/** + * LWIP_HOOK_TCP_INPACKET_PCB: + * Hook for intercepting incoming packets before they are passed to a pcb. This + * allows updating some state or even dropping a packet. + * Signature:\code{.c} + * err_t my_hook_tcp_inpkt(struct tcp_pcb *pcb, struct tcp_hdr *hdr, u16_t optlen, u16_t opt1len, u8_t *opt2, struct pbuf *p); + * \endcode + * Arguments: + * - pcb: tcp_pcb selected for input of this packet (ATTENTION: this may be + * struct tcp_pcb_listen if pcb->state == LISTEN) + * - hdr: pointer to tcp header (ATTENTION: tcp options may not be in one piece!) + * - optlen: tcp option length + * - opt1len: tcp option length 1st part + * - opt2: if this is != NULL, tcp options are split among 2 pbufs. In that case, + * options start at right after the tcp header ('(u8_t*)(hdr + 1)') for + * the first 'opt1len' bytes and the rest starts at 'opt2'. opt2len can + * be simply calculated: 'opt2len = optlen - opt1len;' + * - p: input packet, p->payload points to application data (that's why tcp hdr + * and options are passed in seperately) + * Return value: + * - ERR_OK: continue input of this packet as normal + * - != ERR_OK: drop this packet for input (don't continue input processing) + * + * ATTENTION: don't call any tcp api functions that might change tcp state (pcb + * state or any pcb lists) from this callback! + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_TCP_INPACKET_PCB(pcb, hdr, optlen, opt1len, opt2, p) +#endif + +/** + * LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH: + * Hook for increasing the size of the options allocated with a tcp header. + * Together with LWIP_HOOK_TCP_OUT_ADD_TCPOPTS, this can be used to add custom + * options to outgoing tcp segments. + * Signature:\code{.c} + * u8_t my_hook_tcp_out_tcpopt_length(const struct tcp_pcb *pcb, u8_t internal_option_length); + * \endcode + * Arguments: + * - pcb: tcp_pcb that transmits (ATTENTION: this may be NULL or + * struct tcp_pcb_listen if pcb->state == LISTEN) + * - internal_option_length: tcp option length used by the stack internally + * Return value: + * - a number of bytes to allocate for tcp options (internal_option_length <= ret <= 40) + * + * ATTENTION: don't call any tcp api functions that might change tcp state (pcb + * state or any pcb lists) from this callback! + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH(pcb, internal_len) +#endif + +/** + * LWIP_HOOK_TCP_OUT_ADD_TCPOPTS: + * Hook for adding custom options to outgoing tcp segments. + * Space for these custom options has to be reserved via LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH. 
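As a sketch of how this hook is usually wired up (the names below are placeholders, not part of lwIP): LWIP_HOOK_FILENAME points at a small header that declares the hook function, and LWIP_HOOK_TCP_ISN maps onto it. The RFC 6528 implementation from the lwIP contrib tcp_isn add-on can be plugged in the same way.

/* --- lwipopts.h (hypothetical) --- */
#define LWIP_HOOK_FILENAME "lwip_hooks.h"
#define LWIP_HOOK_TCP_ISN(local_ip, local_port, remote_ip, remote_port) \
        my_hook_tcp_isn((local_ip), (local_port), (remote_ip), (remote_port))

/* --- lwip_hooks.h (hypothetical, implementation elsewhere) --- */
#include "lwip/ip_addr.h"
u32_t my_hook_tcp_isn(const ip_addr_t *local_ip, u16_t local_port,
                      const ip_addr_t *remote_ip, u16_t remote_port);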
+ * Signature:\code{.c} + * u32_t *my_hook_tcp_out_add_tcpopts(struct pbuf *p, struct tcp_hdr *hdr, const struct tcp_pcb *pcb, u32_t *opts); + * \endcode + * Arguments: + * - p: output packet, p->payload pointing to tcp header, data follows + * - hdr: tcp header + * - pcb: tcp_pcb that transmits (ATTENTION: this may be NULL or + * struct tcp_pcb_listen if pcb->state == LISTEN) + * - opts: pointer where to add the custom options (there may already be options + * between the header and these) + * Return value: + * - pointer pointing directly after the inserted options + * + * ATTENTION: don't call any tcp api functions that might change tcp state (pcb + * state or any pcb lists) from this callback! + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_TCP_OUT_ADD_TCPOPTS(p, hdr, pcb, opts) +#endif + +/** + * LWIP_HOOK_IP4_INPUT(pbuf, input_netif): + * Called from ip_input() (IPv4) + * Signature:\code{.c} + * int my_hook(struct pbuf *pbuf, struct netif *input_netif); + * \endcode + * Arguments: + * - pbuf: received struct pbuf passed to ip_input() + * - input_netif: struct netif on which the packet has been received + * Return values: + * - 0: Hook has not consumed the packet, packet is processed as normal + * - != 0: Hook has consumed the packet. + * If the hook consumed the packet, 'pbuf' is in the responsibility of the hook + * (i.e. free it when done). + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP4_INPUT(pbuf, input_netif) +#endif + +/** + * LWIP_HOOK_IP4_ROUTE(dest): + * Called from ip_route() (IPv4) + * Signature:\code{.c} + * struct netif *my_hook(const ip4_addr_t *dest); + * \endcode + * Arguments: + * - dest: destination IPv4 address + * Returns values: + * - the destination netif + * - NULL if no destination netif is found. In that case, ip_route() continues as normal. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP4_ROUTE() +#endif + +/** + * LWIP_HOOK_IP4_ROUTE_SRC(src, dest): + * Source-based routing for IPv4 - called from ip_route() (IPv4) + * Signature:\code{.c} + * struct netif *my_hook(const ip4_addr_t *src, const ip4_addr_t *dest); + * \endcode + * Arguments: + * - src: local/source IPv4 address + * - dest: destination IPv4 address + * Returns values: + * - the destination netif + * - NULL if no destination netif is found. In that case, ip_route() continues as normal. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP4_ROUTE_SRC(src, dest) +#endif + +/** + * LWIP_HOOK_IP4_CANFORWARD(src, dest): + * Check if an IPv4 can be forwarded - called from: + * ip4_input() -> ip4_forward() -> ip4_canforward() (IPv4) + * - source address is available via ip4_current_src_addr() + * - calling an output function in this context (e.g. multicast router) is allowed + * Signature:\code{.c} + * int my_hook(struct pbuf *p, u32_t dest_addr_hostorder); + * \endcode + * Arguments: + * - p: packet to forward + * - dest: destination IPv4 address + * Returns values: + * - 1: forward + * - 0: don't forward + * - -1: no decision. In that case, ip4_canforward() continues as normal. 
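Tying LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH and LWIP_HOOK_TCP_OUT_ADD_TCPOPTS together, the hedged sketch below appends one 4-byte experimental TCP option (kind 254) to every outgoing segment; the function names and the option payload are invented for illustration, and both hooks still have to be mapped in lwipopts.h.

#include "lwip/opt.h"
#include "lwip/def.h"
#include "lwip/tcp.h"
#include "lwip/prot/tcp.h"

/* Reserve 4 extra bytes of option space on every segment. */
u8_t my_hook_tcp_out_tcpopt_length(const struct tcp_pcb *pcb, u8_t internal_option_length)
{
  LWIP_UNUSED_ARG(pcb);
  return (u8_t)(internal_option_length + 4); /* must stay <= 40 */
}

/* Write the option into the reserved space: kind=254, len=4, data=0xBE 0xEF. */
u32_t *my_hook_tcp_out_add_tcpopts(struct pbuf *p, struct tcp_hdr *hdr,
                                   const struct tcp_pcb *pcb, u32_t *opts)
{
  LWIP_UNUSED_ARG(p);
  LWIP_UNUSED_ARG(hdr);
  LWIP_UNUSED_ARG(pcb);
  *opts = PP_HTONL(0xFE04BEEFUL);
  return opts + 1; /* pointer directly after the inserted option */
}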
+ */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP4_CANFORWARD(src, dest) +#endif + +/** + * LWIP_HOOK_ETHARP_GET_GW(netif, dest): + * Called from etharp_output() (IPv4) + * Signature:\code{.c} + * const ip4_addr_t *my_hook(struct netif *netif, const ip4_addr_t *dest); + * \endcode + * Arguments: + * - netif: the netif used for sending + * - dest: the destination IPv4 address + * Return values: + * - the IPv4 address of the gateway to handle the specified destination IPv4 address + * - NULL, in which case the netif's default gateway is used + * + * The returned address MUST be directly reachable on the specified netif! + * This function is meant to implement advanced IPv4 routing together with + * LWIP_HOOK_IP4_ROUTE(). The actual routing/gateway table implementation is + * not part of lwIP but can e.g. be hidden in the netif's state argument. +*/ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_ETHARP_GET_GW(netif, dest) +#endif + +/** + * LWIP_HOOK_IP6_INPUT(pbuf, input_netif): + * Called from ip6_input() (IPv6) + * Signature:\code{.c} + * int my_hook(struct pbuf *pbuf, struct netif *input_netif); + * \endcode + * Arguments: + * - pbuf: received struct pbuf passed to ip6_input() + * - input_netif: struct netif on which the packet has been received + * Return values: + * - 0: Hook has not consumed the packet, packet is processed as normal + * - != 0: Hook has consumed the packet. + * If the hook consumed the packet, 'pbuf' is in the responsibility of the hook + * (i.e. free it when done). + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP6_INPUT(pbuf, input_netif) +#endif + +/** + * LWIP_HOOK_IP6_ROUTE(src, dest): + * Called from ip_route() (IPv6) + * Signature:\code{.c} + * struct netif *my_hook(const ip6_addr_t *dest, const ip6_addr_t *src); + * \endcode + * Arguments: + * - src: source IPv6 address + * - dest: destination IPv6 address + * Return values: + * - the destination netif + * - NULL if no destination netif is found. In that case, ip6_route() continues as normal. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP6_ROUTE(src, dest) +#endif + +/** + * LWIP_HOOK_ND6_GET_GW(netif, dest): + * Called from nd6_get_next_hop_entry() (IPv6) + * Signature:\code{.c} + * const ip6_addr_t *my_hook(struct netif *netif, const ip6_addr_t *dest); + * \endcode + * Arguments: + * - netif: the netif used for sending + * - dest: the destination IPv6 address + * Return values: + * - the IPv6 address of the next hop to handle the specified destination IPv6 address + * - NULL, in which case a NDP-discovered router is used instead + * + * The returned address MUST be directly reachable on the specified netif! + * This function is meant to implement advanced IPv6 routing together with + * LWIP_HOOK_IP6_ROUTE(). The actual routing/gateway table implementation is + * not part of lwIP but can e.g. be hidden in the netif's state argument. +*/ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_ND6_GET_GW(netif, dest) +#endif + +/** + * LWIP_HOOK_VLAN_CHECK(netif, eth_hdr, vlan_hdr): + * Called from ethernet_input() if VLAN support is enabled + * Signature:\code{.c} + * int my_hook(struct netif *netif, struct eth_hdr *eth_hdr, struct eth_vlan_hdr *vlan_hdr); + * \endcode + * Arguments: + * - netif: struct netif on which the packet has been received + * - eth_hdr: struct eth_hdr of the packet + * - vlan_hdr: struct eth_vlan_hdr of the packet + * Return values: + * - 0: Packet must be dropped. + * - != 0: Packet must be accepted. 
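A hedged sketch of the LWIP_HOOK_VLAN_CHECK hook just described: it accepts only frames tagged with one particular VLAN ID. The function name and the chosen VID are hypothetical; using it requires ETHARP_SUPPORT_VLAN==1 and a matching #define in lwipopts.h.

#include "lwip/def.h"
#include "lwip/netif.h"
#include "lwip/prot/ethernet.h"

#define MY_ALLOWED_VID 100  /* example VLAN ID */

int my_vlan_check(struct netif *netif, struct eth_hdr *eth_hdr, struct eth_vlan_hdr *vlan_hdr)
{
  u16_t vid = (u16_t)(lwip_htons(vlan_hdr->prio_vid) & 0x0FFF);
  LWIP_UNUSED_ARG(netif);
  LWIP_UNUSED_ARG(eth_hdr);
  return (vid == MY_ALLOWED_VID); /* != 0: accept, 0: drop */
}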
+ */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_VLAN_CHECK(netif, eth_hdr, vlan_hdr) +#endif + +/** + * LWIP_HOOK_VLAN_SET: + * Hook can be used to set prio_vid field of vlan_hdr. If you need to store data + * on per-netif basis to implement this callback, see @ref netif_cd. + * Called from ethernet_output() if VLAN support (@ref ETHARP_SUPPORT_VLAN) is enabled.\n + * Signature:\code{.c} + * s32_t my_hook_vlan_set(struct netif* netif, struct pbuf* pbuf, const struct eth_addr* src, const struct eth_addr* dst, u16_t eth_type);\n + * \endcode + * Arguments: + * - netif: struct netif that the packet will be sent through + * - p: struct pbuf packet to be sent + * - src: source eth address + * - dst: destination eth address + * - eth_type: ethernet type to packet to be sent\n + * + * + * Return values: + * - <0: Packet shall not contain VLAN header. + * - 0 <= return value <= 0xFFFF: Packet shall contain VLAN header. Return value is prio_vid in host byte order. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_VLAN_SET(netif, p, src, dst, eth_type) +#endif + +/** + * LWIP_HOOK_MEMP_AVAILABLE(memp_t_type): + * Called from memp_free() when a memp pool was empty and an item is now available + * Signature:\code{.c} + * void my_hook(memp_t type); + * \endcode + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_MEMP_AVAILABLE(memp_t_type) +#endif + +/** + * LWIP_HOOK_UNKNOWN_ETH_PROTOCOL(pbuf, netif): + * Called from ethernet_input() when an unknown eth type is encountered. + * Signature:\code{.c} + * err_t my_hook(struct pbuf* pbuf, struct netif* netif); + * \endcode + * Arguments: + * - p: rx packet with unknown eth type + * - netif: netif on which the packet has been received + * Return values: + * - ERR_OK if packet is accepted (hook function now owns the pbuf) + * - any error code otherwise (pbuf is freed) + * + * Payload points to ethernet header! + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_UNKNOWN_ETH_PROTOCOL(pbuf, netif) +#endif + +/** + * LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, state, msg, msg_type, options_len_ptr): + * Called from various dhcp functions when sending a DHCP message. + * This hook is called just before the DHCP message trailer is added, so the + * options are at the end of a DHCP message. + * Signature:\code{.c} + * void my_hook(struct netif *netif, struct dhcp *dhcp, u8_t state, struct dhcp_msg *msg, + * u8_t msg_type, u16_t *options_len_ptr); + * \endcode + * Arguments: + * - netif: struct netif that the packet will be sent through + * - dhcp: struct dhcp on that netif + * - state: current dhcp state (dhcp_state_enum_t as an u8_t) + * - msg: struct dhcp_msg that will be sent + * - msg_type: dhcp message type to be sent (u8_t) + * - options_len_ptr: pointer to the current length of options in the dhcp_msg "msg" + * (must be increased when options are added!) + * + * Options need to appended like this: + * LWIP_ASSERT("dhcp option overflow", *options_len_ptr + option_len + 2 <= DHCP_OPTIONS_LEN); + * msg->options[(*options_len_ptr)++] = <option_number>; + * msg->options[(*options_len_ptr)++] = <option_len>; + * msg->options[(*options_len_ptr)++] = <option_bytes>; + * [...] + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, state, msg, msg_type, options_len_ptr) +#endif + +/** + * LWIP_HOOK_DHCP_PARSE_OPTION(netif, dhcp, state, msg, msg_type, option, len, pbuf, option_value_offset): + * Called from dhcp_parse_reply when receiving a DHCP message. + * This hook is called for every option in the received message that is not handled internally. 
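Following the append pattern shown for LWIP_HOOK_DHCP_APPEND_OPTIONS above, the sketch below adds a vendor class identifier (DHCP option 60) to every outgoing message. The function name and the identifier string are made up, and the hook still has to be mapped to this function in lwipopts.h.

#include <string.h>
#include "lwip/dhcp.h"
#include "lwip/prot/dhcp.h"

void my_dhcp_append_options(struct netif *netif, struct dhcp *dhcp, u8_t state,
                            struct dhcp_msg *msg, u8_t msg_type, u16_t *options_len_ptr)
{
  const char vci[] = "example-device";              /* hypothetical identifier */
  u8_t vci_len = (u8_t)(sizeof(vci) - 1);
  LWIP_UNUSED_ARG(netif);
  LWIP_UNUSED_ARG(dhcp);
  LWIP_UNUSED_ARG(state);
  LWIP_UNUSED_ARG(msg_type);
  LWIP_ASSERT("dhcp option overflow",
              *options_len_ptr + vci_len + 2U <= DHCP_OPTIONS_LEN);
  msg->options[(*options_len_ptr)++] = 60;          /* vendor class identifier */
  msg->options[(*options_len_ptr)++] = vci_len;
  memcpy(&msg->options[*options_len_ptr], vci, vci_len);
  *options_len_ptr = (u16_t)(*options_len_ptr + vci_len);
}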
+ * Signature:\code{.c} + * void my_hook(struct netif *netif, struct dhcp *dhcp, u8_t state, struct dhcp_msg *msg, + * u8_t msg_type, u8_t option, u8_t option_len, struct pbuf *pbuf, u16_t option_value_offset); + * \endcode + * Arguments: + * - netif: struct netif that the packet will be sent through + * - dhcp: struct dhcp on that netif + * - state: current dhcp state (dhcp_state_enum_t as an u8_t) + * - msg: struct dhcp_msg that was received + * - msg_type: dhcp message type received (u8_t, ATTENTION: only valid after + * the message type option has been parsed!) + * - option: option value (u8_t) + * - len: option data length (u8_t) + * - pbuf: pbuf where option data is contained + * - option_value_offset: offset in pbuf where option data begins + * + * A nice way to get the option contents is pbuf_get_contiguous(): + * u8_t buf[32]; + * u8_t *ptr = (u8_t*)pbuf_get_contiguous(p, buf, sizeof(buf), LWIP_MIN(option_len, sizeof(buf)), offset); + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_DHCP_PARSE_OPTION(netif, dhcp, state, msg, msg_type, option, len, pbuf, offset) +#endif + +/** + * LWIP_HOOK_DHCP6_APPEND_OPTIONS(netif, dhcp6, state, msg, msg_type, options_len_ptr, max_len): + * Called from various dhcp6 functions when sending a DHCP6 message. + * This hook is called just before the DHCP6 message is sent, so the + * options are at the end of a DHCP6 message. + * Signature:\code{.c} + * void my_hook(struct netif *netif, struct dhcp6 *dhcp, u8_t state, struct dhcp6_msg *msg, + * u8_t msg_type, u16_t *options_len_ptr); + * \endcode + * Arguments: + * - netif: struct netif that the packet will be sent through + * - dhcp6: struct dhcp6 on that netif + * - state: current dhcp6 state (dhcp6_state_enum_t as an u8_t) + * - msg: struct dhcp6_msg that will be sent + * - msg_type: dhcp6 message type to be sent (u8_t) + * - options_len_ptr: pointer to the current length of options in the dhcp6_msg "msg" + * (must be increased when options are added!) + * + * Options need to appended like this: + * u8_t *options = (u8_t *)(msg + 1); + * LWIP_ASSERT("dhcp option overflow", sizeof(struct dhcp6_msg) + *options_len_ptr + newoptlen <= max_len); + * options[(*options_len_ptr)++] = <option_data>; + * [...] + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_DHCP6_APPEND_OPTIONS(netif, dhcp6, state, msg, msg_type, options_len_ptr, max_len) +#endif + +/** + * LWIP_HOOK_SOCKETS_SETSOCKOPT(s, sock, level, optname, optval, optlen, err) + * Called from socket API to implement setsockopt() for options not provided by lwIP. + * Core lock is held when this hook is called. + * Signature:\code{.c} + * int my_hook(int s, struct lwip_sock *sock, int level, int optname, const void *optval, socklen_t optlen, int *err) + * \endcode + * Arguments: + * - s: socket file descriptor + * - sock: internal socket descriptor (see lwip/priv/sockets_priv.h) + * - level: protocol level at which the option resides + * - optname: option to set + * - optval: value to set + * - optlen: size of optval + * - err: output error + * Return values: + * - 0: Hook has not consumed the option, code continues as normal (to internal options) + * - != 0: Hook has consumed the option, 'err' is returned + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_SOCKETS_SETSOCKOPT(s, sock, level, optname, optval, optlen, err) +#endif + +/** + * LWIP_HOOK_SOCKETS_GETSOCKOPT(s, sock, level, optname, optval, optlen, err) + * Called from socket API to implement getsockopt() for options not provided by lwIP. + * Core lock is held when this hook is called. 
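And a matching hedged sketch for LWIP_HOOK_DHCP_PARSE_OPTION, using pbuf_get_contiguous() as suggested above to pull option 43 (vendor-specific information) out of a reply; the function name and what is done with the data are placeholders.

#include "lwip/def.h"
#include "lwip/dhcp.h"
#include "lwip/prot/dhcp.h"
#include "lwip/pbuf.h"

void my_dhcp_parse_option(struct netif *netif, struct dhcp *dhcp, u8_t state,
                          struct dhcp_msg *msg, u8_t msg_type, u8_t option,
                          u8_t option_len, struct pbuf *pbuf, u16_t option_value_offset)
{
  LWIP_UNUSED_ARG(netif);
  LWIP_UNUSED_ARG(dhcp);
  LWIP_UNUSED_ARG(state);
  LWIP_UNUSED_ARG(msg);
  LWIP_UNUSED_ARG(msg_type);
  if (option == 43) { /* vendor-specific information */
    u8_t buf[32];
    const u8_t *data = (const u8_t *)pbuf_get_contiguous(pbuf, buf, sizeof(buf),
                         LWIP_MIN(option_len, sizeof(buf)), option_value_offset);
    if (data != NULL) {
      /* ...interpret the vendor payload here... */
    }
  }
}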
+ * Signature:\code{.c} + * int my_hook(int s, struct lwip_sock *sock, int level, int optname, void *optval, socklen_t *optlen, int *err) + * \endcode + * Arguments: + * - s: socket file descriptor + * - sock: internal socket descriptor (see lwip/priv/sockets_priv.h) + * - level: protocol level at which the option resides + * - optname: option to get + * - optval: value to get + * - optlen: size of optval + * - err: output error + * Return values: + * - 0: Hook has not consumed the option, code continues as normal (to internal options) + * - != 0: Hook has consumed the option, 'err' is returned + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_SOCKETS_GETSOCKOPT(s, sock, level, optname, optval, optlen, err) +#endif + +/** + * LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE(name, addr, addrtype, err) + * Called from netconn APIs (not usable with callback apps) allowing an + * external DNS resolver (which uses sequential API) to handle the query. + * Signature:\code{.c} + * int my_hook(const char *name, ip_addr_t *addr, u8_t addrtype, err_t *err) + * \endcode + * Arguments: + * - name: hostname to resolve + * - addr: output host address + * - addrtype: type of address to query + * - err: output error + * Return values: + * - 0: Hook has not consumed hostname query, query continues into DNS module + * - != 0: Hook has consumed the query + * + * err must also be checked to determine if the hook consumed the query, but + * the query failed + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE(name, addr, addrtype, err) +#endif +/** + * @} + */ + +/* + --------------------------------------- + ---------- Debugging options ---------- + --------------------------------------- +*/ +/** + * @defgroup lwip_opts_debugmsg Debug messages + * @ingroup lwip_opts_debug + * @{ + */ +/** + * LWIP_DBG_MIN_LEVEL: After masking, the value of the debug is + * compared against this value. If it is smaller, then debugging + * messages are written. + * @see debugging_levels + */ +#if !defined LWIP_DBG_MIN_LEVEL || defined __DOXYGEN__ +#define LWIP_DBG_MIN_LEVEL LWIP_DBG_LEVEL_ALL +#endif + +/** + * LWIP_DBG_TYPES_ON: A mask that can be used to globally enable/disable + * debug messages of certain types. + * @see debugging_levels + */ +#if !defined LWIP_DBG_TYPES_ON || defined __DOXYGEN__ +#define LWIP_DBG_TYPES_ON LWIP_DBG_ON +#endif + +/** + * ETHARP_DEBUG: Enable debugging in etharp.c. + */ +#if !defined ETHARP_DEBUG || defined __DOXYGEN__ +#define ETHARP_DEBUG LWIP_DBG_OFF +#endif + +/** + * NETIF_DEBUG: Enable debugging in netif.c. + */ +#if !defined NETIF_DEBUG || defined __DOXYGEN__ +#define NETIF_DEBUG LWIP_DBG_OFF +#endif + +/** + * PBUF_DEBUG: Enable debugging in pbuf.c. + */ +#if !defined PBUF_DEBUG || defined __DOXYGEN__ +#define PBUF_DEBUG LWIP_DBG_OFF +#endif + +/** + * API_LIB_DEBUG: Enable debugging in api_lib.c. + */ +#if !defined API_LIB_DEBUG || defined __DOXYGEN__ +#define API_LIB_DEBUG LWIP_DBG_OFF +#endif + +/** + * API_MSG_DEBUG: Enable debugging in api_msg.c. + */ +#if !defined API_MSG_DEBUG || defined __DOXYGEN__ +#define API_MSG_DEBUG LWIP_DBG_OFF +#endif + +/** + * SOCKETS_DEBUG: Enable debugging in sockets.c. + */ +#if !defined SOCKETS_DEBUG || defined __DOXYGEN__ +#define SOCKETS_DEBUG LWIP_DBG_OFF +#endif + +/** + * ICMP_DEBUG: Enable debugging in icmp.c. + */ +#if !defined ICMP_DEBUG || defined __DOXYGEN__ +#define ICMP_DEBUG LWIP_DBG_OFF +#endif + +/** + * IGMP_DEBUG: Enable debugging in igmp.c. 
+ */ +#if !defined IGMP_DEBUG || defined __DOXYGEN__ +#define IGMP_DEBUG LWIP_DBG_OFF +#endif + +/** + * INET_DEBUG: Enable debugging in inet.c. + */ +#if !defined INET_DEBUG || defined __DOXYGEN__ +#define INET_DEBUG LWIP_DBG_OFF +#endif + +/** + * IP_DEBUG: Enable debugging for IP. + */ +#if !defined IP_DEBUG || defined __DOXYGEN__ +#define IP_DEBUG LWIP_DBG_OFF +#endif + +/** + * IP_REASS_DEBUG: Enable debugging in ip_frag.c for both frag & reass. + */ +#if !defined IP_REASS_DEBUG || defined __DOXYGEN__ +#define IP_REASS_DEBUG LWIP_DBG_OFF +#endif + +/** + * RAW_DEBUG: Enable debugging in raw.c. + */ +#if !defined RAW_DEBUG || defined __DOXYGEN__ +#define RAW_DEBUG LWIP_DBG_OFF +#endif + +/** + * MEM_DEBUG: Enable debugging in mem.c. + */ +#if !defined MEM_DEBUG || defined __DOXYGEN__ +#define MEM_DEBUG LWIP_DBG_OFF +#endif + +/** + * MEMP_DEBUG: Enable debugging in memp.c. + */ +#if !defined MEMP_DEBUG || defined __DOXYGEN__ +#define MEMP_DEBUG LWIP_DBG_OFF +#endif + +/** + * SYS_DEBUG: Enable debugging in sys.c. + */ +#if !defined SYS_DEBUG || defined __DOXYGEN__ +#define SYS_DEBUG LWIP_DBG_OFF +#endif + +/** + * TIMERS_DEBUG: Enable debugging in timers.c. + */ +#if !defined TIMERS_DEBUG || defined __DOXYGEN__ +#define TIMERS_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_DEBUG: Enable debugging for TCP. + */ +#if !defined TCP_DEBUG || defined __DOXYGEN__ +#define TCP_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_INPUT_DEBUG: Enable debugging in tcp_in.c for incoming debug. + */ +#if !defined TCP_INPUT_DEBUG || defined __DOXYGEN__ +#define TCP_INPUT_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_FR_DEBUG: Enable debugging in tcp_in.c for fast retransmit. + */ +#if !defined TCP_FR_DEBUG || defined __DOXYGEN__ +#define TCP_FR_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_RTO_DEBUG: Enable debugging in TCP for retransmit + * timeout. + */ +#if !defined TCP_RTO_DEBUG || defined __DOXYGEN__ +#define TCP_RTO_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_CWND_DEBUG: Enable debugging for TCP congestion window. + */ +#if !defined TCP_CWND_DEBUG || defined __DOXYGEN__ +#define TCP_CWND_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_WND_DEBUG: Enable debugging in tcp_in.c for window updating. + */ +#if !defined TCP_WND_DEBUG || defined __DOXYGEN__ +#define TCP_WND_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_OUTPUT_DEBUG: Enable debugging in tcp_out.c output functions. + */ +#if !defined TCP_OUTPUT_DEBUG || defined __DOXYGEN__ +#define TCP_OUTPUT_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_RST_DEBUG: Enable debugging for TCP with the RST message. + */ +#if !defined TCP_RST_DEBUG || defined __DOXYGEN__ +#define TCP_RST_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_QLEN_DEBUG: Enable debugging for TCP queue lengths. + */ +#if !defined TCP_QLEN_DEBUG || defined __DOXYGEN__ +#define TCP_QLEN_DEBUG LWIP_DBG_OFF +#endif + +/** + * UDP_DEBUG: Enable debugging in UDP. + */ +#if !defined UDP_DEBUG || defined __DOXYGEN__ +#define UDP_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCPIP_DEBUG: Enable debugging in tcpip.c. + */ +#if !defined TCPIP_DEBUG || defined __DOXYGEN__ +#define TCPIP_DEBUG LWIP_DBG_OFF +#endif + +/** + * SLIP_DEBUG: Enable debugging in slipif.c. + */ +#if !defined SLIP_DEBUG || defined __DOXYGEN__ +#define SLIP_DEBUG LWIP_DBG_OFF +#endif + +/** + * DHCP_DEBUG: Enable debugging in dhcp.c. + */ +#if !defined DHCP_DEBUG || defined __DOXYGEN__ +#define DHCP_DEBUG LWIP_DBG_OFF +#endif + +/** + * AUTOIP_DEBUG: Enable debugging in autoip.c. 
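For reference, turning some of these channels on is done in lwipopts.h, as in the hedged sketch below; LWIP_DEBUG itself must also be defined or LWIP_DEBUGF() compiles to nothing.

/* Hypothetical lwipopts.h fragment: verbose output for TCP and DHCP only. */
#define LWIP_DEBUG
#define LWIP_DBG_MIN_LEVEL  LWIP_DBG_LEVEL_ALL  /* print all levels of enabled modules */
#define LWIP_DBG_TYPES_ON   LWIP_DBG_ON
#define TCP_DEBUG           LWIP_DBG_ON
#define DHCP_DEBUG          LWIP_DBG_ON
#define ETHARP_DEBUG        LWIP_DBG_OFF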
+ */ +#if !defined AUTOIP_DEBUG || defined __DOXYGEN__ +#define AUTOIP_DEBUG LWIP_DBG_OFF +#endif + +/** + * DNS_DEBUG: Enable debugging for DNS. + */ +#if !defined DNS_DEBUG || defined __DOXYGEN__ +#define DNS_DEBUG LWIP_DBG_OFF +#endif + +/** + * IP6_DEBUG: Enable debugging for IPv6. + */ +#if !defined IP6_DEBUG || defined __DOXYGEN__ +#define IP6_DEBUG LWIP_DBG_OFF +#endif + +/** + * DHCP6_DEBUG: Enable debugging in dhcp6.c. + */ +#if !defined DHCP6_DEBUG || defined __DOXYGEN__ +#define DHCP6_DEBUG LWIP_DBG_OFF +#endif +/** + * @} + */ + +/** + * LWIP_TESTMODE: Changes to make unit test possible + */ +#if !defined LWIP_TESTMODE +#define LWIP_TESTMODE 0 +#endif + +/* + -------------------------------------------------- + ---------- Performance tracking options ---------- + -------------------------------------------------- +*/ +/** + * @defgroup lwip_opts_perf Performance + * @ingroup lwip_opts_debug + * @{ + */ +/** + * LWIP_PERF: Enable performance testing for lwIP + * (if enabled, arch/perf.h is included) + */ +#if !defined LWIP_PERF || defined __DOXYGEN__ +#define LWIP_PERF 0 +#endif +/** + * @} + */ + +#endif /* LWIP_HDR_OPT_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/pbuf.h b/Middlewares/Third_Party/LwIP/src/include/lwip/pbuf.h new file mode 100644 index 0000000..82902a4 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/pbuf.h @@ -0,0 +1,322 @@ +/** + * @file + * pbuf API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_PBUF_H +#define LWIP_HDR_PBUF_H + +#include "lwip/opt.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** LWIP_SUPPORT_CUSTOM_PBUF==1: Custom pbufs behave much like their pbuf type + * but they are allocated by external code (initialised by calling + * pbuf_alloced_custom()) and when pbuf_free gives up their last reference, they + * are freed by calling pbuf_custom->custom_free_function(). 
+ * Currently, the pbuf_custom code is only needed for one specific configuration + * of IP_FRAG, unless required by external driver/application code. */ +#ifndef LWIP_SUPPORT_CUSTOM_PBUF +#define LWIP_SUPPORT_CUSTOM_PBUF ((IP_FRAG && !LWIP_NETIF_TX_SINGLE_PBUF) || (LWIP_IPV6 && LWIP_IPV6_FRAG)) +#endif + +/** @ingroup pbuf + * PBUF_NEEDS_COPY(p): return a boolean value indicating whether the given + * pbuf needs to be copied in order to be kept around beyond the current call + * stack without risking being corrupted. The default setting provides safety: + * it will make a copy iof any pbuf chain that does not consist entirely of + * PBUF_ROM type pbufs. For setups with zero-copy support, it may be redefined + * to evaluate to true in all cases, for example. However, doing so also has an + * effect on the application side: any buffers that are *not* copied must also + * *not* be reused by the application after passing them to lwIP. For example, + * when setting PBUF_NEEDS_COPY to (0), after using udp_send() with a PBUF_RAM + * pbuf, the application must free the pbuf immediately, rather than reusing it + * for other purposes. For more background information on this, see tasks #6735 + * and #7896, and bugs #11400 and #49914. */ +#ifndef PBUF_NEEDS_COPY +#define PBUF_NEEDS_COPY(p) ((p)->type_internal & PBUF_TYPE_FLAG_DATA_VOLATILE) +#endif /* PBUF_NEEDS_COPY */ + +/* @todo: We need a mechanism to prevent wasting memory in every pbuf + (TCP vs. UDP, IPv4 vs. IPv6: UDP/IPv4 packets may waste up to 28 bytes) */ + +#define PBUF_TRANSPORT_HLEN 20 +#if LWIP_IPV6 +#define PBUF_IP_HLEN 40 +#else +#define PBUF_IP_HLEN 20 +#endif + +/** + * @ingroup pbuf + * Enumeration of pbuf layers + */ +typedef enum { + /** Includes spare room for transport layer header, e.g. UDP header. + * Use this if you intend to pass the pbuf to functions like udp_send(). + */ + PBUF_TRANSPORT = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN, + /** Includes spare room for IP header. + * Use this if you intend to pass the pbuf to functions like raw_send(). + */ + PBUF_IP = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN, + /** Includes spare room for link layer header (ethernet header). + * Use this if you intend to pass the pbuf to functions like ethernet_output(). + * @see PBUF_LINK_HLEN + */ + PBUF_LINK = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN, + /** Includes spare room for additional encapsulation header before ethernet + * headers (e.g. 802.11). + * Use this if you intend to pass the pbuf to functions like netif->linkoutput(). + * @see PBUF_LINK_ENCAPSULATION_HLEN + */ + PBUF_RAW_TX = PBUF_LINK_ENCAPSULATION_HLEN, + /** Use this for input packets in a netif driver when calling netif->input() + * in the most common case - ethernet-layer netif driver. */ + PBUF_RAW = 0 +} pbuf_layer; + + +/* Base flags for pbuf_type definitions: */ + +/** Indicates that the payload directly follows the struct pbuf. + * This makes @ref pbuf_header work in both directions. */ +#define PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS 0x80 +/** Indicates the data stored in this pbuf can change. If this pbuf needs + * to be queued, it must be copied/duplicated. */ +#define PBUF_TYPE_FLAG_DATA_VOLATILE 0x40 +/** 4 bits are reserved for 16 allocation sources (e.g. heap, pool1, pool2, etc) + * Internally, we use: 0=heap, 1=MEMP_PBUF, 2=MEMP_PBUF_POOL -> 13 types free*/ +#define PBUF_TYPE_ALLOC_SRC_MASK 0x0F +/** Indicates this pbuf is used for RX (if not set, indicates use for TX). 
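To make the layer/type pairing concrete, the hedged sketch below allocates a PBUF_TRANSPORT / PBUF_RAM pbuf for an outgoing UDP datagram, copies application data into it and sends it. It assumes an already-connected struct udp_pcb and that it runs in the lwIP core context (raw API rules); send_datagram() is an invented helper name.

#include "lwip/pbuf.h"
#include "lwip/udp.h"

static err_t send_datagram(struct udp_pcb *upcb, const void *data, u16_t len)
{
  err_t err;
  /* PBUF_TRANSPORT reserves headroom for UDP + IP + link headers,
     PBUF_RAM puts struct pbuf and payload in one contiguous allocation. */
  struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM);
  if (p == NULL) {
    return ERR_MEM;
  }
  err = pbuf_take(p, data, len);   /* copy data into the (possibly chained) pbuf */
  if (err == ERR_OK) {
    err = udp_send(upcb, p);       /* headers are prepended into the reserved room */
  }
  pbuf_free(p);                    /* udp_send() does not consume our reference */
  return err;
}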
+ * This information can be used to keep some spare RX buffers e.g. for + * receiving TCP ACKs to unblock a connection) */ +#define PBUF_ALLOC_FLAG_RX 0x0100 +/** Indicates the application needs the pbuf payload to be in one piece */ +#define PBUF_ALLOC_FLAG_DATA_CONTIGUOUS 0x0200 + +#define PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP 0x00 +#define PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF 0x01 +#define PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF_POOL 0x02 +/** First pbuf allocation type for applications */ +#define PBUF_TYPE_ALLOC_SRC_MASK_APP_MIN 0x03 +/** Last pbuf allocation type for applications */ +#define PBUF_TYPE_ALLOC_SRC_MASK_APP_MAX PBUF_TYPE_ALLOC_SRC_MASK + +/** + * @ingroup pbuf + * Enumeration of pbuf types + */ +typedef enum { + /** pbuf data is stored in RAM, used for TX mostly, struct pbuf and its payload + are allocated in one piece of contiguous memory (so the first payload byte + can be calculated from struct pbuf). + pbuf_alloc() allocates PBUF_RAM pbufs as unchained pbufs (although that might + change in future versions). + This should be used for all OUTGOING packets (TX).*/ + PBUF_RAM = (PBUF_ALLOC_FLAG_DATA_CONTIGUOUS | PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS | PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP), + /** pbuf data is stored in ROM, i.e. struct pbuf and its payload are located in + totally different memory areas. Since it points to ROM, payload does not + have to be copied when queued for transmission. */ + PBUF_ROM = PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF, + /** pbuf comes from the pbuf pool. Much like PBUF_ROM but payload might change + so it has to be duplicated when queued before transmitting, depending on + who has a 'ref' to it. */ + PBUF_REF = (PBUF_TYPE_FLAG_DATA_VOLATILE | PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF), + /** pbuf payload refers to RAM. This one comes from a pool and should be used + for RX. Payload can be chained (scatter-gather RX) but like PBUF_RAM, struct + pbuf and its payload are allocated in one piece of contiguous memory (so + the first payload byte can be calculated from struct pbuf). + Don't use this for TX, if the pool becomes empty e.g. because of TCP queuing, + you are unable to receive TCP acks! */ + PBUF_POOL = (PBUF_ALLOC_FLAG_RX | PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS | PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF_POOL) +} pbuf_type; + + +/** indicates this packet's data should be immediately passed to the application */ +#define PBUF_FLAG_PUSH 0x01U +/** indicates this is a custom pbuf: pbuf_free calls pbuf_custom->custom_free_function() + when the last reference is released (plus custom PBUF_RAM cannot be trimmed) */ +#define PBUF_FLAG_IS_CUSTOM 0x02U +/** indicates this pbuf is UDP multicast to be looped back */ +#define PBUF_FLAG_MCASTLOOP 0x04U +/** indicates this pbuf was received as link-level broadcast */ +#define PBUF_FLAG_LLBCAST 0x08U +/** indicates this pbuf was received as link-level multicast */ +#define PBUF_FLAG_LLMCAST 0x10U +/** indicates this pbuf includes a TCP FIN flag */ +#define PBUF_FLAG_TCP_FIN 0x20U + +/** Main packet buffer struct */ +struct pbuf { + /** next pbuf in singly linked pbuf chain */ + struct pbuf *next; + + /** pointer to the actual data in the buffer */ + void *payload; + + /** + * total length of this buffer and all next buffers in chain + * belonging to the same packet. + * + * For non-queue packet chains this is the invariant: + * p->tot_len == p->len + (p->next? 
p->next->tot_len: 0) + */ + u16_t tot_len; + + /** length of this buffer */ + u16_t len; + + /** a bit field indicating pbuf type and allocation sources + (see PBUF_TYPE_FLAG_*, PBUF_ALLOC_FLAG_* and PBUF_TYPE_ALLOC_SRC_MASK) + */ + u8_t type_internal; + + /** misc flags */ + u8_t flags; + + /** + * the reference count always equals the number of pointers + * that refer to this pbuf. This can be pointers from an application, + * the stack itself, or pbuf->next pointers from a chain. + */ + LWIP_PBUF_REF_T ref; + + /** For incoming packets, this contains the input netif's index */ + u8_t if_idx; +}; + + +/** Helper struct for const-correctness only. + * The only meaning of this one is to provide a const payload pointer + * for PBUF_ROM type. + */ +struct pbuf_rom { + /** next pbuf in singly linked pbuf chain */ + struct pbuf *next; + + /** pointer to the actual data in the buffer */ + const void *payload; +}; + +#if LWIP_SUPPORT_CUSTOM_PBUF +/** Prototype for a function to free a custom pbuf */ +typedef void (*pbuf_free_custom_fn)(struct pbuf *p); + +/** A custom pbuf: like a pbuf, but following a function pointer to free it. */ +struct pbuf_custom { + /** The actual pbuf */ + struct pbuf pbuf; + /** This function is called when pbuf_free deallocates this pbuf(_custom) */ + pbuf_free_custom_fn custom_free_function; +}; +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ + +/** Define this to 0 to prevent freeing ooseq pbufs when the PBUF_POOL is empty */ +#ifndef PBUF_POOL_FREE_OOSEQ +#define PBUF_POOL_FREE_OOSEQ 1 +#endif /* PBUF_POOL_FREE_OOSEQ */ +#if LWIP_TCP && TCP_QUEUE_OOSEQ && NO_SYS && PBUF_POOL_FREE_OOSEQ +extern volatile u8_t pbuf_free_ooseq_pending; +void pbuf_free_ooseq(void); +/** When not using sys_check_timeouts(), call PBUF_CHECK_FREE_OOSEQ() + at regular intervals from main level to check if ooseq pbufs need to be + freed! */ +#define PBUF_CHECK_FREE_OOSEQ() do { if(pbuf_free_ooseq_pending) { \ + /* pbuf_alloc() reported PBUF_POOL to be empty -> try to free some \ + ooseq queued pbufs now */ \ + pbuf_free_ooseq(); }}while(0) +#else /* LWIP_TCP && TCP_QUEUE_OOSEQ && NO_SYS && PBUF_POOL_FREE_OOSEQ */ + /* Otherwise declare an empty PBUF_CHECK_FREE_OOSEQ */ + #define PBUF_CHECK_FREE_OOSEQ() +#endif /* LWIP_TCP && TCP_QUEUE_OOSEQ && NO_SYS && PBUF_POOL_FREE_OOSEQ*/ + +/* Initializes the pbuf module. This call is empty for now, but may not be in future. 
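A typical use of struct pbuf_custom is a zero-copy receive path: the driver wraps a DMA buffer in a custom pbuf and gets it back through custom_free_function once the stack drops its last reference. The sketch below is hypothetical; rx_buffer_return(), my_rx_pbuf_wrap() and RX_BUF_SIZE are invented driver details, it only compiles with LWIP_SUPPORT_CUSTOM_PBUF enabled, and it relies on pbuf_alloced_custom(), whose prototype appears further below.

#include "lwip/pbuf.h"

#define RX_BUF_SIZE 1536                         /* example DMA buffer size */

typedef struct my_rx_pbuf {
  struct pbuf_custom p;                          /* must stay the first member */
  void *dma_buffer;                              /* driver-owned backing memory */
} my_rx_pbuf_t;

extern void rx_buffer_return(void *dma_buffer);  /* hypothetical driver function */

static void my_rx_pbuf_free(struct pbuf *p)
{
  my_rx_pbuf_t *rx = (my_rx_pbuf_t *)p;          /* valid because pbuf_custom is first */
  rx_buffer_return(rx->dma_buffer);
}

struct pbuf *my_rx_pbuf_wrap(my_rx_pbuf_t *rx, void *dma_buffer, u16_t len)
{
  rx->dma_buffer = dma_buffer;
  rx->p.custom_free_function = my_rx_pbuf_free;
  /* PBUF_REF: the payload lives outside the pbuf and may be reused by the
     driver, so lwIP copies it if it has to queue the packet. */
  return pbuf_alloced_custom(PBUF_RAW, len, PBUF_REF, &rx->p, dma_buffer, RX_BUF_SIZE);
}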
*/ +#define pbuf_init() + +struct pbuf *pbuf_alloc(pbuf_layer l, u16_t length, pbuf_type type); +struct pbuf *pbuf_alloc_reference(void *payload, u16_t length, pbuf_type type); +#if LWIP_SUPPORT_CUSTOM_PBUF +struct pbuf *pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, + struct pbuf_custom *p, void *payload_mem, + u16_t payload_mem_len); +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ +void pbuf_realloc(struct pbuf *p, u16_t size); +#define pbuf_get_allocsrc(p) ((p)->type_internal & PBUF_TYPE_ALLOC_SRC_MASK) +#define pbuf_match_allocsrc(p, type) (pbuf_get_allocsrc(p) == ((type) & PBUF_TYPE_ALLOC_SRC_MASK)) +#define pbuf_match_type(p, type) pbuf_match_allocsrc(p, type) +u8_t pbuf_header(struct pbuf *p, s16_t header_size); +u8_t pbuf_header_force(struct pbuf *p, s16_t header_size); +u8_t pbuf_add_header(struct pbuf *p, size_t header_size_increment); +u8_t pbuf_add_header_force(struct pbuf *p, size_t header_size_increment); +u8_t pbuf_remove_header(struct pbuf *p, size_t header_size); +struct pbuf *pbuf_free_header(struct pbuf *q, u16_t size); +void pbuf_ref(struct pbuf *p); +u8_t pbuf_free(struct pbuf *p); +u16_t pbuf_clen(const struct pbuf *p); +void pbuf_cat(struct pbuf *head, struct pbuf *tail); +void pbuf_chain(struct pbuf *head, struct pbuf *tail); +struct pbuf *pbuf_dechain(struct pbuf *p); +err_t pbuf_copy(struct pbuf *p_to, const struct pbuf *p_from); +u16_t pbuf_copy_partial(const struct pbuf *p, void *dataptr, u16_t len, u16_t offset); +void *pbuf_get_contiguous(const struct pbuf *p, void *buffer, size_t bufsize, u16_t len, u16_t offset); +err_t pbuf_take(struct pbuf *buf, const void *dataptr, u16_t len); +err_t pbuf_take_at(struct pbuf *buf, const void *dataptr, u16_t len, u16_t offset); +struct pbuf *pbuf_skip(struct pbuf* in, u16_t in_offset, u16_t* out_offset); +struct pbuf *pbuf_coalesce(struct pbuf *p, pbuf_layer layer); +struct pbuf *pbuf_clone(pbuf_layer l, pbuf_type type, struct pbuf *p); +#if LWIP_CHECKSUM_ON_COPY +err_t pbuf_fill_chksum(struct pbuf *p, u16_t start_offset, const void *dataptr, + u16_t len, u16_t *chksum); +#endif /* LWIP_CHECKSUM_ON_COPY */ +#if LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE +void pbuf_split_64k(struct pbuf *p, struct pbuf **rest); +#endif /* LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + +u8_t pbuf_get_at(const struct pbuf* p, u16_t offset); +int pbuf_try_get_at(const struct pbuf* p, u16_t offset); +void pbuf_put_at(struct pbuf* p, u16_t offset, u8_t data); +u16_t pbuf_memcmp(const struct pbuf* p, u16_t offset, const void* s2, u16_t n); +u16_t pbuf_memfind(const struct pbuf* p, const void* mem, u16_t mem_len, u16_t start_offset); +u16_t pbuf_strstr(const struct pbuf* p, const char* substr); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PBUF_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/priv/altcp_priv.h b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/altcp_priv.h new file mode 100644 index 0000000..2d3b2fd --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/altcp_priv.h @@ -0,0 +1,146 @@ +/** + * @file + * Application layered TCP connection API (to be used from TCPIP thread)\n + * This interface mimics the tcp callback API to the application while preventing + * direct linking (much like virtual functions). + * This way, an application can make use of other application layer protocols + * on top of TCP without knowing the details (e.g. TLS, proxy connection). + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_PRIV_H +#define LWIP_HDR_ALTCP_PRIV_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/altcp.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct altcp_pcb *altcp_alloc(void); +void altcp_free(struct altcp_pcb *conn); + +/* Function prototypes for application layers */ +typedef void (*altcp_set_poll_fn)(struct altcp_pcb *conn, u8_t interval); +typedef void (*altcp_recved_fn)(struct altcp_pcb *conn, u16_t len); +typedef err_t (*altcp_bind_fn)(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port); +typedef err_t (*altcp_connect_fn)(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected); + +typedef struct altcp_pcb *(*altcp_listen_fn)(struct altcp_pcb *conn, u8_t backlog, err_t *err); + +typedef void (*altcp_abort_fn)(struct altcp_pcb *conn); +typedef err_t (*altcp_close_fn)(struct altcp_pcb *conn); +typedef err_t (*altcp_shutdown_fn)(struct altcp_pcb *conn, int shut_rx, int shut_tx); + +typedef err_t (*altcp_write_fn)(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags); +typedef err_t (*altcp_output_fn)(struct altcp_pcb *conn); + +typedef u16_t (*altcp_mss_fn)(struct altcp_pcb *conn); +typedef u16_t (*altcp_sndbuf_fn)(struct altcp_pcb *conn); +typedef u16_t (*altcp_sndqueuelen_fn)(struct altcp_pcb *conn); +typedef void (*altcp_nagle_disable_fn)(struct altcp_pcb *conn); +typedef void (*altcp_nagle_enable_fn)(struct altcp_pcb *conn); +typedef int (*altcp_nagle_disabled_fn)(struct altcp_pcb *conn); + +typedef void (*altcp_setprio_fn)(struct altcp_pcb *conn, u8_t prio); + +typedef void (*altcp_dealloc_fn)(struct altcp_pcb *conn); + +typedef err_t (*altcp_get_tcp_addrinfo_fn)(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port); +typedef ip_addr_t *(*altcp_get_ip_fn)(struct altcp_pcb *conn, int local); +typedef u16_t (*altcp_get_port_fn)(struct altcp_pcb *conn, int local); + +#ifdef LWIP_DEBUG +typedef enum tcp_state 
(*altcp_dbg_get_tcp_state_fn)(struct altcp_pcb *conn); +#endif + +struct altcp_functions { + altcp_set_poll_fn set_poll; + altcp_recved_fn recved; + altcp_bind_fn bind; + altcp_connect_fn connect; + altcp_listen_fn listen; + altcp_abort_fn abort; + altcp_close_fn close; + altcp_shutdown_fn shutdown; + altcp_write_fn write; + altcp_output_fn output; + altcp_mss_fn mss; + altcp_sndbuf_fn sndbuf; + altcp_sndqueuelen_fn sndqueuelen; + altcp_nagle_disable_fn nagle_disable; + altcp_nagle_enable_fn nagle_enable; + altcp_nagle_disabled_fn nagle_disabled; + altcp_setprio_fn setprio; + altcp_dealloc_fn dealloc; + altcp_get_tcp_addrinfo_fn addrinfo; + altcp_get_ip_fn getip; + altcp_get_port_fn getport; +#ifdef LWIP_DEBUG + altcp_dbg_get_tcp_state_fn dbg_get_tcp_state; +#endif +}; + +void altcp_default_set_poll(struct altcp_pcb *conn, u8_t interval); +void altcp_default_recved(struct altcp_pcb *conn, u16_t len); +err_t altcp_default_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port); +err_t altcp_default_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx); +err_t altcp_default_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags); +err_t altcp_default_output(struct altcp_pcb *conn); +u16_t altcp_default_mss(struct altcp_pcb *conn); +u16_t altcp_default_sndbuf(struct altcp_pcb *conn); +u16_t altcp_default_sndqueuelen(struct altcp_pcb *conn); +void altcp_default_nagle_disable(struct altcp_pcb *conn); +void altcp_default_nagle_enable(struct altcp_pcb *conn); +int altcp_default_nagle_disabled(struct altcp_pcb *conn); +void altcp_default_setprio(struct altcp_pcb *conn, u8_t prio); +void altcp_default_dealloc(struct altcp_pcb *conn); +err_t altcp_default_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port); +ip_addr_t *altcp_default_get_ip(struct altcp_pcb *conn, int local); +u16_t altcp_default_get_port(struct altcp_pcb *conn, int local); +#ifdef LWIP_DEBUG +enum tcp_state altcp_default_dbg_get_tcp_state(struct altcp_pcb *conn); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_ALTCP */ + +#endif /* LWIP_HDR_ALTCP_PRIV_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/priv/api_msg.h b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/api_msg.h new file mode 100644 index 0000000..9e8ffc9 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/api_msg.h @@ -0,0 +1,272 @@ +/** + * @file + * netconn API lwIP internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_API_MSG_H +#define LWIP_HDR_API_MSG_H + +#include "lwip/opt.h" + +#include "lwip/arch.h" +#include "lwip/ip_addr.h" +#include "lwip/err.h" +#include "lwip/sys.h" +#include "lwip/igmp.h" +#include "lwip/api.h" +#include "lwip/priv/tcpip_priv.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_NETCONN || LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ +/* Note: Netconn API is always available when sockets are enabled - + * sockets are implemented on top of them */ + +#if LWIP_MPU_COMPATIBLE +#if LWIP_NETCONN_SEM_PER_THREAD +#define API_MSG_M_DEF_SEM(m) *m +#else +#define API_MSG_M_DEF_SEM(m) API_MSG_M_DEF(m) +#endif +#else /* LWIP_MPU_COMPATIBLE */ +#define API_MSG_M_DEF_SEM(m) API_MSG_M_DEF(m) +#endif /* LWIP_MPU_COMPATIBLE */ + +/* For the netconn API, these values are use as a bitmask! */ +#define NETCONN_SHUT_RD 1 +#define NETCONN_SHUT_WR 2 +#define NETCONN_SHUT_RDWR (NETCONN_SHUT_RD | NETCONN_SHUT_WR) + +/* IP addresses and port numbers are expected to be in + * the same byte order as in the corresponding pcb. + */ +/** This struct includes everything that is necessary to execute a function + for a netconn in another thread context (mainly used to process netconns + in the tcpip_thread context to be thread safe). */ +struct api_msg { + /** The netconn which to process - always needed: it includes the semaphore + which is used to block the application thread until the function finished. */ + struct netconn *conn; + /** The return value of the function executed in tcpip_thread. 
*/ + err_t err; + /** Depending on the executed function, one of these union members is used */ + union { + /** used for lwip_netconn_do_send */ + struct netbuf *b; + /** used for lwip_netconn_do_newconn */ + struct { + u8_t proto; + } n; + /** used for lwip_netconn_do_bind and lwip_netconn_do_connect */ + struct { + API_MSG_M_DEF_C(ip_addr_t, ipaddr); + u16_t port; + u8_t if_idx; + } bc; + /** used for lwip_netconn_do_getaddr */ + struct { + ip_addr_t API_MSG_M_DEF(ipaddr); + u16_t API_MSG_M_DEF(port); + u8_t local; + } ad; + /** used for lwip_netconn_do_write */ + struct { + /** current vector to write */ + const struct netvector *vector; + /** number of unwritten vectors */ + u16_t vector_cnt; + /** offset into current vector */ + size_t vector_off; + /** total length across vectors */ + size_t len; + /** offset into total length/output of bytes written when err == ERR_OK */ + size_t offset; + u8_t apiflags; +#if LWIP_SO_SNDTIMEO + u32_t time_started; +#endif /* LWIP_SO_SNDTIMEO */ + } w; + /** used for lwip_netconn_do_recv */ + struct { + size_t len; + } r; +#if LWIP_TCP + /** used for lwip_netconn_do_close (/shutdown) */ + struct { + u8_t shut; +#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER + u32_t time_started; +#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + u8_t polls_left; +#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + } sd; +#endif /* LWIP_TCP */ +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) + /** used for lwip_netconn_do_join_leave_group */ + struct { + API_MSG_M_DEF_C(ip_addr_t, multiaddr); + API_MSG_M_DEF_C(ip_addr_t, netif_addr); + u8_t if_idx; + enum netconn_igmp join_or_leave; + } jl; +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ +#if TCP_LISTEN_BACKLOG + struct { + u8_t backlog; + } lb; +#endif /* TCP_LISTEN_BACKLOG */ + } msg; +#if LWIP_NETCONN_SEM_PER_THREAD + sys_sem_t* op_completed_sem; +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ +}; + +#if LWIP_NETCONN_SEM_PER_THREAD +#define LWIP_API_MSG_SEM(msg) ((msg)->op_completed_sem) +#else /* LWIP_NETCONN_SEM_PER_THREAD */ +#define LWIP_API_MSG_SEM(msg) (&(msg)->conn->op_completed) +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + +#if LWIP_DNS +/** As lwip_netconn_do_gethostbyname requires more arguments but doesn't require a netconn, + it has its own struct (to avoid struct api_msg getting bigger than necessary). + lwip_netconn_do_gethostbyname must be called using tcpip_callback instead of tcpip_apimsg + (see netconn_gethostbyname). */ +struct dns_api_msg { + /** Hostname to query or dotted IP address string */ +#if LWIP_MPU_COMPATIBLE + char name[DNS_MAX_NAME_LENGTH]; +#else /* LWIP_MPU_COMPATIBLE */ + const char *name; +#endif /* LWIP_MPU_COMPATIBLE */ + /** The resolved address is stored here */ + ip_addr_t API_MSG_M_DEF(addr); +#if LWIP_IPV4 && LWIP_IPV6 + /** Type of resolve call */ + u8_t dns_addrtype; +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + /** This semaphore is posted when the name is resolved, the application thread + should wait on it. 
*/ + sys_sem_t API_MSG_M_DEF_SEM(sem); + /** Errors are given back here */ + err_t API_MSG_M_DEF(err); +}; +#endif /* LWIP_DNS */ + +#if LWIP_NETCONN_FULLDUPLEX +int lwip_netconn_is_deallocated_msg(void *msg); +#endif +int lwip_netconn_is_err_msg(void *msg, err_t *err); +void lwip_netconn_do_newconn (void *m); +void lwip_netconn_do_delconn (void *m); +void lwip_netconn_do_bind (void *m); +void lwip_netconn_do_bind_if (void *m); +void lwip_netconn_do_connect (void *m); +void lwip_netconn_do_disconnect (void *m); +void lwip_netconn_do_listen (void *m); +void lwip_netconn_do_send (void *m); +void lwip_netconn_do_recv (void *m); +#if TCP_LISTEN_BACKLOG +void lwip_netconn_do_accepted (void *m); +#endif /* TCP_LISTEN_BACKLOG */ +void lwip_netconn_do_write (void *m); +void lwip_netconn_do_getaddr (void *m); +void lwip_netconn_do_close (void *m); +void lwip_netconn_do_shutdown (void *m); +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +void lwip_netconn_do_join_leave_group(void *m); +void lwip_netconn_do_join_leave_group_netif(void *m); +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ + +#if LWIP_DNS +void lwip_netconn_do_gethostbyname(void *arg); +#endif /* LWIP_DNS */ + +struct netconn* netconn_alloc(enum netconn_type t, netconn_callback callback); +void netconn_free(struct netconn *conn); + +#endif /* LWIP_NETCONN || LWIP_SOCKET */ + +#if LWIP_NETIF_API /* don't build if not configured for use in lwipopts.h */ + +/* netifapi related lwIP internal definitions */ + +#if LWIP_MPU_COMPATIBLE +#define NETIFAPI_IPADDR_DEF(type, m) type m +#else /* LWIP_MPU_COMPATIBLE */ +#define NETIFAPI_IPADDR_DEF(type, m) const type * m +#endif /* LWIP_MPU_COMPATIBLE */ + +typedef void (*netifapi_void_fn)(struct netif *netif); +typedef err_t (*netifapi_errt_fn)(struct netif *netif); + +struct netifapi_msg { + struct tcpip_api_call_data call; + struct netif *netif; + union { + struct { +#if LWIP_IPV4 + NETIFAPI_IPADDR_DEF(ip4_addr_t, ipaddr); + NETIFAPI_IPADDR_DEF(ip4_addr_t, netmask); + NETIFAPI_IPADDR_DEF(ip4_addr_t, gw); +#endif /* LWIP_IPV4 */ + void *state; + netif_init_fn init; + netif_input_fn input; + } add; + struct { + netifapi_void_fn voidfunc; + netifapi_errt_fn errtfunc; + } common; + struct { +#if LWIP_MPU_COMPATIBLE + char name[NETIF_NAMESIZE]; +#else /* LWIP_MPU_COMPATIBLE */ + char *name; +#endif /* LWIP_MPU_COMPATIBLE */ + u8_t index; + } ifs; + } msg; +}; + +#endif /* LWIP_NETIF_API */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_API_MSG_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/priv/mem_priv.h b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/mem_priv.h new file mode 100644 index 0000000..8630d75 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/mem_priv.h @@ -0,0 +1,84 @@ +/** + * @file + * lwIP internal memory implementations (do not use in application code) + */ + +/* + * Copyright (c) 2018 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#ifndef LWIP_HDR_MEM_PRIV_H +#define LWIP_HDR_MEM_PRIV_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "lwip/mem.h" + +#if MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK +/* if MEM_OVERFLOW_CHECK or MEMP_OVERFLOW_CHECK is turned on, we reserve some + * bytes at the beginning and at the end of each element, initialize them as + * 0xcd and check them later. + * If MEM(P)_OVERFLOW_CHECK is >= 2, on every call to mem(p)_malloc or mem(p)_free, + * every single element in each pool/heap is checked! + * This is VERY SLOW but also very helpful. + * MEM_SANITY_REGION_BEFORE and MEM_SANITY_REGION_AFTER can be overridden in + * lwipopts.h to change the amount reserved for checking. */ +#ifndef MEM_SANITY_REGION_BEFORE +#define MEM_SANITY_REGION_BEFORE 16 +#endif /* MEM_SANITY_REGION_BEFORE*/ +#if MEM_SANITY_REGION_BEFORE > 0 +#define MEM_SANITY_REGION_BEFORE_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SANITY_REGION_BEFORE) +#else +#define MEM_SANITY_REGION_BEFORE_ALIGNED 0 +#endif /* MEM_SANITY_REGION_BEFORE*/ +#ifndef MEM_SANITY_REGION_AFTER +#define MEM_SANITY_REGION_AFTER 16 +#endif /* MEM_SANITY_REGION_AFTER*/ +#if MEM_SANITY_REGION_AFTER > 0 +#define MEM_SANITY_REGION_AFTER_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SANITY_REGION_AFTER) +#else +#define MEM_SANITY_REGION_AFTER_ALIGNED 0 +#endif /* MEM_SANITY_REGION_AFTER*/ + +void mem_overflow_init_raw(void *p, size_t size); +void mem_overflow_check_raw(void *p, size_t size, const char *descr1, const char *descr2); + +#endif /* MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_MEMP_PRIV_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_priv.h b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_priv.h new file mode 100644 index 0000000..1f14cb1 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_priv.h @@ -0,0 +1,161 @@ +/** + * @file + * memory pools lwIP internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_MEMP_PRIV_H +#define LWIP_HDR_MEMP_PRIV_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "lwip/mem.h" +#include "lwip/priv/mem_priv.h" + +#if MEMP_OVERFLOW_CHECK + + +/* MEMP_SIZE: save space for struct memp and for sanity check */ +#define MEMP_SIZE (LWIP_MEM_ALIGN_SIZE(sizeof(struct memp)) + MEM_SANITY_REGION_BEFORE_ALIGNED) +#define MEMP_ALIGN_SIZE(x) (LWIP_MEM_ALIGN_SIZE(x) + MEM_SANITY_REGION_AFTER_ALIGNED) + +#else /* MEMP_OVERFLOW_CHECK */ + +/* No sanity checks + * We don't need to preserve the struct memp while not allocated, so we + * can save a little space and set MEMP_SIZE to 0. 
+ */ +#define MEMP_SIZE 0 +#define MEMP_ALIGN_SIZE(x) (LWIP_MEM_ALIGN_SIZE(x)) + +#endif /* MEMP_OVERFLOW_CHECK */ + +#if !MEMP_MEM_MALLOC || MEMP_OVERFLOW_CHECK +struct memp { + struct memp *next; +#if MEMP_OVERFLOW_CHECK + const char *file; + int line; +#endif /* MEMP_OVERFLOW_CHECK */ +}; +#endif /* !MEMP_MEM_MALLOC || MEMP_OVERFLOW_CHECK */ + +#if MEM_USE_POOLS && MEMP_USE_CUSTOM_POOLS +/* Use a helper type to get the start and end of the user "memory pools" for mem_malloc */ +typedef enum { + /* Get the first (via: + MEMP_POOL_HELPER_START = ((u8_t) 1*MEMP_POOL_A + 0*MEMP_POOL_B + 0*MEMP_POOL_C + 0)*/ + MEMP_POOL_HELPER_FIRST = ((u8_t) +#define LWIP_MEMPOOL(name,num,size,desc) +#define LWIP_MALLOC_MEMPOOL_START 1 +#define LWIP_MALLOC_MEMPOOL(num, size) * MEMP_POOL_##size + 0 +#define LWIP_MALLOC_MEMPOOL_END +#include "lwip/priv/memp_std.h" + ) , + /* Get the last (via: + MEMP_POOL_HELPER_END = ((u8_t) 0 + MEMP_POOL_A*0 + MEMP_POOL_B*0 + MEMP_POOL_C*1) */ + MEMP_POOL_HELPER_LAST = ((u8_t) +#define LWIP_MEMPOOL(name,num,size,desc) +#define LWIP_MALLOC_MEMPOOL_START +#define LWIP_MALLOC_MEMPOOL(num, size) 0 + MEMP_POOL_##size * +#define LWIP_MALLOC_MEMPOOL_END 1 +#include "lwip/priv/memp_std.h" + ) +} memp_pool_helper_t; + +/* The actual start and stop values are here (cast them over) + We use this helper type and these defines so we can avoid using const memp_t values */ +#define MEMP_POOL_FIRST ((memp_t) MEMP_POOL_HELPER_FIRST) +#define MEMP_POOL_LAST ((memp_t) MEMP_POOL_HELPER_LAST) +#endif /* MEM_USE_POOLS && MEMP_USE_CUSTOM_POOLS */ + +/** Memory pool descriptor */ +struct memp_desc { +#if defined(LWIP_DEBUG) || MEMP_OVERFLOW_CHECK || LWIP_STATS_DISPLAY + /** Textual description */ + const char *desc; +#endif /* LWIP_DEBUG || MEMP_OVERFLOW_CHECK || LWIP_STATS_DISPLAY */ +#if MEMP_STATS + /** Statistics */ + struct stats_mem *stats; +#endif + + /** Element size */ + u16_t size; + +#if !MEMP_MEM_MALLOC + /** Number of elements */ + u16_t num; + + /** Base address */ + u8_t *base; + + /** First free element of each pool. Elements form a linked list. 
*/ + struct memp **tab; +#endif /* MEMP_MEM_MALLOC */ +}; + +#if defined(LWIP_DEBUG) || MEMP_OVERFLOW_CHECK || LWIP_STATS_DISPLAY +#define DECLARE_LWIP_MEMPOOL_DESC(desc) (desc), +#else +#define DECLARE_LWIP_MEMPOOL_DESC(desc) +#endif + +#if MEMP_STATS +#define LWIP_MEMPOOL_DECLARE_STATS_INSTANCE(name) static struct stats_mem name; +#define LWIP_MEMPOOL_DECLARE_STATS_REFERENCE(name) &name, +#else +#define LWIP_MEMPOOL_DECLARE_STATS_INSTANCE(name) +#define LWIP_MEMPOOL_DECLARE_STATS_REFERENCE(name) +#endif + +void memp_init_pool(const struct memp_desc *desc); + +#if MEMP_OVERFLOW_CHECK +void *memp_malloc_pool_fn(const struct memp_desc* desc, const char* file, const int line); +#define memp_malloc_pool(d) memp_malloc_pool_fn((d), __FILE__, __LINE__) +#else +void *memp_malloc_pool(const struct memp_desc *desc); +#endif +void memp_free_pool(const struct memp_desc* desc, void *mem); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_MEMP_PRIV_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_std.h b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_std.h new file mode 100644 index 0000000..669ad4d --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_std.h @@ -0,0 +1,153 @@ +/** + * @file + * lwIP internal memory pools (do not use in application code) + * This file is deliberately included multiple times: once with empty + * definition of LWIP_MEMPOOL() to handle all includes and multiple times + * to build up various lists of mem pools. + */ + +/* + * SETUP: Make sure we define everything we will need. + * + * We have create three types of pools: + * 1) MEMPOOL - standard pools + * 2) MALLOC_MEMPOOL - to be used by mem_malloc in mem.c + * 3) PBUF_MEMPOOL - a mempool of pbuf's, so include space for the pbuf struct + * + * If the include'r doesn't require any special treatment of each of the types + * above, then will declare #2 & #3 to be just standard mempools. + */ +#ifndef LWIP_MALLOC_MEMPOOL +/* This treats "malloc pools" just like any other pool. + The pools are a little bigger to provide 'size' as the amount of user data. */ +#define LWIP_MALLOC_MEMPOOL(num, size) LWIP_MEMPOOL(POOL_##size, num, (size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper))), "MALLOC_"#size) +#define LWIP_MALLOC_MEMPOOL_START +#define LWIP_MALLOC_MEMPOOL_END +#endif /* LWIP_MALLOC_MEMPOOL */ + +#ifndef LWIP_PBUF_MEMPOOL +/* This treats "pbuf pools" just like any other pool. + * Allocates buffers for a pbuf struct AND a payload size */ +#define LWIP_PBUF_MEMPOOL(name, num, payload, desc) LWIP_MEMPOOL(name, num, (LWIP_MEM_ALIGN_SIZE(sizeof(struct pbuf)) + LWIP_MEM_ALIGN_SIZE(payload)), desc) +#endif /* LWIP_PBUF_MEMPOOL */ + + +/* + * A list of internal pools used by LWIP. + * + * LWIP_MEMPOOL(pool_name, number_elements, element_size, pool_description) + * creates a pool name MEMP_pool_name. 
description is used in stats.c + */ +#if LWIP_RAW +LWIP_MEMPOOL(RAW_PCB, MEMP_NUM_RAW_PCB, sizeof(struct raw_pcb), "RAW_PCB") +#endif /* LWIP_RAW */ + +#if LWIP_UDP +LWIP_MEMPOOL(UDP_PCB, MEMP_NUM_UDP_PCB, sizeof(struct udp_pcb), "UDP_PCB") +#endif /* LWIP_UDP */ + +#if LWIP_TCP +LWIP_MEMPOOL(TCP_PCB, MEMP_NUM_TCP_PCB, sizeof(struct tcp_pcb), "TCP_PCB") +LWIP_MEMPOOL(TCP_PCB_LISTEN, MEMP_NUM_TCP_PCB_LISTEN, sizeof(struct tcp_pcb_listen), "TCP_PCB_LISTEN") +LWIP_MEMPOOL(TCP_SEG, MEMP_NUM_TCP_SEG, sizeof(struct tcp_seg), "TCP_SEG") +#endif /* LWIP_TCP */ + +#if LWIP_ALTCP && LWIP_TCP +LWIP_MEMPOOL(ALTCP_PCB, MEMP_NUM_ALTCP_PCB, sizeof(struct altcp_pcb), "ALTCP_PCB") +#endif /* LWIP_ALTCP && LWIP_TCP */ + +#if LWIP_IPV4 && IP_REASSEMBLY +LWIP_MEMPOOL(REASSDATA, MEMP_NUM_REASSDATA, sizeof(struct ip_reassdata), "REASSDATA") +#endif /* LWIP_IPV4 && IP_REASSEMBLY */ +#if (IP_FRAG && !LWIP_NETIF_TX_SINGLE_PBUF) || (LWIP_IPV6 && LWIP_IPV6_FRAG) +LWIP_MEMPOOL(FRAG_PBUF, MEMP_NUM_FRAG_PBUF, sizeof(struct pbuf_custom_ref),"FRAG_PBUF") +#endif /* IP_FRAG && !LWIP_NETIF_TX_SINGLE_PBUF || (LWIP_IPV6 && LWIP_IPV6_FRAG) */ + +#if LWIP_NETCONN || LWIP_SOCKET +LWIP_MEMPOOL(NETBUF, MEMP_NUM_NETBUF, sizeof(struct netbuf), "NETBUF") +LWIP_MEMPOOL(NETCONN, MEMP_NUM_NETCONN, sizeof(struct netconn), "NETCONN") +#endif /* LWIP_NETCONN || LWIP_SOCKET */ + +#if NO_SYS==0 +LWIP_MEMPOOL(TCPIP_MSG_API, MEMP_NUM_TCPIP_MSG_API, sizeof(struct tcpip_msg), "TCPIP_MSG_API") +#if LWIP_MPU_COMPATIBLE +LWIP_MEMPOOL(API_MSG, MEMP_NUM_API_MSG, sizeof(struct api_msg), "API_MSG") +#if LWIP_DNS +LWIP_MEMPOOL(DNS_API_MSG, MEMP_NUM_DNS_API_MSG, sizeof(struct dns_api_msg), "DNS_API_MSG") +#endif +#if LWIP_SOCKET && !LWIP_TCPIP_CORE_LOCKING +LWIP_MEMPOOL(SOCKET_SETGETSOCKOPT_DATA, MEMP_NUM_SOCKET_SETGETSOCKOPT_DATA, sizeof(struct lwip_setgetsockopt_data), "SOCKET_SETGETSOCKOPT_DATA") +#endif +#if LWIP_SOCKET && (LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL) +LWIP_MEMPOOL(SELECT_CB, MEMP_NUM_SELECT_CB, sizeof(struct lwip_select_cb), "SELECT_CB") +#endif /* LWIP_SOCKET && (LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL) */ +#if LWIP_NETIF_API +LWIP_MEMPOOL(NETIFAPI_MSG, MEMP_NUM_NETIFAPI_MSG, sizeof(struct netifapi_msg), "NETIFAPI_MSG") +#endif +#endif /* LWIP_MPU_COMPATIBLE */ +#if !LWIP_TCPIP_CORE_LOCKING_INPUT +LWIP_MEMPOOL(TCPIP_MSG_INPKT,MEMP_NUM_TCPIP_MSG_INPKT, sizeof(struct tcpip_msg), "TCPIP_MSG_INPKT") +#endif /* !LWIP_TCPIP_CORE_LOCKING_INPUT */ +#endif /* NO_SYS==0 */ + +#if LWIP_IPV4 && LWIP_ARP && ARP_QUEUEING +LWIP_MEMPOOL(ARP_QUEUE, MEMP_NUM_ARP_QUEUE, sizeof(struct etharp_q_entry), "ARP_QUEUE") +#endif /* LWIP_IPV4 && LWIP_ARP && ARP_QUEUEING */ + +#if LWIP_IGMP +LWIP_MEMPOOL(IGMP_GROUP, MEMP_NUM_IGMP_GROUP, sizeof(struct igmp_group), "IGMP_GROUP") +#endif /* LWIP_IGMP */ + +#if LWIP_TIMERS && !LWIP_TIMERS_CUSTOM +LWIP_MEMPOOL(SYS_TIMEOUT, MEMP_NUM_SYS_TIMEOUT, sizeof(struct sys_timeo), "SYS_TIMEOUT") +#endif /* LWIP_TIMERS && !LWIP_TIMERS_CUSTOM */ + +#if LWIP_DNS && LWIP_SOCKET +LWIP_MEMPOOL(NETDB, MEMP_NUM_NETDB, NETDB_ELEM_SIZE, "NETDB") +#endif /* LWIP_DNS && LWIP_SOCKET */ +#if LWIP_DNS && DNS_LOCAL_HOSTLIST && DNS_LOCAL_HOSTLIST_IS_DYNAMIC +LWIP_MEMPOOL(LOCALHOSTLIST, MEMP_NUM_LOCALHOSTLIST, LOCALHOSTLIST_ELEM_SIZE, "LOCALHOSTLIST") +#endif /* LWIP_DNS && DNS_LOCAL_HOSTLIST && DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + +#if LWIP_IPV6 && LWIP_ND6_QUEUEING +LWIP_MEMPOOL(ND6_QUEUE, MEMP_NUM_ND6_QUEUE, sizeof(struct nd6_q_entry), "ND6_QUEUE") +#endif /* LWIP_IPV6 && LWIP_ND6_QUEUEING */ + +#if LWIP_IPV6 && LWIP_IPV6_REASS 
+LWIP_MEMPOOL(IP6_REASSDATA, MEMP_NUM_REASSDATA, sizeof(struct ip6_reassdata), "IP6_REASSDATA") +#endif /* LWIP_IPV6 && LWIP_IPV6_REASS */ + +#if LWIP_IPV6 && LWIP_IPV6_MLD +LWIP_MEMPOOL(MLD6_GROUP, MEMP_NUM_MLD6_GROUP, sizeof(struct mld_group), "MLD6_GROUP") +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + + +/* + * A list of pools of pbuf's used by LWIP. + * + * LWIP_PBUF_MEMPOOL(pool_name, number_elements, pbuf_payload_size, pool_description) + * creates a pool name MEMP_pool_name. description is used in stats.c + * This allocates enough space for the pbuf struct and a payload. + * (Example: pbuf_payload_size=0 allocates only size for the struct) + */ +LWIP_MEMPOOL(PBUF, MEMP_NUM_PBUF, sizeof(struct pbuf), "PBUF_REF/ROM") +LWIP_PBUF_MEMPOOL(PBUF_POOL, PBUF_POOL_SIZE, PBUF_POOL_BUFSIZE, "PBUF_POOL") + + +/* + * Allow for user-defined pools; this must be explicitly set in lwipopts.h + * since the default is to NOT look for lwippools.h + */ +#if MEMP_USE_CUSTOM_POOLS +#include "lwippools.h" +#endif /* MEMP_USE_CUSTOM_POOLS */ + +/* + * REQUIRED CLEANUP: Clear up so we don't get "multiply defined" error later + * (#undef is ignored for something that is not defined) + */ +#undef LWIP_MEMPOOL +#undef LWIP_MALLOC_MEMPOOL +#undef LWIP_MALLOC_MEMPOOL_START +#undef LWIP_MALLOC_MEMPOOL_END +#undef LWIP_PBUF_MEMPOOL diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/priv/nd6_priv.h b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/nd6_priv.h new file mode 100644 index 0000000..cc3007d --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/nd6_priv.h @@ -0,0 +1,142 @@ +/** + * @file + * + * Neighbor discovery and stateless address autoconfiguration for IPv6. + * Aims to be compliant with RFC 4861 (Neighbor discovery) and RFC 4862 + * (Address autoconfiguration). + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_ND6_PRIV_H +#define LWIP_HDR_ND6_PRIV_H + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" + + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_ND6_QUEUEING +/** struct for queueing outgoing packets for unknown address + * defined here to be accessed by memp.h + */ +struct nd6_q_entry { + struct nd6_q_entry *next; + struct pbuf *p; +}; +#endif /* LWIP_ND6_QUEUEING */ + +/** Struct for tables. */ +struct nd6_neighbor_cache_entry { + ip6_addr_t next_hop_address; + struct netif *netif; + u8_t lladdr[NETIF_MAX_HWADDR_LEN]; + /*u32_t pmtu;*/ +#if LWIP_ND6_QUEUEING + /** Pointer to queue of pending outgoing packets on this entry. */ + struct nd6_q_entry *q; +#else /* LWIP_ND6_QUEUEING */ + /** Pointer to a single pending outgoing packet on this entry. */ + struct pbuf *q; +#endif /* LWIP_ND6_QUEUEING */ + u8_t state; + u8_t isrouter; + union { + u32_t reachable_time; /* in seconds */ + u32_t delay_time; /* ticks (ND6_TMR_INTERVAL) */ + u32_t probes_sent; + u32_t stale_time; /* ticks (ND6_TMR_INTERVAL) */ + } counter; +}; + +struct nd6_destination_cache_entry { + ip6_addr_t destination_addr; + ip6_addr_t next_hop_addr; + u16_t pmtu; + u32_t age; +}; + +struct nd6_prefix_list_entry { + ip6_addr_t prefix; + struct netif *netif; + u32_t invalidation_timer; /* in seconds */ +}; + +struct nd6_router_list_entry { + struct nd6_neighbor_cache_entry *neighbor_entry; + u32_t invalidation_timer; /* in seconds */ + u8_t flags; +}; + +enum nd6_neighbor_cache_entry_state { + ND6_NO_ENTRY = 0, + ND6_INCOMPLETE, + ND6_REACHABLE, + ND6_STALE, + ND6_DELAY, + ND6_PROBE +}; + +#define ND6_HOPLIM 255 /* maximum hop limit, required in all ND packets */ + +#define ND6_2HRS 7200 /* two hours, expressed in number of seconds */ + +/* Router tables. */ +/* @todo make these static? and entries accessible through API? */ +extern struct nd6_neighbor_cache_entry neighbor_cache[]; +extern struct nd6_destination_cache_entry destination_cache[]; +extern struct nd6_prefix_list_entry prefix_list[]; +extern struct nd6_router_list_entry default_router_list[]; + +/* Default values, can be updated by a RA message. */ +extern u32_t reachable_time; +extern u32_t retrans_timer; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_ND6_PRIV_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/priv/raw_priv.h b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/raw_priv.h new file mode 100644 index 0000000..d4561d4 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/raw_priv.h @@ -0,0 +1,69 @@ +/** + * @file + * raw API internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_RAW_PRIV_H +#define LWIP_HDR_RAW_PRIV_H + +#include "lwip/opt.h" + +#if LWIP_RAW /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/raw.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** return codes for raw_input */ +typedef enum raw_input_state +{ + RAW_INPUT_NONE = 0, /* pbuf did not match any pcbs */ + RAW_INPUT_EATEN, /* pbuf handed off and delivered to pcb */ + RAW_INPUT_DELIVERED /* pbuf only delivered to pcb (pbuf can still be referenced) */ +} raw_input_state_t; + +/* The following functions are the lower layer interface to RAW. */ +raw_input_state_t raw_input(struct pbuf *p, struct netif *inp); + +void raw_netif_ip_addr_changed(const ip_addr_t* old_addr, const ip_addr_t* new_addr); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_RAW */ + +#endif /* LWIP_HDR_RAW_PRIV_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/priv/sockets_priv.h b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/sockets_priv.h new file mode 100644 index 0000000..d8f9904 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/sockets_priv.h @@ -0,0 +1,175 @@ +/** + * @file + * Sockets API internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2017 Joel Cunningham, Garmin International, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Joel Cunningham + * + */ +#ifndef LWIP_HDR_SOCKETS_PRIV_H +#define LWIP_HDR_SOCKETS_PRIV_H + +#include "lwip/opt.h" + +#if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/err.h" +#include "lwip/sockets.h" +#include "lwip/sys.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define NUM_SOCKETS MEMP_NUM_NETCONN + +/** This is overridable for the rare case where more than 255 threads + * select on the same socket... + */ +#ifndef SELWAIT_T +#define SELWAIT_T u8_t +#endif + +union lwip_sock_lastdata { + struct netbuf *netbuf; + struct pbuf *pbuf; +}; + +/** Contains all internal pointers and states used for a socket */ +struct lwip_sock { + /** sockets currently are built on netconns, each socket has one netconn */ + struct netconn *conn; + /** data that was left from the previous read */ + union lwip_sock_lastdata lastdata; +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL + /** number of times data was received, set by event_callback(), + tested by the receive and select functions */ + s16_t rcvevent; + /** number of times data was ACKed (free send buffer), set by event_callback(), + tested by select */ + u16_t sendevent; + /** error happened for this socket, set by event_callback(), tested by select */ + u16_t errevent; + /** counter of how many threads are waiting for this socket using select */ + SELWAIT_T select_waiting; +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ +#if LWIP_NETCONN_FULLDUPLEX + /* counter of how many threads are using a struct lwip_sock (not the 'int') */ + u8_t fd_used; + /* status of pending close/delete actions */ + u8_t fd_free_pending; +#define LWIP_SOCK_FD_FREE_TCP 1 +#define LWIP_SOCK_FD_FREE_FREE 2 +#endif +}; + +#ifndef set_errno +#define set_errno(err) do { if (err) { errno = (err); } } while(0) +#endif + +#if !LWIP_TCPIP_CORE_LOCKING +/** Maximum optlen used by setsockopt/getsockopt */ +#define LWIP_SETGETSOCKOPT_MAXOPTLEN LWIP_MAX(16, sizeof(struct ifreq)) + +/** This struct is used to pass data to the set/getsockopt_internal + * functions running in tcpip_thread context (only a void* is allowed) */ +struct lwip_setgetsockopt_data { + /** socket index for which to change options */ + int s; + /** level of the option to process */ + int level; + /** name of the option to process */ + int optname; + /** set: value to set the option to + * get: value of the option is stored here */ +#if LWIP_MPU_COMPATIBLE + u8_t optval[LWIP_SETGETSOCKOPT_MAXOPTLEN]; +#else + union { + void *p; + const void *pc; + } optval; +#endif + /** size of *optval */ + socklen_t optlen; + /** if an error occurs, it is temporarily stored here */ + int err; + /** semaphore to wake up the calling task */ + void* completed_sem; +}; +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + +#ifdef __cplusplus +} +#endif + +struct lwip_sock* lwip_socket_dbg_get_socket(int fd); + +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL + +#if LWIP_NETCONN_SEM_PER_THREAD +#define SELECT_SEM_T sys_sem_t* 
+#define SELECT_SEM_PTR(sem) (sem) +#else /* LWIP_NETCONN_SEM_PER_THREAD */ +#define SELECT_SEM_T sys_sem_t +#define SELECT_SEM_PTR(sem) (&(sem)) +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + +/** Description for a task waiting in select */ +struct lwip_select_cb { + /** Pointer to the next waiting task */ + struct lwip_select_cb *next; + /** Pointer to the previous waiting task */ + struct lwip_select_cb *prev; +#if LWIP_SOCKET_SELECT + /** readset passed to select */ + fd_set *readset; + /** writeset passed to select */ + fd_set *writeset; + /** unimplemented: exceptset passed to select */ + fd_set *exceptset; +#endif /* LWIP_SOCKET_SELECT */ +#if LWIP_SOCKET_POLL + /** fds passed to poll; NULL if select */ + struct pollfd *poll_fds; + /** nfds passed to poll; 0 if select */ + nfds_t poll_nfds; +#endif /* LWIP_SOCKET_POLL */ + /** don't signal the same semaphore twice: set to 1 when signalled */ + int sem_signalled; + /** semaphore to wake up a task waiting for select */ + SELECT_SEM_T sem; +}; +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ + +#endif /* LWIP_SOCKET */ + +#endif /* LWIP_HDR_SOCKETS_PRIV_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcp_priv.h b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcp_priv.h new file mode 100644 index 0000000..72f9126 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcp_priv.h @@ -0,0 +1,523 @@ +/** + * @file + * TCP internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_TCP_PRIV_H +#define LWIP_HDR_TCP_PRIV_H + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/tcp.h" +#include "lwip/mem.h" +#include "lwip/pbuf.h" +#include "lwip/ip.h" +#include "lwip/icmp.h" +#include "lwip/err.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/prot/tcp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Functions for interfacing with TCP: */ + +/* Lower layer interface to TCP: */ +void tcp_init (void); /* Initialize this module. */ +void tcp_tmr (void); /* Must be called every + TCP_TMR_INTERVAL + ms. (Typically 250 ms). */ +/* It is also possible to call these two functions at the right + intervals (instead of calling tcp_tmr()). */ +void tcp_slowtmr (void); +void tcp_fasttmr (void); + +/* Call this from a netif driver (watch out for threading issues!) that has + returned a memory error on transmit and now has free buffers to send more. + This iterates all active pcbs that had an error and tries to call + tcp_output, so use this with care as it might slow down the system. */ +void tcp_txnow (void); + +/* Only used by IP to pass a TCP segment to TCP: */ +void tcp_input (struct pbuf *p, struct netif *inp); +/* Used within the TCP code only: */ +struct tcp_pcb * tcp_alloc (u8_t prio); +void tcp_free (struct tcp_pcb *pcb); +void tcp_abandon (struct tcp_pcb *pcb, int reset); +err_t tcp_send_empty_ack(struct tcp_pcb *pcb); +err_t tcp_rexmit (struct tcp_pcb *pcb); +err_t tcp_rexmit_rto_prepare(struct tcp_pcb *pcb); +void tcp_rexmit_rto_commit(struct tcp_pcb *pcb); +void tcp_rexmit_rto (struct tcp_pcb *pcb); +void tcp_rexmit_fast (struct tcp_pcb *pcb); +u32_t tcp_update_rcv_ann_wnd(struct tcp_pcb *pcb); +err_t tcp_process_refused_data(struct tcp_pcb *pcb); + +/** + * This is the Nagle algorithm: try to combine user data to send as few TCP + * segments as possible. Only send if + * - no previously transmitted data on the connection remains unacknowledged or + * - the TF_NODELAY flag is set (nagle algorithm turned off for this pcb) or + * - the only unsent segment is at least pcb->mss bytes long (or there is more + * than one unsent segment - with lwIP, this can happen although unsent->len < mss) + * - or if we are in fast-retransmit (TF_INFR) + */ +#define tcp_do_output_nagle(tpcb) ((((tpcb)->unacked == NULL) || \ + ((tpcb)->flags & (TF_NODELAY | TF_INFR)) || \ + (((tpcb)->unsent != NULL) && (((tpcb)->unsent->next != NULL) || \ + ((tpcb)->unsent->len >= (tpcb)->mss))) || \ + ((tcp_sndbuf(tpcb) == 0) || (tcp_sndqueuelen(tpcb) >= TCP_SND_QUEUELEN)) \ + ) ? 1 : 0) +#define tcp_output_nagle(tpcb) (tcp_do_output_nagle(tpcb) ? tcp_output(tpcb) : ERR_OK) + + +#define TCP_SEQ_LT(a,b) ((s32_t)((u32_t)(a) - (u32_t)(b)) < 0) +#define TCP_SEQ_LEQ(a,b) ((s32_t)((u32_t)(a) - (u32_t)(b)) <= 0) +#define TCP_SEQ_GT(a,b) ((s32_t)((u32_t)(a) - (u32_t)(b)) > 0) +#define TCP_SEQ_GEQ(a,b) ((s32_t)((u32_t)(a) - (u32_t)(b)) >= 0) +/* is b<=a<=c? */ +#if 0 /* see bug #10548 */ +#define TCP_SEQ_BETWEEN(a,b,c) ((c)-(b) >= (a)-(b)) +#endif +#define TCP_SEQ_BETWEEN(a,b,c) (TCP_SEQ_GEQ(a,b) && TCP_SEQ_LEQ(a,c)) + +#ifndef TCP_TMR_INTERVAL +#define TCP_TMR_INTERVAL 250 /* The TCP timer interval in milliseconds. 
*/ +#endif /* TCP_TMR_INTERVAL */ + +#ifndef TCP_FAST_INTERVAL +#define TCP_FAST_INTERVAL TCP_TMR_INTERVAL /* the fine grained timeout in milliseconds */ +#endif /* TCP_FAST_INTERVAL */ + +#ifndef TCP_SLOW_INTERVAL +#define TCP_SLOW_INTERVAL (2*TCP_TMR_INTERVAL) /* the coarse grained timeout in milliseconds */ +#endif /* TCP_SLOW_INTERVAL */ + +#define TCP_FIN_WAIT_TIMEOUT 20000 /* milliseconds */ +#define TCP_SYN_RCVD_TIMEOUT 20000 /* milliseconds */ + +#define TCP_OOSEQ_TIMEOUT 6U /* x RTO */ + +#ifndef TCP_MSL +#define TCP_MSL 60000UL /* The maximum segment lifetime in milliseconds */ +#endif + +/* Keepalive values, compliant with RFC 1122. Don't change this unless you know what you're doing */ +#ifndef TCP_KEEPIDLE_DEFAULT +#define TCP_KEEPIDLE_DEFAULT 7200000UL /* Default KEEPALIVE timer in milliseconds */ +#endif + +#ifndef TCP_KEEPINTVL_DEFAULT +#define TCP_KEEPINTVL_DEFAULT 75000UL /* Default Time between KEEPALIVE probes in milliseconds */ +#endif + +#ifndef TCP_KEEPCNT_DEFAULT +#define TCP_KEEPCNT_DEFAULT 9U /* Default Counter for KEEPALIVE probes */ +#endif + +#define TCP_MAXIDLE TCP_KEEPCNT_DEFAULT * TCP_KEEPINTVL_DEFAULT /* Maximum KEEPALIVE probe time */ + +#define TCP_TCPLEN(seg) ((seg)->len + (((TCPH_FLAGS((seg)->tcphdr) & (TCP_FIN | TCP_SYN)) != 0) ? 1U : 0U)) + +/** Flags used on input processing, not on pcb->flags +*/ +#define TF_RESET (u8_t)0x08U /* Connection was reset. */ +#define TF_CLOSED (u8_t)0x10U /* Connection was successfully closed. */ +#define TF_GOT_FIN (u8_t)0x20U /* Connection was closed by the remote end. */ + + +#if LWIP_EVENT_API + +#define TCP_EVENT_ACCEPT(lpcb,pcb,arg,err,ret) ret = lwip_tcp_event(arg, (pcb),\ + LWIP_EVENT_ACCEPT, NULL, 0, err) +#define TCP_EVENT_SENT(pcb,space,ret) ret = lwip_tcp_event((pcb)->callback_arg, (pcb),\ + LWIP_EVENT_SENT, NULL, space, ERR_OK) +#define TCP_EVENT_RECV(pcb,p,err,ret) ret = lwip_tcp_event((pcb)->callback_arg, (pcb),\ + LWIP_EVENT_RECV, (p), 0, (err)) +#define TCP_EVENT_CLOSED(pcb,ret) ret = lwip_tcp_event((pcb)->callback_arg, (pcb),\ + LWIP_EVENT_RECV, NULL, 0, ERR_OK) +#define TCP_EVENT_CONNECTED(pcb,err,ret) ret = lwip_tcp_event((pcb)->callback_arg, (pcb),\ + LWIP_EVENT_CONNECTED, NULL, 0, (err)) +#define TCP_EVENT_POLL(pcb,ret) do { if ((pcb)->state != SYN_RCVD) { \ + ret = lwip_tcp_event((pcb)->callback_arg, (pcb), LWIP_EVENT_POLL, NULL, 0, ERR_OK); \ + } else { \ + ret = ERR_ARG; } } while(0) +/* For event API, last state SYN_RCVD must be excluded here: the application + has not seen this pcb, yet! 
*/ +#define TCP_EVENT_ERR(last_state,errf,arg,err) do { if (last_state != SYN_RCVD) { \ + lwip_tcp_event((arg), NULL, LWIP_EVENT_ERR, NULL, 0, (err)); } } while(0) + +#else /* LWIP_EVENT_API */ + +#define TCP_EVENT_ACCEPT(lpcb,pcb,arg,err,ret) \ + do { \ + if((lpcb)->accept != NULL) \ + (ret) = (lpcb)->accept((arg),(pcb),(err)); \ + else (ret) = ERR_ARG; \ + } while (0) + +#define TCP_EVENT_SENT(pcb,space,ret) \ + do { \ + if((pcb)->sent != NULL) \ + (ret) = (pcb)->sent((pcb)->callback_arg,(pcb),(space)); \ + else (ret) = ERR_OK; \ + } while (0) + +#define TCP_EVENT_RECV(pcb,p,err,ret) \ + do { \ + if((pcb)->recv != NULL) { \ + (ret) = (pcb)->recv((pcb)->callback_arg,(pcb),(p),(err));\ + } else { \ + (ret) = tcp_recv_null(NULL, (pcb), (p), (err)); \ + } \ + } while (0) + +#define TCP_EVENT_CLOSED(pcb,ret) \ + do { \ + if(((pcb)->recv != NULL)) { \ + (ret) = (pcb)->recv((pcb)->callback_arg,(pcb),NULL,ERR_OK);\ + } else { \ + (ret) = ERR_OK; \ + } \ + } while (0) + +#define TCP_EVENT_CONNECTED(pcb,err,ret) \ + do { \ + if((pcb)->connected != NULL) \ + (ret) = (pcb)->connected((pcb)->callback_arg,(pcb),(err)); \ + else (ret) = ERR_OK; \ + } while (0) + +#define TCP_EVENT_POLL(pcb,ret) \ + do { \ + if((pcb)->poll != NULL) \ + (ret) = (pcb)->poll((pcb)->callback_arg,(pcb)); \ + else (ret) = ERR_OK; \ + } while (0) + +#define TCP_EVENT_ERR(last_state,errf,arg,err) \ + do { \ + LWIP_UNUSED_ARG(last_state); \ + if((errf) != NULL) \ + (errf)((arg),(err)); \ + } while (0) + +#endif /* LWIP_EVENT_API */ + +/** Enabled extra-check for TCP_OVERSIZE if LWIP_DEBUG is enabled */ +#if TCP_OVERSIZE && defined(LWIP_DEBUG) +#define TCP_OVERSIZE_DBGCHECK 1 +#else +#define TCP_OVERSIZE_DBGCHECK 0 +#endif + +/** Don't generate checksum on copy if CHECKSUM_GEN_TCP is disabled */ +#define TCP_CHECKSUM_ON_COPY (LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_TCP) + +/* This structure represents a TCP segment on the unsent, unacked and ooseq queues */ +struct tcp_seg { + struct tcp_seg *next; /* used when putting segments on a queue */ + struct pbuf *p; /* buffer containing data + TCP header */ + u16_t len; /* the TCP length of this segment */ +#if TCP_OVERSIZE_DBGCHECK + u16_t oversize_left; /* Extra bytes available at the end of the last + pbuf in unsent (used for asserting vs. + tcp_pcb.unsent_oversize only) */ +#endif /* TCP_OVERSIZE_DBGCHECK */ +#if TCP_CHECKSUM_ON_COPY + u16_t chksum; + u8_t chksum_swapped; +#endif /* TCP_CHECKSUM_ON_COPY */ + u8_t flags; +#define TF_SEG_OPTS_MSS (u8_t)0x01U /* Include MSS option (only used in SYN segments) */ +#define TF_SEG_OPTS_TS (u8_t)0x02U /* Include timestamp option. 
*/ +#define TF_SEG_DATA_CHECKSUMMED (u8_t)0x04U /* ALL data (not the header) is + checksummed into 'chksum' */ +#define TF_SEG_OPTS_WND_SCALE (u8_t)0x08U /* Include WND SCALE option (only used in SYN segments) */ +#define TF_SEG_OPTS_SACK_PERM (u8_t)0x10U /* Include SACK Permitted option (only used in SYN segments) */ + struct tcp_hdr *tcphdr; /* the TCP header */ +}; + +#define LWIP_TCP_OPT_EOL 0 +#define LWIP_TCP_OPT_NOP 1 +#define LWIP_TCP_OPT_MSS 2 +#define LWIP_TCP_OPT_WS 3 +#define LWIP_TCP_OPT_SACK_PERM 4 +#define LWIP_TCP_OPT_TS 8 + +#define LWIP_TCP_OPT_LEN_MSS 4 +#if LWIP_TCP_TIMESTAMPS +#define LWIP_TCP_OPT_LEN_TS 10 +#define LWIP_TCP_OPT_LEN_TS_OUT 12 /* aligned for output (includes NOP padding) */ +#else +#define LWIP_TCP_OPT_LEN_TS_OUT 0 +#endif +#if LWIP_WND_SCALE +#define LWIP_TCP_OPT_LEN_WS 3 +#define LWIP_TCP_OPT_LEN_WS_OUT 4 /* aligned for output (includes NOP padding) */ +#else +#define LWIP_TCP_OPT_LEN_WS_OUT 0 +#endif + +#if LWIP_TCP_SACK_OUT +#define LWIP_TCP_OPT_LEN_SACK_PERM 2 +#define LWIP_TCP_OPT_LEN_SACK_PERM_OUT 4 /* aligned for output (includes NOP padding) */ +#else +#define LWIP_TCP_OPT_LEN_SACK_PERM_OUT 0 +#endif + +#define LWIP_TCP_OPT_LENGTH(flags) \ + ((flags) & TF_SEG_OPTS_MSS ? LWIP_TCP_OPT_LEN_MSS : 0) + \ + ((flags) & TF_SEG_OPTS_TS ? LWIP_TCP_OPT_LEN_TS_OUT : 0) + \ + ((flags) & TF_SEG_OPTS_WND_SCALE ? LWIP_TCP_OPT_LEN_WS_OUT : 0) + \ + ((flags) & TF_SEG_OPTS_SACK_PERM ? LWIP_TCP_OPT_LEN_SACK_PERM_OUT : 0) + +/** This returns a TCP header option for MSS in an u32_t */ +#define TCP_BUILD_MSS_OPTION(mss) lwip_htonl(0x02040000 | ((mss) & 0xFFFF)) + +#if LWIP_WND_SCALE +#define TCPWNDSIZE_F U32_F +#define TCPWND_MAX 0xFFFFFFFFU +#define TCPWND_CHECK16(x) LWIP_ASSERT("window size > 0xFFFF", (x) <= 0xFFFF) +#define TCPWND_MIN16(x) ((u16_t)LWIP_MIN((x), 0xFFFF)) +#else /* LWIP_WND_SCALE */ +#define TCPWNDSIZE_F U16_F +#define TCPWND_MAX 0xFFFFU +#define TCPWND_CHECK16(x) +#define TCPWND_MIN16(x) x +#endif /* LWIP_WND_SCALE */ + +/* Global variables: */ +extern struct tcp_pcb *tcp_input_pcb; +extern u32_t tcp_ticks; +extern u8_t tcp_active_pcbs_changed; + +/* The TCP PCB lists. */ +union tcp_listen_pcbs_t { /* List of all TCP PCBs in LISTEN state. */ + struct tcp_pcb_listen *listen_pcbs; + struct tcp_pcb *pcbs; +}; +extern struct tcp_pcb *tcp_bound_pcbs; +extern union tcp_listen_pcbs_t tcp_listen_pcbs; +extern struct tcp_pcb *tcp_active_pcbs; /* List of all TCP PCBs that are in a + state in which they accept or send + data. */ +extern struct tcp_pcb *tcp_tw_pcbs; /* List of all TCP PCBs in TIME-WAIT. */ + +#define NUM_TCP_PCB_LISTS_NO_TIME_WAIT 3 +#define NUM_TCP_PCB_LISTS 4 +extern struct tcp_pcb ** const tcp_pcb_lists[NUM_TCP_PCB_LISTS]; + +/* Axioms about the above lists: + 1) Every TCP PCB that is not CLOSED is in one of the lists. + 2) A PCB is only in one of the lists. + 3) All PCBs in the tcp_listen_pcbs list is in LISTEN state. + 4) All PCBs in the tcp_tw_pcbs list is in TIME-WAIT state. +*/ +/* Define two macros, TCP_REG and TCP_RMV that registers a TCP PCB + with a PCB list or removes a PCB from a list, respectively. 
*/ +#ifndef TCP_DEBUG_PCB_LISTS +#define TCP_DEBUG_PCB_LISTS 0 +#endif +#if TCP_DEBUG_PCB_LISTS +#define TCP_REG(pcbs, npcb) do {\ + struct tcp_pcb *tcp_tmp_pcb; \ + LWIP_DEBUGF(TCP_DEBUG, ("TCP_REG %p local port %"U16_F"\n", (void *)(npcb), (npcb)->local_port)); \ + for (tcp_tmp_pcb = *(pcbs); \ + tcp_tmp_pcb != NULL; \ + tcp_tmp_pcb = tcp_tmp_pcb->next) { \ + LWIP_ASSERT("TCP_REG: already registered\n", tcp_tmp_pcb != (npcb)); \ + } \ + LWIP_ASSERT("TCP_REG: pcb->state != CLOSED", ((pcbs) == &tcp_bound_pcbs) || ((npcb)->state != CLOSED)); \ + (npcb)->next = *(pcbs); \ + LWIP_ASSERT("TCP_REG: npcb->next != npcb", (npcb)->next != (npcb)); \ + *(pcbs) = (npcb); \ + LWIP_ASSERT("TCP_REG: tcp_pcbs sane", tcp_pcbs_sane()); \ + tcp_timer_needed(); \ + } while(0) +#define TCP_RMV(pcbs, npcb) do { \ + struct tcp_pcb *tcp_tmp_pcb; \ + LWIP_ASSERT("TCP_RMV: pcbs != NULL", *(pcbs) != NULL); \ + LWIP_DEBUGF(TCP_DEBUG, ("TCP_RMV: removing %p from %p\n", (void *)(npcb), (void *)(*(pcbs)))); \ + if(*(pcbs) == (npcb)) { \ + *(pcbs) = (*pcbs)->next; \ + } else for (tcp_tmp_pcb = *(pcbs); tcp_tmp_pcb != NULL; tcp_tmp_pcb = tcp_tmp_pcb->next) { \ + if(tcp_tmp_pcb->next == (npcb)) { \ + tcp_tmp_pcb->next = (npcb)->next; \ + break; \ + } \ + } \ + (npcb)->next = NULL; \ + LWIP_ASSERT("TCP_RMV: tcp_pcbs sane", tcp_pcbs_sane()); \ + LWIP_DEBUGF(TCP_DEBUG, ("TCP_RMV: removed %p from %p\n", (void *)(npcb), (void *)(*(pcbs)))); \ + } while(0) + +#else /* LWIP_DEBUG */ + +#define TCP_REG(pcbs, npcb) \ + do { \ + (npcb)->next = *pcbs; \ + *(pcbs) = (npcb); \ + tcp_timer_needed(); \ + } while (0) + +#define TCP_RMV(pcbs, npcb) \ + do { \ + if(*(pcbs) == (npcb)) { \ + (*(pcbs)) = (*pcbs)->next; \ + } \ + else { \ + struct tcp_pcb *tcp_tmp_pcb; \ + for (tcp_tmp_pcb = *pcbs; \ + tcp_tmp_pcb != NULL; \ + tcp_tmp_pcb = tcp_tmp_pcb->next) { \ + if(tcp_tmp_pcb->next == (npcb)) { \ + tcp_tmp_pcb->next = (npcb)->next; \ + break; \ + } \ + } \ + } \ + (npcb)->next = NULL; \ + } while(0) + +#endif /* LWIP_DEBUG */ + +#define TCP_REG_ACTIVE(npcb) \ + do { \ + TCP_REG(&tcp_active_pcbs, npcb); \ + tcp_active_pcbs_changed = 1; \ + } while (0) + +#define TCP_RMV_ACTIVE(npcb) \ + do { \ + TCP_RMV(&tcp_active_pcbs, npcb); \ + tcp_active_pcbs_changed = 1; \ + } while (0) + +#define TCP_PCB_REMOVE_ACTIVE(pcb) \ + do { \ + tcp_pcb_remove(&tcp_active_pcbs, pcb); \ + tcp_active_pcbs_changed = 1; \ + } while (0) + + +/* Internal functions: */ +struct tcp_pcb *tcp_pcb_copy(struct tcp_pcb *pcb); +void tcp_pcb_purge(struct tcp_pcb *pcb); +void tcp_pcb_remove(struct tcp_pcb **pcblist, struct tcp_pcb *pcb); + +void tcp_segs_free(struct tcp_seg *seg); +void tcp_seg_free(struct tcp_seg *seg); +struct tcp_seg *tcp_seg_copy(struct tcp_seg *seg); + +#define tcp_ack(pcb) \ + do { \ + if((pcb)->flags & TF_ACK_DELAY) { \ + tcp_clear_flags(pcb, TF_ACK_DELAY); \ + tcp_ack_now(pcb); \ + } \ + else { \ + tcp_set_flags(pcb, TF_ACK_DELAY); \ + } \ + } while (0) + +#define tcp_ack_now(pcb) \ + tcp_set_flags(pcb, TF_ACK_NOW) + +err_t tcp_send_fin(struct tcp_pcb *pcb); +err_t tcp_enqueue_flags(struct tcp_pcb *pcb, u8_t flags); + +void tcp_rexmit_seg(struct tcp_pcb *pcb, struct tcp_seg *seg); + +void tcp_rst(const struct tcp_pcb* pcb, u32_t seqno, u32_t ackno, + const ip_addr_t *local_ip, const ip_addr_t *remote_ip, + u16_t local_port, u16_t remote_port); + +u32_t tcp_next_iss(struct tcp_pcb *pcb); + +err_t tcp_keepalive(struct tcp_pcb *pcb); +err_t tcp_split_unsent_seg(struct tcp_pcb *pcb, u16_t split); +err_t tcp_zero_window_probe(struct tcp_pcb *pcb); +void 
tcp_trigger_input_pcb_close(void); + +#if TCP_CALCULATE_EFF_SEND_MSS +u16_t tcp_eff_send_mss_netif(u16_t sendmss, struct netif *outif, + const ip_addr_t *dest); +#define tcp_eff_send_mss(sendmss, src, dest) \ + tcp_eff_send_mss_netif(sendmss, ip_route(src, dest), dest) +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + +#if LWIP_CALLBACK_API +err_t tcp_recv_null(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err); +#endif /* LWIP_CALLBACK_API */ + +#if TCP_DEBUG || TCP_INPUT_DEBUG || TCP_OUTPUT_DEBUG +void tcp_debug_print(struct tcp_hdr *tcphdr); +void tcp_debug_print_flags(u8_t flags); +void tcp_debug_print_state(enum tcp_state s); +void tcp_debug_print_pcbs(void); +s16_t tcp_pcbs_sane(void); +#else +# define tcp_debug_print(tcphdr) +# define tcp_debug_print_flags(flags) +# define tcp_debug_print_state(s) +# define tcp_debug_print_pcbs() +# define tcp_pcbs_sane() 1 +#endif /* TCP_DEBUG */ + +/** External function (implemented in timers.c), called when TCP detects + * that a timer is needed (i.e. active- or time-wait-pcb found). */ +void tcp_timer_needed(void); + +void tcp_netif_ip_addr_changed(const ip_addr_t* old_addr, const ip_addr_t* new_addr); + +#if TCP_QUEUE_OOSEQ +void tcp_free_ooseq(struct tcp_pcb *pcb); +#endif + +#if LWIP_TCP_PCB_NUM_EXT_ARGS +err_t tcp_ext_arg_invoke_callbacks_passive_open(struct tcp_pcb_listen *lpcb, struct tcp_pcb *cpcb); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_TCP */ + +#endif /* LWIP_HDR_TCP_PRIV_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcpip_priv.h b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcpip_priv.h new file mode 100644 index 0000000..74be634 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcpip_priv.h @@ -0,0 +1,170 @@ +/** + * @file + * TCPIP API internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_TCPIP_PRIV_H +#define LWIP_HDR_TCPIP_PRIV_H + +#include "lwip/opt.h" + +#if !NO_SYS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/tcpip.h" +#include "lwip/sys.h" +#include "lwip/timeouts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct pbuf; +struct netif; + +#if LWIP_MPU_COMPATIBLE +#define API_VAR_REF(name) (*(name)) +#define API_VAR_DECLARE(type, name) type * name +#define API_VAR_ALLOC_EXT(type, pool, name, errorblock) do { \ + name = (type *)memp_malloc(pool); \ + if (name == NULL) { \ + errorblock; \ + } \ + } while(0) +#define API_VAR_ALLOC(type, pool, name, errorval) API_VAR_ALLOC_EXT(type, pool, name, return errorval) +#define API_VAR_ALLOC_POOL(type, pool, name, errorval) do { \ + name = (type *)LWIP_MEMPOOL_ALLOC(pool); \ + if (name == NULL) { \ + return errorval; \ + } \ + } while(0) +#define API_VAR_FREE(pool, name) memp_free(pool, name) +#define API_VAR_FREE_POOL(pool, name) LWIP_MEMPOOL_FREE(pool, name) +#define API_EXPR_REF(expr) (&(expr)) +#if LWIP_NETCONN_SEM_PER_THREAD +#define API_EXPR_REF_SEM(expr) (expr) +#else +#define API_EXPR_REF_SEM(expr) API_EXPR_REF(expr) +#endif +#define API_EXPR_DEREF(expr) expr +#define API_MSG_M_DEF(m) m +#define API_MSG_M_DEF_C(t, m) t m +#else /* LWIP_MPU_COMPATIBLE */ +#define API_VAR_REF(name) name +#define API_VAR_DECLARE(type, name) type name +#define API_VAR_ALLOC_EXT(type, pool, name, errorblock) +#define API_VAR_ALLOC(type, pool, name, errorval) +#define API_VAR_ALLOC_POOL(type, pool, name, errorval) +#define API_VAR_FREE(pool, name) +#define API_VAR_FREE_POOL(pool, name) +#define API_EXPR_REF(expr) expr +#define API_EXPR_REF_SEM(expr) API_EXPR_REF(expr) +#define API_EXPR_DEREF(expr) (*(expr)) +#define API_MSG_M_DEF(m) *m +#define API_MSG_M_DEF_C(t, m) const t * m +#endif /* LWIP_MPU_COMPATIBLE */ + +err_t tcpip_send_msg_wait_sem(tcpip_callback_fn fn, void *apimsg, sys_sem_t* sem); + +struct tcpip_api_call_data +{ +#if !LWIP_TCPIP_CORE_LOCKING + err_t err; +#if !LWIP_NETCONN_SEM_PER_THREAD + sys_sem_t sem; +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ +#else /* !LWIP_TCPIP_CORE_LOCKING */ + u8_t dummy; /* avoid empty struct :-( */ +#endif /* !LWIP_TCPIP_CORE_LOCKING */ +}; +typedef err_t (*tcpip_api_call_fn)(struct tcpip_api_call_data* call); +err_t tcpip_api_call(tcpip_api_call_fn fn, struct tcpip_api_call_data *call); + +enum tcpip_msg_type { +#if !LWIP_TCPIP_CORE_LOCKING + TCPIP_MSG_API, + TCPIP_MSG_API_CALL, +#endif /* !LWIP_TCPIP_CORE_LOCKING */ +#if !LWIP_TCPIP_CORE_LOCKING_INPUT + TCPIP_MSG_INPKT, +#endif /* !LWIP_TCPIP_CORE_LOCKING_INPUT */ +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS + TCPIP_MSG_TIMEOUT, + TCPIP_MSG_UNTIMEOUT, +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + TCPIP_MSG_CALLBACK, + TCPIP_MSG_CALLBACK_STATIC +}; + +struct tcpip_msg { + enum tcpip_msg_type type; + union { +#if !LWIP_TCPIP_CORE_LOCKING + struct { + tcpip_callback_fn function; + void* msg; + } api_msg; + struct { + tcpip_api_call_fn function; + struct tcpip_api_call_data *arg; + sys_sem_t *sem; + } api_call; +#endif /* LWIP_TCPIP_CORE_LOCKING */ +#if !LWIP_TCPIP_CORE_LOCKING_INPUT + struct { + struct pbuf *p; + struct netif *netif; + netif_input_fn input_fn; + } inp; +#endif /* !LWIP_TCPIP_CORE_LOCKING_INPUT */ + struct { + tcpip_callback_fn function; + void *ctx; + } cb; +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS + struct { + u32_t msecs; + sys_timeout_handler h; + void *arg; + } tmo; +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + } msg; +}; + +#ifdef __cplusplus 
+} +#endif + +#endif /* !NO_SYS */ + +#endif /* LWIP_HDR_TCPIP_PRIV_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/autoip.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/autoip.h new file mode 100644 index 0000000..fd3af8a --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/autoip.h @@ -0,0 +1,78 @@ +/** + * @file + * AutoIP protocol definitions + */ + +/* + * + * Copyright (c) 2007 Dominik Spies + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Dominik Spies + * + * This is a AutoIP implementation for the lwIP TCP/IP stack. It aims to conform + * with RFC 3927. + * + */ + +#ifndef LWIP_HDR_PROT_AUTOIP_H +#define LWIP_HDR_PROT_AUTOIP_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* 169.254.0.0 */ +#define AUTOIP_NET 0xA9FE0000 +/* 169.254.1.0 */ +#define AUTOIP_RANGE_START (AUTOIP_NET | 0x0100) +/* 169.254.254.255 */ +#define AUTOIP_RANGE_END (AUTOIP_NET | 0xFEFF) + +/* RFC 3927 Constants */ +#define PROBE_WAIT 1 /* second (initial random delay) */ +#define PROBE_MIN 1 /* second (minimum delay till repeated probe) */ +#define PROBE_MAX 2 /* seconds (maximum delay till repeated probe) */ +#define PROBE_NUM 3 /* (number of probe packets) */ +#define ANNOUNCE_NUM 2 /* (number of announcement packets) */ +#define ANNOUNCE_INTERVAL 2 /* seconds (time between announcement packets) */ +#define ANNOUNCE_WAIT 2 /* seconds (delay before announcing) */ +#define MAX_CONFLICTS 10 /* (max conflicts before rate limiting) */ +#define RATE_LIMIT_INTERVAL 60 /* seconds (delay between successive attempts) */ +#define DEFEND_INTERVAL 10 /* seconds (min. 
wait between defensive ARPs) */ + +/* AutoIP client states */ +typedef enum { + AUTOIP_STATE_OFF = 0, + AUTOIP_STATE_PROBING = 1, + AUTOIP_STATE_ANNOUNCING = 2, + AUTOIP_STATE_BOUND = 3 +} autoip_state_enum_t; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_AUTOIP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp.h new file mode 100644 index 0000000..ab18dca --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp.h @@ -0,0 +1,178 @@ +/** + * @file + * DHCP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Leon Woestenberg + * Copyright (c) 2001-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
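As a quick illustration of the AutoIP range and constants defined above (an editorial sketch, not part of the lwIP sources; the helper name autoip_candidate_from_mac is hypothetical), a candidate link-local address can be derived from the MAC address and clamped into AUTOIP_RANGE_START..AUTOIP_RANGE_END; lwIP's own autoip.c seeds its candidate from the interface MAC in a similar spirit.

/* Sketch only: derive a 169.254.x.y candidate from a MAC address. */
#include <stdint.h>
#include <stdio.h>

/* Values mirrored from lwip/prot/autoip.h above, for a standalone build. */
#define AUTOIP_NET         0xA9FE0000UL
#define AUTOIP_RANGE_START (AUTOIP_NET | 0x0100)
#define AUTOIP_RANGE_END   (AUTOIP_NET | 0xFEFF)

static uint32_t autoip_candidate_from_mac(const uint8_t mac[6], uint16_t tries)
{
    /* Simple seed from the last two MAC bytes plus the conflict counter. */
    uint32_t addr = (uint32_t)(AUTOIP_NET | ((uint32_t)mac[4] << 8) | mac[5]);
    addr += tries;
    /* Wrap back into the valid host range 169.254.1.0 .. 169.254.254.255. */
    if (addr < AUTOIP_RANGE_START || addr > AUTOIP_RANGE_END) {
        addr = AUTOIP_RANGE_START +
               (addr % (AUTOIP_RANGE_END - AUTOIP_RANGE_START + 1));
    }
    return addr;
}

int main(void)
{
    const uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 };
    uint32_t a = autoip_candidate_from_mac(mac, 0);
    printf("candidate: %u.%u.%u.%u\n",
           (unsigned)((a >> 24) & 0xFF), (unsigned)((a >> 16) & 0xFF),
           (unsigned)((a >> 8) & 0xFF), (unsigned)(a & 0xFF));
    return 0;
}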
+ * + * Author: Leon Woestenberg + * + */ +#ifndef LWIP_HDR_PROT_DHCP_H +#define LWIP_HDR_PROT_DHCP_H + +#include "lwip/opt.h" +#include "lwip/arch.h" +#include "lwip/prot/ip4.h" + +#ifdef __cplusplus +extern "C" { +#endif + + /* DHCP message item offsets and length */ +#define DHCP_CHADDR_LEN 16U +#define DHCP_SNAME_OFS 44U +#define DHCP_SNAME_LEN 64U +#define DHCP_FILE_OFS 108U +#define DHCP_FILE_LEN 128U +#define DHCP_MSG_LEN 236U +#define DHCP_OPTIONS_OFS (DHCP_MSG_LEN + 4U) /* 4 byte: cookie */ + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** minimum set of fields of any DHCP message */ +struct dhcp_msg +{ + PACK_STRUCT_FLD_8(u8_t op); + PACK_STRUCT_FLD_8(u8_t htype); + PACK_STRUCT_FLD_8(u8_t hlen); + PACK_STRUCT_FLD_8(u8_t hops); + PACK_STRUCT_FIELD(u32_t xid); + PACK_STRUCT_FIELD(u16_t secs); + PACK_STRUCT_FIELD(u16_t flags); + PACK_STRUCT_FLD_S(ip4_addr_p_t ciaddr); + PACK_STRUCT_FLD_S(ip4_addr_p_t yiaddr); + PACK_STRUCT_FLD_S(ip4_addr_p_t siaddr); + PACK_STRUCT_FLD_S(ip4_addr_p_t giaddr); + PACK_STRUCT_FLD_8(u8_t chaddr[DHCP_CHADDR_LEN]); + PACK_STRUCT_FLD_8(u8_t sname[DHCP_SNAME_LEN]); + PACK_STRUCT_FLD_8(u8_t file[DHCP_FILE_LEN]); + PACK_STRUCT_FIELD(u32_t cookie); +#define DHCP_MIN_OPTIONS_LEN 68U +/** make sure user does not configure this too small */ +#if ((defined(DHCP_OPTIONS_LEN)) && (DHCP_OPTIONS_LEN < DHCP_MIN_OPTIONS_LEN)) +# undef DHCP_OPTIONS_LEN +#endif +/** allow this to be configured in lwipopts.h, but not too small */ +#if (!defined(DHCP_OPTIONS_LEN)) +/** set this to be sufficient for your options in outgoing DHCP msgs */ +# define DHCP_OPTIONS_LEN DHCP_MIN_OPTIONS_LEN +#endif + PACK_STRUCT_FLD_8(u8_t options[DHCP_OPTIONS_LEN]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + + +/* DHCP client states */ +typedef enum { + DHCP_STATE_OFF = 0, + DHCP_STATE_REQUESTING = 1, + DHCP_STATE_INIT = 2, + DHCP_STATE_REBOOTING = 3, + DHCP_STATE_REBINDING = 4, + DHCP_STATE_RENEWING = 5, + DHCP_STATE_SELECTING = 6, + DHCP_STATE_INFORMING = 7, + DHCP_STATE_CHECKING = 8, + DHCP_STATE_PERMANENT = 9, /* not yet implemented */ + DHCP_STATE_BOUND = 10, + DHCP_STATE_RELEASING = 11, /* not yet implemented */ + DHCP_STATE_BACKING_OFF = 12 +} dhcp_state_enum_t; + +/* DHCP op codes */ +#define DHCP_BOOTREQUEST 1 +#define DHCP_BOOTREPLY 2 + +/* DHCP message types */ +#define DHCP_DISCOVER 1 +#define DHCP_OFFER 2 +#define DHCP_REQUEST 3 +#define DHCP_DECLINE 4 +#define DHCP_ACK 5 +#define DHCP_NAK 6 +#define DHCP_RELEASE 7 +#define DHCP_INFORM 8 + +#define DHCP_MAGIC_COOKIE 0x63825363UL + +/* This is a list of options for BOOTP and DHCP, see RFC 2132 for descriptions */ + +/* BootP options */ +#define DHCP_OPTION_PAD 0 +#define DHCP_OPTION_SUBNET_MASK 1 /* RFC 2132 3.3 */ +#define DHCP_OPTION_ROUTER 3 +#define DHCP_OPTION_DNS_SERVER 6 +#define DHCP_OPTION_HOSTNAME 12 +#define DHCP_OPTION_IP_TTL 23 +#define DHCP_OPTION_MTU 26 +#define DHCP_OPTION_BROADCAST 28 +#define DHCP_OPTION_TCP_TTL 37 +#define DHCP_OPTION_NTP 42 +#define DHCP_OPTION_END 255 + +/* DHCP options */ +#define DHCP_OPTION_REQUESTED_IP 50 /* RFC 2132 9.1, requested IP address */ +#define DHCP_OPTION_LEASE_TIME 51 /* RFC 2132 9.2, time in seconds, in 4 bytes */ +#define DHCP_OPTION_OVERLOAD 52 /* RFC2132 9.3, use file and/or sname field for options */ + +#define DHCP_OPTION_MESSAGE_TYPE 53 /* RFC 2132 9.6, important for DHCP */ +#define DHCP_OPTION_MESSAGE_TYPE_LEN 1 + +#define DHCP_OPTION_SERVER_ID 54 /* RFC 2132 
9.7, server IP address */ +#define DHCP_OPTION_PARAMETER_REQUEST_LIST 55 /* RFC 2132 9.8, requested option types */ + +#define DHCP_OPTION_MAX_MSG_SIZE 57 /* RFC 2132 9.10, message size accepted >= 576 */ +#define DHCP_OPTION_MAX_MSG_SIZE_LEN 2 + +#define DHCP_OPTION_T1 58 /* T1 renewal time */ +#define DHCP_OPTION_T2 59 /* T2 rebinding time */ +#define DHCP_OPTION_US 60 +#define DHCP_OPTION_CLIENT_ID 61 +#define DHCP_OPTION_TFTP_SERVERNAME 66 +#define DHCP_OPTION_BOOTFILE 67 + +/* possible combinations of overloading the file and sname fields with options */ +#define DHCP_OVERLOAD_NONE 0 +#define DHCP_OVERLOAD_FILE 1 +#define DHCP_OVERLOAD_SNAME 2 +#define DHCP_OVERLOAD_SNAME_FILE 3 + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_DHCP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp6.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp6.h new file mode 100644 index 0000000..0754c91 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp6.h @@ -0,0 +1,138 @@ +/** + * @file + * DHCPv6 protocol definitions + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
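To show how the DHCP offsets and option codes above map onto the wire (an editorial sketch, not taken from lwIP; build_discover is a hypothetical helper), the following lays out a minimal DHCPDISCOVER in a raw buffer. lwIP's dhcp.c builds the same layout through struct dhcp_msg; this only illustrates the fixed-format part, the magic cookie and a single option 53.

/* Sketch only: minimal DHCPDISCOVER layout per lwip/prot/dhcp.h above. */
#include <stdint.h>
#include <string.h>

/* Values mirrored from lwip/prot/dhcp.h for a standalone build. */
#define DHCP_OPTIONS_OFS         240u  /* DHCP_MSG_LEN (236) + 4-byte cookie */
#define DHCP_BOOTREQUEST         1
#define DHCP_DISCOVER            1
#define DHCP_OPTION_MESSAGE_TYPE 53
#define DHCP_OPTION_END          255
#define DHCP_MAGIC_COOKIE        0x63825363UL

static size_t build_discover(uint8_t *buf, size_t len,
                             const uint8_t mac[6], uint32_t xid)
{
    if (len < DHCP_OPTIONS_OFS + 4) {
        return 0;
    }
    memset(buf, 0, len);
    buf[0] = DHCP_BOOTREQUEST;  /* op */
    buf[1] = 1;                 /* htype: Ethernet */
    buf[2] = 6;                 /* hlen */
    /* xid, network byte order */
    buf[4] = (uint8_t)(xid >> 24);
    buf[5] = (uint8_t)(xid >> 16);
    buf[6] = (uint8_t)(xid >> 8);
    buf[7] = (uint8_t)xid;
    memcpy(&buf[28], mac, 6);   /* chaddr */
    /* magic cookie directly before the options area */
    buf[236] = (uint8_t)(DHCP_MAGIC_COOKIE >> 24);
    buf[237] = (uint8_t)(DHCP_MAGIC_COOKIE >> 16);
    buf[238] = (uint8_t)(DHCP_MAGIC_COOKIE >> 8);
    buf[239] = (uint8_t)DHCP_MAGIC_COOKIE;
    /* option 53: DHCP message type = DISCOVER, then end marker */
    buf[DHCP_OPTIONS_OFS + 0] = DHCP_OPTION_MESSAGE_TYPE;
    buf[DHCP_OPTIONS_OFS + 1] = 1;
    buf[DHCP_OPTIONS_OFS + 2] = DHCP_DISCOVER;
    buf[DHCP_OPTIONS_OFS + 3] = DHCP_OPTION_END;
    return DHCP_OPTIONS_OFS + 4;
}

A real DISCOVER would usually add more options (parameter request list, client ID) and may set the broadcast flag, but the field order above matches struct dhcp_msg.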
+ * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_PROT_DHCP6_H +#define LWIP_HDR_PROT_DHCP6_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define DHCP6_CLIENT_PORT 546 +#define DHCP6_SERVER_PORT 547 + + + /* DHCPv6 message item offsets and length */ +#define DHCP6_TRANSACTION_ID_LEN 3 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** minimum set of fields of any DHCPv6 message */ +struct dhcp6_msg +{ + PACK_STRUCT_FLD_8(u8_t msgtype); + PACK_STRUCT_FLD_8(u8_t transaction_id[DHCP6_TRANSACTION_ID_LEN]); + /* options follow */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + + +/* DHCP6 client states */ +typedef enum { + DHCP6_STATE_OFF = 0, + DHCP6_STATE_STATELESS_IDLE = 1, + DHCP6_STATE_REQUESTING_CONFIG = 2 +} dhcp6_state_enum_t; + +/* DHCPv6 message types */ +#define DHCP6_SOLICIT 1 +#define DHCP6_ADVERTISE 2 +#define DHCP6_REQUEST 3 +#define DHCP6_CONFIRM 4 +#define DHCP6_RENEW 5 +#define DHCP6_REBIND 6 +#define DHCP6_REPLY 7 +#define DHCP6_RELEASE 8 +#define DHCP6_DECLINE 9 +#define DHCP6_RECONFIGURE 10 +#define DHCP6_INFOREQUEST 11 +#define DHCP6_RELAYFORW 12 +#define DHCP6_RELAYREPL 13 +/* More message types see https://www.iana.org/assignments/dhcpv6-parameters/dhcpv6-parameters.xhtml */ + +/** DHCPv6 status codes */ +#define DHCP6_STATUS_SUCCESS 0 /* Success. */ +#define DHCP6_STATUS_UNSPECFAIL 1 /* Failure, reason unspecified; this status code is sent by either a client or a server to indicate a failure not explicitly specified in this document. */ +#define DHCP6_STATUS_NOADDRSAVAIL 2 /* Server has no addresses available to assign to the IA(s). */ +#define DHCP6_STATUS_NOBINDING 3 /* Client record (binding) unavailable. */ +#define DHCP6_STATUS_NOTONLINK 4 /* The prefix for the address is not appropriate for the link to which the client is attached. */ +#define DHCP6_STATUS_USEMULTICAST 5 /* Sent by a server to a client to force the client to send messages to the server using the All_DHCP_Relay_Agents_and_Servers address. 
*/ +/* More status codes see https://www.iana.org/assignments/dhcpv6-parameters/dhcpv6-parameters.xhtml */ + +/** DHCPv6 DUID types */ +#define DHCP6_DUID_LLT 1 /* LLT: Link-layer Address Plus Time */ +#define DHCP6_DUID_EN 2 /* EN: Enterprise number */ +#define DHCP6_DUID_LL 3 /* LL: Link-layer Address */ +#define DHCP6_DUID_UUID 4 /* UUID (RFC 6355) */ + +/* DHCPv6 options */ +#define DHCP6_OPTION_CLIENTID 1 +#define DHCP6_OPTION_SERVERID 2 +#define DHCP6_OPTION_IA_NA 3 +#define DHCP6_OPTION_IA_TA 4 +#define DHCP6_OPTION_IAADDR 5 +#define DHCP6_OPTION_ORO 6 +#define DHCP6_OPTION_PREFERENCE 7 +#define DHCP6_OPTION_ELAPSED_TIME 8 +#define DHCP6_OPTION_RELAY_MSG 9 +#define DHCP6_OPTION_AUTH 11 +#define DHCP6_OPTION_UNICAST 12 +#define DHCP6_OPTION_STATUS_CODE 13 +#define DHCP6_OPTION_RAPID_COMMIT 14 +#define DHCP6_OPTION_USER_CLASS 15 +#define DHCP6_OPTION_VENDOR_CLASS 16 +#define DHCP6_OPTION_VENDOR_OPTS 17 +#define DHCP6_OPTION_INTERFACE_ID 18 +#define DHCP6_OPTION_RECONF_MSG 19 +#define DHCP6_OPTION_RECONF_ACCEPT 20 +/* More options see https://www.iana.org/assignments/dhcpv6-parameters/dhcpv6-parameters.xhtml */ +#define DHCP6_OPTION_DNS_SERVERS 23 /* RFC 3646 */ +#define DHCP6_OPTION_DOMAIN_LIST 24 /* RFC 3646 */ +#define DHCP6_OPTION_SNTP_SERVERS 31 /* RFC 4075 */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_DHCP6_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dns.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dns.h new file mode 100644 index 0000000..94782d6 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dns.h @@ -0,0 +1,140 @@ +/** + * @file + * DNS - host name to IP address resolver. + */ + +/* + * Port to lwIP from uIP + * by Jim Pettinato April 2007 + * + * security fixes and more by Simon Goldschmidt + * + * uIP version Copyright (c) 2002-2003, Adam Dunkels. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef LWIP_HDR_PROT_DNS_H +#define LWIP_HDR_PROT_DNS_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** DNS server port address */ +#ifndef DNS_SERVER_PORT +#define DNS_SERVER_PORT 53 +#endif + +/* DNS field TYPE used for "Resource Records" */ +#define DNS_RRTYPE_A 1 /* a host address */ +#define DNS_RRTYPE_NS 2 /* an authoritative name server */ +#define DNS_RRTYPE_MD 3 /* a mail destination (Obsolete - use MX) */ +#define DNS_RRTYPE_MF 4 /* a mail forwarder (Obsolete - use MX) */ +#define DNS_RRTYPE_CNAME 5 /* the canonical name for an alias */ +#define DNS_RRTYPE_SOA 6 /* marks the start of a zone of authority */ +#define DNS_RRTYPE_MB 7 /* a mailbox domain name (EXPERIMENTAL) */ +#define DNS_RRTYPE_MG 8 /* a mail group member (EXPERIMENTAL) */ +#define DNS_RRTYPE_MR 9 /* a mail rename domain name (EXPERIMENTAL) */ +#define DNS_RRTYPE_NULL 10 /* a null RR (EXPERIMENTAL) */ +#define DNS_RRTYPE_WKS 11 /* a well known service description */ +#define DNS_RRTYPE_PTR 12 /* a domain name pointer */ +#define DNS_RRTYPE_HINFO 13 /* host information */ +#define DNS_RRTYPE_MINFO 14 /* mailbox or mail list information */ +#define DNS_RRTYPE_MX 15 /* mail exchange */ +#define DNS_RRTYPE_TXT 16 /* text strings */ +#define DNS_RRTYPE_AAAA 28 /* IPv6 address */ +#define DNS_RRTYPE_SRV 33 /* service location */ +#define DNS_RRTYPE_ANY 255 /* any type */ + +/* DNS field CLASS used for "Resource Records" */ +#define DNS_RRCLASS_IN 1 /* the Internet */ +#define DNS_RRCLASS_CS 2 /* the CSNET class (Obsolete - used only for examples in some obsolete RFCs) */ +#define DNS_RRCLASS_CH 3 /* the CHAOS class */ +#define DNS_RRCLASS_HS 4 /* Hesiod [Dyer 87] */ +#define DNS_RRCLASS_ANY 255 /* any class */ +#define DNS_RRCLASS_FLUSH 0x800 /* Flush bit */ + +/* DNS protocol flags */ +#define DNS_FLAG1_RESPONSE 0x80 +#define DNS_FLAG1_OPCODE_STATUS 0x10 +#define DNS_FLAG1_OPCODE_INVERSE 0x08 +#define DNS_FLAG1_OPCODE_STANDARD 0x00 +#define DNS_FLAG1_AUTHORATIVE 0x04 +#define DNS_FLAG1_TRUNC 0x02 +#define DNS_FLAG1_RD 0x01 +#define DNS_FLAG2_RA 0x80 +#define DNS_FLAG2_ERR_MASK 0x0f +#define DNS_FLAG2_ERR_NONE 0x00 +#define DNS_FLAG2_ERR_NAME 0x03 + +#define DNS_HDR_GET_OPCODE(hdr) ((((hdr)->flags1) >> 3) & 0xF) + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** DNS message header */ +struct dns_hdr { + PACK_STRUCT_FIELD(u16_t id); + PACK_STRUCT_FLD_8(u8_t flags1); + PACK_STRUCT_FLD_8(u8_t flags2); + PACK_STRUCT_FIELD(u16_t numquestions); + PACK_STRUCT_FIELD(u16_t numanswers); + PACK_STRUCT_FIELD(u16_t numauthrr); + PACK_STRUCT_FIELD(u16_t numextrarr); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define SIZEOF_DNS_HDR 12 + + +/* Multicast DNS definitions */ + +/** UDP port for multicast DNS queries */ +#ifndef DNS_MQUERY_PORT +#define DNS_MQUERY_PORT 5353 +#endif + +/* IPv4 group for multicast DNS queries: 224.0.0.251 */ +#ifndef DNS_MQUERY_IPV4_GROUP_INIT +#define DNS_MQUERY_IPV4_GROUP_INIT IPADDR4_INIT_BYTES(224,0,0,251) +#endif + +/* IPv6 group for multicast DNS queries: FF02::FB */ +#ifndef DNS_MQUERY_IPV6_GROUP_INIT +#define DNS_MQUERY_IPV6_GROUP_INIT IPADDR6_INIT_HOST(0xFF020000,0,0,0xFB) +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_DNS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/etharp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/etharp.h new file mode 100644 index 0000000..811c228 --- /dev/null +++ 
b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/etharp.h @@ -0,0 +1,114 @@ +/** + * @file + * ARP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ETHARP_H +#define LWIP_HDR_PROT_ETHARP_H + +#include "lwip/arch.h" +#include "lwip/prot/ethernet.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef ETHARP_HWADDR_LEN +#define ETHARP_HWADDR_LEN ETH_HWADDR_LEN +#endif + +/** + * struct ip4_addr_wordaligned is used in the definition of the ARP packet format in + * order to support compilers that don't have structure packing. + */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip4_addr_wordaligned { + PACK_STRUCT_FIELD(u16_t addrw[2]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** MEMCPY-like copying of IP addresses where addresses are known to be + * 16-bit-aligned if the port is correctly configured (so a port could define + * this to copying 2 u16_t's) - no NULL-pointer-checking needed. */ +#ifndef IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T +#define IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(dest, src) SMEMCPY(dest, src, sizeof(ip4_addr_t)) +#endif + + /** MEMCPY-like copying of IP addresses where addresses are known to be + * 16-bit-aligned if the port is correctly configured (so a port could define + * this to copying 2 u16_t's) - no NULL-pointer-checking needed. 
*/ +#ifndef IPADDR_WORDALIGNED_COPY_FROM_IP4_ADDR_T +#define IPADDR_WORDALIGNED_COPY_FROM_IP4_ADDR_T(dest, src) SMEMCPY(dest, src, sizeof(ip4_addr_t)) +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** the ARP message, see RFC 826 ("Packet format") */ +struct etharp_hdr { + PACK_STRUCT_FIELD(u16_t hwtype); + PACK_STRUCT_FIELD(u16_t proto); + PACK_STRUCT_FLD_8(u8_t hwlen); + PACK_STRUCT_FLD_8(u8_t protolen); + PACK_STRUCT_FIELD(u16_t opcode); + PACK_STRUCT_FLD_S(struct eth_addr shwaddr); + PACK_STRUCT_FLD_S(struct ip4_addr_wordaligned sipaddr); + PACK_STRUCT_FLD_S(struct eth_addr dhwaddr); + PACK_STRUCT_FLD_S(struct ip4_addr_wordaligned dipaddr); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define SIZEOF_ETHARP_HDR 28 + +/* ARP message types (opcodes) */ +enum etharp_opcode { + ARP_REQUEST = 1, + ARP_REPLY = 2 +}; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ETHARP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ethernet.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ethernet.h new file mode 100644 index 0000000..309e574 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ethernet.h @@ -0,0 +1,125 @@ +/** + * @file + * Ethernet protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
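As an illustration of the struct etharp_hdr layout just defined (an editorial sketch, not from lwIP; build_arp_request is a hypothetical helper), the 28-byte RFC 826 ARP request can be serialised field by field. lwIP's etharp.c does this through the packed struct; the raw offsets below show the wire order.

/* Sketch only: ARP request matching the etharp_hdr layout above. */
#include <stdint.h>
#include <string.h>

#define ARP_REQUEST        1    /* as in lwip/prot/etharp.h above */
#define SIZEOF_ETHARP_HDR  28

static void build_arp_request(uint8_t hdr[SIZEOF_ETHARP_HDR],
                              const uint8_t src_mac[6], uint32_t src_ip,
                              uint32_t target_ip)
{
    memset(hdr, 0, SIZEOF_ETHARP_HDR);
    hdr[0] = 0x00; hdr[1] = 0x01;          /* hwtype: Ethernet */
    hdr[2] = 0x08; hdr[3] = 0x00;          /* proto: IPv4 */
    hdr[4] = 6;                            /* hwlen */
    hdr[5] = 4;                            /* protolen */
    hdr[6] = 0x00; hdr[7] = ARP_REQUEST;   /* opcode */
    memcpy(&hdr[8], src_mac, 6);           /* shwaddr */
    hdr[14] = (uint8_t)(src_ip >> 24);     /* sipaddr, network order */
    hdr[15] = (uint8_t)(src_ip >> 16);
    hdr[16] = (uint8_t)(src_ip >> 8);
    hdr[17] = (uint8_t)src_ip;
    /* dhwaddr (hdr[18..23]) stays zero for a request */
    hdr[24] = (uint8_t)(target_ip >> 24);  /* dipaddr */
    hdr[25] = (uint8_t)(target_ip >> 16);
    hdr[26] = (uint8_t)(target_ip >> 8);
    hdr[27] = (uint8_t)target_ip;
}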
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ETHERNET_H +#define LWIP_HDR_PROT_ETHERNET_H + +#include "lwip/arch.h" +#include "lwip/prot/ieee.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef ETH_HWADDR_LEN +#ifdef ETHARP_HWADDR_LEN +#define ETH_HWADDR_LEN ETHARP_HWADDR_LEN /* compatibility mode */ +#else +#define ETH_HWADDR_LEN 6 +#endif +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** An Ethernet MAC address */ +struct eth_addr { + PACK_STRUCT_FLD_8(u8_t addr[ETH_HWADDR_LEN]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Initialize a struct eth_addr with its 6 bytes (takes care of correct braces) */ +#define ETH_ADDR(b0, b1, b2, b3, b4, b5) {{b0, b1, b2, b3, b4, b5}} + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** Ethernet header */ +struct eth_hdr { +#if ETH_PAD_SIZE + PACK_STRUCT_FLD_8(u8_t padding[ETH_PAD_SIZE]); +#endif + PACK_STRUCT_FLD_S(struct eth_addr dest); + PACK_STRUCT_FLD_S(struct eth_addr src); + PACK_STRUCT_FIELD(u16_t type); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define SIZEOF_ETH_HDR (14 + ETH_PAD_SIZE) + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** VLAN header inserted between ethernet header and payload + * if 'type' in ethernet header is ETHTYPE_VLAN. + * See IEEE802.Q */ +struct eth_vlan_hdr { + PACK_STRUCT_FIELD(u16_t prio_vid); + PACK_STRUCT_FIELD(u16_t tpid); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define SIZEOF_VLAN_HDR 4 +#define VLAN_ID(vlan_hdr) (lwip_htons((vlan_hdr)->prio_vid) & 0xFFF) + +/** The 24-bit IANA IPv4-multicast OUI is 01-00-5e: */ +#define LL_IP4_MULTICAST_ADDR_0 0x01 +#define LL_IP4_MULTICAST_ADDR_1 0x00 +#define LL_IP4_MULTICAST_ADDR_2 0x5e + +/** IPv6 multicast uses this prefix */ +#define LL_IP6_MULTICAST_ADDR_0 0x33 +#define LL_IP6_MULTICAST_ADDR_1 0x33 + +#define eth_addr_cmp(addr1, addr2) (memcmp((addr1)->addr, (addr2)->addr, ETH_HWADDR_LEN) == 0) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ETHERNET_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/iana.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/iana.h new file mode 100644 index 0000000..32890cc --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/iana.h @@ -0,0 +1,97 @@ +/** + * @file + * IANA assigned numbers (RFC 1700 and successors) + * + * @defgroup iana IANA assigned numbers + * @ingroup infrastructure + */ + +/* + * Copyright (c) 2017 Dirk Ziegelmeier. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
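To make the eth_hdr / eth_vlan_hdr layouts above concrete (an editorial sketch, not part of lwIP; eth_payload_type is a hypothetical helper and ETH_PAD_SIZE is assumed to be 0), the following reads the EtherType from a raw frame and skips one optional 802.1Q tag, consistent with SIZEOF_ETH_HDR and SIZEOF_VLAN_HDR.

/* Sketch only: EtherType extraction with optional VLAN tag skipping. */
#include <stdint.h>
#include <stddef.h>

#define ETHTYPE_VLAN 0x8100u   /* value from lwip/prot/ieee.h */

/* Returns the payload EtherType and sets *payload_ofs, or 0 on short frames. */
static uint16_t eth_payload_type(const uint8_t *frame, size_t len,
                                 size_t *payload_ofs)
{
    if (len < 14) {
        return 0;
    }
    uint16_t type = (uint16_t)((frame[12] << 8) | frame[13]);
    size_t ofs = 14;                      /* SIZEOF_ETH_HDR with no padding */
    if (type == ETHTYPE_VLAN) {
        if (len < 18) {
            return 0;
        }
        /* inner EtherType follows the 2-byte TCI (prio/vid) */
        type = (uint16_t)((frame[16] << 8) | frame[17]);
        ofs += 4;                         /* SIZEOF_VLAN_HDR */
    }
    *payload_ofs = ofs;
    return type;
}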
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ + +#ifndef LWIP_HDR_PROT_IANA_H +#define LWIP_HDR_PROT_IANA_H + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @ingroup iana + * Hardware types + */ +enum lwip_iana_hwtype { + /** Ethernet */ + LWIP_IANA_HWTYPE_ETHERNET = 1 +}; + +/** + * @ingroup iana + * Port numbers + * https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt + */ +enum lwip_iana_port_number { + /** SMTP */ + LWIP_IANA_PORT_SMTP = 25, + /** DHCP server */ + LWIP_IANA_PORT_DHCP_SERVER = 67, + /** DHCP client */ + LWIP_IANA_PORT_DHCP_CLIENT = 68, + /** TFTP */ + LWIP_IANA_PORT_TFTP = 69, + /** HTTP */ + LWIP_IANA_PORT_HTTP = 80, + /** SNTP */ + LWIP_IANA_PORT_SNTP = 123, + /** NETBIOS */ + LWIP_IANA_PORT_NETBIOS = 137, + /** SNMP */ + LWIP_IANA_PORT_SNMP = 161, + /** SNMP traps */ + LWIP_IANA_PORT_SNMP_TRAP = 162, + /** HTTPS */ + LWIP_IANA_PORT_HTTPS = 443, + /** SMTPS */ + LWIP_IANA_PORT_SMTPS = 465, + /** MQTT */ + LWIP_IANA_PORT_MQTT = 1883, + /** MDNS */ + LWIP_IANA_PORT_MDNS = 5353, + /** Secure MQTT */ + LWIP_IANA_PORT_SECURE_MQTT = 8883 +}; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IANA_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp.h new file mode 100644 index 0000000..7d19385 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp.h @@ -0,0 +1,91 @@ +/** + * @file + * ICMP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ICMP_H +#define LWIP_HDR_PROT_ICMP_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define ICMP_ER 0 /* echo reply */ +#define ICMP_DUR 3 /* destination unreachable */ +#define ICMP_SQ 4 /* source quench */ +#define ICMP_RD 5 /* redirect */ +#define ICMP_ECHO 8 /* echo */ +#define ICMP_TE 11 /* time exceeded */ +#define ICMP_PP 12 /* parameter problem */ +#define ICMP_TS 13 /* timestamp */ +#define ICMP_TSR 14 /* timestamp reply */ +#define ICMP_IRQ 15 /* information request */ +#define ICMP_IR 16 /* information reply */ +#define ICMP_AM 17 /* address mask request */ +#define ICMP_AMR 18 /* address mask reply */ + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +/** This is the standard ICMP header only that the u32_t data + * is split to two u16_t like ICMP echo needs it. + * This header is also used for other ICMP types that do not + * use the data part. + */ +PACK_STRUCT_BEGIN +struct icmp_echo_hdr { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u16_t id); + PACK_STRUCT_FIELD(u16_t seqno); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* Compatibility defines, old versions used to combine type and code to an u16_t */ +#define ICMPH_TYPE(hdr) ((hdr)->type) +#define ICMPH_CODE(hdr) ((hdr)->code) +#define ICMPH_TYPE_SET(hdr, t) ((hdr)->type = (t)) +#define ICMPH_CODE_SET(hdr, c) ((hdr)->code = (c)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ICMP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp6.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp6.h new file mode 100644 index 0000000..3461120 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp6.h @@ -0,0 +1,170 @@ +/** + * @file + * ICMP6 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ICMP6_H +#define LWIP_HDR_PROT_ICMP6_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** ICMP type */ +enum icmp6_type { + /** Destination unreachable */ + ICMP6_TYPE_DUR = 1, + /** Packet too big */ + ICMP6_TYPE_PTB = 2, + /** Time exceeded */ + ICMP6_TYPE_TE = 3, + /** Parameter problem */ + ICMP6_TYPE_PP = 4, + /** Private experimentation */ + ICMP6_TYPE_PE1 = 100, + /** Private experimentation */ + ICMP6_TYPE_PE2 = 101, + /** Reserved for expansion of error messages */ + ICMP6_TYPE_RSV_ERR = 127, + + /** Echo request */ + ICMP6_TYPE_EREQ = 128, + /** Echo reply */ + ICMP6_TYPE_EREP = 129, + /** Multicast listener query */ + ICMP6_TYPE_MLQ = 130, + /** Multicast listener report */ + ICMP6_TYPE_MLR = 131, + /** Multicast listener done */ + ICMP6_TYPE_MLD = 132, + /** Router solicitation */ + ICMP6_TYPE_RS = 133, + /** Router advertisement */ + ICMP6_TYPE_RA = 134, + /** Neighbor solicitation */ + ICMP6_TYPE_NS = 135, + /** Neighbor advertisement */ + ICMP6_TYPE_NA = 136, + /** Redirect */ + ICMP6_TYPE_RD = 137, + /** Multicast router advertisement */ + ICMP6_TYPE_MRA = 151, + /** Multicast router solicitation */ + ICMP6_TYPE_MRS = 152, + /** Multicast router termination */ + ICMP6_TYPE_MRT = 153, + /** Private experimentation */ + ICMP6_TYPE_PE3 = 200, + /** Private experimentation */ + ICMP6_TYPE_PE4 = 201, + /** Reserved for expansion of informational messages */ + ICMP6_TYPE_RSV_INF = 255 +}; + +/** ICMP destination unreachable codes */ +enum icmp6_dur_code { + /** No route to destination */ + ICMP6_DUR_NO_ROUTE = 0, + /** Communication with destination administratively prohibited */ + ICMP6_DUR_PROHIBITED = 1, + /** Beyond scope of source address */ + ICMP6_DUR_SCOPE = 2, + /** Address unreachable */ + ICMP6_DUR_ADDRESS = 3, + /** Port unreachable */ + ICMP6_DUR_PORT = 4, + /** Source address failed ingress/egress policy */ + ICMP6_DUR_POLICY = 5, + /** Reject route to destination */ + ICMP6_DUR_REJECT_ROUTE = 6 +}; + +/** ICMP time exceeded codes */ +enum icmp6_te_code { + /** Hop limit exceeded in transit */ + ICMP6_TE_HL = 0, + /** Fragment reassembly time exceeded */ + ICMP6_TE_FRAG = 1 +}; + +/** ICMP parameter code */ +enum icmp6_pp_code { + /** Erroneous header field encountered */ + ICMP6_PP_FIELD = 0, + /** Unrecognized next header type encountered */ + ICMP6_PP_HEADER = 1, + /** Unrecognized IPv6 option encountered */ + ICMP6_PP_OPTION = 2 +}; + +/** This is the standard ICMP6 header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct icmp6_hdr { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u32_t data); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** This is the ICMP6 header adapted for echo req/resp. 
*/ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct icmp6_echo_hdr { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u16_t id); + PACK_STRUCT_FIELD(u16_t seqno); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ICMP6_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ieee.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ieee.h new file mode 100644 index 0000000..abbb9e3 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ieee.h @@ -0,0 +1,91 @@ +/** + * @file + * IEEE assigned numbers + * + * @defgroup ieee IEEE assigned numbers + * @ingroup infrastructure + */ + +/* + * Copyright (c) 2017 Dirk Ziegelmeier. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ + +#ifndef LWIP_HDR_PROT_IEEE_H +#define LWIP_HDR_PROT_IEEE_H + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @ingroup ieee + * A list of often ethtypes (although lwIP does not use all of them). 
+ */ +enum lwip_ieee_eth_type { + /** Internet protocol v4 */ + ETHTYPE_IP = 0x0800U, + /** Address resolution protocol */ + ETHTYPE_ARP = 0x0806U, + /** Wake on lan */ + ETHTYPE_WOL = 0x0842U, + /** RARP */ + ETHTYPE_RARP = 0x8035U, + /** Virtual local area network */ + ETHTYPE_VLAN = 0x8100U, + /** Internet protocol v6 */ + ETHTYPE_IPV6 = 0x86DDU, + /** PPP Over Ethernet Discovery Stage */ + ETHTYPE_PPPOEDISC = 0x8863U, + /** PPP Over Ethernet Session Stage */ + ETHTYPE_PPPOE = 0x8864U, + /** Jumbo Frames */ + ETHTYPE_JUMBO = 0x8870U, + /** Process field network */ + ETHTYPE_PROFINET = 0x8892U, + /** Ethernet for control automation technology */ + ETHTYPE_ETHERCAT = 0x88A4U, + /** Link layer discovery protocol */ + ETHTYPE_LLDP = 0x88CCU, + /** Serial real-time communication system */ + ETHTYPE_SERCOS = 0x88CDU, + /** Media redundancy protocol */ + ETHTYPE_MRP = 0x88E3U, + /** Precision time protocol */ + ETHTYPE_PTP = 0x88F7U, + /** Q-in-Q, 802.1ad */ + ETHTYPE_QINQ = 0x9100U +}; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IEEE_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/igmp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/igmp.h new file mode 100644 index 0000000..46b81a9 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/igmp.h @@ -0,0 +1,90 @@ +/** + * @file + * IGMP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_IGMP_H +#define LWIP_HDR_PROT_IGMP_H + +#include "lwip/arch.h" +#include "lwip/prot/ip4.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * IGMP constants + */ +#define IGMP_TTL 1 +#define IGMP_MINLEN 8 +#define ROUTER_ALERT 0x9404U +#define ROUTER_ALERTLEN 4 + +/* + * IGMP message types, including version number. + */ +#define IGMP_MEMB_QUERY 0x11 /* Membership query */ +#define IGMP_V1_MEMB_REPORT 0x12 /* Ver. 1 membership report */ +#define IGMP_V2_MEMB_REPORT 0x16 /* Ver. 
2 membership report */ +#define IGMP_LEAVE_GROUP 0x17 /* Leave-group message */ + +/* Group membership states */ +#define IGMP_GROUP_NON_MEMBER 0 +#define IGMP_GROUP_DELAYING_MEMBER 1 +#define IGMP_GROUP_IDLE_MEMBER 2 + +/** + * IGMP packet format. + */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct igmp_msg { + PACK_STRUCT_FLD_8(u8_t igmp_msgtype); + PACK_STRUCT_FLD_8(u8_t igmp_maxresp); + PACK_STRUCT_FIELD(u16_t igmp_checksum); + PACK_STRUCT_FLD_S(ip4_addr_p_t igmp_group_address); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IGMP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip.h new file mode 100644 index 0000000..223158f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip.h @@ -0,0 +1,59 @@ +/** + * @file + * IP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_IP_H +#define LWIP_HDR_PROT_IP_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define IP_PROTO_ICMP 1 +#define IP_PROTO_IGMP 2 +#define IP_PROTO_UDP 17 +#define IP_PROTO_UDPLITE 136 +#define IP_PROTO_TCP 6 + +/** This operates on a void* by loading the first byte */ +#define IP_HDR_GET_VERSION(ptr) ((*(u8_t*)(ptr)) >> 4) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip4.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip4.h new file mode 100644 index 0000000..9347461 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip4.h @@ -0,0 +1,131 @@ +/** + * @file + * IPv4 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. 
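As a small illustration of the IP protocol numbers and version check defined in lwip/prot/ip.h above (an editorial sketch, not from lwIP; ip4_proto_name is a hypothetical helper), a raw IPv4 packet can be dispatched on its version nibble and protocol byte. lwIP's ip4_input() performs the real validation (header length, checksum, addressing); this only names the fields.

/* Sketch only: dispatch on the IPv4 protocol field using lwIP's constants. */
#include <stdint.h>
#include <stddef.h>

/* Values mirrored from lwip/prot/ip.h above. */
#define IP_PROTO_ICMP 1
#define IP_PROTO_TCP  6
#define IP_PROTO_UDP  17

static const char *ip4_proto_name(const uint8_t *pkt, size_t len)
{
    if (len < 20 || (pkt[0] >> 4) != 4) {   /* IP_HDR_GET_VERSION(pkt) != 4 */
        return "not IPv4";
    }
    switch (pkt[9]) {                        /* protocol byte of the IPv4 header */
    case IP_PROTO_ICMP: return "ICMP";
    case IP_PROTO_TCP:  return "TCP";
    case IP_PROTO_UDP:  return "UDP";
    default:            return "other";
    }
}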
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_IP4_H +#define LWIP_HDR_PROT_IP4_H + +#include "lwip/arch.h" +#include "lwip/ip4_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** This is the packed version of ip4_addr_t, + used in network headers that are itself packed */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip4_addr_packed { + PACK_STRUCT_FIELD(u32_t addr); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +typedef struct ip4_addr_packed ip4_addr_p_t; + +/* Size of the IPv4 header. Same as 'sizeof(struct ip_hdr)'. */ +#define IP_HLEN 20 +/* Maximum size of the IPv4 header with options. 
*/ +#define IP_HLEN_MAX 60 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/* The IPv4 header */ +struct ip_hdr { + /* version / header length */ + PACK_STRUCT_FLD_8(u8_t _v_hl); + /* type of service */ + PACK_STRUCT_FLD_8(u8_t _tos); + /* total length */ + PACK_STRUCT_FIELD(u16_t _len); + /* identification */ + PACK_STRUCT_FIELD(u16_t _id); + /* fragment offset field */ + PACK_STRUCT_FIELD(u16_t _offset); +#define IP_RF 0x8000U /* reserved fragment flag */ +#define IP_DF 0x4000U /* don't fragment flag */ +#define IP_MF 0x2000U /* more fragments flag */ +#define IP_OFFMASK 0x1fffU /* mask for fragmenting bits */ + /* time to live */ + PACK_STRUCT_FLD_8(u8_t _ttl); + /* protocol*/ + PACK_STRUCT_FLD_8(u8_t _proto); + /* checksum */ + PACK_STRUCT_FIELD(u16_t _chksum); + /* source and destination IP addresses */ + PACK_STRUCT_FLD_S(ip4_addr_p_t src); + PACK_STRUCT_FLD_S(ip4_addr_p_t dest); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* Macros to get struct ip_hdr fields: */ +#define IPH_V(hdr) ((hdr)->_v_hl >> 4) +#define IPH_HL(hdr) ((hdr)->_v_hl & 0x0f) +#define IPH_HL_BYTES(hdr) ((u8_t)(IPH_HL(hdr) * 4)) +#define IPH_TOS(hdr) ((hdr)->_tos) +#define IPH_LEN(hdr) ((hdr)->_len) +#define IPH_ID(hdr) ((hdr)->_id) +#define IPH_OFFSET(hdr) ((hdr)->_offset) +#define IPH_OFFSET_BYTES(hdr) ((u16_t)((lwip_ntohs(IPH_OFFSET(hdr)) & IP_OFFMASK) * 8U)) +#define IPH_TTL(hdr) ((hdr)->_ttl) +#define IPH_PROTO(hdr) ((hdr)->_proto) +#define IPH_CHKSUM(hdr) ((hdr)->_chksum) + +/* Macros to set struct ip_hdr fields: */ +#define IPH_VHL_SET(hdr, v, hl) (hdr)->_v_hl = (u8_t)((((v) << 4) | (hl))) +#define IPH_TOS_SET(hdr, tos) (hdr)->_tos = (tos) +#define IPH_LEN_SET(hdr, len) (hdr)->_len = (len) +#define IPH_ID_SET(hdr, id) (hdr)->_id = (id) +#define IPH_OFFSET_SET(hdr, off) (hdr)->_offset = (off) +#define IPH_TTL_SET(hdr, ttl) (hdr)->_ttl = (u8_t)(ttl) +#define IPH_PROTO_SET(hdr, proto) (hdr)->_proto = (u8_t)(proto) +#define IPH_CHKSUM_SET(hdr, chksum) (hdr)->_chksum = (chksum) + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IP4_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip6.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip6.h new file mode 100644 index 0000000..0f6de45 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip6.h @@ -0,0 +1,233 @@ +/** + * @file + * IPv6 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_IP6_H +#define LWIP_HDR_PROT_IP6_H + +#include "lwip/arch.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** This is the packed version of ip6_addr_t, + used in network headers that are itself packed */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_addr_packed { + PACK_STRUCT_FIELD(u32_t addr[4]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +typedef struct ip6_addr_packed ip6_addr_p_t; + +#define IP6_HLEN 40 + +#define IP6_NEXTH_HOPBYHOP 0 +#define IP6_NEXTH_TCP 6 +#define IP6_NEXTH_UDP 17 +#define IP6_NEXTH_ENCAPS 41 +#define IP6_NEXTH_ROUTING 43 +#define IP6_NEXTH_FRAGMENT 44 +#define IP6_NEXTH_ICMP6 58 +#define IP6_NEXTH_NONE 59 +#define IP6_NEXTH_DESTOPTS 60 +#define IP6_NEXTH_UDPLITE 136 + +/** The IPv6 header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_hdr { + /** version / traffic class / flow label */ + PACK_STRUCT_FIELD(u32_t _v_tc_fl); + /** payload length */ + PACK_STRUCT_FIELD(u16_t _plen); + /** next header */ + PACK_STRUCT_FLD_8(u8_t _nexth); + /** hop limit */ + PACK_STRUCT_FLD_8(u8_t _hoplim); + /** source and destination IP addresses */ + PACK_STRUCT_FLD_S(ip6_addr_p_t src); + PACK_STRUCT_FLD_S(ip6_addr_p_t dest); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define IP6H_V(hdr) ((lwip_ntohl((hdr)->_v_tc_fl) >> 28) & 0x0f) +#define IP6H_TC(hdr) ((lwip_ntohl((hdr)->_v_tc_fl) >> 20) & 0xff) +#define IP6H_FL(hdr) (lwip_ntohl((hdr)->_v_tc_fl) & 0x000fffff) +#define IP6H_PLEN(hdr) (lwip_ntohs((hdr)->_plen)) +#define IP6H_NEXTH(hdr) ((hdr)->_nexth) +#define IP6H_NEXTH_P(hdr) ((u8_t *)(hdr) + 6) +#define IP6H_HOPLIM(hdr) ((hdr)->_hoplim) +#define IP6H_VTCFL_SET(hdr, v, tc, fl) (hdr)->_v_tc_fl = (lwip_htonl((((u32_t)(v)) << 28) | (((u32_t)(tc)) << 20) | (fl))) +#define IP6H_PLEN_SET(hdr, plen) (hdr)->_plen = lwip_htons(plen) +#define IP6H_NEXTH_SET(hdr, nexth) (hdr)->_nexth = (nexth) +#define IP6H_HOPLIM_SET(hdr, hl) (hdr)->_hoplim = (u8_t)(hl) + +/* ipv6 extended options header */ +#define IP6_PAD1_OPTION 0 +#define IP6_PADN_OPTION 1 +#define IP6_ROUTER_ALERT_OPTION 5 +#define IP6_JUMBO_OPTION 194 +#define IP6_HOME_ADDRESS_OPTION 201 +#define IP6_ROUTER_ALERT_DLEN 2 +#define IP6_ROUTER_ALERT_VALUE_MLD 0 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_opt_hdr { + /* router alert option type */ + PACK_STRUCT_FLD_8(u8_t _opt_type); + /* router alert option data len */ + PACK_STRUCT_FLD_8(u8_t _opt_dlen); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define IP6_OPT_HLEN 2 +#define IP6_OPT_TYPE_ACTION(hdr) ((((hdr)->_opt_type) >> 6) & 0x3) +#define IP6_OPT_TYPE_CHANGE(hdr) 
((((hdr)->_opt_type) >> 5) & 0x1) +#define IP6_OPT_TYPE(hdr) ((hdr)->_opt_type) +#define IP6_OPT_DLEN(hdr) ((hdr)->_opt_dlen) + +/* Hop-by-Hop header. */ +#define IP6_HBH_HLEN 2 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_hbh_hdr { + /* next header */ + PACK_STRUCT_FLD_8(u8_t _nexth); + /* header length in 8-octet units */ + PACK_STRUCT_FLD_8(u8_t _hlen); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define IP6_HBH_NEXTH(hdr) ((hdr)->_nexth) + +/* Destination header. */ +#define IP6_DEST_HLEN 2 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_dest_hdr { + /* next header */ + PACK_STRUCT_FLD_8(u8_t _nexth); + /* header length in 8-octet units */ + PACK_STRUCT_FLD_8(u8_t _hlen); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define IP6_DEST_NEXTH(hdr) ((hdr)->_nexth) + +/* Routing header */ +#define IP6_ROUT_TYPE2 2 +#define IP6_ROUT_RPL 3 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_rout_hdr { + /* next header */ + PACK_STRUCT_FLD_8(u8_t _nexth); + /* reserved */ + PACK_STRUCT_FLD_8(u8_t _hlen); + /* fragment offset */ + PACK_STRUCT_FIELD(u8_t _routing_type); + /* fragmented packet identification */ + PACK_STRUCT_FIELD(u8_t _segments_left); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define IP6_ROUT_NEXTH(hdr) ((hdr)->_nexth) +#define IP6_ROUT_TYPE(hdr) ((hdr)->_routing_type) +#define IP6_ROUT_SEG_LEFT(hdr) ((hdr)->_segments_left) + +/* Fragment header. */ +#define IP6_FRAG_HLEN 8 +#define IP6_FRAG_OFFSET_MASK 0xfff8 +#define IP6_FRAG_MORE_FLAG 0x0001 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_frag_hdr { + /* next header */ + PACK_STRUCT_FLD_8(u8_t _nexth); + /* reserved */ + PACK_STRUCT_FLD_8(u8_t reserved); + /* fragment offset */ + PACK_STRUCT_FIELD(u16_t _fragment_offset); + /* fragmented packet identification */ + PACK_STRUCT_FIELD(u32_t _identification); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define IP6_FRAG_NEXTH(hdr) ((hdr)->_nexth) +#define IP6_FRAG_MBIT(hdr) (lwip_ntohs((hdr)->_fragment_offset) & 0x1) +#define IP6_FRAG_ID(hdr) (lwip_ntohl((hdr)->_identification)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IP6_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/mld6.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/mld6.h new file mode 100644 index 0000000..71f1dcb --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/mld6.h @@ -0,0 +1,71 @@ +/** + * @file + * MLD6 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_MLD6_H +#define LWIP_HDR_PROT_MLD6_H + +#include "lwip/arch.h" +#include "lwip/prot/ip6.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define MLD6_HBH_HLEN 8 +/** Multicast listener report/query/done message header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct mld_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u16_t max_resp_delay); + PACK_STRUCT_FIELD(u16_t reserved); + PACK_STRUCT_FLD_S(ip6_addr_p_t multicast_address); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_MLD6_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/nd6.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/nd6.h new file mode 100644 index 0000000..c270d07 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/nd6.h @@ -0,0 +1,274 @@ +/** + * @file + * ND6 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ND6_H +#define LWIP_HDR_PROT_ND6_H + +#include "lwip/arch.h" +#include "lwip/ip6_addr.h" +#include "lwip/prot/ip6.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** Neighbor solicitation message header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ns_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u32_t reserved); + PACK_STRUCT_FLD_S(ip6_addr_p_t target_address); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Neighbor advertisement message header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct na_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FLD_8(u8_t flags); + PACK_STRUCT_FLD_8(u8_t reserved[3]); + PACK_STRUCT_FLD_S(ip6_addr_p_t target_address); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define ND6_FLAG_ROUTER (0x80) +#define ND6_FLAG_SOLICITED (0x40) +#define ND6_FLAG_OVERRIDE (0x20) + +/** Router solicitation message header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct rs_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u32_t reserved); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Router advertisement message header. */ +#define ND6_RA_FLAG_MANAGED_ADDR_CONFIG (0x80) +#define ND6_RA_FLAG_OTHER_CONFIG (0x40) +#define ND6_RA_FLAG_HOME_AGENT (0x20) +#define ND6_RA_PREFERENCE_MASK (0x18) +#define ND6_RA_PREFERENCE_HIGH (0x08) +#define ND6_RA_PREFERENCE_MEDIUM (0x00) +#define ND6_RA_PREFERENCE_LOW (0x18) +#define ND6_RA_PREFERENCE_DISABLED (0x10) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ra_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FLD_8(u8_t current_hop_limit); + PACK_STRUCT_FLD_8(u8_t flags); + PACK_STRUCT_FIELD(u16_t router_lifetime); + PACK_STRUCT_FIELD(u32_t reachable_time); + PACK_STRUCT_FIELD(u32_t retrans_timer); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Redirect message header. 
*/ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct redirect_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u32_t reserved); + PACK_STRUCT_FLD_S(ip6_addr_p_t target_address); + PACK_STRUCT_FLD_S(ip6_addr_p_t destination_address); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Link-layer address option. */ +#define ND6_OPTION_TYPE_SOURCE_LLADDR (0x01) +#define ND6_OPTION_TYPE_TARGET_LLADDR (0x02) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct lladdr_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FLD_8(u8_t addr[NETIF_MAX_HWADDR_LEN]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Prefix information option. */ +#define ND6_OPTION_TYPE_PREFIX_INFO (0x03) +#define ND6_PREFIX_FLAG_ON_LINK (0x80) +#define ND6_PREFIX_FLAG_AUTONOMOUS (0x40) +#define ND6_PREFIX_FLAG_ROUTER_ADDRESS (0x20) +#define ND6_PREFIX_FLAG_SITE_PREFIX (0x10) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct prefix_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FLD_8(u8_t prefix_length); + PACK_STRUCT_FLD_8(u8_t flags); + PACK_STRUCT_FIELD(u32_t valid_lifetime); + PACK_STRUCT_FIELD(u32_t preferred_lifetime); + PACK_STRUCT_FLD_8(u8_t reserved2[3]); + PACK_STRUCT_FLD_8(u8_t site_prefix_length); + PACK_STRUCT_FLD_S(ip6_addr_p_t prefix); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Redirected header option. */ +#define ND6_OPTION_TYPE_REDIR_HDR (0x04) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct redirected_header_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FLD_8(u8_t reserved[6]); + /* Portion of redirected packet follows. */ + /* PACK_STRUCT_FLD_8(u8_t redirected[8]); */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** MTU option. */ +#define ND6_OPTION_TYPE_MTU (0x05) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct mtu_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FIELD(u16_t reserved); + PACK_STRUCT_FIELD(u32_t mtu); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Route information option. */ +#define ND6_OPTION_TYPE_ROUTE_INFO (24) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct route_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FLD_8(u8_t prefix_length); + PACK_STRUCT_FLD_8(u8_t preference); + PACK_STRUCT_FIELD(u32_t route_lifetime); + PACK_STRUCT_FLD_S(ip6_addr_p_t prefix); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Recursive DNS Server Option. 
*/ +#define ND6_OPTION_TYPE_RDNSS (25) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct rdnss_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FIELD(u16_t reserved); + PACK_STRUCT_FIELD(u32_t lifetime); + PACK_STRUCT_FLD_S(ip6_addr_p_t rdnss_address[1]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define SIZEOF_RDNSS_OPTION_BASE 8 /* size without addresses */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ND6_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/tcp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/tcp.h new file mode 100644 index 0000000..c1d7de1 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/tcp.h @@ -0,0 +1,100 @@ +/** + * @file + * TCP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_TCP_H +#define LWIP_HDR_PROT_TCP_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Length of the TCP header, excluding options. */ +#define TCP_HLEN 20 + +/* Fields are (of course) in network byte order. + * Some fields are converted to host byte order in tcp_input(). 
+ */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct tcp_hdr { + PACK_STRUCT_FIELD(u16_t src); + PACK_STRUCT_FIELD(u16_t dest); + PACK_STRUCT_FIELD(u32_t seqno); + PACK_STRUCT_FIELD(u32_t ackno); + PACK_STRUCT_FIELD(u16_t _hdrlen_rsvd_flags); + PACK_STRUCT_FIELD(u16_t wnd); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u16_t urgp); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* TCP header flags bits */ +#define TCP_FIN 0x01U +#define TCP_SYN 0x02U +#define TCP_RST 0x04U +#define TCP_PSH 0x08U +#define TCP_ACK 0x10U +#define TCP_URG 0x20U +#define TCP_ECE 0x40U +#define TCP_CWR 0x80U +/* Valid TCP header flags */ +#define TCP_FLAGS 0x3fU + +#define TCP_MAX_OPTION_BYTES 40 + +#define TCPH_HDRLEN(phdr) ((u16_t)(lwip_ntohs((phdr)->_hdrlen_rsvd_flags) >> 12)) +#define TCPH_HDRLEN_BYTES(phdr) ((u8_t)(TCPH_HDRLEN(phdr) << 2)) +#define TCPH_FLAGS(phdr) ((u8_t)((lwip_ntohs((phdr)->_hdrlen_rsvd_flags) & TCP_FLAGS))) + +#define TCPH_HDRLEN_SET(phdr, len) (phdr)->_hdrlen_rsvd_flags = lwip_htons(((len) << 12) | TCPH_FLAGS(phdr)) +#define TCPH_FLAGS_SET(phdr, flags) (phdr)->_hdrlen_rsvd_flags = (((phdr)->_hdrlen_rsvd_flags & PP_HTONS(~TCP_FLAGS)) | lwip_htons(flags)) +#define TCPH_HDRLEN_FLAGS_SET(phdr, len, flags) (phdr)->_hdrlen_rsvd_flags = (u16_t)(lwip_htons((u16_t)((len) << 12) | (flags))) + +#define TCPH_SET_FLAG(phdr, flags ) (phdr)->_hdrlen_rsvd_flags = ((phdr)->_hdrlen_rsvd_flags | lwip_htons(flags)) +#define TCPH_UNSET_FLAG(phdr, flags) (phdr)->_hdrlen_rsvd_flags = ((phdr)->_hdrlen_rsvd_flags & ~lwip_htons(flags)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_TCP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/udp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/udp.h new file mode 100644 index 0000000..664f19a --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/udp.h @@ -0,0 +1,68 @@ +/** + * @file + * UDP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_UDP_H +#define LWIP_HDR_PROT_UDP_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define UDP_HLEN 8 + +/* Fields are (of course) in network byte order. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct udp_hdr { + PACK_STRUCT_FIELD(u16_t src); + PACK_STRUCT_FIELD(u16_t dest); /* src/dest UDP ports */ + PACK_STRUCT_FIELD(u16_t len); + PACK_STRUCT_FIELD(u16_t chksum); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_UDP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/raw.h b/Middlewares/Third_Party/LwIP/src/include/lwip/raw.h new file mode 100644 index 0000000..b129bd3 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/raw.h @@ -0,0 +1,143 @@ +/** + * @file + * raw API (to be used from TCPIP thread)\n + * See also @ref raw_raw + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_RAW_H +#define LWIP_HDR_RAW_H + +#include "lwip/opt.h" + +#if LWIP_RAW /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/def.h" +#include "lwip/ip.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define RAW_FLAGS_CONNECTED 0x01U +#define RAW_FLAGS_HDRINCL 0x02U +#define RAW_FLAGS_MULTICAST_LOOP 0x04U + +struct raw_pcb; + +/** Function prototype for raw pcb receive callback functions. + * @param arg user supplied argument (raw_pcb.recv_arg) + * @param pcb the raw_pcb which received data + * @param p the packet buffer that was received + * @param addr the remote IP address from which the packet was received + * @return 1 if the packet was 'eaten' (aka. deleted), + * 0 if the packet lives on + * If returning 1, the callback is responsible for freeing the pbuf + * if it's not used any more. + */ +typedef u8_t (*raw_recv_fn)(void *arg, struct raw_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr); + +/** the RAW protocol control block */ +struct raw_pcb { + /* Common members of all PCB types */ + IP_PCB; + + struct raw_pcb *next; + + u8_t protocol; + u8_t flags; + +#if LWIP_MULTICAST_TX_OPTIONS + /** outgoing network interface for multicast packets, by interface index (if nonzero) */ + u8_t mcast_ifindex; + /** TTL for outgoing multicast packets */ + u8_t mcast_ttl; +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + /** receive callback function */ + raw_recv_fn recv; + /* user-supplied argument for the recv callback */ + void *recv_arg; +#if LWIP_IPV6 + /* fields for handling checksum computations as per RFC3542. */ + u16_t chksum_offset; + u8_t chksum_reqd; +#endif +}; + +/* The following functions is the application layer interface to the + RAW code. */ +struct raw_pcb * raw_new (u8_t proto); +struct raw_pcb * raw_new_ip_type(u8_t type, u8_t proto); +void raw_remove (struct raw_pcb *pcb); +err_t raw_bind (struct raw_pcb *pcb, const ip_addr_t *ipaddr); +void raw_bind_netif (struct raw_pcb *pcb, const struct netif *netif); +err_t raw_connect (struct raw_pcb *pcb, const ip_addr_t *ipaddr); +void raw_disconnect (struct raw_pcb *pcb); + +err_t raw_sendto (struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *ipaddr); +err_t raw_sendto_if_src(struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, struct netif *netif, const ip_addr_t *src_ip); +err_t raw_send (struct raw_pcb *pcb, struct pbuf *p); + +void raw_recv (struct raw_pcb *pcb, raw_recv_fn recv, void *recv_arg); + +#define raw_flags(pcb) ((pcb)->flags) +#define raw_setflags(pcb,f) ((pcb)->flags = (f)) + +#define raw_set_flags(pcb, set_flags) do { (pcb)->flags = (u8_t)((pcb)->flags | (set_flags)); } while(0) +#define raw_clear_flags(pcb, clr_flags) do { (pcb)->flags = (u8_t)((pcb)->flags & (u8_t)(~(clr_flags) & 0xff)); } while(0) +#define raw_is_flag_set(pcb, flag) (((pcb)->flags & (flag)) != 0) + +#define raw_init() /* Compatibility define, no init needed. 
*/ + +/* for compatibility with older implementation */ +#define raw_new_ip6(proto) raw_new_ip_type(IPADDR_TYPE_V6, proto) + +#if LWIP_MULTICAST_TX_OPTIONS +#define raw_set_multicast_netif_index(pcb, idx) ((pcb)->mcast_ifindex = (idx)) +#define raw_get_multicast_netif_index(pcb) ((pcb)->mcast_ifindex) +#define raw_set_multicast_ttl(pcb, value) ((pcb)->mcast_ttl = (value)) +#define raw_get_multicast_ttl(pcb) ((pcb)->mcast_ttl) +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_RAW */ + +#endif /* LWIP_HDR_RAW_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/sio.h b/Middlewares/Third_Party/LwIP/src/include/lwip/sio.h new file mode 100644 index 0000000..7643e19 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/sio.h @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + */ + +/* + * This is the interface to the platform specific serial IO module + * It needs to be implemented by those platforms which need SLIP or PPP + */ + +#ifndef SIO_H +#define SIO_H + +#include "lwip/arch.h" +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* If you want to define sio_fd_t elsewhere or differently, + define this in your cc.h file. */ +#ifndef __sio_fd_t_defined +typedef void * sio_fd_t; +#endif + +/* The following functions can be defined to something else in your cc.h file + or be implemented in your custom sio.c file. */ + +#ifndef sio_open +/** + * Opens a serial device for communication. + * + * @param devnum device number + * @return handle to serial device if successful, NULL otherwise + */ +sio_fd_t sio_open(u8_t devnum); +#endif + +#ifndef sio_send +/** + * Sends a single character to the serial device. + * + * @param c character to send + * @param fd serial device handle + * + * @note This function will block until the character can be sent. + */ +void sio_send(u8_t c, sio_fd_t fd); +#endif + +#ifndef sio_recv +/** + * Receives a single character from the serial device. 
+ * + * @param fd serial device handle + * + * @note This function will block until a character is received. + */ +u8_t sio_recv(sio_fd_t fd); +#endif + +#ifndef sio_read +/** + * Reads from the serial device. + * + * @param fd serial device handle + * @param data pointer to data buffer for receiving + * @param len maximum length (in bytes) of data to receive + * @return number of bytes actually received - may be 0 if aborted by sio_read_abort + * + * @note This function will block until data can be received. The blocking + * can be cancelled by calling sio_read_abort(). + */ +u32_t sio_read(sio_fd_t fd, u8_t *data, u32_t len); +#endif + +#ifndef sio_tryread +/** + * Tries to read from the serial device. Same as sio_read but returns + * immediately if no data is available and never blocks. + * + * @param fd serial device handle + * @param data pointer to data buffer for receiving + * @param len maximum length (in bytes) of data to receive + * @return number of bytes actually received + */ +u32_t sio_tryread(sio_fd_t fd, u8_t *data, u32_t len); +#endif + +#ifndef sio_write +/** + * Writes to the serial device. + * + * @param fd serial device handle + * @param data pointer to data to send + * @param len length (in bytes) of data to send + * @return number of bytes actually sent + * + * @note This function will block until all data can be sent. + */ +u32_t sio_write(sio_fd_t fd, u8_t *data, u32_t len); +#endif + +#ifndef sio_read_abort +/** + * Aborts a blocking sio_read() call. + * + * @param fd serial device handle + */ +void sio_read_abort(sio_fd_t fd); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* SIO_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/snmp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/snmp.h new file mode 100644 index 0000000..8704d0b --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/snmp.h @@ -0,0 +1,213 @@ +/** + * @file + * SNMP support API for implementing netifs and statitics for MIB2 + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Dirk Ziegelmeier + * + */ +#ifndef LWIP_HDR_SNMP_H +#define LWIP_HDR_SNMP_H + +#include "lwip/opt.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct udp_pcb; +struct netif; + +/** + * @defgroup netif_mib2 MIB2 statistics + * @ingroup netif + */ + +/* MIB2 statistics functions */ +#if MIB2_STATS /* don't build if not configured for use in lwipopts.h */ +/** + * @ingroup netif_mib2 + * @see RFC1213, "MIB-II, 6. Definitions" + */ +enum snmp_ifType { + snmp_ifType_other=1, /* none of the following */ + snmp_ifType_regular1822, + snmp_ifType_hdh1822, + snmp_ifType_ddn_x25, + snmp_ifType_rfc877_x25, + snmp_ifType_ethernet_csmacd, + snmp_ifType_iso88023_csmacd, + snmp_ifType_iso88024_tokenBus, + snmp_ifType_iso88025_tokenRing, + snmp_ifType_iso88026_man, + snmp_ifType_starLan, + snmp_ifType_proteon_10Mbit, + snmp_ifType_proteon_80Mbit, + snmp_ifType_hyperchannel, + snmp_ifType_fddi, + snmp_ifType_lapb, + snmp_ifType_sdlc, + snmp_ifType_ds1, /* T-1 */ + snmp_ifType_e1, /* european equiv. of T-1 */ + snmp_ifType_basicISDN, + snmp_ifType_primaryISDN, /* proprietary serial */ + snmp_ifType_propPointToPointSerial, + snmp_ifType_ppp, + snmp_ifType_softwareLoopback, + snmp_ifType_eon, /* CLNP over IP [11] */ + snmp_ifType_ethernet_3Mbit, + snmp_ifType_nsip, /* XNS over IP */ + snmp_ifType_slip, /* generic SLIP */ + snmp_ifType_ultra, /* ULTRA technologies */ + snmp_ifType_ds3, /* T-3 */ + snmp_ifType_sip, /* SMDS */ + snmp_ifType_frame_relay +}; + +/** This macro has a precision of ~49 days because sys_now returns u32_t. \#define your own if you want ~490 days. */ +#ifndef MIB2_COPY_SYSUPTIME_TO +#define MIB2_COPY_SYSUPTIME_TO(ptrToVal) (*(ptrToVal) = (sys_now() / 10)) +#endif + +/** + * @ingroup netif_mib2 + * Increment stats member for SNMP MIB2 stats (struct stats_mib2_netif_ctrs) + */ +#define MIB2_STATS_NETIF_INC(n, x) do { ++(n)->mib2_counters.x; } while(0) +/** + * @ingroup netif_mib2 + * Add value to stats member for SNMP MIB2 stats (struct stats_mib2_netif_ctrs) + */ +#define MIB2_STATS_NETIF_ADD(n, x, val) do { (n)->mib2_counters.x += (val); } while(0) + +/** + * @ingroup netif_mib2 + * Init MIB2 statistic counters in netif + * @param netif Netif to init + * @param type one of enum @ref snmp_ifType + * @param speed your link speed here (units: bits per second) + */ +#define MIB2_INIT_NETIF(netif, type, speed) do { \ + (netif)->link_type = (type); \ + (netif)->link_speed = (speed);\ + (netif)->ts = 0; \ + (netif)->mib2_counters.ifinoctets = 0; \ + (netif)->mib2_counters.ifinucastpkts = 0; \ + (netif)->mib2_counters.ifinnucastpkts = 0; \ + (netif)->mib2_counters.ifindiscards = 0; \ + (netif)->mib2_counters.ifinerrors = 0; \ + (netif)->mib2_counters.ifinunknownprotos = 0; \ + (netif)->mib2_counters.ifoutoctets = 0; \ + (netif)->mib2_counters.ifoutucastpkts = 0; \ + (netif)->mib2_counters.ifoutnucastpkts = 0; \ + (netif)->mib2_counters.ifoutdiscards = 0; \ + (netif)->mib2_counters.ifouterrors = 0; } while(0) +#else /* MIB2_STATS */ +#ifndef MIB2_COPY_SYSUPTIME_TO +#define MIB2_COPY_SYSUPTIME_TO(ptrToVal) +#endif +#define MIB2_INIT_NETIF(netif, type, speed) +#define MIB2_STATS_NETIF_INC(n, x) +#define MIB2_STATS_NETIF_ADD(n, x, val) +#endif /* MIB2_STATS */ + +/* LWIP MIB2 callbacks */ +#if LWIP_MIB2_CALLBACKS /* don't build if not configured for use in lwipopts.h */ +/* network interface */ +void mib2_netif_added(struct netif *ni); +void mib2_netif_removed(struct netif *ni); + +#if LWIP_IPV4 && LWIP_ARP +/* ARP (for atTable and ipNetToMediaTable) */ 
+void mib2_add_arp_entry(struct netif *ni, ip4_addr_t *ip); +void mib2_remove_arp_entry(struct netif *ni, ip4_addr_t *ip); +#else /* LWIP_IPV4 && LWIP_ARP */ +#define mib2_add_arp_entry(ni,ip) +#define mib2_remove_arp_entry(ni,ip) +#endif /* LWIP_IPV4 && LWIP_ARP */ + +/* IP */ +#if LWIP_IPV4 +void mib2_add_ip4(struct netif *ni); +void mib2_remove_ip4(struct netif *ni); +void mib2_add_route_ip4(u8_t dflt, struct netif *ni); +void mib2_remove_route_ip4(u8_t dflt, struct netif *ni); +#endif /* LWIP_IPV4 */ + +/* UDP */ +#if LWIP_UDP +void mib2_udp_bind(struct udp_pcb *pcb); +void mib2_udp_unbind(struct udp_pcb *pcb); +#endif /* LWIP_UDP */ + +#else /* LWIP_MIB2_CALLBACKS */ +/* LWIP_MIB2_CALLBACKS support not available */ +/* define everything to be empty */ + +/* network interface */ +#define mib2_netif_added(ni) +#define mib2_netif_removed(ni) + +/* ARP */ +#define mib2_add_arp_entry(ni,ip) +#define mib2_remove_arp_entry(ni,ip) + +/* IP */ +#define mib2_add_ip4(ni) +#define mib2_remove_ip4(ni) +#define mib2_add_route_ip4(dflt, ni) +#define mib2_remove_route_ip4(dflt, ni) + +/* UDP */ +#define mib2_udp_bind(pcb) +#define mib2_udp_unbind(pcb) +#endif /* LWIP_MIB2_CALLBACKS */ + +/* for source-code compatibility reasons only, can be removed (not used internally) */ +#define NETIF_INIT_SNMP MIB2_INIT_NETIF +#define snmp_add_ifinoctets(ni,value) MIB2_STATS_NETIF_ADD(ni, ifinoctets, value) +#define snmp_inc_ifinucastpkts(ni) MIB2_STATS_NETIF_INC(ni, ifinucastpkts) +#define snmp_inc_ifinnucastpkts(ni) MIB2_STATS_NETIF_INC(ni, ifinnucastpkts) +#define snmp_inc_ifindiscards(ni) MIB2_STATS_NETIF_INC(ni, ifindiscards) +#define snmp_inc_ifinerrors(ni) MIB2_STATS_NETIF_INC(ni, ifinerrors) +#define snmp_inc_ifinunknownprotos(ni) MIB2_STATS_NETIF_INC(ni, ifinunknownprotos) +#define snmp_add_ifoutoctets(ni,value) MIB2_STATS_NETIF_ADD(ni, ifoutoctets, value) +#define snmp_inc_ifoutucastpkts(ni) MIB2_STATS_NETIF_INC(ni, ifoutucastpkts) +#define snmp_inc_ifoutnucastpkts(ni) MIB2_STATS_NETIF_INC(ni, ifoutnucastpkts) +#define snmp_inc_ifoutdiscards(ni) MIB2_STATS_NETIF_INC(ni, ifoutdiscards) +#define snmp_inc_ifouterrors(ni) MIB2_STATS_NETIF_INC(ni, ifouterrors) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_SNMP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/sockets.h b/Middlewares/Third_Party/LwIP/src/include/lwip/sockets.h new file mode 100644 index 0000000..d70d36c --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/sockets.h @@ -0,0 +1,688 @@ +/** + * @file + * Socket API (to be used from non-TCPIP threads) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + + +#ifndef LWIP_HDR_SOCKETS_H +#define LWIP_HDR_SOCKETS_H + +#include "lwip/opt.h" + +#if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/err.h" +#include "lwip/inet.h" +#include "lwip/errno.h" + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* If your port already typedef's sa_family_t, define SA_FAMILY_T_DEFINED + to prevent this code from redefining it. */ +#if !defined(sa_family_t) && !defined(SA_FAMILY_T_DEFINED) +typedef u8_t sa_family_t; +#endif +/* If your port already typedef's in_port_t, define IN_PORT_T_DEFINED + to prevent this code from redefining it. */ +#if !defined(in_port_t) && !defined(IN_PORT_T_DEFINED) +typedef u16_t in_port_t; +#endif + +#if LWIP_IPV4 +/* members are in network byte order */ +struct sockaddr_in { + u8_t sin_len; + sa_family_t sin_family; + in_port_t sin_port; + struct in_addr sin_addr; +#define SIN_ZERO_LEN 8 + char sin_zero[SIN_ZERO_LEN]; +}; +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +struct sockaddr_in6 { + u8_t sin6_len; /* length of this structure */ + sa_family_t sin6_family; /* AF_INET6 */ + in_port_t sin6_port; /* Transport layer port # */ + u32_t sin6_flowinfo; /* IPv6 flow information */ + struct in6_addr sin6_addr; /* IPv6 address */ + u32_t sin6_scope_id; /* Set of interfaces for scope */ +}; +#endif /* LWIP_IPV6 */ + +struct sockaddr { + u8_t sa_len; + sa_family_t sa_family; + char sa_data[14]; +}; + +struct sockaddr_storage { + u8_t s2_len; + sa_family_t ss_family; + char s2_data1[2]; + u32_t s2_data2[3]; +#if LWIP_IPV6 + u32_t s2_data3[3]; +#endif /* LWIP_IPV6 */ +}; + +/* If your port already typedef's socklen_t, define SOCKLEN_T_DEFINED + to prevent this code from redefining it. 
*/ +#if !defined(socklen_t) && !defined(SOCKLEN_T_DEFINED) +typedef u32_t socklen_t; +#endif + +#if !defined IOV_MAX +#define IOV_MAX 0xFFFF +#elif IOV_MAX > 0xFFFF +#error "IOV_MAX larger than supported by LwIP" +#endif /* IOV_MAX */ + +#if !defined(iovec) +struct iovec { + void *iov_base; + size_t iov_len; +}; +#endif + +struct msghdr { + void *msg_name; + socklen_t msg_namelen; + struct iovec *msg_iov; + int msg_iovlen; + void *msg_control; + socklen_t msg_controllen; + int msg_flags; +}; + +/* struct msghdr->msg_flags bit field values */ +#define MSG_TRUNC 0x04 +#define MSG_CTRUNC 0x08 + +/* RFC 3542, Section 20: Ancillary Data */ +struct cmsghdr { + socklen_t cmsg_len; /* number of bytes, including header */ + int cmsg_level; /* originating protocol */ + int cmsg_type; /* protocol-specific type */ +}; +/* Data section follows header and possible padding, typically referred to as + unsigned char cmsg_data[]; */ + +/* cmsg header/data alignment. NOTE: we align to native word size (double word +size on 16-bit arch) so structures are not placed at an unaligned address. +16-bit arch needs double word to ensure 32-bit alignment because socklen_t +could be 32 bits. If we ever have cmsg data with a 64-bit variable, alignment +will need to increase long long */ +#define ALIGN_H(size) (((size) + sizeof(long) - 1U) & ~(sizeof(long)-1U)) +#define ALIGN_D(size) ALIGN_H(size) + +#define CMSG_FIRSTHDR(mhdr) \ + ((mhdr)->msg_controllen >= sizeof(struct cmsghdr) ? \ + (struct cmsghdr *)(mhdr)->msg_control : \ + (struct cmsghdr *)NULL) + +#define CMSG_NXTHDR(mhdr, cmsg) \ + (((cmsg) == NULL) ? CMSG_FIRSTHDR(mhdr) : \ + (((u8_t *)(cmsg) + ALIGN_H((cmsg)->cmsg_len) \ + + ALIGN_D(sizeof(struct cmsghdr)) > \ + (u8_t *)((mhdr)->msg_control) + (mhdr)->msg_controllen) ? \ + (struct cmsghdr *)NULL : \ + (struct cmsghdr *)((void*)((u8_t *)(cmsg) + \ + ALIGN_H((cmsg)->cmsg_len))))) + +#define CMSG_DATA(cmsg) ((void*)((u8_t *)(cmsg) + \ + ALIGN_D(sizeof(struct cmsghdr)))) + +#define CMSG_SPACE(length) (ALIGN_D(sizeof(struct cmsghdr)) + \ + ALIGN_H(length)) + +#define CMSG_LEN(length) (ALIGN_D(sizeof(struct cmsghdr)) + \ + length) + +/* Set socket options argument */ +#define IFNAMSIZ NETIF_NAMESIZE +struct ifreq { + char ifr_name[IFNAMSIZ]; /* Interface name */ +}; + +/* Socket protocol types (TCP/UDP/RAW) */ +#define SOCK_STREAM 1 +#define SOCK_DGRAM 2 +#define SOCK_RAW 3 + +/* + * Option flags per-socket. These must match the SOF_ flags in ip.h (checked in init.c) + */ +#define SO_REUSEADDR 0x0004 /* Allow local address reuse */ +#define SO_KEEPALIVE 0x0008 /* keep connections alive */ +#define SO_BROADCAST 0x0020 /* permit to send and to receive broadcast messages (see IP_SOF_BROADCAST option) */ + + +/* + * Additional options, not kept in so_options. 
+ */ +#define SO_DEBUG 0x0001 /* Unimplemented: turn on debugging info recording */ +#define SO_ACCEPTCONN 0x0002 /* socket has had listen() */ +#define SO_DONTROUTE 0x0010 /* Unimplemented: just use interface addresses */ +#define SO_USELOOPBACK 0x0040 /* Unimplemented: bypass hardware when possible */ +#define SO_LINGER 0x0080 /* linger on close if data present */ +#define SO_DONTLINGER ((int)(~SO_LINGER)) +#define SO_OOBINLINE 0x0100 /* Unimplemented: leave received OOB data in line */ +#define SO_REUSEPORT 0x0200 /* Unimplemented: allow local address & port reuse */ +#define SO_SNDBUF 0x1001 /* Unimplemented: send buffer size */ +#define SO_RCVBUF 0x1002 /* receive buffer size */ +#define SO_SNDLOWAT 0x1003 /* Unimplemented: send low-water mark */ +#define SO_RCVLOWAT 0x1004 /* Unimplemented: receive low-water mark */ +#define SO_SNDTIMEO 0x1005 /* send timeout */ +#define SO_RCVTIMEO 0x1006 /* receive timeout */ +#define SO_ERROR 0x1007 /* get error status and clear */ +#define SO_TYPE 0x1008 /* get socket type */ +#define SO_CONTIMEO 0x1009 /* Unimplemented: connect timeout */ +#define SO_NO_CHECK 0x100a /* don't create UDP checksum */ +#define SO_BINDTODEVICE 0x100b /* bind to device */ + +/* + * Structure used for manipulating linger option. + */ +struct linger { + int l_onoff; /* option on/off */ + int l_linger; /* linger time in seconds */ +}; + +/* + * Level number for (get/set)sockopt() to apply to socket itself. + */ +#define SOL_SOCKET 0xfff /* options for socket level */ + + +#define AF_UNSPEC 0 +#define AF_INET 2 +#if LWIP_IPV6 +#define AF_INET6 10 +#else /* LWIP_IPV6 */ +#define AF_INET6 AF_UNSPEC +#endif /* LWIP_IPV6 */ +#define PF_INET AF_INET +#define PF_INET6 AF_INET6 +#define PF_UNSPEC AF_UNSPEC + +#define IPPROTO_IP 0 +#define IPPROTO_ICMP 1 +#define IPPROTO_TCP 6 +#define IPPROTO_UDP 17 +#if LWIP_IPV6 +#define IPPROTO_IPV6 41 +#define IPPROTO_ICMPV6 58 +#endif /* LWIP_IPV6 */ +#define IPPROTO_UDPLITE 136 +#define IPPROTO_RAW 255 + +/* Flags we can use with send and recv. */ +#define MSG_PEEK 0x01 /* Peeks at an incoming message */ +#define MSG_WAITALL 0x02 /* Unimplemented: Requests that the function block until the full amount of data requested can be returned */ +#define MSG_OOB 0x04 /* Unimplemented: Requests out-of-band data. The significance and semantics of out-of-band data are protocol-specific */ +#define MSG_DONTWAIT 0x08 /* Nonblocking i/o for this operation only */ +#define MSG_MORE 0x10 /* Sender will send more */ +#define MSG_NOSIGNAL 0x20 /* Uninmplemented: Requests not to send the SIGPIPE signal if an attempt to send is made on a stream-oriented socket that is no longer connected. */ + + +/* + * Options for level IPPROTO_IP + */ +#define IP_TOS 1 +#define IP_TTL 2 +#define IP_PKTINFO 8 + +#if LWIP_TCP +/* + * Options for level IPPROTO_TCP + */ +#define TCP_NODELAY 0x01 /* don't delay send to coalesce packets */ +#define TCP_KEEPALIVE 0x02 /* send KEEPALIVE probes when idle for pcb->keep_idle milliseconds */ +#define TCP_KEEPIDLE 0x03 /* set pcb->keep_idle - Same as TCP_KEEPALIVE, but use seconds for get/setsockopt */ +#define TCP_KEEPINTVL 0x04 /* set pcb->keep_intvl - Use seconds for get/setsockopt */ +#define TCP_KEEPCNT 0x05 /* set pcb->keep_cnt - Use number of probes sent for get/setsockopt */ +#endif /* LWIP_TCP */ + +#if LWIP_IPV6 +/* + * Options for level IPPROTO_IPV6 + */ +#define IPV6_CHECKSUM 7 /* RFC3542: calculate and insert the ICMPv6 checksum for raw sockets. 
*/ +#define IPV6_V6ONLY 27 /* RFC3493: boolean control to restrict AF_INET6 sockets to IPv6 communications only. */ +#endif /* LWIP_IPV6 */ + +#if LWIP_UDP && LWIP_UDPLITE +/* + * Options for level IPPROTO_UDPLITE + */ +#define UDPLITE_SEND_CSCOV 0x01 /* sender checksum coverage */ +#define UDPLITE_RECV_CSCOV 0x02 /* minimal receiver checksum coverage */ +#endif /* LWIP_UDP && LWIP_UDPLITE*/ + + +#if LWIP_MULTICAST_TX_OPTIONS +/* + * Options and types for UDP multicast traffic handling + */ +#define IP_MULTICAST_TTL 5 +#define IP_MULTICAST_IF 6 +#define IP_MULTICAST_LOOP 7 +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#if LWIP_IGMP +/* + * Options and types related to multicast membership + */ +#define IP_ADD_MEMBERSHIP 3 +#define IP_DROP_MEMBERSHIP 4 + +typedef struct ip_mreq { + struct in_addr imr_multiaddr; /* IP multicast address of group */ + struct in_addr imr_interface; /* local IP address of interface */ +} ip_mreq; +#endif /* LWIP_IGMP */ + +#if LWIP_IPV4 +struct in_pktinfo { + unsigned int ipi_ifindex; /* Interface index */ + struct in_addr ipi_addr; /* Destination (from header) address */ +}; +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6_MLD +/* + * Options and types related to IPv6 multicast membership + */ +#define IPV6_JOIN_GROUP 12 +#define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP +#define IPV6_LEAVE_GROUP 13 +#define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP + +typedef struct ipv6_mreq { + struct in6_addr ipv6mr_multiaddr; /* IPv6 multicast addr */ + unsigned int ipv6mr_interface; /* interface index, or 0 */ +} ipv6_mreq; +#endif /* LWIP_IPV6_MLD */ + +/* + * The Type of Service provides an indication of the abstract + * parameters of the quality of service desired. These parameters are + * to be used to guide the selection of the actual service parameters + * when transmitting a datagram through a particular network. Several + * networks offer service precedence, which somehow treats high + * precedence traffic as more important than other traffic (generally + * by accepting only traffic above a certain precedence at time of high + * load). The major choice is a three way tradeoff between low-delay, + * high-reliability, and high-throughput. + * The use of the Delay, Throughput, and Reliability indications may + * increase the cost (in some sense) of the service. In many networks + * better performance for one of these parameters is coupled with worse + * performance on another. Except for very unusual cases at most two + * of these three indications should be set. + */ +#define IPTOS_TOS_MASK 0x1E +#define IPTOS_TOS(tos) ((tos) & IPTOS_TOS_MASK) +#define IPTOS_LOWDELAY 0x10 +#define IPTOS_THROUGHPUT 0x08 +#define IPTOS_RELIABILITY 0x04 +#define IPTOS_LOWCOST 0x02 +#define IPTOS_MINCOST IPTOS_LOWCOST + +/* + * The Network Control precedence designation is intended to be used + * within a network only. The actual use and control of that + * designation is up to each network. The Internetwork Control + * designation is intended for use by gateway control originators only. + * If the actual use of these precedence designations is of concern to + * a particular network, it is the responsibility of that network to + * control the access to, and use of, those precedence designations. 
+ */ +#define IPTOS_PREC_MASK 0xe0 +#define IPTOS_PREC(tos) ((tos) & IPTOS_PREC_MASK) +#define IPTOS_PREC_NETCONTROL 0xe0 +#define IPTOS_PREC_INTERNETCONTROL 0xc0 +#define IPTOS_PREC_CRITIC_ECP 0xa0 +#define IPTOS_PREC_FLASHOVERRIDE 0x80 +#define IPTOS_PREC_FLASH 0x60 +#define IPTOS_PREC_IMMEDIATE 0x40 +#define IPTOS_PREC_PRIORITY 0x20 +#define IPTOS_PREC_ROUTINE 0x00 + + +/* + * Commands for ioctlsocket(), taken from the BSD file fcntl.h. + * lwip_ioctl only supports FIONREAD and FIONBIO, for now + * + * Ioctl's have the command encoded in the lower word, + * and the size of any in or out parameters in the upper + * word. The high 2 bits of the upper word are used + * to encode the in/out status of the parameter; for now + * we restrict parameters to at most 128 bytes. + */ +#if !defined(FIONREAD) || !defined(FIONBIO) +#define IOCPARM_MASK 0x7fU /* parameters must be < 128 bytes */ +#define IOC_VOID 0x20000000UL /* no parameters */ +#define IOC_OUT 0x40000000UL /* copy out parameters */ +#define IOC_IN 0x80000000UL /* copy in parameters */ +#define IOC_INOUT (IOC_IN|IOC_OUT) + /* 0x20000000 distinguishes new & + old ioctl's */ +#define _IO(x,y) ((long)(IOC_VOID|((x)<<8)|(y))) + +#define _IOR(x,y,t) ((long)(IOC_OUT|((sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|(y))) + +#define _IOW(x,y,t) ((long)(IOC_IN|((sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|(y))) +#endif /* !defined(FIONREAD) || !defined(FIONBIO) */ + +#ifndef FIONREAD +#define FIONREAD _IOR('f', 127, unsigned long) /* get # bytes to read */ +#endif +#ifndef FIONBIO +#define FIONBIO _IOW('f', 126, unsigned long) /* set/clear non-blocking i/o */ +#endif + +/* Socket I/O Controls: unimplemented */ +#ifndef SIOCSHIWAT +#define SIOCSHIWAT _IOW('s', 0, unsigned long) /* set high watermark */ +#define SIOCGHIWAT _IOR('s', 1, unsigned long) /* get high watermark */ +#define SIOCSLOWAT _IOW('s', 2, unsigned long) /* set low watermark */ +#define SIOCGLOWAT _IOR('s', 3, unsigned long) /* get low watermark */ +#define SIOCATMARK _IOR('s', 7, unsigned long) /* at oob mark? */ +#endif + +/* commands for fnctl */ +#ifndef F_GETFL +#define F_GETFL 3 +#endif +#ifndef F_SETFL +#define F_SETFL 4 +#endif + +/* File status flags and file access modes for fnctl, + these are bits in an int. 
*/ +#ifndef O_NONBLOCK +#define O_NONBLOCK 1 /* nonblocking I/O */ +#endif +#ifndef O_NDELAY +#define O_NDELAY O_NONBLOCK /* same as O_NONBLOCK, for compatibility */ +#endif +#ifndef O_RDONLY +#define O_RDONLY 2 +#endif +#ifndef O_WRONLY +#define O_WRONLY 4 +#endif +#ifndef O_RDWR +#define O_RDWR (O_RDONLY|O_WRONLY) +#endif + +#ifndef SHUT_RD + #define SHUT_RD 0 + #define SHUT_WR 1 + #define SHUT_RDWR 2 +#endif + +/* FD_SET used for lwip_select */ +#ifndef FD_SET +#undef FD_SETSIZE +/* Make FD_SETSIZE match NUM_SOCKETS in socket.c */ +#define FD_SETSIZE MEMP_NUM_NETCONN +#define LWIP_SELECT_MAXNFDS (FD_SETSIZE + LWIP_SOCKET_OFFSET) +#define FDSETSAFESET(n, code) do { \ + if (((n) - LWIP_SOCKET_OFFSET < MEMP_NUM_NETCONN) && (((int)(n) - LWIP_SOCKET_OFFSET) >= 0)) { \ + code; }} while(0) +#define FDSETSAFEGET(n, code) (((n) - LWIP_SOCKET_OFFSET < MEMP_NUM_NETCONN) && (((int)(n) - LWIP_SOCKET_OFFSET) >= 0) ?\ + (code) : 0) +#define FD_SET(n, p) FDSETSAFESET(n, (p)->fd_bits[((n)-LWIP_SOCKET_OFFSET)/8] = (u8_t)((p)->fd_bits[((n)-LWIP_SOCKET_OFFSET)/8] | (1 << (((n)-LWIP_SOCKET_OFFSET) & 7)))) +#define FD_CLR(n, p) FDSETSAFESET(n, (p)->fd_bits[((n)-LWIP_SOCKET_OFFSET)/8] = (u8_t)((p)->fd_bits[((n)-LWIP_SOCKET_OFFSET)/8] & ~(1 << (((n)-LWIP_SOCKET_OFFSET) & 7)))) +#define FD_ISSET(n,p) FDSETSAFEGET(n, (p)->fd_bits[((n)-LWIP_SOCKET_OFFSET)/8] & (1 << (((n)-LWIP_SOCKET_OFFSET) & 7))) +#define FD_ZERO(p) memset((void*)(p), 0, sizeof(*(p))) + +typedef struct fd_set +{ + unsigned char fd_bits [(FD_SETSIZE+7)/8]; +} fd_set; + +#elif FD_SETSIZE < (LWIP_SOCKET_OFFSET + MEMP_NUM_NETCONN) +#error "external FD_SETSIZE too small for number of sockets" +#else +#define LWIP_SELECT_MAXNFDS FD_SETSIZE +#endif /* FD_SET */ + +/* poll-related defines and types */ +/* @todo: find a better way to guard the definition of these defines and types if already defined */ +#if !defined(POLLIN) && !defined(POLLOUT) +#define POLLIN 0x1 +#define POLLOUT 0x2 +#define POLLERR 0x4 +#define POLLNVAL 0x8 +/* Below values are unimplemented */ +#define POLLRDNORM 0x10 +#define POLLRDBAND 0x20 +#define POLLPRI 0x40 +#define POLLWRNORM 0x80 +#define POLLWRBAND 0x100 +#define POLLHUP 0x200 +typedef unsigned int nfds_t; +struct pollfd +{ + int fd; + short events; + short revents; +}; +#endif + +/** LWIP_TIMEVAL_PRIVATE: if you want to use the struct timeval provided + * by your system, set this to 0 and include in cc.h */ +#ifndef LWIP_TIMEVAL_PRIVATE +#define LWIP_TIMEVAL_PRIVATE 1 +#endif + +#if LWIP_TIMEVAL_PRIVATE +struct timeval { + long tv_sec; /* seconds */ + long tv_usec; /* and microseconds */ +}; +#endif /* LWIP_TIMEVAL_PRIVATE */ + +#define lwip_socket_init() /* Compatibility define, no init needed. 
*/ +void lwip_socket_thread_init(void); /* LWIP_NETCONN_SEM_PER_THREAD==1: initialize thread-local semaphore */ +void lwip_socket_thread_cleanup(void); /* LWIP_NETCONN_SEM_PER_THREAD==1: destroy thread-local semaphore */ + +#if LWIP_COMPAT_SOCKETS == 2 +/* This helps code parsers/code completion by not having the COMPAT functions as defines */ +#define lwip_accept accept +#define lwip_bind bind +#define lwip_shutdown shutdown +#define lwip_getpeername getpeername +#define lwip_getsockname getsockname +#define lwip_setsockopt setsockopt +#define lwip_getsockopt getsockopt +#define lwip_close closesocket +#define lwip_connect connect +#define lwip_listen listen +#define lwip_recv recv +#define lwip_recvmsg recvmsg +#define lwip_recvfrom recvfrom +#define lwip_send send +#define lwip_sendmsg sendmsg +#define lwip_sendto sendto +#define lwip_socket socket +#if LWIP_SOCKET_SELECT +#define lwip_select select +#endif +#if LWIP_SOCKET_POLL +#define lwip_poll poll +#endif +#define lwip_ioctl ioctlsocket +#define lwip_inet_ntop inet_ntop +#define lwip_inet_pton inet_pton + +#if LWIP_POSIX_SOCKETS_IO_NAMES +#define lwip_read read +#define lwip_readv readv +#define lwip_write write +#define lwip_writev writev +#undef lwip_close +#define lwip_close close +#define closesocket(s) close(s) +int fcntl(int s, int cmd, ...); +#undef lwip_ioctl +#define lwip_ioctl ioctl +#define ioctlsocket ioctl +#endif /* LWIP_POSIX_SOCKETS_IO_NAMES */ +#endif /* LWIP_COMPAT_SOCKETS == 2 */ + +int lwip_accept(int s, struct sockaddr *addr, socklen_t *addrlen); +int lwip_bind(int s, const struct sockaddr *name, socklen_t namelen); +int lwip_shutdown(int s, int how); +int lwip_getpeername (int s, struct sockaddr *name, socklen_t *namelen); +int lwip_getsockname (int s, struct sockaddr *name, socklen_t *namelen); +int lwip_getsockopt (int s, int level, int optname, void *optval, socklen_t *optlen); +int lwip_setsockopt (int s, int level, int optname, const void *optval, socklen_t optlen); + int lwip_close(int s); +int lwip_connect(int s, const struct sockaddr *name, socklen_t namelen); +int lwip_listen(int s, int backlog); +ssize_t lwip_recv(int s, void *mem, size_t len, int flags); +ssize_t lwip_read(int s, void *mem, size_t len); +ssize_t lwip_readv(int s, const struct iovec *iov, int iovcnt); +ssize_t lwip_recvfrom(int s, void *mem, size_t len, int flags, + struct sockaddr *from, socklen_t *fromlen); +ssize_t lwip_recvmsg(int s, struct msghdr *message, int flags); +ssize_t lwip_send(int s, const void *dataptr, size_t size, int flags); +ssize_t lwip_sendmsg(int s, const struct msghdr *message, int flags); +ssize_t lwip_sendto(int s, const void *dataptr, size_t size, int flags, + const struct sockaddr *to, socklen_t tolen); +int lwip_socket(int domain, int type, int protocol); +ssize_t lwip_write(int s, const void *dataptr, size_t size); +ssize_t lwip_writev(int s, const struct iovec *iov, int iovcnt); +#if LWIP_SOCKET_SELECT +int lwip_select(int maxfdp1, fd_set *readset, fd_set *writeset, fd_set *exceptset, + struct timeval *timeout); +#endif +#if LWIP_SOCKET_POLL +int lwip_poll(struct pollfd *fds, nfds_t nfds, int timeout); +#endif +int lwip_ioctl(int s, long cmd, void *argp); +int lwip_fcntl(int s, int cmd, int val); +const char *lwip_inet_ntop(int af, const void *src, char *dst, socklen_t size); +int lwip_inet_pton(int af, const char *src, void *dst); + +#if LWIP_COMPAT_SOCKETS +#if LWIP_COMPAT_SOCKETS != 2 +/** @ingroup socket */ +#define accept(s,addr,addrlen) lwip_accept(s,addr,addrlen) +/** @ingroup socket */ +#define 
bind(s,name,namelen) lwip_bind(s,name,namelen) +/** @ingroup socket */ +#define shutdown(s,how) lwip_shutdown(s,how) +/** @ingroup socket */ +#define getpeername(s,name,namelen) lwip_getpeername(s,name,namelen) +/** @ingroup socket */ +#define getsockname(s,name,namelen) lwip_getsockname(s,name,namelen) +/** @ingroup socket */ +#define setsockopt(s,level,optname,opval,optlen) lwip_setsockopt(s,level,optname,opval,optlen) +/** @ingroup socket */ +#define getsockopt(s,level,optname,opval,optlen) lwip_getsockopt(s,level,optname,opval,optlen) +/** @ingroup socket */ +#define closesocket(s) lwip_close(s) +/** @ingroup socket */ +#define connect(s,name,namelen) lwip_connect(s,name,namelen) +/** @ingroup socket */ +#define listen(s,backlog) lwip_listen(s,backlog) +/** @ingroup socket */ +#define recv(s,mem,len,flags) lwip_recv(s,mem,len,flags) +/** @ingroup socket */ +#define recvmsg(s,message,flags) lwip_recvmsg(s,message,flags) +/** @ingroup socket */ +#define recvfrom(s,mem,len,flags,from,fromlen) lwip_recvfrom(s,mem,len,flags,from,fromlen) +/** @ingroup socket */ +#define send(s,dataptr,size,flags) lwip_send(s,dataptr,size,flags) +/** @ingroup socket */ +#define sendmsg(s,message,flags) lwip_sendmsg(s,message,flags) +/** @ingroup socket */ +#define sendto(s,dataptr,size,flags,to,tolen) lwip_sendto(s,dataptr,size,flags,to,tolen) +/** @ingroup socket */ +#define socket(domain,type,protocol) lwip_socket(domain,type,protocol) +#if LWIP_SOCKET_SELECT +/** @ingroup socket */ +#define select(maxfdp1,readset,writeset,exceptset,timeout) lwip_select(maxfdp1,readset,writeset,exceptset,timeout) +#endif +#if LWIP_SOCKET_POLL +/** @ingroup socket */ +#define poll(fds,nfds,timeout) lwip_poll(fds,nfds,timeout) +#endif +/** @ingroup socket */ +#define ioctlsocket(s,cmd,argp) lwip_ioctl(s,cmd,argp) +/** @ingroup socket */ +#define inet_ntop(af,src,dst,size) lwip_inet_ntop(af,src,dst,size) +/** @ingroup socket */ +#define inet_pton(af,src,dst) lwip_inet_pton(af,src,dst) + +#if LWIP_POSIX_SOCKETS_IO_NAMES +/** @ingroup socket */ +#define read(s,mem,len) lwip_read(s,mem,len) +/** @ingroup socket */ +#define readv(s,iov,iovcnt) lwip_readv(s,iov,iovcnt) +/** @ingroup socket */ +#define write(s,dataptr,len) lwip_write(s,dataptr,len) +/** @ingroup socket */ +#define writev(s,iov,iovcnt) lwip_writev(s,iov,iovcnt) +/** @ingroup socket */ +#define close(s) lwip_close(s) +/** @ingroup socket */ +#define fcntl(s,cmd,val) lwip_fcntl(s,cmd,val) +/** @ingroup socket */ +#define ioctl(s,cmd,argp) lwip_ioctl(s,cmd,argp) +#endif /* LWIP_POSIX_SOCKETS_IO_NAMES */ +#endif /* LWIP_COMPAT_SOCKETS != 2 */ + +#endif /* LWIP_COMPAT_SOCKETS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_SOCKET */ + +#endif /* LWIP_HDR_SOCKETS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/stats.h b/Middlewares/Third_Party/LwIP/src/include/lwip/stats.h new file mode 100644 index 0000000..b570dba --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/stats.h @@ -0,0 +1,491 @@ +/** + * @file + * Statistics API (to be used from TCPIP thread) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_STATS_H +#define LWIP_HDR_STATS_H + +#include "lwip/opt.h" + +#include "lwip/mem.h" +#include "lwip/memp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_STATS + +#ifndef LWIP_STATS_LARGE +#define LWIP_STATS_LARGE 0 +#endif + +#if LWIP_STATS_LARGE +#define STAT_COUNTER u32_t +#define STAT_COUNTER_F U32_F +#else +#define STAT_COUNTER u16_t +#define STAT_COUNTER_F U16_F +#endif + +/** Protocol related stats */ +struct stats_proto { + STAT_COUNTER xmit; /* Transmitted packets. */ + STAT_COUNTER recv; /* Received packets. */ + STAT_COUNTER fw; /* Forwarded packets. */ + STAT_COUNTER drop; /* Dropped packets. */ + STAT_COUNTER chkerr; /* Checksum error. */ + STAT_COUNTER lenerr; /* Invalid length error. */ + STAT_COUNTER memerr; /* Out of memory error. */ + STAT_COUNTER rterr; /* Routing error. */ + STAT_COUNTER proterr; /* Protocol error. */ + STAT_COUNTER opterr; /* Error in options. */ + STAT_COUNTER err; /* Misc error. */ + STAT_COUNTER cachehit; +}; + +/** IGMP stats */ +struct stats_igmp { + STAT_COUNTER xmit; /* Transmitted packets. */ + STAT_COUNTER recv; /* Received packets. */ + STAT_COUNTER drop; /* Dropped packets. */ + STAT_COUNTER chkerr; /* Checksum error. */ + STAT_COUNTER lenerr; /* Invalid length error. */ + STAT_COUNTER memerr; /* Out of memory error. */ + STAT_COUNTER proterr; /* Protocol error. */ + STAT_COUNTER rx_v1; /* Received v1 frames. */ + STAT_COUNTER rx_group; /* Received group-specific queries. */ + STAT_COUNTER rx_general; /* Received general queries. */ + STAT_COUNTER rx_report; /* Received reports. */ + STAT_COUNTER tx_join; /* Sent joins. */ + STAT_COUNTER tx_leave; /* Sent leaves. */ + STAT_COUNTER tx_report; /* Sent reports. 
*/ +}; + +/** Memory stats */ +struct stats_mem { +#if defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY + const char *name; +#endif /* defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY */ + STAT_COUNTER err; + mem_size_t avail; + mem_size_t used; + mem_size_t max; + STAT_COUNTER illegal; +}; + +/** System element stats */ +struct stats_syselem { + STAT_COUNTER used; + STAT_COUNTER max; + STAT_COUNTER err; +}; + +/** System stats */ +struct stats_sys { + struct stats_syselem sem; + struct stats_syselem mutex; + struct stats_syselem mbox; +}; + +/** SNMP MIB2 stats */ +struct stats_mib2 { + /* IP */ + u32_t ipinhdrerrors; + u32_t ipinaddrerrors; + u32_t ipinunknownprotos; + u32_t ipindiscards; + u32_t ipindelivers; + u32_t ipoutrequests; + u32_t ipoutdiscards; + u32_t ipoutnoroutes; + u32_t ipreasmoks; + u32_t ipreasmfails; + u32_t ipfragoks; + u32_t ipfragfails; + u32_t ipfragcreates; + u32_t ipreasmreqds; + u32_t ipforwdatagrams; + u32_t ipinreceives; + + /* TCP */ + u32_t tcpactiveopens; + u32_t tcppassiveopens; + u32_t tcpattemptfails; + u32_t tcpestabresets; + u32_t tcpoutsegs; + u32_t tcpretranssegs; + u32_t tcpinsegs; + u32_t tcpinerrs; + u32_t tcpoutrsts; + + /* UDP */ + u32_t udpindatagrams; + u32_t udpnoports; + u32_t udpinerrors; + u32_t udpoutdatagrams; + + /* ICMP */ + u32_t icmpinmsgs; + u32_t icmpinerrors; + u32_t icmpindestunreachs; + u32_t icmpintimeexcds; + u32_t icmpinparmprobs; + u32_t icmpinsrcquenchs; + u32_t icmpinredirects; + u32_t icmpinechos; + u32_t icmpinechoreps; + u32_t icmpintimestamps; + u32_t icmpintimestampreps; + u32_t icmpinaddrmasks; + u32_t icmpinaddrmaskreps; + u32_t icmpoutmsgs; + u32_t icmpouterrors; + u32_t icmpoutdestunreachs; + u32_t icmpouttimeexcds; + u32_t icmpoutechos; /* can be incremented by user application ('ping') */ + u32_t icmpoutechoreps; +}; + +/** + * @ingroup netif_mib2 + * SNMP MIB2 interface stats + */ +struct stats_mib2_netif_ctrs { + /** The total number of octets received on the interface, including framing characters */ + u32_t ifinoctets; + /** The number of packets, delivered by this sub-layer to a higher (sub-)layer, which were + * not addressed to a multicast or broadcast address at this sub-layer */ + u32_t ifinucastpkts; + /** The number of packets, delivered by this sub-layer to a higher (sub-)layer, which were + * addressed to a multicast or broadcast address at this sub-layer */ + u32_t ifinnucastpkts; + /** The number of inbound packets which were chosen to be discarded even though no errors had + * been detected to prevent their being deliverable to a higher-layer protocol. One possible + * reason for discarding such a packet could be to free up buffer space */ + u32_t ifindiscards; + /** For packet-oriented interfaces, the number of inbound packets that contained errors + * preventing them from being deliverable to a higher-layer protocol. For character- + * oriented or fixed-length interfaces, the number of inbound transmission units that + * contained errors preventing them from being deliverable to a higher-layer protocol. */ + u32_t ifinerrors; + /** For packet-oriented interfaces, the number of packets received via the interface which + * were discarded because of an unknown or unsupported protocol. For character-oriented + * or fixed-length interfaces that support protocol multiplexing the number of transmission + * units received via the interface which were discarded because of an unknown or unsupported + * protocol. 
For any interface that does not support protocol multiplexing, this counter will + * always be 0 */ + u32_t ifinunknownprotos; + /** The total number of octets transmitted out of the interface, including framing characters. */ + u32_t ifoutoctets; + /** The total number of packets that higher-level protocols requested be transmitted, and + * which were not addressed to a multicast or broadcast address at this sub-layer, including + * those that were discarded or not sent. */ + u32_t ifoutucastpkts; + /** The total number of packets that higher-level protocols requested be transmitted, and which + * were addressed to a multicast or broadcast address at this sub-layer, including + * those that were discarded or not sent. */ + u32_t ifoutnucastpkts; + /** The number of outbound packets which were chosen to be discarded even though no errors had + * been detected to prevent their being transmitted. One possible reason for discarding + * such a packet could be to free up buffer space. */ + u32_t ifoutdiscards; + /** For packet-oriented interfaces, the number of outbound packets that could not be transmitted + * because of errors. For character-oriented or fixed-length interfaces, the number of outbound + * transmission units that could not be transmitted because of errors. */ + u32_t ifouterrors; +}; + +/** lwIP stats container */ +struct stats_ { +#if LINK_STATS + /** Link level */ + struct stats_proto link; +#endif +#if ETHARP_STATS + /** ARP */ + struct stats_proto etharp; +#endif +#if IPFRAG_STATS + /** Fragmentation */ + struct stats_proto ip_frag; +#endif +#if IP_STATS + /** IP */ + struct stats_proto ip; +#endif +#if ICMP_STATS + /** ICMP */ + struct stats_proto icmp; +#endif +#if IGMP_STATS + /** IGMP */ + struct stats_igmp igmp; +#endif +#if UDP_STATS + /** UDP */ + struct stats_proto udp; +#endif +#if TCP_STATS + /** TCP */ + struct stats_proto tcp; +#endif +#if MEM_STATS + /** Heap */ + struct stats_mem mem; +#endif +#if MEMP_STATS + /** Internal memory pools */ + struct stats_mem *memp[MEMP_MAX]; +#endif +#if SYS_STATS + /** System */ + struct stats_sys sys; +#endif +#if IP6_STATS + /** IPv6 */ + struct stats_proto ip6; +#endif +#if ICMP6_STATS + /** ICMP6 */ + struct stats_proto icmp6; +#endif +#if IP6_FRAG_STATS + /** IPv6 fragmentation */ + struct stats_proto ip6_frag; +#endif +#if MLD6_STATS + /** Multicast listener discovery */ + struct stats_igmp mld6; +#endif +#if ND6_STATS + /** Neighbor discovery */ + struct stats_proto nd6; +#endif +#if MIB2_STATS + /** SNMP MIB2 */ + struct stats_mib2 mib2; +#endif +}; + +/** Global variable containing lwIP internal statistics. Add this to your debugger's watchlist. 
*/ +extern struct stats_ lwip_stats; + +/** Init statistics */ +void stats_init(void); + +#define STATS_INC(x) ++lwip_stats.x +#define STATS_DEC(x) --lwip_stats.x +#define STATS_INC_USED(x, y, type) do { lwip_stats.x.used = (type)(lwip_stats.x.used + y); \ + if (lwip_stats.x.max < lwip_stats.x.used) { \ + lwip_stats.x.max = lwip_stats.x.used; \ + } \ + } while(0) +#define STATS_GET(x) lwip_stats.x +#else /* LWIP_STATS */ +#define stats_init() +#define STATS_INC(x) +#define STATS_DEC(x) +#define STATS_INC_USED(x, y, type) +#endif /* LWIP_STATS */ + +#if TCP_STATS +#define TCP_STATS_INC(x) STATS_INC(x) +#define TCP_STATS_DISPLAY() stats_display_proto(&lwip_stats.tcp, "TCP") +#else +#define TCP_STATS_INC(x) +#define TCP_STATS_DISPLAY() +#endif + +#if UDP_STATS +#define UDP_STATS_INC(x) STATS_INC(x) +#define UDP_STATS_DISPLAY() stats_display_proto(&lwip_stats.udp, "UDP") +#else +#define UDP_STATS_INC(x) +#define UDP_STATS_DISPLAY() +#endif + +#if ICMP_STATS +#define ICMP_STATS_INC(x) STATS_INC(x) +#define ICMP_STATS_DISPLAY() stats_display_proto(&lwip_stats.icmp, "ICMP") +#else +#define ICMP_STATS_INC(x) +#define ICMP_STATS_DISPLAY() +#endif + +#if IGMP_STATS +#define IGMP_STATS_INC(x) STATS_INC(x) +#define IGMP_STATS_DISPLAY() stats_display_igmp(&lwip_stats.igmp, "IGMP") +#else +#define IGMP_STATS_INC(x) +#define IGMP_STATS_DISPLAY() +#endif + +#if IP_STATS +#define IP_STATS_INC(x) STATS_INC(x) +#define IP_STATS_DISPLAY() stats_display_proto(&lwip_stats.ip, "IP") +#else +#define IP_STATS_INC(x) +#define IP_STATS_DISPLAY() +#endif + +#if IPFRAG_STATS +#define IPFRAG_STATS_INC(x) STATS_INC(x) +#define IPFRAG_STATS_DISPLAY() stats_display_proto(&lwip_stats.ip_frag, "IP_FRAG") +#else +#define IPFRAG_STATS_INC(x) +#define IPFRAG_STATS_DISPLAY() +#endif + +#if ETHARP_STATS +#define ETHARP_STATS_INC(x) STATS_INC(x) +#define ETHARP_STATS_DISPLAY() stats_display_proto(&lwip_stats.etharp, "ETHARP") +#else +#define ETHARP_STATS_INC(x) +#define ETHARP_STATS_DISPLAY() +#endif + +#if LINK_STATS +#define LINK_STATS_INC(x) STATS_INC(x) +#define LINK_STATS_DISPLAY() stats_display_proto(&lwip_stats.link, "LINK") +#else +#define LINK_STATS_INC(x) +#define LINK_STATS_DISPLAY() +#endif + +#if MEM_STATS +#define MEM_STATS_AVAIL(x, y) lwip_stats.mem.x = y +#define MEM_STATS_INC(x) STATS_INC(mem.x) +#define MEM_STATS_INC_USED(x, y) STATS_INC_USED(mem, y, mem_size_t) +#define MEM_STATS_DEC_USED(x, y) lwip_stats.mem.x = (mem_size_t)((lwip_stats.mem.x) - (y)) +#define MEM_STATS_DISPLAY() stats_display_mem(&lwip_stats.mem, "HEAP") +#else +#define MEM_STATS_AVAIL(x, y) +#define MEM_STATS_INC(x) +#define MEM_STATS_INC_USED(x, y) +#define MEM_STATS_DEC_USED(x, y) +#define MEM_STATS_DISPLAY() +#endif + + #if MEMP_STATS +#define MEMP_STATS_DEC(x, i) STATS_DEC(memp[i]->x) +#define MEMP_STATS_DISPLAY(i) stats_display_memp(lwip_stats.memp[i], i) +#define MEMP_STATS_GET(x, i) STATS_GET(memp[i]->x) + #else +#define MEMP_STATS_DEC(x, i) +#define MEMP_STATS_DISPLAY(i) +#define MEMP_STATS_GET(x, i) 0 +#endif + +#if SYS_STATS +#define SYS_STATS_INC(x) STATS_INC(sys.x) +#define SYS_STATS_DEC(x) STATS_DEC(sys.x) +#define SYS_STATS_INC_USED(x) STATS_INC_USED(sys.x, 1, STAT_COUNTER) +#define SYS_STATS_DISPLAY() stats_display_sys(&lwip_stats.sys) +#else +#define SYS_STATS_INC(x) +#define SYS_STATS_DEC(x) +#define SYS_STATS_INC_USED(x) +#define SYS_STATS_DISPLAY() +#endif + +#if IP6_STATS +#define IP6_STATS_INC(x) STATS_INC(x) +#define IP6_STATS_DISPLAY() stats_display_proto(&lwip_stats.ip6, "IPv6") +#else +#define IP6_STATS_INC(x) 
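Taken together, the LWIP_STATS counters and display hooks in this header are usually consulted from a debug task or shell command. A minimal sketch, assuming LWIP_STATS, TCP_STATS and LWIP_STATS_DISPLAY are enabled in lwipopts.h and that printf output is wired up on the target (the helper name debug_dump_lwip_stats is illustrative only):

#include "lwip/stats.h"
#include <stdio.h>

/* Hypothetical debug helper: dump a few TCP counters, then everything. */
static void debug_dump_lwip_stats(void)
{
#if LWIP_STATS && TCP_STATS
  /* Individual counters can be read straight from the global structure. */
  printf("TCP: xmit=%u recv=%u drop=%u memerr=%u\n",
         (unsigned)lwip_stats.tcp.xmit, (unsigned)lwip_stats.tcp.recv,
         (unsigned)lwip_stats.tcp.drop, (unsigned)lwip_stats.tcp.memerr);
#endif
#if LWIP_STATS_DISPLAY
  stats_display();   /* print every enabled module in one go */
#endif
}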
+#define IP6_STATS_DISPLAY() +#endif + +#if ICMP6_STATS +#define ICMP6_STATS_INC(x) STATS_INC(x) +#define ICMP6_STATS_DISPLAY() stats_display_proto(&lwip_stats.icmp6, "ICMPv6") +#else +#define ICMP6_STATS_INC(x) +#define ICMP6_STATS_DISPLAY() +#endif + +#if IP6_FRAG_STATS +#define IP6_FRAG_STATS_INC(x) STATS_INC(x) +#define IP6_FRAG_STATS_DISPLAY() stats_display_proto(&lwip_stats.ip6_frag, "IPv6 FRAG") +#else +#define IP6_FRAG_STATS_INC(x) +#define IP6_FRAG_STATS_DISPLAY() +#endif + +#if MLD6_STATS +#define MLD6_STATS_INC(x) STATS_INC(x) +#define MLD6_STATS_DISPLAY() stats_display_igmp(&lwip_stats.mld6, "MLDv1") +#else +#define MLD6_STATS_INC(x) +#define MLD6_STATS_DISPLAY() +#endif + +#if ND6_STATS +#define ND6_STATS_INC(x) STATS_INC(x) +#define ND6_STATS_DISPLAY() stats_display_proto(&lwip_stats.nd6, "ND") +#else +#define ND6_STATS_INC(x) +#define ND6_STATS_DISPLAY() +#endif + +#if MIB2_STATS +#define MIB2_STATS_INC(x) STATS_INC(x) +#else +#define MIB2_STATS_INC(x) +#endif + +/* Display of statistics */ +#if LWIP_STATS_DISPLAY +void stats_display(void); +void stats_display_proto(struct stats_proto *proto, const char *name); +void stats_display_igmp(struct stats_igmp *igmp, const char *name); +void stats_display_mem(struct stats_mem *mem, const char *name); +void stats_display_memp(struct stats_mem *mem, int index); +void stats_display_sys(struct stats_sys *sys); +#else /* LWIP_STATS_DISPLAY */ +#define stats_display() +#define stats_display_proto(proto, name) +#define stats_display_igmp(igmp, name) +#define stats_display_mem(mem, name) +#define stats_display_memp(mem, index) +#define stats_display_sys(sys) +#endif /* LWIP_STATS_DISPLAY */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_STATS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/sys.h b/Middlewares/Third_Party/LwIP/src/include/lwip/sys.h new file mode 100644 index 0000000..168e465 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/sys.h @@ -0,0 +1,560 @@ +/** + * @file + * OS abstraction layer + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. 
+ * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + */ + +#ifndef LWIP_HDR_SYS_H +#define LWIP_HDR_SYS_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if NO_SYS + +/* For a totally minimal and standalone system, we provide null + definitions of the sys_ functions. */ +typedef u8_t sys_sem_t; +typedef u8_t sys_mutex_t; +typedef u8_t sys_mbox_t; + +#define sys_sem_new(s, c) ERR_OK +#define sys_sem_signal(s) +#define sys_sem_wait(s) +#define sys_arch_sem_wait(s,t) +#define sys_sem_free(s) +#define sys_sem_valid(s) 0 +#define sys_sem_valid_val(s) 0 +#define sys_sem_set_invalid(s) +#define sys_sem_set_invalid_val(s) +#define sys_mutex_new(mu) ERR_OK +#define sys_mutex_lock(mu) +#define sys_mutex_unlock(mu) +#define sys_mutex_free(mu) +#define sys_mutex_valid(mu) 0 +#define sys_mutex_set_invalid(mu) +#define sys_mbox_new(m, s) ERR_OK +#define sys_mbox_fetch(m,d) +#define sys_mbox_tryfetch(m,d) +#define sys_mbox_post(m,d) +#define sys_mbox_trypost(m,d) +#define sys_mbox_free(m) +#define sys_mbox_valid(m) +#define sys_mbox_valid_val(m) +#define sys_mbox_set_invalid(m) +#define sys_mbox_set_invalid_val(m) + +#define sys_thread_new(n,t,a,s,p) + +#define sys_msleep(t) + +#else /* NO_SYS */ + +/** Return code for timeouts from sys_arch_mbox_fetch and sys_arch_sem_wait */ +#define SYS_ARCH_TIMEOUT 0xffffffffUL + +/** sys_mbox_tryfetch() returns SYS_MBOX_EMPTY if appropriate. + * For now we use the same magic value, but we allow this to change in future. + */ +#define SYS_MBOX_EMPTY SYS_ARCH_TIMEOUT + +#include "lwip/err.h" +#include "arch/sys_arch.h" + +/** Function prototype for thread functions */ +typedef void (*lwip_thread_fn)(void *arg); + +/* Function prototypes for functions to be implemented by platform ports + (in sys_arch.c) */ + +/* Mutex functions: */ + +/** Define LWIP_COMPAT_MUTEX if the port has no mutexes and binary semaphores + should be used instead */ +#ifndef LWIP_COMPAT_MUTEX +#define LWIP_COMPAT_MUTEX 0 +#endif + +#if LWIP_COMPAT_MUTEX +/* for old ports that don't have mutexes: define them to binary semaphores */ +#define sys_mutex_t sys_sem_t +#define sys_mutex_new(mutex) sys_sem_new(mutex, 1) +#define sys_mutex_lock(mutex) sys_sem_wait(mutex) +#define sys_mutex_unlock(mutex) sys_sem_signal(mutex) +#define sys_mutex_free(mutex) sys_sem_free(mutex) +#define sys_mutex_valid(mutex) sys_sem_valid(mutex) +#define sys_mutex_set_invalid(mutex) sys_sem_set_invalid(mutex) + +#else /* LWIP_COMPAT_MUTEX */ + +/** + * @ingroup sys_mutex + * Create a new mutex. + * Note that mutexes are expected to not be taken recursively by the lwIP code, + * so both implementation types (recursive or non-recursive) should work. + * The mutex is allocated to the memory that 'mutex' + * points to (which can be both a pointer or the actual OS structure). + * If the mutex has been created, ERR_OK should be returned. Returning any + * other error will provide a hint what went wrong, but except for assertions, + * no real error handling is implemented. + * + * @param mutex pointer to the mutex to create + * @return ERR_OK if successful, another err_t otherwise + */ +err_t sys_mutex_new(sys_mutex_t *mutex); +/** + * @ingroup sys_mutex + * Blocks the thread until the mutex can be grabbed. + * @param mutex the mutex to lock + */ +void sys_mutex_lock(sys_mutex_t *mutex); +/** + * @ingroup sys_mutex + * Releases the mutex previously locked through 'sys_mutex_lock()'. 
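For reference, one possible way a port could back these mutex hooks is with a FreeRTOS mutex. This is a hedged sketch only; it assumes sys_arch.h defines sys_mutex_t as a FreeRTOS SemaphoreHandle_t, which is not shown in this commit:

#include "FreeRTOS.h"
#include "semphr.h"

err_t sys_mutex_new(sys_mutex_t *mutex)
{
  *mutex = xSemaphoreCreateMutex();           /* NULL on allocation failure */
  return (*mutex != NULL) ? ERR_OK : ERR_MEM;
}

void sys_mutex_lock(sys_mutex_t *mutex)
{
  xSemaphoreTake(*mutex, portMAX_DELAY);      /* block until the mutex is free */
}

void sys_mutex_unlock(sys_mutex_t *mutex)
{
  xSemaphoreGive(*mutex);
}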
+ * @param mutex the mutex to unlock + */ +void sys_mutex_unlock(sys_mutex_t *mutex); +/** + * @ingroup sys_mutex + * Deallocates a mutex. + * @param mutex the mutex to delete + */ +void sys_mutex_free(sys_mutex_t *mutex); +#ifndef sys_mutex_valid +/** + * @ingroup sys_mutex + * Returns 1 if the mutes is valid, 0 if it is not valid. + * When using pointers, a simple way is to check the pointer for != NULL. + * When directly using OS structures, implementing this may be more complex. + * This may also be a define, in which case the function is not prototyped. + */ +int sys_mutex_valid(sys_mutex_t *mutex); +#endif +#ifndef sys_mutex_set_invalid +/** + * @ingroup sys_mutex + * Invalidate a mutex so that sys_mutex_valid() returns 0. + * ATTENTION: This does NOT mean that the mutex shall be deallocated: + * sys_mutex_free() is always called before calling this function! + * This may also be a define, in which case the function is not prototyped. + */ +void sys_mutex_set_invalid(sys_mutex_t *mutex); +#endif +#endif /* LWIP_COMPAT_MUTEX */ + +/* Semaphore functions: */ + +/** + * @ingroup sys_sem + * Create a new semaphore + * Creates a new semaphore. The semaphore is allocated to the memory that 'sem' + * points to (which can be both a pointer or the actual OS structure). + * The "count" argument specifies the initial state of the semaphore (which is + * either 0 or 1). + * If the semaphore has been created, ERR_OK should be returned. Returning any + * other error will provide a hint what went wrong, but except for assertions, + * no real error handling is implemented. + * + * @param sem pointer to the semaphore to create + * @param count initial count of the semaphore + * @return ERR_OK if successful, another err_t otherwise + */ +err_t sys_sem_new(sys_sem_t *sem, u8_t count); +/** + * @ingroup sys_sem + * Signals a semaphore + * @param sem the semaphore to signal + */ +void sys_sem_signal(sys_sem_t *sem); +/** + * @ingroup sys_sem + * Blocks the thread while waiting for the semaphore to be signaled. If the + * "timeout" argument is non-zero, the thread should only be blocked for the + * specified time (measured in milliseconds). If the "timeout" argument is zero, + * the thread should be blocked until the semaphore is signalled. + * + * The return value is SYS_ARCH_TIMEOUT if the semaphore wasn't signaled within + * the specified time or any other value if it was signaled (with or without + * waiting). + * Notice that lwIP implements a function with a similar name, + * sys_sem_wait(), that uses the sys_arch_sem_wait() function. + * + * @param sem the semaphore to wait for + * @param timeout timeout in milliseconds to wait (0 = wait forever) + * @return SYS_ARCH_TIMEOUT on timeout, any other value on success + */ +u32_t sys_arch_sem_wait(sys_sem_t *sem, u32_t timeout); +/** + * @ingroup sys_sem + * Deallocates a semaphore. + * @param sem semaphore to delete + */ +void sys_sem_free(sys_sem_t *sem); +/** Wait for a semaphore - forever/no timeout */ +#define sys_sem_wait(sem) sys_arch_sem_wait(sem, 0) +#ifndef sys_sem_valid +/** + * @ingroup sys_sem + * Returns 1 if the semaphore is valid, 0 if it is not valid. + * When using pointers, a simple way is to check the pointer for != NULL. + * When directly using OS structures, implementing this may be more complex. + * This may also be a define, in which case the function is not prototyped. 
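The semaphore hooks follow the same pattern; the part worth getting right is the return value of sys_arch_sem_wait(). A sketch under the same FreeRTOS assumption (sys_sem_t taken to be a SemaphoreHandle_t):

err_t sys_sem_new(sys_sem_t *sem, u8_t count)
{
  *sem = xSemaphoreCreateBinary();
  if (*sem == NULL) {
    return ERR_MEM;
  }
  if (count > 0) {                  /* initial count is either 0 or 1 */
    xSemaphoreGive(*sem);
  }
  return ERR_OK;
}

void sys_sem_signal(sys_sem_t *sem)
{
  xSemaphoreGive(*sem);
}

u32_t sys_arch_sem_wait(sys_sem_t *sem, u32_t timeout_ms)
{
  TickType_t ticks = (timeout_ms == 0) ? portMAX_DELAY : pdMS_TO_TICKS(timeout_ms);
  if (xSemaphoreTake(*sem, ticks) == pdTRUE) {
    return 1;                       /* any value other than SYS_ARCH_TIMEOUT means success */
  }
  return SYS_ARCH_TIMEOUT;          /* not signalled within the requested time */
}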
+ */ +int sys_sem_valid(sys_sem_t *sem); +#endif +#ifndef sys_sem_set_invalid +/** + * @ingroup sys_sem + * Invalidate a semaphore so that sys_sem_valid() returns 0. + * ATTENTION: This does NOT mean that the semaphore shall be deallocated: + * sys_sem_free() is always called before calling this function! + * This may also be a define, in which case the function is not prototyped. + */ +void sys_sem_set_invalid(sys_sem_t *sem); +#endif +#ifndef sys_sem_valid_val +/** + * Same as sys_sem_valid() but taking a value, not a pointer + */ +#define sys_sem_valid_val(sem) sys_sem_valid(&(sem)) +#endif +#ifndef sys_sem_set_invalid_val +/** + * Same as sys_sem_set_invalid() but taking a value, not a pointer + */ +#define sys_sem_set_invalid_val(sem) sys_sem_set_invalid(&(sem)) +#endif + +#ifndef sys_msleep +/** + * @ingroup sys_misc + * Sleep for specified number of ms + */ +void sys_msleep(u32_t ms); /* only has a (close to) 1 ms resolution. */ +#endif + +/* Mailbox functions. */ + +/** + * @ingroup sys_mbox + * Creates an empty mailbox for maximum "size" elements. Elements stored + * in mailboxes are pointers. You have to define macros "_MBOX_SIZE" + * in your lwipopts.h, or ignore this parameter in your implementation + * and use a default size. + * If the mailbox has been created, ERR_OK should be returned. Returning any + * other error will provide a hint what went wrong, but except for assertions, + * no real error handling is implemented. + * + * @param mbox pointer to the mbox to create + * @param size (minimum) number of messages in this mbox + * @return ERR_OK if successful, another err_t otherwise + */ +err_t sys_mbox_new(sys_mbox_t *mbox, int size); +/** + * @ingroup sys_mbox + * Post a message to an mbox - may not fail + * -> blocks if full, only to be used from tasks NOT from ISR! + * + * @param mbox mbox to posts the message + * @param msg message to post (ATTENTION: can be NULL) + */ +void sys_mbox_post(sys_mbox_t *mbox, void *msg); +/** + * @ingroup sys_mbox + * Try to post a message to an mbox - may fail if full. + * Can be used from ISR (if the sys arch layer allows this). + * Returns ERR_MEM if it is full, else, ERR_OK if the "msg" is posted. + * + * @param mbox mbox to posts the message + * @param msg message to post (ATTENTION: can be NULL) + */ +err_t sys_mbox_trypost(sys_mbox_t *mbox, void *msg); +/** + * @ingroup sys_mbox + * Try to post a message to an mbox - may fail if full. + * To be be used from ISR. + * Returns ERR_MEM if it is full, else, ERR_OK if the "msg" is posted. + * + * @param mbox mbox to posts the message + * @param msg message to post (ATTENTION: can be NULL) + */ +err_t sys_mbox_trypost_fromisr(sys_mbox_t *mbox, void *msg); +/** + * @ingroup sys_mbox + * Blocks the thread until a message arrives in the mailbox, but does + * not block the thread longer than "timeout" milliseconds (similar to + * the sys_arch_sem_wait() function). If "timeout" is 0, the thread should + * be blocked until a message arrives. The "msg" argument is a result + * parameter that is set by the function (i.e., by doing "*msg = + * ptr"). The "msg" parameter maybe NULL to indicate that the message + * should be dropped. + * The return values are the same as for the sys_arch_sem_wait() function: + * SYS_ARCH_TIMEOUT if there was a timeout, any other value if a messages + * is received. + * + * Note that a function with a similar name, sys_mbox_fetch(), is + * implemented by lwIP. 
+ * + * @param mbox mbox to get a message from + * @param msg pointer where the message is stored + * @param timeout maximum time (in milliseconds) to wait for a message (0 = wait forever) + * @return SYS_ARCH_TIMEOUT on timeout, any other value if a message has been received + */ +u32_t sys_arch_mbox_fetch(sys_mbox_t *mbox, void **msg, u32_t timeout); +/* Allow port to override with a macro, e.g. special timeout for sys_arch_mbox_fetch() */ +#ifndef sys_arch_mbox_tryfetch +/** + * @ingroup sys_mbox + * This is similar to sys_arch_mbox_fetch, however if a message is not + * present in the mailbox, it immediately returns with the code + * SYS_MBOX_EMPTY. On success 0 is returned. + * To allow for efficient implementations, this can be defined as a + * function-like macro in sys_arch.h instead of a normal function. For + * example, a naive implementation could be: + * \#define sys_arch_mbox_tryfetch(mbox,msg) sys_arch_mbox_fetch(mbox,msg,1) + * although this would introduce unnecessary delays. + * + * @param mbox mbox to get a message from + * @param msg pointer where the message is stored + * @return 0 (milliseconds) if a message has been received + * or SYS_MBOX_EMPTY if the mailbox is empty + */ +u32_t sys_arch_mbox_tryfetch(sys_mbox_t *mbox, void **msg); +#endif +/** + * For now, we map straight to sys_arch implementation. + */ +#define sys_mbox_tryfetch(mbox, msg) sys_arch_mbox_tryfetch(mbox, msg) +/** + * @ingroup sys_mbox + * Deallocates a mailbox. If there are messages still present in the + * mailbox when the mailbox is deallocated, it is an indication of a + * programming error in lwIP and the developer should be notified. + * + * @param mbox mbox to delete + */ +void sys_mbox_free(sys_mbox_t *mbox); +#define sys_mbox_fetch(mbox, msg) sys_arch_mbox_fetch(mbox, msg, 0) +#ifndef sys_mbox_valid +/** + * @ingroup sys_mbox + * Returns 1 if the mailbox is valid, 0 if it is not valid. + * When using pointers, a simple way is to check the pointer for != NULL. + * When directly using OS structures, implementing this may be more complex. + * This may also be a define, in which case the function is not prototyped. + */ +int sys_mbox_valid(sys_mbox_t *mbox); +#endif +#ifndef sys_mbox_set_invalid +/** + * @ingroup sys_mbox + * Invalidate a mailbox so that sys_mbox_valid() returns 0. + * ATTENTION: This does NOT mean that the mailbox shall be deallocated: + * sys_mbox_free() is always called before calling this function! + * This may also be a define, in which case the function is not prototyped. + */ +void sys_mbox_set_invalid(sys_mbox_t *mbox); +#endif +#ifndef sys_mbox_valid_val +/** + * Same as sys_mbox_valid() but taking a value, not a pointer + */ +#define sys_mbox_valid_val(mbox) sys_mbox_valid(&(mbox)) +#endif +#ifndef sys_mbox_set_invalid_val +/** + * Same as sys_mbox_set_invalid() but taking a value, not a pointer + */ +#define sys_mbox_set_invalid_val(mbox) sys_mbox_set_invalid(&(mbox)) +#endif + + +/** + * @ingroup sys_misc + * The only thread function: + * Starts a new thread named "name" with priority "prio" that will begin its + * execution in the function "thread()". The "arg" argument will be passed as an + * argument to the thread() function. The stack size to used for this thread is + * the "stacksize" parameter. The id of the new thread is returned. Both the id + * and the priority are system dependent. + * ATTENTION: although this function returns a value, it MUST NOT FAIL (ports have to assert this!) 
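A matching sketch for the mailbox functions documented above, before the thread-creation hook that follows (again assuming a hypothetical FreeRTOS port where sys_mbox_t is a QueueHandle_t holding void * items); the two contracts to respect are the ERR_MEM return of trypost and the SYS_ARCH_TIMEOUT return of the timed fetch:

err_t sys_mbox_new(sys_mbox_t *mbox, int size)
{
  *mbox = xQueueCreate((UBaseType_t)size, sizeof(void *));
  return (*mbox != NULL) ? ERR_OK : ERR_MEM;
}

void sys_mbox_post(sys_mbox_t *mbox, void *msg)
{
  xQueueSend(*mbox, &msg, portMAX_DELAY);       /* must not fail: block while full */
}

err_t sys_mbox_trypost(sys_mbox_t *mbox, void *msg)
{
  return (xQueueSend(*mbox, &msg, 0) == pdTRUE) ? ERR_OK : ERR_MEM;
}

u32_t sys_arch_mbox_fetch(sys_mbox_t *mbox, void **msg, u32_t timeout_ms)
{
  void *dummy;
  TickType_t ticks = (timeout_ms == 0) ? portMAX_DELAY : pdMS_TO_TICKS(timeout_ms);
  if (msg == NULL) {
    msg = &dummy;                               /* caller asked for the message to be dropped */
  }
  if (xQueueReceive(*mbox, msg, ticks) == pdTRUE) {
    return 1;                                   /* any value other than SYS_ARCH_TIMEOUT */
  }
  return SYS_ARCH_TIMEOUT;
}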
+ * + * @param name human-readable name for the thread (used for debugging purposes) + * @param thread thread-function + * @param arg parameter passed to 'thread' + * @param stacksize stack size in bytes for the new thread (may be ignored by ports) + * @param prio priority of the new thread (may be ignored by ports) */ +sys_thread_t sys_thread_new(const char *name, lwip_thread_fn thread, void *arg, int stacksize, int prio); + +#endif /* NO_SYS */ + +/** + * @ingroup sys_misc + * sys_init() must be called before anything else. + * Initialize the sys_arch layer. + */ +void sys_init(void); + +#ifndef sys_jiffies +/** + * Ticks/jiffies since power up. + */ +u32_t sys_jiffies(void); +#endif + +/** + * @ingroup sys_time + * Returns the current time in milliseconds, + * may be the same as sys_jiffies or at least based on it. + * Don't care for wraparound, this is only used for time diffs. + * Not implementing this function means you cannot use some modules (e.g. TCP + * timestamps, internal timeouts for NO_SYS==1). + */ +u32_t sys_now(void); + +/* Critical Region Protection */ +/* These functions must be implemented in the sys_arch.c file. + In some implementations they can provide a more light-weight protection + mechanism than using semaphores. Otherwise semaphores can be used for + implementation */ +#ifndef SYS_ARCH_PROTECT +/** SYS_LIGHTWEIGHT_PROT + * define SYS_LIGHTWEIGHT_PROT in lwipopts.h if you want inter-task protection + * for certain critical regions during buffer allocation, deallocation and memory + * allocation and deallocation. + */ +#if SYS_LIGHTWEIGHT_PROT + +/** + * @ingroup sys_prot + * SYS_ARCH_DECL_PROTECT + * declare a protection variable. This macro will default to defining a variable of + * type sys_prot_t. If a particular port needs a different implementation, then + * this macro may be defined in sys_arch.h. + */ +#define SYS_ARCH_DECL_PROTECT(lev) sys_prot_t lev +/** + * @ingroup sys_prot + * SYS_ARCH_PROTECT + * Perform a "fast" protect. This could be implemented by + * disabling interrupts for an embedded system or by using a semaphore or + * mutex. The implementation should allow calling SYS_ARCH_PROTECT when + * already protected. The old protection level is returned in the variable + * "lev". This macro will default to calling the sys_arch_protect() function + * which should be implemented in sys_arch.c. If a particular port needs a + * different implementation, then this macro may be defined in sys_arch.h + */ +#define SYS_ARCH_PROTECT(lev) lev = sys_arch_protect() +/** + * @ingroup sys_prot + * SYS_ARCH_UNPROTECT + * Perform a "fast" set of the protection level to "lev". This could be + * implemented by setting the interrupt level to "lev" within the MACRO or by + * using a semaphore or mutex. This macro will default to calling the + * sys_arch_unprotect() function which should be implemented in + * sys_arch.c. If a particular port needs a different implementation, then + * this macro may be defined in sys_arch.h + */ +#define SYS_ARCH_UNPROTECT(lev) sys_arch_unprotect(lev) +sys_prot_t sys_arch_protect(void); +void sys_arch_unprotect(sys_prot_t pval); + +#else + +#define SYS_ARCH_DECL_PROTECT(lev) +#define SYS_ARCH_PROTECT(lev) +#define SYS_ARCH_UNPROTECT(lev) + +#endif /* SYS_LIGHTWEIGHT_PROT */ + +#endif /* SYS_ARCH_PROTECT */ + +/* + * Macros to set/get and increase/decrease variables in a thread-safe way. + * Use these for accessing variable that are used from more than one thread. 
+ */ + +#ifndef SYS_ARCH_INC +#define SYS_ARCH_INC(var, val) do { \ + SYS_ARCH_DECL_PROTECT(old_level); \ + SYS_ARCH_PROTECT(old_level); \ + var += val; \ + SYS_ARCH_UNPROTECT(old_level); \ + } while(0) +#endif /* SYS_ARCH_INC */ + +#ifndef SYS_ARCH_DEC +#define SYS_ARCH_DEC(var, val) do { \ + SYS_ARCH_DECL_PROTECT(old_level); \ + SYS_ARCH_PROTECT(old_level); \ + var -= val; \ + SYS_ARCH_UNPROTECT(old_level); \ + } while(0) +#endif /* SYS_ARCH_DEC */ + +#ifndef SYS_ARCH_GET +#define SYS_ARCH_GET(var, ret) do { \ + SYS_ARCH_DECL_PROTECT(old_level); \ + SYS_ARCH_PROTECT(old_level); \ + ret = var; \ + SYS_ARCH_UNPROTECT(old_level); \ + } while(0) +#endif /* SYS_ARCH_GET */ + +#ifndef SYS_ARCH_SET +#define SYS_ARCH_SET(var, val) do { \ + SYS_ARCH_DECL_PROTECT(old_level); \ + SYS_ARCH_PROTECT(old_level); \ + var = val; \ + SYS_ARCH_UNPROTECT(old_level); \ + } while(0) +#endif /* SYS_ARCH_SET */ + +#ifndef SYS_ARCH_LOCKED +#define SYS_ARCH_LOCKED(code) do { \ + SYS_ARCH_DECL_PROTECT(old_level); \ + SYS_ARCH_PROTECT(old_level); \ + code; \ + SYS_ARCH_UNPROTECT(old_level); \ + } while(0) +#endif /* SYS_ARCH_LOCKED */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_SYS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/tcp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/tcp.h new file mode 100644 index 0000000..daf7599 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/tcp.h @@ -0,0 +1,500 @@ +/** + * @file + * TCP API (to be used from TCPIP thread)\n + * See also @ref tcp_raw + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
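On a bare-metal Cortex-M target such as the STM32F4 used here, the lightweight protection behind the SYS_ARCH_* macros above is commonly implemented by masking interrupts. A hedged sketch, assuming sys_prot_t is defined as a 32-bit integer in sys_arch.h and the CMSIS intrinsics are available:

sys_prot_t sys_arch_protect(void)
{
  sys_prot_t old = __get_PRIMASK();   /* remember the current interrupt mask state */
  __disable_irq();                    /* enter the critical section */
  return old;
}

void sys_arch_unprotect(sys_prot_t pval)
{
  __set_PRIMASK(pval);                /* restore the previous state (safe when nested) */
}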
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_TCP_H +#define LWIP_HDR_TCP_H + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/tcpbase.h" +#include "lwip/mem.h" +#include "lwip/pbuf.h" +#include "lwip/ip.h" +#include "lwip/icmp.h" +#include "lwip/err.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct tcp_pcb; +struct tcp_pcb_listen; + +/** Function prototype for tcp accept callback functions. Called when a new + * connection can be accepted on a listening pcb. + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param newpcb The new connection pcb + * @param err An error code if there has been an error accepting. + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! + */ +typedef err_t (*tcp_accept_fn)(void *arg, struct tcp_pcb *newpcb, err_t err); + +/** Function prototype for tcp receive callback functions. Called when data has + * been received. + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param tpcb The connection pcb which received data + * @param p The received data (or NULL when the connection has been closed!) + * @param err An error code if there has been an error receiving + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! + */ +typedef err_t (*tcp_recv_fn)(void *arg, struct tcp_pcb *tpcb, + struct pbuf *p, err_t err); + +/** Function prototype for tcp sent callback functions. Called when sent data has + * been acknowledged by the remote side. Use it to free corresponding resources. + * This also means that the pcb has now space available to send new data. + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param tpcb The connection pcb for which data has been acknowledged + * @param len The amount of bytes acknowledged + * @return ERR_OK: try to send some data by calling tcp_output + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! + */ +typedef err_t (*tcp_sent_fn)(void *arg, struct tcp_pcb *tpcb, + u16_t len); + +/** Function prototype for tcp poll callback functions. Called periodically as + * specified by @see tcp_poll. + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param tpcb tcp pcb + * @return ERR_OK: try to send some data by calling tcp_output + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! + */ +typedef err_t (*tcp_poll_fn)(void *arg, struct tcp_pcb *tpcb); + +/** Function prototype for tcp error callback functions. Called when the pcb + * receives a RST or is unexpectedly closed for any other reason. + * + * @note The corresponding pcb is already freed when this callback is called! + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param err Error code to indicate why the pcb has been closed + * ERR_ABRT: aborted through tcp_abort or by a TCP timer + * ERR_RST: the connection was reset by the remote host + */ +typedef void (*tcp_err_fn)(void *arg, err_t err); + +/** Function prototype for tcp connected callback functions. Called when a pcb + * is connected to the remote side after initiating a connection attempt by + * calling tcp_connect(). 
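The callback contracts above are easiest to see in application code. A sketch of a receive callback that respects them (hypothetical echo-style handler; for brevity it only echoes the first pbuf of a chain, and it never returns ERR_ABRT because it never calls tcp_abort):

static err_t echo_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  if (p == NULL) {                    /* remote side closed the connection */
    tcp_close(tpcb);
    return ERR_OK;
  }
  if (err == ERR_OK) {
    tcp_write(tpcb, p->payload, p->len, TCP_WRITE_FLAG_COPY);  /* queue the echo */
    tcp_recved(tpcb, p->tot_len);     /* tell lwIP the data has been taken */
  }
  pbuf_free(p);
  return ERR_OK;
}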
+ * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param tpcb The connection pcb which is connected + * @param err An unused error code, always ERR_OK currently ;-) @todo! + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! + * + * @note When a connection attempt fails, the error callback is currently called! + */ +typedef err_t (*tcp_connected_fn)(void *arg, struct tcp_pcb *tpcb, err_t err); + +#if LWIP_WND_SCALE +#define RCV_WND_SCALE(pcb, wnd) (((wnd) >> (pcb)->rcv_scale)) +#define SND_WND_SCALE(pcb, wnd) (((wnd) << (pcb)->snd_scale)) +#define TCPWND16(x) ((u16_t)LWIP_MIN((x), 0xFFFF)) +#define TCP_WND_MAX(pcb) ((tcpwnd_size_t)(((pcb)->flags & TF_WND_SCALE) ? TCP_WND : TCPWND16(TCP_WND))) +#else +#define RCV_WND_SCALE(pcb, wnd) (wnd) +#define SND_WND_SCALE(pcb, wnd) (wnd) +#define TCPWND16(x) (x) +#define TCP_WND_MAX(pcb) TCP_WND +#endif +/* Increments a tcpwnd_size_t and holds at max value rather than rollover */ +#define TCP_WND_INC(wnd, inc) do { \ + if ((tcpwnd_size_t)(wnd + inc) >= wnd) { \ + wnd = (tcpwnd_size_t)(wnd + inc); \ + } else { \ + wnd = (tcpwnd_size_t)-1; \ + } \ + } while(0) + +#if LWIP_TCP_SACK_OUT +/** SACK ranges to include in ACK packets. + * SACK entry is invalid if left==right. */ +struct tcp_sack_range { + /** Left edge of the SACK: the first acknowledged sequence number. */ + u32_t left; + /** Right edge of the SACK: the last acknowledged sequence number +1 (so first NOT acknowledged). */ + u32_t right; +}; +#endif /* LWIP_TCP_SACK_OUT */ + +/** Function prototype for deallocation of arguments. Called *just before* the + * pcb is freed, so don't expect to be able to do anything with this pcb! + * + * @param id ext arg id (allocated via @ref tcp_ext_arg_alloc_id) + * @param data pointer to the data (set via @ref tcp_ext_arg_set before) + */ +typedef void (*tcp_extarg_callback_pcb_destroyed_fn)(u8_t id, void *data); + +/** Function prototype to transition arguments from a listening pcb to an accepted pcb + * + * @param id ext arg id (allocated via @ref tcp_ext_arg_alloc_id) + * @param lpcb the listening pcb accepting a connection + * @param cpcb the newly allocated connection pcb + * @return ERR_OK if OK, any error if connection should be dropped + */ +typedef err_t (*tcp_extarg_callback_passive_open_fn)(u8_t id, struct tcp_pcb_listen *lpcb, struct tcp_pcb *cpcb); + +/** A table of callback functions that is invoked for ext arguments */ +struct tcp_ext_arg_callbacks { + /** @ref tcp_extarg_callback_pcb_destroyed_fn */ + tcp_extarg_callback_pcb_destroyed_fn destroy; + /** @ref tcp_extarg_callback_passive_open_fn */ + tcp_extarg_callback_passive_open_fn passive_open; +}; + +#define LWIP_TCP_PCB_NUM_EXT_ARG_ID_INVALID 0xFF + +#if LWIP_TCP_PCB_NUM_EXT_ARGS +/* This is the structure for ext args in tcp pcbs (used as array) */ +struct tcp_pcb_ext_args { + const struct tcp_ext_arg_callbacks *callbacks; + void *data; +}; +/* This is a helper define to prevent zero size arrays if disabled */ +#define TCP_PCB_EXTARGS struct tcp_pcb_ext_args ext_args[LWIP_TCP_PCB_NUM_EXT_ARGS]; +#else +#define TCP_PCB_EXTARGS +#endif + +typedef u16_t tcpflags_t; +#define TCP_ALLFLAGS 0xffffU + +/** + * members common to struct tcp_pcb and struct tcp_listen_pcb + */ +#define TCP_PCB_COMMON(type) \ + type *next; /* for the linked list */ \ + void *callback_arg; \ + TCP_PCB_EXTARGS \ + enum tcp_state state; /* TCP state */ \ + u8_t prio; \ + /* ports are in host byte order */ \ + u16_t local_port + + +/** the 
TCP protocol control block for listening pcbs */ +struct tcp_pcb_listen { +/** Common members of all PCB types */ + IP_PCB; +/** Protocol specific PCB members */ + TCP_PCB_COMMON(struct tcp_pcb_listen); + +#if LWIP_CALLBACK_API + /* Function to call when a listener has been connected. */ + tcp_accept_fn accept; +#endif /* LWIP_CALLBACK_API */ + +#if TCP_LISTEN_BACKLOG + u8_t backlog; + u8_t accepts_pending; +#endif /* TCP_LISTEN_BACKLOG */ +}; + + +/** the TCP protocol control block */ +struct tcp_pcb { +/** common PCB members */ + IP_PCB; +/** protocol specific PCB members */ + TCP_PCB_COMMON(struct tcp_pcb); + + /* ports are in host byte order */ + u16_t remote_port; + + tcpflags_t flags; +#define TF_ACK_DELAY 0x01U /* Delayed ACK. */ +#define TF_ACK_NOW 0x02U /* Immediate ACK. */ +#define TF_INFR 0x04U /* In fast recovery. */ +#define TF_CLOSEPEND 0x08U /* If this is set, tcp_close failed to enqueue the FIN (retried in tcp_tmr) */ +#define TF_RXCLOSED 0x10U /* rx closed by tcp_shutdown */ +#define TF_FIN 0x20U /* Connection was closed locally (FIN segment enqueued). */ +#define TF_NODELAY 0x40U /* Disable Nagle algorithm */ +#define TF_NAGLEMEMERR 0x80U /* nagle enabled, memerr, try to output to prevent delayed ACK to happen */ +#if LWIP_WND_SCALE +#define TF_WND_SCALE 0x0100U /* Window Scale option enabled */ +#endif +#if TCP_LISTEN_BACKLOG +#define TF_BACKLOGPEND 0x0200U /* If this is set, a connection pcb has increased the backlog on its listener */ +#endif +#if LWIP_TCP_TIMESTAMPS +#define TF_TIMESTAMP 0x0400U /* Timestamp option enabled */ +#endif +#define TF_RTO 0x0800U /* RTO timer has fired, in-flight data moved to unsent and being retransmitted */ +#if LWIP_TCP_SACK_OUT +#define TF_SACK 0x1000U /* Selective ACKs enabled */ +#endif + + /* the rest of the fields are in host byte order + as we have to do some math with them */ + + /* Timers */ + u8_t polltmr, pollinterval; + u8_t last_timer; + u32_t tmr; + + /* receiver variables */ + u32_t rcv_nxt; /* next seqno expected */ + tcpwnd_size_t rcv_wnd; /* receiver window available */ + tcpwnd_size_t rcv_ann_wnd; /* receiver window to announce */ + u32_t rcv_ann_right_edge; /* announced right edge of window */ + +#if LWIP_TCP_SACK_OUT + /* SACK ranges to include in ACK packets (entry is invalid if left==right) */ + struct tcp_sack_range rcv_sacks[LWIP_TCP_MAX_SACK_NUM]; +#define LWIP_TCP_SACK_VALID(pcb, idx) ((pcb)->rcv_sacks[idx].left != (pcb)->rcv_sacks[idx].right) +#endif /* LWIP_TCP_SACK_OUT */ + + /* Retransmission timer. */ + s16_t rtime; + + u16_t mss; /* maximum segment size */ + + /* RTT (round trip time) estimation variables */ + u32_t rttest; /* RTT estimate in 500ms ticks */ + u32_t rtseq; /* sequence number being timed */ + s16_t sa, sv; /* @see "Congestion Avoidance and Control" by Van Jacobson and Karels */ + + s16_t rto; /* retransmission time-out (in ticks of TCP_SLOW_INTERVAL) */ + u8_t nrtx; /* number of retransmissions */ + + /* fast retransmit/recovery */ + u8_t dupacks; + u32_t lastack; /* Highest acknowledged seqno. */ + + /* congestion avoidance/control variables */ + tcpwnd_size_t cwnd; + tcpwnd_size_t ssthresh; + + /* first byte following last rto byte */ + u32_t rto_end; + + /* sender variables */ + u32_t snd_nxt; /* next new seqno to be sent */ + u32_t snd_wl1, snd_wl2; /* Sequence and acknowledgement numbers of last + window update. */ + u32_t snd_lbb; /* Sequence number of next byte to be buffered. 
*/ + tcpwnd_size_t snd_wnd; /* sender window */ + tcpwnd_size_t snd_wnd_max; /* the maximum sender window announced by the remote host */ + + tcpwnd_size_t snd_buf; /* Available buffer space for sending (in bytes). */ +#define TCP_SNDQUEUELEN_OVERFLOW (0xffffU-3) + u16_t snd_queuelen; /* Number of pbufs currently in the send buffer. */ + +#if TCP_OVERSIZE + /* Extra bytes available at the end of the last pbuf in unsent. */ + u16_t unsent_oversize; +#endif /* TCP_OVERSIZE */ + + tcpwnd_size_t bytes_acked; + + /* These are ordered by sequence number: */ + struct tcp_seg *unsent; /* Unsent (queued) segments. */ + struct tcp_seg *unacked; /* Sent but unacknowledged segments. */ +#if TCP_QUEUE_OOSEQ + struct tcp_seg *ooseq; /* Received out of sequence segments. */ +#endif /* TCP_QUEUE_OOSEQ */ + + struct pbuf *refused_data; /* Data previously received but not yet taken by upper layer */ + +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG + struct tcp_pcb_listen* listener; +#endif /* LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG */ + +#if LWIP_CALLBACK_API + /* Function to be called when more send buffer space is available. */ + tcp_sent_fn sent; + /* Function to be called when (in-sequence) data has arrived. */ + tcp_recv_fn recv; + /* Function to be called when a connection has been set up. */ + tcp_connected_fn connected; + /* Function which is called periodically. */ + tcp_poll_fn poll; + /* Function to be called whenever a fatal error occurs. */ + tcp_err_fn errf; +#endif /* LWIP_CALLBACK_API */ + +#if LWIP_TCP_TIMESTAMPS + u32_t ts_lastacksent; + u32_t ts_recent; +#endif /* LWIP_TCP_TIMESTAMPS */ + + /* idle time before KEEPALIVE is sent */ + u32_t keep_idle; +#if LWIP_TCP_KEEPALIVE + u32_t keep_intvl; + u32_t keep_cnt; +#endif /* LWIP_TCP_KEEPALIVE */ + + /* Persist timer counter */ + u8_t persist_cnt; + /* Persist timer back-off */ + u8_t persist_backoff; + /* Number of persist probes */ + u8_t persist_probe; + + /* KEEPALIVE counter */ + u8_t keep_cnt_sent; + +#if LWIP_WND_SCALE + u8_t snd_scale; + u8_t rcv_scale; +#endif +}; + +#if LWIP_EVENT_API + +enum lwip_event { + LWIP_EVENT_ACCEPT, + LWIP_EVENT_SENT, + LWIP_EVENT_RECV, + LWIP_EVENT_CONNECTED, + LWIP_EVENT_POLL, + LWIP_EVENT_ERR +}; + +err_t lwip_tcp_event(void *arg, struct tcp_pcb *pcb, + enum lwip_event, + struct pbuf *p, + u16_t size, + err_t err); + +#endif /* LWIP_EVENT_API */ + +/* Application program's interface: */ +struct tcp_pcb * tcp_new (void); +struct tcp_pcb * tcp_new_ip_type (u8_t type); + +void tcp_arg (struct tcp_pcb *pcb, void *arg); +#if LWIP_CALLBACK_API +void tcp_recv (struct tcp_pcb *pcb, tcp_recv_fn recv); +void tcp_sent (struct tcp_pcb *pcb, tcp_sent_fn sent); +void tcp_err (struct tcp_pcb *pcb, tcp_err_fn err); +void tcp_accept (struct tcp_pcb *pcb, tcp_accept_fn accept); +#endif /* LWIP_CALLBACK_API */ +void tcp_poll (struct tcp_pcb *pcb, tcp_poll_fn poll, u8_t interval); + +#define tcp_set_flags(pcb, set_flags) do { (pcb)->flags = (tcpflags_t)((pcb)->flags | (set_flags)); } while(0) +#define tcp_clear_flags(pcb, clr_flags) do { (pcb)->flags = (tcpflags_t)((pcb)->flags & (tcpflags_t)(~(clr_flags) & TCP_ALLFLAGS)); } while(0) +#define tcp_is_flag_set(pcb, flag) (((pcb)->flags & (flag)) != 0) + +#if LWIP_TCP_TIMESTAMPS +#define tcp_mss(pcb) (((pcb)->flags & TF_TIMESTAMP) ? 
((pcb)->mss - 12) : (pcb)->mss) +#else /* LWIP_TCP_TIMESTAMPS */ +/** @ingroup tcp_raw */ +#define tcp_mss(pcb) ((pcb)->mss) +#endif /* LWIP_TCP_TIMESTAMPS */ +/** @ingroup tcp_raw */ +#define tcp_sndbuf(pcb) (TCPWND16((pcb)->snd_buf)) +/** @ingroup tcp_raw */ +#define tcp_sndqueuelen(pcb) ((pcb)->snd_queuelen) +/** @ingroup tcp_raw */ +#define tcp_nagle_disable(pcb) tcp_set_flags(pcb, TF_NODELAY) +/** @ingroup tcp_raw */ +#define tcp_nagle_enable(pcb) tcp_clear_flags(pcb, TF_NODELAY) +/** @ingroup tcp_raw */ +#define tcp_nagle_disabled(pcb) tcp_is_flag_set(pcb, TF_NODELAY) + +#if TCP_LISTEN_BACKLOG +#define tcp_backlog_set(pcb, new_backlog) do { \ + LWIP_ASSERT("pcb->state == LISTEN (called for wrong pcb?)", (pcb)->state == LISTEN); \ + ((struct tcp_pcb_listen *)(pcb))->backlog = ((new_backlog) ? (new_backlog) : 1); } while(0) +void tcp_backlog_delayed(struct tcp_pcb* pcb); +void tcp_backlog_accepted(struct tcp_pcb* pcb); +#else /* TCP_LISTEN_BACKLOG */ +#define tcp_backlog_set(pcb, new_backlog) +#define tcp_backlog_delayed(pcb) +#define tcp_backlog_accepted(pcb) +#endif /* TCP_LISTEN_BACKLOG */ +#define tcp_accepted(pcb) do { LWIP_UNUSED_ARG(pcb); } while(0) /* compatibility define, not needed any more */ + +void tcp_recved (struct tcp_pcb *pcb, u16_t len); +err_t tcp_bind (struct tcp_pcb *pcb, const ip_addr_t *ipaddr, + u16_t port); +void tcp_bind_netif(struct tcp_pcb *pcb, const struct netif *netif); +err_t tcp_connect (struct tcp_pcb *pcb, const ip_addr_t *ipaddr, + u16_t port, tcp_connected_fn connected); + +struct tcp_pcb * tcp_listen_with_backlog_and_err(struct tcp_pcb *pcb, u8_t backlog, err_t *err); +struct tcp_pcb * tcp_listen_with_backlog(struct tcp_pcb *pcb, u8_t backlog); +/** @ingroup tcp_raw */ +#define tcp_listen(pcb) tcp_listen_with_backlog(pcb, TCP_DEFAULT_LISTEN_BACKLOG) + +void tcp_abort (struct tcp_pcb *pcb); +err_t tcp_close (struct tcp_pcb *pcb); +err_t tcp_shutdown(struct tcp_pcb *pcb, int shut_rx, int shut_tx); + +err_t tcp_write (struct tcp_pcb *pcb, const void *dataptr, u16_t len, + u8_t apiflags); + +void tcp_setprio (struct tcp_pcb *pcb, u8_t prio); + +err_t tcp_output (struct tcp_pcb *pcb); + +err_t tcp_tcp_get_tcp_addrinfo(struct tcp_pcb *pcb, int local, ip_addr_t *addr, u16_t *port); + +#define tcp_dbg_get_tcp_state(pcb) ((pcb)->state) + +/* for compatibility with older implementation */ +#define tcp_new_ip6() tcp_new_ip_type(IPADDR_TYPE_V6) + +#if LWIP_TCP_PCB_NUM_EXT_ARGS +u8_t tcp_ext_arg_alloc_id(void); +void tcp_ext_arg_set_callbacks(struct tcp_pcb *pcb, uint8_t id, const struct tcp_ext_arg_callbacks * const callbacks); +void tcp_ext_arg_set(struct tcp_pcb *pcb, uint8_t id, void *arg); +void *tcp_ext_arg_get(const struct tcp_pcb *pcb, uint8_t id); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_TCP */ + +#endif /* LWIP_HDR_TCP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/tcpbase.h b/Middlewares/Third_Party/LwIP/src/include/lwip/tcpbase.h new file mode 100644 index 0000000..0023074 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/tcpbase.h @@ -0,0 +1,88 @@ +/** + * @file + * Base TCP API definitions shared by TCP and ALTCP\n + * See also @ref tcp_raw + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_TCPBASE_H +#define LWIP_HDR_TCPBASE_H + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#ifdef __cplusplus +extern "C" { +#endif + + +#if LWIP_WND_SCALE +typedef u32_t tcpwnd_size_t; +#else +typedef u16_t tcpwnd_size_t; +#endif + +enum tcp_state { + CLOSED = 0, + LISTEN = 1, + SYN_SENT = 2, + SYN_RCVD = 3, + ESTABLISHED = 4, + FIN_WAIT_1 = 5, + FIN_WAIT_2 = 6, + CLOSE_WAIT = 7, + CLOSING = 8, + LAST_ACK = 9, + TIME_WAIT = 10 +}; +/* ATTENTION: this depends on state number ordering! */ +#define TCP_STATE_IS_CLOSING(state) ((state) >= FIN_WAIT_1) + +/* Flags for "apiflags" parameter in tcp_write */ +#define TCP_WRITE_FLAG_COPY 0x01 +#define TCP_WRITE_FLAG_MORE 0x02 + +#define TCP_PRIO_MIN 1 +#define TCP_PRIO_NORMAL 64 +#define TCP_PRIO_MAX 127 + +const char* tcp_debug_state_str(enum tcp_state s); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_TCP */ + +#endif /* LWIP_HDR_TCPBASE_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/tcpip.h b/Middlewares/Third_Party/LwIP/src/include/lwip/tcpip.h new file mode 100644 index 0000000..0b8880a --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/tcpip.h @@ -0,0 +1,113 @@ +/** + * @file + * Functions to sync with TCPIP thread + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
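A minimal sketch of how the raw TCP API declared above is typically wired up (echo_recv, echo_accept and echo_listen are illustrative names, error handling is omitted, and all raw-API calls are assumed to run in the tcpip thread or with the core lock held):

#include "lwip/tcp.h"

static err_t echo_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(err);
  if (p == NULL) {                    /* NULL pbuf means the remote side closed */
    return tcp_close(tpcb);
  }
  tcp_recved(tpcb, p->tot_len);       /* re-open the receive window */
  /* echo the first pbuf back (chained pbufs ignored for brevity) */
  tcp_write(tpcb, p->payload, p->len, TCP_WRITE_FLAG_COPY);
  tcp_output(tpcb);                   /* flush the queued segment now */
  pbuf_free(p);
  return ERR_OK;
}

static err_t echo_accept(void *arg, struct tcp_pcb *newpcb, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(err);
  tcp_recv(newpcb, echo_recv);        /* register the per-connection receive callback */
  return ERR_OK;
}

void echo_listen(void)
{
  struct tcp_pcb *pcb = tcp_new();
  tcp_bind(pcb, IP_ADDR_ANY, 7);      /* TCP echo port; check the err_t in real code */
  pcb = tcp_listen(pcb);              /* replaces pcb with a smaller listening pcb */
  tcp_accept(pcb, echo_accept);
}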
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_TCPIP_H +#define LWIP_HDR_TCPIP_H + +#include "lwip/opt.h" + +#if !NO_SYS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/err.h" +#include "lwip/timeouts.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_TCPIP_CORE_LOCKING +/** The global semaphore to lock the stack. */ +extern sys_mutex_t lock_tcpip_core; +#if !defined LOCK_TCPIP_CORE || defined __DOXYGEN__ +/** Lock lwIP core mutex (needs @ref LWIP_TCPIP_CORE_LOCKING 1) */ +#define LOCK_TCPIP_CORE() sys_mutex_lock(&lock_tcpip_core) +/** Unlock lwIP core mutex (needs @ref LWIP_TCPIP_CORE_LOCKING 1) */ +#define UNLOCK_TCPIP_CORE() sys_mutex_unlock(&lock_tcpip_core) +#endif /* LOCK_TCPIP_CORE */ +#else /* LWIP_TCPIP_CORE_LOCKING */ +#define LOCK_TCPIP_CORE() +#define UNLOCK_TCPIP_CORE() +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +struct pbuf; +struct netif; + +/** Function prototype for the init_done function passed to tcpip_init */ +typedef void (*tcpip_init_done_fn)(void *arg); +/** Function prototype for functions passed to tcpip_callback() */ +typedef void (*tcpip_callback_fn)(void *ctx); + +/* Forward declarations */ +struct tcpip_callback_msg; + +void tcpip_init(tcpip_init_done_fn tcpip_init_done, void *arg); + +err_t tcpip_inpkt(struct pbuf *p, struct netif *inp, netif_input_fn input_fn); +err_t tcpip_input(struct pbuf *p, struct netif *inp); + +err_t tcpip_try_callback(tcpip_callback_fn function, void *ctx); +err_t tcpip_callback(tcpip_callback_fn function, void *ctx); +/** @ingroup lwip_os + * @deprecated use tcpip_try_callback() or tcpip_callback() instead + */ +#define tcpip_callback_with_block(function, ctx, block) ((block != 0)? 
tcpip_callback(function, ctx) : tcpip_try_callback(function, ctx)) + +struct tcpip_callback_msg* tcpip_callbackmsg_new(tcpip_callback_fn function, void *ctx); +void tcpip_callbackmsg_delete(struct tcpip_callback_msg* msg); +err_t tcpip_callbackmsg_trycallback(struct tcpip_callback_msg* msg); +err_t tcpip_callbackmsg_trycallback_fromisr(struct tcpip_callback_msg* msg); + +/* free pbufs or heap memory from another context without blocking */ +err_t pbuf_free_callback(struct pbuf *p); +err_t mem_free_callback(void *m); + +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS +err_t tcpip_timeout(u32_t msecs, sys_timeout_handler h, void *arg); +err_t tcpip_untimeout(sys_timeout_handler h, void *arg); +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + +#ifdef TCPIP_THREAD_TEST +int tcpip_thread_poll_one(void); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* !NO_SYS */ + +#endif /* LWIP_HDR_TCPIP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/timeouts.h b/Middlewares/Third_Party/LwIP/src/include/lwip/timeouts.h new file mode 100644 index 0000000..b601f9e --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/timeouts.h @@ -0,0 +1,128 @@ +/** + * @file + * Timer implementations + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_TIMEOUTS_H +#define LWIP_HDR_TIMEOUTS_H + +#include "lwip/opt.h" +#include "lwip/err.h" +#if !NO_SYS +#include "lwip/sys.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef LWIP_DEBUG_TIMERNAMES +#ifdef LWIP_DEBUG +#define LWIP_DEBUG_TIMERNAMES SYS_DEBUG +#else /* LWIP_DEBUG */ +#define LWIP_DEBUG_TIMERNAMES 0 +#endif /* LWIP_DEBUG*/ +#endif + +/** Returned by sys_timeouts_sleeptime() to indicate there is no timer, so we + * can sleep forever. 
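Where application code runs outside the tcpip thread, work can be handed over with tcpip_callback() as declared above; a rough sketch (do_in_lwip_thread and request_flush are illustrative names):

#include "lwip/tcpip.h"
#include "lwip/tcp.h"

static void do_in_lwip_thread(void *ctx)   /* executed in the tcpip thread */
{
  struct tcp_pcb *pcb = (struct tcp_pcb *)ctx;
  tcp_output(pcb);                         /* raw-API calls are safe here */
}

void request_flush(struct tcp_pcb *pcb)    /* may be called from any task */
{
  if (tcpip_callback(do_in_lwip_thread, pcb) != ERR_OK) {
    /* the message could not be posted; retry later or drop the request */
  }
}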
+ */ +#define SYS_TIMEOUTS_SLEEPTIME_INFINITE 0xFFFFFFFF + +/** Function prototype for a stack-internal timer function that has to be + * called at a defined interval */ +typedef void (* lwip_cyclic_timer_handler)(void); + +/** This struct contains information about a stack-internal timer function + that has to be called at a defined interval */ +struct lwip_cyclic_timer { + u32_t interval_ms; + lwip_cyclic_timer_handler handler; +#if LWIP_DEBUG_TIMERNAMES + const char* handler_name; +#endif /* LWIP_DEBUG_TIMERNAMES */ +}; + +/** This array contains all stack-internal cyclic timers. To get the number of + * timers, use lwip_num_cyclic_timers */ +extern const struct lwip_cyclic_timer lwip_cyclic_timers[]; +/** Array size of lwip_cyclic_timers[] */ +extern const int lwip_num_cyclic_timers; + +#if LWIP_TIMERS + +/** Function prototype for a timeout callback function. Register such a function + * using sys_timeout(). + * + * @param arg Additional argument to pass to the function - set up by sys_timeout() + */ +typedef void (* sys_timeout_handler)(void *arg); + +struct sys_timeo { + struct sys_timeo *next; + u32_t time; + sys_timeout_handler h; + void *arg; +#if LWIP_DEBUG_TIMERNAMES + const char* handler_name; +#endif /* LWIP_DEBUG_TIMERNAMES */ +}; + +void sys_timeouts_init(void); + +#if LWIP_DEBUG_TIMERNAMES +void sys_timeout_debug(u32_t msecs, sys_timeout_handler handler, void *arg, const char* handler_name); +#define sys_timeout(msecs, handler, arg) sys_timeout_debug(msecs, handler, arg, #handler) +#else /* LWIP_DEBUG_TIMERNAMES */ +void sys_timeout(u32_t msecs, sys_timeout_handler handler, void *arg); +#endif /* LWIP_DEBUG_TIMERNAMES */ + +void sys_untimeout(sys_timeout_handler handler, void *arg); +void sys_restart_timeouts(void); +void sys_check_timeouts(void); +u32_t sys_timeouts_sleeptime(void); + +#if LWIP_TESTMODE +struct sys_timeo** sys_timeouts_get_next_timeout(void); +void lwip_cyclic_timer(void *arg); +#endif + +#endif /* LWIP_TIMERS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_TIMEOUTS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/udp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/udp.h new file mode 100644 index 0000000..b1c78e5 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/udp.h @@ -0,0 +1,195 @@ +/** + * @file + * UDP API (to be used from TCPIP thread)\n + * See also @ref udp_raw + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_UDP_H +#define LWIP_HDR_UDP_H + +#include "lwip/opt.h" + +#if LWIP_UDP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/ip_addr.h" +#include "lwip/ip.h" +#include "lwip/ip6_addr.h" +#include "lwip/prot/udp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define UDP_FLAGS_NOCHKSUM 0x01U +#define UDP_FLAGS_UDPLITE 0x02U +#define UDP_FLAGS_CONNECTED 0x04U +#define UDP_FLAGS_MULTICAST_LOOP 0x08U + +struct udp_pcb; + +/** Function prototype for udp pcb receive callback functions + * addr and port are in same byte order as in the pcb + * The callback is responsible for freeing the pbuf + * if it's not used any more. + * + * ATTENTION: Be aware that 'addr' might point into the pbuf 'p' so freeing this pbuf + * can make 'addr' invalid, too. + * + * @param arg user supplied argument (udp_pcb.recv_arg) + * @param pcb the udp_pcb which received data + * @param p the packet buffer that was received + * @param addr the remote IP address from which the packet was received + * @param port the remote port from which the packet was received + */ +typedef void (*udp_recv_fn)(void *arg, struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr, u16_t port); + +/** the UDP protocol control block */ +struct udp_pcb { +/** Common members of all PCB types */ + IP_PCB; + +/* Protocol specific PCB members */ + + struct udp_pcb *next; + + u8_t flags; + /** ports are in host byte order */ + u16_t local_port, remote_port; + +#if LWIP_MULTICAST_TX_OPTIONS +#if LWIP_IPV4 + /** outgoing network interface for multicast packets, by IPv4 address (if not 'any') */ + ip4_addr_t mcast_ip4; +#endif /* LWIP_IPV4 */ + /** outgoing network interface for multicast packets, by interface index (if nonzero) */ + u8_t mcast_ifindex; + /** TTL for outgoing multicast packets */ + u8_t mcast_ttl; +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#if LWIP_UDPLITE + /** used for UDP_LITE only */ + u16_t chksum_len_rx, chksum_len_tx; +#endif /* LWIP_UDPLITE */ + + /** receive callback function */ + udp_recv_fn recv; + /** user-supplied argument for the recv callback */ + void *recv_arg; +}; +/* udp_pcbs export for external reference (e.g. SNMP agent) */ +extern struct udp_pcb *udp_pcbs; + +/* The following functions is the application layer interface to the + UDP code. 
*/ +struct udp_pcb * udp_new (void); +struct udp_pcb * udp_new_ip_type(u8_t type); +void udp_remove (struct udp_pcb *pcb); +err_t udp_bind (struct udp_pcb *pcb, const ip_addr_t *ipaddr, + u16_t port); +void udp_bind_netif (struct udp_pcb *pcb, const struct netif* netif); +err_t udp_connect (struct udp_pcb *pcb, const ip_addr_t *ipaddr, + u16_t port); +void udp_disconnect (struct udp_pcb *pcb); +void udp_recv (struct udp_pcb *pcb, udp_recv_fn recv, + void *recv_arg); +err_t udp_sendto_if (struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, + struct netif *netif); +err_t udp_sendto_if_src(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, + struct netif *netif, const ip_addr_t *src_ip); +err_t udp_sendto (struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port); +err_t udp_send (struct udp_pcb *pcb, struct pbuf *p); + +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP +err_t udp_sendto_if_chksum(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, + struct netif *netif, u8_t have_chksum, + u16_t chksum); +err_t udp_sendto_chksum(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, + u8_t have_chksum, u16_t chksum); +err_t udp_send_chksum(struct udp_pcb *pcb, struct pbuf *p, + u8_t have_chksum, u16_t chksum); +err_t udp_sendto_if_src_chksum(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, struct netif *netif, + u8_t have_chksum, u16_t chksum, const ip_addr_t *src_ip); +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + +#define udp_flags(pcb) ((pcb)->flags) +#define udp_setflags(pcb, f) ((pcb)->flags = (f)) + +#define udp_set_flags(pcb, set_flags) do { (pcb)->flags = (u8_t)((pcb)->flags | (set_flags)); } while(0) +#define udp_clear_flags(pcb, clr_flags) do { (pcb)->flags = (u8_t)((pcb)->flags & (u8_t)(~(clr_flags) & 0xff)); } while(0) +#define udp_is_flag_set(pcb, flag) (((pcb)->flags & (flag)) != 0) + +/* The following functions are the lower layer interface to UDP. */ +void udp_input (struct pbuf *p, struct netif *inp); + +void udp_init (void); + +/* for compatibility with older implementation */ +#define udp_new_ip6() udp_new_ip_type(IPADDR_TYPE_V6) + +#if LWIP_MULTICAST_TX_OPTIONS +#if LWIP_IPV4 +#define udp_set_multicast_netif_addr(pcb, ip4addr) ip4_addr_copy((pcb)->mcast_ip4, *(ip4addr)) +#define udp_get_multicast_netif_addr(pcb) (&(pcb)->mcast_ip4) +#endif /* LWIP_IPV4 */ +#define udp_set_multicast_netif_index(pcb, idx) ((pcb)->mcast_ifindex = (idx)) +#define udp_get_multicast_netif_index(pcb) ((pcb)->mcast_ifindex) +#define udp_set_multicast_ttl(pcb, value) ((pcb)->mcast_ttl = (value)) +#define udp_get_multicast_ttl(pcb) ((pcb)->mcast_ttl) +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#if UDP_DEBUG +void udp_debug_print(struct udp_hdr *udphdr); +#else +#define udp_debug_print(udphdr) +#endif + +void udp_netif_ip_addr_changed(const ip_addr_t* old_addr, const ip_addr_t* new_addr); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_UDP */ + +#endif /* LWIP_HDR_UDP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/bridgeif.h b/Middlewares/Third_Party/LwIP/src/include/netif/bridgeif.h new file mode 100644 index 0000000..f4f8cf1 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/bridgeif.h @@ -0,0 +1,127 @@ +/** + * @file + * lwIP netif implementing an IEEE 802.1D MAC Bridge + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt. + * All rights reserved. 
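A minimal UDP echo sketch built on the declarations above (udp_echo_recv and udp_echo_init are illustrative names; note that the receive callback must free the pbuf, as documented for udp_recv_fn above):

#include "lwip/udp.h"

static void udp_echo_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p,
                          const ip_addr_t *addr, u16_t port)
{
  LWIP_UNUSED_ARG(arg);
  udp_sendto(pcb, p, addr, port);   /* send the datagram straight back to its sender */
  pbuf_free(p);                     /* the callback owns the pbuf */
}

void udp_echo_init(void)
{
  struct udp_pcb *pcb = udp_new();
  udp_bind(pcb, IP_ADDR_ANY, 7);    /* UDP echo port; check the err_t in real code */
  udp_recv(pcb, udp_echo_recv, NULL);
}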
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_NETIF_BRIDGEIF_H +#define LWIP_HDR_NETIF_BRIDGEIF_H + +#include "netif/bridgeif_opts.h" + +#include "lwip/err.h" +#include "lwip/prot/ethernet.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct netif; + +#if (BRIDGEIF_MAX_PORTS < 0) || (BRIDGEIF_MAX_PORTS >= 64) +#error BRIDGEIF_MAX_PORTS must be [1..63] +#elif BRIDGEIF_MAX_PORTS < 8 +typedef u8_t bridgeif_portmask_t; +#elif BRIDGEIF_MAX_PORTS < 16 +typedef u16_t bridgeif_portmask_t; +#elif BRIDGEIF_MAX_PORTS < 32 +typedef u32_t bridgeif_portmask_t; +#elif BRIDGEIF_MAX_PORTS < 64 +typedef u64_t bridgeif_portmask_t; +#endif + +#define BR_FLOOD ((bridgeif_portmask_t)-1) + +/** @ingroup bridgeif + * Initialisation data for @ref bridgeif_init. + * An instance of this type must be passed as parameter 'state' to @ref netif_add + * when the bridge is added. + */ +typedef struct bridgeif_initdata_s { + /** MAC address of the bridge (cannot use the netif's addresses) */ + struct eth_addr ethaddr; + /** Maximum number of ports in the bridge (ports are stored in an array, this + influences memory allocated for netif->state of the bridge netif). */ + u8_t max_ports; + /** Maximum number of dynamic/learning entries in the bridge's forwarding database. + In the default implementation, this controls memory consumption only. */ + u16_t max_fdb_dynamic_entries; + /** Maximum number of static forwarding entries. Influences memory consumption! 
*/ + u16_t max_fdb_static_entries; +} bridgeif_initdata_t; + +/** @ingroup bridgeif + * Use this for constant initialization of a bridgeif_initdat_t + * (ethaddr must be passed as ETH_ADDR()) + */ +#define BRIDGEIF_INITDATA1(max_ports, max_fdb_dynamic_entries, max_fdb_static_entries, ethaddr) {ethaddr, max_ports, max_fdb_dynamic_entries, max_fdb_static_entries} +/** @ingroup bridgeif + * Use this for constant initialization of a bridgeif_initdat_t + * (each byte of ethaddr must be passed) + */ +#define BRIDGEIF_INITDATA2(max_ports, max_fdb_dynamic_entries, max_fdb_static_entries, e0, e1, e2, e3, e4, e5) {{e0, e1, e2, e3, e4, e5}, max_ports, max_fdb_dynamic_entries, max_fdb_static_entries} + +err_t bridgeif_init(struct netif *netif); +err_t bridgeif_add_port(struct netif *bridgeif, struct netif *portif); +err_t bridgeif_fdb_add(struct netif *bridgeif, const struct eth_addr *addr, bridgeif_portmask_t ports); +err_t bridgeif_fdb_remove(struct netif *bridgeif, const struct eth_addr *addr); + +/* FDB interface, can be replaced by own implementation */ +void bridgeif_fdb_update_src(void *fdb_ptr, struct eth_addr *src_addr, u8_t port_idx); +bridgeif_portmask_t bridgeif_fdb_get_dst_ports(void *fdb_ptr, struct eth_addr *dst_addr); +void* bridgeif_fdb_init(u16_t max_fdb_entries); + +#if BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT +#ifndef BRIDGEIF_DECL_PROTECT +/* define bridgeif protection to sys_arch_protect... */ +#include "lwip/sys.h" +#define BRIDGEIF_DECL_PROTECT(lev) SYS_ARCH_DECL_PROTECT(lev) +#define BRIDGEIF_READ_PROTECT(lev) SYS_ARCH_PROTECT(lev) +#define BRIDGEIF_READ_UNPROTECT(lev) SYS_ARCH_UNPROTECT(lev) +#define BRIDGEIF_WRITE_PROTECT(lev) +#define BRIDGEIF_WRITE_UNPROTECT(lev) +#endif +#else /* BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT */ +#include "lwip/tcpip.h" +#define BRIDGEIF_DECL_PROTECT(lev) +#define BRIDGEIF_READ_PROTECT(lev) +#define BRIDGEIF_READ_UNPROTECT(lev) +#define BRIDGEIF_WRITE_PROTECT(lev) +#define BRIDGEIF_WRITE_UNPROTECT(lev) +#endif /* BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_NETIF_BRIDGEIF_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/bridgeif_opts.h b/Middlewares/Third_Party/LwIP/src/include/netif/bridgeif_opts.h new file mode 100644 index 0000000..b85c301 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/bridgeif_opts.h @@ -0,0 +1,90 @@ +/** + * @file + * lwIP netif implementing an IEEE 802.1D MAC Bridge + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#ifndef LWIP_HDR_NETIF_BRIDGEIF_OPTS_H +#define LWIP_HDR_NETIF_BRIDGEIF_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup bridgeif_opts Options + * @ingroup bridgeif + * @{ + */ + +/** BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT==1: set port netif's 'input' function + * to call directly into bridgeif code and on top of that, directly call into + * the selected forwarding port's 'linkoutput' function. + * This means that the bridgeif input/output path is protected from concurrent access + * but as well, *all* bridge port netif's drivers must correctly handle concurrent access! + * == 0: get into tcpip_thread for every input packet (no multithreading) + * ATTENTION: as ==0 relies on tcpip.h, the default depends on NO_SYS setting + */ +#ifndef BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT +#define BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT NO_SYS +#endif + +/** BRIDGEIF_MAX_PORTS: this is used to create a typedef used for forwarding + * bit-fields: the number of bits required is this + 1 (for the internal/cpu port) + * (63 is the maximum, resulting in an u64_t for the bit mask) + * ATTENTION: this controls the maximum number of the implementation only! + * The max. number of ports per bridge must still be passed via netif_add parameter! + */ +#ifndef BRIDGEIF_MAX_PORTS +#define BRIDGEIF_MAX_PORTS 7 +#endif + +/** BRIDGEIF_DEBUG: Enable generic debugging in bridgeif.c. */ +#ifndef BRIDGEIF_DEBUG +#define BRIDGEIF_DEBUG LWIP_DBG_OFF +#endif + +/** BRIDGEIF_DEBUG: Enable FDB debugging in bridgeif.c. */ +#ifndef BRIDGEIF_FDB_DEBUG +#define BRIDGEIF_FDB_DEBUG LWIP_DBG_OFF +#endif + +/** BRIDGEIF_DEBUG: Enable forwarding debugging in bridgeif.c. */ +#ifndef BRIDGEIF_FW_DEBUG +#define BRIDGEIF_FW_DEBUG LWIP_DBG_OFF +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_NETIF_BRIDGEIF_OPTS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/etharp.h b/Middlewares/Third_Party/LwIP/src/include/netif/etharp.h new file mode 100644 index 0000000..b536fd2 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/etharp.h @@ -0,0 +1,3 @@ +/* ARP has been moved to core/ipv4, provide this #include for compatibility only */ +#include "lwip/etharp.h" +#include "netif/ethernet.h" diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ethernet.h b/Middlewares/Third_Party/LwIP/src/include/netif/ethernet.h new file mode 100644 index 0000000..49649cb --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ethernet.h @@ -0,0 +1,77 @@ +/** + * @file + * Ethernet input function - handles INCOMING ethernet level traffic + * To be used in most low-level netif implementations + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * Copyright (c) 2003-2004 Leon Woestenberg + * Copyright (c) 2003-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_NETIF_ETHERNET_H +#define LWIP_HDR_NETIF_ETHERNET_H + +#include "lwip/opt.h" + +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/prot/ethernet.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_ARP || LWIP_ETHERNET + +/** Define this to 1 and define LWIP_ARP_FILTER_NETIF_FN(pbuf, netif, type) + * to a filter function that returns the correct netif when using multiple + * netifs on one hardware interface where the netif's low-level receive + * routine cannot decide for the correct netif (e.g. when mapping multiple + * IP addresses to one hardware interface). + */ +#ifndef LWIP_ARP_FILTER_NETIF +#define LWIP_ARP_FILTER_NETIF 0 +#endif + +err_t ethernet_input(struct pbuf *p, struct netif *netif); +err_t ethernet_output(struct netif* netif, struct pbuf* p, const struct eth_addr* src, const struct eth_addr* dst, u16_t eth_type); + +extern const struct eth_addr ethbroadcast, ethzero; + +#endif /* LWIP_ARP || LWIP_ETHERNET */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_NETIF_ETHERNET_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ieee802154.h b/Middlewares/Third_Party/LwIP/src/include/netif/ieee802154.h new file mode 100644 index 0000000..54e019f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ieee802154.h @@ -0,0 +1,112 @@ +/** + * @file + * Definitions for IEEE 802.15.4 MAC frames + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_NETIF_IEEE802154_H +#define LWIP_HDR_NETIF_IEEE802154_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** General MAC frame format + * This shows the full featured header, mainly for documentation. + * Some fields are omitted or shortened to achieve frame compression. + */ +struct ieee_802154_hdr { + /** See IEEE_802154_FC_* defines */ + PACK_STRUCT_FIELD(u16_t frame_control); + /** Sequence number is omitted if IEEE_802154_FC_SEQNO_SUPPR is set in frame_control */ + PACK_STRUCT_FLD_8(u8_t sequence_number); + /** Destination PAN ID is omitted if Destination Addressing Mode is 0 */ + PACK_STRUCT_FIELD(u16_t destination_pan_id); + /** Destination Address is omitted if Destination Addressing Mode is 0 */ + PACK_STRUCT_FLD_8(u8_t destination_address[8]); + /** Source PAN ID is omitted if Source Addressing Mode is 0 + or if IEEE_802154_FC_PANID_COMPR is set in frame control*/ + PACK_STRUCT_FIELD(u16_t source_pan_id); + /** Source Address is omitted if Source Addressing Mode is 0 */ + PACK_STRUCT_FLD_8(u8_t source_address[8]); + /* The rest is variable */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* Addressing modes (2 bits) */ +#define IEEE_802154_ADDR_MODE_NO_ADDR 0x00 /* PAN ID and address fields are not present */ +#define IEEE_802154_ADDR_MODE_RESERVED 0x01 /* Reserved */ +#define IEEE_802154_ADDR_MODE_SHORT 0x02 /* Address field contains a short address (16 bit) */ +#define IEEE_802154_ADDR_MODE_EXT 0x03 /* Address field contains an extended address (64 bit) */ + +/* IEEE 802.15.4 Frame Control definitions (2 bytes; see IEEE 802.15.4-2015 ch. 
7.2.1) */ +#define IEEE_802154_FC_FT_MASK 0x0007 /* bits 0..2: Frame Type */ +#define IEEE_802154_FC_FT_BEACON 0x00 +#define IEEE_802154_FC_FT_DATA 0x01 +#define IEEE_802154_FC_FT_ACK 0x02 +#define IEEE_802154_FC_FT_MAC_CMD 0x03 +#define IEEE_802154_FC_FT_RESERVED 0x04 +#define IEEE_802154_FC_FT_MULTIPURPOSE 0x05 +#define IEEE_802154_FC_FT_FRAG 0x06 +#define IEEE_802154_FC_FT_EXT 0x07 +#define IEEE_802154_FC_SEC_EN 0x0008 /* bit 3: Security Enabled */ +#define IEEE_802154_FC_FRAME_PEND 0x0010 /* bit 4: Frame Pending */ +#define IEEE_802154_FC_ACK_REQ 0x0020 /* bit 5: AR (ACK required) */ +#define IEEE_802154_FC_PANID_COMPR 0x0040 /* bit 6: PAN ID Compression (src and dst are equal, src PAN ID omitted) */ +#define IEEE_802154_FC_RESERVED 0x0080 +#define IEEE_802154_FC_SEQNO_SUPPR 0x0100 /* bit 8: Sequence Number Suppression */ +#define IEEE_802154_FC_IE_PRESENT 0x0200 /* bit 9: IE Present */ +#define IEEE_802154_FC_DST_ADDR_MODE_MASK 0x0c00 /* bits 10..11: Destination Addressing Mode */ +#define IEEE_802154_FC_DST_ADDR_MODE_NO_ADDR (IEEE_802154_ADDR_MODE_NO_ADDR << 10) +#define IEEE_802154_FC_DST_ADDR_MODE_SHORT (IEEE_802154_ADDR_MODE_SHORT << 10) +#define IEEE_802154_FC_DST_ADDR_MODE_EXT (IEEE_802154_ADDR_MODE_EXT << 10) +#define IEEE_802154_FC_FRAME_VERSION_MASK 0x3000 /* bits 12..13: Frame Version */ +#define IEEE_802154_FC_FRAME_VERSION_GET(x) (((x) & IEEE_802154_FC_FRAME_VERSION_MASK) >> 12) +#define IEEE_802154_FC_SRC_ADDR_MODE_MASK 0xc000 /* bits 14..15: Source Addressing Mode */ +#define IEEE_802154_FC_SRC_ADDR_MODE_SHORT (IEEE_802154_ADDR_MODE_SHORT << 14) +#define IEEE_802154_FC_SRC_ADDR_MODE_EXT (IEEE_802154_ADDR_MODE_EXT << 14) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_NETIF_IEEE802154_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6.h b/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6.h new file mode 100644 index 0000000..ecff24b --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6.h @@ -0,0 +1,89 @@ +/** + * @file + * + * 6LowPAN output for IPv6. Uses ND tables for link-layer addressing. Fragments packets to 6LowPAN units. + */ + +/* + * Copyright (c) 2015 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_LOWPAN6_H +#define LWIP_HDR_LOWPAN6_H + +#include "netif/lowpan6_opts.h" + +#if LWIP_IPV6 + +#include "netif/lowpan6_common.h" +#include "lwip/pbuf.h" +#include "lwip/ip.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** 1 second period for reassembly */ +#define LOWPAN6_TMR_INTERVAL 1000 + +void lowpan6_tmr(void); + +err_t lowpan6_set_context(u8_t idx, const ip6_addr_t * context); +err_t lowpan6_set_short_addr(u8_t addr_high, u8_t addr_low); + +#if LWIP_IPV4 +err_t lowpan4_output(struct netif *netif, struct pbuf *q, const ip4_addr_t *ipaddr); +#endif /* LWIP_IPV4 */ +err_t lowpan6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr); +err_t lowpan6_input(struct pbuf * p, struct netif *netif); +err_t lowpan6_if_init(struct netif *netif); + +/* pan_id in network byte order. */ +err_t lowpan6_set_pan_id(u16_t pan_id); + +u16_t lowpan6_calc_crc(const void *buf, u16_t len); + +#if !NO_SYS +err_t tcpip_6lowpan_input(struct pbuf *p, struct netif *inp); +#endif /* !NO_SYS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_LOWPAN6_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_ble.h b/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_ble.h new file mode 100644 index 0000000..01896a7 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_ble.h @@ -0,0 +1,78 @@ +/** + * @file + * 6LowPAN over BLE for IPv6 (RFC7668). + */ + +/* + * Copyright (c) 2017 Benjamin Aigner + * Copyright (c) 2015 Inico Technologies Ltd. , Author: Ivan Delamer + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Benjamin Aigner + * + * Based on the original 6lowpan implementation of lwIP ( @see 6lowpan.c) + */ + +#ifndef LWIP_HDR_LOWPAN6_BLE_H +#define LWIP_HDR_LOWPAN6_BLE_H + +#include "netif/lowpan6_opts.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "netif/lowpan6_common.h" +#include "lwip/pbuf.h" +#include "lwip/ip.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +err_t rfc7668_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr); +err_t rfc7668_input(struct pbuf * p, struct netif *netif); +err_t rfc7668_set_local_addr_eui64(struct netif *netif, const u8_t *local_addr, size_t local_addr_len); +err_t rfc7668_set_local_addr_mac48(struct netif *netif, const u8_t *local_addr, size_t local_addr_len, int is_public_addr); +err_t rfc7668_set_peer_addr_eui64(struct netif *netif, const u8_t *peer_addr, size_t peer_addr_len); +err_t rfc7668_set_peer_addr_mac48(struct netif *netif, const u8_t *peer_addr, size_t peer_addr_len, int is_public_addr); +err_t rfc7668_set_context(u8_t index, const ip6_addr_t * context); +err_t rfc7668_if_init(struct netif *netif); + +#if !NO_SYS +err_t tcpip_rfc7668_input(struct pbuf *p, struct netif *inp); +#endif + +void ble_addr_to_eui64(uint8_t *dst, const uint8_t *src, int public_addr); +void eui64_to_ble_addr(uint8_t *dst, const uint8_t *src); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_LOWPAN6_BLE_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_common.h b/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_common.h new file mode 100644 index 0000000..0dc13ab --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_common.h @@ -0,0 +1,82 @@ +/** + * @file + * + * Common 6LowPAN routines for IPv6. Uses ND tables for link-layer addressing. Fragments packets to 6LowPAN units. + */ + +/* + * Copyright (c) 2015 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_LOWPAN6_COMMON_H +#define LWIP_HDR_LOWPAN6_COMMON_H + +#include "netif/lowpan6_opts.h" + +#if LWIP_IPV6 /* don't build if IPv6 is disabled in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/ip.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** Helper define for a link layer address, which can be encoded as 0, 2 or 8 bytes */ +struct lowpan6_link_addr { + /* encoded length of the address */ + u8_t addr_len; + /* address bytes */ + u8_t addr[8]; +}; + +s8_t lowpan6_get_address_mode(const ip6_addr_t *ip6addr, const struct lowpan6_link_addr *mac_addr); + +#if LWIP_6LOWPAN_IPHC +err_t lowpan6_compress_headers(struct netif *netif, u8_t *inbuf, size_t inbuf_size, u8_t *outbuf, size_t outbuf_size, + u8_t *lowpan6_header_len_out, u8_t *hidden_header_len_out, ip6_addr_t *lowpan6_contexts, + const struct lowpan6_link_addr *src, const struct lowpan6_link_addr *dst); +struct pbuf *lowpan6_decompress(struct pbuf *p, u16_t datagram_size, ip6_addr_t *lowpan6_contexts, + struct lowpan6_link_addr *src, struct lowpan6_link_addr *dest); +#endif /* LWIP_6LOWPAN_IPHC */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_LOWPAN6_COMMON_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_opts.h b/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_opts.h new file mode 100644 index 0000000..17d46cd --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_opts.h @@ -0,0 +1,122 @@ +/** + * @file + * 6LowPAN options list + */ + +/* + * Copyright (c) 2015 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_LOWPAN6_OPTS_H +#define LWIP_HDR_LOWPAN6_OPTS_H + +#include "lwip/opt.h" + +/** LWIP_6LOWPAN_NUM_CONTEXTS: define the number of compression + * contexts per netif type + */ +#ifndef LWIP_6LOWPAN_NUM_CONTEXTS +#define LWIP_6LOWPAN_NUM_CONTEXTS 10 +#endif + +/** LWIP_6LOWPAN_INFER_SHORT_ADDRESS: set this to 0 to disable creating + * short addresses for matching addresses (debug only) + */ +#ifndef LWIP_6LOWPAN_INFER_SHORT_ADDRESS +#define LWIP_6LOWPAN_INFER_SHORT_ADDRESS 1 +#endif + +/** LWIP_6LOWPAN_IPHC: set this to 0 to disable IP header compression as per + * RFC 6282 (which is mandatory for BLE) + */ +#ifndef LWIP_6LOWPAN_IPHC +#define LWIP_6LOWPAN_IPHC 1 +#endif + +/** Set this to 1 if your IEEE 802.15.4 interface can calculate and check the + * CRC in hardware. This means TX packets get 2 zero bytes added on transmission + * which are to be filled with the CRC. + */ +#ifndef LWIP_6LOWPAN_802154_HW_CRC +#define LWIP_6LOWPAN_802154_HW_CRC 0 +#endif + +/** If LWIP_6LOWPAN_802154_HW_CRC==0, this can override the default slow + * implementation of the CRC used for 6LoWPAN over IEEE 802.15.4 (which uses + * a shift register). + */ +#ifndef LWIP_6LOWPAN_CALC_CRC +#define LWIP_6LOWPAN_CALC_CRC(buf, len) lowpan6_calc_crc(buf, len) +#endif + +/** Debug level for 6LoWPAN in general */ +#ifndef LWIP_LOWPAN6_DEBUG +#define LWIP_LOWPAN6_DEBUG LWIP_DBG_OFF +#endif + +/** Debug level for 6LoWPAN over IEEE 802.15.4 */ +#ifndef LWIP_LOWPAN6_802154_DEBUG +#define LWIP_LOWPAN6_802154_DEBUG LWIP_DBG_OFF +#endif + +/** LWIP_LOWPAN6_IP_COMPRESSED_DEBUG: enable compressed IP frame + * output debugging + */ +#ifndef LWIP_LOWPAN6_IP_COMPRESSED_DEBUG +#define LWIP_LOWPAN6_IP_COMPRESSED_DEBUG LWIP_DBG_OFF +#endif + +/** LWIP_LOWPAN6_DECOMPRESSION_DEBUG: enable decompression debug output + */ +#ifndef LWIP_LOWPAN6_DECOMPRESSION_DEBUG +#define LWIP_LOWPAN6_DECOMPRESSION_DEBUG LWIP_DBG_OFF +#endif + +/** LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG: enable decompressed IP frame + * output debugging */ +#ifndef LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG +#define LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG LWIP_DBG_OFF +#endif + +/** LWIP_RFC7668_LINUX_WORKAROUND_PUBLIC_ADDRESS: + * Currently, the linux kernel driver for 6lowpan sets/clears a bit in + * the address, depending on the BD address (either public or not). 
+ * Might not be RFC7668 conform, so you may select to do that (=1) or + * not (=0) */ +#ifndef LWIP_RFC7668_LINUX_WORKAROUND_PUBLIC_ADDRESS +#define LWIP_RFC7668_LINUX_WORKAROUND_PUBLIC_ADDRESS 1 +#endif + + +#endif /* LWIP_HDR_LOWPAN6_OPTS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ccp.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ccp.h new file mode 100644 index 0000000..b228522 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ccp.h @@ -0,0 +1,164 @@ +/* + * ccp.h - Definitions for PPP Compression Control Protocol. + * + * Copyright (c) 1994-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: ccp.h,v 1.12 2004/11/04 10:02:26 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && CCP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef CCP_H +#define CCP_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * CCP codes. + */ + +#define CCP_CONFREQ 1 +#define CCP_CONFACK 2 +#define CCP_TERMREQ 5 +#define CCP_TERMACK 6 +#define CCP_RESETREQ 14 +#define CCP_RESETACK 15 + +/* + * Max # bytes for a CCP option + */ + +#define CCP_MAX_OPTION_LENGTH 32 + +/* + * Parts of a CCP packet. + */ + +#define CCP_CODE(dp) ((dp)[0]) +#define CCP_ID(dp) ((dp)[1]) +#define CCP_LENGTH(dp) (((dp)[2] << 8) + (dp)[3]) +#define CCP_HDRLEN 4 + +#define CCP_OPT_CODE(dp) ((dp)[0]) +#define CCP_OPT_LENGTH(dp) ((dp)[1]) +#define CCP_OPT_MINLEN 2 + +#if BSDCOMPRESS_SUPPORT +/* + * Definitions for BSD-Compress. + */ + +#define CI_BSD_COMPRESS 21 /* config. option for BSD-Compress */ +#define CILEN_BSD_COMPRESS 3 /* length of config. option */ + +/* Macros for handling the 3rd byte of the BSD-Compress config option. */ +#define BSD_NBITS(x) ((x) & 0x1F) /* number of bits requested */ +#define BSD_VERSION(x) ((x) >> 5) /* version of option format */ +#define BSD_CURRENT_VERSION 1 /* current version number */ +#define BSD_MAKE_OPT(v, n) (((v) << 5) | (n)) + +#define BSD_MIN_BITS 9 /* smallest code size supported */ +#define BSD_MAX_BITS 15 /* largest code size supported */ +#endif /* BSDCOMPRESS_SUPPORT */ + +#if DEFLATE_SUPPORT +/* + * Definitions for Deflate. 
+ */ + +#define CI_DEFLATE 26 /* config option for Deflate */ +#define CI_DEFLATE_DRAFT 24 /* value used in original draft RFC */ +#define CILEN_DEFLATE 4 /* length of its config option */ + +#define DEFLATE_MIN_SIZE 9 +#define DEFLATE_MAX_SIZE 15 +#define DEFLATE_METHOD_VAL 8 +#define DEFLATE_SIZE(x) (((x) >> 4) + 8) +#define DEFLATE_METHOD(x) ((x) & 0x0F) +#define DEFLATE_MAKE_OPT(w) ((((w) - 8) << 4) + DEFLATE_METHOD_VAL) +#define DEFLATE_CHK_SEQUENCE 0 +#endif /* DEFLATE_SUPPORT */ + +#if MPPE_SUPPORT +/* + * Definitions for MPPE. + */ + +#define CI_MPPE 18 /* config option for MPPE */ +#define CILEN_MPPE 6 /* length of config option */ +#endif /* MPPE_SUPPORT */ + +#if PREDICTOR_SUPPORT +/* + * Definitions for other, as yet unsupported, compression methods. + */ + +#define CI_PREDICTOR_1 1 /* config option for Predictor-1 */ +#define CILEN_PREDICTOR_1 2 /* length of its config option */ +#define CI_PREDICTOR_2 2 /* config option for Predictor-2 */ +#define CILEN_PREDICTOR_2 2 /* length of its config option */ +#endif /* PREDICTOR_SUPPORT */ + +typedef struct ccp_options { +#if DEFLATE_SUPPORT + unsigned int deflate :1; /* do Deflate? */ + unsigned int deflate_correct :1; /* use correct code for deflate? */ + unsigned int deflate_draft :1; /* use draft RFC code for deflate? */ +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + unsigned int bsd_compress :1; /* do BSD Compress? */ +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + unsigned int predictor_1 :1; /* do Predictor-1? */ + unsigned int predictor_2 :1; /* do Predictor-2? */ +#endif /* PREDICTOR_SUPPORT */ + +#if MPPE_SUPPORT + u8_t mppe; /* MPPE bitfield */ +#endif /* MPPE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + u_short bsd_bits; /* # bits/code for BSD Compress */ +#endif /* BSDCOMPRESS_SUPPORT */ +#if DEFLATE_SUPPORT + u_short deflate_size; /* lg(window size) for Deflate */ +#endif /* DEFLATE_SUPPORT */ + u8_t method; /* code for chosen compression method */ +} ccp_options; + +extern const struct protent ccp_protent; + +void ccp_resetrequest(ppp_pcb *pcb); /* Issue a reset-request. */ + +#ifdef __cplusplus +} +#endif + +#endif /* CCP_H */ +#endif /* PPP_SUPPORT && CCP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-md5.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-md5.h new file mode 100644 index 0000000..eb0269f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-md5.h @@ -0,0 +1,36 @@ +/* + * chap-md5.h - New CHAP/MD5 implementation. + * + * Copyright (c) 2003 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". 
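The CCP_* accessor macros above map directly onto the packet bytes; a small decoding sketch under the assumption that PPP_SUPPORT and CCP_SUPPORT are enabled (parse_ccp is an illustrative name):

#include "netif/ppp/ccp.h"

static void parse_ccp(const u8_t *dp, u16_t len)
{
  u8_t  code;
  u8_t  id;
  u16_t pktlen;

  if (len < CCP_HDRLEN) {
    return;                      /* too short to carry a CCP header */
  }
  code   = CCP_CODE(dp);         /* e.g. CCP_CONFREQ or CCP_RESETREQ */
  id     = CCP_ID(dp);           /* transaction id echoed in the reply */
  pktlen = CCP_LENGTH(dp);       /* big-endian, counts the 4-byte header too */
  (void)code; (void)id; (void)pktlen;
}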
+ * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && CHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +extern const struct chap_digest_type md5_digest; + +#endif /* PPP_SUPPORT && CHAP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-new.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-new.h new file mode 100644 index 0000000..2d8cd9c --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-new.h @@ -0,0 +1,200 @@ +/* + * chap-new.c - New CHAP implementation. + * + * Copyright (c) 2003 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && CHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef CHAP_H +#define CHAP_H + +#include "ppp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * CHAP packets begin with a standard header with code, id, len (2 bytes). + */ +#define CHAP_HDRLEN 4 + +/* + * Values for the code field. + */ +#define CHAP_CHALLENGE 1 +#define CHAP_RESPONSE 2 +#define CHAP_SUCCESS 3 +#define CHAP_FAILURE 4 + +/* + * CHAP digest codes. + */ +#define CHAP_MD5 5 +#if MSCHAP_SUPPORT +#define CHAP_MICROSOFT 0x80 +#define CHAP_MICROSOFT_V2 0x81 +#endif /* MSCHAP_SUPPORT */ + +/* + * Semi-arbitrary limits on challenge and response fields. + */ +#define MAX_CHALLENGE_LEN 64 +#define MAX_RESPONSE_LEN 64 + +/* + * These limits apply to challenge and response packets we send. + * The +4 is the +1 that we actually need rounded up. 
+ */ +#define CHAL_MAX_PKTLEN (PPP_HDRLEN + CHAP_HDRLEN + 4 + MAX_CHALLENGE_LEN + MAXNAMELEN) +#define RESP_MAX_PKTLEN (PPP_HDRLEN + CHAP_HDRLEN + 4 + MAX_RESPONSE_LEN + MAXNAMELEN) + +/* bitmask of supported algorithms */ +#if MSCHAP_SUPPORT +#define MDTYPE_MICROSOFT_V2 0x1 +#define MDTYPE_MICROSOFT 0x2 +#endif /* MSCHAP_SUPPORT */ +#define MDTYPE_MD5 0x4 +#define MDTYPE_NONE 0 + +#if MSCHAP_SUPPORT +/* Return the digest alg. ID for the most preferred digest type. */ +#define CHAP_DIGEST(mdtype) \ + ((mdtype) & MDTYPE_MD5)? CHAP_MD5: \ + ((mdtype) & MDTYPE_MICROSOFT_V2)? CHAP_MICROSOFT_V2: \ + ((mdtype) & MDTYPE_MICROSOFT)? CHAP_MICROSOFT: \ + 0 +#else /* !MSCHAP_SUPPORT */ +#define CHAP_DIGEST(mdtype) \ + ((mdtype) & MDTYPE_MD5)? CHAP_MD5: \ + 0 +#endif /* MSCHAP_SUPPORT */ + +/* Return the bit flag (lsb set) for our most preferred digest type. */ +#define CHAP_MDTYPE(mdtype) ((mdtype) ^ ((mdtype) - 1)) & (mdtype) + +/* Return the bit flag for a given digest algorithm ID. */ +#if MSCHAP_SUPPORT +#define CHAP_MDTYPE_D(digest) \ + ((digest) == CHAP_MICROSOFT_V2)? MDTYPE_MICROSOFT_V2: \ + ((digest) == CHAP_MICROSOFT)? MDTYPE_MICROSOFT: \ + ((digest) == CHAP_MD5)? MDTYPE_MD5: \ + 0 +#else /* !MSCHAP_SUPPORT */ +#define CHAP_MDTYPE_D(digest) \ + ((digest) == CHAP_MD5)? MDTYPE_MD5: \ + 0 +#endif /* MSCHAP_SUPPORT */ + +/* Can we do the requested digest? */ +#if MSCHAP_SUPPORT +#define CHAP_CANDIGEST(mdtype, digest) \ + ((digest) == CHAP_MICROSOFT_V2)? (mdtype) & MDTYPE_MICROSOFT_V2: \ + ((digest) == CHAP_MICROSOFT)? (mdtype) & MDTYPE_MICROSOFT: \ + ((digest) == CHAP_MD5)? (mdtype) & MDTYPE_MD5: \ + 0 +#else /* !MSCHAP_SUPPORT */ +#define CHAP_CANDIGEST(mdtype, digest) \ + ((digest) == CHAP_MD5)? (mdtype) & MDTYPE_MD5: \ + 0 +#endif /* MSCHAP_SUPPORT */ + +/* + * The code for each digest type has to supply one of these. + */ +struct chap_digest_type { + int code; + +#if PPP_SERVER + /* + * Note: challenge and response arguments below are formatted as + * a length byte followed by the actual challenge/response data. + */ + void (*generate_challenge)(ppp_pcb *pcb, unsigned char *challenge); + int (*verify_response)(ppp_pcb *pcb, int id, const char *name, + const unsigned char *secret, int secret_len, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space); +#endif /* PPP_SERVER */ + void (*make_response)(ppp_pcb *pcb, unsigned char *response, int id, const char *our_name, + const unsigned char *challenge, const char *secret, int secret_len, + unsigned char *priv); + int (*check_success)(ppp_pcb *pcb, unsigned char *pkt, int len, unsigned char *priv); + void (*handle_failure)(ppp_pcb *pcb, unsigned char *pkt, int len); +}; + +/* + * Each interface is described by chap structure. 
+ */ +#if CHAP_SUPPORT +typedef struct chap_client_state { + u8_t flags; + const char *name; + const struct chap_digest_type *digest; + unsigned char priv[64]; /* private area for digest's use */ +} chap_client_state; + +#if PPP_SERVER +typedef struct chap_server_state { + u8_t flags; + u8_t id; + const char *name; + const struct chap_digest_type *digest; + int challenge_xmits; + int challenge_pktlen; + unsigned char challenge[CHAL_MAX_PKTLEN]; +} chap_server_state; +#endif /* PPP_SERVER */ +#endif /* CHAP_SUPPORT */ + +#if 0 /* UNUSED */ +/* Hook for a plugin to validate CHAP challenge */ +extern int (*chap_verify_hook)(char *name, char *ourname, int id, + const struct chap_digest_type *digest, + unsigned char *challenge, unsigned char *response, + char *message, int message_space); +#endif /* UNUSED */ + +#if PPP_SERVER +/* Called by authentication code to start authenticating the peer. */ +extern void chap_auth_peer(ppp_pcb *pcb, const char *our_name, int digest_code); +#endif /* PPP_SERVER */ + +/* Called by auth. code to start authenticating us to the peer. */ +extern void chap_auth_with_peer(ppp_pcb *pcb, const char *our_name, int digest_code); + +/* Represents the CHAP protocol to the main pppd code */ +extern const struct protent chap_protent; + +#ifdef __cplusplus +} +#endif + +#endif /* CHAP_H */ +#endif /* PPP_SUPPORT && CHAP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap_ms.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap_ms.h new file mode 100644 index 0000000..0795291 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap_ms.h @@ -0,0 +1,44 @@ +/* + * chap_ms.h - Challenge Handshake Authentication Protocol definitions. + * + * Copyright (c) 1995 Eric Rosenquist. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * + * $Id: chap_ms.h,v 1.13 2004/11/15 22:13:26 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && MSCHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef CHAPMS_INCLUDE +#define CHAPMS_INCLUDE + +extern const struct chap_digest_type chapms_digest; +extern const struct chap_digest_type chapms2_digest; + +#endif /* CHAPMS_INCLUDE */ + +#endif /* PPP_SUPPORT && MSCHAP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/eap.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/eap.h new file mode 100644 index 0000000..3ee9aaf --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/eap.h @@ -0,0 +1,169 @@ +/* + * eap.h - Extensible Authentication Protocol for PPP (RFC 2284) + * + * Copyright (c) 2001 by Sun Microsystems, Inc. + * All rights reserved. + * + * Non-exclusive rights to redistribute, modify, translate, and use + * this software in source and binary forms, in whole or in part, is + * hereby granted, provided that the above copyright notice is + * duplicated in any source form, and that neither the name of the + * copyright holder nor the author is used to endorse or promote + * products derived from this software. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + * Original version by James Carlson + * + * $Id: eap.h,v 1.2 2003/06/11 23:56:26 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && EAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef PPP_EAP_H +#define PPP_EAP_H + +#include "ppp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Packet header = Code, id, length. + */ +#define EAP_HEADERLEN 4 + + +/* EAP message codes. */ +#define EAP_REQUEST 1 +#define EAP_RESPONSE 2 +#define EAP_SUCCESS 3 +#define EAP_FAILURE 4 + +/* EAP types */ +#define EAPT_IDENTITY 1 +#define EAPT_NOTIFICATION 2 +#define EAPT_NAK 3 /* (response only) */ +#define EAPT_MD5CHAP 4 +#define EAPT_OTP 5 /* One-Time Password; RFC 1938 */ +#define EAPT_TOKEN 6 /* Generic Token Card */ +/* 7 and 8 are unassigned. 
*/ +#define EAPT_RSA 9 /* RSA Public Key Authentication */ +#define EAPT_DSS 10 /* DSS Unilateral */ +#define EAPT_KEA 11 /* KEA */ +#define EAPT_KEA_VALIDATE 12 /* KEA-VALIDATE */ +#define EAPT_TLS 13 /* EAP-TLS */ +#define EAPT_DEFENDER 14 /* Defender Token (AXENT) */ +#define EAPT_W2K 15 /* Windows 2000 EAP */ +#define EAPT_ARCOT 16 /* Arcot Systems */ +#define EAPT_CISCOWIRELESS 17 /* Cisco Wireless */ +#define EAPT_NOKIACARD 18 /* Nokia IP smart card */ +#define EAPT_SRP 19 /* Secure Remote Password */ +/* 20 is deprecated */ + +/* EAP SRP-SHA1 Subtypes */ +#define EAPSRP_CHALLENGE 1 /* Request 1 - Challenge */ +#define EAPSRP_CKEY 1 /* Response 1 - Client Key */ +#define EAPSRP_SKEY 2 /* Request 2 - Server Key */ +#define EAPSRP_CVALIDATOR 2 /* Response 2 - Client Validator */ +#define EAPSRP_SVALIDATOR 3 /* Request 3 - Server Validator */ +#define EAPSRP_ACK 3 /* Response 3 - final ack */ +#define EAPSRP_LWRECHALLENGE 4 /* Req/resp 4 - Lightweight rechal */ + +#define SRPVAL_EBIT 0x00000001 /* Use shared key for ECP */ + +#define SRP_PSEUDO_ID "pseudo_" +#define SRP_PSEUDO_LEN 7 + +#define MD5_SIGNATURE_SIZE 16 +#define EAP_MIN_CHALLENGE_LENGTH 17 +#define EAP_MAX_CHALLENGE_LENGTH 24 +#define EAP_MIN_MAX_POWER_OF_TWO_CHALLENGE_LENGTH 3 /* 2^3-1 = 7, 17+7 = 24 */ + +#define EAP_STATES \ + "Initial", "Pending", "Closed", "Listen", "Identify", \ + "SRP1", "SRP2", "SRP3", "MD5Chall", "Open", "SRP4", "BadAuth" + +#define eap_client_active(pcb) ((pcb)->eap.es_client.ea_state == eapListen) +#if PPP_SERVER +#define eap_server_active(pcb) \ + ((pcb)->eap.es_server.ea_state >= eapIdentify && \ + (pcb)->eap.es_server.ea_state <= eapMD5Chall) +#endif /* PPP_SERVER */ + +/* + * Complete EAP state for one PPP session. + */ +enum eap_state_code { + eapInitial = 0, /* No EAP authentication yet requested */ + eapPending, /* Waiting for LCP (no timer) */ + eapClosed, /* Authentication not in use */ + eapListen, /* Client ready (and timer running) */ + eapIdentify, /* EAP Identify sent */ + eapSRP1, /* Sent EAP SRP-SHA1 Subtype 1 */ + eapSRP2, /* Sent EAP SRP-SHA1 Subtype 2 */ + eapSRP3, /* Sent EAP SRP-SHA1 Subtype 3 */ + eapMD5Chall, /* Sent MD5-Challenge */ + eapOpen, /* Completed authentication */ + eapSRP4, /* Sent EAP SRP-SHA1 Subtype 4 */ + eapBadAuth /* Failed authentication */ +}; + +struct eap_auth { + const char *ea_name; /* Our name */ + char ea_peer[MAXNAMELEN +1]; /* Peer's name */ + void *ea_session; /* Authentication library linkage */ + u_char *ea_skey; /* Shared encryption key */ + u_short ea_namelen; /* Length of our name */ + u_short ea_peerlen; /* Length of peer's name */ + enum eap_state_code ea_state; + u_char ea_id; /* Current id */ + u_char ea_requests; /* Number of Requests sent/received */ + u_char ea_responses; /* Number of Responses */ + u_char ea_type; /* One of EAPT_* */ + u32_t ea_keyflags; /* SRP shared key usage flags */ +}; + +#ifndef EAP_MAX_CHALLENGE_LENGTH +#define EAP_MAX_CHALLENGE_LENGTH 24 +#endif +typedef struct eap_state { + struct eap_auth es_client; /* Client (authenticatee) data */ +#if PPP_SERVER + struct eap_auth es_server; /* Server (authenticator) data */ +#endif /* PPP_SERVER */ + int es_savedtime; /* Saved timeout */ + int es_rechallenge; /* EAP rechallenge interval */ + int es_lwrechallenge; /* SRP lightweight rechallenge inter */ + u8_t es_usepseudo; /* Use SRP Pseudonym if offered one */ + int es_usedpseudo; /* Set if we already sent PN */ + int es_challen; /* Length of challenge string */ + u_char es_challenge[EAP_MAX_CHALLENGE_LENGTH]; +} 
eap_state; + +/* + * Timeouts. + */ +#if 0 /* moved to ppp_opts.h */ +#define EAP_DEFTIMEOUT 3 /* Timeout (seconds) for rexmit */ +#define EAP_DEFTRANSMITS 10 /* max # times to transmit */ +#define EAP_DEFREQTIME 20 /* Time to wait for peer request */ +#define EAP_DEFALLOWREQ 20 /* max # times to accept requests */ +#endif /* moved to ppp_opts.h */ + +void eap_authwithpeer(ppp_pcb *pcb, const char *localname); +void eap_authpeer(ppp_pcb *pcb, const char *localname); + +extern const struct protent eap_protent; + +#ifdef __cplusplus +} +#endif + +#endif /* PPP_EAP_H */ + +#endif /* PPP_SUPPORT && EAP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ecp.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ecp.h new file mode 100644 index 0000000..d8808c3 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ecp.h @@ -0,0 +1,62 @@ +/* + * ecp.h - Definitions for PPP Encryption Control Protocol. + * + * Copyright (c) 2002 Google, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: ecp.h,v 1.2 2003/01/10 07:12:36 fcusack Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && ECP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef ECP_H +#define ECP_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct ecp_options { + bool required; /* Is ECP required? */ + unsigned enctype; /* Encryption type */ +} ecp_options; + +extern fsm ecp_fsm[]; +extern ecp_options ecp_wantoptions[]; +extern ecp_options ecp_gotoptions[]; +extern ecp_options ecp_allowoptions[]; +extern ecp_options ecp_hisoptions[]; + +extern const struct protent ecp_protent; + +#ifdef __cplusplus +} +#endif + +#endif /* ECP_H */ +#endif /* PPP_SUPPORT && ECP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/eui64.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/eui64.h new file mode 100644 index 0000000..5adeb48 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/eui64.h @@ -0,0 +1,102 @@ +/* + * eui64.h - EUI64 routines for IPv6CP. + * + * Copyright (c) 1999 Tommi Komulainen. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Tommi Komulainen + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: eui64.h,v 1.6 2002/12/04 23:03:32 paulus Exp $ +*/ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPP_IPV6_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef EUI64_H +#define EUI64_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * @todo: + * + * Maybe this should be done by processing struct in6_addr directly... + */ +typedef union +{ + u8_t e8[8]; + u16_t e16[4]; + u32_t e32[2]; +} eui64_t; + +#define eui64_iszero(e) (((e).e32[0] | (e).e32[1]) == 0) +#define eui64_equals(e, o) (((e).e32[0] == (o).e32[0]) && \ + ((e).e32[1] == (o).e32[1])) +#define eui64_zero(e) (e).e32[0] = (e).e32[1] = 0; + +#define eui64_copy(s, d) memcpy(&(d), &(s), sizeof(eui64_t)) + +#define eui64_magic(e) do { \ + (e).e32[0] = magic(); \ + (e).e32[1] = magic(); \ + (e).e8[0] &= ~2; \ + } while (0) +#define eui64_magic_nz(x) do { \ + eui64_magic(x); \ + } while (eui64_iszero(x)) +#define eui64_magic_ne(x, y) do { \ + eui64_magic(x); \ + } while (eui64_equals(x, y)) + +#define eui64_get(ll, cp) do { \ + eui64_copy((*cp), (ll)); \ + (cp) += sizeof(eui64_t); \ + } while (0) + +#define eui64_put(ll, cp) do { \ + eui64_copy((ll), (*cp)); \ + (cp) += sizeof(eui64_t); \ + } while (0) + +#define eui64_set32(e, l) do { \ + (e).e32[0] = 0; \ + (e).e32[1] = lwip_htonl(l); \ + } while (0) +#define eui64_setlo32(e, l) eui64_set32(e, l) + +char *eui64_ntoa(eui64_t); /* Returns ascii representation of id */ + +#ifdef __cplusplus +} +#endif + +#endif /* EUI64_H */ +#endif /* PPP_SUPPORT && PPP_IPV6_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/fsm.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/fsm.h new file mode 100644 index 0000000..8dec700 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/fsm.h @@ -0,0 +1,182 @@ +/* + * fsm.h - {Link, IP} Control Protocol Finite State Machine definitions. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: fsm.h,v 1.10 2004/11/13 02:28:15 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef FSM_H +#define FSM_H + +#include "ppp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Packet header = Code, id, length. + */ +#define HEADERLEN 4 + + +/* + * CP (LCP, IPCP, etc.) codes. + */ +#define CONFREQ 1 /* Configuration Request */ +#define CONFACK 2 /* Configuration Ack */ +#define CONFNAK 3 /* Configuration Nak */ +#define CONFREJ 4 /* Configuration Reject */ +#define TERMREQ 5 /* Termination Request */ +#define TERMACK 6 /* Termination Ack */ +#define CODEREJ 7 /* Code Reject */ + + +/* + * Each FSM is described by an fsm structure and fsm callbacks. 
+ */ +typedef struct fsm { + ppp_pcb *pcb; /* PPP Interface */ + const struct fsm_callbacks *callbacks; /* Callback routines */ + const char *term_reason; /* Reason for closing protocol */ + u8_t seen_ack; /* Have received valid Ack/Nak/Rej to Req */ + /* -- This is our only flag, we might use u_int :1 if we have more flags */ + u16_t protocol; /* Data Link Layer Protocol field value */ + u8_t state; /* State */ + u8_t flags; /* Contains option bits */ + u8_t id; /* Current id */ + u8_t reqid; /* Current request id */ + u8_t retransmits; /* Number of retransmissions left */ + u8_t nakloops; /* Number of nak loops since last ack */ + u8_t rnakloops; /* Number of naks received */ + u8_t maxnakloops; /* Maximum number of nak loops tolerated + (necessary because IPCP require a custom large max nak loops value) */ + u8_t term_reason_len; /* Length of term_reason */ +} fsm; + + +typedef struct fsm_callbacks { + void (*resetci) /* Reset our Configuration Information */ + (fsm *); + int (*cilen) /* Length of our Configuration Information */ + (fsm *); + void (*addci) /* Add our Configuration Information */ + (fsm *, u_char *, int *); + int (*ackci) /* ACK our Configuration Information */ + (fsm *, u_char *, int); + int (*nakci) /* NAK our Configuration Information */ + (fsm *, u_char *, int, int); + int (*rejci) /* Reject our Configuration Information */ + (fsm *, u_char *, int); + int (*reqci) /* Request peer's Configuration Information */ + (fsm *, u_char *, int *, int); + void (*up) /* Called when fsm reaches PPP_FSM_OPENED state */ + (fsm *); + void (*down) /* Called when fsm leaves PPP_FSM_OPENED state */ + (fsm *); + void (*starting) /* Called when we want the lower layer */ + (fsm *); + void (*finished) /* Called when we don't want the lower layer */ + (fsm *); + void (*protreject) /* Called when Protocol-Reject received */ + (int); + void (*retransmit) /* Retransmission is necessary */ + (fsm *); + int (*extcode) /* Called when unknown code received */ + (fsm *, int, int, u_char *, int); + const char *proto_name; /* String name for protocol (for messages) */ +} fsm_callbacks; + + +/* + * Link states. + */ +#define PPP_FSM_INITIAL 0 /* Down, hasn't been opened */ +#define PPP_FSM_STARTING 1 /* Down, been opened */ +#define PPP_FSM_CLOSED 2 /* Up, hasn't been opened */ +#define PPP_FSM_STOPPED 3 /* Open, waiting for down event */ +#define PPP_FSM_CLOSING 4 /* Terminating the connection, not open */ +#define PPP_FSM_STOPPING 5 /* Terminating, but open */ +#define PPP_FSM_REQSENT 6 /* We've sent a Config Request */ +#define PPP_FSM_ACKRCVD 7 /* We've received a Config Ack */ +#define PPP_FSM_ACKSENT 8 /* We've sent a Config Ack */ +#define PPP_FSM_OPENED 9 /* Connection available */ + + +/* + * Flags - indicate options controlling FSM operation + */ +#define OPT_PASSIVE 1 /* Don't die if we don't get a response */ +#define OPT_RESTART 2 /* Treat 2nd OPEN as DOWN, UP */ +#define OPT_SILENT 4 /* Wait for peer to speak first */ + + +/* + * Timeouts. 
+ */ +#if 0 /* moved to ppp_opts.h */ +#define DEFTIMEOUT 3 /* Timeout time in seconds */ +#define DEFMAXTERMREQS 2 /* Maximum Terminate-Request transmissions */ +#define DEFMAXCONFREQS 10 /* Maximum Configure-Request transmissions */ +#define DEFMAXNAKLOOPS 5 /* Maximum number of nak loops */ +#endif /* moved to ppp_opts.h */ + + +/* + * Prototypes + */ +void fsm_init(fsm *f); +void fsm_lowerup(fsm *f); +void fsm_lowerdown(fsm *f); +void fsm_open(fsm *f); +void fsm_close(fsm *f, const char *reason); +void fsm_input(fsm *f, u_char *inpacket, int l); +void fsm_protreject(fsm *f); +void fsm_sdata(fsm *f, u_char code, u_char id, const u_char *data, int datalen); + +#ifdef __cplusplus +} +#endif + +#endif /* FSM_H */ +#endif /* PPP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipcp.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipcp.h new file mode 100644 index 0000000..32fdd1c --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipcp.h @@ -0,0 +1,134 @@ +/* + * ipcp.h - IP Control Protocol definitions. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: ipcp.h,v 1.14 2002/12/04 23:03:32 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPP_IPV4_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef IPCP_H +#define IPCP_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Options. 
+ */ +#define CI_ADDRS 1 /* IP Addresses */ +#if VJ_SUPPORT +#define CI_COMPRESSTYPE 2 /* Compression Type */ +#endif /* VJ_SUPPORT */ +#define CI_ADDR 3 + +#if LWIP_DNS +#define CI_MS_DNS1 129 /* Primary DNS value */ +#define CI_MS_DNS2 131 /* Secondary DNS value */ +#endif /* LWIP_DNS */ +#if 0 /* UNUSED - WINS */ +#define CI_MS_WINS1 130 /* Primary WINS value */ +#define CI_MS_WINS2 132 /* Secondary WINS value */ +#endif /* UNUSED - WINS */ + +#if VJ_SUPPORT +#define MAX_STATES 16 /* from slcompress.h */ + +#define IPCP_VJMODE_OLD 1 /* "old" mode (option # = 0x0037) */ +#define IPCP_VJMODE_RFC1172 2 /* "old-rfc"mode (option # = 0x002d) */ +#define IPCP_VJMODE_RFC1332 3 /* "new-rfc"mode (option # = 0x002d, */ + /* maxslot and slot number compression) */ + +#define IPCP_VJ_COMP 0x002d /* current value for VJ compression option*/ +#define IPCP_VJ_COMP_OLD 0x0037 /* "old" (i.e, broken) value for VJ */ + /* compression option*/ +#endif /* VJ_SUPPORT */ + +typedef struct ipcp_options { + unsigned int neg_addr :1; /* Negotiate IP Address? */ + unsigned int old_addrs :1; /* Use old (IP-Addresses) option? */ + unsigned int req_addr :1; /* Ask peer to send IP address? */ +#if 0 /* UNUSED */ + unsigned int default_route :1; /* Assign default route through interface? */ + unsigned int replace_default_route :1; /* Replace default route through interface? */ +#endif /* UNUSED */ +#if 0 /* UNUSED - PROXY ARP */ + unsigned int proxy_arp :1; /* Make proxy ARP entry for peer? */ +#endif /* UNUSED - PROXY ARP */ +#if VJ_SUPPORT + unsigned int neg_vj :1; /* Van Jacobson Compression? */ + unsigned int old_vj :1; /* use old (short) form of VJ option? */ + unsigned int cflag :1; +#endif /* VJ_SUPPORT */ + unsigned int accept_local :1; /* accept peer's value for ouraddr */ + unsigned int accept_remote :1; /* accept peer's value for hisaddr */ +#if LWIP_DNS + unsigned int req_dns1 :1; /* Ask peer to send primary DNS address? */ + unsigned int req_dns2 :1; /* Ask peer to send secondary DNS address? */ +#endif /* LWIP_DNS */ + + u32_t ouraddr, hisaddr; /* Addresses in NETWORK BYTE ORDER */ +#if LWIP_DNS + u32_t dnsaddr[2]; /* Primary and secondary MS DNS entries */ +#endif /* LWIP_DNS */ +#if 0 /* UNUSED - WINS */ + u32_t winsaddr[2]; /* Primary and secondary MS WINS entries */ +#endif /* UNUSED - WINS */ + +#if VJ_SUPPORT + u16_t vj_protocol; /* protocol value to use in VJ option */ + u8_t maxslotindex; /* values for RFC1332 VJ compression neg. */ +#endif /* VJ_SUPPORT */ +} ipcp_options; + +#if 0 /* UNUSED, already defined by lwIP */ +char *ip_ntoa (u32_t); +#endif /* UNUSED, already defined by lwIP */ + +extern const struct protent ipcp_protent; + +#ifdef __cplusplus +} +#endif + +#endif /* IPCP_H */ +#endif /* PPP_SUPPORT && PPP_IPV4_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipv6cp.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipv6cp.h new file mode 100644 index 0000000..9099973 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipv6cp.h @@ -0,0 +1,191 @@ +/* + * ipv6cp.h - PPP IPV6 Control Protocol. + * + * Copyright (c) 1999 Tommi Komulainen. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Tommi Komulainen + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +/* Original version, based on RFC2023 : + + Copyright (c) 1995, 1996, 1997 Francis.Dupont@inria.fr, INRIA Rocquencourt, + Alain.Durand@imag.fr, IMAG, + Jean-Luc.Richier@imag.fr, IMAG-LSR. + + Copyright (c) 1998, 1999 Francis.Dupont@inria.fr, GIE DYADE, + Alain.Durand@imag.fr, IMAG, + Jean-Luc.Richier@imag.fr, IMAG-LSR. + + Ce travail a été fait au sein du GIE DYADE (Groupement d'Intérêt + Économique ayant pour membres BULL S.A. et l'INRIA). + + Ce logiciel informatique est disponible aux conditions + usuelles dans la recherche, c'est-à-dire qu'il peut + être utilisé, copié, modifié, distribué à l'unique + condition que ce texte soit conservé afin que + l'origine de ce logiciel soit reconnue. + + Le nom de l'Institut National de Recherche en Informatique + et en Automatique (INRIA), de l'IMAG, ou d'une personne morale + ou physique ayant participé à l'élaboration de ce logiciel ne peut + être utilisé sans son accord préalable explicite. + + Ce logiciel est fourni tel quel sans aucune garantie, + support ou responsabilité d'aucune sorte. + Ce logiciel est dérivé de sources d'origine + "University of California at Berkeley" et + "Digital Equipment Corporation" couvertes par des copyrights. + + L'Institut d'Informatique et de Mathématiques Appliquées de Grenoble (IMAG) + est une fédération d'unités mixtes de recherche du CNRS, de l'Institut National + Polytechnique de Grenoble et de l'Université Joseph Fourier regroupant + sept laboratoires dont le laboratoire Logiciels, Systèmes, Réseaux (LSR). + + This work has been done in the context of GIE DYADE (joint R & D venture + between BULL S.A. and INRIA). + + This software is available with usual "research" terms + with the aim of retain credits of the software. + Permission to use, copy, modify and distribute this software for any + purpose and without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies, + and the name of INRIA, IMAG, or any contributor not be used in advertising + or publicity pertaining to this material without the prior explicit + permission. The software is provided "as is" without any + warranties, support or liabilities of any kind. + This software is derived from source code from + "University of California at Berkeley" and + "Digital Equipment Corporation" protected by copyrights. 
+ + Grenoble's Institute of Computer Science and Applied Mathematics (IMAG) + is a federation of seven research units funded by the CNRS, National + Polytechnic Institute of Grenoble and University Joseph Fourier. + The research unit in Software, Systems, Networks (LSR) is member of IMAG. +*/ + +/* + * Derived from : + * + * + * ipcp.h - IP Control Protocol definitions. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: ipv6cp.h,v 1.7 2002/12/04 23:03:32 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPP_IPV6_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef IPV6CP_H +#define IPV6CP_H + +#include "eui64.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Options. + */ +#define CI_IFACEID 1 /* Interface Identifier */ +#ifdef IPV6CP_COMP +#define CI_COMPRESSTYPE 2 /* Compression Type */ +#endif /* IPV6CP_COMP */ + +/* No compression types yet defined. + *#define IPV6CP_COMP 0x004f + */ +typedef struct ipv6cp_options { + unsigned int neg_ifaceid :1; /* Negotiate interface identifier? */ + unsigned int req_ifaceid :1; /* Ask peer to send interface identifier? */ + unsigned int accept_local :1; /* accept peer's value for iface id? */ + unsigned int opt_local :1; /* ourtoken set by option */ + unsigned int opt_remote :1; /* histoken set by option */ + unsigned int use_ip :1; /* use IP as interface identifier */ +#if 0 + unsigned int use_persistent :1; /* use uniquely persistent value for address */ +#endif +#ifdef IPV6CP_COMP + unsigned int neg_vj :1; /* Van Jacobson Compression? 
*/ +#endif /* IPV6CP_COMP */ + +#ifdef IPV6CP_COMP + u_short vj_protocol; /* protocol value to use in VJ option */ +#endif /* IPV6CP_COMP */ + eui64_t ourid, hisid; /* Interface identifiers */ +} ipv6cp_options; + +extern const struct protent ipv6cp_protent; + +#ifdef __cplusplus +} +#endif + +#endif /* IPV6CP_H */ +#endif /* PPP_SUPPORT && PPP_IPV6_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/lcp.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/lcp.h new file mode 100644 index 0000000..18ad1cb --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/lcp.h @@ -0,0 +1,179 @@ +/* + * lcp.h - Link Control Protocol definitions. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: lcp.h,v 1.20 2004/11/14 22:53:42 carlsonj Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef LCP_H +#define LCP_H + +#include "ppp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Options. 
+ */ +#define CI_VENDOR 0 /* Vendor Specific */ +#define CI_MRU 1 /* Maximum Receive Unit */ +#define CI_ASYNCMAP 2 /* Async Control Character Map */ +#define CI_AUTHTYPE 3 /* Authentication Type */ +#define CI_QUALITY 4 /* Quality Protocol */ +#define CI_MAGICNUMBER 5 /* Magic Number */ +#define CI_PCOMPRESSION 7 /* Protocol Field Compression */ +#define CI_ACCOMPRESSION 8 /* Address/Control Field Compression */ +#define CI_FCSALTERN 9 /* FCS-Alternatives */ +#define CI_SDP 10 /* Self-Describing-Pad */ +#define CI_NUMBERED 11 /* Numbered-Mode */ +#define CI_CALLBACK 13 /* callback */ +#define CI_MRRU 17 /* max reconstructed receive unit; multilink */ +#define CI_SSNHF 18 /* short sequence numbers for multilink */ +#define CI_EPDISC 19 /* endpoint discriminator */ +#define CI_MPPLUS 22 /* Multi-Link-Plus-Procedure */ +#define CI_LDISC 23 /* Link-Discriminator */ +#define CI_LCPAUTH 24 /* LCP Authentication */ +#define CI_COBS 25 /* Consistent Overhead Byte Stuffing */ +#define CI_PREFELIS 26 /* Prefix Elision */ +#define CI_MPHDRFMT 27 /* MP Header Format */ +#define CI_I18N 28 /* Internationalization */ +#define CI_SDL 29 /* Simple Data Link */ + +/* + * LCP-specific packet types (code numbers). + */ +#define PROTREJ 8 /* Protocol Reject */ +#define ECHOREQ 9 /* Echo Request */ +#define ECHOREP 10 /* Echo Reply */ +#define DISCREQ 11 /* Discard Request */ +#define IDENTIF 12 /* Identification */ +#define TIMEREM 13 /* Time Remaining */ + +/* Value used as data for CI_CALLBACK option */ +#define CBCP_OPT 6 /* Use callback control protocol */ + +#if 0 /* moved to ppp_opts.h */ +#define DEFMRU 1500 /* Try for this */ +#define MINMRU 128 /* No MRUs below this */ +#define MAXMRU 16384 /* Normally limit MRU to this */ +#endif /* moved to ppp_opts.h */ + +/* An endpoint discriminator, used with multilink. */ +#define MAX_ENDP_LEN 20 /* maximum length of discriminator value */ +struct epdisc { + unsigned char class_; /* -- The word "class" is reserved in C++. */ + unsigned char length; + unsigned char value[MAX_ENDP_LEN]; +}; + +/* + * The state of options is described by an lcp_options structure. + */ +typedef struct lcp_options { + unsigned int passive :1; /* Don't die if we don't get a response */ + unsigned int silent :1; /* Wait for the other end to start first */ +#if 0 /* UNUSED */ + unsigned int restart :1; /* Restart vs. exit after close */ +#endif /* UNUSED */ + unsigned int neg_mru :1; /* Negotiate the MRU? */ + unsigned int neg_asyncmap :1; /* Negotiate the async map? */ +#if PAP_SUPPORT + unsigned int neg_upap :1; /* Ask for UPAP authentication? */ +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + unsigned int neg_chap :1; /* Ask for CHAP authentication? */ +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + unsigned int neg_eap :1; /* Ask for EAP authentication? */ +#endif /* EAP_SUPPORT */ + unsigned int neg_magicnumber :1; /* Ask for magic number? */ + unsigned int neg_pcompression :1; /* HDLC Protocol Field Compression? */ + unsigned int neg_accompression :1; /* HDLC Address/Control Field Compression? 
*/ +#if LQR_SUPPORT + unsigned int neg_lqr :1; /* Negotiate use of Link Quality Reports */ +#endif /* LQR_SUPPORT */ + unsigned int neg_cbcp :1; /* Negotiate use of CBCP */ +#ifdef HAVE_MULTILINK + unsigned int neg_mrru :1; /* negotiate multilink MRRU */ +#endif /* HAVE_MULTILINK */ + unsigned int neg_ssnhf :1; /* negotiate short sequence numbers */ + unsigned int neg_endpoint :1; /* negotiate endpoint discriminator */ + + u16_t mru; /* Value of MRU */ +#ifdef HAVE_MULTILINK + u16_t mrru; /* Value of MRRU, and multilink enable */ +#endif /* MULTILINK */ +#if CHAP_SUPPORT + u8_t chap_mdtype; /* which MD types (hashing algorithm) */ +#endif /* CHAP_SUPPORT */ + u32_t asyncmap; /* Value of async map */ + u32_t magicnumber; + u8_t numloops; /* Number of loops during magic number neg. */ +#if LQR_SUPPORT + u32_t lqr_period; /* Reporting period for LQR 1/100ths second */ +#endif /* LQR_SUPPORT */ + struct epdisc endpoint; /* endpoint discriminator */ +} lcp_options; + +void lcp_open(ppp_pcb *pcb); +void lcp_close(ppp_pcb *pcb, const char *reason); +void lcp_lowerup(ppp_pcb *pcb); +void lcp_lowerdown(ppp_pcb *pcb); +void lcp_sprotrej(ppp_pcb *pcb, u_char *p, int len); /* send protocol reject */ + +extern const struct protent lcp_protent; + +#if 0 /* moved to ppp_opts.h */ +/* Default number of times we receive our magic number from the peer + before deciding the link is looped-back. */ +#define DEFLOOPBACKFAIL 10 +#endif /* moved to ppp_opts.h */ + +#ifdef __cplusplus +} +#endif + +#endif /* LCP_H */ +#endif /* PPP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/magic.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/magic.h new file mode 100644 index 0000000..a165e18 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/magic.h @@ -0,0 +1,130 @@ +/* + * magic.h - PPP Magic Number definitions. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." 
+ * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: magic.h,v 1.5 2003/06/11 23:56:26 paulus Exp $ + */ +/***************************************************************************** +* randm.h - Random number generator header file. +* +* Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. +* Copyright (c) 1998 Global Election Systems Inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 03-01-01 Marc Boucher +* Ported to lwIP. +* 98-05-29 Guy Lancaster , Global Election Systems Inc. +* Extracted from avos. +*****************************************************************************/ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef MAGIC_H +#define MAGIC_H + +#ifdef __cplusplus +extern "C" { +#endif + +/*********************** +*** PUBLIC FUNCTIONS *** +***********************/ + +/* + * Initialize the random number generator. + */ +void magic_init(void); + +/* + * Randomize our random seed value. To be called for truely random events + * such as user operations and network traffic. + */ +void magic_randomize(void); + +/* + * Return a new random number. + */ +u32_t magic(void); /* Returns the next magic number */ + +/* + * Fill buffer with random bytes + * + * Use the random pool to generate random data. This degrades to pseudo + * random when used faster than randomness is supplied using magic_churnrand(). + * Thus it's important to make sure that the results of this are not + * published directly because one could predict the next result to at + * least some degree. Also, it's important to get a good seed before + * the first use. + */ +void magic_random_bytes(unsigned char *buf, u32_t buf_len); + +/* + * Return a new random number between 0 and (2^pow)-1 included. 
+ */ +u32_t magic_pow(u8_t pow); + +#ifdef __cplusplus +} +#endif + +#endif /* MAGIC_H */ + +#endif /* PPP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/mppe.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/mppe.h new file mode 100644 index 0000000..5de1128 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/mppe.h @@ -0,0 +1,181 @@ +/* + * mppe.h - Definitions for MPPE + * + * Copyright (c) 2008 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && MPPE_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef MPPE_H +#define MPPE_H + +#include "netif/ppp/pppcrypt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define MPPE_PAD 4 /* MPPE growth per frame */ +#define MPPE_MAX_KEY_LEN 16 /* largest key length (128-bit) */ + +/* option bits for ccp_options.mppe */ +#define MPPE_OPT_40 0x01 /* 40 bit */ +#define MPPE_OPT_128 0x02 /* 128 bit */ +#define MPPE_OPT_STATEFUL 0x04 /* stateful mode */ +/* unsupported opts */ +#define MPPE_OPT_56 0x08 /* 56 bit */ +#define MPPE_OPT_MPPC 0x10 /* MPPC compression */ +#define MPPE_OPT_D 0x20 /* Unknown */ +#define MPPE_OPT_UNSUPPORTED (MPPE_OPT_56|MPPE_OPT_MPPC|MPPE_OPT_D) +#define MPPE_OPT_UNKNOWN 0x40 /* Bits !defined in RFC 3078 were set */ + +/* + * This is not nice ... the alternative is a bitfield struct though. + * And unfortunately, we cannot share the same bits for the option + * names above since C and H are the same bit. We could do a u_int32 + * but then we have to do a lwip_htonl() all the time and/or we still need + * to know which octet is which. + */ +#define MPPE_C_BIT 0x01 /* MPPC */ +#define MPPE_D_BIT 0x10 /* Obsolete, usage unknown */ +#define MPPE_L_BIT 0x20 /* 40-bit */ +#define MPPE_S_BIT 0x40 /* 128-bit */ +#define MPPE_M_BIT 0x80 /* 56-bit, not supported */ +#define MPPE_H_BIT 0x01 /* Stateless (in a different byte) */ + +/* Does not include H bit; used for least significant octet only. 
*/ +#define MPPE_ALL_BITS (MPPE_D_BIT|MPPE_L_BIT|MPPE_S_BIT|MPPE_M_BIT|MPPE_H_BIT) + +/* Build a CI from mppe opts (see RFC 3078) */ +#define MPPE_OPTS_TO_CI(opts, ci) \ + do { \ + u_char *ptr = ci; /* u_char[4] */ \ + \ + /* H bit */ \ + if (opts & MPPE_OPT_STATEFUL) \ + *ptr++ = 0x0; \ + else \ + *ptr++ = MPPE_H_BIT; \ + *ptr++ = 0; \ + *ptr++ = 0; \ + \ + /* S,L bits */ \ + *ptr = 0; \ + if (opts & MPPE_OPT_128) \ + *ptr |= MPPE_S_BIT; \ + if (opts & MPPE_OPT_40) \ + *ptr |= MPPE_L_BIT; \ + /* M,D,C bits not supported */ \ + } while (/* CONSTCOND */ 0) + +/* The reverse of the above */ +#define MPPE_CI_TO_OPTS(ci, opts) \ + do { \ + const u_char *ptr = ci; /* u_char[4] */ \ + \ + opts = 0; \ + \ + /* H bit */ \ + if (!(ptr[0] & MPPE_H_BIT)) \ + opts |= MPPE_OPT_STATEFUL; \ + \ + /* S,L bits */ \ + if (ptr[3] & MPPE_S_BIT) \ + opts |= MPPE_OPT_128; \ + if (ptr[3] & MPPE_L_BIT) \ + opts |= MPPE_OPT_40; \ + \ + /* M,D,C bits */ \ + if (ptr[3] & MPPE_M_BIT) \ + opts |= MPPE_OPT_56; \ + if (ptr[3] & MPPE_D_BIT) \ + opts |= MPPE_OPT_D; \ + if (ptr[3] & MPPE_C_BIT) \ + opts |= MPPE_OPT_MPPC; \ + \ + /* Other bits */ \ + if (ptr[0] & ~MPPE_H_BIT) \ + opts |= MPPE_OPT_UNKNOWN; \ + if (ptr[1] || ptr[2]) \ + opts |= MPPE_OPT_UNKNOWN; \ + if (ptr[3] & ~MPPE_ALL_BITS) \ + opts |= MPPE_OPT_UNKNOWN; \ + } while (/* CONSTCOND */ 0) + +/* Shared MPPE padding between MSCHAP and MPPE */ +#define SHA1_PAD_SIZE 40 + +static const u8_t mppe_sha1_pad1[SHA1_PAD_SIZE] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8_t mppe_sha1_pad2[SHA1_PAD_SIZE] = { + 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, + 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, + 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, + 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2 +}; + +/* + * State for an MPPE (de)compressor. + */ +typedef struct ppp_mppe_state { + lwip_arc4_context arc4; + u8_t master_key[MPPE_MAX_KEY_LEN]; + u8_t session_key[MPPE_MAX_KEY_LEN]; + u8_t keylen; /* key length in bytes */ + /* NB: 128-bit == 16, 40-bit == 8! 
+ * If we want to support 56-bit, the unit has to change to bits + */ + u8_t bits; /* MPPE control bits */ + u16_t ccount; /* 12-bit coherency count (seqno) */ + u16_t sanity_errors; /* take down LCP if too many */ + unsigned int stateful :1; /* stateful mode flag */ + unsigned int discard :1; /* stateful mode packet loss flag */ +} ppp_mppe_state; + +void mppe_set_key(ppp_pcb *pcb, ppp_mppe_state *state, u8_t *key); +void mppe_init(ppp_pcb *pcb, ppp_mppe_state *state, u8_t options); +void mppe_comp_reset(ppp_pcb *pcb, ppp_mppe_state *state); +err_t mppe_compress(ppp_pcb *pcb, ppp_mppe_state *state, struct pbuf **pb, u16_t protocol); +void mppe_decomp_reset(ppp_pcb *pcb, ppp_mppe_state *state); +err_t mppe_decompress(ppp_pcb *pcb, ppp_mppe_state *state, struct pbuf **pb); + +#ifdef __cplusplus +} +#endif + +#endif /* MPPE_H */ +#endif /* PPP_SUPPORT && MPPE_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp.h new file mode 100644 index 0000000..3d73c36 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp.h @@ -0,0 +1,698 @@ +/***************************************************************************** +* ppp.h - Network Point to Point Protocol header file. +* +* Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. +* portions Copyright (c) 1997 Global Election Systems Inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 03-01-01 Marc Boucher +* Ported to lwIP. +* 97-11-05 Guy Lancaster , Global Election Systems Inc. +* Original derived from BSD codes. 
+*****************************************************************************/ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef PPP_H +#define PPP_H + +#include "lwip/def.h" +#include "lwip/stats.h" +#include "lwip/mem.h" +#include "lwip/netif.h" +#include "lwip/sys.h" +#include "lwip/timeouts.h" +#if PPP_IPV6_SUPPORT +#include "lwip/ip6_addr.h" +#endif /* PPP_IPV6_SUPPORT */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Disable non-working or rarely used PPP feature, so rarely that we don't want to bloat ppp_opts.h with them */ +#ifndef PPP_OPTIONS +#define PPP_OPTIONS 0 +#endif + +#ifndef PPP_NOTIFY +#define PPP_NOTIFY 0 +#endif + +#ifndef PPP_REMOTENAME +#define PPP_REMOTENAME 0 +#endif + +#ifndef PPP_IDLETIMELIMIT +#define PPP_IDLETIMELIMIT 0 +#endif + +#ifndef PPP_LCP_ADAPTIVE +#define PPP_LCP_ADAPTIVE 0 +#endif + +#ifndef PPP_MAXCONNECT +#define PPP_MAXCONNECT 0 +#endif + +#ifndef PPP_ALLOWED_ADDRS +#define PPP_ALLOWED_ADDRS 0 +#endif + +#ifndef PPP_PROTOCOLNAME +#define PPP_PROTOCOLNAME 0 +#endif + +#ifndef PPP_STATS_SUPPORT +#define PPP_STATS_SUPPORT 0 +#endif + +#ifndef DEFLATE_SUPPORT +#define DEFLATE_SUPPORT 0 +#endif + +#ifndef BSDCOMPRESS_SUPPORT +#define BSDCOMPRESS_SUPPORT 0 +#endif + +#ifndef PREDICTOR_SUPPORT +#define PREDICTOR_SUPPORT 0 +#endif + +/************************* +*** PUBLIC DEFINITIONS *** +*************************/ + +/* + * The basic PPP frame. + */ +#define PPP_HDRLEN 4 /* octets for standard ppp header */ +#define PPP_FCSLEN 2 /* octets for FCS */ + +/* + * Values for phase. + */ +#define PPP_PHASE_DEAD 0 +#define PPP_PHASE_MASTER 1 +#define PPP_PHASE_HOLDOFF 2 +#define PPP_PHASE_INITIALIZE 3 +#define PPP_PHASE_SERIALCONN 4 +#define PPP_PHASE_DORMANT 5 +#define PPP_PHASE_ESTABLISH 6 +#define PPP_PHASE_AUTHENTICATE 7 +#define PPP_PHASE_CALLBACK 8 +#define PPP_PHASE_NETWORK 9 +#define PPP_PHASE_RUNNING 10 +#define PPP_PHASE_TERMINATE 11 +#define PPP_PHASE_DISCONNECT 12 + +/* Error codes. */ +#define PPPERR_NONE 0 /* No error. */ +#define PPPERR_PARAM 1 /* Invalid parameter. */ +#define PPPERR_OPEN 2 /* Unable to open PPP session. */ +#define PPPERR_DEVICE 3 /* Invalid I/O device for PPP. */ +#define PPPERR_ALLOC 4 /* Unable to allocate resources. */ +#define PPPERR_USER 5 /* User interrupt. */ +#define PPPERR_CONNECT 6 /* Connection lost. */ +#define PPPERR_AUTHFAIL 7 /* Failed authentication challenge. */ +#define PPPERR_PROTOCOL 8 /* Failed to meet protocol. */ +#define PPPERR_PEERDEAD 9 /* Connection timeout */ +#define PPPERR_IDLETIMEOUT 10 /* Idle Timeout */ +#define PPPERR_CONNECTTIME 11 /* Max connect time reached */ +#define PPPERR_LOOPBACK 12 /* Loopback detected */ + +/* Whether auth support is enabled at all */ +#define PPP_AUTH_SUPPORT (PAP_SUPPORT || CHAP_SUPPORT || EAP_SUPPORT) + +/************************ +*** PUBLIC DATA TYPES *** +************************/ + +/* + * Other headers require ppp_pcb definition for prototypes, but ppp_pcb + * require some structure definition from other headers as well, we are + * fixing the dependency loop here by declaring the ppp_pcb type then + * by including headers containing necessary struct definition for ppp_pcb + */ +typedef struct ppp_pcb_s ppp_pcb; + +/* Type definitions for BSD code. 
*/ +#ifndef __u_char_defined +typedef unsigned long u_long; +typedef unsigned int u_int; +typedef unsigned short u_short; +typedef unsigned char u_char; +#endif + +#include "fsm.h" +#include "lcp.h" +#if CCP_SUPPORT +#include "ccp.h" +#endif /* CCP_SUPPORT */ +#if MPPE_SUPPORT +#include "mppe.h" +#endif /* MPPE_SUPPORT */ +#if PPP_IPV4_SUPPORT +#include "ipcp.h" +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT +#include "ipv6cp.h" +#endif /* PPP_IPV6_SUPPORT */ +#if PAP_SUPPORT +#include "upap.h" +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT +#include "chap-new.h" +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT +#include "eap.h" +#endif /* EAP_SUPPORT */ +#if VJ_SUPPORT +#include "vj.h" +#endif /* VJ_SUPPORT */ + +/* Link status callback function prototype */ +typedef void (*ppp_link_status_cb_fn)(ppp_pcb *pcb, int err_code, void *ctx); + +/* + * PPP configuration. + */ +typedef struct ppp_settings_s { + +#if PPP_SERVER && PPP_AUTH_SUPPORT + unsigned int auth_required :1; /* Peer is required to authenticate */ + unsigned int null_login :1; /* Username of "" and a password of "" are acceptable */ +#endif /* PPP_SERVER && PPP_AUTH_SUPPORT */ +#if PPP_REMOTENAME + unsigned int explicit_remote :1; /* remote_name specified with remotename opt */ +#endif /* PPP_REMOTENAME */ +#if PAP_SUPPORT + unsigned int refuse_pap :1; /* Don't proceed auth. with PAP */ +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + unsigned int refuse_chap :1; /* Don't proceed auth. with CHAP */ +#endif /* CHAP_SUPPORT */ +#if MSCHAP_SUPPORT + unsigned int refuse_mschap :1; /* Don't proceed auth. with MS-CHAP */ + unsigned int refuse_mschap_v2 :1; /* Don't proceed auth. with MS-CHAPv2 */ +#endif /* MSCHAP_SUPPORT */ +#if EAP_SUPPORT + unsigned int refuse_eap :1; /* Don't proceed auth. with EAP */ +#endif /* EAP_SUPPORT */ +#if LWIP_DNS + unsigned int usepeerdns :1; /* Ask peer for DNS adds */ +#endif /* LWIP_DNS */ + unsigned int persist :1; /* Persist mode, always try to open the connection */ +#if PRINTPKT_SUPPORT + unsigned int hide_password :1; /* Hide password in dumped packets */ +#endif /* PRINTPKT_SUPPORT */ + unsigned int noremoteip :1; /* Let him have no IP address */ + unsigned int lax_recv :1; /* accept control chars in asyncmap */ + unsigned int noendpoint :1; /* don't send/accept endpoint discriminator */ +#if PPP_LCP_ADAPTIVE + unsigned int lcp_echo_adaptive :1; /* request echo only if the link was idle */ +#endif /* PPP_LCP_ADAPTIVE */ +#if MPPE_SUPPORT + unsigned int require_mppe :1; /* Require MPPE (Microsoft Point to Point Encryption) */ + unsigned int refuse_mppe_40 :1; /* Allow MPPE 40-bit mode? */ + unsigned int refuse_mppe_128 :1; /* Allow MPPE 128-bit mode? */ + unsigned int refuse_mppe_stateful :1; /* Allow MPPE stateful mode? */ +#endif /* MPPE_SUPPORT */ + + u16_t listen_time; /* time to listen first (ms), waiting for peer to send LCP packet */ + +#if PPP_IDLETIMELIMIT + u16_t idle_time_limit; /* Disconnect if idle for this many seconds */ +#endif /* PPP_IDLETIMELIMIT */ +#if PPP_MAXCONNECT + u32_t maxconnect; /* Maximum connect time (seconds) */ +#endif /* PPP_MAXCONNECT */ + +#if PPP_AUTH_SUPPORT + /* auth data */ + const char *user; /* Username for PAP */ + const char *passwd; /* Password for PAP, secret for CHAP */ +#if PPP_REMOTENAME + char remote_name[MAXNAMELEN + 1]; /* Peer's name for authentication */ +#endif /* PPP_REMOTENAME */ + +#if PAP_SUPPORT + u8_t pap_timeout_time; /* Timeout (seconds) for auth-req retrans. 
*/ + u8_t pap_max_transmits; /* Number of auth-reqs sent */ +#if PPP_SERVER + u8_t pap_req_timeout; /* Time to wait for auth-req from peer */ +#endif /* PPP_SERVER */ +#endif /* PAP_SUPPPORT */ + +#if CHAP_SUPPORT + u8_t chap_timeout_time; /* Timeout (seconds) for retransmitting req */ + u8_t chap_max_transmits; /* max # times to send challenge */ +#if PPP_SERVER + u8_t chap_rechallenge_time; /* Time to wait for auth-req from peer */ +#endif /* PPP_SERVER */ +#endif /* CHAP_SUPPPORT */ + +#if EAP_SUPPORT + u8_t eap_req_time; /* Time to wait (for retransmit/fail) */ + u8_t eap_allow_req; /* Max Requests allowed */ +#if PPP_SERVER + u8_t eap_timeout_time; /* Time to wait (for retransmit/fail) */ + u8_t eap_max_transmits; /* Max Requests allowed */ +#endif /* PPP_SERVER */ +#endif /* EAP_SUPPORT */ + +#endif /* PPP_AUTH_SUPPORT */ + + u8_t fsm_timeout_time; /* Timeout time in seconds */ + u8_t fsm_max_conf_req_transmits; /* Maximum Configure-Request transmissions */ + u8_t fsm_max_term_transmits; /* Maximum Terminate-Request transmissions */ + u8_t fsm_max_nak_loops; /* Maximum number of nak loops tolerated */ + + u8_t lcp_loopbackfail; /* Number of times we receive our magic number from the peer + before deciding the link is looped-back. */ + u8_t lcp_echo_interval; /* Interval between LCP echo-requests */ + u8_t lcp_echo_fails; /* Tolerance to unanswered echo-requests */ + +} ppp_settings; + +#if PPP_SERVER +struct ppp_addrs { +#if PPP_IPV4_SUPPORT + ip4_addr_t our_ipaddr, his_ipaddr, netmask; +#if LWIP_DNS + ip4_addr_t dns1, dns2; +#endif /* LWIP_DNS */ +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT + ip6_addr_t our6_ipaddr, his6_ipaddr; +#endif /* PPP_IPV6_SUPPORT */ +}; +#endif /* PPP_SERVER */ + +/* + * PPP interface control block. + */ +struct ppp_pcb_s { + ppp_settings settings; + const struct link_callbacks *link_cb; + void *link_ctx_cb; + void (*link_status_cb)(ppp_pcb *pcb, int err_code, void *ctx); /* Status change callback */ +#if PPP_NOTIFY_PHASE + void (*notify_phase_cb)(ppp_pcb *pcb, u8_t phase, void *ctx); /* Notify phase callback */ +#endif /* PPP_NOTIFY_PHASE */ + void *ctx_cb; /* Callbacks optional pointer */ + struct netif *netif; /* PPP interface */ + u8_t phase; /* where the link is at */ + u8_t err_code; /* Code indicating why interface is down. */ + + /* flags */ +#if PPP_IPV4_SUPPORT + unsigned int ask_for_local :1; /* request our address from peer */ + unsigned int ipcp_is_open :1; /* haven't called np_finished() */ + unsigned int ipcp_is_up :1; /* have called ipcp_up() */ + unsigned int if4_up :1; /* True when the IPv4 interface is up. */ +#if 0 /* UNUSED - PROXY ARP */ + unsigned int proxy_arp_set :1; /* Have created proxy arp entry */ +#endif /* UNUSED - PROXY ARP */ +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT + unsigned int ipv6cp_is_up :1; /* have called ip6cp_up() */ + unsigned int if6_up :1; /* True when the IPv6 interface is up. */ +#endif /* PPP_IPV6_SUPPORT */ + unsigned int lcp_echo_timer_running :1; /* set if a timer is running */ +#if VJ_SUPPORT + unsigned int vj_enabled :1; /* Flag indicating VJ compression enabled. */ +#endif /* VJ_SUPPORT */ +#if CCP_SUPPORT + unsigned int ccp_all_rejected :1; /* we rejected all peer's options */ +#endif /* CCP_SUPPORT */ +#if MPPE_SUPPORT + unsigned int mppe_keys_set :1; /* Have the MPPE keys been set? 
*/ +#endif /* MPPE_SUPPORT */ + +#if PPP_AUTH_SUPPORT + /* auth data */ +#if PPP_SERVER && defined(HAVE_MULTILINK) + char peer_authname[MAXNAMELEN + 1]; /* The name by which the peer authenticated itself to us. */ +#endif /* PPP_SERVER && defined(HAVE_MULTILINK) */ + u16_t auth_pending; /* Records which authentication operations haven't completed yet. */ + u16_t auth_done; /* Records which authentication operations have been completed. */ + +#if PAP_SUPPORT + upap_state upap; /* PAP data */ +#endif /* PAP_SUPPORT */ + +#if CHAP_SUPPORT + chap_client_state chap_client; /* CHAP client data */ +#if PPP_SERVER + chap_server_state chap_server; /* CHAP server data */ +#endif /* PPP_SERVER */ +#endif /* CHAP_SUPPORT */ + +#if EAP_SUPPORT + eap_state eap; /* EAP data */ +#endif /* EAP_SUPPORT */ +#endif /* PPP_AUTH_SUPPORT */ + + fsm lcp_fsm; /* LCP fsm structure */ + lcp_options lcp_wantoptions; /* Options that we want to request */ + lcp_options lcp_gotoptions; /* Options that peer ack'd */ + lcp_options lcp_allowoptions; /* Options we allow peer to request */ + lcp_options lcp_hisoptions; /* Options that we ack'd */ + u16_t peer_mru; /* currently negotiated peer MRU */ + u8_t lcp_echos_pending; /* Number of outstanding echo msgs */ + u8_t lcp_echo_number; /* ID number of next echo frame */ + + u8_t num_np_open; /* Number of network protocols which we have opened. */ + u8_t num_np_up; /* Number of network protocols which have come up. */ + +#if VJ_SUPPORT + struct vjcompress vj_comp; /* Van Jacobson compression header. */ +#endif /* VJ_SUPPORT */ + +#if CCP_SUPPORT + fsm ccp_fsm; /* CCP fsm structure */ + ccp_options ccp_wantoptions; /* what to request the peer to use */ + ccp_options ccp_gotoptions; /* what the peer agreed to do */ + ccp_options ccp_allowoptions; /* what we'll agree to do */ + ccp_options ccp_hisoptions; /* what we agreed to do */ + u8_t ccp_localstate; /* Local state (mainly for handling reset-reqs and reset-acks). */ + u8_t ccp_receive_method; /* Method chosen on receive path */ + u8_t ccp_transmit_method; /* Method chosen on transmit path */ +#if MPPE_SUPPORT + ppp_mppe_state mppe_comp; /* MPPE "compressor" structure */ + ppp_mppe_state mppe_decomp; /* MPPE "decompressor" structure */ +#endif /* MPPE_SUPPORT */ +#endif /* CCP_SUPPORT */ + +#if PPP_IPV4_SUPPORT + fsm ipcp_fsm; /* IPCP fsm structure */ + ipcp_options ipcp_wantoptions; /* Options that we want to request */ + ipcp_options ipcp_gotoptions; /* Options that peer ack'd */ + ipcp_options ipcp_allowoptions; /* Options we allow peer to request */ + ipcp_options ipcp_hisoptions; /* Options that we ack'd */ +#endif /* PPP_IPV4_SUPPORT */ + +#if PPP_IPV6_SUPPORT + fsm ipv6cp_fsm; /* IPV6CP fsm structure */ + ipv6cp_options ipv6cp_wantoptions; /* Options that we want to request */ + ipv6cp_options ipv6cp_gotoptions; /* Options that peer ack'd */ + ipv6cp_options ipv6cp_allowoptions; /* Options we allow peer to request */ + ipv6cp_options ipv6cp_hisoptions; /* Options that we ack'd */ +#endif /* PPP_IPV6_SUPPORT */ +}; + +/************************ + *** PUBLIC FUNCTIONS *** + ************************/ + +/* + * WARNING: For multi-threads environment, all ppp_set_* functions most + * only be called while the PPP is in the dead phase (i.e. disconnected). + */ + +#if PPP_AUTH_SUPPORT +/* + * Set PPP authentication. + * + * Warning: Using PPPAUTHTYPE_ANY might have security consequences. 
+ * RFC 1994 says: + * + * In practice, within or associated with each PPP server, there is a + * database which associates "user" names with authentication + * information ("secrets"). It is not anticipated that a particular + * named user would be authenticated by multiple methods. This would + * make the user vulnerable to attacks which negotiate the least secure + * method from among a set (such as PAP rather than CHAP). If the same + * secret was used, PAP would reveal the secret to be used later with + * CHAP. + * + * Instead, for each user name there should be an indication of exactly + * one method used to authenticate that user name. If a user needs to + * make use of different authentication methods under different + * circumstances, then distinct user names SHOULD be employed, each of + * which identifies exactly one authentication method. + * + * Default is none auth type, unset (NULL) user and passwd. + */ +#define PPPAUTHTYPE_NONE 0x00 +#define PPPAUTHTYPE_PAP 0x01 +#define PPPAUTHTYPE_CHAP 0x02 +#define PPPAUTHTYPE_MSCHAP 0x04 +#define PPPAUTHTYPE_MSCHAP_V2 0x08 +#define PPPAUTHTYPE_EAP 0x10 +#define PPPAUTHTYPE_ANY 0xff +void ppp_set_auth(ppp_pcb *pcb, u8_t authtype, const char *user, const char *passwd); + +/* + * If set, peer is required to authenticate. This is mostly necessary for PPP server support. + * + * Default is false. + */ +#define ppp_set_auth_required(ppp, boolval) (ppp->settings.auth_required = boolval) +#endif /* PPP_AUTH_SUPPORT */ + +#if PPP_IPV4_SUPPORT +/* + * Set PPP interface "our" and "his" IPv4 addresses. This is mostly necessary for PPP server + * support but it can also be used on a PPP link where each side choose its own IP address. + * + * Default is unset (0.0.0.0). + */ +#define ppp_set_ipcp_ouraddr(ppp, addr) do { ppp->ipcp_wantoptions.ouraddr = ip4_addr_get_u32(addr); \ + ppp->ask_for_local = ppp->ipcp_wantoptions.ouraddr != 0; } while(0) +#define ppp_set_ipcp_hisaddr(ppp, addr) (ppp->ipcp_wantoptions.hisaddr = ip4_addr_get_u32(addr)) +#if LWIP_DNS +/* + * Set DNS server addresses that are sent if the peer asks for them. This is mostly necessary + * for PPP server support. + * + * Default is unset (0.0.0.0). + */ +#define ppp_set_ipcp_dnsaddr(ppp, index, addr) (ppp->ipcp_allowoptions.dnsaddr[index] = ip4_addr_get_u32(addr)) + +/* + * If set, we ask the peer for up to 2 DNS server addresses. Received DNS server addresses are + * registered using the dns_setserver() function. + * + * Default is false. + */ +#define ppp_set_usepeerdns(ppp, boolval) (ppp->settings.usepeerdns = boolval) +#endif /* LWIP_DNS */ +#endif /* PPP_IPV4_SUPPORT */ + +#if MPPE_SUPPORT +/* Disable MPPE (Microsoft Point to Point Encryption). This parameter is exclusive. */ +#define PPP_MPPE_DISABLE 0x00 +/* Require the use of MPPE (Microsoft Point to Point Encryption). */ +#define PPP_MPPE_ENABLE 0x01 +/* Allow MPPE to use stateful mode. Stateless mode is still attempted first. */ +#define PPP_MPPE_ALLOW_STATEFUL 0x02 +/* Refuse the use of MPPE with 40-bit encryption. Conflict with PPP_MPPE_REFUSE_128. */ +#define PPP_MPPE_REFUSE_40 0x04 +/* Refuse the use of MPPE with 128-bit encryption. Conflict with PPP_MPPE_REFUSE_40. */ +#define PPP_MPPE_REFUSE_128 0x08 +/* + * Set MPPE configuration + * + * Default is disabled. + */ +void ppp_set_mppe(ppp_pcb *pcb, u8_t flags); +#endif /* MPPE_SUPPORT */ + +/* + * Wait for up to intval milliseconds for a valid PPP packet from the peer. 
+ * At the end of this time, or when a valid PPP packet is received from the + * peer, we commence negotiation by sending our first LCP packet. + * + * Default is 0. + */ +#define ppp_set_listen_time(ppp, intval) (ppp->settings.listen_time = intval) + +/* + * If set, we will attempt to initiate a connection but if no reply is received from + * the peer, we will then just wait passively for a valid LCP packet from the peer. + * + * Default is false. + */ +#define ppp_set_passive(ppp, boolval) (ppp->lcp_wantoptions.passive = boolval) + +/* + * If set, we will not transmit LCP packets to initiate a connection until a valid + * LCP packet is received from the peer. This is what we usually call the server mode. + * + * Default is false. + */ +#define ppp_set_silent(ppp, boolval) (ppp->lcp_wantoptions.silent = boolval) + +/* + * If set, enable protocol field compression negotiation in both the receive and + * the transmit direction. + * + * Default is true. + */ +#define ppp_set_neg_pcomp(ppp, boolval) (ppp->lcp_wantoptions.neg_pcompression = \ + ppp->lcp_allowoptions.neg_pcompression = boolval) + +/* + * If set, enable Address/Control compression in both the receive and the transmit + * direction. + * + * Default is true. + */ +#define ppp_set_neg_accomp(ppp, boolval) (ppp->lcp_wantoptions.neg_accompression = \ + ppp->lcp_allowoptions.neg_accompression = boolval) + +/* + * If set, enable asyncmap negotiation. Otherwise forcing all control characters to + * be escaped for both the transmit and the receive direction. + * + * Default is true. + */ +#define ppp_set_neg_asyncmap(ppp, boolval) (ppp->lcp_wantoptions.neg_asyncmap = \ + ppp->lcp_allowoptions.neg_asyncmap = boolval) + +/* + * This option sets the Async-Control-Character-Map (ACCM) for this end of the link. + * The ACCM is a set of 32 bits, one for each of the ASCII control characters with + * values from 0 to 31, where a 1 bit indicates that the corresponding control + * character should not be used in PPP packets sent to this system. The map is + * an unsigned 32 bits integer where the least significant bit (00000001) represents + * character 0 and the most significant bit (80000000) represents character 31. + * We will then ask the peer to send these characters as a 2-byte escape sequence. + * + * Default is 0. + */ +#define ppp_set_asyncmap(ppp, intval) (ppp->lcp_wantoptions.asyncmap = intval) + +/* + * Set a PPP interface as the default network interface + * (used to output all packets for which no specific route is found). + */ +#define ppp_set_default(ppp) netif_set_default(ppp->netif) + +#if PPP_NOTIFY_PHASE +/* + * Set a PPP notify phase callback. + * + * This can be used for example to set a LED pattern depending on the + * current phase of the PPP session. + */ +typedef void (*ppp_notify_phase_cb_fn)(ppp_pcb *pcb, u8_t phase, void *ctx); +void ppp_set_notify_phase_callback(ppp_pcb *pcb, ppp_notify_phase_cb_fn notify_phase_cb); +#endif /* PPP_NOTIFY_PHASE */ + +/* + * Initiate a PPP connection. + * + * This can only be called if PPP is in the dead phase. + * + * Holdoff is the time to wait (in seconds) before initiating + * the connection. + * + * If this port connects to a modem, the modem connection must be + * established before calling this. + */ +err_t ppp_connect(ppp_pcb *pcb, u16_t holdoff); + +#if PPP_SERVER +/* + * Listen for an incoming PPP connection. + * + * This can only be called if PPP is in the dead phase. + * + * If this port connects to a modem, the modem connection must be + * established before calling this. 
+ */ +err_t ppp_listen(ppp_pcb *pcb); +#endif /* PPP_SERVER */ + +/* + * Initiate the end of a PPP connection. + * Any outstanding packets in the queues are dropped. + * + * Setting nocarrier to 1 close the PPP connection without initiating the + * shutdown procedure. Always using nocarrier = 0 is still recommended, + * this is going to take a little longer time if your link is down, but + * is a safer choice for the PPP state machine. + * + * Return 0 on success, an error code on failure. + */ +err_t ppp_close(ppp_pcb *pcb, u8_t nocarrier); + +/* + * Release the control block. + * + * This can only be called if PPP is in the dead phase. + * + * You must use ppp_close() before if you wish to terminate + * an established PPP session. + * + * Return 0 on success, an error code on failure. + */ +err_t ppp_free(ppp_pcb *pcb); + +/* + * PPP IOCTL commands. + * + * Get the up status - 0 for down, non-zero for up. The argument must + * point to an int. + */ +#define PPPCTLG_UPSTATUS 0 + +/* + * Get the PPP error code. The argument must point to an int. + * Returns a PPPERR_* value. + */ +#define PPPCTLG_ERRCODE 1 + +/* + * Get the fd associated with a PPP over serial + */ +#define PPPCTLG_FD 2 + +/* + * Get and set parameters for the given connection. + * Return 0 on success, an error code on failure. + */ +err_t ppp_ioctl(ppp_pcb *pcb, u8_t cmd, void *arg); + +/* Get the PPP netif interface */ +#define ppp_netif(ppp) (ppp->netif) + +/* Set an lwIP-style status-callback for the selected PPP device */ +#define ppp_set_netif_statuscallback(ppp, status_cb) \ + netif_set_status_callback(ppp->netif, status_cb); + +/* Set an lwIP-style link-callback for the selected PPP device */ +#define ppp_set_netif_linkcallback(ppp, link_cb) \ + netif_set_link_callback(ppp->netif, link_cb); + +#ifdef __cplusplus +} +#endif + +#endif /* PPP_H */ + +#endif /* PPP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_impl.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_impl.h new file mode 100644 index 0000000..40843d5 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_impl.h @@ -0,0 +1,722 @@ +/***************************************************************************** +* ppp.h - Network Point to Point Protocol header file. +* +* Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. +* portions Copyright (c) 1997 Global Election Systems Inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 03-01-01 Marc Boucher +* Ported to lwIP. +* 97-11-05 Guy Lancaster , Global Election Systems Inc. +* Original derived from BSD codes. +*****************************************************************************/ +#ifndef LWIP_HDR_PPP_IMPL_H +#define LWIP_HDR_PPP_IMPL_H + +#include "netif/ppp/ppp_opts.h" + +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifdef PPP_INCLUDE_SETTINGS_HEADER +#include "ppp_settings.h" +#endif + +#include /* formats */ +#include +#include +#include /* strtol() */ + +#include "lwip/netif.h" +#include "lwip/def.h" +#include "lwip/timeouts.h" + +#include "ppp.h" +#include "pppdebug.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Memory used for control packets. + * + * PPP_CTRL_PBUF_MAX_SIZE is the amount of memory we allocate when we + * cannot figure out how much we are going to use before filling the buffer. + */ +#if PPP_USE_PBUF_RAM +#define PPP_CTRL_PBUF_TYPE PBUF_RAM +#define PPP_CTRL_PBUF_MAX_SIZE 512 +#else /* PPP_USE_PBUF_RAM */ +#define PPP_CTRL_PBUF_TYPE PBUF_POOL +#define PPP_CTRL_PBUF_MAX_SIZE PBUF_POOL_BUFSIZE +#endif /* PPP_USE_PBUF_RAM */ + +/* + * The basic PPP frame. + */ +#define PPP_ADDRESS(p) (((u_char *)(p))[0]) +#define PPP_CONTROL(p) (((u_char *)(p))[1]) +#define PPP_PROTOCOL(p) ((((u_char *)(p))[2] << 8) + ((u_char *)(p))[3]) + +/* + * Significant octet values. + */ +#define PPP_ALLSTATIONS 0xff /* All-Stations broadcast address */ +#define PPP_UI 0x03 /* Unnumbered Information */ +#define PPP_FLAG 0x7e /* Flag Sequence */ +#define PPP_ESCAPE 0x7d /* Asynchronous Control Escape */ +#define PPP_TRANS 0x20 /* Asynchronous transparency modifier */ + +/* + * Protocol field values. 
+ */ +#define PPP_IP 0x21 /* Internet Protocol */ +#if 0 /* UNUSED */ +#define PPP_AT 0x29 /* AppleTalk Protocol */ +#define PPP_IPX 0x2b /* IPX protocol */ +#endif /* UNUSED */ +#if VJ_SUPPORT +#define PPP_VJC_COMP 0x2d /* VJ compressed TCP */ +#define PPP_VJC_UNCOMP 0x2f /* VJ uncompressed TCP */ +#endif /* VJ_SUPPORT */ +#if PPP_IPV6_SUPPORT +#define PPP_IPV6 0x57 /* Internet Protocol Version 6 */ +#endif /* PPP_IPV6_SUPPORT */ +#if CCP_SUPPORT +#define PPP_COMP 0xfd /* compressed packet */ +#endif /* CCP_SUPPORT */ +#define PPP_IPCP 0x8021 /* IP Control Protocol */ +#if 0 /* UNUSED */ +#define PPP_ATCP 0x8029 /* AppleTalk Control Protocol */ +#define PPP_IPXCP 0x802b /* IPX Control Protocol */ +#endif /* UNUSED */ +#if PPP_IPV6_SUPPORT +#define PPP_IPV6CP 0x8057 /* IPv6 Control Protocol */ +#endif /* PPP_IPV6_SUPPORT */ +#if CCP_SUPPORT +#define PPP_CCP 0x80fd /* Compression Control Protocol */ +#endif /* CCP_SUPPORT */ +#if ECP_SUPPORT +#define PPP_ECP 0x8053 /* Encryption Control Protocol */ +#endif /* ECP_SUPPORT */ +#define PPP_LCP 0xc021 /* Link Control Protocol */ +#if PAP_SUPPORT +#define PPP_PAP 0xc023 /* Password Authentication Protocol */ +#endif /* PAP_SUPPORT */ +#if LQR_SUPPORT +#define PPP_LQR 0xc025 /* Link Quality Report protocol */ +#endif /* LQR_SUPPORT */ +#if CHAP_SUPPORT +#define PPP_CHAP 0xc223 /* Cryptographic Handshake Auth. Protocol */ +#endif /* CHAP_SUPPORT */ +#if CBCP_SUPPORT +#define PPP_CBCP 0xc029 /* Callback Control Protocol */ +#endif /* CBCP_SUPPORT */ +#if EAP_SUPPORT +#define PPP_EAP 0xc227 /* Extensible Authentication Protocol */ +#endif /* EAP_SUPPORT */ + +/* + * The following struct gives the addresses of procedures to call + * for a particular lower link level protocol. + */ +struct link_callbacks { + /* Start a connection (e.g. Initiate discovery phase) */ + void (*connect) (ppp_pcb *pcb, void *ctx); +#if PPP_SERVER + /* Listen for an incoming connection (Passive mode) */ + void (*listen) (ppp_pcb *pcb, void *ctx); +#endif /* PPP_SERVER */ + /* End a connection (i.e. initiate disconnect phase) */ + void (*disconnect) (ppp_pcb *pcb, void *ctx); + /* Free lower protocol control block */ + err_t (*free) (ppp_pcb *pcb, void *ctx); + /* Write a pbuf to a ppp link, only used from PPP functions to send PPP packets. */ + err_t (*write)(ppp_pcb *pcb, void *ctx, struct pbuf *p); + /* Send a packet from lwIP core (IPv4 or IPv6) */ + err_t (*netif_output)(ppp_pcb *pcb, void *ctx, struct pbuf *p, u_short protocol); + /* configure the transmit-side characteristics of the PPP interface */ + void (*send_config)(ppp_pcb *pcb, void *ctx, u32_t accm, int pcomp, int accomp); + /* confire the receive-side characteristics of the PPP interface */ + void (*recv_config)(ppp_pcb *pcb, void *ctx, u32_t accm, int pcomp, int accomp); +}; + +/* + * What to do with network protocol (NP) packets. + */ +enum NPmode { + NPMODE_PASS, /* pass the packet through */ + NPMODE_DROP, /* silently drop the packet */ + NPMODE_ERROR, /* return an error */ + NPMODE_QUEUE /* save it up for later. */ +}; + +/* + * Statistics. 
+ */ +#if PPP_STATS_SUPPORT +struct pppstat { + unsigned int ppp_ibytes; /* bytes received */ + unsigned int ppp_ipackets; /* packets received */ + unsigned int ppp_ierrors; /* receive errors */ + unsigned int ppp_obytes; /* bytes sent */ + unsigned int ppp_opackets; /* packets sent */ + unsigned int ppp_oerrors; /* transmit errors */ +}; + +#if VJ_SUPPORT +struct vjstat { + unsigned int vjs_packets; /* outbound packets */ + unsigned int vjs_compressed; /* outbound compressed packets */ + unsigned int vjs_searches; /* searches for connection state */ + unsigned int vjs_misses; /* times couldn't find conn. state */ + unsigned int vjs_uncompressedin; /* inbound uncompressed packets */ + unsigned int vjs_compressedin; /* inbound compressed packets */ + unsigned int vjs_errorin; /* inbound unknown type packets */ + unsigned int vjs_tossed; /* inbound packets tossed because of error */ +}; +#endif /* VJ_SUPPORT */ + +struct ppp_stats { + struct pppstat p; /* basic PPP statistics */ +#if VJ_SUPPORT + struct vjstat vj; /* VJ header compression statistics */ +#endif /* VJ_SUPPORT */ +}; + +#if CCP_SUPPORT +struct compstat { + unsigned int unc_bytes; /* total uncompressed bytes */ + unsigned int unc_packets; /* total uncompressed packets */ + unsigned int comp_bytes; /* compressed bytes */ + unsigned int comp_packets; /* compressed packets */ + unsigned int inc_bytes; /* incompressible bytes */ + unsigned int inc_packets; /* incompressible packets */ + unsigned int ratio; /* recent compression ratio << 8 */ +}; + +struct ppp_comp_stats { + struct compstat c; /* packet compression statistics */ + struct compstat d; /* packet decompression statistics */ +}; +#endif /* CCP_SUPPORT */ + +#endif /* PPP_STATS_SUPPORT */ + +#if PPP_IDLETIMELIMIT +/* + * The following structure records the time in seconds since + * the last NP packet was sent or received. + */ +struct ppp_idle { + time_t xmit_idle; /* time since last NP packet sent */ + time_t recv_idle; /* time since last NP packet received */ +}; +#endif /* PPP_IDLETIMELIMIT */ + +/* values for epdisc.class */ +#define EPD_NULL 0 /* null discriminator, no data */ +#define EPD_LOCAL 1 +#define EPD_IP 2 +#define EPD_MAC 3 +#define EPD_MAGIC 4 +#define EPD_PHONENUM 5 + +/* + * Global variables. + */ +#ifdef HAVE_MULTILINK +extern u8_t multilink; /* enable multilink operation */ +extern u8_t doing_multilink; +extern u8_t multilink_master; +extern u8_t bundle_eof; +extern u8_t bundle_terminating; +#endif + +#ifdef MAXOCTETS +extern unsigned int maxoctets; /* Maximum octetes per session (in bytes) */ +extern int maxoctets_dir; /* Direction : + 0 - in+out (default) + 1 - in + 2 - out + 3 - max(in,out) */ +extern int maxoctets_timeout; /* Timeout for check of octets limit */ +#define PPP_OCTETS_DIRECTION_SUM 0 +#define PPP_OCTETS_DIRECTION_IN 1 +#define PPP_OCTETS_DIRECTION_OUT 2 +#define PPP_OCTETS_DIRECTION_MAXOVERAL 3 +/* same as previos, but little different on RADIUS side */ +#define PPP_OCTETS_DIRECTION_MAXSESSION 4 +#endif + +/* Data input may be used by CCP and ECP, remove this entry + * from struct protent to save some flash + */ +#define PPP_DATAINPUT 0 + +/* + * The following struct gives the addresses of procedures to call + * for a particular protocol. 
+ */ +struct protent { + u_short protocol; /* PPP protocol number */ + /* Initialization procedure */ + void (*init) (ppp_pcb *pcb); + /* Process a received packet */ + void (*input) (ppp_pcb *pcb, u_char *pkt, int len); + /* Process a received protocol-reject */ + void (*protrej) (ppp_pcb *pcb); + /* Lower layer has come up */ + void (*lowerup) (ppp_pcb *pcb); + /* Lower layer has gone down */ + void (*lowerdown) (ppp_pcb *pcb); + /* Open the protocol */ + void (*open) (ppp_pcb *pcb); + /* Close the protocol */ + void (*close) (ppp_pcb *pcb, const char *reason); +#if PRINTPKT_SUPPORT + /* Print a packet in readable form */ + int (*printpkt) (const u_char *pkt, int len, + void (*printer) (void *, const char *, ...), + void *arg); +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + /* Process a received data packet */ + void (*datainput) (ppp_pcb *pcb, u_char *pkt, int len); +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + const char *name; /* Text name of protocol */ + const char *data_name; /* Text name of corresponding data protocol */ +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + option_t *options; /* List of command-line options */ + /* Check requested options, assign defaults */ + void (*check_options) (void); +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + /* Configure interface for demand-dial */ + int (*demand_conf) (int unit); + /* Say whether to bring up link for this pkt */ + int (*active_pkt) (u_char *pkt, int len); +#endif /* DEMAND_SUPPORT */ +}; + +/* Table of pointers to supported protocols */ +extern const struct protent* const protocols[]; + + +/* Values for auth_pending, auth_done */ +#if PAP_SUPPORT +#define PAP_WITHPEER 0x1 +#define PAP_PEER 0x2 +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT +#define CHAP_WITHPEER 0x4 +#define CHAP_PEER 0x8 +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT +#define EAP_WITHPEER 0x10 +#define EAP_PEER 0x20 +#endif /* EAP_SUPPORT */ + +/* Values for auth_done only */ +#if CHAP_SUPPORT +#define CHAP_MD5_WITHPEER 0x40 +#define CHAP_MD5_PEER 0x80 +#if MSCHAP_SUPPORT +#define CHAP_MS_SHIFT 8 /* LSB position for MS auths */ +#define CHAP_MS_WITHPEER 0x100 +#define CHAP_MS_PEER 0x200 +#define CHAP_MS2_WITHPEER 0x400 +#define CHAP_MS2_PEER 0x800 +#endif /* MSCHAP_SUPPORT */ +#endif /* CHAP_SUPPORT */ + +/* Supported CHAP protocols */ +#if CHAP_SUPPORT + +#if MSCHAP_SUPPORT +#define CHAP_MDTYPE_SUPPORTED (MDTYPE_MICROSOFT_V2 | MDTYPE_MICROSOFT | MDTYPE_MD5) +#else /* MSCHAP_SUPPORT */ +#define CHAP_MDTYPE_SUPPORTED (MDTYPE_MD5) +#endif /* MSCHAP_SUPPORT */ + +#else /* CHAP_SUPPORT */ +#define CHAP_MDTYPE_SUPPORTED (MDTYPE_NONE) +#endif /* CHAP_SUPPORT */ + +#if PPP_STATS_SUPPORT +/* + * PPP statistics structure + */ +struct pppd_stats { + unsigned int bytes_in; + unsigned int bytes_out; + unsigned int pkts_in; + unsigned int pkts_out; +}; +#endif /* PPP_STATS_SUPPORT */ + + +/* + * PPP private functions + */ + + +/* + * Functions called from lwIP core. + */ + +/* initialize the PPP subsystem */ +int ppp_init(void); + +/* + * Functions called from PPP link protocols. + */ + +/* Create a new PPP control block */ +ppp_pcb *ppp_new(struct netif *pppif, const struct link_callbacks *callbacks, void *link_ctx_cb, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb); + +/* Initiate LCP open request */ +void ppp_start(ppp_pcb *pcb); + +/* Called when link failed to setup */ +void ppp_link_failed(ppp_pcb *pcb); + +/* Called when link is normally down (i.e. 
it was asked to end) */ +void ppp_link_end(ppp_pcb *pcb); + +/* function called to process input packet */ +void ppp_input(ppp_pcb *pcb, struct pbuf *pb); + + +/* + * Functions called by PPP protocols. + */ + +/* function called by all PPP subsystems to send packets */ +err_t ppp_write(ppp_pcb *pcb, struct pbuf *p); + +/* functions called by auth.c link_terminated() */ +void ppp_link_terminated(ppp_pcb *pcb); + +void new_phase(ppp_pcb *pcb, int p); + +int ppp_send_config(ppp_pcb *pcb, int mtu, u32_t accm, int pcomp, int accomp); +int ppp_recv_config(ppp_pcb *pcb, int mru, u32_t accm, int pcomp, int accomp); + +#if PPP_IPV4_SUPPORT +int sifaddr(ppp_pcb *pcb, u32_t our_adr, u32_t his_adr, u32_t netmask); +int cifaddr(ppp_pcb *pcb, u32_t our_adr, u32_t his_adr); +#if 0 /* UNUSED - PROXY ARP */ +int sifproxyarp(ppp_pcb *pcb, u32_t his_adr); +int cifproxyarp(ppp_pcb *pcb, u32_t his_adr); +#endif /* UNUSED - PROXY ARP */ +#if LWIP_DNS +int sdns(ppp_pcb *pcb, u32_t ns1, u32_t ns2); +int cdns(ppp_pcb *pcb, u32_t ns1, u32_t ns2); +#endif /* LWIP_DNS */ +#if VJ_SUPPORT +int sifvjcomp(ppp_pcb *pcb, int vjcomp, int cidcomp, int maxcid); +#endif /* VJ_SUPPORT */ +int sifup(ppp_pcb *pcb); +int sifdown (ppp_pcb *pcb); +u32_t get_mask(u32_t addr); +#endif /* PPP_IPV4_SUPPORT */ + +#if PPP_IPV6_SUPPORT +int sif6addr(ppp_pcb *pcb, eui64_t our_eui64, eui64_t his_eui64); +int cif6addr(ppp_pcb *pcb, eui64_t our_eui64, eui64_t his_eui64); +int sif6up(ppp_pcb *pcb); +int sif6down (ppp_pcb *pcb); +#endif /* PPP_IPV6_SUPPORT */ + +#if DEMAND_SUPPORT +int sifnpmode(ppp_pcb *pcb, int proto, enum NPmode mode); +#endif /* DEMAND_SUPPORt */ + +void netif_set_mtu(ppp_pcb *pcb, int mtu); +int netif_get_mtu(ppp_pcb *pcb); + +#if CCP_SUPPORT +#if 0 /* unused */ +int ccp_test(ppp_pcb *pcb, u_char *opt_ptr, int opt_len, int for_transmit); +#endif /* unused */ +void ccp_set(ppp_pcb *pcb, u8_t isopen, u8_t isup, u8_t receive_method, u8_t transmit_method); +void ccp_reset_comp(ppp_pcb *pcb); +void ccp_reset_decomp(ppp_pcb *pcb); +#if 0 /* unused */ +int ccp_fatal_error(ppp_pcb *pcb); +#endif /* unused */ +#endif /* CCP_SUPPORT */ + +#if PPP_IDLETIMELIMIT +int get_idle_time(ppp_pcb *pcb, struct ppp_idle *ip); +#endif /* PPP_IDLETIMELIMIT */ + +#if DEMAND_SUPPORT +int get_loop_output(void); +#endif /* DEMAND_SUPPORT */ + +/* Optional protocol names list, to make our messages a little more informative. */ +#if PPP_PROTOCOLNAME +const char * protocol_name(int proto); +#endif /* PPP_PROTOCOLNAME */ + +/* Optional stats support, to get some statistics on the PPP interface */ +#if PPP_STATS_SUPPORT +void print_link_stats(void); /* Print stats, if available */ +void reset_link_stats(int u); /* Reset (init) stats when link goes up */ +void update_link_stats(int u); /* Get stats at link termination */ +#endif /* PPP_STATS_SUPPORT */ + + + +/* + * Inline versions of get/put char/short/long. + * Pointer is advanced; we assume that both arguments + * are lvalues and will already be in registers. + * cp MUST be u_char *. 
+ */ +#define GETCHAR(c, cp) { \ + (c) = *(cp)++; \ +} +#define PUTCHAR(c, cp) { \ + *(cp)++ = (u_char) (c); \ +} +#define GETSHORT(s, cp) { \ + (s) = *(cp)++ << 8; \ + (s) |= *(cp)++; \ +} +#define PUTSHORT(s, cp) { \ + *(cp)++ = (u_char) ((s) >> 8); \ + *(cp)++ = (u_char) (s); \ +} +#define GETLONG(l, cp) { \ + (l) = *(cp)++ << 8; \ + (l) |= *(cp)++; (l) <<= 8; \ + (l) |= *(cp)++; (l) <<= 8; \ + (l) |= *(cp)++; \ +} +#define PUTLONG(l, cp) { \ + *(cp)++ = (u_char) ((l) >> 24); \ + *(cp)++ = (u_char) ((l) >> 16); \ + *(cp)++ = (u_char) ((l) >> 8); \ + *(cp)++ = (u_char) (l); \ +} + +#define INCPTR(n, cp) ((cp) += (n)) +#define DECPTR(n, cp) ((cp) -= (n)) + +/* + * System dependent definitions for user-level 4.3BSD UNIX implementation. + */ +#define TIMEOUT(f, a, t) do { sys_untimeout((f), (a)); sys_timeout((t)*1000, (f), (a)); } while(0) +#define TIMEOUTMS(f, a, t) do { sys_untimeout((f), (a)); sys_timeout((t), (f), (a)); } while(0) +#define UNTIMEOUT(f, a) sys_untimeout((f), (a)) + +#define BZERO(s, n) memset(s, 0, n) +#define BCMP(s1, s2, l) memcmp(s1, s2, l) + +#define PRINTMSG(m, l) { ppp_info("Remote message: %0.*v", l, m); } + +/* + * MAKEHEADER - Add Header fields to a packet. + */ +#define MAKEHEADER(p, t) { \ + PUTCHAR(PPP_ALLSTATIONS, p); \ + PUTCHAR(PPP_UI, p); \ + PUTSHORT(t, p); } + +/* Procedures exported from auth.c */ +void link_required(ppp_pcb *pcb); /* we are starting to use the link */ +void link_terminated(ppp_pcb *pcb); /* we are finished with the link */ +void link_down(ppp_pcb *pcb); /* the LCP layer has left the Opened state */ +void upper_layers_down(ppp_pcb *pcb); /* take all NCPs down */ +void link_established(ppp_pcb *pcb); /* the link is up; authenticate now */ +void start_networks(ppp_pcb *pcb); /* start all the network control protos */ +void continue_networks(ppp_pcb *pcb); /* start network [ip, etc] control protos */ +#if PPP_AUTH_SUPPORT +#if PPP_SERVER +int auth_check_passwd(ppp_pcb *pcb, char *auser, int userlen, char *apasswd, int passwdlen, const char **msg, int *msglen); + /* check the user name and passwd against configuration */ +void auth_peer_fail(ppp_pcb *pcb, int protocol); + /* peer failed to authenticate itself */ +void auth_peer_success(ppp_pcb *pcb, int protocol, int prot_flavor, const char *name, int namelen); + /* peer successfully authenticated itself */ +#endif /* PPP_SERVER */ +void auth_withpeer_fail(ppp_pcb *pcb, int protocol); + /* we failed to authenticate ourselves */ +void auth_withpeer_success(ppp_pcb *pcb, int protocol, int prot_flavor); + /* we successfully authenticated ourselves */ +#endif /* PPP_AUTH_SUPPORT */ +void np_up(ppp_pcb *pcb, int proto); /* a network protocol has come up */ +void np_down(ppp_pcb *pcb, int proto); /* a network protocol has gone down */ +void np_finished(ppp_pcb *pcb, int proto); /* a network protocol no longer needs link */ +#if PPP_AUTH_SUPPORT +int get_secret(ppp_pcb *pcb, const char *client, const char *server, char *secret, int *secret_len, int am_server); + /* get "secret" for chap */ +#endif /* PPP_AUTH_SUPPORT */ + +/* Procedures exported from ipcp.c */ +/* int parse_dotted_ip (char *, u32_t *); */ + +/* Procedures exported from demand.c */ +#if DEMAND_SUPPORT +void demand_conf (void); /* config interface(s) for demand-dial */ +void demand_block (void); /* set all NPs to queue up packets */ +void demand_unblock (void); /* set all NPs to pass packets */ +void demand_discard (void); /* set all NPs to discard packets */ +void demand_rexmit (int, u32_t); /* retransmit saved frames for an NP*/ 
+int loop_chars (unsigned char *, int); /* process chars from loopback */ +int loop_frame (unsigned char *, int); /* should we bring link up? */ +#endif /* DEMAND_SUPPORT */ + +/* Procedures exported from multilink.c */ +#ifdef HAVE_MULTILINK +void mp_check_options (void); /* Check multilink-related options */ +int mp_join_bundle (void); /* join our link to an appropriate bundle */ +void mp_exit_bundle (void); /* have disconnected our link from bundle */ +void mp_bundle_terminated (void); +char *epdisc_to_str (struct epdisc *); /* string from endpoint discrim. */ +int str_to_epdisc (struct epdisc *, char *); /* endpt disc. from str */ +#else +#define mp_bundle_terminated() /* nothing */ +#define mp_exit_bundle() /* nothing */ +#define doing_multilink 0 +#define multilink_master 0 +#endif + +/* Procedures exported from utils.c. */ +void ppp_print_string(const u_char *p, int len, void (*printer) (void *, const char *, ...), void *arg); /* Format a string for output */ +int ppp_slprintf(char *buf, int buflen, const char *fmt, ...); /* sprintf++ */ +int ppp_vslprintf(char *buf, int buflen, const char *fmt, va_list args); /* vsprintf++ */ +size_t ppp_strlcpy(char *dest, const char *src, size_t len); /* safe strcpy */ +size_t ppp_strlcat(char *dest, const char *src, size_t len); /* safe strncpy */ +void ppp_dbglog(const char *fmt, ...); /* log a debug message */ +void ppp_info(const char *fmt, ...); /* log an informational message */ +void ppp_notice(const char *fmt, ...); /* log a notice-level message */ +void ppp_warn(const char *fmt, ...); /* log a warning message */ +void ppp_error(const char *fmt, ...); /* log an error message */ +void ppp_fatal(const char *fmt, ...); /* log an error message and die(1) */ +#if PRINTPKT_SUPPORT +void ppp_dump_packet(ppp_pcb *pcb, const char *tag, unsigned char *p, int len); + /* dump packet to debug log if interesting */ +#endif /* PRINTPKT_SUPPORT */ + +/* + * Number of necessary timers analysis. + * + * PPP use at least one timer per each of its protocol, but not all protocols are + * active at the same time, thus the number of necessary timeouts is actually + * lower than enabled protocols. Here is the actual necessary timeouts based + * on code analysis. + * + * Note that many features analysed here are not working at all and are only + * there for a comprehensive analysis of necessary timers in order to prevent + * having to redo that each time we add a feature. + * + * Timer list + * + * | holdoff timeout + * | low level protocol timeout (PPPoE or PPPoL2P) + * | LCP delayed UP + * | LCP retransmit (FSM) + * | LCP Echo timer + * .| PAP or CHAP or EAP authentication + * . | ECP retransmit (FSM) + * . | CCP retransmit (FSM) when MPPE is enabled + * . | CCP retransmit (FSM) when MPPE is NOT enabled + * . | IPCP retransmit (FSM) + * . .| IP6CP retransmit (FSM) + * . . | Idle time limit + * . . | Max connect time + * . . | Max octets + * . . | CCP RACK timeout + * . . . + * PPP_PHASE_DEAD + * PPP_PHASE_HOLDOFF + * | . . . + * PPP_PHASE_INITIALIZE + * | . . . + * PPP_PHASE_ESTABLISH + * | . . . + * |. . . + * | . . + * PPP_PHASE_AUTHENTICATE + * | . . + * || . . + * PPP_PHASE_NETWORK + * | || . . + * | ||| . + * PPP_PHASE_RUNNING + * | .||||| + * | . |||| + * PPP_PHASE_TERMINATE + * | . |||| + * PPP_PHASE_NETWORK + * |. . + * PPP_PHASE_ESTABLISH + * PPP_PHASE_DISCONNECT + * PPP_PHASE_DEAD + * + * Alright, PPP basic retransmission and LCP Echo consume one timer. + * 1 + * + * If authentication is enabled one timer is necessary during authentication. 
+ * 1 + PPP_AUTH_SUPPORT + * + * If ECP is enabled one timer is necessary before IPCP and/or IP6CP, one more + * is necessary if CCP is enabled (only with MPPE support but we don't care much + * up to this detail level). + * 1 + ECP_SUPPORT + CCP_SUPPORT + * + * If CCP is enabled it might consume a timer during IPCP or IP6CP, thus + * we might use IPCP, IP6CP and CCP timers simultaneously. + * 1 + PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT + CCP_SUPPORT + * + * When entering running phase, IPCP or IP6CP is still running. If idle time limit + * is enabled one more timer is necessary. Same for max connect time and max + * octets features. Furthermore CCP RACK might be used past this point. + * 1 + PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT -1 + PPP_IDLETIMELIMIT + PPP_MAXCONNECT + MAXOCTETS + CCP_SUPPORT + * + * IPv4 or IPv6 must be enabled, therefore we don't need to take care the authentication + * and the CCP + ECP case, thus reducing overall complexity. + * 1 + LWIP_MAX(PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT + CCP_SUPPORT, PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT -1 + PPP_IDLETIMELIMIT + PPP_MAXCONNECT + MAXOCTETS + CCP_SUPPORT) + * + * We don't support PPP_IDLETIMELIMIT + PPP_MAXCONNECT + MAXOCTETS features + * and adding those defines to ppp_opts.h just for having the value always + * defined to 0 isn't worth it. + * 1 + LWIP_MAX(PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT + CCP_SUPPORT, PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT -1 + CCP_SUPPORT) + * + * Thus, the following is enough for now. + * 1 + PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT + CCP_SUPPORT + */ + +#ifdef __cplusplus +} +#endif + +#endif /* PPP_SUPPORT */ +#endif /* LWIP_HDR_PPP_IMPL_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_opts.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_opts.h new file mode 100644 index 0000000..6702bec --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_opts.h @@ -0,0 +1,610 @@ +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#ifndef LWIP_PPP_OPTS_H +#define LWIP_PPP_OPTS_H + +#include "lwip/opt.h" + +/** + * PPP_SUPPORT==1: Enable PPP. 
+ */ +#ifndef PPP_SUPPORT +#define PPP_SUPPORT 0 +#endif + +/** + * PPPOE_SUPPORT==1: Enable PPP Over Ethernet + */ +#ifndef PPPOE_SUPPORT +#define PPPOE_SUPPORT 0 +#endif + +/** + * PPPOL2TP_SUPPORT==1: Enable PPP Over L2TP + */ +#ifndef PPPOL2TP_SUPPORT +#define PPPOL2TP_SUPPORT 0 +#endif + +/** + * PPPOL2TP_AUTH_SUPPORT==1: Enable PPP Over L2TP Auth (enable MD5 support) + */ +#ifndef PPPOL2TP_AUTH_SUPPORT +#define PPPOL2TP_AUTH_SUPPORT PPPOL2TP_SUPPORT +#endif + +/** + * PPPOS_SUPPORT==1: Enable PPP Over Serial + */ +#ifndef PPPOS_SUPPORT +#define PPPOS_SUPPORT PPP_SUPPORT +#endif + +/** + * LWIP_PPP_API==1: Enable PPP API (in pppapi.c) + */ +#ifndef LWIP_PPP_API +#define LWIP_PPP_API (PPP_SUPPORT && (NO_SYS == 0)) +#endif + +#if PPP_SUPPORT + +/** + * MEMP_NUM_PPP_PCB: the number of simultaneously active PPP + * connections (requires the PPP_SUPPORT option) + */ +#ifndef MEMP_NUM_PPP_PCB +#define MEMP_NUM_PPP_PCB 1 +#endif + +/** + * PPP_NUM_TIMEOUTS_PER_PCB: the number of sys_timeouts running in parallel per + * ppp_pcb. See the detailed explanation at the end of ppp_impl.h about simultaneous + * timers analysis. + */ +#ifndef PPP_NUM_TIMEOUTS_PER_PCB +#define PPP_NUM_TIMEOUTS_PER_PCB (1 + PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT + CCP_SUPPORT) +#endif + +/* The number of sys_timeouts required for the PPP module */ +#define PPP_NUM_TIMEOUTS (PPP_SUPPORT * PPP_NUM_TIMEOUTS_PER_PCB * MEMP_NUM_PPP_PCB) + +/** + * MEMP_NUM_PPPOS_INTERFACES: the number of concurrently active PPPoS + * interfaces (only used with PPPOS_SUPPORT==1) + */ +#ifndef MEMP_NUM_PPPOS_INTERFACES +#define MEMP_NUM_PPPOS_INTERFACES MEMP_NUM_PPP_PCB +#endif + +/** + * MEMP_NUM_PPPOE_INTERFACES: the number of concurrently active PPPoE + * interfaces (only used with PPPOE_SUPPORT==1) + */ +#ifndef MEMP_NUM_PPPOE_INTERFACES +#define MEMP_NUM_PPPOE_INTERFACES 1 +#endif + +/** + * MEMP_NUM_PPPOL2TP_INTERFACES: the number of concurrently active PPPoL2TP + * interfaces (only used with PPPOL2TP_SUPPORT==1) + */ +#ifndef MEMP_NUM_PPPOL2TP_INTERFACES +#define MEMP_NUM_PPPOL2TP_INTERFACES 1 +#endif + +/** + * MEMP_NUM_PPP_API_MSG: Number of concurrent PPP API messages (in pppapi.c) + */ +#ifndef MEMP_NUM_PPP_API_MSG +#define MEMP_NUM_PPP_API_MSG 5 +#endif + +/** + * PPP_DEBUG: Enable debugging for PPP. + */ +#ifndef PPP_DEBUG +#define PPP_DEBUG LWIP_DBG_OFF +#endif + +/** + * PPP_INPROC_IRQ_SAFE==1 call pppos_input() using tcpip_callback(). + * + * Please read the "PPPoS input path" chapter in the PPP documentation about this option. + */ +#ifndef PPP_INPROC_IRQ_SAFE +#define PPP_INPROC_IRQ_SAFE 0 +#endif + +/** + * PRINTPKT_SUPPORT==1: Enable PPP print packet support + * + * Mandatory for debugging, it displays exchanged packet content in debug trace. + */ +#ifndef PRINTPKT_SUPPORT +#define PRINTPKT_SUPPORT 0 +#endif + +/** + * PPP_IPV4_SUPPORT==1: Enable PPP IPv4 support + */ +#ifndef PPP_IPV4_SUPPORT +#define PPP_IPV4_SUPPORT (LWIP_IPV4) +#endif + +/** + * PPP_IPV6_SUPPORT==1: Enable PPP IPv6 support + */ +#ifndef PPP_IPV6_SUPPORT +#define PPP_IPV6_SUPPORT (LWIP_IPV6) +#endif + +/** + * PPP_NOTIFY_PHASE==1: Support PPP notify phase support + * + * PPP notify phase support allows you to set a callback which is + * called on change of the internal PPP state machine. + * + * This can be used for example to set a LED pattern depending on the + * current phase of the PPP session. + */ +#ifndef PPP_NOTIFY_PHASE +#define PPP_NOTIFY_PHASE 0 +#endif + +/** + * pbuf_type PPP is using for LCP, PAP, CHAP, EAP, CCP, IPCP and IP6CP packets. 
+ * + * Memory allocated must be single buffered for PPP to works, it requires pbuf + * that are not going to be chained when allocated. This requires setting + * PBUF_POOL_BUFSIZE to at least 512 bytes, which is quite huge for small systems. + * + * Setting PPP_USE_PBUF_RAM to 1 makes PPP use memory from heap where continuous + * buffers are required, allowing you to use a smaller PBUF_POOL_BUFSIZE. + */ +#ifndef PPP_USE_PBUF_RAM +#define PPP_USE_PBUF_RAM 0 +#endif + +/** + * PPP_FCS_TABLE: Keep a 256*2 byte table to speed up FCS calculation for PPPoS + */ +#ifndef PPP_FCS_TABLE +#define PPP_FCS_TABLE 1 +#endif + +/** + * PAP_SUPPORT==1: Support PAP. + */ +#ifndef PAP_SUPPORT +#define PAP_SUPPORT 0 +#endif + +/** + * CHAP_SUPPORT==1: Support CHAP. + */ +#ifndef CHAP_SUPPORT +#define CHAP_SUPPORT 0 +#endif + +/** + * MSCHAP_SUPPORT==1: Support MSCHAP. + */ +#ifndef MSCHAP_SUPPORT +#define MSCHAP_SUPPORT 0 +#endif +#if MSCHAP_SUPPORT +/* MSCHAP requires CHAP support */ +#undef CHAP_SUPPORT +#define CHAP_SUPPORT 1 +#endif /* MSCHAP_SUPPORT */ + +/** + * EAP_SUPPORT==1: Support EAP. + */ +#ifndef EAP_SUPPORT +#define EAP_SUPPORT 0 +#endif + +/** + * CCP_SUPPORT==1: Support CCP. + */ +#ifndef CCP_SUPPORT +#define CCP_SUPPORT 0 +#endif + +/** + * MPPE_SUPPORT==1: Support MPPE. + */ +#ifndef MPPE_SUPPORT +#define MPPE_SUPPORT 0 +#endif +#if MPPE_SUPPORT +/* MPPE requires CCP support */ +#undef CCP_SUPPORT +#define CCP_SUPPORT 1 +/* MPPE requires MSCHAP support */ +#undef MSCHAP_SUPPORT +#define MSCHAP_SUPPORT 1 +/* MSCHAP requires CHAP support */ +#undef CHAP_SUPPORT +#define CHAP_SUPPORT 1 +#endif /* MPPE_SUPPORT */ + +/** + * CBCP_SUPPORT==1: Support CBCP. CURRENTLY NOT SUPPORTED! DO NOT SET! + */ +#ifndef CBCP_SUPPORT +#define CBCP_SUPPORT 0 +#endif + +/** + * ECP_SUPPORT==1: Support ECP. CURRENTLY NOT SUPPORTED! DO NOT SET! + */ +#ifndef ECP_SUPPORT +#define ECP_SUPPORT 0 +#endif + +/** + * DEMAND_SUPPORT==1: Support dial on demand. CURRENTLY NOT SUPPORTED! DO NOT SET! + */ +#ifndef DEMAND_SUPPORT +#define DEMAND_SUPPORT 0 +#endif + +/** + * LQR_SUPPORT==1: Support Link Quality Report. Do nothing except exchanging some LCP packets. + */ +#ifndef LQR_SUPPORT +#define LQR_SUPPORT 0 +#endif + +/** + * PPP_SERVER==1: Enable PPP server support (waiting for incoming PPP session). + * + * Currently only supported for PPPoS. + */ +#ifndef PPP_SERVER +#define PPP_SERVER 0 +#endif + +#if PPP_SERVER +/* + * PPP_OUR_NAME: Our name for authentication purposes + */ +#ifndef PPP_OUR_NAME +#define PPP_OUR_NAME "lwIP" +#endif +#endif /* PPP_SERVER */ + +/** + * VJ_SUPPORT==1: Support VJ header compression. + */ +#ifndef VJ_SUPPORT +#define VJ_SUPPORT 1 +#endif +/* VJ compression is only supported for TCP over IPv4 over PPPoS. */ +#if !PPPOS_SUPPORT || !PPP_IPV4_SUPPORT || !LWIP_TCP +#undef VJ_SUPPORT +#define VJ_SUPPORT 0 +#endif /* !PPPOS_SUPPORT */ + +/** + * PPP_MD5_RANDM==1: Use MD5 for better randomness. + * Enabled by default if CHAP, EAP, or L2TP AUTH support is enabled. + */ +#ifndef PPP_MD5_RANDM +#define PPP_MD5_RANDM (CHAP_SUPPORT || EAP_SUPPORT || PPPOL2TP_AUTH_SUPPORT) +#endif + +/** + * PolarSSL embedded library + * + * + * lwIP contains some files fetched from the latest BSD release of + * the PolarSSL project (PolarSSL 0.10.1-bsd) for ciphers and encryption + * methods we need for lwIP PPP support. + * + * The PolarSSL files were cleaned to contain only the necessary struct + * fields and functions needed for lwIP. 
+ * + * The PolarSSL API was not changed at all, so if you are already using + * PolarSSL you can choose to skip the compilation of the included PolarSSL + * library into lwIP. + * + * If you are not using the embedded copy you must include external + * libraries into your arch/cc.h port file. + * + * Beware of the stack requirements which can be a lot larger if you are not + * using our cleaned PolarSSL library. + */ + +/** + * LWIP_USE_EXTERNAL_POLARSSL: Use external PolarSSL library + */ +#ifndef LWIP_USE_EXTERNAL_POLARSSL +#define LWIP_USE_EXTERNAL_POLARSSL 0 +#endif + +/** + * LWIP_USE_EXTERNAL_MBEDTLS: Use external mbed TLS library + */ +#ifndef LWIP_USE_EXTERNAL_MBEDTLS +#define LWIP_USE_EXTERNAL_MBEDTLS 0 +#endif + +/* + * PPP Timeouts + */ + +/** + * FSM_DEFTIMEOUT: Timeout time in seconds + */ +#ifndef FSM_DEFTIMEOUT +#define FSM_DEFTIMEOUT 6 +#endif + +/** + * FSM_DEFMAXTERMREQS: Maximum Terminate-Request transmissions + */ +#ifndef FSM_DEFMAXTERMREQS +#define FSM_DEFMAXTERMREQS 2 +#endif + +/** + * FSM_DEFMAXCONFREQS: Maximum Configure-Request transmissions + */ +#ifndef FSM_DEFMAXCONFREQS +#define FSM_DEFMAXCONFREQS 10 +#endif + +/** + * FSM_DEFMAXNAKLOOPS: Maximum number of nak loops + */ +#ifndef FSM_DEFMAXNAKLOOPS +#define FSM_DEFMAXNAKLOOPS 5 +#endif + +/** + * UPAP_DEFTIMEOUT: Timeout (seconds) for retransmitting req + */ +#ifndef UPAP_DEFTIMEOUT +#define UPAP_DEFTIMEOUT 6 +#endif + +/** + * UPAP_DEFTRANSMITS: Maximum number of auth-reqs to send + */ +#ifndef UPAP_DEFTRANSMITS +#define UPAP_DEFTRANSMITS 10 +#endif + +#if PPP_SERVER +/** + * UPAP_DEFREQTIME: Time to wait for auth-req from peer + */ +#ifndef UPAP_DEFREQTIME +#define UPAP_DEFREQTIME 30 +#endif +#endif /* PPP_SERVER */ + +/** + * CHAP_DEFTIMEOUT: Timeout (seconds) for retransmitting req + */ +#ifndef CHAP_DEFTIMEOUT +#define CHAP_DEFTIMEOUT 6 +#endif + +/** + * CHAP_DEFTRANSMITS: max # times to send challenge + */ +#ifndef CHAP_DEFTRANSMITS +#define CHAP_DEFTRANSMITS 10 +#endif + +#if PPP_SERVER +/** + * CHAP_DEFRECHALLENGETIME: If this option is > 0, rechallenge the peer every n seconds + */ +#ifndef CHAP_DEFRECHALLENGETIME +#define CHAP_DEFRECHALLENGETIME 0 +#endif +#endif /* PPP_SERVER */ + +/** + * EAP_DEFREQTIME: Time to wait for peer request + */ +#ifndef EAP_DEFREQTIME +#define EAP_DEFREQTIME 6 +#endif + +/** + * EAP_DEFALLOWREQ: max # times to accept requests + */ +#ifndef EAP_DEFALLOWREQ +#define EAP_DEFALLOWREQ 10 +#endif + +#if PPP_SERVER +/** + * EAP_DEFTIMEOUT: Timeout (seconds) for rexmit + */ +#ifndef EAP_DEFTIMEOUT +#define EAP_DEFTIMEOUT 6 +#endif + +/** + * EAP_DEFTRANSMITS: max # times to transmit + */ +#ifndef EAP_DEFTRANSMITS +#define EAP_DEFTRANSMITS 10 +#endif +#endif /* PPP_SERVER */ + +/** + * LCP_DEFLOOPBACKFAIL: Default number of times we receive our magic number from the peer + * before deciding the link is looped-back. + */ +#ifndef LCP_DEFLOOPBACKFAIL +#define LCP_DEFLOOPBACKFAIL 10 +#endif + +/** + * LCP_ECHOINTERVAL: Interval in seconds between keepalive echo requests, 0 to disable. + */ +#ifndef LCP_ECHOINTERVAL +#define LCP_ECHOINTERVAL 0 +#endif + +/** + * LCP_MAXECHOFAILS: Number of unanswered echo requests before failure. + */ +#ifndef LCP_MAXECHOFAILS +#define LCP_MAXECHOFAILS 3 +#endif + +/** + * PPP_MAXIDLEFLAG: Max Xmit idle time (in ms) before resend flag char. 
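The LCP keepalive pair above (LCP_ECHOINTERVAL / LCP_MAXECHOFAILS) is disabled by default. A sketch of a lwipopts.h override that declares the link dead after roughly 30 seconds of silence; the numbers are only an assumed example:

/* lwipopts.h, assumed keepalive tuning */
#define LCP_ECHOINTERVAL   10   /* send an LCP Echo-Request every 10 s */
#define LCP_MAXECHOFAILS   3    /* give up after 3 unanswered echoes (~30 s) */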
+ */ +#ifndef PPP_MAXIDLEFLAG +#define PPP_MAXIDLEFLAG 100 +#endif + +/** + * PPP Packet sizes + */ + +/** + * PPP_MRU: Default MRU + */ +#ifndef PPP_MRU +#define PPP_MRU 1500 +#endif + +/** + * PPP_DEFMRU: Default MRU to try + */ +#ifndef PPP_DEFMRU +#define PPP_DEFMRU 1500 +#endif + +/** + * PPP_MAXMRU: Normally limit MRU to this (pppd default = 16384) + */ +#ifndef PPP_MAXMRU +#define PPP_MAXMRU 1500 +#endif + +/** + * PPP_MINMRU: No MRUs below this + */ +#ifndef PPP_MINMRU +#define PPP_MINMRU 128 +#endif + +/** + * PPPOL2TP_DEFMRU: Default MTU and MRU for L2TP + * Default = 1500 - PPPoE(6) - PPP Protocol(2) - IPv4 header(20) - UDP Header(8) + * - L2TP Header(6) - HDLC Header(2) - PPP Protocol(2) - MPPE Header(2) - PPP Protocol(2) + */ +#if PPPOL2TP_SUPPORT +#ifndef PPPOL2TP_DEFMRU +#define PPPOL2TP_DEFMRU 1450 +#endif +#endif /* PPPOL2TP_SUPPORT */ + +/** + * MAXNAMELEN: max length of hostname or name for auth + */ +#ifndef MAXNAMELEN +#define MAXNAMELEN 256 +#endif + +/** + * MAXSECRETLEN: max length of password or secret + */ +#ifndef MAXSECRETLEN +#define MAXSECRETLEN 256 +#endif + +/* ------------------------------------------------------------------------- */ + +/* + * Build triggers for embedded PolarSSL + */ +#if !LWIP_USE_EXTERNAL_POLARSSL && !LWIP_USE_EXTERNAL_MBEDTLS + +/* CHAP, EAP, L2TP AUTH and MD5 Random require MD5 support */ +#if CHAP_SUPPORT || EAP_SUPPORT || PPPOL2TP_AUTH_SUPPORT || PPP_MD5_RANDM +#define LWIP_INCLUDED_POLARSSL_MD5 1 +#endif /* CHAP_SUPPORT || EAP_SUPPORT || PPPOL2TP_AUTH_SUPPORT || PPP_MD5_RANDM */ + +#if MSCHAP_SUPPORT + +/* MSCHAP require MD4 support */ +#define LWIP_INCLUDED_POLARSSL_MD4 1 +/* MSCHAP require SHA1 support */ +#define LWIP_INCLUDED_POLARSSL_SHA1 1 +/* MSCHAP require DES support */ +#define LWIP_INCLUDED_POLARSSL_DES 1 + +/* MS-CHAP support is required for MPPE */ +#if MPPE_SUPPORT +/* MPPE require ARC4 support */ +#define LWIP_INCLUDED_POLARSSL_ARC4 1 +#endif /* MPPE_SUPPORT */ + +#endif /* MSCHAP_SUPPORT */ + +#endif /* !LWIP_USE_EXTERNAL_POLARSSL && !LWIP_USE_EXTERNAL_MBEDTLS */ + +/* Default value if unset */ +#ifndef LWIP_INCLUDED_POLARSSL_MD4 +#define LWIP_INCLUDED_POLARSSL_MD4 0 +#endif /* LWIP_INCLUDED_POLARSSL_MD4 */ +#ifndef LWIP_INCLUDED_POLARSSL_MD5 +#define LWIP_INCLUDED_POLARSSL_MD5 0 +#endif /* LWIP_INCLUDED_POLARSSL_MD5 */ +#ifndef LWIP_INCLUDED_POLARSSL_SHA1 +#define LWIP_INCLUDED_POLARSSL_SHA1 0 +#endif /* LWIP_INCLUDED_POLARSSL_SHA1 */ +#ifndef LWIP_INCLUDED_POLARSSL_DES +#define LWIP_INCLUDED_POLARSSL_DES 0 +#endif /* LWIP_INCLUDED_POLARSSL_DES */ +#ifndef LWIP_INCLUDED_POLARSSL_ARC4 +#define LWIP_INCLUDED_POLARSSL_ARC4 0 +#endif /* LWIP_INCLUDED_POLARSSL_ARC4 */ + +#endif /* PPP_SUPPORT */ + +/* Default value if unset */ +#ifndef PPP_NUM_TIMEOUTS +#define PPP_NUM_TIMEOUTS 0 +#endif /* PPP_NUM_TIMEOUTS */ + +#endif /* LWIP_PPP_OPTS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppapi.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppapi.h new file mode 100644 index 0000000..913d93f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppapi.h @@ -0,0 +1,137 @@ +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#ifndef LWIP_PPPAPI_H +#define LWIP_PPPAPI_H + +#include "netif/ppp/ppp_opts.h" + +#if LWIP_PPP_API /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/sys.h" +#include "lwip/netif.h" +#include "lwip/priv/tcpip_priv.h" +#include "netif/ppp/ppp.h" +#if PPPOS_SUPPORT +#include "netif/ppp/pppos.h" +#endif /* PPPOS_SUPPORT */ + +#ifdef __cplusplus +extern "C" { +#endif + +struct pppapi_msg_msg { + ppp_pcb *ppp; + union { +#if PPP_NOTIFY_PHASE + struct { + ppp_notify_phase_cb_fn notify_phase_cb; + } setnotifyphasecb; +#endif /* PPP_NOTIFY_PHASE */ +#if PPPOS_SUPPORT + struct { + struct netif *pppif; + pppos_output_cb_fn output_cb; + ppp_link_status_cb_fn link_status_cb; + void *ctx_cb; + } serialcreate; +#endif /* PPPOS_SUPPORT */ +#if PPPOE_SUPPORT + struct { + struct netif *pppif; + struct netif *ethif; + const char *service_name; + const char *concentrator_name; + ppp_link_status_cb_fn link_status_cb; + void *ctx_cb; + } ethernetcreate; +#endif /* PPPOE_SUPPORT */ +#if PPPOL2TP_SUPPORT + struct { + struct netif *pppif; + struct netif *netif; + API_MSG_M_DEF_C(ip_addr_t, ipaddr); + u16_t port; +#if PPPOL2TP_AUTH_SUPPORT + const u8_t *secret; + u8_t secret_len; +#endif /* PPPOL2TP_AUTH_SUPPORT */ + ppp_link_status_cb_fn link_status_cb; + void *ctx_cb; + } l2tpcreate; +#endif /* PPPOL2TP_SUPPORT */ + struct { + u16_t holdoff; + } connect; + struct { + u8_t nocarrier; + } close; + struct { + u8_t cmd; + void *arg; + } ioctl; + } msg; +}; + +struct pppapi_msg { + struct tcpip_api_call_data call; + struct pppapi_msg_msg msg; +}; + +/* API for application */ +err_t pppapi_set_default(ppp_pcb *pcb); +#if PPP_NOTIFY_PHASE +err_t pppapi_set_notify_phase_callback(ppp_pcb *pcb, ppp_notify_phase_cb_fn notify_phase_cb); +#endif /* PPP_NOTIFY_PHASE */ +#if PPPOS_SUPPORT +ppp_pcb *pppapi_pppos_create(struct netif *pppif, pppos_output_cb_fn output_cb, ppp_link_status_cb_fn link_status_cb, void *ctx_cb); +#endif /* PPPOS_SUPPORT */ +#if PPPOE_SUPPORT +ppp_pcb *pppapi_pppoe_create(struct netif *pppif, struct netif *ethif, const char *service_name, + const char *concentrator_name, ppp_link_status_cb_fn link_status_cb, + void *ctx_cb); +#endif /* PPPOE_SUPPORT */ +#if PPPOL2TP_SUPPORT +ppp_pcb *pppapi_pppol2tp_create(struct netif *pppif, struct netif *netif, ip_addr_t *ipaddr, u16_t port, + const u8_t *secret, u8_t secret_len, + 
ppp_link_status_cb_fn link_status_cb, void *ctx_cb); +#endif /* PPPOL2TP_SUPPORT */ +err_t pppapi_connect(ppp_pcb *pcb, u16_t holdoff); +#if PPP_SERVER +err_t pppapi_listen(ppp_pcb *pcb); +#endif /* PPP_SERVER */ +err_t pppapi_close(ppp_pcb *pcb, u8_t nocarrier); +err_t pppapi_free(ppp_pcb *pcb); +err_t pppapi_ioctl(ppp_pcb *pcb, u8_t cmd, void *arg); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_PPP_API */ + +#endif /* LWIP_PPPAPI_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppcrypt.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppcrypt.h new file mode 100644 index 0000000..c0230bb --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppcrypt.h @@ -0,0 +1,144 @@ +/* + * pppcrypt.c - PPP/DES linkage for MS-CHAP and EAP SRP-SHA1 + * + * Extracted from chap_ms.c by James Carlson. + * + * Copyright (c) 1995 Eric Rosenquist. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +/* This header file is included in all PPP modules needing hashes and/or ciphers */ + +#ifndef PPPCRYPT_H +#define PPPCRYPT_H + +/* + * If included PolarSSL copy is not used, user is expected to include + * external libraries in arch/cc.h (which is included by lwip/arch.h). 
+ */ +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Map hashes and ciphers functions to PolarSSL + */ +#if !LWIP_USE_EXTERNAL_MBEDTLS + +#include "netif/ppp/polarssl/md4.h" +#define lwip_md4_context md4_context +#define lwip_md4_init(context) +#define lwip_md4_starts md4_starts +#define lwip_md4_update md4_update +#define lwip_md4_finish md4_finish +#define lwip_md4_free(context) + +#include "netif/ppp/polarssl/md5.h" +#define lwip_md5_context md5_context +#define lwip_md5_init(context) +#define lwip_md5_starts md5_starts +#define lwip_md5_update md5_update +#define lwip_md5_finish md5_finish +#define lwip_md5_free(context) + +#include "netif/ppp/polarssl/sha1.h" +#define lwip_sha1_context sha1_context +#define lwip_sha1_init(context) +#define lwip_sha1_starts sha1_starts +#define lwip_sha1_update sha1_update +#define lwip_sha1_finish sha1_finish +#define lwip_sha1_free(context) + +#include "netif/ppp/polarssl/des.h" +#define lwip_des_context des_context +#define lwip_des_init(context) +#define lwip_des_setkey_enc des_setkey_enc +#define lwip_des_crypt_ecb des_crypt_ecb +#define lwip_des_free(context) + +#include "netif/ppp/polarssl/arc4.h" +#define lwip_arc4_context arc4_context +#define lwip_arc4_init(context) +#define lwip_arc4_setup arc4_setup +#define lwip_arc4_crypt arc4_crypt +#define lwip_arc4_free(context) + +#endif /* !LWIP_USE_EXTERNAL_MBEDTLS */ + +/* + * Map hashes and ciphers functions to mbed TLS + */ +#if LWIP_USE_EXTERNAL_MBEDTLS + +#define lwip_md4_context mbedtls_md4_context +#define lwip_md4_init mbedtls_md4_init +#define lwip_md4_starts mbedtls_md4_starts +#define lwip_md4_update mbedtls_md4_update +#define lwip_md4_finish mbedtls_md4_finish +#define lwip_md4_free mbedtls_md4_free + +#define lwip_md5_context mbedtls_md5_context +#define lwip_md5_init mbedtls_md5_init +#define lwip_md5_starts mbedtls_md5_starts +#define lwip_md5_update mbedtls_md5_update +#define lwip_md5_finish mbedtls_md5_finish +#define lwip_md5_free mbedtls_md5_free + +#define lwip_sha1_context mbedtls_sha1_context +#define lwip_sha1_init mbedtls_sha1_init +#define lwip_sha1_starts mbedtls_sha1_starts +#define lwip_sha1_update mbedtls_sha1_update +#define lwip_sha1_finish mbedtls_sha1_finish +#define lwip_sha1_free mbedtls_sha1_free + +#define lwip_des_context mbedtls_des_context +#define lwip_des_init mbedtls_des_init +#define lwip_des_setkey_enc mbedtls_des_setkey_enc +#define lwip_des_crypt_ecb mbedtls_des_crypt_ecb +#define lwip_des_free mbedtls_des_free + +#define lwip_arc4_context mbedtls_arc4_context +#define lwip_arc4_init mbedtls_arc4_init +#define lwip_arc4_setup mbedtls_arc4_setup +#define lwip_arc4_crypt(context, buffer, length) mbedtls_arc4_crypt(context, length, buffer, buffer) +#define lwip_arc4_free mbedtls_arc4_free + +#endif /* LWIP_USE_EXTERNAL_MBEDTLS */ + +void pppcrypt_56_to_64_bit_key(u_char *key, u_char *des_key); + +#ifdef __cplusplus +} +#endif + +#endif /* PPPCRYPT_H */ + +#endif /* PPP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppdebug.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppdebug.h new file mode 100644 index 0000000..36ee4f9 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppdebug.h @@ -0,0 +1,88 @@ +/***************************************************************************** +* pppdebug.h - System debugging utilities. +* +* Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. +* portions Copyright (c) 1998 Global Election Systems Inc. 
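The lwip_md5_*/lwip_sha1_* wrappers above give the PPP code a single hashing API whether the bundled PolarSSL files or an external mbed TLS build is selected. A simplified sketch of how a digest is computed through these wrappers (real CHAP also feeds the identifier and secret into the hash; the function name is a placeholder):

#include "netif/ppp/pppcrypt.h"

static void hash_challenge(const u8_t *challenge, u16_t challenge_len, u8_t digest[16])
{
  lwip_md5_context ctx;

  lwip_md5_init(&ctx);             /* no-op for the bundled PolarSSL copy */
  lwip_md5_starts(&ctx);
  lwip_md5_update(&ctx, challenge, challenge_len);
  lwip_md5_finish(&ctx, digest);   /* 16-byte MD5 digest */
  lwip_md5_free(&ctx);             /* no-op for the bundled PolarSSL copy */
}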
+* portions Copyright (c) 2001 by Cognizant Pty Ltd. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY (please don't use tabs!) +* +* 03-01-01 Marc Boucher +* Ported to lwIP. +* 98-07-29 Guy Lancaster , Global Election Systems Inc. +* Original. +* +***************************************************************************** +*/ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef PPPDEBUG_H +#define PPPDEBUG_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Trace levels. */ +#define LOG_CRITICAL (PPP_DEBUG | LWIP_DBG_LEVEL_SEVERE) +#define LOG_ERR (PPP_DEBUG | LWIP_DBG_LEVEL_SEVERE) +#define LOG_NOTICE (PPP_DEBUG | LWIP_DBG_LEVEL_WARNING) +#define LOG_WARNING (PPP_DEBUG | LWIP_DBG_LEVEL_WARNING) +#define LOG_INFO (PPP_DEBUG) +#define LOG_DETAIL (PPP_DEBUG) +#define LOG_DEBUG (PPP_DEBUG) + +#if PPP_DEBUG + +#define MAINDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define SYSDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define FSMDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define LCPDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define IPCPDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define IPV6CPDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define UPAPDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define CHAPDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define PPPDEBUG(a, b) LWIP_DEBUGF(a, b) + +#else /* PPP_DEBUG */ + +#define MAINDEBUG(a) +#define SYSDEBUG(a) +#define FSMDEBUG(a) +#define LCPDEBUG(a) +#define IPCPDEBUG(a) +#define IPV6CPDEBUG(a) +#define UPAPDEBUG(a) +#define CHAPDEBUG(a) +#define PPPDEBUG(a, b) + +#endif /* PPP_DEBUG */ + +#ifdef __cplusplus +} +#endif + +#endif /* PPPDEBUG_H */ + +#endif /* PPP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppoe.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppoe.h new file mode 100644 index 0000000..08ab7ab --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppoe.h @@ -0,0 +1,187 @@ +/***************************************************************************** +* pppoe.h - PPP Over Ethernet implementation for lwIP. +* +* Copyright (c) 2006 by Marc Boucher, Services Informatiques (MBSI) inc. 
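The PPPDEBUG/MAINDEBUG family defined in pppdebug.h above compiles to nothing unless PPP_DEBUG is enabled, so calls can stay in hot paths. A sketch of typical usage, with an illustrative message:

#include "netif/ppp/pppdebug.h"

static void trace_rx(int len)
{
  /* expands to LWIP_DEBUGF(...) when PPP_DEBUG is on, to nothing otherwise */
  PPPDEBUG(LOG_DEBUG, ("pppos_input: got %d bytes\n", len));
}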
+* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 06-01-01 Marc Boucher +* Ported to lwIP. +*****************************************************************************/ + + + +/* based on NetBSD: if_pppoe.c,v 1.64 2006/01/31 23:50:15 martin Exp */ + +/*- + * Copyright (c) 2002 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Martin Husemann . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPPOE_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef PPP_OE_H +#define PPP_OE_H + +#include "ppp.h" +#include "lwip/etharp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct pppoehdr { + PACK_STRUCT_FLD_8(u8_t vertype); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t session); + PACK_STRUCT_FIELD(u16_t plen); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct pppoetag { + PACK_STRUCT_FIELD(u16_t tag); + PACK_STRUCT_FIELD(u16_t len); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + + +#define PPPOE_STATE_INITIAL 0 +#define PPPOE_STATE_PADI_SENT 1 +#define PPPOE_STATE_PADR_SENT 2 +#define PPPOE_STATE_SESSION 3 +/* passive */ +#define PPPOE_STATE_PADO_SENT 1 + +#define PPPOE_HEADERLEN sizeof(struct pppoehdr) +#define PPPOE_VERTYPE 0x11 /* VER=1, TYPE = 1 */ + +#define PPPOE_TAG_EOL 0x0000 /* end of list */ +#define PPPOE_TAG_SNAME 0x0101 /* service name */ +#define PPPOE_TAG_ACNAME 0x0102 /* access concentrator name */ +#define PPPOE_TAG_HUNIQUE 0x0103 /* host unique */ +#define PPPOE_TAG_ACCOOKIE 0x0104 /* AC cookie */ +#define PPPOE_TAG_VENDOR 0x0105 /* vendor specific */ +#define PPPOE_TAG_RELAYSID 0x0110 /* relay session id */ +#define PPPOE_TAG_SNAME_ERR 0x0201 /* service name error */ +#define PPPOE_TAG_ACSYS_ERR 0x0202 /* AC system error */ +#define PPPOE_TAG_GENERIC_ERR 0x0203 /* gerneric error */ + +#define PPPOE_CODE_PADI 0x09 /* Active Discovery Initiation */ +#define PPPOE_CODE_PADO 0x07 /* Active Discovery Offer */ +#define PPPOE_CODE_PADR 0x19 /* Active Discovery Request */ +#define PPPOE_CODE_PADS 0x65 /* Active Discovery Session confirmation */ +#define PPPOE_CODE_PADT 0xA7 /* Active Discovery Terminate */ + +#ifndef PPPOE_MAX_AC_COOKIE_LEN +#define PPPOE_MAX_AC_COOKIE_LEN 64 +#endif + +struct pppoe_softc { + struct pppoe_softc *next; + struct netif *sc_ethif; /* ethernet interface we are using */ + ppp_pcb *pcb; /* PPP PCB */ + + struct eth_addr sc_dest; /* hardware address of concentrator */ + u16_t sc_session; /* PPPoE session id */ + u8_t sc_state; /* discovery phase or session connected */ + +#ifdef PPPOE_TODO + u8_t *sc_service_name; /* if != NULL: requested name of service */ + u8_t *sc_concentrator_name; /* if != NULL: requested concentrator id */ +#endif /* PPPOE_TODO */ + u8_t sc_ac_cookie[PPPOE_MAX_AC_COOKIE_LEN]; /* content of AC cookie we must echo back */ + u8_t sc_ac_cookie_len; /* length of cookie data */ +#ifdef PPPOE_SERVER + u8_t *sc_hunique; /* content of host unique we must echo back */ + u8_t sc_hunique_len; /* length of host unique */ +#endif + u8_t sc_padi_retried; /* number of PADI retries already done */ + u8_t sc_padr_retried; /* number of PADR retries already done */ +}; + + +#define pppoe_init() /* compatibility define, no initialization needed */ + +ppp_pcb *pppoe_create(struct netif *pppif, + struct netif *ethif, + const char *service_name, const char *concentrator_name, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb); + +/* + * Functions called from lwIP + * DO NOT CALL FROM lwIP USER APPLICATION. 
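pppoe_create() above only allocates and wires up the session; bringing the link up and down goes through the generic ppp_* calls from ppp.h. A minimal client-side sketch, assuming an already initialised Ethernet netif and omitting error handling (credentials and callback names are placeholders):

#include "netif/ppp/ppp.h"
#include "netif/ppp/pppoe.h"

static struct netif ppp_netif;

static void link_status_cb(ppp_pcb *pcb, int err_code, void *ctx)
{
  if (err_code == PPPERR_NONE) {
    /* session is up; addresses are configured on the PPP netif */
  }
}

void pppoe_client_start(struct netif *ethif)
{
  ppp_pcb *ppp = pppoe_create(&ppp_netif, ethif,
                              NULL /* any service */, NULL /* any AC */,
                              link_status_cb, NULL);
  ppp_set_auth(ppp, PPPAUTHTYPE_PAP, "user", "password"); /* if the AC requires it */
  ppp_set_default(ppp);   /* route unmatched traffic through the PPP netif */
  ppp_connect(ppp, 0);    /* start PADI discovery with no hold-off */
}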
+ */ +void pppoe_disc_input(struct netif *netif, struct pbuf *p); +void pppoe_data_input(struct netif *netif, struct pbuf *p); + +#ifdef __cplusplus +} +#endif + +#endif /* PPP_OE_H */ + +#endif /* PPP_SUPPORT && PPPOE_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppol2tp.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppol2tp.h new file mode 100644 index 0000000..1221ca1 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppol2tp.h @@ -0,0 +1,209 @@ +/** + * @file + * Network Point to Point Protocol over Layer 2 Tunneling Protocol header file. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPPOL2TP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef PPPOL2TP_H +#define PPPOL2TP_H + +#include "ppp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Timeout */ +#define PPPOL2TP_CONTROL_TIMEOUT (5*1000) /* base for quick timeout calculation */ +#define PPPOL2TP_SLOW_RETRY (60*1000) /* persistent retry interval */ + +#define PPPOL2TP_MAXSCCRQ 4 /* retry SCCRQ four times (quickly) */ +#define PPPOL2TP_MAXICRQ 4 /* retry IRCQ four times */ +#define PPPOL2TP_MAXICCN 4 /* retry ICCN four times */ + +/* L2TP header flags */ +#define PPPOL2TP_HEADERFLAG_CONTROL 0x8000 +#define PPPOL2TP_HEADERFLAG_LENGTH 0x4000 +#define PPPOL2TP_HEADERFLAG_SEQUENCE 0x0800 +#define PPPOL2TP_HEADERFLAG_OFFSET 0x0200 +#define PPPOL2TP_HEADERFLAG_PRIORITY 0x0100 +#define PPPOL2TP_HEADERFLAG_VERSION 0x0002 + +/* Mandatory bits for control: Control, Length, Sequence, Version 2 */ +#define PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY (PPPOL2TP_HEADERFLAG_CONTROL|PPPOL2TP_HEADERFLAG_LENGTH|PPPOL2TP_HEADERFLAG_SEQUENCE|PPPOL2TP_HEADERFLAG_VERSION) +/* Forbidden bits for control: Offset, Priority */ +#define PPPOL2TP_HEADERFLAG_CONTROL_FORBIDDEN (PPPOL2TP_HEADERFLAG_OFFSET|PPPOL2TP_HEADERFLAG_PRIORITY) + +/* Mandatory bits for data: Version 2 */ +#define PPPOL2TP_HEADERFLAG_DATA_MANDATORY (PPPOL2TP_HEADERFLAG_VERSION) + +/* AVP (Attribute Value Pair) header */ +#define PPPOL2TP_AVPHEADERFLAG_MANDATORY 0x8000 +#define PPPOL2TP_AVPHEADERFLAG_HIDDEN 0x4000 +#define PPPOL2TP_AVPHEADERFLAG_LENGTHMASK 0x03ff + +/* -- AVP - Message type */ +#define PPPOL2TP_AVPTYPE_MESSAGE 0 /* Message type */ + +/* Control Connection Management */ +#define PPPOL2TP_MESSAGETYPE_SCCRQ 1 /* Start Control Connection Request */ +#define PPPOL2TP_MESSAGETYPE_SCCRP 2 /* Start Control Connection Reply */ +#define PPPOL2TP_MESSAGETYPE_SCCCN 3 /* Start Control Connection Connected */ +#define PPPOL2TP_MESSAGETYPE_STOPCCN 4 /* Stop Control Connection Notification */ +#define PPPOL2TP_MESSAGETYPE_HELLO 6 /* Hello */ +/* Call Management */ +#define PPPOL2TP_MESSAGETYPE_OCRQ 7 /* Outgoing Call Request */ +#define PPPOL2TP_MESSAGETYPE_OCRP 8 /* Outgoing Call Reply */ +#define PPPOL2TP_MESSAGETYPE_OCCN 9 /* Outgoing Call Connected */ +#define PPPOL2TP_MESSAGETYPE_ICRQ 10 /* Incoming Call Request */ +#define PPPOL2TP_MESSAGETYPE_ICRP 11 /* Incoming Call Reply */ +#define PPPOL2TP_MESSAGETYPE_ICCN 12 /* Incoming Call Connected */ +#define PPPOL2TP_MESSAGETYPE_CDN 14 /* Call Disconnect Notify */ +/* Error reporting */ +#define PPPOL2TP_MESSAGETYPE_WEN 15 /* WAN Error Notify */ +/* PPP Session Control */ +#define PPPOL2TP_MESSAGETYPE_SLI 16 /* Set Link Info */ + +/* -- AVP - Result code */ +#define PPPOL2TP_AVPTYPE_RESULTCODE 1 /* Result code */ +#define PPPOL2TP_RESULTCODE 1 /* General request to clear control connection */ + +/* -- AVP - Protocol version (!= L2TP Header version) */ +#define PPPOL2TP_AVPTYPE_VERSION 2 +#define PPPOL2TP_VERSION 0x0100 /* L2TP Protocol version 1, revision 0 */ + +/* -- AVP - Framing capabilities */ +#define PPPOL2TP_AVPTYPE_FRAMINGCAPABILITIES 3 /* Bearer capabilities */ +#define PPPOL2TP_FRAMINGCAPABILITIES 0x00000003 /* Async + Sync framing */ + +/* -- AVP - Bearer capabilities */ +#define PPPOL2TP_AVPTYPE_BEARERCAPABILITIES 4 /* Bearer capabilities */ +#define PPPOL2TP_BEARERCAPABILITIES 0x00000003 /* Analog + Digital Access */ + +/* -- AVP - Tie breaker */ +#define PPPOL2TP_AVPTYPE_TIEBREAKER 5 + +/* -- AVP - Host name */ +#define 
PPPOL2TP_AVPTYPE_HOSTNAME 7 /* Host name */ +#define PPPOL2TP_HOSTNAME "lwIP" /* FIXME: make it configurable */ + +/* -- AVP - Vendor name */ +#define PPPOL2TP_AVPTYPE_VENDORNAME 8 /* Vendor name */ +#define PPPOL2TP_VENDORNAME "lwIP" /* FIXME: make it configurable */ + +/* -- AVP - Assign tunnel ID */ +#define PPPOL2TP_AVPTYPE_TUNNELID 9 /* Assign Tunnel ID */ + +/* -- AVP - Receive window size */ +#define PPPOL2TP_AVPTYPE_RECEIVEWINDOWSIZE 10 /* Receive window size */ +#define PPPOL2TP_RECEIVEWINDOWSIZE 8 /* FIXME: make it configurable */ + +/* -- AVP - Challenge */ +#define PPPOL2TP_AVPTYPE_CHALLENGE 11 /* Challenge */ + +/* -- AVP - Cause code */ +#define PPPOL2TP_AVPTYPE_CAUSECODE 12 /* Cause code*/ + +/* -- AVP - Challenge response */ +#define PPPOL2TP_AVPTYPE_CHALLENGERESPONSE 13 /* Challenge response */ +#define PPPOL2TP_AVPTYPE_CHALLENGERESPONSE_SIZE 16 + +/* -- AVP - Assign session ID */ +#define PPPOL2TP_AVPTYPE_SESSIONID 14 /* Assign Session ID */ + +/* -- AVP - Call serial number */ +#define PPPOL2TP_AVPTYPE_CALLSERIALNUMBER 15 /* Call Serial Number */ + +/* -- AVP - Framing type */ +#define PPPOL2TP_AVPTYPE_FRAMINGTYPE 19 /* Framing Type */ +#define PPPOL2TP_FRAMINGTYPE 0x00000001 /* Sync framing */ + +/* -- AVP - TX Connect Speed */ +#define PPPOL2TP_AVPTYPE_TXCONNECTSPEED 24 /* TX Connect Speed */ +#define PPPOL2TP_TXCONNECTSPEED 100000000 /* Connect speed: 100 Mbits/s */ + +/* L2TP Session state */ +#define PPPOL2TP_STATE_INITIAL 0 +#define PPPOL2TP_STATE_SCCRQ_SENT 1 +#define PPPOL2TP_STATE_ICRQ_SENT 2 +#define PPPOL2TP_STATE_ICCN_SENT 3 +#define PPPOL2TP_STATE_DATA 4 + +#define PPPOL2TP_OUTPUT_DATA_HEADER_LEN 6 /* Our data header len */ + +/* + * PPPoL2TP interface control block. + */ +typedef struct pppol2tp_pcb_s pppol2tp_pcb; +struct pppol2tp_pcb_s { + ppp_pcb *ppp; /* PPP PCB */ + u8_t phase; /* L2TP phase */ + struct udp_pcb *udp; /* UDP L2TP Socket */ + struct netif *netif; /* Output interface, used as a default route */ + ip_addr_t remote_ip; /* LNS IP Address */ + u16_t remote_port; /* LNS port */ +#if PPPOL2TP_AUTH_SUPPORT + const u8_t *secret; /* Secret string */ + u8_t secret_len; /* Secret string length */ + u8_t secret_rv[16]; /* Random vector */ + u8_t challenge_hash[16]; /* Challenge response */ + u8_t send_challenge; /* Boolean whether the next sent packet should contains a challenge response */ +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + u16_t tunnel_port; /* Tunnel port */ + u16_t our_ns; /* NS to peer */ + u16_t peer_nr; /* NR from peer */ + u16_t peer_ns; /* Expected NS from peer */ + u16_t source_tunnel_id; /* Tunnel ID assigned by peer */ + u16_t remote_tunnel_id; /* Tunnel ID assigned to peer */ + u16_t source_session_id; /* Session ID assigned by peer */ + u16_t remote_session_id; /* Session ID assigned to peer */ + + u8_t sccrq_retried; /* number of SCCRQ retries already done */ + u8_t icrq_retried; /* number of ICRQ retries already done */ + u8_t iccn_retried; /* number of ICCN retries already done */ +}; + + +/* Create a new L2TP session. 
*/ +ppp_pcb *pppol2tp_create(struct netif *pppif, + struct netif *netif, const ip_addr_t *ipaddr, u16_t port, + const u8_t *secret, u8_t secret_len, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb); + +#ifdef __cplusplus +} +#endif + +#endif /* PPPOL2TP_H */ +#endif /* PPP_SUPPORT && PPPOL2TP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppos.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppos.h new file mode 100644 index 0000000..380a965 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppos.h @@ -0,0 +1,126 @@ +/** + * @file + * Network Point to Point Protocol over Serial header file. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPPOS_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef PPPOS_H +#define PPPOS_H + +#include "lwip/sys.h" + +#include "ppp.h" +#include "vj.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* PPP packet parser states. Current state indicates operation yet to be + * completed. */ +enum { + PDIDLE = 0, /* Idle state - waiting. */ + PDSTART, /* Process start flag. */ + PDADDRESS, /* Process address field. */ + PDCONTROL, /* Process control field. */ + PDPROTOCOL1, /* Process protocol field 1. */ + PDPROTOCOL2, /* Process protocol field 2. */ + PDDATA /* Process data byte. */ +}; + +/* PPPoS serial output callback function prototype */ +typedef u32_t (*pppos_output_cb_fn)(ppp_pcb *pcb, u8_t *data, u32_t len, void *ctx); + +/* + * Extended asyncmap - allows any character to be escaped. + */ +typedef u8_t ext_accm[32]; + +/* + * PPPoS interface control block. + */ +typedef struct pppos_pcb_s pppos_pcb; +struct pppos_pcb_s { + /* -- below are data that will NOT be cleared between two sessions */ + ppp_pcb *ppp; /* PPP PCB */ + pppos_output_cb_fn output_cb; /* PPP serial output callback */ + + /* -- below are data that will be cleared between two sessions + * + * last_xmit must be the first member of cleared members, because it is + * used to know which part must not be cleared. 
+ */ + u32_t last_xmit; /* Time of last transmission. */ + ext_accm out_accm; /* Async-Ctl-Char-Map for output. */ + + /* flags */ + unsigned int open :1; /* Set if PPPoS is open */ + unsigned int pcomp :1; /* Does peer accept protocol compression? */ + unsigned int accomp :1; /* Does peer accept addr/ctl compression? */ + + /* PPPoS rx */ + ext_accm in_accm; /* Async-Ctl-Char-Map for input. */ + struct pbuf *in_head, *in_tail; /* The input packet. */ + u16_t in_protocol; /* The input protocol code. */ + u16_t in_fcs; /* Input Frame Check Sequence value. */ + u8_t in_state; /* The input process state. */ + u8_t in_escaped; /* Escape next character. */ +}; + +/* Create a new PPPoS session. */ +ppp_pcb *pppos_create(struct netif *pppif, pppos_output_cb_fn output_cb, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb); + +#if !NO_SYS && !PPP_INPROC_IRQ_SAFE +/* Pass received raw characters to PPPoS to be decoded through lwIP TCPIP thread. */ +err_t pppos_input_tcpip(ppp_pcb *ppp, u8_t *s, int l); +#endif /* !NO_SYS && !PPP_INPROC_IRQ_SAFE */ + +/* PPP over Serial: this is the input function to be called for received data. */ +void pppos_input(ppp_pcb *ppp, u8_t* data, int len); + + +/* + * Functions called from lwIP + * DO NOT CALL FROM lwIP USER APPLICATION. + */ +#if !NO_SYS && !PPP_INPROC_IRQ_SAFE +err_t pppos_input_sys(struct pbuf *p, struct netif *inp); +#endif /* !NO_SYS && !PPP_INPROC_IRQ_SAFE */ + +#ifdef __cplusplus +} +#endif + +#endif /* PPPOS_H */ +#endif /* PPP_SUPPORT && PPPOL2TP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/upap.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/upap.h new file mode 100644 index 0000000..540d981 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/upap.h @@ -0,0 +1,131 @@ +/* + * upap.h - User/Password Authentication Protocol definitions. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." 
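For the serial variant, the application owns both directions: pppos_create() above takes an output callback that pushes escaped bytes to the UART, and received bytes are handed back through pppos_input_tcpip() in a threaded (NO_SYS==0) build; pppapi_pppos_create() is the thread-safe constructor variant. A sketch with placeholder UART helpers:

#include "netif/ppp/ppp.h"
#include "netif/ppp/pppos.h"

static struct netif ppp_netif;
static ppp_pcb *ppp;

/* assumed board-specific transmit helper, not part of lwIP */
extern u32_t uart_write(const u8_t *data, u32_t len);

static u32_t output_cb(ppp_pcb *pcb, u8_t *data, u32_t len, void *ctx)
{
  return uart_write(data, len);    /* report how many bytes were actually sent */
}

static void link_status_cb(ppp_pcb *pcb, int err_code, void *ctx)
{
  /* err_code == PPPERR_NONE: session up; anything else: closed or failed */
}

void pppos_client_start(void)
{
  ppp = pppos_create(&ppp_netif, output_cb, link_status_cb, NULL);
  ppp_connect(ppp, 0);
}

/* called from the UART receive task with each chunk of raw bytes */
void pppos_client_rx(u8_t *buf, int len)
{
  pppos_input_tcpip(ppp, buf, len);
}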
+ * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: upap.h,v 1.8 2002/12/04 23:03:33 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef UPAP_H +#define UPAP_H + +#include "ppp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Packet header = Code, id, length. + */ +#define UPAP_HEADERLEN 4 + + +/* + * UPAP codes. + */ +#define UPAP_AUTHREQ 1 /* Authenticate-Request */ +#define UPAP_AUTHACK 2 /* Authenticate-Ack */ +#define UPAP_AUTHNAK 3 /* Authenticate-Nak */ + + +/* + * Client states. + */ +#define UPAPCS_INITIAL 0 /* Connection down */ +#define UPAPCS_CLOSED 1 /* Connection up, haven't requested auth */ +#define UPAPCS_PENDING 2 /* Connection down, have requested auth */ +#define UPAPCS_AUTHREQ 3 /* We've sent an Authenticate-Request */ +#define UPAPCS_OPEN 4 /* We've received an Ack */ +#define UPAPCS_BADAUTH 5 /* We've received a Nak */ + +/* + * Server states. + */ +#define UPAPSS_INITIAL 0 /* Connection down */ +#define UPAPSS_CLOSED 1 /* Connection up, haven't requested auth */ +#define UPAPSS_PENDING 2 /* Connection down, have requested auth */ +#define UPAPSS_LISTEN 3 /* Listening for an Authenticate */ +#define UPAPSS_OPEN 4 /* We've sent an Ack */ +#define UPAPSS_BADAUTH 5 /* We've sent a Nak */ + + +/* + * Timeouts. + */ +#if 0 /* moved to ppp_opts.h */ +#define UPAP_DEFTIMEOUT 3 /* Timeout (seconds) for retransmitting req */ +#define UPAP_DEFREQTIME 30 /* Time to wait for auth-req from peer */ +#endif /* moved to ppp_opts.h */ + +/* + * Each interface is described by upap structure. + */ +#if PAP_SUPPORT +typedef struct upap_state { + const char *us_user; /* User */ + u8_t us_userlen; /* User length */ + const char *us_passwd; /* Password */ + u8_t us_passwdlen; /* Password length */ + u8_t us_clientstate; /* Client state */ +#if PPP_SERVER + u8_t us_serverstate; /* Server state */ +#endif /* PPP_SERVER */ + u8_t us_id; /* Current id */ + u8_t us_transmits; /* Number of auth-reqs sent */ +} upap_state; +#endif /* PAP_SUPPORT */ + + +void upap_authwithpeer(ppp_pcb *pcb, const char *user, const char *password); +#if PPP_SERVER +void upap_authpeer(ppp_pcb *pcb); +#endif /* PPP_SERVER */ + +extern const struct protent pap_protent; + +#ifdef __cplusplus +} +#endif + +#endif /* UPAP_H */ +#endif /* PPP_SUPPORT && PAP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/vj.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/vj.h new file mode 100644 index 0000000..77d9976 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/vj.h @@ -0,0 +1,169 @@ +/* + * Definitions for tcp compression routines. + * + * $Id: vj.h,v 1.7 2010/02/22 17:52:09 goldsimon Exp $ + * + * Copyright (c) 1989 Regents of the University of California. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms are permitted + * provided that the above copyright notice and this paragraph are + * duplicated in all such forms and that any documentation, + * advertising materials, and other materials related to such + * distribution and use acknowledge that the software was developed + * by the University of California, Berkeley. The name of the + * University may not be used to endorse or promote products derived + * from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + * Van Jacobson (van@helios.ee.lbl.gov), Dec 31, 1989: + * - Initial distribution. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && VJ_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef VJ_H +#define VJ_H + +#include "lwip/ip.h" +#include "lwip/priv/tcp_priv.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define MAX_SLOTS 16 /* must be > 2 and < 256 */ +#define MAX_HDR 128 + +/* + * Compressed packet format: + * + * The first octet contains the packet type (top 3 bits), TCP + * 'push' bit, and flags that indicate which of the 4 TCP sequence + * numbers have changed (bottom 5 bits). The next octet is a + * conversation number that associates a saved IP/TCP header with + * the compressed packet. The next two octets are the TCP checksum + * from the original datagram. The next 0 to 15 octets are + * sequence number changes, one change per bit set in the header + * (there may be no changes and there are two special cases where + * the receiver implicitly knows what changed -- see below). + * + * There are 5 numbers which can change (they are always inserted + * in the following order): TCP urgent pointer, window, + * acknowlegement, sequence number and IP ID. (The urgent pointer + * is different from the others in that its value is sent, not the + * change in value.) Since typical use of SLIP links is biased + * toward small packets (see comments on MTU/MSS below), changes + * use a variable length coding with one octet for numbers in the + * range 1 - 255 and 3 octets (0, MSB, LSB) for numbers in the + * range 256 - 65535 or 0. (If the change in sequence number or + * ack is more than 65535, an uncompressed packet is sent.) + */ + +/* + * Packet types (must not conflict with IP protocol version) + * + * The top nibble of the first octet is the packet type. There are + * three possible types: IP (not proto TCP or tcp with one of the + * control flags set); uncompressed TCP (a normal IP/TCP packet but + * with the 8-bit protocol field replaced by an 8-bit connection id -- + * this type of packet syncs the sender & receiver); and compressed + * TCP (described above). + * + * LSB of 4-bit field is TCP "PUSH" bit (a worthless anachronism) and + * is logically part of the 4-bit "changes" field that follows. Top + * three bits are actual packet type. For backward compatibility + * and in the interest of conserving bits, numbers are chosen so the + * IP protocol version number (4) which normally appears in this nibble + * means "IP packet". 
+ */ + +/* packet types */ +#define TYPE_IP 0x40 +#define TYPE_UNCOMPRESSED_TCP 0x70 +#define TYPE_COMPRESSED_TCP 0x80 +#define TYPE_ERROR 0x00 + +/* Bits in first octet of compressed packet */ +#define NEW_C 0x40 /* flag bits for what changed in a packet */ +#define NEW_I 0x20 +#define NEW_S 0x08 +#define NEW_A 0x04 +#define NEW_W 0x02 +#define NEW_U 0x01 + +/* reserved, special-case values of above */ +#define SPECIAL_I (NEW_S|NEW_W|NEW_U) /* echoed interactive traffic */ +#define SPECIAL_D (NEW_S|NEW_A|NEW_W|NEW_U) /* unidirectional data */ +#define SPECIALS_MASK (NEW_S|NEW_A|NEW_W|NEW_U) + +#define TCP_PUSH_BIT 0x10 + + +/* + * "state" data for each active tcp conversation on the wire. This is + * basically a copy of the entire IP/TCP header from the last packet + * we saw from the conversation together with a small identifier + * the transmit & receive ends of the line use to locate saved header. + */ +struct cstate { + struct cstate *cs_next; /* next most recently used state (xmit only) */ + u16_t cs_hlen; /* size of hdr (receive only) */ + u8_t cs_id; /* connection # associated with this state */ + u8_t cs_filler; + union { + char csu_hdr[MAX_HDR]; + struct ip_hdr csu_ip; /* ip/tcp hdr from most recent packet */ + } vjcs_u; +}; +#define cs_ip vjcs_u.csu_ip +#define cs_hdr vjcs_u.csu_hdr + + +struct vjstat { + u32_t vjs_packets; /* outbound packets */ + u32_t vjs_compressed; /* outbound compressed packets */ + u32_t vjs_searches; /* searches for connection state */ + u32_t vjs_misses; /* times couldn't find conn. state */ + u32_t vjs_uncompressedin; /* inbound uncompressed packets */ + u32_t vjs_compressedin; /* inbound compressed packets */ + u32_t vjs_errorin; /* inbound unknown type packets */ + u32_t vjs_tossed; /* inbound packets tossed because of error */ +}; + +/* + * all the state data for one serial line (we need one of these per line). + */ +struct vjcompress { + struct cstate *last_cs; /* most recently used tstate */ + u8_t last_recv; /* last rcvd conn. id */ + u8_t last_xmit; /* last sent conn. id */ + u16_t flags; + u8_t maxSlotIndex; + u8_t compressSlot; /* Flag indicating OK to compress slot ID. */ +#if LINK_STATS + struct vjstat stats; +#endif + struct cstate tstate[MAX_SLOTS]; /* xmit connection states */ + struct cstate rstate[MAX_SLOTS]; /* receive connection states */ +}; + +/* flag values */ +#define VJF_TOSS 1U /* tossing rcvd frames because of input err */ + +extern void vj_compress_init (struct vjcompress *comp); +extern u8_t vj_compress_tcp (struct vjcompress *comp, struct pbuf **pb); +extern void vj_uncompress_err (struct vjcompress *comp); +extern int vj_uncompress_uncomp(struct pbuf *nb, struct vjcompress *comp); +extern int vj_uncompress_tcp (struct pbuf **nb, struct vjcompress *comp); + +#ifdef __cplusplus +} +#endif + +#endif /* VJ_H */ + +#endif /* PPP_SUPPORT && VJ_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/slipif.h b/Middlewares/Third_Party/LwIP/src/include/netif/slipif.h new file mode 100644 index 0000000..65ba31f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/slipif.h @@ -0,0 +1,87 @@ +/** + * @file + * + * SLIP netif API + */ + +/* + * Copyright (c) 2001, Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the Institute nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_NETIF_SLIPIF_H +#define LWIP_HDR_NETIF_SLIPIF_H + +#include "lwip/opt.h" +#include "lwip/netif.h" + +/** Set this to 1 to start a thread that blocks reading on the serial line + * (using sio_read()). + */ +#ifndef SLIP_USE_RX_THREAD +#define SLIP_USE_RX_THREAD !NO_SYS +#endif + +/** Set this to 1 to enable functions to pass in RX bytes from ISR context. + * If enabled, slipif_received_byte[s]() process incoming bytes and put assembled + * packets on a queue, which is fed into lwIP from slipif_poll(). + * If disabled, slipif_poll() polls the serial line (using sio_tryread()). + */ +#ifndef SLIP_RX_FROM_ISR +#define SLIP_RX_FROM_ISR 0 +#endif + +/** Set this to 1 (default for SLIP_RX_FROM_ISR) to queue incoming packets + * received by slipif_received_byte[s]() as long as PBUF_POOL pbufs are available. + * If disabled, packets will be dropped if more than one packet is received. + */ +#ifndef SLIP_RX_QUEUE +#define SLIP_RX_QUEUE SLIP_RX_FROM_ISR +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +err_t slipif_init(struct netif * netif); +void slipif_poll(struct netif *netif); +#if SLIP_RX_FROM_ISR +void slipif_process_rxqueue(struct netif *netif); +void slipif_received_byte(struct netif *netif, u8_t data); +void slipif_received_bytes(struct netif *netif, u8_t *data, u8_t len); +#endif /* SLIP_RX_FROM_ISR */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_NETIF_SLIPIF_H */ + diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/zepif.h b/Middlewares/Third_Party/LwIP/src/include/netif/zepif.h new file mode 100644 index 0000000..2a801b4 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/zepif.h @@ -0,0 +1,81 @@ +/** + * @file + * + * A netif implementing the ZigBee Eencapsulation Protocol (ZEP). + * This is used to tunnel 6LowPAN over UDP. + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
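slipif_init() above is the init function to hand to netif_add(); with SLIP_USE_RX_THREAD disabled the application drives reception by polling. A sketch assuming a static IPv4 configuration (which serial device gets opened is decided inside slipif.c via sio_open(), so check that file for port selection):

#include "lwip/netif.h"
#include "lwip/tcpip.h"
#include "netif/slipif.h"

static struct netif slip_netif;

void slip_if_setup(void)
{
  ip4_addr_t ip, mask, gw;

  IP4_ADDR(&ip,   192, 168, 7, 2);
  IP4_ADDR(&mask, 255, 255, 255, 0);
  IP4_ADDR(&gw,   192, 168, 7, 1);

  netif_add(&slip_netif, &ip, &mask, &gw, NULL, slipif_init, tcpip_input);
  netif_set_up(&slip_netif);
}

/* with SLIP_USE_RX_THREAD == 0 and SLIP_RX_FROM_ISR == 0, call this
 * periodically so slipif can pull pending bytes via sio_tryread() */
void slip_if_poll(void)
{
  slipif_poll(&slip_netif);
}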
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#ifndef LWIP_HDR_ZEPIF_H +#define LWIP_HDR_ZEPIF_H + +#include "lwip/opt.h" +#include "netif/lowpan6.h" + +#if LWIP_IPV6 && LWIP_UDP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define ZEPIF_DEFAULT_UDP_PORT 17754 + +/** Pass this struct as 'state' to netif_add to control the behaviour + * of this netif. If NULL is passed, default behaviour is chosen */ +struct zepif_init { + /** The UDP port used to ZEP frames from (0 = default) */ + u16_t zep_src_udp_port; + /** The UDP port used to ZEP frames to (0 = default) */ + u16_t zep_dst_udp_port; + /** The IP address to sed ZEP frames from (NULL = ANY) */ + const ip_addr_t *zep_src_ip_addr; + /** The IP address to sed ZEP frames to (NULL = BROADCAST) */ + const ip_addr_t *zep_dst_ip_addr; + /** If != NULL, the udp pcb is bound to this netif */ + const struct netif *zep_netif; + /** MAC address of the 6LowPAN device */ + u8_t addr[6]; +}; + +err_t zepif_init(struct netif *netif); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 && LWIP_UDP */ + +#endif /* LWIP_HDR_ZEPIF_H */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/bridgeif.c b/Middlewares/Third_Party/LwIP/src/netif/bridgeif.c new file mode 100644 index 0000000..8a97bce --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/bridgeif.c @@ -0,0 +1,563 @@ +/** + * @file + * lwIP netif implementing an IEEE 802.1D MAC Bridge + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
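The zepif_init structure above is passed as the state argument of netif_add(); fields left at 0/NULL fall back to the defaults noted in its comments (UDP port 17754, any source address, broadcast destination). A sketch assuming an IPv6-only build where the four-argument netif_add() and tcpip_6lowpan_input() from lowpan6.h are available; the MAC value is a placeholder:

#include <string.h>
#include "lwip/netif.h"
#include "netif/zepif.h"

static struct netif zep_netif;
static struct zepif_init zep_cfg;

void zep_if_setup(void)
{
  memset(&zep_cfg, 0, sizeof(zep_cfg));      /* all defaults */
  zep_cfg.addr[5] = 0x01;                    /* placeholder 6LoWPAN MAC, last byte */

  /* with LWIP_IPV4 == 0, netif_add() takes no IPv4 address arguments */
  netif_add(&zep_netif, &zep_cfg, zepif_init, tcpip_6lowpan_input);
  netif_set_up(&zep_netif);
}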
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +/** + * @defgroup bridgeif IEEE 802.1D bridge + * @ingroup netifs + * This file implements an IEEE 802.1D bridge by using a multilayer netif approach + * (one hardware-independent netif for the bridge that uses hardware netifs for its ports). + * On transmit, the bridge selects the outgoing port(s). + * On receive, the port netif calls into the bridge (via its netif->input function) and + * the bridge selects the port(s) (and/or its netif->input function) to pass the received pbuf to. + * + * Usage: + * - add the port netifs just like you would when using them as dedicated netif without a bridge + * - only NETIF_FLAG_ETHARP/NETIF_FLAG_ETHERNET netifs are supported as bridge ports + * - add the bridge port netifs without IPv4 addresses (i.e. pass 'NULL, NULL, NULL') + * - don't add IPv6 addresses to the port netifs! + * - set up the bridge configuration in a global variable of type 'bridgeif_initdata_t' that contains + * - the MAC address of the bridge + * - some configuration options controlling the memory consumption (maximum number of ports + * and FDB entries) + * - e.g. for a bridge MAC address 00-01-02-03-04-05, 2 bridge ports, 1024 FDB entries + 16 static MAC entries: + * bridgeif_initdata_t mybridge_initdata = BRIDGEIF_INITDATA1(2, 1024, 16, ETH_ADDR(0, 1, 2, 3, 4, 5)); + * - add the bridge netif (with IPv4 config): + * struct netif bridge_netif; + * netif_add(&bridge_netif, &my_ip, &my_netmask, &my_gw, &mybridge_initdata, bridgeif_init, tcpip_input); + * NOTE: the passed 'input' function depends on BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT setting, + * which controls where the forwarding is done (netif low level input context vs. tcpip_thread) + * - set up all ports netifs and the bridge netif + * + * - When adding a port netif, NETIF_FLAG_ETHARP flag will be removed from a port + * to prevent ETHARP working on that port netif (we only want one IP per bridge not per port). + * - When adding a port netif, its input function is changed to call into the bridge. + * + * + * @todo: + * - compact static FDB entries (instead of walking the whole array) + * - add FDB query/read access + * - add FDB change callback (when learning or dropping auto-learned entries) + * - prefill FDB with MAC classes that should never be forwarded + * - multicast snooping? (and only forward group addresses to interested ports) + * - support removing ports + * - check SNMP integration + * - VLAN handling / trunk ports + * - priority handling? 
(although that largely depends on TX queue limitations and lwIP doesn't provide tx-done handling) + */ + +#include "netif/bridgeif.h" +#include "lwip/netif.h" +#include "lwip/sys.h" +#include "lwip/etharp.h" +#include "lwip/ethip6.h" +#include "lwip/snmp.h" +#include "lwip/timeouts.h" +#include + +#if LWIP_NUM_NETIF_CLIENT_DATA + +/* Define those to better describe your network interface. */ +#define IFNAME0 'b' +#define IFNAME1 'r' + +struct bridgeif_private_s; +typedef struct bridgeif_port_private_s { + struct bridgeif_private_s *bridge; + struct netif *port_netif; + u8_t port_num; +} bridgeif_port_t; + +typedef struct bridgeif_fdb_static_entry_s { + u8_t used; + bridgeif_portmask_t dst_ports; + struct eth_addr addr; +} bridgeif_fdb_static_entry_t; + +typedef struct bridgeif_private_s { + struct netif *netif; + struct eth_addr ethaddr; + u8_t max_ports; + u8_t num_ports; + bridgeif_port_t *ports; + u16_t max_fdbs_entries; + bridgeif_fdb_static_entry_t *fdbs; + u16_t max_fdbd_entries; + void *fdbd; +} bridgeif_private_t; + +/* netif data index to get the bridge on input */ +u8_t bridgeif_netif_client_id = 0xff; + +/** + * @ingroup bridgeif + * Add a static entry to the forwarding database. + * A static entry marks where frames to a specific eth address (unicast or group address) are + * forwarded. + * bits [0..(BRIDGEIF_MAX_PORTS-1)]: hw ports + * bit [BRIDGEIF_MAX_PORTS]: cpu port + * 0: drop + */ +err_t +bridgeif_fdb_add(struct netif *bridgeif, const struct eth_addr *addr, bridgeif_portmask_t ports) +{ + int i; + bridgeif_private_t *br; + BRIDGEIF_DECL_PROTECT(lev); + LWIP_ASSERT("invalid netif", bridgeif != NULL); + br = (bridgeif_private_t *)bridgeif->state; + LWIP_ASSERT("invalid state", br != NULL); + + BRIDGEIF_READ_PROTECT(lev); + for (i = 0; i < br->max_fdbs_entries; i++) { + if (!br->fdbs[i].used) { + BRIDGEIF_WRITE_PROTECT(lev); + if (!br->fdbs[i].used) { + br->fdbs[i].used = 1; + br->fdbs[i].dst_ports = ports; + memcpy(&br->fdbs[i].addr, addr, sizeof(struct eth_addr)); + BRIDGEIF_WRITE_UNPROTECT(lev); + BRIDGEIF_READ_UNPROTECT(lev); + return ERR_OK; + } + BRIDGEIF_WRITE_UNPROTECT(lev); + } + } + BRIDGEIF_READ_UNPROTECT(lev); + return ERR_MEM; +} + +/** + * @ingroup bridgeif + * Remove a static entry from the forwarding database + */ +err_t +bridgeif_fdb_remove(struct netif *bridgeif, const struct eth_addr *addr) +{ + int i; + bridgeif_private_t *br; + BRIDGEIF_DECL_PROTECT(lev); + LWIP_ASSERT("invalid netif", bridgeif != NULL); + br = (bridgeif_private_t *)bridgeif->state; + LWIP_ASSERT("invalid state", br != NULL); + + BRIDGEIF_READ_PROTECT(lev); + for (i = 0; i < br->max_fdbs_entries; i++) { + if (br->fdbs[i].used && !memcmp(&br->fdbs[i].addr, addr, sizeof(struct eth_addr))) { + BRIDGEIF_WRITE_PROTECT(lev); + if (br->fdbs[i].used && !memcmp(&br->fdbs[i].addr, addr, sizeof(struct eth_addr))) { + memset(&br->fdbs[i], 0, sizeof(bridgeif_fdb_static_entry_t)); + BRIDGEIF_WRITE_UNPROTECT(lev); + BRIDGEIF_READ_UNPROTECT(lev); + return ERR_OK; + } + BRIDGEIF_WRITE_UNPROTECT(lev); + } + } + BRIDGEIF_READ_UNPROTECT(lev); + return ERR_VAL; +} + +/** Get the forwarding port(s) (as bit mask) for the specified destination mac address */ +static bridgeif_portmask_t +bridgeif_find_dst_ports(bridgeif_private_t *br, struct eth_addr *dst_addr) +{ + int i; + BRIDGEIF_DECL_PROTECT(lev); + BRIDGEIF_READ_PROTECT(lev); + /* first check for static entries */ + for (i = 0; i < br->max_fdbs_entries; i++) { + if (br->fdbs[i].used) { + if (!memcmp(&br->fdbs[i].addr, dst_addr, sizeof(struct eth_addr))) { 
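+        /* a static FDB entry matches: return its configured destination port mask */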
+ bridgeif_portmask_t ret = br->fdbs[i].dst_ports; + BRIDGEIF_READ_UNPROTECT(lev); + return ret; + } + } + } + if (dst_addr->addr[0] & 1) { + /* no match found: flood remaining group address */ + BRIDGEIF_READ_UNPROTECT(lev); + return BR_FLOOD; + } + BRIDGEIF_READ_UNPROTECT(lev); + /* no match found: check dynamic fdb for port or fall back to flooding */ + return bridgeif_fdb_get_dst_ports(br->fdbd, dst_addr); +} + +/** Helper function to see if a destination mac belongs to the bridge + * (bridge netif or one of the port netifs), in which case the frame + * is sent to the cpu only. + */ +static int +bridgeif_is_local_mac(bridgeif_private_t *br, struct eth_addr *addr) +{ + int i; + BRIDGEIF_DECL_PROTECT(lev); + if (!memcmp(br->netif->hwaddr, addr, sizeof(struct eth_addr))) { + return 1; + } + BRIDGEIF_READ_PROTECT(lev); + for (i = 0; i < br->num_ports; i++) { + struct netif *portif = br->ports[i].port_netif; + if (portif != NULL) { + if (!memcmp(portif->hwaddr, addr, sizeof(struct eth_addr))) { + BRIDGEIF_READ_UNPROTECT(lev); + return 1; + } + } + } + BRIDGEIF_READ_UNPROTECT(lev); + return 0; +} + +/* Output helper function */ +static err_t +bridgeif_send_to_port(bridgeif_private_t *br, struct pbuf *p, u8_t dstport_idx) +{ + if (dstport_idx < BRIDGEIF_MAX_PORTS) { + /* possibly an external port */ + if (dstport_idx < br->max_ports) { + struct netif *portif = br->ports[dstport_idx].port_netif; + if ((portif != NULL) && (portif->linkoutput != NULL)) { + /* prevent sending out to rx port */ + if (netif_get_index(portif) != p->if_idx) { + if (netif_is_link_up(portif)) { + LWIP_DEBUGF(BRIDGEIF_FW_DEBUG, ("br -> flood(%p:%d) -> %d\n", (void *)p, p->if_idx, netif_get_index(portif))); + return portif->linkoutput(portif, p); + } + } + } + } + } else { + LWIP_ASSERT("invalid port index", dstport_idx == BRIDGEIF_MAX_PORTS); + } + return ERR_OK; +} + +/** Helper function to pass a pbuf to all ports marked in 'dstports' + */ +static err_t +bridgeif_send_to_ports(bridgeif_private_t *br, struct pbuf *p, bridgeif_portmask_t dstports) +{ + err_t err, ret_err = ERR_OK; + u8_t i; + bridgeif_portmask_t mask = 1; + BRIDGEIF_DECL_PROTECT(lev); + BRIDGEIF_READ_PROTECT(lev); + for (i = 0; i < BRIDGEIF_MAX_PORTS; i++, mask = (bridgeif_portmask_t)(mask << 1)) { + if (dstports & mask) { + err = bridgeif_send_to_port(br, p, i); + if (err != ERR_OK) { + ret_err = err; + } + } + } + BRIDGEIF_READ_UNPROTECT(lev); + return ret_err; +} + +/** Output function of the application port of the bridge (the one with an ip address). + * The forwarding port(s) where this pbuf is sent on is/are automatically selected + * from the FDB. + */ +static err_t +bridgeif_output(struct netif *netif, struct pbuf *p) +{ + err_t err; + bridgeif_private_t *br = (bridgeif_private_t *)netif->state; + struct eth_addr *dst = (struct eth_addr *)(p->payload); + + bridgeif_portmask_t dstports = bridgeif_find_dst_ports(br, dst); + err = bridgeif_send_to_ports(br, p, dstports); + + MIB2_STATS_NETIF_ADD(netif, ifoutoctets, p->tot_len); + if (((u8_t *)p->payload)[0] & 1) { + /* broadcast or multicast packet*/ + MIB2_STATS_NETIF_INC(netif, ifoutnucastpkts); + } else { + /* unicast packet */ + MIB2_STATS_NETIF_INC(netif, ifoutucastpkts); + } + /* increase ifoutdiscards or ifouterrors on error */ + + LINK_STATS_INC(link.xmit); + + return err; +} + +/** The actual bridge input function. Port netif's input is changed to call + * here. This function decides where the frame is forwarded. 
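+ * In short: frames to a group address are flooded to the ports selected by the
+ * FDB and, if the cpu port bit is set, also passed up via the bridge netif's
+ * input function; frames addressed to the bridge itself or to one of its port
+ * netifs go to the bridge netif's input only; all other unicast frames are
+ * forwarded to the port(s) returned by bridgeif_find_dst_ports().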
+ */ +static err_t +bridgeif_input(struct pbuf *p, struct netif *netif) +{ + u8_t rx_idx; + bridgeif_portmask_t dstports; + struct eth_addr *src, *dst; + bridgeif_private_t *br; + bridgeif_port_t *port; + if (p == NULL || netif == NULL) { + return ERR_VAL; + } + port = (bridgeif_port_t *)netif_get_client_data(netif, bridgeif_netif_client_id); + LWIP_ASSERT("port data not set", port != NULL); + if (port == NULL || port->bridge == NULL) { + return ERR_VAL; + } + br = (bridgeif_private_t *)port->bridge; + rx_idx = netif_get_index(netif); + /* store receive index in pbuf */ + p->if_idx = rx_idx; + + dst = (struct eth_addr *)p->payload; + src = (struct eth_addr *)(((u8_t *)p->payload) + sizeof(struct eth_addr)); + + if ((src->addr[0] & 1) == 0) { + /* update src for all non-group addresses */ + bridgeif_fdb_update_src(br->fdbd, src, port->port_num); + } + + if (dst->addr[0] & 1) { + /* group address -> flood + cpu? */ + dstports = bridgeif_find_dst_ports(br, dst); + bridgeif_send_to_ports(br, p, dstports); + if (dstports & (1 << BRIDGEIF_MAX_PORTS)) { + /* we pass the reference to ->input or have to free it */ + LWIP_DEBUGF(BRIDGEIF_FW_DEBUG, ("br -> input(%p)\n", (void *)p)); + if (br->netif->input(p, br->netif) != ERR_OK) { + pbuf_free(p); + } + } else { + /* all references done */ + pbuf_free(p); + } + /* always return ERR_OK here to prevent the caller freeing the pbuf */ + return ERR_OK; + } else { + /* is this for one of the local ports? */ + if (bridgeif_is_local_mac(br, dst)) { + /* yes, send to cpu port only */ + LWIP_DEBUGF(BRIDGEIF_FW_DEBUG, ("br -> input(%p)\n", (void *)p)); + return br->netif->input(p, br->netif); + } + + /* get dst port */ + dstports = bridgeif_find_dst_ports(br, dst); + bridgeif_send_to_ports(br, p, dstports); + /* no need to send to cpu, flooding is for external ports only */ + /* by this, we consumed the pbuf */ + pbuf_free(p); + /* always return ERR_OK here to prevent the caller freeing the pbuf */ + return ERR_OK; + } +} + +#if !BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT +/** Input function for port netifs used to synchronize into tcpip_thread. + */ +static err_t +bridgeif_tcpip_input(struct pbuf *p, struct netif *netif) +{ + return tcpip_inpkt(p, netif, bridgeif_input); +} +#endif /* BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT */ + +/** + * @ingroup bridgeif + * Initialization function passed to netif_add(). + * + * ATTENTION: A pointer to a @ref bridgeif_initdata_t must be passed as 'state' + * to @ref netif_add when adding the bridge. I supplies MAC address + * and controls memory allocation (number of ports, FDB size). 
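+ *
+ * A minimal setup sketch (placeholder names; see also the usage notes at the
+ * top of this file):
+ *   bridgeif_initdata_t bid = BRIDGEIF_INITDATA1(2, 1024, 16, ETH_ADDR(0, 1, 2, 3, 4, 5));
+ *   struct netif br_netif;
+ *   netif_add(&br_netif, &ip, &netmask, &gw, &bid, bridgeif_init, tcpip_input);
+ *   bridgeif_add_port(&br_netif, &port1_netif);
+ *   bridgeif_add_port(&br_netif, &port2_netif);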
+ * + * @param netif the lwip network interface structure for this ethernetif + * @return ERR_OK if the loopif is initialized + * ERR_MEM if private data couldn't be allocated + * any other err_t on error + */ +err_t +bridgeif_init(struct netif *netif) +{ + bridgeif_initdata_t *init_data; + bridgeif_private_t *br; + size_t alloc_len_sizet; + mem_size_t alloc_len; + + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("bridgeif needs an input callback", (netif->input != NULL)); +#if !BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT + if (netif->input == tcpip_input) { + LWIP_DEBUGF(BRIDGEIF_DEBUG | LWIP_DBG_ON, ("bridgeif does not need tcpip_input, use netif_input/ethernet_input instead")); + } +#endif + + if (bridgeif_netif_client_id == 0xFF) { + bridgeif_netif_client_id = netif_alloc_client_data_id(); + } + + init_data = (bridgeif_initdata_t *)netif->state; + LWIP_ASSERT("init_data != NULL", (init_data != NULL)); + LWIP_ASSERT("init_data->max_ports <= BRIDGEIF_MAX_PORTS", + init_data->max_ports <= BRIDGEIF_MAX_PORTS); + + alloc_len_sizet = sizeof(bridgeif_private_t) + (init_data->max_ports * sizeof(bridgeif_port_t) + (init_data->max_fdb_static_entries * sizeof(bridgeif_fdb_static_entry_t))); + alloc_len = (mem_size_t)alloc_len_sizet; + LWIP_ASSERT("alloc_len == alloc_len_sizet", alloc_len == alloc_len_sizet); + LWIP_DEBUGF(BRIDGEIF_DEBUG, ("bridgeif_init: allocating %d bytes for private data\n", (int)alloc_len)); + br = (bridgeif_private_t *)mem_calloc(1, alloc_len); + if (br == NULL) { + LWIP_DEBUGF(NETIF_DEBUG, ("bridgeif_init: out of memory\n")); + return ERR_MEM; + } + memcpy(&br->ethaddr, &init_data->ethaddr, sizeof(br->ethaddr)); + br->netif = netif; + + br->max_ports = init_data->max_ports; + br->ports = (bridgeif_port_t *)(br + 1); + + br->max_fdbs_entries = init_data->max_fdb_static_entries; + br->fdbs = (bridgeif_fdb_static_entry_t *)(((u8_t *)(br + 1)) + (init_data->max_ports * sizeof(bridgeif_port_t))); + + br->max_fdbd_entries = init_data->max_fdb_dynamic_entries; + br->fdbd = bridgeif_fdb_init(init_data->max_fdb_dynamic_entries); + if (br->fdbd == NULL) { + LWIP_DEBUGF(NETIF_DEBUG, ("bridgeif_init: out of memory in fdb_init\n")); + mem_free(br); + return ERR_MEM; + } + +#if LWIP_NETIF_HOSTNAME + /* Initialize interface hostname */ + netif->hostname = "lwip"; +#endif /* LWIP_NETIF_HOSTNAME */ + + /* + * Initialize the snmp variables and counters inside the struct netif. + * The last argument should be replaced with your link speed, in units + * of bits per second. + */ + MIB2_INIT_NETIF(netif, snmp_ifType_ethernet_csmacd, 0); + + netif->state = br; + netif->name[0] = IFNAME0; + netif->name[1] = IFNAME1; + /* We directly use etharp_output() here to save a function call. + * You can instead declare your own function an call etharp_output() + * from it if you have to do some checks before sending (e.g. if link + * is available...) 
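+ * For example (illustrative sketch only; my_etharp_output is not part of lwIP):
+ *   static err_t my_etharp_output(struct netif *n, struct pbuf *q, const ip4_addr_t *a)
+ *   {
+ *     if (!netif_is_link_up(n)) {
+ *       return ERR_IF;
+ *     }
+ *     return etharp_output(n, q, a);
+ *   }
+ * and then assign netif->output = my_etharp_output; instead.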
*/ +#if LWIP_IPV4 + netif->output = etharp_output; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + netif->output_ip6 = ethip6_output; +#endif /* LWIP_IPV6 */ + netif->linkoutput = bridgeif_output; + + /* set MAC hardware address length */ + netif->hwaddr_len = ETH_HWADDR_LEN; + + /* set MAC hardware address */ + memcpy(netif->hwaddr, &br->ethaddr, ETH_HWADDR_LEN); + + /* maximum transfer unit */ + netif->mtu = 1500; + + /* device capabilities */ + /* don't set NETIF_FLAG_ETHARP if this device is not an ethernet one */ + netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET | NETIF_FLAG_IGMP | NETIF_FLAG_MLD6 | NETIF_FLAG_LINK_UP; + +#if LWIP_IPV6 && LWIP_IPV6_MLD + /* + * For hardware/netifs that implement MAC filtering. + * All-nodes link-local is handled by default, so we must let the hardware know + * to allow multicast packets in. + * Should set mld_mac_filter previously. */ + if (netif->mld_mac_filter != NULL) { + ip6_addr_t ip6_allnodes_ll; + ip6_addr_set_allnodes_linklocal(&ip6_allnodes_ll); + netif->mld_mac_filter(netif, &ip6_allnodes_ll, NETIF_ADD_MAC_FILTER); + } +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + + return ERR_OK; +} + +/** + * @ingroup bridgeif + * Add a port to the bridge + */ +err_t +bridgeif_add_port(struct netif *bridgeif, struct netif *portif) +{ + bridgeif_private_t *br; + bridgeif_port_t *port; + + LWIP_ASSERT("bridgeif != NULL", bridgeif != NULL); + LWIP_ASSERT("bridgeif->state != NULL", bridgeif->state != NULL); + LWIP_ASSERT("portif != NULL", portif != NULL); + + if (!(portif->flags & NETIF_FLAG_ETHARP) || !(portif->flags & NETIF_FLAG_ETHERNET)) { + /* can only add ETHERNET/ETHARP interfaces */ + return ERR_VAL; + } + + br = (bridgeif_private_t *)bridgeif->state; + + if (br->num_ports >= br->max_ports) { + return ERR_VAL; + } + port = &br->ports[br->num_ports]; + port->port_netif = portif; + port->port_num = br->num_ports; + port->bridge = br; + br->num_ports++; + + /* let the port call us on input */ +#if BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT + portif->input = bridgeif_input; +#else + portif->input = bridgeif_tcpip_input; +#endif + /* store pointer to bridge in netif */ + netif_set_client_data(portif, bridgeif_netif_client_id, port); + /* remove ETHARP flag to prevent sending report events on netif-up */ + netif_clear_flags(portif, NETIF_FLAG_ETHARP); + + return ERR_OK; +} + +#endif /* LWIP_NUM_NETIF_CLIENT_DATA */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/bridgeif_fdb.c b/Middlewares/Third_Party/LwIP/src/netif/bridgeif_fdb.c new file mode 100644 index 0000000..6739fc2 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/bridgeif_fdb.c @@ -0,0 +1,212 @@ +/** + * @file + * lwIP netif implementing an FDB for IEEE 802.1D MAC Bridge + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +/** + * @defgroup bridgeif_fdb FDB example code + * @ingroup bridgeif + * This file implements an example for an FDB (Forwarding DataBase) + */ + +#include "netif/bridgeif.h" +#include "lwip/sys.h" +#include "lwip/mem.h" +#include "lwip/timeouts.h" +#include + +#define BRIDGEIF_AGE_TIMER_MS 1000 + +#define BR_FDB_TIMEOUT_SEC (60*5) /* 5 minutes FDB timeout */ + +typedef struct bridgeif_dfdb_entry_s { + u8_t used; + u8_t port; + u32_t ts; + struct eth_addr addr; +} bridgeif_dfdb_entry_t; + +typedef struct bridgeif_dfdb_s { + u16_t max_fdb_entries; + bridgeif_dfdb_entry_t *fdb; +} bridgeif_dfdb_t; + +/** + * @ingroup bridgeif_fdb + * A real simple and slow implementation of an auto-learning forwarding database that + * remembers known src mac addresses to know which port to send frames destined for that + * mac address. + * + * ATTENTION: This is meant as an example only, in real-world use, you should + * provide a better implementation :-) + */ +void +bridgeif_fdb_update_src(void *fdb_ptr, struct eth_addr *src_addr, u8_t port_idx) +{ + int i; + bridgeif_dfdb_t *fdb = (bridgeif_dfdb_t *)fdb_ptr; + BRIDGEIF_DECL_PROTECT(lev); + BRIDGEIF_READ_PROTECT(lev); + for (i = 0; i < fdb->max_fdb_entries; i++) { + bridgeif_dfdb_entry_t *e = &fdb->fdb[i]; + if (e->used && e->ts) { + if (!memcmp(&e->addr, src_addr, sizeof(struct eth_addr))) { + LWIP_DEBUGF(BRIDGEIF_FDB_DEBUG, ("br: update src %02x:%02x:%02x:%02x:%02x:%02x (from %d) @ idx %d\n", + src_addr->addr[0], src_addr->addr[1], src_addr->addr[2], src_addr->addr[3], src_addr->addr[4], src_addr->addr[5], + port_idx, i)); + BRIDGEIF_WRITE_PROTECT(lev); + e->ts = BR_FDB_TIMEOUT_SEC; + e->port = port_idx; + BRIDGEIF_WRITE_UNPROTECT(lev); + BRIDGEIF_READ_UNPROTECT(lev); + return; + } + } + } + /* not found, allocate new entry from free */ + for (i = 0; i < fdb->max_fdb_entries; i++) { + bridgeif_dfdb_entry_t *e = &fdb->fdb[i]; + if (!e->used || !e->ts) { + BRIDGEIF_WRITE_PROTECT(lev); + /* check again when protected */ + if (!e->used || !e->ts) { + LWIP_DEBUGF(BRIDGEIF_FDB_DEBUG, ("br: create src %02x:%02x:%02x:%02x:%02x:%02x (from %d) @ idx %d\n", + src_addr->addr[0], src_addr->addr[1], src_addr->addr[2], src_addr->addr[3], src_addr->addr[4], src_addr->addr[5], + port_idx, i)); + memcpy(&e->addr, src_addr, sizeof(struct eth_addr)); + e->ts = BR_FDB_TIMEOUT_SEC; + e->port = port_idx; + e->used = 1; + BRIDGEIF_WRITE_UNPROTECT(lev); + BRIDGEIF_READ_UNPROTECT(lev); + return; + } + BRIDGEIF_WRITE_UNPROTECT(lev); + } + } + BRIDGEIF_READ_UNPROTECT(lev); + /* not found, no free entry -> flood */ +} + +/** + * @ingroup bridgeif_fdb + * Walk our list of auto-learnt fdb entries and return a port to forward or BR_FLOOD if unknown + */ 
+bridgeif_portmask_t +bridgeif_fdb_get_dst_ports(void *fdb_ptr, struct eth_addr *dst_addr) +{ + int i; + bridgeif_dfdb_t *fdb = (bridgeif_dfdb_t *)fdb_ptr; + BRIDGEIF_DECL_PROTECT(lev); + BRIDGEIF_READ_PROTECT(lev); + for (i = 0; i < fdb->max_fdb_entries; i++) { + bridgeif_dfdb_entry_t *e = &fdb->fdb[i]; + if (e->used && e->ts) { + if (!memcmp(&e->addr, dst_addr, sizeof(struct eth_addr))) { + bridgeif_portmask_t ret = (bridgeif_portmask_t)(1 << e->port); + BRIDGEIF_READ_UNPROTECT(lev); + return ret; + } + } + } + BRIDGEIF_READ_UNPROTECT(lev); + return BR_FLOOD; +} + +/** + * @ingroup bridgeif_fdb + * Aging implementation of our simple fdb + */ +static void +bridgeif_fdb_age_one_second(void *fdb_ptr) +{ + int i; + bridgeif_dfdb_t *fdb; + BRIDGEIF_DECL_PROTECT(lev); + + fdb = (bridgeif_dfdb_t *)fdb_ptr; + BRIDGEIF_READ_PROTECT(lev); + + for (i = 0; i < fdb->max_fdb_entries; i++) { + bridgeif_dfdb_entry_t *e = &fdb->fdb[i]; + if (e->used && e->ts) { + BRIDGEIF_WRITE_PROTECT(lev); + /* check again when protected */ + if (e->used && e->ts) { + if (--e->ts == 0) { + e->used = 0; + } + } + BRIDGEIF_WRITE_UNPROTECT(lev); + } + } + BRIDGEIF_READ_UNPROTECT(lev); +} + +/** Timer callback for fdb aging, called once per second */ +static void +bridgeif_age_tmr(void *arg) +{ + bridgeif_dfdb_t *fdb = (bridgeif_dfdb_t *)arg; + + LWIP_ASSERT("invalid arg", arg != NULL); + + bridgeif_fdb_age_one_second(fdb); + sys_timeout(BRIDGEIF_AGE_TIMER_MS, bridgeif_age_tmr, arg); +} + +/** + * @ingroup bridgeif_fdb + * Init our simple fdb list + */ +void * +bridgeif_fdb_init(u16_t max_fdb_entries) +{ + bridgeif_dfdb_t *fdb; + size_t alloc_len_sizet = sizeof(bridgeif_dfdb_t) + (max_fdb_entries * sizeof(bridgeif_dfdb_entry_t)); + mem_size_t alloc_len = (mem_size_t)alloc_len_sizet; + LWIP_ASSERT("alloc_len == alloc_len_sizet", alloc_len == alloc_len_sizet); + LWIP_DEBUGF(BRIDGEIF_DEBUG, ("bridgeif_fdb_init: allocating %d bytes for private FDB data\n", (int)alloc_len)); + fdb = (bridgeif_dfdb_t *)mem_calloc(1, alloc_len); + if (fdb == NULL) { + return NULL; + } + fdb->max_fdb_entries = max_fdb_entries; + fdb->fdb = (bridgeif_dfdb_entry_t *)(fdb + 1); + + sys_timeout(BRIDGEIF_AGE_TIMER_MS, bridgeif_age_tmr, fdb); + + return fdb; +} diff --git a/Middlewares/Third_Party/LwIP/src/netif/ethernet.c b/Middlewares/Third_Party/LwIP/src/netif/ethernet.c new file mode 100644 index 0000000..dd171e2 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ethernet.c @@ -0,0 +1,321 @@ +/** + * @file + * Ethernet common functions + * + * @defgroup ethernet Ethernet + * @ingroup callbackstyle_api + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * Copyright (c) 2003-2004 Leon Woestenberg + * Copyright (c) 2003-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/opt.h" + +#if LWIP_ARP || LWIP_ETHERNET + +#include "netif/ethernet.h" +#include "lwip/def.h" +#include "lwip/stats.h" +#include "lwip/etharp.h" +#include "lwip/ip.h" +#include "lwip/snmp.h" + +#include + +#include "netif/ppp/ppp_opts.h" +#if PPPOE_SUPPORT +#include "netif/ppp/pppoe.h" +#endif /* PPPOE_SUPPORT */ + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +const struct eth_addr ethbroadcast = {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}; +const struct eth_addr ethzero = {{0, 0, 0, 0, 0, 0}}; + +/** + * @ingroup lwip_nosys + * Process received ethernet frames. Using this function instead of directly + * calling ip_input and passing ARP frames through etharp in ethernetif_input, + * the ARP cache is protected from concurrent access.\n + * Don't call directly, pass to netif_add() and call netif->input(). + * + * @param p the received packet, p->payload pointing to the ethernet header + * @param netif the network interface on which the packet was received + * + * @see LWIP_HOOK_UNKNOWN_ETH_PROTOCOL + * @see ETHARP_SUPPORT_VLAN + * @see LWIP_HOOK_VLAN_CHECK + */ +err_t +ethernet_input(struct pbuf *p, struct netif *netif) +{ + struct eth_hdr *ethhdr; + u16_t type; +#if LWIP_ARP || ETHARP_SUPPORT_VLAN || LWIP_IPV6 + u16_t next_hdr_offset = SIZEOF_ETH_HDR; +#endif /* LWIP_ARP || ETHARP_SUPPORT_VLAN */ + + LWIP_ASSERT_CORE_LOCKED(); + + if (p->len <= SIZEOF_ETH_HDR) { + /* a packet with only an ethernet header (or less) is not valid for us */ + ETHARP_STATS_INC(etharp.proterr); + ETHARP_STATS_INC(etharp.drop); + MIB2_STATS_NETIF_INC(netif, ifinerrors); + goto free_and_return; + } + + if (p->if_idx == NETIF_NO_INDEX) { + p->if_idx = netif_get_index(netif); + } + + /* points to packet payload, which starts with an Ethernet header */ + ethhdr = (struct eth_hdr *)p->payload; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, + ("ethernet_input: dest:%"X8_F":%"X8_F":%"X8_F":%"X8_F":%"X8_F":%"X8_F", src:%"X8_F":%"X8_F":%"X8_F":%"X8_F":%"X8_F":%"X8_F", type:%"X16_F"\n", + (unsigned char)ethhdr->dest.addr[0], (unsigned char)ethhdr->dest.addr[1], (unsigned char)ethhdr->dest.addr[2], + (unsigned char)ethhdr->dest.addr[3], (unsigned char)ethhdr->dest.addr[4], (unsigned char)ethhdr->dest.addr[5], + (unsigned char)ethhdr->src.addr[0], (unsigned char)ethhdr->src.addr[1], (unsigned char)ethhdr->src.addr[2], + (unsigned char)ethhdr->src.addr[3], (unsigned char)ethhdr->src.addr[4], (unsigned char)ethhdr->src.addr[5], + lwip_htons(ethhdr->type))); + + type = ethhdr->type; +#if ETHARP_SUPPORT_VLAN + if (type == PP_HTONS(ETHTYPE_VLAN)) { + struct eth_vlan_hdr *vlan = (struct eth_vlan_hdr *)(((char *)ethhdr) + SIZEOF_ETH_HDR); + next_hdr_offset = SIZEOF_ETH_HDR + SIZEOF_VLAN_HDR; + if (p->len <= 
SIZEOF_ETH_HDR + SIZEOF_VLAN_HDR) { + /* a packet with only an ethernet/vlan header (or less) is not valid for us */ + ETHARP_STATS_INC(etharp.proterr); + ETHARP_STATS_INC(etharp.drop); + MIB2_STATS_NETIF_INC(netif, ifinerrors); + goto free_and_return; + } +#if defined(LWIP_HOOK_VLAN_CHECK) || defined(ETHARP_VLAN_CHECK) || defined(ETHARP_VLAN_CHECK_FN) /* if not, allow all VLANs */ +#ifdef LWIP_HOOK_VLAN_CHECK + if (!LWIP_HOOK_VLAN_CHECK(netif, ethhdr, vlan)) { +#elif defined(ETHARP_VLAN_CHECK_FN) + if (!ETHARP_VLAN_CHECK_FN(ethhdr, vlan)) { +#elif defined(ETHARP_VLAN_CHECK) + if (VLAN_ID(vlan) != ETHARP_VLAN_CHECK) { +#endif + /* silently ignore this packet: not for our VLAN */ + pbuf_free(p); + return ERR_OK; + } +#endif /* defined(LWIP_HOOK_VLAN_CHECK) || defined(ETHARP_VLAN_CHECK) || defined(ETHARP_VLAN_CHECK_FN) */ + type = vlan->tpid; + } +#endif /* ETHARP_SUPPORT_VLAN */ + +#if LWIP_ARP_FILTER_NETIF + netif = LWIP_ARP_FILTER_NETIF_FN(p, netif, lwip_htons(type)); +#endif /* LWIP_ARP_FILTER_NETIF*/ + + if (ethhdr->dest.addr[0] & 1) { + /* this might be a multicast or broadcast packet */ + if (ethhdr->dest.addr[0] == LL_IP4_MULTICAST_ADDR_0) { +#if LWIP_IPV4 + if ((ethhdr->dest.addr[1] == LL_IP4_MULTICAST_ADDR_1) && + (ethhdr->dest.addr[2] == LL_IP4_MULTICAST_ADDR_2)) { + /* mark the pbuf as link-layer multicast */ + p->flags |= PBUF_FLAG_LLMCAST; + } +#endif /* LWIP_IPV4 */ + } +#if LWIP_IPV6 + else if ((ethhdr->dest.addr[0] == LL_IP6_MULTICAST_ADDR_0) && + (ethhdr->dest.addr[1] == LL_IP6_MULTICAST_ADDR_1)) { + /* mark the pbuf as link-layer multicast */ + p->flags |= PBUF_FLAG_LLMCAST; + } +#endif /* LWIP_IPV6 */ + else if (eth_addr_cmp(ðhdr->dest, ðbroadcast)) { + /* mark the pbuf as link-layer broadcast */ + p->flags |= PBUF_FLAG_LLBCAST; + } + } + + switch (type) { +#if LWIP_IPV4 && LWIP_ARP + /* IP packet? */ + case PP_HTONS(ETHTYPE_IP): + if (!(netif->flags & NETIF_FLAG_ETHARP)) { + goto free_and_return; + } + /* skip Ethernet header (min. size checked above) */ + if (pbuf_remove_header(p, next_hdr_offset)) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("ethernet_input: IPv4 packet dropped, too short (%"U16_F"/%"U16_F")\n", + p->tot_len, next_hdr_offset)); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("Can't move over header in packet")); + goto free_and_return; + } else { + /* pass to IP layer */ + ip4_input(p, netif); + } + break; + + case PP_HTONS(ETHTYPE_ARP): + if (!(netif->flags & NETIF_FLAG_ETHARP)) { + goto free_and_return; + } + /* skip Ethernet header (min. 
size checked above) */ + if (pbuf_remove_header(p, next_hdr_offset)) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("ethernet_input: ARP response packet dropped, too short (%"U16_F"/%"U16_F")\n", + p->tot_len, next_hdr_offset)); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("Can't move over header in packet")); + ETHARP_STATS_INC(etharp.lenerr); + ETHARP_STATS_INC(etharp.drop); + goto free_and_return; + } else { + /* pass p to ARP module */ + etharp_input(p, netif); + } + break; +#endif /* LWIP_IPV4 && LWIP_ARP */ +#if PPPOE_SUPPORT + case PP_HTONS(ETHTYPE_PPPOEDISC): /* PPP Over Ethernet Discovery Stage */ + pppoe_disc_input(netif, p); + break; + + case PP_HTONS(ETHTYPE_PPPOE): /* PPP Over Ethernet Session Stage */ + pppoe_data_input(netif, p); + break; +#endif /* PPPOE_SUPPORT */ + +#if LWIP_IPV6 + case PP_HTONS(ETHTYPE_IPV6): /* IPv6 */ + /* skip Ethernet header */ + if ((p->len < next_hdr_offset) || pbuf_remove_header(p, next_hdr_offset)) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("ethernet_input: IPv6 packet dropped, too short (%"U16_F"/%"U16_F")\n", + p->tot_len, next_hdr_offset)); + goto free_and_return; + } else { + /* pass to IPv6 layer */ + ip6_input(p, netif); + } + break; +#endif /* LWIP_IPV6 */ + + default: +#ifdef LWIP_HOOK_UNKNOWN_ETH_PROTOCOL + if (LWIP_HOOK_UNKNOWN_ETH_PROTOCOL(p, netif) == ERR_OK) { + break; + } +#endif + ETHARP_STATS_INC(etharp.proterr); + ETHARP_STATS_INC(etharp.drop); + MIB2_STATS_NETIF_INC(netif, ifinunknownprotos); + goto free_and_return; + } + + /* This means the pbuf is freed or consumed, + so the caller doesn't have to free it again */ + return ERR_OK; + +free_and_return: + pbuf_free(p); + return ERR_OK; +} + +/** + * @ingroup ethernet + * Send an ethernet packet on the network using netif->linkoutput(). + * The ethernet header is filled in before sending. + * + * @see LWIP_HOOK_VLAN_SET + * + * @param netif the lwIP network interface on which to send the packet + * @param p the packet to send. pbuf layer must be @ref PBUF_LINK. 
+ * @param src the source MAC address to be copied into the ethernet header + * @param dst the destination MAC address to be copied into the ethernet header + * @param eth_type ethernet type (@ref lwip_ieee_eth_type) + * @return ERR_OK if the packet was sent, any other err_t on failure + */ +err_t +ethernet_output(struct netif * netif, struct pbuf * p, + const struct eth_addr * src, const struct eth_addr * dst, + u16_t eth_type) { + struct eth_hdr *ethhdr; + u16_t eth_type_be = lwip_htons(eth_type); + +#if ETHARP_SUPPORT_VLAN && defined(LWIP_HOOK_VLAN_SET) + s32_t vlan_prio_vid = LWIP_HOOK_VLAN_SET(netif, p, src, dst, eth_type); + if (vlan_prio_vid >= 0) { + struct eth_vlan_hdr *vlanhdr; + + LWIP_ASSERT("prio_vid must be <= 0xFFFF", vlan_prio_vid <= 0xFFFF); + + if (pbuf_add_header(p, SIZEOF_ETH_HDR + SIZEOF_VLAN_HDR) != 0) { + goto pbuf_header_failed; + } + vlanhdr = (struct eth_vlan_hdr *)(((u8_t *)p->payload) + SIZEOF_ETH_HDR); + vlanhdr->tpid = eth_type_be; + vlanhdr->prio_vid = lwip_htons((u16_t)vlan_prio_vid); + + eth_type_be = PP_HTONS(ETHTYPE_VLAN); + } else +#endif /* ETHARP_SUPPORT_VLAN && defined(LWIP_HOOK_VLAN_SET) */ + { + if (pbuf_add_header(p, SIZEOF_ETH_HDR) != 0) { + goto pbuf_header_failed; + } + } + + LWIP_ASSERT_CORE_LOCKED(); + + ethhdr = (struct eth_hdr *)p->payload; + ethhdr->type = eth_type_be; + SMEMCPY(ðhdr->dest, dst, ETH_HWADDR_LEN); + SMEMCPY(ðhdr->src, src, ETH_HWADDR_LEN); + + LWIP_ASSERT("netif->hwaddr_len must be 6 for ethernet_output!", + (netif->hwaddr_len == ETH_HWADDR_LEN)); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, + ("ethernet_output: sending packet %p\n", (void *)p)); + + /* send the packet */ + return netif->linkoutput(netif, p); + +pbuf_header_failed: + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("ethernet_output: could not allocate room for header.\n")); + LINK_STATS_INC(link.lenerr); + return ERR_BUF; +} + +#endif /* LWIP_ARP || LWIP_ETHERNET */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/lowpan6.c b/Middlewares/Third_Party/LwIP/src/netif/lowpan6.c new file mode 100644 index 0000000..7f0d276 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/lowpan6.c @@ -0,0 +1,920 @@ +/** + * @file + * + * 6LowPAN output for IPv6. Uses ND tables for link-layer addressing. Fragments packets to 6LowPAN units. + * + * This implementation aims to conform to IEEE 802.15.4(-2015), RFC 4944 and RFC 6282. + * @todo: RFC 6775. + */ + +/* + * Copyright (c) 2015 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +/** + * @defgroup sixlowpan 6LoWPAN (RFC4944) + * @ingroup netifs + * 6LowPAN netif implementation + */ + +#include "netif/lowpan6.h" + +#if LWIP_IPV6 + +#include "lwip/ip.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/nd6.h" +#include "lwip/mem.h" +#include "lwip/udp.h" +#include "lwip/tcpip.h" +#include "lwip/snmp.h" +#include "netif/ieee802154.h" + +#include + +#if LWIP_6LOWPAN_802154_HW_CRC +#define LWIP_6LOWPAN_DO_CALC_CRC(buf, len) 0 +#else +#define LWIP_6LOWPAN_DO_CALC_CRC(buf, len) LWIP_6LOWPAN_CALC_CRC(buf, len) +#endif + +/** This is a helper struct for reassembly of fragments + * (IEEE 802.15.4 limits to 127 bytes) + */ +struct lowpan6_reass_helper { + struct lowpan6_reass_helper *next_packet; + struct pbuf *reass; + struct pbuf *frags; + u8_t timer; + struct lowpan6_link_addr sender_addr; + u16_t datagram_size; + u16_t datagram_tag; +}; + +/** This struct keeps track of per-netif state */ +struct lowpan6_ieee802154_data { + /** fragment reassembly list */ + struct lowpan6_reass_helper *reass_list; +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 + /** address context for compression */ + ip6_addr_t lowpan6_context[LWIP_6LOWPAN_NUM_CONTEXTS]; +#endif + /** Datagram Tag for fragmentation */ + u16_t tx_datagram_tag; + /** local PAN ID for IEEE 802.15.4 header */ + u16_t ieee_802154_pan_id; + /** Sequence Number for IEEE 802.15.4 transmission */ + u8_t tx_frame_seq_num; +}; + +/* Maximum frame size is 127 bytes minus CRC size */ +#define LOWPAN6_MAX_PAYLOAD (127 - 2) + +/** Currently, this state is global, since there's only one 6LoWPAN netif */ +static struct lowpan6_ieee802154_data lowpan6_data; + +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 +#define LWIP_6LOWPAN_CONTEXTS(netif) lowpan6_data.lowpan6_context +#else +#define LWIP_6LOWPAN_CONTEXTS(netif) NULL +#endif + +static const struct lowpan6_link_addr ieee_802154_broadcast = {2, {0xff, 0xff}}; + +#if LWIP_6LOWPAN_INFER_SHORT_ADDRESS +static struct lowpan6_link_addr short_mac_addr = {2, {0, 0}}; +#endif /* LWIP_6LOWPAN_INFER_SHORT_ADDRESS */ + +/* IEEE 802.15.4 specific functions: */ + +/** Write the IEEE 802.15.4 header that encapsulates the 6LoWPAN frame. + * Src and dst PAN IDs are filled with the ID set by @ref lowpan6_set_pan_id. + * + * Since the length is variable: + * @returns the header length + */ +static u8_t +lowpan6_write_iee802154_header(struct ieee_802154_hdr *hdr, const struct lowpan6_link_addr *src, + const struct lowpan6_link_addr *dst) +{ + u8_t ieee_header_len; + u8_t *buffer; + u8_t i; + u16_t fc; + + fc = IEEE_802154_FC_FT_DATA; /* send data packet (2003 frame version) */ + fc |= IEEE_802154_FC_PANID_COMPR; /* set PAN ID compression, for now src and dst PANs are equal */ + if (dst != &ieee_802154_broadcast) { + fc |= IEEE_802154_FC_ACK_REQ; /* data packet, no broadcast: ack required. 
*/ + } + if (dst->addr_len == 2) { + fc |= IEEE_802154_FC_DST_ADDR_MODE_SHORT; + } else { + LWIP_ASSERT("invalid dst address length", dst->addr_len == 8); + fc |= IEEE_802154_FC_DST_ADDR_MODE_EXT; + } + if (src->addr_len == 2) { + fc |= IEEE_802154_FC_SRC_ADDR_MODE_SHORT; + } else { + LWIP_ASSERT("invalid src address length", src->addr_len == 8); + fc |= IEEE_802154_FC_SRC_ADDR_MODE_EXT; + } + hdr->frame_control = fc; + hdr->sequence_number = lowpan6_data.tx_frame_seq_num++; + hdr->destination_pan_id = lowpan6_data.ieee_802154_pan_id; /* pan id */ + + buffer = (u8_t *)hdr; + ieee_header_len = 5; + i = dst->addr_len; + /* reverse memcpy of dst addr */ + while (i-- > 0) { + buffer[ieee_header_len++] = dst->addr[i]; + } + /* Source PAN ID skipped due to PAN ID Compression */ + i = src->addr_len; + /* reverse memcpy of src addr */ + while (i-- > 0) { + buffer[ieee_header_len++] = src->addr[i]; + } + return ieee_header_len; +} + +/** Parse the IEEE 802.15.4 header from a pbuf. + * If successful, the header is hidden from the pbuf. + * + * PAN IDs and seuqence number are not checked + * + * @param p input pbuf, p->payload pointing at the IEEE 802.15.4 header + * @param src pointer to source address filled from the header + * @param dest pointer to destination address filled from the header + * @returns ERR_OK if successful + */ +static err_t +lowpan6_parse_iee802154_header(struct pbuf *p, struct lowpan6_link_addr *src, + struct lowpan6_link_addr *dest) +{ + u8_t *puc; + s8_t i; + u16_t frame_control, addr_mode; + u16_t datagram_offset; + + /* Parse IEEE 802.15.4 header */ + puc = (u8_t *)p->payload; + frame_control = puc[0] | (puc[1] << 8); + datagram_offset = 2; + if (frame_control & IEEE_802154_FC_SEQNO_SUPPR) { + if (IEEE_802154_FC_FRAME_VERSION_GET(frame_control) <= 1) { + /* sequence number suppressed, this is not valid for versions 0/1 */ + return ERR_VAL; + } + } else { + datagram_offset++; + } + datagram_offset += 2; /* Skip destination PAN ID */ + addr_mode = frame_control & IEEE_802154_FC_DST_ADDR_MODE_MASK; + if (addr_mode == IEEE_802154_FC_DST_ADDR_MODE_EXT) { + /* extended address (64 bit) */ + dest->addr_len = 8; + /* reverse memcpy: */ + for (i = 0; i < 8; i++) { + dest->addr[i] = puc[datagram_offset + 7 - i]; + } + datagram_offset += 8; + } else if (addr_mode == IEEE_802154_FC_DST_ADDR_MODE_SHORT) { + /* short address (16 bit) */ + dest->addr_len = 2; + /* reverse memcpy: */ + dest->addr[0] = puc[datagram_offset + 1]; + dest->addr[1] = puc[datagram_offset]; + datagram_offset += 2; + } else { + /* unsupported address mode (do we need "no address"?) */ + return ERR_VAL; + } + + if (!(frame_control & IEEE_802154_FC_PANID_COMPR)) { + /* No PAN ID compression, skip source PAN ID */ + datagram_offset += 2; + } + + addr_mode = frame_control & IEEE_802154_FC_SRC_ADDR_MODE_MASK; + if (addr_mode == IEEE_802154_FC_SRC_ADDR_MODE_EXT) { + /* extended address (64 bit) */ + src->addr_len = 8; + /* reverse memcpy: */ + for (i = 0; i < 8; i++) { + src->addr[i] = puc[datagram_offset + 7 - i]; + } + datagram_offset += 8; + } else if (addr_mode == IEEE_802154_FC_DST_ADDR_MODE_SHORT) { + /* short address (16 bit) */ + src->addr_len = 2; + src->addr[0] = puc[datagram_offset + 1]; + src->addr[1] = puc[datagram_offset]; + datagram_offset += 2; + } else { + /* unsupported address mode (do we need "no address"?) */ + return ERR_VAL; + } + + /* hide IEEE802.15.4 header. 
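Afterwards p->payload points at the first 6LoWPAN byte (the dispatch byte).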
*/ + if (pbuf_remove_header(p, datagram_offset)) { + return ERR_VAL; + } + return ERR_OK; +} + +/** Calculate the 16-bit CRC as required by IEEE 802.15.4 */ +u16_t +lowpan6_calc_crc(const void* buf, u16_t len) +{ +#define CCITT_POLY_16 0x8408U + u16_t i; + u8_t b; + u16_t crc = 0; + const u8_t* p = (const u8_t*)buf; + + for (i = 0; i < len; i++) { + u8_t data = *p; + for (b = 0U; b < 8U; b++) { + if (((data ^ crc) & 1) != 0) { + crc = (u16_t)((crc >> 1) ^ CCITT_POLY_16); + } else { + crc = (u16_t)(crc >> 1); + } + data = (u8_t)(data >> 1); + } + p++; + } + return crc; +} + +/* Fragmentation specific functions: */ + +static void +free_reass_datagram(struct lowpan6_reass_helper *lrh) +{ + if (lrh->reass) { + pbuf_free(lrh->reass); + } + if (lrh->frags) { + pbuf_free(lrh->frags); + } + mem_free(lrh); +} + +/** + * Removes a datagram from the reassembly queue. + **/ +static void +dequeue_datagram(struct lowpan6_reass_helper *lrh, struct lowpan6_reass_helper *prev) +{ + if (lowpan6_data.reass_list == lrh) { + lowpan6_data.reass_list = lowpan6_data.reass_list->next_packet; + } else { + /* it wasn't the first, so it must have a valid 'prev' */ + LWIP_ASSERT("sanity check linked list", prev != NULL); + prev->next_packet = lrh->next_packet; + } +} + +/** + * Periodic timer for 6LowPAN functions: + * + * - Remove incomplete/old packets + */ +void +lowpan6_tmr(void) +{ + struct lowpan6_reass_helper *lrh, *lrh_next, *lrh_prev = NULL; + + lrh = lowpan6_data.reass_list; + while (lrh != NULL) { + lrh_next = lrh->next_packet; + if ((--lrh->timer) == 0) { + dequeue_datagram(lrh, lrh_prev); + free_reass_datagram(lrh); + } else { + lrh_prev = lrh; + } + lrh = lrh_next; + } +} + +/* + * Encapsulates data into IEEE 802.15.4 frames. + * Fragments an IPv6 datagram into 6LowPAN units, which fit into IEEE 802.15.4 frames. + * If configured, will compress IPv6 and or UDP headers. + * */ +static err_t +lowpan6_frag(struct netif *netif, struct pbuf *p, const struct lowpan6_link_addr *src, const struct lowpan6_link_addr *dst) +{ + struct pbuf *p_frag; + u16_t frag_len, remaining_len, max_data_len; + u8_t *buffer; + u8_t ieee_header_len; + u8_t lowpan6_header_len; + u8_t hidden_header_len; + u16_t crc; + u16_t datagram_offset; + err_t err = ERR_IF; + + LWIP_ASSERT("lowpan6_frag: netif->linkoutput not set", netif->linkoutput != NULL); + + /* We'll use a dedicated pbuf for building 6LowPAN fragments. */ + p_frag = pbuf_alloc(PBUF_RAW, 127, PBUF_RAM); + if (p_frag == NULL) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + return ERR_MEM; + } + LWIP_ASSERT("this needs a pbuf in one piece", p_frag->len == p_frag->tot_len); + + /* Write IEEE 802.15.4 header. */ + buffer = (u8_t *)p_frag->payload; + ieee_header_len = lowpan6_write_iee802154_header((struct ieee_802154_hdr *)buffer, src, dst); + LWIP_ASSERT("ieee_header_len < p_frag->len", ieee_header_len < p_frag->len); + +#if LWIP_6LOWPAN_IPHC + /* Perform 6LowPAN IPv6 header compression according to RFC 6282 */ + /* do the header compression (this does NOT copy any non-compressed data) */ + err = lowpan6_compress_headers(netif, (u8_t *)p->payload, p->len, + &buffer[ieee_header_len], p_frag->len - ieee_header_len, &lowpan6_header_len, + &hidden_header_len, LWIP_6LOWPAN_CONTEXTS(netif), src, dst); + if (err != ERR_OK) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + pbuf_free(p_frag); + return err; + } + pbuf_remove_header(p, hidden_header_len); + +#else /* LWIP_6LOWPAN_IPHC */ + /* Send uncompressed IPv6 header with appropriate dispatch byte. 
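The value 0x41 is the RFC 4944 dispatch for an uncompressed IPv6 header.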
*/ + lowpan6_header_len = 1; + buffer[ieee_header_len] = 0x41; /* IPv6 dispatch */ +#endif /* LWIP_6LOWPAN_IPHC */ + + /* Calculate remaining packet length */ + remaining_len = p->tot_len; + + if (remaining_len > 0x7FF) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + /* datagram_size must fit into 11 bit */ + pbuf_free(p_frag); + return ERR_VAL; + } + + /* Fragment, or 1 packet? */ + max_data_len = LOWPAN6_MAX_PAYLOAD - ieee_header_len - lowpan6_header_len; + if (remaining_len > max_data_len) { + u16_t data_len; + /* We must move the 6LowPAN header to make room for the FRAG header. */ + memmove(&buffer[ieee_header_len + 4], &buffer[ieee_header_len], lowpan6_header_len); + + /* Now we need to fragment the packet. FRAG1 header first */ + buffer[ieee_header_len] = 0xc0 | (((p->tot_len + hidden_header_len) >> 8) & 0x7); + buffer[ieee_header_len + 1] = (p->tot_len + hidden_header_len) & 0xff; + + lowpan6_data.tx_datagram_tag++; + buffer[ieee_header_len + 2] = (lowpan6_data.tx_datagram_tag >> 8) & 0xff; + buffer[ieee_header_len + 3] = lowpan6_data.tx_datagram_tag & 0xff; + + /* Fragment follows. */ + data_len = (max_data_len - 4) & 0xf8; + frag_len = data_len + lowpan6_header_len; + + pbuf_copy_partial(p, buffer + ieee_header_len + lowpan6_header_len + 4, frag_len - lowpan6_header_len, 0); + remaining_len -= frag_len - lowpan6_header_len; + /* datagram offset holds the offset before compression */ + datagram_offset = frag_len - lowpan6_header_len + hidden_header_len; + LWIP_ASSERT("datagram offset must be a multiple of 8", (datagram_offset & 7) == 0); + + /* Calculate frame length */ + p_frag->len = p_frag->tot_len = ieee_header_len + 4 + frag_len + 2; /* add 2 bytes for crc*/ + + /* 2 bytes CRC */ + crc = LWIP_6LOWPAN_DO_CALC_CRC(p_frag->payload, p_frag->len - 2); + pbuf_take_at(p_frag, &crc, 2, p_frag->len - 2); + + /* send the packet */ + MIB2_STATS_NETIF_ADD(netif, ifoutoctets, p_frag->tot_len); + LWIP_DEBUGF(LWIP_LOWPAN6_DEBUG | LWIP_DBG_TRACE, ("lowpan6_send: sending packet %p\n", (void *)p)); + err = netif->linkoutput(netif, p_frag); + + while ((remaining_len > 0) && (err == ERR_OK)) { + struct ieee_802154_hdr *hdr = (struct ieee_802154_hdr *)buffer; + /* new frame, new seq num for ACK */ + hdr->sequence_number = lowpan6_data.tx_frame_seq_num++; + + buffer[ieee_header_len] |= 0x20; /* Change FRAG1 to FRAGN */ + + LWIP_ASSERT("datagram offset must be a multiple of 8", (datagram_offset & 7) == 0); + buffer[ieee_header_len + 4] = (u8_t)(datagram_offset >> 3); /* datagram offset in FRAGN header (datagram_offset is max. 11 bit) */ + + frag_len = (127 - ieee_header_len - 5 - 2) & 0xf8; + if (frag_len > remaining_len) { + frag_len = remaining_len; + } + + pbuf_copy_partial(p, buffer + ieee_header_len + 5, frag_len, p->tot_len - remaining_len); + remaining_len -= frag_len; + datagram_offset += frag_len; + + /* Calculate frame length */ + p_frag->len = p_frag->tot_len = frag_len + 5 + ieee_header_len + 2; + + /* 2 bytes CRC */ + crc = LWIP_6LOWPAN_DO_CALC_CRC(p_frag->payload, p_frag->len - 2); + pbuf_take_at(p_frag, &crc, 2, p_frag->len - 2); + + /* send the packet */ + MIB2_STATS_NETIF_ADD(netif, ifoutoctets, p_frag->tot_len); + LWIP_DEBUGF(LWIP_LOWPAN6_DEBUG | LWIP_DBG_TRACE, ("lowpan6_send: sending packet %p\n", (void *)p)); + err = netif->linkoutput(netif, p_frag); + } + } else { + /* It fits in one frame. 
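No fragmentation headers are needed: the whole (possibly compressed) datagram plus the 2-byte CRC stays within the 127-byte IEEE 802.15.4 frame.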
*/ + frag_len = remaining_len; + + /* Copy IPv6 packet */ + pbuf_copy_partial(p, buffer + ieee_header_len + lowpan6_header_len, frag_len, 0); + remaining_len = 0; + + /* Calculate frame length */ + p_frag->len = p_frag->tot_len = frag_len + lowpan6_header_len + ieee_header_len + 2; + LWIP_ASSERT("", p_frag->len <= 127); + + /* 2 bytes CRC */ + crc = LWIP_6LOWPAN_DO_CALC_CRC(p_frag->payload, p_frag->len - 2); + pbuf_take_at(p_frag, &crc, 2, p_frag->len - 2); + + /* send the packet */ + MIB2_STATS_NETIF_ADD(netif, ifoutoctets, p_frag->tot_len); + LWIP_DEBUGF(LWIP_LOWPAN6_DEBUG | LWIP_DBG_TRACE, ("lowpan6_send: sending packet %p\n", (void *)p)); + err = netif->linkoutput(netif, p_frag); + } + + pbuf_free(p_frag); + + return err; +} + +/** + * @ingroup sixlowpan + * Set context + */ +err_t +lowpan6_set_context(u8_t idx, const ip6_addr_t *context) +{ +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 + if (idx >= LWIP_6LOWPAN_NUM_CONTEXTS) { + return ERR_ARG; + } + + IP6_ADDR_ZONECHECK(context); + + ip6_addr_set(&lowpan6_data.lowpan6_context[idx], context); + + return ERR_OK; +#else + LWIP_UNUSED_ARG(idx); + LWIP_UNUSED_ARG(context); + return ERR_ARG; +#endif +} + +#if LWIP_6LOWPAN_INFER_SHORT_ADDRESS +/** + * @ingroup sixlowpan + * Set short address + */ +err_t +lowpan6_set_short_addr(u8_t addr_high, u8_t addr_low) +{ + short_mac_addr.addr[0] = addr_high; + short_mac_addr.addr[1] = addr_low; + + return ERR_OK; +} +#endif /* LWIP_6LOWPAN_INFER_SHORT_ADDRESS */ + +/* Create IEEE 802.15.4 address from netif address */ +static err_t +lowpan6_hwaddr_to_addr(struct netif *netif, struct lowpan6_link_addr *addr) +{ + addr->addr_len = 8; + if (netif->hwaddr_len == 8) { + LWIP_ERROR("NETIF_MAX_HWADDR_LEN >= 8 required", sizeof(netif->hwaddr) >= 8, return ERR_VAL;); + SMEMCPY(addr->addr, netif->hwaddr, 8); + } else if (netif->hwaddr_len == 6) { + /* Copy from MAC-48 */ + SMEMCPY(addr->addr, netif->hwaddr, 3); + addr->addr[3] = addr->addr[4] = 0xff; + SMEMCPY(&addr->addr[5], &netif->hwaddr[3], 3); + } else { + /* Invalid address length, don't know how to convert this */ + return ERR_VAL; + } + return ERR_OK; +} + +/** + * @ingroup sixlowpan + * Resolve and fill-in IEEE 802.15.4 address header for outgoing IPv6 packet. + * + * Perform Header Compression and fragment if necessary. + * + * @param netif The lwIP network interface which the IP packet will be sent on. + * @param q The pbuf(s) containing the IP packet to be sent. + * @param ip6addr The IP address of the packet destination. + * + * @return err_t + */ +err_t +lowpan6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr) +{ + err_t result; + const u8_t *hwaddr; + struct lowpan6_link_addr src, dest; +#if LWIP_6LOWPAN_INFER_SHORT_ADDRESS + ip6_addr_t ip6_src; + struct ip6_hdr *ip6_hdr; +#endif /* LWIP_6LOWPAN_INFER_SHORT_ADDRESS */ + +#if LWIP_6LOWPAN_INFER_SHORT_ADDRESS + /* Check if we can compress source address (use aligned copy) */ + ip6_hdr = (struct ip6_hdr *)q->payload; + ip6_addr_copy_from_packed(ip6_src, ip6_hdr->src); + ip6_addr_assign_zone(&ip6_src, IP6_UNICAST, netif); + if (lowpan6_get_address_mode(&ip6_src, &short_mac_addr) == 3) { + src.addr_len = 2; + src.addr[0] = short_mac_addr.addr[0]; + src.addr[1] = short_mac_addr.addr[1]; + } else +#endif /* LWIP_6LOWPAN_INFER_SHORT_ADDRESS */ + { + result = lowpan6_hwaddr_to_addr(netif, &src); + if (result != ERR_OK) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + return result; + } + } + + /* multicast destination IP address? 
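If so, send to the IEEE 802.15.4 broadcast link-layer address.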
*/ + if (ip6_addr_ismulticast(ip6addr)) { + MIB2_STATS_NETIF_INC(netif, ifoutnucastpkts); + /* We need to send to the broadcast address.*/ + return lowpan6_frag(netif, q, &src, &ieee_802154_broadcast); + } + + /* We have a unicast destination IP address */ + /* @todo anycast? */ + +#if LWIP_6LOWPAN_INFER_SHORT_ADDRESS + if (src.addr_len == 2) { + /* If source address was compressable to short_mac_addr, and dest has same subnet and + * is also compressable to 2-bytes, assume we can infer dest as a short address too. */ + dest.addr_len = 2; + dest.addr[0] = ((u8_t *)q->payload)[38]; + dest.addr[1] = ((u8_t *)q->payload)[39]; + if ((src.addr_len == 2) && (ip6_addr_netcmp_zoneless(&ip6_hdr->src, &ip6_hdr->dest)) && + (lowpan6_get_address_mode(ip6addr, &dest) == 3)) { + MIB2_STATS_NETIF_INC(netif, ifoutucastpkts); + return lowpan6_frag(netif, q, &src, &dest); + } + } +#endif /* LWIP_6LOWPAN_INFER_SHORT_ADDRESS */ + + /* Ask ND6 what to do with the packet. */ + result = nd6_get_next_hop_addr_or_queue(netif, q, ip6addr, &hwaddr); + if (result != ERR_OK) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + return result; + } + + /* If no hardware address is returned, nd6 has queued the packet for later. */ + if (hwaddr == NULL) { + return ERR_OK; + } + + /* Send out the packet using the returned hardware address. */ + dest.addr_len = netif->hwaddr_len; + /* XXX: Inferring the length of the source address from the destination address + * is not correct for IEEE 802.15.4, but currently we don't get this information + * from the neighbor cache */ + SMEMCPY(dest.addr, hwaddr, netif->hwaddr_len); + MIB2_STATS_NETIF_INC(netif, ifoutucastpkts); + return lowpan6_frag(netif, q, &src, &dest); +} +/** + * @ingroup sixlowpan + * NETIF input function: don't free the input pbuf when returning != ERR_OK! + */ +err_t +lowpan6_input(struct pbuf *p, struct netif *netif) +{ + u8_t *puc, b; + s8_t i; + struct lowpan6_link_addr src, dest; + u16_t datagram_size = 0; + u16_t datagram_offset, datagram_tag; + struct lowpan6_reass_helper *lrh, *lrh_next, *lrh_prev = NULL; + + if (p == NULL) { + return ERR_OK; + } + + MIB2_STATS_NETIF_ADD(netif, ifinoctets, p->tot_len); + + if (p->len != p->tot_len) { + /* for now, this needs a pbuf in one piece */ + goto lowpan6_input_discard; + } + + if (lowpan6_parse_iee802154_header(p, &src, &dest) != ERR_OK) { + goto lowpan6_input_discard; + } + + /* Check dispatch. */ + puc = (u8_t *)p->payload; + + b = *puc; + if ((b & 0xf8) == 0xc0) { + /* FRAG1 dispatch. add this packet to reassembly list. */ + datagram_size = ((u16_t)(puc[0] & 0x07) << 8) | (u16_t)puc[1]; + datagram_tag = ((u16_t)puc[2] << 8) | (u16_t)puc[3]; + + /* check for duplicate */ + lrh = lowpan6_data.reass_list; + while (lrh != NULL) { + uint8_t discard = 0; + lrh_next = lrh->next_packet; + if ((lrh->sender_addr.addr_len == src.addr_len) && + (memcmp(lrh->sender_addr.addr, src.addr, src.addr_len) == 0)) { + /* address match with packet in reassembly. */ + if ((datagram_tag == lrh->datagram_tag) && (datagram_size == lrh->datagram_size)) { + /* duplicate fragment. */ + goto lowpan6_input_discard; + } else { + /* We are receiving the start of a new datagram. Discard old one (incomplete). */ + discard = 1; + } + } + if (discard) { + dequeue_datagram(lrh, lrh_prev); + free_reass_datagram(lrh); + } else { + lrh_prev = lrh; + } + /* Check next datagram in queue. 
*/ + lrh = lrh_next; + } + + pbuf_remove_header(p, 4); /* hide frag1 dispatch */ + + lrh = (struct lowpan6_reass_helper *) mem_malloc(sizeof(struct lowpan6_reass_helper)); + if (lrh == NULL) { + goto lowpan6_input_discard; + } + + lrh->sender_addr.addr_len = src.addr_len; + for (i = 0; i < src.addr_len; i++) { + lrh->sender_addr.addr[i] = src.addr[i]; + } + lrh->datagram_size = datagram_size; + lrh->datagram_tag = datagram_tag; + lrh->frags = NULL; + if (*(u8_t *)p->payload == 0x41) { + /* This is a complete IPv6 packet, just skip dispatch byte. */ + pbuf_remove_header(p, 1); /* hide dispatch byte. */ + lrh->reass = p; + } else if ((*(u8_t *)p->payload & 0xe0 ) == 0x60) { + lrh->reass = lowpan6_decompress(p, datagram_size, LWIP_6LOWPAN_CONTEXTS(netif), &src, &dest); + if (lrh->reass == NULL) { + /* decompression failed */ + mem_free(lrh); + goto lowpan6_input_discard; + } + } + /* TODO: handle the case where we already have FRAGN received */ + lrh->next_packet = lowpan6_data.reass_list; + lrh->timer = 2; + lowpan6_data.reass_list = lrh; + + return ERR_OK; + } else if ((b & 0xf8) == 0xe0) { + /* FRAGN dispatch, find packet being reassembled. */ + datagram_size = ((u16_t)(puc[0] & 0x07) << 8) | (u16_t)puc[1]; + datagram_tag = ((u16_t)puc[2] << 8) | (u16_t)puc[3]; + datagram_offset = (u16_t)puc[4] << 3; + pbuf_remove_header(p, 4); /* hide frag1 dispatch but keep datagram offset for reassembly */ + + for (lrh = lowpan6_data.reass_list; lrh != NULL; lrh_prev = lrh, lrh = lrh->next_packet) { + if ((lrh->sender_addr.addr_len == src.addr_len) && + (memcmp(lrh->sender_addr.addr, src.addr, src.addr_len) == 0) && + (datagram_tag == lrh->datagram_tag) && + (datagram_size == lrh->datagram_size)) { + break; + } + } + if (lrh == NULL) { + /* rogue fragment */ + goto lowpan6_input_discard; + } + /* Insert new pbuf into list of fragments. Each fragment is a pbuf, + this only works for unchained pbufs. 
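
For reference, the FRAG1/FRAGN fields extracted above follow the RFC 4944 fragmentation header layout; a standalone decoder sketch (the type and field names are local to this example and not part of lwIP):

#include <stdint.h>

struct frag_hdr_fields {
  uint16_t datagram_size;   /* 11-bit size of the whole unfragmented datagram */
  uint16_t datagram_tag;    /* 16-bit tag shared by all fragments of one datagram */
  uint16_t datagram_offset; /* byte offset; present in FRAGN only, carried in 8-byte units */
};

/* Returns 1 for FRAG1, 2 for FRAGN, 0 if buf does not start with a fragment dispatch. */
static int decode_frag_header(const uint8_t *buf, struct frag_hdr_fields *out)
{
  out->datagram_size   = (uint16_t)(((buf[0] & 0x07) << 8) | buf[1]);
  out->datagram_tag    = (uint16_t)((buf[2] << 8) | buf[3]);
  out->datagram_offset = 0;
  if ((buf[0] & 0xf8) == 0xc0) {   /* 11000xxx: FRAG1, no offset field */
    return 1;
  }
  if ((buf[0] & 0xf8) == 0xe0) {   /* 11100xxx: FRAGN, offset stored in 8-byte units */
    out->datagram_offset = (uint16_t)buf[4] << 3;
    return 2;
  }
  return 0;
}
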
*/ + LWIP_ASSERT("p->next == NULL", p->next == NULL); + if (lrh->reass != NULL) { + /* FRAG1 already received, check this offset against first len */ + if (datagram_offset < lrh->reass->len) { + /* fragment overlap, discard old fragments */ + dequeue_datagram(lrh, lrh_prev); + free_reass_datagram(lrh); + goto lowpan6_input_discard; + } + } + if (lrh->frags == NULL) { + /* first FRAGN */ + lrh->frags = p; + } else { + /* find the correct place to insert */ + struct pbuf *q, *last; + u16_t new_frag_len = p->len - 1; /* p->len includes datagram_offset byte */ + for (q = lrh->frags, last = NULL; q != NULL; last = q, q = q->next) { + u16_t q_datagram_offset = ((u8_t *)q->payload)[0] << 3; + u16_t q_frag_len = q->len - 1; + if (datagram_offset < q_datagram_offset) { + if (datagram_offset + new_frag_len > q_datagram_offset) { + /* overlap, discard old fragments */ + dequeue_datagram(lrh, lrh_prev); + free_reass_datagram(lrh); + goto lowpan6_input_discard; + } + /* insert here */ + break; + } else if (datagram_offset == q_datagram_offset) { + if (q_frag_len != new_frag_len) { + /* fragment mismatch, discard old fragments */ + dequeue_datagram(lrh, lrh_prev); + free_reass_datagram(lrh); + goto lowpan6_input_discard; + } + /* duplicate, ignore */ + pbuf_free(p); + return ERR_OK; + } + } + /* insert fragment */ + if (last == NULL) { + lrh->frags = p; + } else { + last->next = p; + p->next = q; + } + } + /* check if all fragments were received */ + if (lrh->reass) { + u16_t offset = lrh->reass->len; + struct pbuf *q; + for (q = lrh->frags; q != NULL; q = q->next) { + u16_t q_datagram_offset = ((u8_t *)q->payload)[0] << 3; + if (q_datagram_offset != offset) { + /* not complete, wait for more fragments */ + return ERR_OK; + } + offset += q->len - 1; + } + if (offset == datagram_size) { + /* all fragments received, combine pbufs */ + u16_t datagram_left = datagram_size - lrh->reass->len; + for (q = lrh->frags; q != NULL; q = q->next) { + /* hide datagram_offset byte now */ + pbuf_remove_header(q, 1); + q->tot_len = datagram_left; + datagram_left -= q->len; + } + LWIP_ASSERT("datagram_left == 0", datagram_left == 0); + q = lrh->reass; + q->tot_len = datagram_size; + q->next = lrh->frags; + lrh->frags = NULL; + lrh->reass = NULL; + dequeue_datagram(lrh, lrh_prev); + mem_free(lrh); + + /* @todo: distinguish unicast/multicast */ + MIB2_STATS_NETIF_INC(netif, ifinucastpkts); + return ip6_input(q, netif); + } + } + /* pbuf enqueued, waiting for more fragments */ + return ERR_OK; + } else { + if (b == 0x41) { + /* This is a complete IPv6 packet, just skip dispatch byte. */ + pbuf_remove_header(p, 1); /* hide dispatch byte. */ + } else if ((b & 0xe0 ) == 0x60) { + /* IPv6 headers are compressed using IPHC. 
*/ + p = lowpan6_decompress(p, datagram_size, LWIP_6LOWPAN_CONTEXTS(netif), &src, &dest); + if (p == NULL) { + MIB2_STATS_NETIF_INC(netif, ifindiscards); + return ERR_OK; + } + } else { + goto lowpan6_input_discard; + } + + /* @todo: distinguish unicast/multicast */ + MIB2_STATS_NETIF_INC(netif, ifinucastpkts); + + return ip6_input(p, netif); + } +lowpan6_input_discard: + MIB2_STATS_NETIF_INC(netif, ifindiscards); + pbuf_free(p); + /* always return ERR_OK here to prevent the caller freeing the pbuf */ + return ERR_OK; +} + +/** + * @ingroup sixlowpan + */ +err_t +lowpan6_if_init(struct netif *netif) +{ + netif->name[0] = 'L'; + netif->name[1] = '6'; + netif->output_ip6 = lowpan6_output; + + MIB2_INIT_NETIF(netif, snmp_ifType_other, 0); + + /* maximum transfer unit */ + netif->mtu = 1280; + + /* broadcast capability */ + netif->flags = NETIF_FLAG_BROADCAST /* | NETIF_FLAG_LOWPAN6 */; + + return ERR_OK; +} + +/** + * @ingroup sixlowpan + * Set PAN ID + */ +err_t +lowpan6_set_pan_id(u16_t pan_id) +{ + lowpan6_data.ieee_802154_pan_id = pan_id; + + return ERR_OK; +} + +#if !NO_SYS +/** + * @ingroup sixlowpan + * Pass a received packet to tcpip_thread for input processing + * + * @param p the received packet, p->payload pointing to the + * IEEE 802.15.4 header. + * @param inp the network interface on which the packet was received + */ +err_t +tcpip_6lowpan_input(struct pbuf *p, struct netif *inp) +{ + return tcpip_inpkt(p, inp, lowpan6_input); +} +#endif /* !NO_SYS */ + +#endif /* LWIP_IPV6 */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/lowpan6_ble.c b/Middlewares/Third_Party/LwIP/src/netif/lowpan6_ble.c new file mode 100644 index 0000000..d89816d --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/lowpan6_ble.c @@ -0,0 +1,447 @@ +/** + * @file + * 6LowPAN over BLE output for IPv6 (RFC7668). +*/ + +/* + * Copyright (c) 2017 Benjamin Aigner + * Copyright (c) 2015 Inico Technologies Ltd. , Author: Ivan Delamer + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. 
+ * + * Author: Benjamin Aigner + * + * Based on the original 6lowpan implementation of lwIP ( @see 6lowpan.c) + */ + + +/** + * @defgroup rfc7668if 6LoWPAN over BLE (RFC7668) + * @ingroup netifs + * This file implements a RFC7668 implementation for 6LoWPAN over + * Bluetooth Low Energy. The specification is very similar to 6LoWPAN, + * so most of the code is re-used. + * Compared to 6LoWPAN, much functionality is already implemented in + * lower BLE layers (fragmenting, session management,...). + * + * Usage: + * - add this netif + * - don't add IPv4 addresses (no IPv4 support in RFC7668), pass 'NULL','NULL','NULL' + * - use the BLE to EUI64 conversation util to create an IPv6 link-local address from the BLE MAC (@ref ble_addr_to_eui64) + * - input function: @ref rfc7668_input + * - set the link output function, which transmits output data to an established L2CAP channel + * - If data arrives (HCI event "L2CAP_DATA_PACKET"): + * - allocate a @ref PBUF_RAW buffer + * - let the pbuf struct point to the incoming data or copy it to the buffer + * - call netif->input + * + * @todo: + * - further testing + * - support compression contexts + * - support multiple addresses + * - support multicast + * - support neighbor discovery + */ + + +#include "netif/lowpan6_ble.h" + +#if LWIP_IPV6 + +#include "lwip/ip.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/nd6.h" +#include "lwip/mem.h" +#include "lwip/udp.h" +#include "lwip/tcpip.h" +#include "lwip/snmp.h" + +#include + +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 +/** context memory, containing IPv6 addresses */ +static ip6_addr_t rfc7668_context[LWIP_6LOWPAN_NUM_CONTEXTS]; +#else +#define rfc7668_context NULL +#endif + +static struct lowpan6_link_addr rfc7668_local_addr; +static struct lowpan6_link_addr rfc7668_peer_addr; + +/** + * @ingroup rfc7668if + * convert BT address to EUI64 addr + * + * This method converts a Bluetooth MAC address to an EUI64 address, + * which is used within IPv6 communication + * + * @param dst IPv6 destination space + * @param src BLE MAC address source + * @param public_addr If the LWIP_RFC7668_LINUX_WORKAROUND_PUBLIC_ADDRESS + * option is set, bit 0x02 will be set if param=0 (no public addr); cleared otherwise + * + * @see LWIP_RFC7668_LINUX_WORKAROUND_PUBLIC_ADDRESS + */ +void +ble_addr_to_eui64(uint8_t *dst, const uint8_t *src, int public_addr) +{ + /* according to RFC7668 ch 3.2.2. */ + memcpy(dst, src, 3); + dst[3] = 0xFF; + dst[4] = 0xFE; + memcpy(&dst[5], &src[3], 3); +#if LWIP_RFC7668_LINUX_WORKAROUND_PUBLIC_ADDRESS + if(public_addr) { + dst[0] &= ~0x02; + } else { + dst[0] |= 0x02; + } +#else + LWIP_UNUSED_ARG(public_addr); +#endif +} + +/** + * @ingroup rfc7668if + * convert EUI64 address to Bluetooth MAC addr + * + * This method converts an EUI64 address to a Bluetooth MAC address, + * + * @param dst BLE MAC address destination + * @param src IPv6 source + * + */ +void +eui64_to_ble_addr(uint8_t *dst, const uint8_t *src) +{ + /* according to RFC7668 ch 3.2.2. */ + memcpy(dst,src,3); + memcpy(&dst[3],&src[5],3); +} + +/** Set an address used for stateful compression. + * This expects an address of 6 or 8 bytes. 
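
The usage list above translates roughly into the following sketch (assumptions: a dual IPv4/IPv6 build so the IPv4 parameters of netif_add() may be NULL, public BLE device addresses, and caller-supplied 6-byte address buffers; this is not part of the library):

#include "lwip/netif.h"
#include "lwip/pbuf.h"
#include "lwip/tcpip.h"
#include "netif/lowpan6_ble.h"

static struct netif ble_netif;

static void rfc7668_bringup_example(const u8_t own_bd_addr[6], const u8_t peer_bd_addr[6])
{
  /* RFC 7668 carries IPv6 only, so the IPv4 address/netmask/gateway stay NULL. */
  netif_add(&ble_netif, NULL, NULL, NULL, NULL, rfc7668_if_init, tcpip_rfc7668_input);

  /* Addresses used for IPHC address compression (public BLE addresses assumed). */
  rfc7668_set_local_addr_mac48(&ble_netif, own_bd_addr, 6, 1);
  rfc7668_set_peer_addr_mac48(&ble_netif, peer_bd_addr, 6, 1);

  netif_set_up(&ble_netif);
  netif_set_link_up(&ble_netif);
}

/* From the L2CAP data callback: copy the payload into a PBUF_RAW pbuf and hand it to netif->input. */
static void l2cap_rx_example(const u8_t *data, u16_t len)
{
  struct pbuf *p = pbuf_alloc(PBUF_RAW, len, PBUF_POOL);
  if (p != NULL) {
    pbuf_take(p, data, len);
    if (ble_netif.input(p, &ble_netif) != ERR_OK) {
      pbuf_free(p);
    }
  }
}
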
+ */ +static err_t +rfc7668_set_addr(struct lowpan6_link_addr *addr, const u8_t *in_addr, size_t in_addr_len, int is_mac_48, int is_public_addr) +{ + if ((in_addr == NULL) || (addr == NULL)) { + return ERR_VAL; + } + if (is_mac_48) { + if (in_addr_len != 6) { + return ERR_VAL; + } + addr->addr_len = 8; + ble_addr_to_eui64(addr->addr, in_addr, is_public_addr); + } else { + if (in_addr_len != 8) { + return ERR_VAL; + } + addr->addr_len = 8; + memcpy(addr->addr, in_addr, 8); + } + return ERR_OK; +} + + +/** Set the local address used for stateful compression. + * This expects an address of 8 bytes. + */ +err_t +rfc7668_set_local_addr_eui64(struct netif *netif, const u8_t *local_addr, size_t local_addr_len) +{ + /* netif not used for now, the address is stored globally... */ + LWIP_UNUSED_ARG(netif); + return rfc7668_set_addr(&rfc7668_local_addr, local_addr, local_addr_len, 0, 0); +} + +/** Set the local address used for stateful compression. + * This expects an address of 6 bytes. + */ +err_t +rfc7668_set_local_addr_mac48(struct netif *netif, const u8_t *local_addr, size_t local_addr_len, int is_public_addr) +{ + /* netif not used for now, the address is stored globally... */ + LWIP_UNUSED_ARG(netif); + return rfc7668_set_addr(&rfc7668_local_addr, local_addr, local_addr_len, 1, is_public_addr); +} + +/** Set the peer address used for stateful compression. + * This expects an address of 8 bytes. + */ +err_t +rfc7668_set_peer_addr_eui64(struct netif *netif, const u8_t *peer_addr, size_t peer_addr_len) +{ + /* netif not used for now, the address is stored globally... */ + LWIP_UNUSED_ARG(netif); + return rfc7668_set_addr(&rfc7668_peer_addr, peer_addr, peer_addr_len, 0, 0); +} + +/** Set the peer address used for stateful compression. + * This expects an address of 6 bytes. + */ +err_t +rfc7668_set_peer_addr_mac48(struct netif *netif, const u8_t *peer_addr, size_t peer_addr_len, int is_public_addr) +{ + /* netif not used for now, the address is stored globally... */ + LWIP_UNUSED_ARG(netif); + return rfc7668_set_addr(&rfc7668_peer_addr, peer_addr, peer_addr_len, 1, is_public_addr); +} + +/** Encapsulate IPv6 frames for BLE transmission + * + * This method implements the IPv6 header compression: + * *) According to RFC6282 + * *) See Figure 2, contains base format of bit positions + * *) Fragmentation not necessary (done at L2CAP layer of BLE) + * @note Currently the pbuf allocation uses 256 bytes. If longer packets are used (possible due to MTU=1480Bytes), increase it here! + * + * @param p Pbuf struct, containing the payload data + * @param netif Output network interface. Should be of RFC7668 type + * + * @return Same as netif->output. + */ +static err_t +rfc7668_compress(struct netif *netif, struct pbuf *p) +{ + struct pbuf *p_frag; + u16_t remaining_len; + u8_t *buffer; + u8_t lowpan6_header_len; + u8_t hidden_header_len; + err_t err; + + LWIP_ASSERT("lowpan6_frag: netif->linkoutput not set", netif->linkoutput != NULL); + +#if LWIP_6LOWPAN_IPHC + + /* We'll use a dedicated pbuf for building BLE fragments. + * We'll over-allocate it by the bytes saved for header compression. + */ + p_frag = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM); + if (p_frag == NULL) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + return ERR_MEM; + } + LWIP_ASSERT("this needs a pbuf in one piece", p_frag->len == p_frag->tot_len); + + /* Write IP6 header (with IPHC). 
*/ + buffer = (u8_t*)p_frag->payload; + + err = lowpan6_compress_headers(netif, (u8_t *)p->payload, p->len, buffer, p_frag->len, + &lowpan6_header_len, &hidden_header_len, rfc7668_context, &rfc7668_local_addr, &rfc7668_peer_addr); + if (err != ERR_OK) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + pbuf_free(p_frag); + return err; + } + pbuf_remove_header(p, hidden_header_len); + + /* Calculate remaining packet length */ + remaining_len = p->tot_len; + + /* Copy IPv6 packet */ + pbuf_copy_partial(p, buffer + lowpan6_header_len, remaining_len, 0); + + /* Calculate frame length */ + p_frag->len = p_frag->tot_len = remaining_len + lowpan6_header_len; + + /* send the packet */ + MIB2_STATS_NETIF_ADD(netif, ifoutoctets, p_frag->tot_len); + LWIP_DEBUGF(LWIP_LOWPAN6_DEBUG|LWIP_DBG_TRACE, ("rfc7668_output: sending packet %p\n", (void *)p)); + err = netif->linkoutput(netif, p_frag); + + pbuf_free(p_frag); + + return err; +#else /* LWIP_6LOWPAN_IPHC */ + /* 6LoWPAN over BLE requires IPHC! */ + return ERR_IF; +#endif/* LWIP_6LOWPAN_IPHC */ +} + +/** + * @ingroup rfc7668if + * Set context id IPv6 address + * + * Store one IPv6 address to a given context id. + * + * @param idx Context id + * @param context IPv6 addr for this context + * + * @return ERR_OK (if everything is fine), ERR_ARG (if the context id is out of range), ERR_VAL (if contexts disabled) + */ +err_t +rfc7668_set_context(u8_t idx, const ip6_addr_t *context) +{ +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 + /* check if the ID is possible */ + if (idx >= LWIP_6LOWPAN_NUM_CONTEXTS) { + return ERR_ARG; + } + /* copy IPv6 address to context storage */ + ip6_addr_set(&rfc7668_context[idx], context); + return ERR_OK; +#else + LWIP_UNUSED_ARG(idx); + LWIP_UNUSED_ARG(context); + return ERR_VAL; +#endif +} + +/** + * @ingroup rfc7668if + * Compress outgoing IPv6 packet and pass it on to netif->linkoutput + * + * @param netif The lwIP network interface which the IP packet will be sent on. + * @param q The pbuf(s) containing the IP packet to be sent. + * @param ip6addr The IP address of the packet destination. + * + * @return See rfc7668_compress + */ +err_t +rfc7668_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr) +{ + /* dst ip6addr is not used here, we only have one peer */ + LWIP_UNUSED_ARG(ip6addr); + + return rfc7668_compress(netif, q); +} + +/** + * @ingroup rfc7668if + * Process a received raw payload from an L2CAP channel + * + * @param p the received packet, p->payload pointing to the + * IPv6 header (maybe compressed) + * @param netif the network interface on which the packet was received + * + * @return ERR_OK if everything was fine + */ +err_t +rfc7668_input(struct pbuf * p, struct netif *netif) +{ + u8_t * puc; + + MIB2_STATS_NETIF_ADD(netif, ifinoctets, p->tot_len); + + /* Load first header byte */ + puc = (u8_t*)p->payload; + + /* no IP header compression */ + if (*puc == 0x41) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Completed packet, removing dispatch: 0x%2x \n", *puc)); + /* This is a complete IPv6 packet, just skip header byte. */ + pbuf_remove_header(p, 1); + /* IPHC header compression */ + } else if ((*puc & 0xe0 )== 0x60) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Completed packet, decompress dispatch: 0x%2x \n", *puc)); + /* IPv6 headers are compressed using IPHC. 
*/ + p = lowpan6_decompress(p, 0, rfc7668_context, &rfc7668_peer_addr, &rfc7668_local_addr); + /* if no pbuf is returned, handle as discarded packet */ + if (p == NULL) { + MIB2_STATS_NETIF_INC(netif, ifindiscards); + return ERR_OK; + } + /* invalid header byte, discard */ + } else { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Completed packet, discarding: 0x%2x \n", *puc)); + MIB2_STATS_NETIF_INC(netif, ifindiscards); + pbuf_free(p); + return ERR_OK; + } + /* @todo: distinguish unicast/multicast */ + MIB2_STATS_NETIF_INC(netif, ifinucastpkts); + +#if LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG + { + u16_t i; + LWIP_DEBUGF(LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG, ("IPv6 payload:\n")); + for (i = 0; i < p->len; i++) { + if ((i%4)==0) { + LWIP_DEBUGF(LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG, ("\n")); + } + LWIP_DEBUGF(LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG, ("%2X ", *((uint8_t *)p->payload+i))); + } + LWIP_DEBUGF(LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG, ("\np->len: %d\n", p->len)); + } +#endif + /* pass data to ip6_input */ + return ip6_input(p, netif); +} + +/** + * @ingroup rfc7668if + * Initialize the netif + * + * No flags are used (broadcast not possible, not ethernet, ...) + * The shortname for this netif is "BT" + * + * @param netif the network interface to be initialized as RFC7668 netif + * + * @return ERR_OK if everything went fine + */ +err_t +rfc7668_if_init(struct netif *netif) +{ + netif->name[0] = 'b'; + netif->name[1] = 't'; + /* local function as IPv6 output */ + netif->output_ip6 = rfc7668_output; + + MIB2_INIT_NETIF(netif, snmp_ifType_other, 0); + + /* maximum transfer unit, set according to RFC7668 ch2.4 */ + netif->mtu = 1280; + + /* no flags set (no broadcast, ethernet,...)*/ + netif->flags = 0; + + /* everything fine */ + return ERR_OK; +} + +#if !NO_SYS +/** + * Pass a received packet to tcpip_thread for input processing + * + * @param p the received packet, p->payload pointing to the + * IEEE 802.15.4 header. + * @param inp the network interface on which the packet was received + * + * @return see @ref tcpip_inpkt, same return values + */ +err_t +tcpip_rfc7668_input(struct pbuf *p, struct netif *inp) +{ + /* send data to upper layer, return the result */ + return tcpip_inpkt(p, inp, rfc7668_input); +} +#endif /* !NO_SYS */ + +#endif /* LWIP_IPV6 */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/lowpan6_common.c b/Middlewares/Third_Party/LwIP/src/netif/lowpan6_common.c new file mode 100644 index 0000000..baea206 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/lowpan6_common.c @@ -0,0 +1,841 @@ +/** + * @file + * + * Common 6LowPAN routines for IPv6. Uses ND tables for link-layer addressing. Fragments packets to 6LowPAN units. + * + * This implementation aims to conform to IEEE 802.15.4(-2015), RFC 4944 and RFC 6282. + * @todo: RFC 6775. + */ + +/* + * Copyright (c) 2015 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +/** + * @defgroup sixlowpan 6LoWPAN (RFC4944) + * @ingroup netifs + * 6LowPAN netif implementation + */ + +#include "netif/lowpan6_common.h" + +#if LWIP_IPV6 + +#include "lwip/ip.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/udp.h" + +#include + +/* Determine compression mode for unicast address. */ +s8_t +lowpan6_get_address_mode(const ip6_addr_t *ip6addr, const struct lowpan6_link_addr *mac_addr) +{ + if (mac_addr->addr_len == 2) { + if ((ip6addr->addr[2] == (u32_t)PP_HTONL(0x000000ff)) && + ((ip6addr->addr[3] & PP_HTONL(0xffff0000)) == PP_NTOHL(0xfe000000))) { + if ((ip6addr->addr[3] & PP_HTONL(0x0000ffff)) == lwip_ntohl((mac_addr->addr[0] << 8) | mac_addr->addr[1])) { + return 3; + } + } + } else if (mac_addr->addr_len == 8) { + if ((ip6addr->addr[2] == lwip_ntohl(((mac_addr->addr[0] ^ 2) << 24) | (mac_addr->addr[1] << 16) | mac_addr->addr[2] << 8 | mac_addr->addr[3])) && + (ip6addr->addr[3] == lwip_ntohl((mac_addr->addr[4] << 24) | (mac_addr->addr[5] << 16) | mac_addr->addr[6] << 8 | mac_addr->addr[7]))) { + return 3; + } + } + + if ((ip6addr->addr[2] == PP_HTONL(0x000000ffUL)) && + ((ip6addr->addr[3] & PP_HTONL(0xffff0000)) == PP_NTOHL(0xfe000000UL))) { + return 2; + } + + return 1; +} + +#if LWIP_6LOWPAN_IPHC + +/* Determine compression mode for multicast address. */ +static s8_t +lowpan6_get_address_mode_mc(const ip6_addr_t *ip6addr) +{ + if ((ip6addr->addr[0] == PP_HTONL(0xff020000)) && + (ip6addr->addr[1] == 0) && + (ip6addr->addr[2] == 0) && + ((ip6addr->addr[3] & PP_HTONL(0xffffff00)) == 0)) { + return 3; + } else if (((ip6addr->addr[0] & PP_HTONL(0xff00ffff)) == PP_HTONL(0xff000000)) && + (ip6addr->addr[1] == 0)) { + if ((ip6addr->addr[2] == 0) && + ((ip6addr->addr[3] & PP_HTONL(0xff000000)) == 0)) { + return 2; + } else if ((ip6addr->addr[2] & PP_HTONL(0xffffff00)) == 0) { + return 1; + } + } + + return 0; +} + +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 +static s8_t +lowpan6_context_lookup(const ip6_addr_t *lowpan6_contexts, const ip6_addr_t *ip6addr) +{ + s8_t i; + + for (i = 0; i < LWIP_6LOWPAN_NUM_CONTEXTS; i++) { + if (ip6_addr_netcmp(&lowpan6_contexts[i], ip6addr)) { + return i; + } + } + return -1; +} +#endif /* LWIP_6LOWPAN_NUM_CONTEXTS > 0 */ + +/* + * Compress IPv6 and/or UDP headers. 
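
lowpan6_get_address_mode() above returns 3 when the interface identifier of the IPv6 address can be reconstructed from the link-layer address alone (EUI-64 with the universal/local bit inverted), so the whole address may be elided. A small illustration (example addresses only; not part of the library):

#include "lwip/ip6_addr.h"
#include "netif/lowpan6_common.h"

static void address_mode_example(void)
{
  /* EUI-64 00:11:22:33:44:55:66:77 yields the link-local address
     fe80::211:2233:4455:6677 (first byte XORed with 0x02). */
  struct lowpan6_link_addr mac = { 8, { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 } };
  ip6_addr_t ll;
  s8_t mode;

  IP6_ADDR(&ll, PP_HTONL(0xfe800000UL), 0,
           PP_HTONL(0x02112233UL), PP_HTONL(0x44556677UL));

  mode = lowpan6_get_address_mode(&ll, &mac);
  /* 3: elide the address, 2: 16 bits inline, 1: 64 bits inline */
  LWIP_ASSERT("derived IID should be fully elidable", mode == 3);
  LWIP_UNUSED_ARG(mode);
}
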
+ * */ +err_t +lowpan6_compress_headers(struct netif *netif, u8_t *inbuf, size_t inbuf_size, u8_t *outbuf, size_t outbuf_size, + u8_t *lowpan6_header_len_out, u8_t *hidden_header_len_out, ip6_addr_t *lowpan6_contexts, + const struct lowpan6_link_addr *src, const struct lowpan6_link_addr *dst) +{ + u8_t *buffer, *inptr; + u8_t lowpan6_header_len; + u8_t hidden_header_len = 0; + s8_t i; + struct ip6_hdr *ip6hdr; + ip_addr_t ip6src, ip6dst; + + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("inbuf != NULL", inbuf != NULL); + LWIP_ASSERT("outbuf != NULL", outbuf != NULL); + LWIP_ASSERT("lowpan6_header_len_out != NULL", lowpan6_header_len_out != NULL); + LWIP_ASSERT("hidden_header_len_out != NULL", hidden_header_len_out != NULL); + + /* Perform 6LowPAN IPv6 header compression according to RFC 6282 */ + buffer = outbuf; + inptr = inbuf; + + if (inbuf_size < IP6_HLEN) { + /* input buffer too short */ + return ERR_VAL; + } + if (outbuf_size < IP6_HLEN) { + /* output buffer too short for worst case */ + return ERR_MEM; + } + + /* Point to ip6 header and align copies of src/dest addresses. */ + ip6hdr = (struct ip6_hdr *)inptr; + ip_addr_copy_from_ip6_packed(ip6dst, ip6hdr->dest); + ip6_addr_assign_zone(ip_2_ip6(&ip6dst), IP6_UNKNOWN, netif); + ip_addr_copy_from_ip6_packed(ip6src, ip6hdr->src); + ip6_addr_assign_zone(ip_2_ip6(&ip6src), IP6_UNKNOWN, netif); + + /* Basic length of 6LowPAN header, set dispatch and clear fields. */ + lowpan6_header_len = 2; + buffer[0] = 0x60; + buffer[1] = 0; + + /* Determine whether there will be a Context Identifier Extension byte or not. + * If so, set it already. */ +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 + buffer[2] = 0; + + i = lowpan6_context_lookup(lowpan6_contexts, ip_2_ip6(&ip6src)); + if (i >= 0) { + /* Stateful source address compression. */ + buffer[1] |= 0x40; + buffer[2] |= (i & 0x0f) << 4; + } + + i = lowpan6_context_lookup(lowpan6_contexts, ip_2_ip6(&ip6dst)); + if (i >= 0) { + /* Stateful destination address compression. */ + buffer[1] |= 0x04; + buffer[2] |= i & 0x0f; + } + + if (buffer[2] != 0x00) { + /* Context identifier extension byte is appended. */ + buffer[1] |= 0x80; + lowpan6_header_len++; + } +#else /* LWIP_6LOWPAN_NUM_CONTEXTS > 0 */ + LWIP_UNUSED_ARG(lowpan6_contexts); +#endif /* LWIP_6LOWPAN_NUM_CONTEXTS > 0 */ + + /* Determine TF field: Traffic Class, Flow Label */ + if (IP6H_FL(ip6hdr) == 0) { + /* Flow label is elided. */ + buffer[0] |= 0x10; + if (IP6H_TC(ip6hdr) == 0) { + /* Traffic class (ECN+DSCP) elided too. */ + buffer[0] |= 0x08; + } else { + /* Traffic class (ECN+DSCP) appended. */ + buffer[lowpan6_header_len++] = IP6H_TC(ip6hdr); + } + } else { + if (((IP6H_TC(ip6hdr) & 0x3f) == 0)) { + /* DSCP portion of Traffic Class is elided, ECN and FL are appended (3 bytes) */ + buffer[0] |= 0x08; + + buffer[lowpan6_header_len] = IP6H_TC(ip6hdr) & 0xc0; + buffer[lowpan6_header_len++] |= (IP6H_FL(ip6hdr) >> 16) & 0x0f; + buffer[lowpan6_header_len++] = (IP6H_FL(ip6hdr) >> 8) & 0xff; + buffer[lowpan6_header_len++] = IP6H_FL(ip6hdr) & 0xff; + } else { + /* Traffic class and flow label are appended (4 bytes) */ + buffer[lowpan6_header_len++] = IP6H_TC(ip6hdr); + buffer[lowpan6_header_len++] = (IP6H_FL(ip6hdr) >> 16) & 0x0f; + buffer[lowpan6_header_len++] = (IP6H_FL(ip6hdr) >> 8) & 0xff; + buffer[lowpan6_header_len++] = IP6H_FL(ip6hdr) & 0xff; + } + } + + /* Compress NH? + * Only if UDP for now. @todo support other NH compression. */ + if (IP6H_NEXTH(ip6hdr) == IP6_NEXTH_UDP) { + buffer[0] |= 0x04; + } else { + /* append nexth. 
*/ + buffer[lowpan6_header_len++] = IP6H_NEXTH(ip6hdr); + } + + /* Compress hop limit? */ + if (IP6H_HOPLIM(ip6hdr) == 255) { + buffer[0] |= 0x03; + } else if (IP6H_HOPLIM(ip6hdr) == 64) { + buffer[0] |= 0x02; + } else if (IP6H_HOPLIM(ip6hdr) == 1) { + buffer[0] |= 0x01; + } else { + /* append hop limit */ + buffer[lowpan6_header_len++] = IP6H_HOPLIM(ip6hdr); + } + + /* Compress source address */ + if (((buffer[1] & 0x40) != 0) || + (ip6_addr_islinklocal(ip_2_ip6(&ip6src)))) { + /* Context-based or link-local source address compression. */ + i = lowpan6_get_address_mode(ip_2_ip6(&ip6src), src); + buffer[1] |= (i & 0x03) << 4; + if (i == 1) { + MEMCPY(buffer + lowpan6_header_len, inptr + 16, 8); + lowpan6_header_len += 8; + } else if (i == 2) { + MEMCPY(buffer + lowpan6_header_len, inptr + 22, 2); + lowpan6_header_len += 2; + } + } else if (ip6_addr_isany(ip_2_ip6(&ip6src))) { + /* Special case: mark SAC and leave SAM=0 */ + buffer[1] |= 0x40; + } else { + /* Append full address. */ + MEMCPY(buffer + lowpan6_header_len, inptr + 8, 16); + lowpan6_header_len += 16; + } + + /* Compress destination address */ + if (ip6_addr_ismulticast(ip_2_ip6(&ip6dst))) { + /* @todo support stateful multicast address compression */ + + buffer[1] |= 0x08; + + i = lowpan6_get_address_mode_mc(ip_2_ip6(&ip6dst)); + buffer[1] |= i & 0x03; + if (i == 0) { + MEMCPY(buffer + lowpan6_header_len, inptr + 24, 16); + lowpan6_header_len += 16; + } else if (i == 1) { + buffer[lowpan6_header_len++] = inptr[25]; + MEMCPY(buffer + lowpan6_header_len, inptr + 35, 5); + lowpan6_header_len += 5; + } else if (i == 2) { + buffer[lowpan6_header_len++] = inptr[25]; + MEMCPY(buffer + lowpan6_header_len, inptr + 37, 3); + lowpan6_header_len += 3; + } else if (i == 3) { + buffer[lowpan6_header_len++] = (inptr)[39]; + } + } else if (((buffer[1] & 0x04) != 0) || + (ip6_addr_islinklocal(ip_2_ip6(&ip6dst)))) { + /* Context-based or link-local destination address compression. */ + i = lowpan6_get_address_mode(ip_2_ip6(&ip6dst), dst); + buffer[1] |= i & 0x03; + if (i == 1) { + MEMCPY(buffer + lowpan6_header_len, inptr + 32, 8); + lowpan6_header_len += 8; + } else if (i == 2) { + MEMCPY(buffer + lowpan6_header_len, inptr + 38, 2); + lowpan6_header_len += 2; + } + } else { + /* Append full address. */ + MEMCPY(buffer + lowpan6_header_len, inptr + 24, 16); + lowpan6_header_len += 16; + } + + /* Move to payload. */ + inptr += IP6_HLEN; + hidden_header_len += IP6_HLEN; + +#if LWIP_UDP + /* Compress UDP header? */ + if (IP6H_NEXTH(ip6hdr) == IP6_NEXTH_UDP) { + /* @todo support optional checksum compression */ + + if (inbuf_size < IP6_HLEN + UDP_HLEN) { + /* input buffer too short */ + return ERR_VAL; + } + if (outbuf_size < (size_t)(hidden_header_len + 7)) { + /* output buffer too short for worst case */ + return ERR_MEM; + } + + buffer[lowpan6_header_len] = 0xf0; + + /* determine port compression mode. */ + if ((inptr[0] == 0xf0) && ((inptr[1] & 0xf0) == 0xb0) && + (inptr[2] == 0xf0) && ((inptr[3] & 0xf0) == 0xb0)) { + /* Compress source and dest ports. */ + buffer[lowpan6_header_len++] |= 0x03; + buffer[lowpan6_header_len++] = ((inptr[1] & 0x0f) << 4) | (inptr[3] & 0x0f); + } else if (inptr[0] == 0xf0) { + /* Compress source port. */ + buffer[lowpan6_header_len++] |= 0x02; + buffer[lowpan6_header_len++] = inptr[1]; + buffer[lowpan6_header_len++] = inptr[2]; + buffer[lowpan6_header_len++] = inptr[3]; + } else if (inptr[2] == 0xf0) { + /* Compress dest port. 
*/ + buffer[lowpan6_header_len++] |= 0x01; + buffer[lowpan6_header_len++] = inptr[0]; + buffer[lowpan6_header_len++] = inptr[1]; + buffer[lowpan6_header_len++] = inptr[3]; + } else { + /* append full ports. */ + lowpan6_header_len++; + buffer[lowpan6_header_len++] = inptr[0]; + buffer[lowpan6_header_len++] = inptr[1]; + buffer[lowpan6_header_len++] = inptr[2]; + buffer[lowpan6_header_len++] = inptr[3]; + } + + /* elide length and copy checksum */ + buffer[lowpan6_header_len++] = inptr[6]; + buffer[lowpan6_header_len++] = inptr[7]; + + hidden_header_len += UDP_HLEN; + } +#endif /* LWIP_UDP */ + + *lowpan6_header_len_out = lowpan6_header_len; + *hidden_header_len_out = hidden_header_len; + + return ERR_OK; +} + +/** Decompress IPv6 and UDP headers compressed according to RFC 6282 + * + * @param lowpan6_buffer compressed headers, first byte is the dispatch byte + * @param lowpan6_bufsize size of lowpan6_buffer (may include data after headers) + * @param decomp_buffer buffer where the decompressed headers are stored + * @param decomp_bufsize size of decomp_buffer + * @param hdr_size_comp returns the size of the compressed headers (skip to get to data) + * @param hdr_size_decomp returns the size of the decompressed headers (IPv6 + UDP) + * @param datagram_size datagram size from fragments or 0 if unfragmented + * @param compressed_size compressed datagram size (for unfragmented rx) + * @param lowpan6_contexts context addresses + * @param src source address of the outer layer, used for address compression + * @param dest destination address of the outer layer, used for address compression + * @return ERR_OK if decompression succeeded, an error otherwise + */ +static err_t +lowpan6_decompress_hdr(u8_t *lowpan6_buffer, size_t lowpan6_bufsize, + u8_t *decomp_buffer, size_t decomp_bufsize, + u16_t *hdr_size_comp, u16_t *hdr_size_decomp, + u16_t datagram_size, u16_t compressed_size, + ip6_addr_t *lowpan6_contexts, + struct lowpan6_link_addr *src, struct lowpan6_link_addr *dest) +{ + u16_t lowpan6_offset; + struct ip6_hdr *ip6hdr; + s8_t i; + u32_t header_temp; + u16_t ip6_offset = IP6_HLEN; + + LWIP_ASSERT("lowpan6_buffer != NULL", lowpan6_buffer != NULL); + LWIP_ASSERT("decomp_buffer != NULL", decomp_buffer != NULL); + LWIP_ASSERT("src != NULL", src != NULL); + LWIP_ASSERT("dest != NULL", dest != NULL); + LWIP_ASSERT("hdr_size_comp != NULL", hdr_size_comp != NULL); + LWIP_ASSERT("dehdr_size_decompst != NULL", hdr_size_decomp != NULL); + + ip6hdr = (struct ip6_hdr *)decomp_buffer; + if (decomp_bufsize < IP6_HLEN) { + return ERR_MEM; + } + + /* output the full compressed packet, if set in @see lowpan6_opts.h */ +#if LWIP_LOWPAN6_IP_COMPRESSED_DEBUG + { + u16_t j; + LWIP_DEBUGF(LWIP_LOWPAN6_IP_COMPRESSED_DEBUG, ("lowpan6_decompress_hdr: IP6 payload (compressed): \n")); + for (j = 0; j < lowpan6_bufsize; j++) { + if ((j % 4) == 0) { + LWIP_DEBUGF(LWIP_LOWPAN6_IP_COMPRESSED_DEBUG, ("\n")); + } + LWIP_DEBUGF(LWIP_LOWPAN6_IP_COMPRESSED_DEBUG, ("%2X ", lowpan6_buffer[j])); + } + LWIP_DEBUGF(LWIP_LOWPAN6_IP_COMPRESSED_DEBUG, ("\np->len: %d", lowpan6_bufsize)); + } +#endif + + /* offset for inline IP headers (RFC 6282 ch3)*/ + lowpan6_offset = 2; + /* if CID is set (context identifier), the context byte + * follows immediately after the header, so other IPHC fields are @+3 */ + if (lowpan6_buffer[1] & 0x80) { + lowpan6_offset++; + } + + /* Set IPv6 version, traffic class and flow label. 
(RFC6282, ch 3.1.1.)*/ + if ((lowpan6_buffer[0] & 0x18) == 0x00) { + header_temp = ((lowpan6_buffer[lowpan6_offset+1] & 0x0f) << 16) | \ + (lowpan6_buffer[lowpan6_offset + 2] << 8) | lowpan6_buffer[lowpan6_offset+3]; + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("TF: 00, ECN: 0x%2x, Flowlabel+DSCP: 0x%8X\n", \ + lowpan6_buffer[lowpan6_offset],header_temp)); + IP6H_VTCFL_SET(ip6hdr, 6, lowpan6_buffer[lowpan6_offset], header_temp); + /* increase offset, processed 4 bytes here: + * TF=00: ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)*/ + lowpan6_offset += 4; + } else if ((lowpan6_buffer[0] & 0x18) == 0x08) { + header_temp = ((lowpan6_buffer[lowpan6_offset] & 0x0f) << 16) | (lowpan6_buffer[lowpan6_offset + 1] << 8) | lowpan6_buffer[lowpan6_offset+2]; + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("TF: 01, ECN: 0x%2x, Flowlabel: 0x%2X, DSCP ignored\n", \ + lowpan6_buffer[lowpan6_offset] & 0xc0,header_temp)); + IP6H_VTCFL_SET(ip6hdr, 6, lowpan6_buffer[lowpan6_offset] & 0xc0, header_temp); + /* increase offset, processed 3 bytes here: + * TF=01: ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided.*/ + lowpan6_offset += 3; + } else if ((lowpan6_buffer[0] & 0x18) == 0x10) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("TF: 10, DCSP+ECN: 0x%2x, Flowlabel ignored\n", lowpan6_buffer[lowpan6_offset])); + IP6H_VTCFL_SET(ip6hdr, 6, lowpan6_buffer[lowpan6_offset],0); + /* increase offset, processed 1 byte here: + * ECN + DSCP (1 byte), Flow Label is elided.*/ + lowpan6_offset += 1; + } else if ((lowpan6_buffer[0] & 0x18) == 0x18) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("TF: 11, DCSP/ECN & Flowlabel ignored\n")); + /* don't increase offset, no bytes processed here */ + IP6H_VTCFL_SET(ip6hdr, 6, 0, 0); + } + + /* Set Next Header (NH) */ + if ((lowpan6_buffer[0] & 0x04) == 0x00) { + /* 0: full next header byte carried inline (increase offset)*/ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("NH: 0x%2X\n", lowpan6_buffer[lowpan6_offset+1])); + IP6H_NEXTH_SET(ip6hdr, lowpan6_buffer[lowpan6_offset++]); + } else { + /* 1: NH compression, LOWPAN_NHC (RFC6282, ch 4.1) */ + /* We should fill this later with NHC decoding */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("NH: skipped, later done with NHC\n")); + IP6H_NEXTH_SET(ip6hdr, 0); + } + + /* Set Hop Limit, either carried inline or 3 different hops (1,64,255) */ + if ((lowpan6_buffer[0] & 0x03) == 0x00) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Hops: full value: %d\n", lowpan6_buffer[lowpan6_offset+1])); + IP6H_HOPLIM_SET(ip6hdr, lowpan6_buffer[lowpan6_offset++]); + } else if ((lowpan6_buffer[0] & 0x03) == 0x01) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Hops: compressed: 1\n")); + IP6H_HOPLIM_SET(ip6hdr, 1); + } else if ((lowpan6_buffer[0] & 0x03) == 0x02) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Hops: compressed: 64\n")); + IP6H_HOPLIM_SET(ip6hdr, 64); + } else if ((lowpan6_buffer[0] & 0x03) == 0x03) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Hops: compressed: 255\n")); + IP6H_HOPLIM_SET(ip6hdr, 255); + } + + /* Source address decoding. 
*/ + if ((lowpan6_buffer[1] & 0x40) == 0x00) { + /* Source address compression (SAC) = 0 -> stateless compression */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAC == 0, no context byte\n")); + /* Stateless compression */ + if ((lowpan6_buffer[1] & 0x30) == 0x00) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 00, no src compression, fetching 128bits inline\n")); + /* copy full address, increase offset by 16 Bytes */ + MEMCPY(&ip6hdr->src.addr[0], lowpan6_buffer + lowpan6_offset, 16); + lowpan6_offset += 16; + } else if ((lowpan6_buffer[1] & 0x30) == 0x10) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 01, src compression, 64bits inline\n")); + /* set 64 bits to link local */ + ip6hdr->src.addr[0] = PP_HTONL(0xfe800000UL); + ip6hdr->src.addr[1] = 0; + /* copy 8 Bytes, increase offset */ + MEMCPY(&ip6hdr->src.addr[2], lowpan6_buffer + lowpan6_offset, 8); + lowpan6_offset += 8; + } else if ((lowpan6_buffer[1] & 0x30) == 0x20) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 10, src compression, 16bits inline\n")); + /* set 96 bits to link local */ + ip6hdr->src.addr[0] = PP_HTONL(0xfe800000UL); + ip6hdr->src.addr[1] = 0; + ip6hdr->src.addr[2] = PP_HTONL(0x000000ffUL); + /* extract remaining 16bits from inline bytes, increase offset */ + ip6hdr->src.addr[3] = lwip_htonl(0xfe000000UL | (lowpan6_buffer[lowpan6_offset] << 8) | + lowpan6_buffer[lowpan6_offset + 1]); + lowpan6_offset += 2; + } else if ((lowpan6_buffer[1] & 0x30) == 0x30) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 11, src compression, 0bits inline, using other headers\n")); + /* no information avalaible, using other layers, see RFC6282 ch 3.2.2 */ + ip6hdr->src.addr[0] = PP_HTONL(0xfe800000UL); + ip6hdr->src.addr[1] = 0; + if (src->addr_len == 2) { + ip6hdr->src.addr[2] = PP_HTONL(0x000000ffUL); + ip6hdr->src.addr[3] = lwip_htonl(0xfe000000UL | (src->addr[0] << 8) | src->addr[1]); + } else if (src->addr_len == 8) { + ip6hdr->src.addr[2] = lwip_htonl(((src->addr[0] ^ 2) << 24) | (src->addr[1] << 16) | + (src->addr[2] << 8) | src->addr[3]); + ip6hdr->src.addr[3] = lwip_htonl((src->addr[4] << 24) | (src->addr[5] << 16) | + (src->addr[6] << 8) | src->addr[7]); + } else { + /* invalid source address length */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Invalid source address length\n")); + return ERR_VAL; + } + } + } else { + /* Source address compression (SAC) = 1 -> stateful/context-based compression */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAC == 1, additional context byte\n")); + if ((lowpan6_buffer[1] & 0x30) == 0x00) { + /* SAM=00, address=> :: (ANY) */ + ip6hdr->src.addr[0] = 0; + ip6hdr->src.addr[1] = 0; + ip6hdr->src.addr[2] = 0; + ip6hdr->src.addr[3] = 0; + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 00, context compression, ANY (::)\n")); + } else { + /* Set prefix from context info */ + if (lowpan6_buffer[1] & 0x80) { + i = (lowpan6_buffer[2] >> 4) & 0x0f; + } else { + i = 0; + } + if (i >= LWIP_6LOWPAN_NUM_CONTEXTS) { + /* Error */ + return ERR_VAL; + } +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 + ip6hdr->src.addr[0] = lowpan6_contexts[i].addr[0]; + ip6hdr->src.addr[1] = lowpan6_contexts[i].addr[1]; + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == xx, context compression found @%d: %8X, %8X\n", (int)i, ip6hdr->src.addr[0], ip6hdr->src.addr[1])); +#else + LWIP_UNUSED_ARG(lowpan6_contexts); +#endif + } + + /* determine further address bits */ + if ((lowpan6_buffer[1] & 0x30) == 0x10) { + /* SAM=01, load additional 64bits */ + 
MEMCPY(&ip6hdr->src.addr[2], lowpan6_buffer + lowpan6_offset, 8); + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 01, context compression, 64bits inline\n")); + lowpan6_offset += 8; + } else if ((lowpan6_buffer[1] & 0x30) == 0x20) { + /* SAM=01, load additional 16bits */ + ip6hdr->src.addr[2] = PP_HTONL(0x000000ffUL); + ip6hdr->src.addr[3] = lwip_htonl(0xfe000000UL | (lowpan6_buffer[lowpan6_offset] << 8) | lowpan6_buffer[lowpan6_offset + 1]); + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 10, context compression, 16bits inline\n")); + lowpan6_offset += 2; + } else if ((lowpan6_buffer[1] & 0x30) == 0x30) { + /* SAM=11, address is fully elided, load from other layers */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 11, context compression, 0bits inline, using other headers\n")); + if (src->addr_len == 2) { + ip6hdr->src.addr[2] = PP_HTONL(0x000000ffUL); + ip6hdr->src.addr[3] = lwip_htonl(0xfe000000UL | (src->addr[0] << 8) | src->addr[1]); + } else if (src->addr_len == 8) { + ip6hdr->src.addr[2] = lwip_htonl(((src->addr[0] ^ 2) << 24) | (src->addr[1] << 16) | (src->addr[2] << 8) | src->addr[3]); + ip6hdr->src.addr[3] = lwip_htonl((src->addr[4] << 24) | (src->addr[5] << 16) | (src->addr[6] << 8) | src->addr[7]); + } else { + /* invalid source address length */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Invalid source address length\n")); + return ERR_VAL; + } + } + } + + /* Destination address decoding. */ + if (lowpan6_buffer[1] & 0x08) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("M=1: multicast\n")); + /* Multicast destination */ + if (lowpan6_buffer[1] & 0x04) { + LWIP_DEBUGF(LWIP_DBG_ON,("DAC == 1, context multicast: unsupported!!!\n")); + /* @todo support stateful multicast addressing */ + return ERR_VAL; + } + + if ((lowpan6_buffer[1] & 0x03) == 0x00) { + /* DAM = 00, copy full address (128bits) */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 00, no dst compression, fetching 128bits inline\n")); + MEMCPY(&ip6hdr->dest.addr[0], lowpan6_buffer + lowpan6_offset, 16); + lowpan6_offset += 16; + } else if ((lowpan6_buffer[1] & 0x03) == 0x01) { + /* DAM = 01, copy 4 bytes (32bits) */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 01, dst address form (48bits): ffXX::00XX:XXXX:XXXX\n")); + ip6hdr->dest.addr[0] = lwip_htonl(0xff000000UL | (lowpan6_buffer[lowpan6_offset++] << 16)); + ip6hdr->dest.addr[1] = 0; + ip6hdr->dest.addr[2] = lwip_htonl(lowpan6_buffer[lowpan6_offset++]); + ip6hdr->dest.addr[3] = lwip_htonl((lowpan6_buffer[lowpan6_offset] << 24) | (lowpan6_buffer[lowpan6_offset + 1] << 16) | (lowpan6_buffer[lowpan6_offset + 2] << 8) | lowpan6_buffer[lowpan6_offset + 3]); + lowpan6_offset += 4; + } else if ((lowpan6_buffer[1] & 0x03) == 0x02) { + /* DAM = 10, copy 3 bytes (24bits) */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 10, dst address form (32bits): ffXX::00XX:XXXX\n")); + ip6hdr->dest.addr[0] = lwip_htonl(0xff000000UL | (lowpan6_buffer[lowpan6_offset++] << 16)); + ip6hdr->dest.addr[1] = 0; + ip6hdr->dest.addr[2] = 0; + ip6hdr->dest.addr[3] = lwip_htonl((lowpan6_buffer[lowpan6_offset] << 16) | (lowpan6_buffer[lowpan6_offset + 1] << 8) | lowpan6_buffer[lowpan6_offset + 2]); + lowpan6_offset += 3; + } else if ((lowpan6_buffer[1] & 0x03) == 0x03) { + /* DAM = 11, copy 1 byte (8bits) */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 11, dst address form (8bits): ff02::00XX\n")); + ip6hdr->dest.addr[0] = PP_HTONL(0xff020000UL); + ip6hdr->dest.addr[1] = 0; + ip6hdr->dest.addr[2] = 0; + 
ip6hdr->dest.addr[3] = lwip_htonl(lowpan6_buffer[lowpan6_offset++]); + } + + } else { + /* no Multicast (M=0) */ + if (lowpan6_buffer[1] & 0x04) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAC == 1, stateful compression\n")); + /* Stateful destination compression */ + /* Set prefix from context info */ + if (lowpan6_buffer[1] & 0x80) { + i = lowpan6_buffer[2] & 0x0f; + } else { + i = 0; + } + if (i >= LWIP_6LOWPAN_NUM_CONTEXTS) { + /* Error */ + return ERR_VAL; + } +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 + ip6hdr->dest.addr[0] = lowpan6_contexts[i].addr[0]; + ip6hdr->dest.addr[1] = lowpan6_contexts[i].addr[1]; +#endif + } else { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAC == 0, stateless compression, setting link local prefix\n")); + /* Link local address compression */ + ip6hdr->dest.addr[0] = PP_HTONL(0xfe800000UL); + ip6hdr->dest.addr[1] = 0; + } + + /* M=0, DAC=0, determining destination address length via DAM=xx */ + if ((lowpan6_buffer[1] & 0x03) == 0x00) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 00, no dst compression, fetching 128bits inline")); + /* DAM=00, copy full address */ + MEMCPY(&ip6hdr->dest.addr[0], lowpan6_buffer + lowpan6_offset, 16); + lowpan6_offset += 16; + } else if ((lowpan6_buffer[1] & 0x03) == 0x01) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 01, dst compression, 64bits inline\n")); + /* DAM=01, copy 64 inline bits, increase offset */ + MEMCPY(&ip6hdr->dest.addr[2], lowpan6_buffer + lowpan6_offset, 8); + lowpan6_offset += 8; + } else if ((lowpan6_buffer[1] & 0x03) == 0x02) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 01, dst compression, 16bits inline\n")); + /* DAM=10, copy 16 inline bits, increase offset */ + ip6hdr->dest.addr[2] = PP_HTONL(0x000000ffUL); + ip6hdr->dest.addr[3] = lwip_htonl(0xfe000000UL | (lowpan6_buffer[lowpan6_offset] << 8) | lowpan6_buffer[lowpan6_offset + 1]); + lowpan6_offset += 2; + } else if ((lowpan6_buffer[1] & 0x03) == 0x03) { + /* DAM=11, no bits available, use other headers (not done here) */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG,("DAM == 01, dst compression, 0bits inline, using other headers\n")); + if (dest->addr_len == 2) { + ip6hdr->dest.addr[2] = PP_HTONL(0x000000ffUL); + ip6hdr->dest.addr[3] = lwip_htonl(0xfe000000UL | (dest->addr[0] << 8) | dest->addr[1]); + } else if (dest->addr_len == 8) { + ip6hdr->dest.addr[2] = lwip_htonl(((dest->addr[0] ^ 2) << 24) | (dest->addr[1] << 16) | dest->addr[2] << 8 | dest->addr[3]); + ip6hdr->dest.addr[3] = lwip_htonl((dest->addr[4] << 24) | (dest->addr[5] << 16) | dest->addr[6] << 8 | dest->addr[7]); + } else { + /* invalid destination address length */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Invalid destination address length\n")); + return ERR_VAL; + } + } + } + + + /* Next Header Compression (NHC) decoding? 
*/ + if (lowpan6_buffer[0] & 0x04) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("NHC decoding\n")); +#if LWIP_UDP + if ((lowpan6_buffer[lowpan6_offset] & 0xf8) == 0xf0) { + /* NHC: UDP */ + struct udp_hdr *udphdr; + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("NHC: UDP\n")); + + /* UDP compression */ + IP6H_NEXTH_SET(ip6hdr, IP6_NEXTH_UDP); + udphdr = (struct udp_hdr *)((u8_t *)decomp_buffer + ip6_offset); + if (decomp_bufsize < IP6_HLEN + UDP_HLEN) { + return ERR_MEM; + } + + /* Checksum decompression */ + if (lowpan6_buffer[lowpan6_offset] & 0x04) { + /* @todo support checksum decompress */ + LWIP_DEBUGF(LWIP_DBG_ON, ("NHC: UDP chechsum decompression UNSUPPORTED\n")); + return ERR_VAL; + } + + /* Decompress ports, according to RFC4944 */ + i = lowpan6_buffer[lowpan6_offset++] & 0x03; + if (i == 0) { + udphdr->src = lwip_htons(lowpan6_buffer[lowpan6_offset] << 8 | lowpan6_buffer[lowpan6_offset + 1]); + udphdr->dest = lwip_htons(lowpan6_buffer[lowpan6_offset + 2] << 8 | lowpan6_buffer[lowpan6_offset + 3]); + lowpan6_offset += 4; + } else if (i == 0x01) { + udphdr->src = lwip_htons(lowpan6_buffer[lowpan6_offset] << 8 | lowpan6_buffer[lowpan6_offset + 1]); + udphdr->dest = lwip_htons(0xf000 | lowpan6_buffer[lowpan6_offset + 2]); + lowpan6_offset += 3; + } else if (i == 0x02) { + udphdr->src = lwip_htons(0xf000 | lowpan6_buffer[lowpan6_offset]); + udphdr->dest = lwip_htons(lowpan6_buffer[lowpan6_offset + 1] << 8 | lowpan6_buffer[lowpan6_offset + 2]); + lowpan6_offset += 3; + } else if (i == 0x03) { + udphdr->src = lwip_htons(0xf0b0 | ((lowpan6_buffer[lowpan6_offset] >> 4) & 0x0f)); + udphdr->dest = lwip_htons(0xf0b0 | (lowpan6_buffer[lowpan6_offset] & 0x0f)); + lowpan6_offset += 1; + } + + udphdr->chksum = lwip_htons(lowpan6_buffer[lowpan6_offset] << 8 | lowpan6_buffer[lowpan6_offset + 1]); + lowpan6_offset += 2; + ip6_offset += UDP_HLEN; + if (datagram_size == 0) { + datagram_size = compressed_size - lowpan6_offset + ip6_offset; + } + udphdr->len = lwip_htons(datagram_size - IP6_HLEN); + + } else +#endif /* LWIP_UDP */ + { + LWIP_DEBUGF(LWIP_DBG_ON,("NHC: unsupported protocol!\n")); + /* @todo support NHC other than UDP */ + return ERR_VAL; + } + } + if (datagram_size == 0) { + datagram_size = compressed_size - lowpan6_offset + ip6_offset; + } + /* Infer IPv6 payload length for header */ + IP6H_PLEN_SET(ip6hdr, datagram_size - IP6_HLEN); + + if (lowpan6_offset > lowpan6_bufsize) { + /* input buffer overflow */ + return ERR_VAL; + } + *hdr_size_comp = lowpan6_offset; + *hdr_size_decomp = ip6_offset; + + return ERR_OK; +} + +struct pbuf * +lowpan6_decompress(struct pbuf *p, u16_t datagram_size, ip6_addr_t *lowpan6_contexts, + struct lowpan6_link_addr *src, struct lowpan6_link_addr *dest) +{ + struct pbuf *q; + u16_t lowpan6_offset, ip6_offset; + err_t err; + +#if LWIP_UDP +#define UDP_HLEN_ALLOC UDP_HLEN +#else +#define UDP_HLEN_ALLOC 0 +#endif + + /* Allocate a buffer for decompression. This buffer will be too big and will be + trimmed once the final size is known. 
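
The UDP NHC handling above (and its counterpart in lowpan6_compress_headers()) follows RFC 6282 section 4.3.3: when both ports fall into 0xF0B0..0xF0BF, only their low nibbles are carried inline. A tiny round-trip sketch with example port values:

#include <stdint.h>

/* P=11 case: src 0xF0B3, dst 0xF0BA compress to the single inline byte 0x3A. */
static uint8_t compress_udp_ports_p11(uint16_t src_port, uint16_t dst_port)
{
  /* Caller must have verified both ports are within 0xF0B0..0xF0BF. */
  return (uint8_t)(((src_port & 0x0f) << 4) | (dst_port & 0x0f));
}

static void decompress_udp_ports_p11(uint8_t inline_byte, uint16_t *src_port, uint16_t *dst_port)
{
  *src_port = (uint16_t)(0xf0b0 | ((inline_byte >> 4) & 0x0f));
  *dst_port = (uint16_t)(0xf0b0 | (inline_byte & 0x0f));
}
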
*/ + q = pbuf_alloc(PBUF_IP, p->len + IP6_HLEN + UDP_HLEN_ALLOC, PBUF_POOL); + if (q == NULL) { + pbuf_free(p); + return NULL; + } + if (q->len < IP6_HLEN + UDP_HLEN_ALLOC) { + /* The headers need to fit into the first pbuf */ + pbuf_free(p); + pbuf_free(q); + return NULL; + } + + /* Decompress the IPv6 (and possibly UDP) header(s) into the new pbuf */ + err = lowpan6_decompress_hdr((u8_t *)p->payload, p->len, (u8_t *)q->payload, q->len, + &lowpan6_offset, &ip6_offset, datagram_size, p->tot_len, lowpan6_contexts, src, dest); + if (err != ERR_OK) { + pbuf_free(p); + pbuf_free(q); + return NULL; + } + + /* Now we copy leftover contents from p to q, so we have all L2 and L3 headers + (and L4?) in a single pbuf: */ + + /* Hide the compressed headers in p */ + pbuf_remove_header(p, lowpan6_offset); + /* Temporarily hide the headers in q... */ + pbuf_remove_header(q, ip6_offset); + /* ... copy the rest of p into q... */ + pbuf_copy(q, p); + /* ... and reveal the headers again... */ + pbuf_add_header_force(q, ip6_offset); + /* ... trim the pbuf to its correct size... */ + pbuf_realloc(q, ip6_offset + p->len); + /* ... and cat possibly remaining (data-only) pbufs */ + if (p->next != NULL) { + pbuf_cat(q, p->next); + } + /* the original (first) pbuf can now be freed */ + p->next = NULL; + pbuf_free(p); + + /* all done */ + return q; +} + +#endif /* LWIP_6LOWPAN_IPHC */ +#endif /* LWIP_IPV6 */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/auth.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/auth.c new file mode 100644 index 0000000..c8673ad --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/auth.c @@ -0,0 +1,2510 @@ +/* + * auth.c - PPP authentication and phase control. + * + * Copyright (c) 1993-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * Derived from main.c, which is: + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(_PATH_LASTLOG) && defined(__linux__) +#include +#endif + +#include +#include +#include + +#ifdef HAS_SHADOW +#include +#ifndef PW_PPP +#define PW_PPP PW_LOGIN +#endif +#endif + +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/lcp.h" +#if CCP_SUPPORT +#include "netif/ppp/ccp.h" +#endif /* CCP_SUPPORT */ +#if ECP_SUPPORT +#include "netif/ppp/ecp.h" +#endif /* ECP_SUPPORT */ +#include "netif/ppp/ipcp.h" +#if PAP_SUPPORT +#include "netif/ppp/upap.h" +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT +#include "netif/ppp/chap-new.h" +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT +#include "netif/ppp/eap.h" +#endif /* EAP_SUPPORT */ +#if CBCP_SUPPORT +#include "netif/ppp/cbcp.h" +#endif + +#if 0 /* UNUSED */ +#include "session.h" +#endif /* UNUSED */ + +#if 0 /* UNUSED */ +/* Bits in scan_authfile return value */ +#define NONWILD_SERVER 1 +#define NONWILD_CLIENT 2 + +#define ISWILD(word) (word[0] == '*' && word[1] == 0) +#endif /* UNUSED */ + +#if 0 /* UNUSED */ +/* List of addresses which the peer may use. */ +static struct permitted_ip *addresses[NUM_PPP]; + +/* Wordlist giving addresses which the peer may use + without authenticating itself. */ +static struct wordlist *noauth_addrs; + +/* Remote telephone number, if available */ +char remote_number[MAXNAMELEN]; + +/* Wordlist giving remote telephone numbers which may connect. */ +static struct wordlist *permitted_numbers; + +/* Extra options to apply, from the secrets file entry for the peer. */ +static struct wordlist *extra_options; +#endif /* UNUSED */ + +#if 0 /* UNUSED */ +/* Set if we require authentication only because we have a default route. 
*/ +static bool default_auth; + +/* Hook to enable a plugin to control the idle time limit */ +int (*idle_time_hook) (struct ppp_idle *) = NULL; + +/* Hook for a plugin to say whether we can possibly authenticate any peer */ +int (*pap_check_hook) (void) = NULL; + +/* Hook for a plugin to check the PAP user and password */ +int (*pap_auth_hook) (char *user, char *passwd, char **msgp, + struct wordlist **paddrs, + struct wordlist **popts) = NULL; + +/* Hook for a plugin to know about the PAP user logout */ +void (*pap_logout_hook) (void) = NULL; + +/* Hook for a plugin to get the PAP password for authenticating us */ +int (*pap_passwd_hook) (char *user, char *passwd) = NULL; + +/* Hook for a plugin to say if we can possibly authenticate a peer using CHAP */ +int (*chap_check_hook) (void) = NULL; + +/* Hook for a plugin to get the CHAP password for authenticating us */ +int (*chap_passwd_hook) (char *user, char *passwd) = NULL; + +/* Hook for a plugin to say whether it is OK if the peer + refuses to authenticate. */ +int (*null_auth_hook) (struct wordlist **paddrs, + struct wordlist **popts) = NULL; + +int (*allowed_address_hook) (u32_t addr) = NULL; +#endif /* UNUSED */ + +#ifdef HAVE_MULTILINK +/* Hook for plugin to hear when an interface joins a multilink bundle */ +void (*multilink_join_hook) (void) = NULL; +#endif + +#if PPP_NOTIFY +/* A notifier for when the peer has authenticated itself, + and we are proceeding to the network phase. */ +struct notifier *auth_up_notifier = NULL; + +/* A notifier for when the link goes down. */ +struct notifier *link_down_notifier = NULL; +#endif /* PPP_NOTIFY */ + +/* + * Option variables. + */ +#if 0 /* MOVED TO ppp_settings */ +bool uselogin = 0; /* Use /etc/passwd for checking PAP */ +bool session_mgmt = 0; /* Do session management (login records) */ +bool cryptpap = 0; /* Passwords in pap-secrets are encrypted */ +bool refuse_pap = 0; /* Don't wanna auth. ourselves with PAP */ +bool refuse_chap = 0; /* Don't wanna auth. ourselves with CHAP */ +bool refuse_eap = 0; /* Don't wanna auth. ourselves with EAP */ +#if MSCHAP_SUPPORT +bool refuse_mschap = 0; /* Don't wanna auth. ourselves with MS-CHAP */ +bool refuse_mschap_v2 = 0; /* Don't wanna auth. ourselves with MS-CHAPv2 */ +#else /* MSCHAP_SUPPORT */ +bool refuse_mschap = 1; /* Don't wanna auth. ourselves with MS-CHAP */ +bool refuse_mschap_v2 = 1; /* Don't wanna auth. ourselves with MS-CHAPv2 */ +#endif /* MSCHAP_SUPPORT */ +bool usehostname = 0; /* Use hostname for our_name */ +bool auth_required = 0; /* Always require authentication from peer */ +bool allow_any_ip = 0; /* Allow peer to use any IP address */ +bool explicit_remote = 0; /* User specified explicit remote name */ +bool explicit_user = 0; /* Set if "user" option supplied */ +bool explicit_passwd = 0; /* Set if "password" option supplied */ +char remote_name[MAXNAMELEN]; /* Peer's name for authentication */ +static char *uafname; /* name of most recent +ua file */ + +extern char *crypt (const char *, const char *); +#endif /* UNUSED */ +/* Prototypes for procedures local to this file. 
*/ + +static void network_phase(ppp_pcb *pcb); +#if PPP_IDLETIMELIMIT +static void check_idle(void *arg); +#endif /* PPP_IDLETIMELIMIT */ +#if PPP_MAXCONNECT +static void connect_time_expired(void *arg); +#endif /* PPP_MAXCONNECT */ +#if 0 /* UNUSED */ +static int null_login (int); +/* static int get_pap_passwd (char *); */ +static int have_pap_secret (int *); +static int have_chap_secret (char *, char *, int, int *); +static int have_srp_secret (char *client, char *server, int need_ip, + int *lacks_ipp); +static int ip_addr_check (u32_t, struct permitted_ip *); +static int scan_authfile (FILE *, char *, char *, char *, + struct wordlist **, struct wordlist **, + char *, int); +static void free_wordlist (struct wordlist *); +static void set_allowed_addrs (int, struct wordlist *, struct wordlist *); +static int some_ip_ok (struct wordlist *); +static int setupapfile (char **); +static int privgroup (char **); +static int set_noauth_addr (char **); +static int set_permitted_number (char **); +static void check_access (FILE *, char *); +static int wordlist_count (struct wordlist *); +#endif /* UNUSED */ + +#ifdef MAXOCTETS +static void check_maxoctets (void *); +#endif + +#if PPP_OPTIONS +/* + * Authentication-related options. + */ +option_t auth_options[] = { + { "auth", o_bool, &auth_required, + "Require authentication from peer", OPT_PRIO | 1 }, + { "noauth", o_bool, &auth_required, + "Don't require peer to authenticate", OPT_PRIOSUB | OPT_PRIV, + &allow_any_ip }, + { "require-pap", o_bool, &lcp_wantoptions[0].neg_upap, + "Require PAP authentication from peer", + OPT_PRIOSUB | 1, &auth_required }, + { "+pap", o_bool, &lcp_wantoptions[0].neg_upap, + "Require PAP authentication from peer", + OPT_ALIAS | OPT_PRIOSUB | 1, &auth_required }, + { "require-chap", o_bool, &auth_required, + "Require CHAP authentication from peer", + OPT_PRIOSUB | OPT_A2OR | MDTYPE_MD5, + &lcp_wantoptions[0].chap_mdtype }, + { "+chap", o_bool, &auth_required, + "Require CHAP authentication from peer", + OPT_ALIAS | OPT_PRIOSUB | OPT_A2OR | MDTYPE_MD5, + &lcp_wantoptions[0].chap_mdtype }, +#if MSCHAP_SUPPORT + { "require-mschap", o_bool, &auth_required, + "Require MS-CHAP authentication from peer", + OPT_PRIOSUB | OPT_A2OR | MDTYPE_MICROSOFT, + &lcp_wantoptions[0].chap_mdtype }, + { "+mschap", o_bool, &auth_required, + "Require MS-CHAP authentication from peer", + OPT_ALIAS | OPT_PRIOSUB | OPT_A2OR | MDTYPE_MICROSOFT, + &lcp_wantoptions[0].chap_mdtype }, + { "require-mschap-v2", o_bool, &auth_required, + "Require MS-CHAPv2 authentication from peer", + OPT_PRIOSUB | OPT_A2OR | MDTYPE_MICROSOFT_V2, + &lcp_wantoptions[0].chap_mdtype }, + { "+mschap-v2", o_bool, &auth_required, + "Require MS-CHAPv2 authentication from peer", + OPT_ALIAS | OPT_PRIOSUB | OPT_A2OR | MDTYPE_MICROSOFT_V2, + &lcp_wantoptions[0].chap_mdtype }, +#endif /* MSCHAP_SUPPORT */ +#if 0 + { "refuse-pap", o_bool, &refuse_pap, + "Don't agree to auth to peer with PAP", 1 }, + { "-pap", o_bool, &refuse_pap, + "Don't allow PAP authentication with peer", OPT_ALIAS | 1 }, + { "refuse-chap", o_bool, &refuse_chap, + "Don't agree to auth to peer with CHAP", + OPT_A2CLRB | MDTYPE_MD5, + &lcp_allowoptions[0].chap_mdtype }, + { "-chap", o_bool, &refuse_chap, + "Don't allow CHAP authentication with peer", + OPT_ALIAS | OPT_A2CLRB | MDTYPE_MD5, + &lcp_allowoptions[0].chap_mdtype }, +#endif +#if MSCHAP_SUPPORT +#if 0 + { "refuse-mschap", o_bool, &refuse_mschap, + "Don't agree to auth to peer with MS-CHAP", + OPT_A2CLRB | MDTYPE_MICROSOFT, + 
&lcp_allowoptions[0].chap_mdtype }, + { "-mschap", o_bool, &refuse_mschap, + "Don't allow MS-CHAP authentication with peer", + OPT_ALIAS | OPT_A2CLRB | MDTYPE_MICROSOFT, + &lcp_allowoptions[0].chap_mdtype }, + { "refuse-mschap-v2", o_bool, &refuse_mschap_v2, + "Don't agree to auth to peer with MS-CHAPv2", + OPT_A2CLRB | MDTYPE_MICROSOFT_V2, + &lcp_allowoptions[0].chap_mdtype }, + { "-mschap-v2", o_bool, &refuse_mschap_v2, + "Don't allow MS-CHAPv2 authentication with peer", + OPT_ALIAS | OPT_A2CLRB | MDTYPE_MICROSOFT_V2, + &lcp_allowoptions[0].chap_mdtype }, +#endif +#endif /* MSCHAP_SUPPORT*/ +#if EAP_SUPPORT + { "require-eap", o_bool, &lcp_wantoptions[0].neg_eap, + "Require EAP authentication from peer", OPT_PRIOSUB | 1, + &auth_required }, +#if 0 + { "refuse-eap", o_bool, &refuse_eap, + "Don't agree to authenticate to peer with EAP", 1 }, +#endif +#endif /* EAP_SUPPORT */ + { "name", o_string, our_name, + "Set local name for authentication", + OPT_PRIO | OPT_PRIV | OPT_STATIC, NULL, MAXNAMELEN }, + + { "+ua", o_special, (void *)setupapfile, + "Get PAP user and password from file", + OPT_PRIO | OPT_A2STRVAL, &uafname }, + +#if 0 + { "user", o_string, user, + "Set name for auth with peer", OPT_PRIO | OPT_STATIC, + &explicit_user, MAXNAMELEN }, + + { "password", o_string, passwd, + "Password for authenticating us to the peer", + OPT_PRIO | OPT_STATIC | OPT_HIDE, + &explicit_passwd, MAXSECRETLEN }, +#endif + + { "usehostname", o_bool, &usehostname, + "Must use hostname for authentication", 1 }, + + { "remotename", o_string, remote_name, + "Set remote name for authentication", OPT_PRIO | OPT_STATIC, + &explicit_remote, MAXNAMELEN }, + + { "login", o_bool, &uselogin, + "Use system password database for PAP", OPT_A2COPY | 1 , + &session_mgmt }, + { "enable-session", o_bool, &session_mgmt, + "Enable session accounting for remote peers", OPT_PRIV | 1 }, + + { "papcrypt", o_bool, &cryptpap, + "PAP passwords are encrypted", 1 }, + + { "privgroup", o_special, (void *)privgroup, + "Allow group members to use privileged options", OPT_PRIV | OPT_A2LIST }, + + { "allow-ip", o_special, (void *)set_noauth_addr, + "Set IP address(es) which can be used without authentication", + OPT_PRIV | OPT_A2LIST }, + + { "remotenumber", o_string, remote_number, + "Set remote telephone number for authentication", OPT_PRIO | OPT_STATIC, + NULL, MAXNAMELEN }, + + { "allow-number", o_special, (void *)set_permitted_number, + "Set telephone number(s) which are allowed to connect", + OPT_PRIV | OPT_A2LIST }, + + { NULL } +}; +#endif /* PPP_OPTIONS */ + +#if 0 /* UNUSED */ +/* + * setupapfile - specifies UPAP info for authenticating with peer. 
+ */ +static int +setupapfile(argv) + char **argv; +{ + FILE *ufile; + int l; + uid_t euid; + char u[MAXNAMELEN], p[MAXSECRETLEN]; + char *fname; + + lcp_allowoptions[0].neg_upap = 1; + + /* open user info file */ + fname = strdup(*argv); + if (fname == NULL) + novm("+ua file name"); + euid = geteuid(); + if (seteuid(getuid()) == -1) { + option_error("unable to reset uid before opening %s: %m", fname); + return 0; + } + ufile = fopen(fname, "r"); + if (seteuid(euid) == -1) + fatal("unable to regain privileges: %m"); + if (ufile == NULL) { + option_error("unable to open user login data file %s", fname); + return 0; + } + check_access(ufile, fname); + uafname = fname; + + /* get username */ + if (fgets(u, MAXNAMELEN - 1, ufile) == NULL + || fgets(p, MAXSECRETLEN - 1, ufile) == NULL) { + fclose(ufile); + option_error("unable to read user login data file %s", fname); + return 0; + } + fclose(ufile); + + /* get rid of newlines */ + l = strlen(u); + if (l > 0 && u[l-1] == '\n') + u[l-1] = 0; + l = strlen(p); + if (l > 0 && p[l-1] == '\n') + p[l-1] = 0; + + if (override_value("user", option_priority, fname)) { + strlcpy(ppp_settings.user, u, sizeof(ppp_settings.user)); + explicit_user = 1; + } + if (override_value("passwd", option_priority, fname)) { + strlcpy(ppp_settings.passwd, p, sizeof(ppp_settings.passwd)); + explicit_passwd = 1; + } + + return (1); +} + +/* + * privgroup - allow members of the group to have privileged access. + */ +static int +privgroup(argv) + char **argv; +{ + struct group *g; + int i; + + g = getgrnam(*argv); + if (g == 0) { + option_error("group %s is unknown", *argv); + return 0; + } + for (i = 0; i < ngroups; ++i) { + if (groups[i] == g->gr_gid) { + privileged = 1; + break; + } + } + return 1; +} + + +/* + * set_noauth_addr - set address(es) that can be used without authentication. + * Equivalent to specifying an entry like `"" * "" addr' in pap-secrets. + */ +static int +set_noauth_addr(argv) + char **argv; +{ + char *addr = *argv; + int l = strlen(addr) + 1; + struct wordlist *wp; + + wp = (struct wordlist *) malloc(sizeof(struct wordlist) + l); + if (wp == NULL) + novm("allow-ip argument"); + wp->word = (char *) (wp + 1); + wp->next = noauth_addrs; + MEMCPY(wp->word, addr, l); + noauth_addrs = wp; + return 1; +} + + +/* + * set_permitted_number - set remote telephone number(s) that may connect. + */ +static int +set_permitted_number(argv) + char **argv; +{ + char *number = *argv; + int l = strlen(number) + 1; + struct wordlist *wp; + + wp = (struct wordlist *) malloc(sizeof(struct wordlist) + l); + if (wp == NULL) + novm("allow-number argument"); + wp->word = (char *) (wp + 1); + wp->next = permitted_numbers; + MEMCPY(wp->word, number, l); + permitted_numbers = wp; + return 1; +} +#endif + +/* + * An Open on LCP has requested a change from Dead to Establish phase. + */ +void link_required(ppp_pcb *pcb) { + LWIP_UNUSED_ARG(pcb); +} + +#if 0 +/* + * Bring the link up to the point of being able to do ppp. + */ +void start_link(unit) + int unit; +{ + ppp_pcb *pcb = &ppp_pcb_list[unit]; + char *msg; + + status = EXIT_NEGOTIATION_FAILED; + new_phase(pcb, PPP_PHASE_SERIALCONN); + + hungup = 0; + devfd = the_channel->connect(); + msg = "Connect script failed"; + if (devfd < 0) + goto fail; + + /* set up the serial device as a ppp interface */ + /* + * N.B. we used to do tdb_writelock/tdb_writeunlock around this + * (from establish_ppp to set_ifunit). 
However, we won't be + * doing the set_ifunit in multilink mode, which is the only time + * we need the atomicity that the tdb_writelock/tdb_writeunlock + * gives us. Thus we don't need the tdb_writelock/tdb_writeunlock. + */ + fd_ppp = the_channel->establish_ppp(devfd); + msg = "ppp establishment failed"; + if (fd_ppp < 0) { + status = EXIT_FATAL_ERROR; + goto disconnect; + } + + if (!demand && ifunit >= 0) + set_ifunit(1); + + /* + * Start opening the connection and wait for + * incoming events (reply, timeout, etc.). + */ + if (ifunit >= 0) + ppp_notice("Connect: %s <--> %s", ifname, ppp_devnam); + else + ppp_notice("Starting negotiation on %s", ppp_devnam); + add_fd(fd_ppp); + + new_phase(pcb, PPP_PHASE_ESTABLISH); + + lcp_lowerup(pcb); + return; + + disconnect: + new_phase(pcb, PPP_PHASE_DISCONNECT); + if (the_channel->disconnect) + the_channel->disconnect(); + + fail: + new_phase(pcb, PPP_PHASE_DEAD); + if (the_channel->cleanup) + (*the_channel->cleanup)(); +} +#endif + +/* + * LCP has terminated the link; go to the Dead phase and take the + * physical layer down. + */ +void link_terminated(ppp_pcb *pcb) { + if (pcb->phase == PPP_PHASE_DEAD +#ifdef HAVE_MULTILINK + || pcb->phase == PPP_PHASE_MASTER +#endif /* HAVE_MULTILINK */ + ) + return; + new_phase(pcb, PPP_PHASE_DISCONNECT); + +#if 0 /* UNUSED */ + if (pap_logout_hook) { + pap_logout_hook(); + } + session_end(devnam); +#endif /* UNUSED */ + + if (!doing_multilink) { + ppp_notice("Connection terminated."); +#if PPP_STATS_SUPPORT + print_link_stats(); +#endif /* PPP_STATS_SUPPORT */ + } else + ppp_notice("Link terminated."); + + lcp_lowerdown(pcb); + + ppp_link_terminated(pcb); +#if 0 + /* + * Delete pid files before disestablishing ppp. Otherwise it + * can happen that another pppd gets the same unit and then + * we delete its pid file. + */ + if (!doing_multilink && !demand) + remove_pidfiles(); + + /* + * If we may want to bring the link up again, transfer + * the ppp unit back to the loopback. Set the + * real serial device back to its normal mode of operation. + */ + if (fd_ppp >= 0) { + remove_fd(fd_ppp); + clean_check(); + the_channel->disestablish_ppp(devfd); + if (doing_multilink) + mp_exit_bundle(); + fd_ppp = -1; + } + if (!hungup) + lcp_lowerdown(pcb); + if (!doing_multilink && !demand) + script_unsetenv("IFNAME"); + + /* + * Run disconnector script, if requested. + * XXX we may not be able to do this if the line has hung up! + */ + if (devfd >= 0 && the_channel->disconnect) { + the_channel->disconnect(); + devfd = -1; + } + if (the_channel->cleanup) + (*the_channel->cleanup)(); + + if (doing_multilink && multilink_master) { + if (!bundle_terminating) + new_phase(pcb, PPP_PHASE_MASTER); + else + mp_bundle_terminated(); + } else + new_phase(pcb, PPP_PHASE_DEAD); +#endif +} + +/* + * LCP has gone down; it will either die or try to re-establish. 
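+ * When not running multilink, this takes the upper layers (NCPs) down
+ * and moves the phase back to ESTABLISH so LCP can renegotiate, unless
+ * the link is already in the Dead phase.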
+ */ +void link_down(ppp_pcb *pcb) { +#if PPP_NOTIFY + notify(link_down_notifier, 0); +#endif /* PPP_NOTIFY */ + + if (!doing_multilink) { + upper_layers_down(pcb); + if (pcb->phase != PPP_PHASE_DEAD +#ifdef HAVE_MULTILINK + && pcb->phase != PPP_PHASE_MASTER +#endif /* HAVE_MULTILINK */ + ) + new_phase(pcb, PPP_PHASE_ESTABLISH); + } + /* XXX if doing_multilink, should do something to stop + network-layer traffic on the link */ +} + +void upper_layers_down(ppp_pcb *pcb) { + int i; + const struct protent *protp; + + for (i = 0; (protp = protocols[i]) != NULL; ++i) { + if (protp->protocol != PPP_LCP && protp->lowerdown != NULL) + (*protp->lowerdown)(pcb); + if (protp->protocol < 0xC000 && protp->close != NULL) + (*protp->close)(pcb, "LCP down"); + } + pcb->num_np_open = 0; + pcb->num_np_up = 0; +} + +/* + * The link is established. + * Proceed to the Dead, Authenticate or Network phase as appropriate. + */ +void link_established(ppp_pcb *pcb) { +#if PPP_AUTH_SUPPORT + int auth; +#if PPP_SERVER +#if PAP_SUPPORT + lcp_options *wo = &pcb->lcp_wantoptions; +#endif /* PAP_SUPPORT */ + lcp_options *go = &pcb->lcp_gotoptions; +#endif /* PPP_SERVER */ + lcp_options *ho = &pcb->lcp_hisoptions; +#endif /* PPP_AUTH_SUPPORT */ + int i; + const struct protent *protp; + + /* + * Tell higher-level protocols that LCP is up. + */ + if (!doing_multilink) { + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->protocol != PPP_LCP + && protp->lowerup != NULL) + (*protp->lowerup)(pcb); + } + +#if PPP_AUTH_SUPPORT +#if PPP_SERVER +#if PPP_ALLOWED_ADDRS + if (!auth_required && noauth_addrs != NULL) + set_allowed_addrs(unit, NULL, NULL); +#endif /* PPP_ALLOWED_ADDRS */ + + if (pcb->settings.auth_required && !(0 +#if PAP_SUPPORT + || go->neg_upap +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + || go->neg_chap +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + || go->neg_eap +#endif /* EAP_SUPPORT */ + )) { + +#if PPP_ALLOWED_ADDRS + /* + * We wanted the peer to authenticate itself, and it refused: + * if we have some address(es) it can use without auth, fine, + * otherwise treat it as though it authenticated with PAP using + * a username of "" and a password of "". If that's not OK, + * boot it out. 
+ */ + if (noauth_addrs != NULL) { + set_allowed_addrs(unit, NULL, NULL); + } else +#endif /* PPP_ALLOWED_ADDRS */ + if (!pcb->settings.null_login +#if PAP_SUPPORT + || !wo->neg_upap +#endif /* PAP_SUPPORT */ + ) { + ppp_warn("peer refused to authenticate: terminating link"); +#if 0 /* UNUSED */ + status = EXIT_PEER_AUTH_FAILED; +#endif /* UNUSED */ + pcb->err_code = PPPERR_AUTHFAIL; + lcp_close(pcb, "peer refused to authenticate"); + return; + } + } +#endif /* PPP_SERVER */ + + new_phase(pcb, PPP_PHASE_AUTHENTICATE); + auth = 0; +#if PPP_SERVER +#if EAP_SUPPORT + if (go->neg_eap) { + eap_authpeer(pcb, PPP_OUR_NAME); + auth |= EAP_PEER; + } else +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT + if (go->neg_chap) { + chap_auth_peer(pcb, PPP_OUR_NAME, CHAP_DIGEST(go->chap_mdtype)); + auth |= CHAP_PEER; + } else +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + if (go->neg_upap) { + upap_authpeer(pcb); + auth |= PAP_PEER; + } else +#endif /* PAP_SUPPORT */ + {} +#endif /* PPP_SERVER */ + +#if EAP_SUPPORT + if (ho->neg_eap) { + eap_authwithpeer(pcb, pcb->settings.user); + auth |= EAP_WITHPEER; + } else +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT + if (ho->neg_chap) { + chap_auth_with_peer(pcb, pcb->settings.user, CHAP_DIGEST(ho->chap_mdtype)); + auth |= CHAP_WITHPEER; + } else +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + if (ho->neg_upap) { + upap_authwithpeer(pcb, pcb->settings.user, pcb->settings.passwd); + auth |= PAP_WITHPEER; + } else +#endif /* PAP_SUPPORT */ + {} + + pcb->auth_pending = auth; + pcb->auth_done = 0; + + if (!auth) +#endif /* PPP_AUTH_SUPPORT */ + network_phase(pcb); +} + +/* + * Proceed to the network phase. + */ +static void network_phase(ppp_pcb *pcb) { +#if CBCP_SUPPORT + ppp_pcb *pcb = &ppp_pcb_list[unit]; +#endif +#if 0 /* UNUSED */ + lcp_options *go = &lcp_gotoptions[unit]; +#endif /* UNUSED */ + +#if 0 /* UNUSED */ + /* Log calling number. */ + if (*remote_number) + ppp_notice("peer from calling number %q authorized", remote_number); +#endif /* UNUSED */ + +#if PPP_NOTIFY + /* + * If the peer had to authenticate, notify it now. + */ + if (0 +#if CHAP_SUPPORT + || go->neg_chap +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + || go->neg_upap +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + || go->neg_eap +#endif /* EAP_SUPPORT */ + ) { + notify(auth_up_notifier, 0); + } +#endif /* PPP_NOTIFY */ + +#if CBCP_SUPPORT + /* + * If we negotiated callback, do it now. 
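+ * The phase moves to CALLBACK, the CBCP protocol is opened, and
+ * network_phase() returns without starting the network protocols yet.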
+ */ + if (go->neg_cbcp) { + new_phase(pcb, PPP_PHASE_CALLBACK); + (*cbcp_protent.open)(pcb); + return; + } +#endif + +#if PPP_OPTIONS + /* + * Process extra options from the secrets file + */ + if (extra_options) { + options_from_list(extra_options, 1); + free_wordlist(extra_options); + extra_options = 0; + } +#endif /* PPP_OPTIONS */ + start_networks(pcb); +} + +void start_networks(ppp_pcb *pcb) { +#if CCP_SUPPORT || ECP_SUPPORT + int i; + const struct protent *protp; +#endif /* CCP_SUPPORT || ECP_SUPPORT */ + + new_phase(pcb, PPP_PHASE_NETWORK); + +#ifdef HAVE_MULTILINK + if (multilink) { + if (mp_join_bundle()) { + if (multilink_join_hook) + (*multilink_join_hook)(); + if (updetach && !nodetach) + detach(); + return; + } + } +#endif /* HAVE_MULTILINK */ + +#ifdef PPP_FILTER + if (!demand) + set_filters(&pass_filter, &active_filter); +#endif +#if CCP_SUPPORT || ECP_SUPPORT + /* Start CCP and ECP */ + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if ( + (0 +#if ECP_SUPPORT + || protp->protocol == PPP_ECP +#endif /* ECP_SUPPORT */ +#if CCP_SUPPORT + || protp->protocol == PPP_CCP +#endif /* CCP_SUPPORT */ + ) + && protp->open != NULL) + (*protp->open)(pcb); +#endif /* CCP_SUPPORT || ECP_SUPPORT */ + + /* + * Bring up other network protocols iff encryption is not required. + */ + if (1 +#if ECP_SUPPORT + && !ecp_gotoptions[unit].required +#endif /* ECP_SUPPORT */ +#if MPPE_SUPPORT + && !pcb->ccp_gotoptions.mppe +#endif /* MPPE_SUPPORT */ + ) + continue_networks(pcb); +} + +void continue_networks(ppp_pcb *pcb) { + int i; + const struct protent *protp; + + /* + * Start the "real" network protocols. + */ + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->protocol < 0xC000 +#if CCP_SUPPORT + && protp->protocol != PPP_CCP +#endif /* CCP_SUPPORT */ +#if ECP_SUPPORT + && protp->protocol != PPP_ECP +#endif /* ECP_SUPPORT */ + && protp->open != NULL) { + (*protp->open)(pcb); + ++pcb->num_np_open; + } + + if (pcb->num_np_open == 0) + /* nothing to do */ + lcp_close(pcb, "No network protocols running"); +} + +#if PPP_AUTH_SUPPORT +#if PPP_SERVER +/* + * auth_check_passwd - Check the user name and passwd against configuration. + * + * returns: + * 0: Authentication failed. + * 1: Authentication succeeded. + * In either case, msg points to an appropriate message and msglen to the message len. + */ +int auth_check_passwd(ppp_pcb *pcb, char *auser, int userlen, char *apasswd, int passwdlen, const char **msg, int *msglen) { + int secretuserlen; + int secretpasswdlen; + + if (pcb->settings.user && pcb->settings.passwd) { + secretuserlen = (int)strlen(pcb->settings.user); + secretpasswdlen = (int)strlen(pcb->settings.passwd); + if (secretuserlen == userlen + && secretpasswdlen == passwdlen + && !memcmp(auser, pcb->settings.user, userlen) + && !memcmp(apasswd, pcb->settings.passwd, passwdlen) ) { + *msg = "Login ok"; + *msglen = sizeof("Login ok")-1; + return 1; + } + } + + *msg = "Login incorrect"; + *msglen = sizeof("Login incorrect")-1; + return 0; +} + +/* + * The peer has failed to authenticate himself using `protocol'. + */ +void auth_peer_fail(ppp_pcb *pcb, int protocol) { + LWIP_UNUSED_ARG(protocol); + /* + * Authentication failure: take the link down + */ +#if 0 /* UNUSED */ + status = EXIT_PEER_AUTH_FAILED; +#endif /* UNUSED */ + pcb->err_code = PPPERR_AUTHFAIL; + lcp_close(pcb, "Authentication failed"); +} + +/* + * The peer has been successfully authenticated using `protocol'. 
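+ * The matching *_PEER bit is recorded in pcb->auth_done and cleared from
+ * pcb->auth_pending; once nothing remains pending, the link proceeds to
+ * the network (or callback) phase.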
+ */ +void auth_peer_success(ppp_pcb *pcb, int protocol, int prot_flavor, const char *name, int namelen) { + int bit; +#ifndef HAVE_MULTILINK + LWIP_UNUSED_ARG(name); + LWIP_UNUSED_ARG(namelen); +#endif /* HAVE_MULTILINK */ + + switch (protocol) { +#if CHAP_SUPPORT + case PPP_CHAP: + bit = CHAP_PEER; + switch (prot_flavor) { + case CHAP_MD5: + bit |= CHAP_MD5_PEER; + break; +#if MSCHAP_SUPPORT + case CHAP_MICROSOFT: + bit |= CHAP_MS_PEER; + break; + case CHAP_MICROSOFT_V2: + bit |= CHAP_MS2_PEER; + break; +#endif /* MSCHAP_SUPPORT */ + default: + break; + } + break; +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + case PPP_PAP: + bit = PAP_PEER; + break; +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + case PPP_EAP: + bit = EAP_PEER; + break; +#endif /* EAP_SUPPORT */ + default: + ppp_warn("auth_peer_success: unknown protocol %x", protocol); + return; + } + +#ifdef HAVE_MULTILINK + /* + * Save the authenticated name of the peer for later. + */ + if (namelen > (int)sizeof(pcb->peer_authname) - 1) + namelen = (int)sizeof(pcb->peer_authname) - 1; + MEMCPY(pcb->peer_authname, name, namelen); + pcb->peer_authname[namelen] = 0; +#endif /* HAVE_MULTILINK */ +#if 0 /* UNUSED */ + script_setenv("PEERNAME", , 0); +#endif /* UNUSED */ + + /* Save the authentication method for later. */ + pcb->auth_done |= bit; + + /* + * If there is no more authentication still to be done, + * proceed to the network (or callback) phase. + */ + if ((pcb->auth_pending &= ~bit) == 0) + network_phase(pcb); +} +#endif /* PPP_SERVER */ + +/* + * We have failed to authenticate ourselves to the peer using `protocol'. + */ +void auth_withpeer_fail(ppp_pcb *pcb, int protocol) { + LWIP_UNUSED_ARG(protocol); + /* + * We've failed to authenticate ourselves to our peer. + * + * Some servers keep sending CHAP challenges, but there + * is no point in persisting without any way to get updated + * authentication secrets. + * + * He'll probably take the link down, and there's not much + * we can do except wait for that. + */ + pcb->err_code = PPPERR_AUTHFAIL; + lcp_close(pcb, "Failed to authenticate ourselves to peer"); +} + +/* + * We have successfully authenticated ourselves with the peer using `protocol'. + */ +void auth_withpeer_success(ppp_pcb *pcb, int protocol, int prot_flavor) { + int bit; + const char *prot = ""; + + switch (protocol) { +#if CHAP_SUPPORT + case PPP_CHAP: + bit = CHAP_WITHPEER; + prot = "CHAP"; + switch (prot_flavor) { + case CHAP_MD5: + bit |= CHAP_MD5_WITHPEER; + break; +#if MSCHAP_SUPPORT + case CHAP_MICROSOFT: + bit |= CHAP_MS_WITHPEER; + break; + case CHAP_MICROSOFT_V2: + bit |= CHAP_MS2_WITHPEER; + break; +#endif /* MSCHAP_SUPPORT */ + default: + break; + } + break; +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + case PPP_PAP: + bit = PAP_WITHPEER; + prot = "PAP"; + break; +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + case PPP_EAP: + bit = EAP_WITHPEER; + prot = "EAP"; + break; +#endif /* EAP_SUPPORT */ + default: + ppp_warn("auth_withpeer_success: unknown protocol %x", protocol); + bit = 0; + /* no break */ + } + + ppp_notice("%s authentication succeeded", prot); + + /* Save the authentication method for later. */ + pcb->auth_done |= bit; + + /* + * If there is no more authentication still being done, + * proceed to the network (or callback) phase. + */ + if ((pcb->auth_pending &= ~bit) == 0) + network_phase(pcb); +} +#endif /* PPP_AUTH_SUPPORT */ + + +/* + * np_up - a network protocol has come up. 
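+ * When the first protocol comes up the phase switches to RUNNING and the
+ * optional idle-time and maximum-connect-time timeouts are started.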
+ */ +void np_up(ppp_pcb *pcb, int proto) { +#if PPP_IDLETIMELIMIT + int tlim; +#endif /* PPP_IDLETIMELIMIT */ + LWIP_UNUSED_ARG(proto); + + if (pcb->num_np_up == 0) { + /* + * At this point we consider that the link has come up successfully. + */ + new_phase(pcb, PPP_PHASE_RUNNING); + +#if PPP_IDLETIMELIMIT +#if 0 /* UNUSED */ + if (idle_time_hook != 0) + tlim = (*idle_time_hook)(NULL); + else +#endif /* UNUSED */ + tlim = pcb->settings.idle_time_limit; + if (tlim > 0) + TIMEOUT(check_idle, (void*)pcb, tlim); +#endif /* PPP_IDLETIMELIMIT */ + +#if PPP_MAXCONNECT + /* + * Set a timeout to close the connection once the maximum + * connect time has expired. + */ + if (pcb->settings.maxconnect > 0) + TIMEOUT(connect_time_expired, (void*)pcb, pcb->settings.maxconnect); +#endif /* PPP_MAXCONNECT */ + +#ifdef MAXOCTETS + if (maxoctets > 0) + TIMEOUT(check_maxoctets, NULL, maxoctets_timeout); +#endif + +#if 0 /* Unused */ + /* + * Detach now, if the updetach option was given. + */ + if (updetach && !nodetach) + detach(); +#endif /* Unused */ + } + ++pcb->num_np_up; +} + +/* + * np_down - a network protocol has gone down. + */ +void np_down(ppp_pcb *pcb, int proto) { + LWIP_UNUSED_ARG(proto); + if (--pcb->num_np_up == 0) { +#if PPP_IDLETIMELIMIT + UNTIMEOUT(check_idle, (void*)pcb); +#endif /* PPP_IDLETIMELIMIT */ +#if PPP_MAXCONNECT + UNTIMEOUT(connect_time_expired, NULL); +#endif /* PPP_MAXCONNECT */ +#ifdef MAXOCTETS + UNTIMEOUT(check_maxoctets, NULL); +#endif + new_phase(pcb, PPP_PHASE_NETWORK); + } +} + +/* + * np_finished - a network protocol has finished using the link. + */ +void np_finished(ppp_pcb *pcb, int proto) { + LWIP_UNUSED_ARG(proto); + if (--pcb->num_np_open <= 0) { + /* no further use for the link: shut up shop. */ + lcp_close(pcb, "No network protocols running"); + } +} + +#ifdef MAXOCTETS +static void +check_maxoctets(arg) + void *arg; +{ +#if PPP_STATS_SUPPORT + unsigned int used; + + update_link_stats(ifunit); + link_stats_valid=0; + + switch(maxoctets_dir) { + case PPP_OCTETS_DIRECTION_IN: + used = link_stats.bytes_in; + break; + case PPP_OCTETS_DIRECTION_OUT: + used = link_stats.bytes_out; + break; + case PPP_OCTETS_DIRECTION_MAXOVERAL: + case PPP_OCTETS_DIRECTION_MAXSESSION: + used = (link_stats.bytes_in > link_stats.bytes_out) ? link_stats.bytes_in : link_stats.bytes_out; + break; + default: + used = link_stats.bytes_in+link_stats.bytes_out; + break; + } + if (used > maxoctets) { + ppp_notice("Traffic limit reached. Limit: %u Used: %u", maxoctets, used); + status = EXIT_TRAFFIC_LIMIT; + lcp_close(pcb, "Traffic limit"); +#if 0 /* UNUSED */ + need_holdoff = 0; +#endif /* UNUSED */ + } else { + TIMEOUT(check_maxoctets, NULL, maxoctets_timeout); + } +#endif /* PPP_STATS_SUPPORT */ +} +#endif /* MAXOCTETS */ + +#if PPP_IDLETIMELIMIT +/* + * check_idle - check whether the link has been idle for long + * enough that we can shut it down. + */ +static void check_idle(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + struct ppp_idle idle; + time_t itime; + int tlim; + + if (!get_idle_time(pcb, &idle)) + return; +#if 0 /* UNUSED */ + if (idle_time_hook != 0) { + tlim = idle_time_hook(&idle); + } else { +#endif /* UNUSED */ + itime = LWIP_MIN(idle.xmit_idle, idle.recv_idle); + tlim = pcb->settings.idle_time_limit - itime; +#if 0 /* UNUSED */ + } +#endif /* UNUSED */ + if (tlim <= 0) { + /* link is idle: shut it down. 
*/ + ppp_notice("Terminating connection due to lack of activity."); + pcb->err_code = PPPERR_IDLETIMEOUT; + lcp_close(pcb, "Link inactive"); +#if 0 /* UNUSED */ + need_holdoff = 0; +#endif /* UNUSED */ + } else { + TIMEOUT(check_idle, (void*)pcb, tlim); + } +} +#endif /* PPP_IDLETIMELIMIT */ + +#if PPP_MAXCONNECT +/* + * connect_time_expired - log a message and close the connection. + */ +static void connect_time_expired(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + ppp_info("Connect time expired"); + pcb->err_code = PPPERR_CONNECTTIME; + lcp_close(pcb, "Connect time expired"); /* Close connection */ +} +#endif /* PPP_MAXCONNECT */ + +#if PPP_OPTIONS +/* + * auth_check_options - called to check authentication options. + */ +void +auth_check_options() +{ + lcp_options *wo = &lcp_wantoptions[0]; + int can_auth; + int lacks_ip; + + /* Default our_name to hostname, and user to our_name */ + if (our_name[0] == 0 || usehostname) + strlcpy(our_name, hostname, sizeof(our_name)); + /* If a blank username was explicitly given as an option, trust + the user and don't use our_name */ + if (ppp_settings.user[0] == 0 && !explicit_user) + strlcpy(ppp_settings.user, our_name, sizeof(ppp_settings.user)); + + /* + * If we have a default route, require the peer to authenticate + * unless the noauth option was given or the real user is root. + */ + if (!auth_required && !allow_any_ip && have_route_to(0) && !privileged) { + auth_required = 1; + default_auth = 1; + } + +#if CHAP_SUPPORT + /* If we selected any CHAP flavors, we should probably negotiate it. :-) */ + if (wo->chap_mdtype) + wo->neg_chap = 1; +#endif /* CHAP_SUPPORT */ + + /* If authentication is required, ask peer for CHAP, PAP, or EAP. */ + if (auth_required) { + allow_any_ip = 0; + if (1 +#if CHAP_SUPPORT + && !wo->neg_chap +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + && !wo->neg_upap +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + && !wo->neg_eap +#endif /* EAP_SUPPORT */ + ) { +#if CHAP_SUPPORT + wo->neg_chap = CHAP_MDTYPE_SUPPORTED != MDTYPE_NONE; + wo->chap_mdtype = CHAP_MDTYPE_SUPPORTED; +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + wo->neg_upap = 1; +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + wo->neg_eap = 1; +#endif /* EAP_SUPPORT */ + } + } else { +#if CHAP_SUPPORT + wo->neg_chap = 0; + wo->chap_mdtype = MDTYPE_NONE; +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + wo->neg_upap = 0; +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + wo->neg_eap = 0; +#endif /* EAP_SUPPORT */ + } + + /* + * Check whether we have appropriate secrets to use + * to authenticate the peer. Note that EAP can authenticate by way + * of a CHAP-like exchanges as well as SRP. + */ + lacks_ip = 0; +#if PAP_SUPPORT + can_auth = wo->neg_upap && (uselogin || have_pap_secret(&lacks_ip)); +#else + can_auth = 0; +#endif /* PAP_SUPPORT */ + if (!can_auth && (0 +#if CHAP_SUPPORT + || wo->neg_chap +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + || wo->neg_eap +#endif /* EAP_SUPPORT */ + )) { +#if CHAP_SUPPORT + can_auth = have_chap_secret((explicit_remote? remote_name: NULL), + our_name, 1, &lacks_ip); +#else + can_auth = 0; +#endif + } + if (!can_auth +#if EAP_SUPPORT + && wo->neg_eap +#endif /* EAP_SUPPORT */ + ) { + can_auth = have_srp_secret((explicit_remote? 
remote_name: NULL), + our_name, 1, &lacks_ip); + } + + if (auth_required && !can_auth && noauth_addrs == NULL) { + if (default_auth) { + option_error( +"By default the remote system is required to authenticate itself"); + option_error( +"(because this system has a default route to the internet)"); + } else if (explicit_remote) + option_error( +"The remote system (%s) is required to authenticate itself", + remote_name); + else + option_error( +"The remote system is required to authenticate itself"); + option_error( +"but I couldn't find any suitable secret (password) for it to use to do so."); + if (lacks_ip) + option_error( +"(None of the available passwords would let it use an IP address.)"); + + exit(1); + } + + /* + * Early check for remote number authorization. + */ + if (!auth_number()) { + ppp_warn("calling number %q is not authorized", remote_number); + exit(EXIT_CNID_AUTH_FAILED); + } +} +#endif /* PPP_OPTIONS */ + +#if 0 /* UNUSED */ +/* + * auth_reset - called when LCP is starting negotiations to recheck + * authentication options, i.e. whether we have appropriate secrets + * to use for authenticating ourselves and/or the peer. + */ +void +auth_reset(unit) + int unit; +{ + lcp_options *go = &lcp_gotoptions[unit]; + lcp_options *ao = &lcp_allowoptions[unit]; + int hadchap; + + hadchap = -1; + ao->neg_upap = !refuse_pap && (passwd[0] != 0 || get_pap_passwd(NULL)); + ao->neg_chap = (!refuse_chap || !refuse_mschap || !refuse_mschap_v2) + && (passwd[0] != 0 || + (hadchap = have_chap_secret(user, (explicit_remote? remote_name: + NULL), 0, NULL))); + ao->neg_eap = !refuse_eap && ( + passwd[0] != 0 || + (hadchap == 1 || (hadchap == -1 && have_chap_secret(user, + (explicit_remote? remote_name: NULL), 0, NULL))) || + have_srp_secret(user, (explicit_remote? remote_name: NULL), 0, NULL)); + + hadchap = -1; + if (go->neg_upap && !uselogin && !have_pap_secret(NULL)) + go->neg_upap = 0; + if (go->neg_chap) { + if (!(hadchap = have_chap_secret((explicit_remote? remote_name: NULL), + our_name, 1, NULL))) + go->neg_chap = 0; + } + if (go->neg_eap && + (hadchap == 0 || (hadchap == -1 && + !have_chap_secret((explicit_remote? remote_name: NULL), our_name, + 1, NULL))) && + !have_srp_secret((explicit_remote? remote_name: NULL), our_name, 1, + NULL)) + go->neg_eap = 0; +} + +/* + * check_passwd - Check the user name and passwd against the PAP secrets + * file. If requested, also check against the system password database, + * and login the user if OK. + * + * returns: + * UPAP_AUTHNAK: Authentication failed. + * UPAP_AUTHACK: Authentication succeeded. + * In either case, msg points to an appropriate message. + */ +int +check_passwd(unit, auser, userlen, apasswd, passwdlen, msg) + int unit; + char *auser; + int userlen; + char *apasswd; + int passwdlen; + char **msg; +{ + return UPAP_AUTHNAK; + int ret; + char *filename; + FILE *f; + struct wordlist *addrs = NULL, *opts = NULL; + char passwd[256], user[256]; + char secret[MAXWORDLEN]; + static int attempts = 0; + + /* + * Make copies of apasswd and auser, then null-terminate them. + * If there are unprintable characters in the password, make + * them visible. + */ + slprintf(ppp_settings.passwd, sizeof(ppp_settings.passwd), "%.*v", passwdlen, apasswd); + slprintf(ppp_settings.user, sizeof(ppp_settings.user), "%.*v", userlen, auser); + *msg = ""; + + /* + * Check if a plugin wants to handle this. 
+ */ + if (pap_auth_hook) { + ret = (*pap_auth_hook)(ppp_settings.user, ppp_settings.passwd, msg, &addrs, &opts); + if (ret >= 0) { + /* note: set_allowed_addrs() saves opts (but not addrs): + don't free it! */ + if (ret) + set_allowed_addrs(unit, addrs, opts); + else if (opts != 0) + free_wordlist(opts); + if (addrs != 0) + free_wordlist(addrs); + BZERO(ppp_settings.passwd, sizeof(ppp_settings.passwd)); + return ret? UPAP_AUTHACK: UPAP_AUTHNAK; + } + } + + /* + * Open the file of pap secrets and scan for a suitable secret + * for authenticating this user. + */ + filename = _PATH_UPAPFILE; + addrs = opts = NULL; + ret = UPAP_AUTHNAK; + f = fopen(filename, "r"); + if (f == NULL) { + ppp_error("Can't open PAP password file %s: %m", filename); + + } else { + check_access(f, filename); + if (scan_authfile(f, ppp_settings.user, our_name, secret, &addrs, &opts, filename, 0) < 0) { + ppp_warn("no PAP secret found for %s", user); + } else { + /* + * If the secret is "@login", it means to check + * the password against the login database. + */ + int login_secret = strcmp(secret, "@login") == 0; + ret = UPAP_AUTHACK; + if (uselogin || login_secret) { + /* login option or secret is @login */ + if (session_full(ppp_settings.user, ppp_settings.passwd, devnam, msg) == 0) { + ret = UPAP_AUTHNAK; + } + } else if (session_mgmt) { + if (session_check(ppp_settings.user, NULL, devnam, NULL) == 0) { + ppp_warn("Peer %q failed PAP Session verification", user); + ret = UPAP_AUTHNAK; + } + } + if (secret[0] != 0 && !login_secret) { + /* password given in pap-secrets - must match */ + if ((cryptpap || strcmp(ppp_settings.passwd, secret) != 0) + && strcmp(crypt(ppp_settings.passwd, secret), secret) != 0) + ret = UPAP_AUTHNAK; + } + } + fclose(f); + } + + if (ret == UPAP_AUTHNAK) { + if (**msg == 0) + *msg = "Login incorrect"; + /* + * XXX can we ever get here more than once?? + * Frustrate passwd stealer programs. + * Allow 10 tries, but start backing off after 3 (stolen from login). + * On 10'th, drop the connection. + */ + if (attempts++ >= 10) { + ppp_warn("%d LOGIN FAILURES ON %s, %s", attempts, devnam, user); + lcp_close(pcb, "login failed"); + } + if (attempts > 3) + sleep((u_int) (attempts - 3) * 5); + if (opts != NULL) + free_wordlist(opts); + + } else { + attempts = 0; /* Reset count */ + if (**msg == 0) + *msg = "Login ok"; + set_allowed_addrs(unit, addrs, opts); + } + + if (addrs != NULL) + free_wordlist(addrs); + BZERO(ppp_settings.passwd, sizeof(ppp_settings.passwd)); + BZERO(secret, sizeof(secret)); + + return ret; +} + +/* + * null_login - Check if a username of "" and a password of "" are + * acceptable, and iff so, set the list of acceptable IP addresses + * and return 1. + */ +static int +null_login(unit) + int unit; +{ + char *filename; + FILE *f; + int i, ret; + struct wordlist *addrs, *opts; + char secret[MAXWORDLEN]; + + /* + * Check if a plugin wants to handle this. + */ + ret = -1; + if (null_auth_hook) + ret = (*null_auth_hook)(&addrs, &opts); + + /* + * Open the file of pap secrets and scan for a suitable secret. 
+ */ + if (ret <= 0) { + filename = _PATH_UPAPFILE; + addrs = NULL; + f = fopen(filename, "r"); + if (f == NULL) + return 0; + check_access(f, filename); + + i = scan_authfile(f, "", our_name, secret, &addrs, &opts, filename, 0); + ret = i >= 0 && secret[0] == 0; + BZERO(secret, sizeof(secret)); + fclose(f); + } + + if (ret) + set_allowed_addrs(unit, addrs, opts); + else if (opts != 0) + free_wordlist(opts); + if (addrs != 0) + free_wordlist(addrs); + + return ret; +} + +/* + * get_pap_passwd - get a password for authenticating ourselves with + * our peer using PAP. Returns 1 on success, 0 if no suitable password + * could be found. + * Assumes passwd points to MAXSECRETLEN bytes of space (if non-null). + */ +static int +get_pap_passwd(passwd) + char *passwd; +{ + char *filename; + FILE *f; + int ret; + char secret[MAXWORDLEN]; + + /* + * Check whether a plugin wants to supply this. + */ + if (pap_passwd_hook) { + ret = (*pap_passwd_hook)(ppp_settings,user, ppp_settings.passwd); + if (ret >= 0) + return ret; + } + + filename = _PATH_UPAPFILE; + f = fopen(filename, "r"); + if (f == NULL) + return 0; + check_access(f, filename); + ret = scan_authfile(f, user, + (remote_name[0]? remote_name: NULL), + secret, NULL, NULL, filename, 0); + fclose(f); + if (ret < 0) + return 0; + if (passwd != NULL) + strlcpy(passwd, secret, MAXSECRETLEN); + BZERO(secret, sizeof(secret)); + return 1; +} + +/* + * have_pap_secret - check whether we have a PAP file with any + * secrets that we could possibly use for authenticating the peer. + */ +static int +have_pap_secret(lacks_ipp) + int *lacks_ipp; +{ + FILE *f; + int ret; + char *filename; + struct wordlist *addrs; + + /* let the plugin decide, if there is one */ + if (pap_check_hook) { + ret = (*pap_check_hook)(); + if (ret >= 0) + return ret; + } + + filename = _PATH_UPAPFILE; + f = fopen(filename, "r"); + if (f == NULL) + return 0; + + ret = scan_authfile(f, (explicit_remote? remote_name: NULL), our_name, + NULL, &addrs, NULL, filename, 0); + fclose(f); + if (ret >= 0 && !some_ip_ok(addrs)) { + if (lacks_ipp != 0) + *lacks_ipp = 1; + ret = -1; + } + if (addrs != 0) + free_wordlist(addrs); + + return ret >= 0; +} + +/* + * have_chap_secret - check whether we have a CHAP file with a + * secret that we could possibly use for authenticating `client' + * on `server'. Either can be the null string, meaning we don't + * know the identity yet. + */ +static int +have_chap_secret(client, server, need_ip, lacks_ipp) + char *client; + char *server; + int need_ip; + int *lacks_ipp; +{ + FILE *f; + int ret; + char *filename; + struct wordlist *addrs; + + if (chap_check_hook) { + ret = (*chap_check_hook)(); + if (ret >= 0) { + return ret; + } + } + + filename = _PATH_CHAPFILE; + f = fopen(filename, "r"); + if (f == NULL) + return 0; + + if (client != NULL && client[0] == 0) + client = NULL; + else if (server != NULL && server[0] == 0) + server = NULL; + + ret = scan_authfile(f, client, server, NULL, &addrs, NULL, filename, 0); + fclose(f); + if (ret >= 0 && need_ip && !some_ip_ok(addrs)) { + if (lacks_ipp != 0) + *lacks_ipp = 1; + ret = -1; + } + if (addrs != 0) + free_wordlist(addrs); + + return ret >= 0; +} + +/* + * have_srp_secret - check whether we have a SRP file with a + * secret that we could possibly use for authenticating `client' + * on `server'. Either can be the null string, meaning we don't + * know the identity yet. 
+ */ +static int +have_srp_secret(client, server, need_ip, lacks_ipp) + char *client; + char *server; + int need_ip; + int *lacks_ipp; +{ + FILE *f; + int ret; + char *filename; + struct wordlist *addrs; + + filename = _PATH_SRPFILE; + f = fopen(filename, "r"); + if (f == NULL) + return 0; + + if (client != NULL && client[0] == 0) + client = NULL; + else if (server != NULL && server[0] == 0) + server = NULL; + + ret = scan_authfile(f, client, server, NULL, &addrs, NULL, filename, 0); + fclose(f); + if (ret >= 0 && need_ip && !some_ip_ok(addrs)) { + if (lacks_ipp != 0) + *lacks_ipp = 1; + ret = -1; + } + if (addrs != 0) + free_wordlist(addrs); + + return ret >= 0; +} +#endif /* UNUSED */ + +#if PPP_AUTH_SUPPORT +/* + * get_secret - open the CHAP secret file and return the secret + * for authenticating the given client on the given server. + * (We could be either client or server). + */ +int get_secret(ppp_pcb *pcb, const char *client, const char *server, char *secret, int *secret_len, int am_server) { + int len; + LWIP_UNUSED_ARG(server); + LWIP_UNUSED_ARG(am_server); + + if (!client || !client[0] || !pcb->settings.user || !pcb->settings.passwd || strcmp(client, pcb->settings.user)) { + return 0; + } + + len = (int)strlen(pcb->settings.passwd); + if (len > MAXSECRETLEN) { + ppp_error("Secret for %s on %s is too long", client, server); + len = MAXSECRETLEN; + } + + MEMCPY(secret, pcb->settings.passwd, len); + *secret_len = len; + return 1; + +#if 0 /* UNUSED */ + FILE *f; + int ret, len; + char *filename; + struct wordlist *addrs, *opts; + char secbuf[MAXWORDLEN]; + struct wordlist *addrs; + addrs = NULL; + + if (!am_server && ppp_settings.passwd[0] != 0) { + strlcpy(secbuf, ppp_settings.passwd, sizeof(secbuf)); + } else if (!am_server && chap_passwd_hook) { + if ( (*chap_passwd_hook)(client, secbuf) < 0) { + ppp_error("Unable to obtain CHAP password for %s on %s from plugin", + client, server); + return 0; + } + } else { + filename = _PATH_CHAPFILE; + addrs = NULL; + secbuf[0] = 0; + + f = fopen(filename, "r"); + if (f == NULL) { + ppp_error("Can't open chap secret file %s: %m", filename); + return 0; + } + check_access(f, filename); + + ret = scan_authfile(f, client, server, secbuf, &addrs, &opts, filename, 0); + fclose(f); + if (ret < 0) + return 0; + + if (am_server) + set_allowed_addrs(unit, addrs, opts); + else if (opts != 0) + free_wordlist(opts); + if (addrs != 0) + free_wordlist(addrs); + } + + len = strlen(secbuf); + if (len > MAXSECRETLEN) { + ppp_error("Secret for %s on %s is too long", client, server); + len = MAXSECRETLEN; + } + MEMCPY(secret, secbuf, len); + BZERO(secbuf, sizeof(secbuf)); + *secret_len = len; + + return 1; +#endif /* UNUSED */ +} +#endif /* PPP_AUTH_SUPPORT */ + + +#if 0 /* UNUSED */ +/* + * get_srp_secret - open the SRP secret file and return the secret + * for authenticating the given client on the given server. + * (We could be either client or server). 
+ */ +int +get_srp_secret(unit, client, server, secret, am_server) + int unit; + char *client; + char *server; + char *secret; + int am_server; +{ + FILE *fp; + int ret; + char *filename; + struct wordlist *addrs, *opts; + + if (!am_server && ppp_settings.passwd[0] != '\0') { + strlcpy(secret, ppp_settings.passwd, MAXWORDLEN); + } else { + filename = _PATH_SRPFILE; + addrs = NULL; + + fp = fopen(filename, "r"); + if (fp == NULL) { + ppp_error("Can't open srp secret file %s: %m", filename); + return 0; + } + check_access(fp, filename); + + secret[0] = '\0'; + ret = scan_authfile(fp, client, server, secret, &addrs, &opts, + filename, am_server); + fclose(fp); + if (ret < 0) + return 0; + + if (am_server) + set_allowed_addrs(unit, addrs, opts); + else if (opts != NULL) + free_wordlist(opts); + if (addrs != NULL) + free_wordlist(addrs); + } + + return 1; +} + +/* + * set_allowed_addrs() - set the list of allowed addresses. + * Also looks for `--' indicating options to apply for this peer + * and leaves the following words in extra_options. + */ +static void +set_allowed_addrs(unit, addrs, opts) + int unit; + struct wordlist *addrs; + struct wordlist *opts; +{ + int n; + struct wordlist *ap, **plink; + struct permitted_ip *ip; + char *ptr_word, *ptr_mask; + struct hostent *hp; + struct netent *np; + u32_t a, mask, ah, offset; + struct ipcp_options *wo = &ipcp_wantoptions[unit]; + u32_t suggested_ip = 0; + + if (addresses[unit] != NULL) + free(addresses[unit]); + addresses[unit] = NULL; + if (extra_options != NULL) + free_wordlist(extra_options); + extra_options = opts; + + /* + * Count the number of IP addresses given. + */ + n = wordlist_count(addrs) + wordlist_count(noauth_addrs); + if (n == 0) + return; + ip = (struct permitted_ip *) malloc((n + 1) * sizeof(struct permitted_ip)); + if (ip == 0) + return; + + /* temporarily append the noauth_addrs list to addrs */ + for (plink = &addrs; *plink != NULL; plink = &(*plink)->next) + ; + *plink = noauth_addrs; + + n = 0; + for (ap = addrs; ap != NULL; ap = ap->next) { + /* "-" means no addresses authorized, "*" means any address allowed */ + ptr_word = ap->word; + if (strcmp(ptr_word, "-") == 0) + break; + if (strcmp(ptr_word, "*") == 0) { + ip[n].permit = 1; + ip[n].base = ip[n].mask = 0; + ++n; + break; + } + + ip[n].permit = 1; + if (*ptr_word == '!') { + ip[n].permit = 0; + ++ptr_word; + } + + mask = ~ (u32_t) 0; + offset = 0; + ptr_mask = strchr (ptr_word, '/'); + if (ptr_mask != NULL) { + int bit_count; + char *endp; + + bit_count = (int) strtol (ptr_mask+1, &endp, 10); + if (bit_count <= 0 || bit_count > 32) { + ppp_warn("invalid address length %v in auth. 
address list", + ptr_mask+1); + continue; + } + bit_count = 32 - bit_count; /* # bits in host part */ + if (*endp == '+') { + offset = ifunit + 1; + ++endp; + } + if (*endp != 0) { + ppp_warn("invalid address length syntax: %v", ptr_mask+1); + continue; + } + *ptr_mask = '\0'; + mask <<= bit_count; + } + + hp = gethostbyname(ptr_word); + if (hp != NULL && hp->h_addrtype == AF_INET) { + a = *(u32_t *)hp->h_addr; + } else { + np = getnetbyname (ptr_word); + if (np != NULL && np->n_addrtype == AF_INET) { + a = lwip_htonl ((u32_t)np->n_net); + if (ptr_mask == NULL) { + /* calculate appropriate mask for net */ + ah = lwip_ntohl(a); + if (IN_CLASSA(ah)) + mask = IN_CLASSA_NET; + else if (IN_CLASSB(ah)) + mask = IN_CLASSB_NET; + else if (IN_CLASSC(ah)) + mask = IN_CLASSC_NET; + } + } else { + a = inet_addr (ptr_word); + } + } + + if (ptr_mask != NULL) + *ptr_mask = '/'; + + if (a == (u32_t)-1L) { + ppp_warn("unknown host %s in auth. address list", ap->word); + continue; + } + if (offset != 0) { + if (offset >= ~mask) { + ppp_warn("interface unit %d too large for subnet %v", + ifunit, ptr_word); + continue; + } + a = lwip_htonl((lwip_ntohl(a) & mask) + offset); + mask = ~(u32_t)0; + } + ip[n].mask = lwip_htonl(mask); + ip[n].base = a & ip[n].mask; + ++n; + if (~mask == 0 && suggested_ip == 0) + suggested_ip = a; + } + *plink = NULL; + + ip[n].permit = 0; /* make the last entry forbid all addresses */ + ip[n].base = 0; /* to terminate the list */ + ip[n].mask = 0; + + addresses[unit] = ip; + + /* + * If the address given for the peer isn't authorized, or if + * the user hasn't given one, AND there is an authorized address + * which is a single host, then use that if we find one. + */ + if (suggested_ip != 0 + && (wo->hisaddr == 0 || !auth_ip_addr(unit, wo->hisaddr))) { + wo->hisaddr = suggested_ip; + /* + * Do we insist on this address? No, if there are other + * addresses authorized than the suggested one. + */ + if (n > 1) + wo->accept_remote = 1; + } +} + +/* + * auth_ip_addr - check whether the peer is authorized to use + * a given IP address. Returns 1 if authorized, 0 otherwise. + */ +int +auth_ip_addr(unit, addr) + int unit; + u32_t addr; +{ + int ok; + + /* don't allow loopback or multicast address */ + if (bad_ip_adrs(addr)) + return 0; + + if (allowed_address_hook) { + ok = allowed_address_hook(addr); + if (ok >= 0) return ok; + } + + if (addresses[unit] != NULL) { + ok = ip_addr_check(addr, addresses[unit]); + if (ok >= 0) + return ok; + } + + if (auth_required) + return 0; /* no addresses authorized */ + return allow_any_ip || privileged || !have_route_to(addr); +} + +static int +ip_addr_check(addr, addrs) + u32_t addr; + struct permitted_ip *addrs; +{ + for (; ; ++addrs) + if ((addr & addrs->mask) == addrs->base) + return addrs->permit; +} + +/* + * bad_ip_adrs - return 1 if the IP address is one we don't want + * to use, such as an address in the loopback net or a multicast address. + * addr is in network byte order. + */ +int +bad_ip_adrs(addr) + u32_t addr; +{ + addr = lwip_ntohl(addr); + return (addr >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET + || IN_MULTICAST(addr) || IN_BADCLASS(addr); +} + +/* + * some_ip_ok - check a wordlist to see if it authorizes any + * IP address(es). 
+ */ +static int +some_ip_ok(addrs) + struct wordlist *addrs; +{ + for (; addrs != 0; addrs = addrs->next) { + if (addrs->word[0] == '-') + break; + if (addrs->word[0] != '!') + return 1; /* some IP address is allowed */ + } + return 0; +} + +/* + * auth_number - check whether the remote number is allowed to connect. + * Returns 1 if authorized, 0 otherwise. + */ +int +auth_number() +{ + struct wordlist *wp = permitted_numbers; + int l; + + /* Allow all if no authorization list. */ + if (!wp) + return 1; + + /* Allow if we have a match in the authorization list. */ + while (wp) { + /* trailing '*' wildcard */ + l = strlen(wp->word); + if ((wp->word)[l - 1] == '*') + l--; + if (!strncasecmp(wp->word, remote_number, l)) + return 1; + wp = wp->next; + } + + return 0; +} + +/* + * check_access - complain if a secret file has too-liberal permissions. + */ +static void +check_access(f, filename) + FILE *f; + char *filename; +{ + struct stat sbuf; + + if (fstat(fileno(f), &sbuf) < 0) { + ppp_warn("cannot stat secret file %s: %m", filename); + } else if ((sbuf.st_mode & (S_IRWXG | S_IRWXO)) != 0) { + ppp_warn("Warning - secret file %s has world and/or group access", + filename); + } +} + +/* + * scan_authfile - Scan an authorization file for a secret suitable + * for authenticating `client' on `server'. The return value is -1 + * if no secret is found, otherwise >= 0. The return value has + * NONWILD_CLIENT set if the secret didn't have "*" for the client, and + * NONWILD_SERVER set if the secret didn't have "*" for the server. + * Any following words on the line up to a "--" (i.e. address authorization + * info) are placed in a wordlist and returned in *addrs. Any + * following words (extra options) are placed in a wordlist and + * returned in *opts. + * We assume secret is NULL or points to MAXWORDLEN bytes of space. + * Flags are non-zero if we need two colons in the secret in order to + * match. + */ +static int +scan_authfile(f, client, server, secret, addrs, opts, filename, flags) + FILE *f; + char *client; + char *server; + char *secret; + struct wordlist **addrs; + struct wordlist **opts; + char *filename; + int flags; +{ + int newline, xxx; + int got_flag, best_flag; + FILE *sf; + struct wordlist *ap, *addr_list, *alist, **app; + char word[MAXWORDLEN]; + char atfile[MAXWORDLEN]; + char lsecret[MAXWORDLEN]; + char *cp; + + if (addrs != NULL) + *addrs = NULL; + if (opts != NULL) + *opts = NULL; + addr_list = NULL; + if (!getword(f, word, &newline, filename)) + return -1; /* file is empty??? */ + newline = 1; + best_flag = -1; + for (;;) { + /* + * Skip until we find a word at the start of a line. + */ + while (!newline && getword(f, word, &newline, filename)) + ; + if (!newline) + break; /* got to end of file */ + + /* + * Got a client - check if it's a match or a wildcard. + */ + got_flag = 0; + if (client != NULL && strcmp(word, client) != 0 && !ISWILD(word)) { + newline = 0; + continue; + } + if (!ISWILD(word)) + got_flag = NONWILD_CLIENT; + + /* + * Now get a server and check if it matches. + */ + if (!getword(f, word, &newline, filename)) + break; + if (newline) + continue; + if (!ISWILD(word)) { + if (server != NULL && strcmp(word, server) != 0) + continue; + got_flag |= NONWILD_SERVER; + } + + /* + * Got some sort of a match - see if it's better than what + * we have already. + */ + if (got_flag <= best_flag) + continue; + + /* + * Get the secret. 
+ */ + if (!getword(f, word, &newline, filename)) + break; + if (newline) + continue; + + /* + * SRP-SHA1 authenticator should never be reading secrets from + * a file. (Authenticatee may, though.) + */ + if (flags && ((cp = strchr(word, ':')) == NULL || + strchr(cp + 1, ':') == NULL)) + continue; + + if (secret != NULL) { + /* + * Special syntax: @/pathname means read secret from file. + */ + if (word[0] == '@' && word[1] == '/') { + strlcpy(atfile, word+1, sizeof(atfile)); + if ((sf = fopen(atfile, "r")) == NULL) { + ppp_warn("can't open indirect secret file %s", atfile); + continue; + } + check_access(sf, atfile); + if (!getword(sf, word, &xxx, atfile)) { + ppp_warn("no secret in indirect secret file %s", atfile); + fclose(sf); + continue; + } + fclose(sf); + } + strlcpy(lsecret, word, sizeof(lsecret)); + } + + /* + * Now read address authorization info and make a wordlist. + */ + app = &alist; + for (;;) { + if (!getword(f, word, &newline, filename) || newline) + break; + ap = (struct wordlist *) + malloc(sizeof(struct wordlist) + strlen(word) + 1); + if (ap == NULL) + novm("authorized addresses"); + ap->word = (char *) (ap + 1); + strcpy(ap->word, word); + *app = ap; + app = &ap->next; + } + *app = NULL; + + /* + * This is the best so far; remember it. + */ + best_flag = got_flag; + if (addr_list) + free_wordlist(addr_list); + addr_list = alist; + if (secret != NULL) + strlcpy(secret, lsecret, MAXWORDLEN); + + if (!newline) + break; + } + + /* scan for a -- word indicating the start of options */ + for (app = &addr_list; (ap = *app) != NULL; app = &ap->next) + if (strcmp(ap->word, "--") == 0) + break; + /* ap = start of options */ + if (ap != NULL) { + ap = ap->next; /* first option */ + free(*app); /* free the "--" word */ + *app = NULL; /* terminate addr list */ + } + if (opts != NULL) + *opts = ap; + else if (ap != NULL) + free_wordlist(ap); + if (addrs != NULL) + *addrs = addr_list; + else if (addr_list != NULL) + free_wordlist(addr_list); + + return best_flag; +} + +/* + * wordlist_count - return the number of items in a wordlist + */ +static int +wordlist_count(wp) + struct wordlist *wp; +{ + int n; + + for (n = 0; wp != NULL; wp = wp->next) + ++n; + return n; +} + +/* + * free_wordlist - release memory allocated for a wordlist. + */ +static void +free_wordlist(wp) + struct wordlist *wp; +{ + struct wordlist *next; + + while (wp != NULL) { + next = wp->next; + free(wp); + wp = next; + } +} +#endif /* UNUSED */ + +#endif /* PPP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/ccp.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/ccp.c new file mode 100644 index 0000000..f8519eb --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/ccp.c @@ -0,0 +1,1740 @@ +/* + * ccp.c - PPP Compression Control Protocol. + * + * Copyright (c) 1994-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". 
+ * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && CCP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include +#include + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/ccp.h" + +#if MPPE_SUPPORT +#include "netif/ppp/lcp.h" /* lcp_close(), lcp_fsm */ +#include "netif/ppp/mppe.h" /* mppe_init() */ +#endif /* MPPE_SUPPORT */ + +/* + * Unfortunately there is a bug in zlib which means that using a + * size of 8 (window size = 256) for Deflate compression will cause + * buffer overruns and kernel crashes in the deflate module. + * Until this is fixed we only accept sizes in the range 9 .. 15. + * Thanks to James Carlson for pointing this out. + */ +#define DEFLATE_MIN_WORKS 9 + +/* + * Command-line options. + */ +#if PPP_OPTIONS +static int setbsdcomp (char **); +static int setdeflate (char **); +static char bsd_value[8]; +static char deflate_value[8]; + +/* + * Option variables. + */ +#if MPPE_SUPPORT +bool refuse_mppe_stateful = 1; /* Allow stateful mode? */ +#endif /* MPPE_SUPPORT */ + +static option_t ccp_option_list[] = { + { "noccp", o_bool, &ccp_protent.enabled_flag, + "Disable CCP negotiation" }, + { "-ccp", o_bool, &ccp_protent.enabled_flag, + "Disable CCP negotiation", OPT_ALIAS }, + + { "bsdcomp", o_special, (void *)setbsdcomp, + "Request BSD-Compress packet compression", + OPT_PRIO | OPT_A2STRVAL | OPT_STATIC, bsd_value }, + { "nobsdcomp", o_bool, &ccp_wantoptions[0].bsd_compress, + "don't allow BSD-Compress", OPT_PRIOSUB | OPT_A2CLR, + &ccp_allowoptions[0].bsd_compress }, + { "-bsdcomp", o_bool, &ccp_wantoptions[0].bsd_compress, + "don't allow BSD-Compress", OPT_ALIAS | OPT_PRIOSUB | OPT_A2CLR, + &ccp_allowoptions[0].bsd_compress }, + + { "deflate", o_special, (void *)setdeflate, + "request Deflate compression", + OPT_PRIO | OPT_A2STRVAL | OPT_STATIC, deflate_value }, + { "nodeflate", o_bool, &ccp_wantoptions[0].deflate, + "don't allow Deflate compression", OPT_PRIOSUB | OPT_A2CLR, + &ccp_allowoptions[0].deflate }, + { "-deflate", o_bool, &ccp_wantoptions[0].deflate, + "don't allow Deflate compression", OPT_ALIAS | OPT_PRIOSUB | OPT_A2CLR, + &ccp_allowoptions[0].deflate }, + + { "nodeflatedraft", o_bool, &ccp_wantoptions[0].deflate_draft, + "don't use draft deflate #", OPT_A2COPY, + &ccp_allowoptions[0].deflate_draft }, + + { "predictor1", o_bool, &ccp_wantoptions[0].predictor_1, + "request Predictor-1", OPT_PRIO | 1 }, + { "nopredictor1", o_bool, &ccp_wantoptions[0].predictor_1, + "don't allow Predictor-1", OPT_PRIOSUB | OPT_A2CLR, + &ccp_allowoptions[0].predictor_1 }, + { "-predictor1", o_bool, &ccp_wantoptions[0].predictor_1, + "don't allow Predictor-1", OPT_ALIAS | OPT_PRIOSUB | OPT_A2CLR, + &ccp_allowoptions[0].predictor_1 }, + +#if MPPE_SUPPORT + /* MPPE options are symmetrical ... 
we only set wantoptions here */ + { "require-mppe", o_bool, &ccp_wantoptions[0].mppe, + "require MPPE encryption", + OPT_PRIO | MPPE_OPT_40 | MPPE_OPT_128 }, + { "+mppe", o_bool, &ccp_wantoptions[0].mppe, + "require MPPE encryption", + OPT_ALIAS | OPT_PRIO | MPPE_OPT_40 | MPPE_OPT_128 }, + { "nomppe", o_bool, &ccp_wantoptions[0].mppe, + "don't allow MPPE encryption", OPT_PRIO }, + { "-mppe", o_bool, &ccp_wantoptions[0].mppe, + "don't allow MPPE encryption", OPT_ALIAS | OPT_PRIO }, + + /* We use ccp_allowoptions[0].mppe as a junk var ... it is reset later */ + { "require-mppe-40", o_bool, &ccp_allowoptions[0].mppe, + "require MPPE 40-bit encryption", OPT_PRIO | OPT_A2OR | MPPE_OPT_40, + &ccp_wantoptions[0].mppe }, + { "+mppe-40", o_bool, &ccp_allowoptions[0].mppe, + "require MPPE 40-bit encryption", OPT_PRIO | OPT_A2OR | MPPE_OPT_40, + &ccp_wantoptions[0].mppe }, + { "nomppe-40", o_bool, &ccp_allowoptions[0].mppe, + "don't allow MPPE 40-bit encryption", + OPT_PRIOSUB | OPT_A2CLRB | MPPE_OPT_40, &ccp_wantoptions[0].mppe }, + { "-mppe-40", o_bool, &ccp_allowoptions[0].mppe, + "don't allow MPPE 40-bit encryption", + OPT_ALIAS | OPT_PRIOSUB | OPT_A2CLRB | MPPE_OPT_40, + &ccp_wantoptions[0].mppe }, + + { "require-mppe-128", o_bool, &ccp_allowoptions[0].mppe, + "require MPPE 128-bit encryption", OPT_PRIO | OPT_A2OR | MPPE_OPT_128, + &ccp_wantoptions[0].mppe }, + { "+mppe-128", o_bool, &ccp_allowoptions[0].mppe, + "require MPPE 128-bit encryption", + OPT_ALIAS | OPT_PRIO | OPT_A2OR | MPPE_OPT_128, + &ccp_wantoptions[0].mppe }, + { "nomppe-128", o_bool, &ccp_allowoptions[0].mppe, + "don't allow MPPE 128-bit encryption", + OPT_PRIOSUB | OPT_A2CLRB | MPPE_OPT_128, &ccp_wantoptions[0].mppe }, + { "-mppe-128", o_bool, &ccp_allowoptions[0].mppe, + "don't allow MPPE 128-bit encryption", + OPT_ALIAS | OPT_PRIOSUB | OPT_A2CLRB | MPPE_OPT_128, + &ccp_wantoptions[0].mppe }, + + /* strange one; we always request stateless, but will we allow stateful? */ + { "mppe-stateful", o_bool, &refuse_mppe_stateful, + "allow MPPE stateful mode", OPT_PRIO }, + { "nomppe-stateful", o_bool, &refuse_mppe_stateful, + "disallow MPPE stateful mode", OPT_PRIO | 1 }, +#endif /* MPPE_SUPPORT */ + + { NULL } +}; +#endif /* PPP_OPTIONS */ + +/* + * Protocol entry points from main code. + */ +static void ccp_init(ppp_pcb *pcb); +static void ccp_open(ppp_pcb *pcb); +static void ccp_close(ppp_pcb *pcb, const char *reason); +static void ccp_lowerup(ppp_pcb *pcb); +static void ccp_lowerdown(ppp_pcb *pcb); +static void ccp_input(ppp_pcb *pcb, u_char *pkt, int len); +static void ccp_protrej(ppp_pcb *pcb); +#if PRINTPKT_SUPPORT +static int ccp_printpkt(const u_char *p, int plen, void (*printer) (void *, const char *, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT +static void ccp_datainput(ppp_pcb *pcb, u_char *pkt, int len); +#endif /* PPP_DATAINPUT */ + +const struct protent ccp_protent = { + PPP_CCP, + ccp_init, + ccp_input, + ccp_protrej, + ccp_lowerup, + ccp_lowerdown, + ccp_open, + ccp_close, +#if PRINTPKT_SUPPORT + ccp_printpkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + ccp_datainput, +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "CCP", + "Compressed", +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + ccp_option_list, + NULL, +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + NULL, + NULL +#endif /* DEMAND_SUPPORT */ +}; + +/* + * Callbacks for fsm code. 
+ */ +static void ccp_resetci (fsm *); +static int ccp_cilen (fsm *); +static void ccp_addci (fsm *, u_char *, int *); +static int ccp_ackci (fsm *, u_char *, int); +static int ccp_nakci (fsm *, u_char *, int, int); +static int ccp_rejci (fsm *, u_char *, int); +static int ccp_reqci (fsm *, u_char *, int *, int); +static void ccp_up (fsm *); +static void ccp_down (fsm *); +static int ccp_extcode (fsm *, int, int, u_char *, int); +static void ccp_rack_timeout (void *); +static const char *method_name (ccp_options *, ccp_options *); + +static const fsm_callbacks ccp_callbacks = { + ccp_resetci, + ccp_cilen, + ccp_addci, + ccp_ackci, + ccp_nakci, + ccp_rejci, + ccp_reqci, + ccp_up, + ccp_down, + NULL, + NULL, + NULL, + NULL, + ccp_extcode, + "CCP" +}; + +/* + * Do we want / did we get any compression? + */ +static int ccp_anycompress(ccp_options *opt) { + return (0 +#if DEFLATE_SUPPORT + || (opt)->deflate +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + || (opt)->bsd_compress +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + || (opt)->predictor_1 || (opt)->predictor_2 +#endif /* PREDICTOR_SUPPORT */ +#if MPPE_SUPPORT + || (opt)->mppe +#endif /* MPPE_SUPPORT */ + ); +} + +/* + * Local state (mainly for handling reset-reqs and reset-acks). + */ +#define RACK_PENDING 1 /* waiting for reset-ack */ +#define RREQ_REPEAT 2 /* send another reset-req if no reset-ack */ + +#define RACKTIMEOUT 1 /* second */ + +#if PPP_OPTIONS +/* + * Option parsing + */ +static int +setbsdcomp(argv) + char **argv; +{ + int rbits, abits; + char *str, *endp; + + str = *argv; + abits = rbits = strtol(str, &endp, 0); + if (endp != str && *endp == ',') { + str = endp + 1; + abits = strtol(str, &endp, 0); + } + if (*endp != 0 || endp == str) { + option_error("invalid parameter '%s' for bsdcomp option", *argv); + return 0; + } + if ((rbits != 0 && (rbits < BSD_MIN_BITS || rbits > BSD_MAX_BITS)) + || (abits != 0 && (abits < BSD_MIN_BITS || abits > BSD_MAX_BITS))) { + option_error("bsdcomp option values must be 0 or %d .. %d", + BSD_MIN_BITS, BSD_MAX_BITS); + return 0; + } + if (rbits > 0) { + ccp_wantoptions[0].bsd_compress = 1; + ccp_wantoptions[0].bsd_bits = rbits; + } else + ccp_wantoptions[0].bsd_compress = 0; + if (abits > 0) { + ccp_allowoptions[0].bsd_compress = 1; + ccp_allowoptions[0].bsd_bits = abits; + } else + ccp_allowoptions[0].bsd_compress = 0; + ppp_slprintf(bsd_value, sizeof(bsd_value), + rbits == abits? "%d": "%d,%d", rbits, abits); + + return 1; +} + +static int +setdeflate(argv) + char **argv; +{ + int rbits, abits; + char *str, *endp; + + str = *argv; + abits = rbits = strtol(str, &endp, 0); + if (endp != str && *endp == ',') { + str = endp + 1; + abits = strtol(str, &endp, 0); + } + if (*endp != 0 || endp == str) { + option_error("invalid parameter '%s' for deflate option", *argv); + return 0; + } + if ((rbits != 0 && (rbits < DEFLATE_MIN_SIZE || rbits > DEFLATE_MAX_SIZE)) + || (abits != 0 && (abits < DEFLATE_MIN_SIZE + || abits > DEFLATE_MAX_SIZE))) { + option_error("deflate option values must be 0 or %d .. 
%d", + DEFLATE_MIN_SIZE, DEFLATE_MAX_SIZE); + return 0; + } + if (rbits == DEFLATE_MIN_SIZE || abits == DEFLATE_MIN_SIZE) { + if (rbits == DEFLATE_MIN_SIZE) + rbits = DEFLATE_MIN_WORKS; + if (abits == DEFLATE_MIN_SIZE) + abits = DEFLATE_MIN_WORKS; + warn("deflate option value of %d changed to %d to avoid zlib bug", + DEFLATE_MIN_SIZE, DEFLATE_MIN_WORKS); + } + if (rbits > 0) { + ccp_wantoptions[0].deflate = 1; + ccp_wantoptions[0].deflate_size = rbits; + } else + ccp_wantoptions[0].deflate = 0; + if (abits > 0) { + ccp_allowoptions[0].deflate = 1; + ccp_allowoptions[0].deflate_size = abits; + } else + ccp_allowoptions[0].deflate = 0; + ppp_slprintf(deflate_value, sizeof(deflate_value), + rbits == abits? "%d": "%d,%d", rbits, abits); + + return 1; +} +#endif /* PPP_OPTIONS */ + +/* + * ccp_init - initialize CCP. + */ +static void ccp_init(ppp_pcb *pcb) { + fsm *f = &pcb->ccp_fsm; + + f->pcb = pcb; + f->protocol = PPP_CCP; + f->callbacks = &ccp_callbacks; + fsm_init(f); + +#if 0 /* Not necessary, everything is cleared in ppp_new() */ + memset(wo, 0, sizeof(*wo)); + memset(go, 0, sizeof(*go)); + memset(ao, 0, sizeof(*ao)); + memset(ho, 0, sizeof(*ho)); +#endif /* 0 */ + +#if DEFLATE_SUPPORT + wo->deflate = 1; + wo->deflate_size = DEFLATE_MAX_SIZE; + wo->deflate_correct = 1; + wo->deflate_draft = 1; + ao->deflate = 1; + ao->deflate_size = DEFLATE_MAX_SIZE; + ao->deflate_correct = 1; + ao->deflate_draft = 1; +#endif /* DEFLATE_SUPPORT */ + +#if BSDCOMPRESS_SUPPORT + wo->bsd_compress = 1; + wo->bsd_bits = BSD_MAX_BITS; + ao->bsd_compress = 1; + ao->bsd_bits = BSD_MAX_BITS; +#endif /* BSDCOMPRESS_SUPPORT */ + +#if PREDICTOR_SUPPORT + ao->predictor_1 = 1; +#endif /* PREDICTOR_SUPPORT */ +} + +/* + * ccp_open - CCP is allowed to come up. + */ +static void ccp_open(ppp_pcb *pcb) { + fsm *f = &pcb->ccp_fsm; + ccp_options *go = &pcb->ccp_gotoptions; + + if (f->state != PPP_FSM_OPENED) + ccp_set(pcb, 1, 0, 0, 0); + + /* + * Find out which compressors the kernel supports before + * deciding whether to open in silent mode. + */ + ccp_resetci(f); + if (!ccp_anycompress(go)) + f->flags |= OPT_SILENT; + + fsm_open(f); +} + +/* + * ccp_close - Terminate CCP. + */ +static void ccp_close(ppp_pcb *pcb, const char *reason) { + fsm *f = &pcb->ccp_fsm; + ccp_set(pcb, 0, 0, 0, 0); + fsm_close(f, reason); +} + +/* + * ccp_lowerup - we may now transmit CCP packets. + */ +static void ccp_lowerup(ppp_pcb *pcb) { + fsm *f = &pcb->ccp_fsm; + fsm_lowerup(f); +} + +/* + * ccp_lowerdown - we may not transmit CCP packets. + */ +static void ccp_lowerdown(ppp_pcb *pcb) { + fsm *f = &pcb->ccp_fsm; + fsm_lowerdown(f); +} + +/* + * ccp_input - process a received CCP packet. + */ +static void ccp_input(ppp_pcb *pcb, u_char *p, int len) { + fsm *f = &pcb->ccp_fsm; + ccp_options *go = &pcb->ccp_gotoptions; + int oldstate; + + /* + * Check for a terminate-request so we can print a message. + */ + oldstate = f->state; + fsm_input(f, p, len); + if (oldstate == PPP_FSM_OPENED && p[0] == TERMREQ && f->state != PPP_FSM_OPENED) { + ppp_notice("Compression disabled by peer."); +#if MPPE_SUPPORT + if (go->mppe) { + ppp_error("MPPE disabled, closing LCP"); + lcp_close(pcb, "MPPE disabled by peer"); + } +#endif /* MPPE_SUPPORT */ + } + + /* + * If we get a terminate-ack and we're not asking for compression, + * close CCP. + */ + if (oldstate == PPP_FSM_REQSENT && p[0] == TERMACK + && !ccp_anycompress(go)) + ccp_close(pcb, "No compression negotiated"); +} + +/* + * Handle a CCP-specific code. 
+ */ +static int ccp_extcode(fsm *f, int code, int id, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(len); + + switch (code) { + case CCP_RESETREQ: + if (f->state != PPP_FSM_OPENED) + break; + ccp_reset_comp(pcb); + /* send a reset-ack, which the transmitter will see and + reset its compression state. */ + fsm_sdata(f, CCP_RESETACK, id, NULL, 0); + break; + + case CCP_RESETACK: + if ((pcb->ccp_localstate & RACK_PENDING) && id == f->reqid) { + pcb->ccp_localstate &= ~(RACK_PENDING | RREQ_REPEAT); + UNTIMEOUT(ccp_rack_timeout, f); + ccp_reset_decomp(pcb); + } + break; + + default: + return 0; + } + + return 1; +} + +/* + * ccp_protrej - peer doesn't talk CCP. + */ +static void ccp_protrej(ppp_pcb *pcb) { + fsm *f = &pcb->ccp_fsm; +#if MPPE_SUPPORT + ccp_options *go = &pcb->ccp_gotoptions; +#endif /* MPPE_SUPPORT */ + + ccp_set(pcb, 0, 0, 0, 0); + fsm_lowerdown(f); + +#if MPPE_SUPPORT + if (go->mppe) { + ppp_error("MPPE required but peer negotiation failed"); + lcp_close(pcb, "MPPE required but peer negotiation failed"); + } +#endif /* MPPE_SUPPORT */ + +} + +/* + * ccp_resetci - initialize at start of negotiation. + */ +static void ccp_resetci(fsm *f) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; + ccp_options *wo = &pcb->ccp_wantoptions; +#if MPPE_SUPPORT + ccp_options *ao = &pcb->ccp_allowoptions; +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT || BSDCOMPRESS_SUPPORT || PREDICTOR_SUPPORT + u_char opt_buf[CCP_MAX_OPTION_LENGTH]; +#endif /* DEFLATE_SUPPORT || BSDCOMPRESS_SUPPORT || PREDICTOR_SUPPORT */ +#if DEFLATE_SUPPORT || BSDCOMPRESS_SUPPORT + int res; +#endif /* DEFLATE_SUPPORT || BSDCOMPRESS_SUPPORT */ + +#if MPPE_SUPPORT + if (pcb->settings.require_mppe) { + wo->mppe = ao->mppe = + (pcb->settings.refuse_mppe_40 ? 0 : MPPE_OPT_40) + | (pcb->settings.refuse_mppe_128 ? 0 : MPPE_OPT_128); + } +#endif /* MPPE_SUPPORT */ + + *go = *wo; + pcb->ccp_all_rejected = 0; + +#if MPPE_SUPPORT + if (go->mppe) { + int auth_mschap_bits = pcb->auth_done; + int numbits; + + /* + * Start with a basic sanity check: mschap[v2] auth must be in + * exactly one direction. RFC 3079 says that the keys are + * 'derived from the credentials of the peer that initiated the call', + * however the PPP protocol doesn't have such a concept, and pppd + * cannot get this info externally. Instead we do the best we can. + * NB: If MPPE is required, all other compression opts are invalid. + * So, we return right away if we can't do it. + */ + + /* Leave only the mschap auth bits set */ + auth_mschap_bits &= (CHAP_MS_WITHPEER | CHAP_MS_PEER | + CHAP_MS2_WITHPEER | CHAP_MS2_PEER); + /* Count the mschap auths */ + auth_mschap_bits >>= CHAP_MS_SHIFT; + numbits = 0; + do { + numbits += auth_mschap_bits & 1; + auth_mschap_bits >>= 1; + } while (auth_mschap_bits); + if (numbits > 1) { + ppp_error("MPPE required, but auth done in both directions."); + lcp_close(pcb, "MPPE required but not available"); + return; + } + if (!numbits) { + ppp_error("MPPE required, but MS-CHAP[v2] auth not performed."); + lcp_close(pcb, "MPPE required but not available"); + return; + } + + /* A plugin (eg radius) may not have obtained key material. */ + if (!pcb->mppe_keys_set) { + ppp_error("MPPE required, but keys are not available. 
" + "Possible plugin problem?"); + lcp_close(pcb, "MPPE required but not available"); + return; + } + + /* LM auth not supported for MPPE */ + if (pcb->auth_done & (CHAP_MS_WITHPEER | CHAP_MS_PEER)) { + /* This might be noise */ + if (go->mppe & MPPE_OPT_40) { + ppp_notice("Disabling 40-bit MPPE; MS-CHAP LM not supported"); + go->mppe &= ~MPPE_OPT_40; + wo->mppe &= ~MPPE_OPT_40; + } + } + + /* Last check: can we actually negotiate something? */ + if (!(go->mppe & (MPPE_OPT_40 | MPPE_OPT_128))) { + /* Could be misconfig, could be 40-bit disabled above. */ + ppp_error("MPPE required, but both 40-bit and 128-bit disabled."); + lcp_close(pcb, "MPPE required but not available"); + return; + } + + /* sync options */ + ao->mppe = go->mppe; + /* MPPE is not compatible with other compression types */ +#if BSDCOMPRESS_SUPPORT + ao->bsd_compress = go->bsd_compress = 0; +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + ao->predictor_1 = go->predictor_1 = 0; + ao->predictor_2 = go->predictor_2 = 0; +#endif /* PREDICTOR_SUPPORT */ +#if DEFLATE_SUPPORT + ao->deflate = go->deflate = 0; +#endif /* DEFLATE_SUPPORT */ + } +#endif /* MPPE_SUPPORT */ + + /* + * Check whether the kernel knows about the various + * compression methods we might request. + */ +#if BSDCOMPRESS_SUPPORT + /* FIXME: we don't need to test if BSD compress is available + * if BSDCOMPRESS_SUPPORT is set, it is. + */ + if (go->bsd_compress) { + opt_buf[0] = CI_BSD_COMPRESS; + opt_buf[1] = CILEN_BSD_COMPRESS; + for (;;) { + if (go->bsd_bits < BSD_MIN_BITS) { + go->bsd_compress = 0; + break; + } + opt_buf[2] = BSD_MAKE_OPT(BSD_CURRENT_VERSION, go->bsd_bits); + res = ccp_test(pcb, opt_buf, CILEN_BSD_COMPRESS, 0); + if (res > 0) { + break; + } else if (res < 0) { + go->bsd_compress = 0; + break; + } + go->bsd_bits--; + } + } +#endif /* BSDCOMPRESS_SUPPORT */ +#if DEFLATE_SUPPORT + /* FIXME: we don't need to test if deflate is available + * if DEFLATE_SUPPORT is set, it is. + */ + if (go->deflate) { + if (go->deflate_correct) { + opt_buf[0] = CI_DEFLATE; + opt_buf[1] = CILEN_DEFLATE; + opt_buf[3] = DEFLATE_CHK_SEQUENCE; + for (;;) { + if (go->deflate_size < DEFLATE_MIN_WORKS) { + go->deflate_correct = 0; + break; + } + opt_buf[2] = DEFLATE_MAKE_OPT(go->deflate_size); + res = ccp_test(pcb, opt_buf, CILEN_DEFLATE, 0); + if (res > 0) { + break; + } else if (res < 0) { + go->deflate_correct = 0; + break; + } + go->deflate_size--; + } + } + if (go->deflate_draft) { + opt_buf[0] = CI_DEFLATE_DRAFT; + opt_buf[1] = CILEN_DEFLATE; + opt_buf[3] = DEFLATE_CHK_SEQUENCE; + for (;;) { + if (go->deflate_size < DEFLATE_MIN_WORKS) { + go->deflate_draft = 0; + break; + } + opt_buf[2] = DEFLATE_MAKE_OPT(go->deflate_size); + res = ccp_test(pcb, opt_buf, CILEN_DEFLATE, 0); + if (res > 0) { + break; + } else if (res < 0) { + go->deflate_draft = 0; + break; + } + go->deflate_size--; + } + } + if (!go->deflate_correct && !go->deflate_draft) + go->deflate = 0; + } +#endif /* DEFLATE_SUPPORT */ +#if PREDICTOR_SUPPORT + /* FIXME: we don't need to test if predictor is available, + * if PREDICTOR_SUPPORT is set, it is. 
+ */ + if (go->predictor_1) { + opt_buf[0] = CI_PREDICTOR_1; + opt_buf[1] = CILEN_PREDICTOR_1; + if (ccp_test(pcb, opt_buf, CILEN_PREDICTOR_1, 0) <= 0) + go->predictor_1 = 0; + } + if (go->predictor_2) { + opt_buf[0] = CI_PREDICTOR_2; + opt_buf[1] = CILEN_PREDICTOR_2; + if (ccp_test(pcb, opt_buf, CILEN_PREDICTOR_2, 0) <= 0) + go->predictor_2 = 0; + } +#endif /* PREDICTOR_SUPPORT */ +} + +/* + * ccp_cilen - Return total length of our configuration info. + */ +static int ccp_cilen(fsm *f) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; + + return 0 +#if BSDCOMPRESS_SUPPORT + + (go->bsd_compress? CILEN_BSD_COMPRESS: 0) +#endif /* BSDCOMPRESS_SUPPORT */ +#if DEFLATE_SUPPORT + + (go->deflate && go->deflate_correct? CILEN_DEFLATE: 0) + + (go->deflate && go->deflate_draft? CILEN_DEFLATE: 0) +#endif /* DEFLATE_SUPPORT */ +#if PREDICTOR_SUPPORT + + (go->predictor_1? CILEN_PREDICTOR_1: 0) + + (go->predictor_2? CILEN_PREDICTOR_2: 0) +#endif /* PREDICTOR_SUPPORT */ +#if MPPE_SUPPORT + + (go->mppe? CILEN_MPPE: 0) +#endif /* MPPE_SUPPORT */ + ; +} + +/* + * ccp_addci - put our requests in a packet. + */ +static void ccp_addci(fsm *f, u_char *p, int *lenp) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; + u_char *p0 = p; + + /* + * Add the compression types that we can receive, in decreasing + * preference order. + */ +#if MPPE_SUPPORT + if (go->mppe) { + p[0] = CI_MPPE; + p[1] = CILEN_MPPE; + MPPE_OPTS_TO_CI(go->mppe, &p[2]); + mppe_init(pcb, &pcb->mppe_decomp, go->mppe); + p += CILEN_MPPE; + } +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + if (go->deflate) { + if (go->deflate_correct) { + p[0] = CI_DEFLATE; + p[1] = CILEN_DEFLATE; + p[2] = DEFLATE_MAKE_OPT(go->deflate_size); + p[3] = DEFLATE_CHK_SEQUENCE; + p += CILEN_DEFLATE; + } + if (go->deflate_draft) { + p[0] = CI_DEFLATE_DRAFT; + p[1] = CILEN_DEFLATE; + p[2] = p[2 - CILEN_DEFLATE]; + p[3] = DEFLATE_CHK_SEQUENCE; + p += CILEN_DEFLATE; + } + } +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + if (go->bsd_compress) { + p[0] = CI_BSD_COMPRESS; + p[1] = CILEN_BSD_COMPRESS; + p[2] = BSD_MAKE_OPT(BSD_CURRENT_VERSION, go->bsd_bits); + p += CILEN_BSD_COMPRESS; + } +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + /* XXX Should Predictor 2 be preferable to Predictor 1? */ + if (go->predictor_1) { + p[0] = CI_PREDICTOR_1; + p[1] = CILEN_PREDICTOR_1; + p += CILEN_PREDICTOR_1; + } + if (go->predictor_2) { + p[0] = CI_PREDICTOR_2; + p[1] = CILEN_PREDICTOR_2; + p += CILEN_PREDICTOR_2; + } +#endif /* PREDICTOR_SUPPORT */ + + go->method = (p > p0)? p0[0]: 0; + + *lenp = p - p0; +} + +/* + * ccp_ackci - process a received configure-ack, and return + * 1 iff the packet was OK. + */ +static int ccp_ackci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; +#if BSDCOMPRESS_SUPPORT || PREDICTOR_SUPPORT + u_char *p0 = p; +#endif /* BSDCOMPRESS_SUPPORT || PREDICTOR_SUPPORT */ + +#if MPPE_SUPPORT + if (go->mppe) { + u_char opt_buf[CILEN_MPPE]; + + opt_buf[0] = CI_MPPE; + opt_buf[1] = CILEN_MPPE; + MPPE_OPTS_TO_CI(go->mppe, &opt_buf[2]); + if (len < CILEN_MPPE || memcmp(opt_buf, p, CILEN_MPPE)) + return 0; + p += CILEN_MPPE; + len -= CILEN_MPPE; + /* XXX Cope with first/fast ack */ + if (len == 0) + return 1; + } +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + if (go->deflate) { + if (len < CILEN_DEFLATE + || p[0] != (go->deflate_correct? 
CI_DEFLATE: CI_DEFLATE_DRAFT) + || p[1] != CILEN_DEFLATE + || p[2] != DEFLATE_MAKE_OPT(go->deflate_size) + || p[3] != DEFLATE_CHK_SEQUENCE) + return 0; + p += CILEN_DEFLATE; + len -= CILEN_DEFLATE; + /* XXX Cope with first/fast ack */ + if (len == 0) + return 1; + if (go->deflate_correct && go->deflate_draft) { + if (len < CILEN_DEFLATE + || p[0] != CI_DEFLATE_DRAFT + || p[1] != CILEN_DEFLATE + || p[2] != DEFLATE_MAKE_OPT(go->deflate_size) + || p[3] != DEFLATE_CHK_SEQUENCE) + return 0; + p += CILEN_DEFLATE; + len -= CILEN_DEFLATE; + } + } +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + if (go->bsd_compress) { + if (len < CILEN_BSD_COMPRESS + || p[0] != CI_BSD_COMPRESS || p[1] != CILEN_BSD_COMPRESS + || p[2] != BSD_MAKE_OPT(BSD_CURRENT_VERSION, go->bsd_bits)) + return 0; + p += CILEN_BSD_COMPRESS; + len -= CILEN_BSD_COMPRESS; + /* XXX Cope with first/fast ack */ + if (p == p0 && len == 0) + return 1; + } +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + if (go->predictor_1) { + if (len < CILEN_PREDICTOR_1 + || p[0] != CI_PREDICTOR_1 || p[1] != CILEN_PREDICTOR_1) + return 0; + p += CILEN_PREDICTOR_1; + len -= CILEN_PREDICTOR_1; + /* XXX Cope with first/fast ack */ + if (p == p0 && len == 0) + return 1; + } + if (go->predictor_2) { + if (len < CILEN_PREDICTOR_2 + || p[0] != CI_PREDICTOR_2 || p[1] != CILEN_PREDICTOR_2) + return 0; + p += CILEN_PREDICTOR_2; + len -= CILEN_PREDICTOR_2; + /* XXX Cope with first/fast ack */ + if (p == p0 && len == 0) + return 1; + } +#endif /* PREDICTOR_SUPPORT */ + + if (len != 0) + return 0; + return 1; +} + +/* + * ccp_nakci - process received configure-nak. + * Returns 1 iff the nak was OK. + */ +static int ccp_nakci(fsm *f, u_char *p, int len, int treat_as_reject) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; + ccp_options no; /* options we've seen already */ + ccp_options try_; /* options to ask for next time */ + LWIP_UNUSED_ARG(treat_as_reject); +#if !MPPE_SUPPORT && !DEFLATE_SUPPORT && !BSDCOMPRESS_SUPPORT + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(len); +#endif /* !MPPE_SUPPORT && !DEFLATE_SUPPORT && !BSDCOMPRESS_SUPPORT */ + + memset(&no, 0, sizeof(no)); + try_ = *go; + +#if MPPE_SUPPORT + if (go->mppe && len >= CILEN_MPPE + && p[0] == CI_MPPE && p[1] == CILEN_MPPE) { + no.mppe = 1; + /* + * Peer wants us to use a different strength or other setting. + * Fail if we aren't willing to use his suggestion. + */ + MPPE_CI_TO_OPTS(&p[2], try_.mppe); + if ((try_.mppe & MPPE_OPT_STATEFUL) && pcb->settings.refuse_mppe_stateful) { + ppp_error("Refusing MPPE stateful mode offered by peer"); + try_.mppe = 0; + } else if (((go->mppe | MPPE_OPT_STATEFUL) & try_.mppe) != try_.mppe) { + /* Peer must have set options we didn't request (suggest) */ + try_.mppe = 0; + } + + if (!try_.mppe) { + ppp_error("MPPE required but peer negotiation failed"); + lcp_close(pcb, "MPPE required but peer negotiation failed"); + } + } +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + if (go->deflate && len >= CILEN_DEFLATE + && p[0] == (go->deflate_correct? CI_DEFLATE: CI_DEFLATE_DRAFT) + && p[1] == CILEN_DEFLATE) { + no.deflate = 1; + /* + * Peer wants us to use a different code size or something. + * Stop asking for Deflate if we don't understand his suggestion. 
+ */ + if (DEFLATE_METHOD(p[2]) != DEFLATE_METHOD_VAL + || DEFLATE_SIZE(p[2]) < DEFLATE_MIN_WORKS + || p[3] != DEFLATE_CHK_SEQUENCE) + try_.deflate = 0; + else if (DEFLATE_SIZE(p[2]) < go->deflate_size) + try_.deflate_size = DEFLATE_SIZE(p[2]); + p += CILEN_DEFLATE; + len -= CILEN_DEFLATE; + if (go->deflate_correct && go->deflate_draft + && len >= CILEN_DEFLATE && p[0] == CI_DEFLATE_DRAFT + && p[1] == CILEN_DEFLATE) { + p += CILEN_DEFLATE; + len -= CILEN_DEFLATE; + } + } +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + if (go->bsd_compress && len >= CILEN_BSD_COMPRESS + && p[0] == CI_BSD_COMPRESS && p[1] == CILEN_BSD_COMPRESS) { + no.bsd_compress = 1; + /* + * Peer wants us to use a different number of bits + * or a different version. + */ + if (BSD_VERSION(p[2]) != BSD_CURRENT_VERSION) + try_.bsd_compress = 0; + else if (BSD_NBITS(p[2]) < go->bsd_bits) + try_.bsd_bits = BSD_NBITS(p[2]); + p += CILEN_BSD_COMPRESS; + len -= CILEN_BSD_COMPRESS; + } +#endif /* BSDCOMPRESS_SUPPORT */ + + /* + * Predictor-1 and 2 have no options, so they can't be Naked. + * + * There may be remaining options but we ignore them. + */ + + if (f->state != PPP_FSM_OPENED) + *go = try_; + return 1; +} + +/* + * ccp_rejci - reject some of our suggested compression methods. + */ +static int ccp_rejci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; + ccp_options try_; /* options to request next time */ + + try_ = *go; + + /* + * Cope with empty configure-rejects by ceasing to send + * configure-requests. + */ + if (len == 0 && pcb->ccp_all_rejected) + return -1; + +#if MPPE_SUPPORT + if (go->mppe && len >= CILEN_MPPE + && p[0] == CI_MPPE && p[1] == CILEN_MPPE) { + ppp_error("MPPE required but peer refused"); + lcp_close(pcb, "MPPE required but peer refused"); + p += CILEN_MPPE; + len -= CILEN_MPPE; + } +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + if (go->deflate_correct && len >= CILEN_DEFLATE + && p[0] == CI_DEFLATE && p[1] == CILEN_DEFLATE) { + if (p[2] != DEFLATE_MAKE_OPT(go->deflate_size) + || p[3] != DEFLATE_CHK_SEQUENCE) + return 0; /* Rej is bad */ + try_.deflate_correct = 0; + p += CILEN_DEFLATE; + len -= CILEN_DEFLATE; + } + if (go->deflate_draft && len >= CILEN_DEFLATE + && p[0] == CI_DEFLATE_DRAFT && p[1] == CILEN_DEFLATE) { + if (p[2] != DEFLATE_MAKE_OPT(go->deflate_size) + || p[3] != DEFLATE_CHK_SEQUENCE) + return 0; /* Rej is bad */ + try_.deflate_draft = 0; + p += CILEN_DEFLATE; + len -= CILEN_DEFLATE; + } + if (!try_.deflate_correct && !try_.deflate_draft) + try_.deflate = 0; +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + if (go->bsd_compress && len >= CILEN_BSD_COMPRESS + && p[0] == CI_BSD_COMPRESS && p[1] == CILEN_BSD_COMPRESS) { + if (p[2] != BSD_MAKE_OPT(BSD_CURRENT_VERSION, go->bsd_bits)) + return 0; + try_.bsd_compress = 0; + p += CILEN_BSD_COMPRESS; + len -= CILEN_BSD_COMPRESS; + } +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + if (go->predictor_1 && len >= CILEN_PREDICTOR_1 + && p[0] == CI_PREDICTOR_1 && p[1] == CILEN_PREDICTOR_1) { + try_.predictor_1 = 0; + p += CILEN_PREDICTOR_1; + len -= CILEN_PREDICTOR_1; + } + if (go->predictor_2 && len >= CILEN_PREDICTOR_2 + && p[0] == CI_PREDICTOR_2 && p[1] == CILEN_PREDICTOR_2) { + try_.predictor_2 = 0; + p += CILEN_PREDICTOR_2; + len -= CILEN_PREDICTOR_2; + } +#endif /* PREDICTOR_SUPPORT */ + + if (len != 0) + return 0; + + if (f->state != PPP_FSM_OPENED) + *go = try_; + + return 1; +} + +/* + * ccp_reqci - processed a received configure-request. 
+ * Returns CONFACK, CONFNAK or CONFREJ and the packet modified + * appropriately. + */ +static int ccp_reqci(fsm *f, u_char *p, int *lenp, int dont_nak) { + ppp_pcb *pcb = f->pcb; + ccp_options *ho = &pcb->ccp_hisoptions; + ccp_options *ao = &pcb->ccp_allowoptions; + int ret, newret; +#if DEFLATE_SUPPORT || BSDCOMPRESS_SUPPORT + int res; + int nb; +#endif /* DEFLATE_SUPPORT || BSDCOMPRESS_SUPPORT */ + u_char *p0, *retp; + int len, clen, type; +#if MPPE_SUPPORT + u8_t rej_for_ci_mppe = 1; /* Are we rejecting based on a bad/missing */ + /* CI_MPPE, or due to other options? */ +#endif /* MPPE_SUPPORT */ + + ret = CONFACK; + retp = p0 = p; + len = *lenp; + + memset(ho, 0, sizeof(ccp_options)); + ho->method = (len > 0)? p[0]: 0; + + while (len > 0) { + newret = CONFACK; + if (len < 2 || p[1] < 2 || p[1] > len) { + /* length is bad */ + clen = len; + newret = CONFREJ; + + } else { + type = p[0]; + clen = p[1]; + + switch (type) { +#if MPPE_SUPPORT + case CI_MPPE: + if (!ao->mppe || clen != CILEN_MPPE) { + newret = CONFREJ; + break; + } + MPPE_CI_TO_OPTS(&p[2], ho->mppe); + + /* Nak if anything unsupported or unknown are set. */ + if (ho->mppe & MPPE_OPT_UNSUPPORTED) { + newret = CONFNAK; + ho->mppe &= ~MPPE_OPT_UNSUPPORTED; + } + if (ho->mppe & MPPE_OPT_UNKNOWN) { + newret = CONFNAK; + ho->mppe &= ~MPPE_OPT_UNKNOWN; + } + + /* Check state opt */ + if (ho->mppe & MPPE_OPT_STATEFUL) { + /* + * We can Nak and request stateless, but it's a + * lot easier to just assume the peer will request + * it if he can do it; stateful mode is bad over + * the Internet -- which is where we expect MPPE. + */ + if (pcb->settings.refuse_mppe_stateful) { + ppp_error("Refusing MPPE stateful mode offered by peer"); + newret = CONFREJ; + break; + } + } + + /* Find out which of {S,L} are set. */ + if ((ho->mppe & MPPE_OPT_128) + && (ho->mppe & MPPE_OPT_40)) { + /* Both are set, negotiate the strongest. */ + newret = CONFNAK; + if (ao->mppe & MPPE_OPT_128) + ho->mppe &= ~MPPE_OPT_40; + else if (ao->mppe & MPPE_OPT_40) + ho->mppe &= ~MPPE_OPT_128; + else { + newret = CONFREJ; + break; + } + } else if (ho->mppe & MPPE_OPT_128) { + if (!(ao->mppe & MPPE_OPT_128)) { + newret = CONFREJ; + break; + } + } else if (ho->mppe & MPPE_OPT_40) { + if (!(ao->mppe & MPPE_OPT_40)) { + newret = CONFREJ; + break; + } + } else { + /* Neither are set. */ + /* We cannot accept this. */ + newret = CONFNAK; + /* Give the peer our idea of what can be used, + so it can choose and confirm */ + ho->mppe = ao->mppe; + } + + /* rebuild the opts */ + MPPE_OPTS_TO_CI(ho->mppe, &p[2]); + if (newret == CONFACK) { + int mtu; + + mppe_init(pcb, &pcb->mppe_comp, ho->mppe); + /* + * We need to decrease the interface MTU by MPPE_PAD + * because MPPE frames **grow**. The kernel [must] + * allocate MPPE_PAD extra bytes in xmit buffers. + */ + mtu = netif_get_mtu(pcb); + if (mtu) + netif_set_mtu(pcb, mtu - MPPE_PAD); + else + newret = CONFREJ; + } + + /* + * We have accepted MPPE or are willing to negotiate + * MPPE parameters. A CONFREJ is due to subsequent + * (non-MPPE) processing. 
+ */ + rej_for_ci_mppe = 0; + break; +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + case CI_DEFLATE: + case CI_DEFLATE_DRAFT: + if (!ao->deflate || clen != CILEN_DEFLATE + || (!ao->deflate_correct && type == CI_DEFLATE) + || (!ao->deflate_draft && type == CI_DEFLATE_DRAFT)) { + newret = CONFREJ; + break; + } + + ho->deflate = 1; + ho->deflate_size = nb = DEFLATE_SIZE(p[2]); + if (DEFLATE_METHOD(p[2]) != DEFLATE_METHOD_VAL + || p[3] != DEFLATE_CHK_SEQUENCE + || nb > ao->deflate_size || nb < DEFLATE_MIN_WORKS) { + newret = CONFNAK; + if (!dont_nak) { + p[2] = DEFLATE_MAKE_OPT(ao->deflate_size); + p[3] = DEFLATE_CHK_SEQUENCE; + /* fall through to test this #bits below */ + } else + break; + } + + /* + * Check whether we can do Deflate with the window + * size they want. If the window is too big, reduce + * it until the kernel can cope and nak with that. + * We only check this for the first option. + */ + if (p == p0) { + for (;;) { + res = ccp_test(pcb, p, CILEN_DEFLATE, 1); + if (res > 0) + break; /* it's OK now */ + if (res < 0 || nb == DEFLATE_MIN_WORKS || dont_nak) { + newret = CONFREJ; + p[2] = DEFLATE_MAKE_OPT(ho->deflate_size); + break; + } + newret = CONFNAK; + --nb; + p[2] = DEFLATE_MAKE_OPT(nb); + } + } + break; +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + case CI_BSD_COMPRESS: + if (!ao->bsd_compress || clen != CILEN_BSD_COMPRESS) { + newret = CONFREJ; + break; + } + + ho->bsd_compress = 1; + ho->bsd_bits = nb = BSD_NBITS(p[2]); + if (BSD_VERSION(p[2]) != BSD_CURRENT_VERSION + || nb > ao->bsd_bits || nb < BSD_MIN_BITS) { + newret = CONFNAK; + if (!dont_nak) { + p[2] = BSD_MAKE_OPT(BSD_CURRENT_VERSION, ao->bsd_bits); + /* fall through to test this #bits below */ + } else + break; + } + + /* + * Check whether we can do BSD-Compress with the code + * size they want. If the code size is too big, reduce + * it until the kernel can cope and nak with that. + * We only check this for the first option. 
+ */ + if (p == p0) { + for (;;) { + res = ccp_test(pcb, p, CILEN_BSD_COMPRESS, 1); + if (res > 0) + break; + if (res < 0 || nb == BSD_MIN_BITS || dont_nak) { + newret = CONFREJ; + p[2] = BSD_MAKE_OPT(BSD_CURRENT_VERSION, + ho->bsd_bits); + break; + } + newret = CONFNAK; + --nb; + p[2] = BSD_MAKE_OPT(BSD_CURRENT_VERSION, nb); + } + } + break; +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + case CI_PREDICTOR_1: + if (!ao->predictor_1 || clen != CILEN_PREDICTOR_1) { + newret = CONFREJ; + break; + } + + ho->predictor_1 = 1; + if (p == p0 + && ccp_test(pcb, p, CILEN_PREDICTOR_1, 1) <= 0) { + newret = CONFREJ; + } + break; + + case CI_PREDICTOR_2: + if (!ao->predictor_2 || clen != CILEN_PREDICTOR_2) { + newret = CONFREJ; + break; + } + + ho->predictor_2 = 1; + if (p == p0 + && ccp_test(pcb, p, CILEN_PREDICTOR_2, 1) <= 0) { + newret = CONFREJ; + } + break; +#endif /* PREDICTOR_SUPPORT */ + + default: + newret = CONFREJ; + } + } + + if (newret == CONFNAK && dont_nak) + newret = CONFREJ; + if (!(newret == CONFACK || (newret == CONFNAK && ret == CONFREJ))) { + /* we're returning this option */ + if (newret == CONFREJ && ret == CONFNAK) + retp = p0; + ret = newret; + if (p != retp) + MEMCPY(retp, p, clen); + retp += clen; + } + + p += clen; + len -= clen; + } + + if (ret != CONFACK) { + if (ret == CONFREJ && *lenp == retp - p0) + pcb->ccp_all_rejected = 1; + else + *lenp = retp - p0; + } +#if MPPE_SUPPORT + if (ret == CONFREJ && ao->mppe && rej_for_ci_mppe) { + ppp_error("MPPE required but peer negotiation failed"); + lcp_close(pcb, "MPPE required but peer negotiation failed"); + } +#endif /* MPPE_SUPPORT */ + return ret; +} + +/* + * Make a string name for a compression method (or 2). + */ +static const char *method_name(ccp_options *opt, ccp_options *opt2) { + static char result[64]; +#if !DEFLATE_SUPPORT && !BSDCOMPRESS_SUPPORT + LWIP_UNUSED_ARG(opt2); +#endif /* !DEFLATE_SUPPORT && !BSDCOMPRESS_SUPPORT */ + + if (!ccp_anycompress(opt)) + return "(none)"; + switch (opt->method) { +#if MPPE_SUPPORT + case CI_MPPE: + { + char *p = result; + char *q = result + sizeof(result); /* 1 past result */ + + ppp_slprintf(p, q - p, "MPPE "); + p += 5; + if (opt->mppe & MPPE_OPT_128) { + ppp_slprintf(p, q - p, "128-bit "); + p += 8; + } + if (opt->mppe & MPPE_OPT_40) { + ppp_slprintf(p, q - p, "40-bit "); + p += 7; + } + if (opt->mppe & MPPE_OPT_STATEFUL) + ppp_slprintf(p, q - p, "stateful"); + else + ppp_slprintf(p, q - p, "stateless"); + + break; + } +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + case CI_DEFLATE: + case CI_DEFLATE_DRAFT: + if (opt2 != NULL && opt2->deflate_size != opt->deflate_size) + ppp_slprintf(result, sizeof(result), "Deflate%s (%d/%d)", + (opt->method == CI_DEFLATE_DRAFT? "(old#)": ""), + opt->deflate_size, opt2->deflate_size); + else + ppp_slprintf(result, sizeof(result), "Deflate%s (%d)", + (opt->method == CI_DEFLATE_DRAFT? 
"(old#)": ""), + opt->deflate_size); + break; +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + case CI_BSD_COMPRESS: + if (opt2 != NULL && opt2->bsd_bits != opt->bsd_bits) + ppp_slprintf(result, sizeof(result), "BSD-Compress (%d/%d)", + opt->bsd_bits, opt2->bsd_bits); + else + ppp_slprintf(result, sizeof(result), "BSD-Compress (%d)", + opt->bsd_bits); + break; +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + case CI_PREDICTOR_1: + return "Predictor 1"; + case CI_PREDICTOR_2: + return "Predictor 2"; +#endif /* PREDICTOR_SUPPORT */ + default: + ppp_slprintf(result, sizeof(result), "Method %d", opt->method); + } + return result; +} + +/* + * CCP has come up - inform the kernel driver and log a message. + */ +static void ccp_up(fsm *f) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; + ccp_options *ho = &pcb->ccp_hisoptions; + char method1[64]; + + ccp_set(pcb, 1, 1, go->method, ho->method); + if (ccp_anycompress(go)) { + if (ccp_anycompress(ho)) { + if (go->method == ho->method) { + ppp_notice("%s compression enabled", method_name(go, ho)); + } else { + ppp_strlcpy(method1, method_name(go, NULL), sizeof(method1)); + ppp_notice("%s / %s compression enabled", + method1, method_name(ho, NULL)); + } + } else + ppp_notice("%s receive compression enabled", method_name(go, NULL)); + } else if (ccp_anycompress(ho)) + ppp_notice("%s transmit compression enabled", method_name(ho, NULL)); +#if MPPE_SUPPORT + if (go->mppe) { + continue_networks(pcb); /* Bring up IP et al */ + } +#endif /* MPPE_SUPPORT */ +} + +/* + * CCP has gone down - inform the kernel driver. + */ +static void ccp_down(fsm *f) { + ppp_pcb *pcb = f->pcb; +#if MPPE_SUPPORT + ccp_options *go = &pcb->ccp_gotoptions; +#endif /* MPPE_SUPPORT */ + + if (pcb->ccp_localstate & RACK_PENDING) + UNTIMEOUT(ccp_rack_timeout, f); + pcb->ccp_localstate = 0; + ccp_set(pcb, 1, 0, 0, 0); +#if MPPE_SUPPORT + if (go->mppe) { + go->mppe = 0; + if (pcb->lcp_fsm.state == PPP_FSM_OPENED) { + /* If LCP is not already going down, make sure it does. */ + ppp_error("MPPE disabled"); + lcp_close(pcb, "MPPE disabled"); + } + } +#endif /* MPPE_SUPPORT */ +} + +#if PRINTPKT_SUPPORT +/* + * Print the contents of a CCP packet. + */ +static const char* const ccp_codenames[] = { + "ConfReq", "ConfAck", "ConfNak", "ConfRej", + "TermReq", "TermAck", "CodeRej", + NULL, NULL, NULL, NULL, NULL, NULL, + "ResetReq", "ResetAck", +}; + +static int ccp_printpkt(const u_char *p, int plen, void (*printer) (void *, const char *, ...), void *arg) { + const u_char *p0, *optend; + int code, id, len; + int optlen; + + p0 = p; + if (plen < HEADERLEN) + return 0; + code = p[0]; + id = p[1]; + len = (p[2] << 8) + p[3]; + if (len < HEADERLEN || len > plen) + return 0; + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(ccp_codenames) && ccp_codenames[code-1] != NULL) + printer(arg, " %s", ccp_codenames[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= HEADERLEN; + p += HEADERLEN; + + switch (code) { + case CONFREQ: + case CONFACK: + case CONFNAK: + case CONFREJ: + /* print list of possible compression methods */ + while (len >= 2) { + code = p[0]; + optlen = p[1]; + if (optlen < 2 || optlen > len) + break; + printer(arg, " <"); + len -= optlen; + optend = p + optlen; + switch (code) { +#if MPPE_SUPPORT + case CI_MPPE: + if (optlen >= CILEN_MPPE) { + u_char mppe_opts; + + MPPE_CI_TO_OPTS(&p[2], mppe_opts); + printer(arg, "mppe %s %s %s %s %s %s%s", + (p[2] & MPPE_H_BIT)? "+H": "-H", + (p[5] & MPPE_M_BIT)? 
"+M": "-M", + (p[5] & MPPE_S_BIT)? "+S": "-S", + (p[5] & MPPE_L_BIT)? "+L": "-L", + (p[5] & MPPE_D_BIT)? "+D": "-D", + (p[5] & MPPE_C_BIT)? "+C": "-C", + (mppe_opts & MPPE_OPT_UNKNOWN)? " +U": ""); + if (mppe_opts & MPPE_OPT_UNKNOWN) + printer(arg, " (%.2x %.2x %.2x %.2x)", + p[2], p[3], p[4], p[5]); + p += CILEN_MPPE; + } + break; +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + case CI_DEFLATE: + case CI_DEFLATE_DRAFT: + if (optlen >= CILEN_DEFLATE) { + printer(arg, "deflate%s %d", + (code == CI_DEFLATE_DRAFT? "(old#)": ""), + DEFLATE_SIZE(p[2])); + if (DEFLATE_METHOD(p[2]) != DEFLATE_METHOD_VAL) + printer(arg, " method %d", DEFLATE_METHOD(p[2])); + if (p[3] != DEFLATE_CHK_SEQUENCE) + printer(arg, " check %d", p[3]); + p += CILEN_DEFLATE; + } + break; +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + case CI_BSD_COMPRESS: + if (optlen >= CILEN_BSD_COMPRESS) { + printer(arg, "bsd v%d %d", BSD_VERSION(p[2]), + BSD_NBITS(p[2])); + p += CILEN_BSD_COMPRESS; + } + break; +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + case CI_PREDICTOR_1: + if (optlen >= CILEN_PREDICTOR_1) { + printer(arg, "predictor 1"); + p += CILEN_PREDICTOR_1; + } + break; + case CI_PREDICTOR_2: + if (optlen >= CILEN_PREDICTOR_2) { + printer(arg, "predictor 2"); + p += CILEN_PREDICTOR_2; + } + break; +#endif /* PREDICTOR_SUPPORT */ + default: + break; + } + while (p < optend) + printer(arg, " %.2x", *p++); + printer(arg, ">"); + } + break; + + case TERMACK: + case TERMREQ: + if (len > 0 && *p >= ' ' && *p < 0x7f) { + ppp_print_string(p, len, printer, arg); + p += len; + len = 0; + } + break; + default: + break; + } + + /* dump out the rest of the packet in hex */ + while (--len >= 0) + printer(arg, " %.2x", *p++); + + return p - p0; +} +#endif /* PRINTPKT_SUPPORT */ + +#if PPP_DATAINPUT +/* + * We have received a packet that the decompressor failed to + * decompress. Here we would expect to issue a reset-request, but + * Motorola has a patent on resetting the compressor as a result of + * detecting an error in the decompressed data after decompression. + * (See US patent 5,130,993; international patent publication number + * WO 91/10289; Australian patent 73296/91.) + * + * So we ask the kernel whether the error was detected after + * decompression; if it was, we take CCP down, thus disabling + * compression :-(, otherwise we issue the reset-request. + */ +static void ccp_datainput(ppp_pcb *pcb, u_char *pkt, int len) { + fsm *f; +#if MPPE_SUPPORT + ccp_options *go = &pcb->ccp_gotoptions; +#endif /* MPPE_SUPPORT */ + LWIP_UNUSED_ARG(pkt); + LWIP_UNUSED_ARG(len); + + f = &pcb->ccp_fsm; + if (f->state == PPP_FSM_OPENED) { + if (ccp_fatal_error(pcb)) { + /* + * Disable compression by taking CCP down. + */ + ppp_error("Lost compression sync: disabling compression"); + ccp_close(pcb, "Lost compression sync"); +#if MPPE_SUPPORT + /* + * If we were doing MPPE, we must also take the link down. + */ + if (go->mppe) { + ppp_error("Too many MPPE errors, closing LCP"); + lcp_close(pcb, "Too many MPPE errors"); + } +#endif /* MPPE_SUPPORT */ + } else { + /* + * Send a reset-request to reset the peer's compressor. + * We don't do that if we are still waiting for an + * acknowledgement to a previous reset-request. 
+ */ + if (!(pcb->ccp_localstate & RACK_PENDING)) { + fsm_sdata(f, CCP_RESETREQ, f->reqid = ++f->id, NULL, 0); + TIMEOUT(ccp_rack_timeout, f, RACKTIMEOUT); + pcb->ccp_localstate |= RACK_PENDING; + } else + pcb->ccp_localstate |= RREQ_REPEAT; + } + } +} +#endif /* PPP_DATAINPUT */ + +/* + * We have received a packet that the decompressor failed to + * decompress. Issue a reset-request. + */ +void ccp_resetrequest(ppp_pcb *pcb) { + fsm *f = &pcb->ccp_fsm; + + if (f->state != PPP_FSM_OPENED) + return; + + /* + * Send a reset-request to reset the peer's compressor. + * We don't do that if we are still waiting for an + * acknowledgement to a previous reset-request. + */ + if (!(pcb->ccp_localstate & RACK_PENDING)) { + fsm_sdata(f, CCP_RESETREQ, f->reqid = ++f->id, NULL, 0); + TIMEOUT(ccp_rack_timeout, f, RACKTIMEOUT); + pcb->ccp_localstate |= RACK_PENDING; + } else + pcb->ccp_localstate |= RREQ_REPEAT; +} + +/* + * Timeout waiting for reset-ack. + */ +static void ccp_rack_timeout(void *arg) { + fsm *f = (fsm*)arg; + ppp_pcb *pcb = f->pcb; + + if (f->state == PPP_FSM_OPENED && (pcb->ccp_localstate & RREQ_REPEAT)) { + fsm_sdata(f, CCP_RESETREQ, f->reqid, NULL, 0); + TIMEOUT(ccp_rack_timeout, f, RACKTIMEOUT); + pcb->ccp_localstate &= ~RREQ_REPEAT; + } else + pcb->ccp_localstate &= ~RACK_PENDING; +} + +#endif /* PPP_SUPPORT && CCP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/chap-md5.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/chap-md5.c new file mode 100644 index 0000000..88f069f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/chap-md5.c @@ -0,0 +1,126 @@ +/* + * chap-md5.c - New CHAP/MD5 implementation. + * + * Copyright (c) 2003 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && CHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/chap-new.h" +#include "netif/ppp/chap-md5.h" +#include "netif/ppp/magic.h" +#include "netif/ppp/pppcrypt.h" + +#define MD5_HASH_SIZE 16 +#define MD5_MIN_CHALLENGE 17 +#define MD5_MAX_CHALLENGE 24 +#define MD5_MIN_MAX_POWER_OF_TWO_CHALLENGE 3 /* 2^3-1 = 7, 17+7 = 24 */ + +#if PPP_SERVER +static void chap_md5_generate_challenge(ppp_pcb *pcb, unsigned char *cp) { + int clen; + LWIP_UNUSED_ARG(pcb); + + clen = MD5_MIN_CHALLENGE + magic_pow(MD5_MIN_MAX_POWER_OF_TWO_CHALLENGE); + *cp++ = clen; + magic_random_bytes(cp, clen); +} + +static int chap_md5_verify_response(ppp_pcb *pcb, int id, const char *name, + const unsigned char *secret, int secret_len, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space) { + lwip_md5_context ctx; + unsigned char idbyte = id; + unsigned char hash[MD5_HASH_SIZE]; + int challenge_len, response_len; + LWIP_UNUSED_ARG(name); + LWIP_UNUSED_ARG(pcb); + + challenge_len = *challenge++; + response_len = *response++; + if (response_len == MD5_HASH_SIZE) { + /* Generate hash of ID, secret, challenge */ + lwip_md5_init(&ctx); + lwip_md5_starts(&ctx); + lwip_md5_update(&ctx, &idbyte, 1); + lwip_md5_update(&ctx, secret, secret_len); + lwip_md5_update(&ctx, challenge, challenge_len); + lwip_md5_finish(&ctx, hash); + lwip_md5_free(&ctx); + + /* Test if our hash matches the peer's response */ + if (memcmp(hash, response, MD5_HASH_SIZE) == 0) { + ppp_slprintf(message, message_space, "Access granted"); + return 1; + } + } + ppp_slprintf(message, message_space, "Access denied"); + return 0; +} +#endif /* PPP_SERVER */ + +static void chap_md5_make_response(ppp_pcb *pcb, unsigned char *response, int id, const char *our_name, + const unsigned char *challenge, const char *secret, int secret_len, + unsigned char *private_) { + lwip_md5_context ctx; + unsigned char idbyte = id; + int challenge_len = *challenge++; + LWIP_UNUSED_ARG(our_name); + LWIP_UNUSED_ARG(private_); + LWIP_UNUSED_ARG(pcb); + + lwip_md5_init(&ctx); + lwip_md5_starts(&ctx); + lwip_md5_update(&ctx, &idbyte, 1); + lwip_md5_update(&ctx, (const u_char *)secret, secret_len); + lwip_md5_update(&ctx, challenge, challenge_len); + lwip_md5_finish(&ctx, &response[1]); + lwip_md5_free(&ctx); + response[0] = MD5_HASH_SIZE; +} + +const struct chap_digest_type md5_digest = { + CHAP_MD5, /* code */ +#if PPP_SERVER + chap_md5_generate_challenge, + chap_md5_verify_response, +#endif /* PPP_SERVER */ + chap_md5_make_response, + NULL, /* check_success */ + NULL, /* handle_failure */ +}; + +#endif /* PPP_SUPPORT && CHAP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/chap-new.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/chap-new.c new file mode 100644 index 0000000..485122d --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/chap-new.c @@ -0,0 +1,677 @@ +/* + * chap-new.c - New CHAP implementation. + * + * Copyright (c) 2003 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && CHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#if 0 /* UNUSED */ +#include "session.h" +#endif /* UNUSED */ + +#include "netif/ppp/chap-new.h" +#include "netif/ppp/chap-md5.h" +#if MSCHAP_SUPPORT +#include "netif/ppp/chap_ms.h" +#endif +#include "netif/ppp/magic.h" + +#if 0 /* UNUSED */ +/* Hook for a plugin to validate CHAP challenge */ +int (*chap_verify_hook)(const char *name, const char *ourname, int id, + const struct chap_digest_type *digest, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space) = NULL; +#endif /* UNUSED */ + +#if PPP_OPTIONS +/* + * Command-line options. + */ +static option_t chap_option_list[] = { + { "chap-restart", o_int, &chap_timeout_time, + "Set timeout for CHAP", OPT_PRIO }, + { "chap-max-challenge", o_int, &pcb->settings.chap_max_transmits, + "Set max #xmits for challenge", OPT_PRIO }, + { "chap-interval", o_int, &pcb->settings.chap_rechallenge_time, + "Set interval for rechallenge", OPT_PRIO }, + { NULL } +}; +#endif /* PPP_OPTIONS */ + + +/* Values for flags in chap_client_state and chap_server_state */ +#define LOWERUP 1 +#define AUTH_STARTED 2 +#define AUTH_DONE 4 +#define AUTH_FAILED 8 +#define TIMEOUT_PENDING 0x10 +#define CHALLENGE_VALID 0x20 + +/* + * Prototypes. 
+ */ +static void chap_init(ppp_pcb *pcb); +static void chap_lowerup(ppp_pcb *pcb); +static void chap_lowerdown(ppp_pcb *pcb); +#if PPP_SERVER +static void chap_timeout(void *arg); +static void chap_generate_challenge(ppp_pcb *pcb); +static void chap_handle_response(ppp_pcb *pcb, int code, + unsigned char *pkt, int len); +static int chap_verify_response(ppp_pcb *pcb, const char *name, const char *ourname, int id, + const struct chap_digest_type *digest, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space); +#endif /* PPP_SERVER */ +static void chap_respond(ppp_pcb *pcb, int id, + unsigned char *pkt, int len); +static void chap_handle_status(ppp_pcb *pcb, int code, int id, + unsigned char *pkt, int len); +static void chap_protrej(ppp_pcb *pcb); +static void chap_input(ppp_pcb *pcb, unsigned char *pkt, int pktlen); +#if PRINTPKT_SUPPORT +static int chap_print_pkt(const unsigned char *p, int plen, + void (*printer) (void *, const char *, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ + +/* List of digest types that we know about */ +static const struct chap_digest_type* const chap_digests[] = { + &md5_digest, +#if MSCHAP_SUPPORT + &chapms_digest, + &chapms2_digest, +#endif /* MSCHAP_SUPPORT */ + NULL +}; + +/* + * chap_init - reset to initial state. + */ +static void chap_init(ppp_pcb *pcb) { + LWIP_UNUSED_ARG(pcb); + +#if 0 /* Not necessary, everything is cleared in ppp_new() */ + memset(&pcb->chap_client, 0, sizeof(chap_client_state)); +#if PPP_SERVER + memset(&pcb->chap_server, 0, sizeof(chap_server_state)); +#endif /* PPP_SERVER */ +#endif /* 0 */ +} + +/* + * chap_lowerup - we can start doing stuff now. + */ +static void chap_lowerup(ppp_pcb *pcb) { + + pcb->chap_client.flags |= LOWERUP; +#if PPP_SERVER + pcb->chap_server.flags |= LOWERUP; + if (pcb->chap_server.flags & AUTH_STARTED) + chap_timeout(pcb); +#endif /* PPP_SERVER */ +} + +static void chap_lowerdown(ppp_pcb *pcb) { + + pcb->chap_client.flags = 0; +#if PPP_SERVER + if (pcb->chap_server.flags & TIMEOUT_PENDING) + UNTIMEOUT(chap_timeout, pcb); + pcb->chap_server.flags = 0; +#endif /* PPP_SERVER */ +} + +#if PPP_SERVER +/* + * chap_auth_peer - Start authenticating the peer. + * If the lower layer is already up, we start sending challenges, + * otherwise we wait for the lower layer to come up. + */ +void chap_auth_peer(ppp_pcb *pcb, const char *our_name, int digest_code) { + const struct chap_digest_type *dp; + int i; + + if (pcb->chap_server.flags & AUTH_STARTED) { + ppp_error("CHAP: peer authentication already started!"); + return; + } + for (i = 0; (dp = chap_digests[i]) != NULL; ++i) + if (dp->code == digest_code) + break; + if (dp == NULL) + ppp_fatal("CHAP digest 0x%x requested but not available", + digest_code); + + pcb->chap_server.digest = dp; + pcb->chap_server.name = our_name; + /* Start with a random ID value */ + pcb->chap_server.id = magic(); + pcb->chap_server.flags |= AUTH_STARTED; + if (pcb->chap_server.flags & LOWERUP) + chap_timeout(pcb); +} +#endif /* PPP_SERVER */ + +/* + * chap_auth_with_peer - Prepare to authenticate ourselves to the peer. + * There isn't much to do until we receive a challenge. 
+ */ +void chap_auth_with_peer(ppp_pcb *pcb, const char *our_name, int digest_code) { + const struct chap_digest_type *dp; + int i; + + if(NULL == our_name) + return; + + if (pcb->chap_client.flags & AUTH_STARTED) { + ppp_error("CHAP: authentication with peer already started!"); + return; + } + for (i = 0; (dp = chap_digests[i]) != NULL; ++i) + if (dp->code == digest_code) + break; + + if (dp == NULL) + ppp_fatal("CHAP digest 0x%x requested but not available", + digest_code); + + pcb->chap_client.digest = dp; + pcb->chap_client.name = our_name; + pcb->chap_client.flags |= AUTH_STARTED; +} + +#if PPP_SERVER +/* + * chap_timeout - It's time to send another challenge to the peer. + * This could be either a retransmission of a previous challenge, + * or a new challenge to start re-authentication. + */ +static void chap_timeout(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + struct pbuf *p; + + pcb->chap_server.flags &= ~TIMEOUT_PENDING; + if ((pcb->chap_server.flags & CHALLENGE_VALID) == 0) { + pcb->chap_server.challenge_xmits = 0; + chap_generate_challenge(pcb); + pcb->chap_server.flags |= CHALLENGE_VALID; + } else if (pcb->chap_server.challenge_xmits >= pcb->settings.chap_max_transmits) { + pcb->chap_server.flags &= ~CHALLENGE_VALID; + pcb->chap_server.flags |= AUTH_DONE | AUTH_FAILED; + auth_peer_fail(pcb, PPP_CHAP); + return; + } + + p = pbuf_alloc(PBUF_RAW, (u16_t)(pcb->chap_server.challenge_pktlen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + MEMCPY(p->payload, pcb->chap_server.challenge, pcb->chap_server.challenge_pktlen); + ppp_write(pcb, p); + ++pcb->chap_server.challenge_xmits; + pcb->chap_server.flags |= TIMEOUT_PENDING; + TIMEOUT(chap_timeout, arg, pcb->settings.chap_timeout_time); +} + +/* + * chap_generate_challenge - generate a challenge string and format + * the challenge packet in pcb->chap_server.challenge_pkt. + */ +static void chap_generate_challenge(ppp_pcb *pcb) { + int clen = 1, nlen, len; + unsigned char *p; + + p = pcb->chap_server.challenge; + MAKEHEADER(p, PPP_CHAP); + p += CHAP_HDRLEN; + pcb->chap_server.digest->generate_challenge(pcb, p); + clen = *p; + nlen = strlen(pcb->chap_server.name); + memcpy(p + 1 + clen, pcb->chap_server.name, nlen); + + len = CHAP_HDRLEN + 1 + clen + nlen; + pcb->chap_server.challenge_pktlen = PPP_HDRLEN + len; + + p = pcb->chap_server.challenge + PPP_HDRLEN; + p[0] = CHAP_CHALLENGE; + p[1] = ++pcb->chap_server.id; + p[2] = len >> 8; + p[3] = len; +} + +/* + * chap_handle_response - check the response to our challenge. 
+ */ +static void chap_handle_response(ppp_pcb *pcb, int id, + unsigned char *pkt, int len) { + int response_len, ok, mlen; + const unsigned char *response; + unsigned char *outp; + struct pbuf *p; + const char *name = NULL; /* initialized to shut gcc up */ +#if 0 /* UNUSED */ + int (*verifier)(const char *, const char *, int, const struct chap_digest_type *, + const unsigned char *, const unsigned char *, char *, int); +#endif /* UNUSED */ + char rname[MAXNAMELEN+1]; + char message[256]; + + if ((pcb->chap_server.flags & LOWERUP) == 0) + return; + if (id != pcb->chap_server.challenge[PPP_HDRLEN+1] || len < 2) + return; + if (pcb->chap_server.flags & CHALLENGE_VALID) { + response = pkt; + GETCHAR(response_len, pkt); + len -= response_len + 1; /* length of name */ + name = (char *)pkt + response_len; + if (len < 0) + return; + + if (pcb->chap_server.flags & TIMEOUT_PENDING) { + pcb->chap_server.flags &= ~TIMEOUT_PENDING; + UNTIMEOUT(chap_timeout, pcb); + } +#if PPP_REMOTENAME + if (pcb->settings.explicit_remote) { + name = pcb->remote_name; + } else +#endif /* PPP_REMOTENAME */ + { + /* Null terminate and clean remote name. */ + ppp_slprintf(rname, sizeof(rname), "%.*v", len, name); + name = rname; + } + +#if 0 /* UNUSED */ + if (chap_verify_hook) + verifier = chap_verify_hook; + else + verifier = chap_verify_response; + ok = (*verifier)(name, pcb->chap_server.name, id, pcb->chap_server.digest, + pcb->chap_server.challenge + PPP_HDRLEN + CHAP_HDRLEN, + response, pcb->chap_server.message, sizeof(pcb->chap_server.message)); +#endif /* UNUSED */ + ok = chap_verify_response(pcb, name, pcb->chap_server.name, id, pcb->chap_server.digest, + pcb->chap_server.challenge + PPP_HDRLEN + CHAP_HDRLEN, + response, message, sizeof(message)); +#if 0 /* UNUSED */ + if (!ok || !auth_number()) { +#endif /* UNUSED */ + if (!ok) { + pcb->chap_server.flags |= AUTH_FAILED; + ppp_warn("Peer %q failed CHAP authentication", name); + } + } else if ((pcb->chap_server.flags & AUTH_DONE) == 0) + return; + + /* send the response */ + mlen = strlen(message); + len = CHAP_HDRLEN + mlen; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN +len), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (unsigned char *)p->payload; + MAKEHEADER(outp, PPP_CHAP); + + outp[0] = (pcb->chap_server.flags & AUTH_FAILED)? CHAP_FAILURE: CHAP_SUCCESS; + outp[1] = id; + outp[2] = len >> 8; + outp[3] = len; + if (mlen > 0) + memcpy(outp + CHAP_HDRLEN, message, mlen); + ppp_write(pcb, p); + + if (pcb->chap_server.flags & CHALLENGE_VALID) { + pcb->chap_server.flags &= ~CHALLENGE_VALID; + if (!(pcb->chap_server.flags & AUTH_DONE) && !(pcb->chap_server.flags & AUTH_FAILED)) { + +#if 0 /* UNUSED */ + /* + * Auth is OK, so now we need to check session restrictions + * to ensure everything is OK, but only if we used a + * plugin, and only if we're configured to check. This + * allows us to do PAM checks on PPP servers that + * authenticate against ActiveDirectory, and use AD for + * account info (like when using Winbind integrated with + * PAM). 
+ */ + if (session_mgmt && + session_check(name, NULL, devnam, NULL) == 0) { + pcb->chap_server.flags |= AUTH_FAILED; + ppp_warn("Peer %q failed CHAP Session verification", name); + } +#endif /* UNUSED */ + + } + if (pcb->chap_server.flags & AUTH_FAILED) { + auth_peer_fail(pcb, PPP_CHAP); + } else { + if ((pcb->chap_server.flags & AUTH_DONE) == 0) + auth_peer_success(pcb, PPP_CHAP, + pcb->chap_server.digest->code, + name, strlen(name)); + if (pcb->settings.chap_rechallenge_time) { + pcb->chap_server.flags |= TIMEOUT_PENDING; + TIMEOUT(chap_timeout, pcb, + pcb->settings.chap_rechallenge_time); + } + } + pcb->chap_server.flags |= AUTH_DONE; + } +} + +/* + * chap_verify_response - check whether the peer's response matches + * what we think it should be. Returns 1 if it does (authentication + * succeeded), or 0 if it doesn't. + */ +static int chap_verify_response(ppp_pcb *pcb, const char *name, const char *ourname, int id, + const struct chap_digest_type *digest, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space) { + int ok; + unsigned char secret[MAXSECRETLEN]; + int secret_len; + + /* Get the secret that the peer is supposed to know */ + if (!get_secret(pcb, name, ourname, (char *)secret, &secret_len, 1)) { + ppp_error("No CHAP secret found for authenticating %q", name); + return 0; + } + ok = digest->verify_response(pcb, id, name, secret, secret_len, challenge, + response, message, message_space); + memset(secret, 0, sizeof(secret)); + + return ok; +} +#endif /* PPP_SERVER */ + +/* + * chap_respond - Generate and send a response to a challenge. + */ +static void chap_respond(ppp_pcb *pcb, int id, + unsigned char *pkt, int len) { + int clen, nlen; + int secret_len; + struct pbuf *p; + u_char *outp; + char rname[MAXNAMELEN+1]; + char secret[MAXSECRETLEN+1]; + + p = pbuf_alloc(PBUF_RAW, (u16_t)(RESP_MAX_PKTLEN), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + if ((pcb->chap_client.flags & (LOWERUP | AUTH_STARTED)) != (LOWERUP | AUTH_STARTED)) + return; /* not ready */ + if (len < 2 || len < pkt[0] + 1) + return; /* too short */ + clen = pkt[0]; + nlen = len - (clen + 1); + + /* Null terminate and clean remote name. 
*/ + ppp_slprintf(rname, sizeof(rname), "%.*v", nlen, pkt + clen + 1); + +#if PPP_REMOTENAME + /* Microsoft doesn't send their name back in the PPP packet */ + if (pcb->settings.explicit_remote || (pcb->settings.remote_name[0] != 0 && rname[0] == 0)) + strlcpy(rname, pcb->settings.remote_name, sizeof(rname)); +#endif /* PPP_REMOTENAME */ + + /* get secret for authenticating ourselves with the specified host */ + if (!get_secret(pcb, pcb->chap_client.name, rname, secret, &secret_len, 0)) { + secret_len = 0; /* assume null secret if can't find one */ + ppp_warn("No CHAP secret found for authenticating us to %q", rname); + } + + outp = (u_char*)p->payload; + MAKEHEADER(outp, PPP_CHAP); + outp += CHAP_HDRLEN; + + pcb->chap_client.digest->make_response(pcb, outp, id, pcb->chap_client.name, pkt, + secret, secret_len, pcb->chap_client.priv); + memset(secret, 0, secret_len); + + clen = *outp; + nlen = strlen(pcb->chap_client.name); + memcpy(outp + clen + 1, pcb->chap_client.name, nlen); + + outp = (u_char*)p->payload + PPP_HDRLEN; + len = CHAP_HDRLEN + clen + 1 + nlen; + outp[0] = CHAP_RESPONSE; + outp[1] = id; + outp[2] = len >> 8; + outp[3] = len; + + pbuf_realloc(p, PPP_HDRLEN + len); + ppp_write(pcb, p); +} + +static void chap_handle_status(ppp_pcb *pcb, int code, int id, + unsigned char *pkt, int len) { + const char *msg = NULL; + LWIP_UNUSED_ARG(id); + + if ((pcb->chap_client.flags & (AUTH_DONE|AUTH_STARTED|LOWERUP)) + != (AUTH_STARTED|LOWERUP)) + return; + pcb->chap_client.flags |= AUTH_DONE; + + if (code == CHAP_SUCCESS) { + /* used for MS-CHAP v2 mutual auth, yuck */ + if (pcb->chap_client.digest->check_success != NULL) { + if (!(*pcb->chap_client.digest->check_success)(pcb, pkt, len, pcb->chap_client.priv)) + code = CHAP_FAILURE; + } else + msg = "CHAP authentication succeeded"; + } else { + if (pcb->chap_client.digest->handle_failure != NULL) + (*pcb->chap_client.digest->handle_failure)(pcb, pkt, len); + else + msg = "CHAP authentication failed"; + } + if (msg) { + if (len > 0) + ppp_info("%s: %.*v", msg, len, pkt); + else + ppp_info("%s", msg); + } + if (code == CHAP_SUCCESS) + auth_withpeer_success(pcb, PPP_CHAP, pcb->chap_client.digest->code); + else { + pcb->chap_client.flags |= AUTH_FAILED; + ppp_error("CHAP authentication failed"); + auth_withpeer_fail(pcb, PPP_CHAP); + } +} + +static void chap_input(ppp_pcb *pcb, unsigned char *pkt, int pktlen) { + unsigned char code, id; + int len; + + if (pktlen < CHAP_HDRLEN) + return; + GETCHAR(code, pkt); + GETCHAR(id, pkt); + GETSHORT(len, pkt); + if (len < CHAP_HDRLEN || len > pktlen) + return; + len -= CHAP_HDRLEN; + + switch (code) { + case CHAP_CHALLENGE: + chap_respond(pcb, id, pkt, len); + break; +#if PPP_SERVER + case CHAP_RESPONSE: + chap_handle_response(pcb, id, pkt, len); + break; +#endif /* PPP_SERVER */ + case CHAP_FAILURE: + case CHAP_SUCCESS: + chap_handle_status(pcb, code, id, pkt, len); + break; + default: + break; + } +} + +static void chap_protrej(ppp_pcb *pcb) { + +#if PPP_SERVER + if (pcb->chap_server.flags & TIMEOUT_PENDING) { + pcb->chap_server.flags &= ~TIMEOUT_PENDING; + UNTIMEOUT(chap_timeout, pcb); + } + if (pcb->chap_server.flags & AUTH_STARTED) { + pcb->chap_server.flags = 0; + auth_peer_fail(pcb, PPP_CHAP); + } +#endif /* PPP_SERVER */ + if ((pcb->chap_client.flags & (AUTH_STARTED|AUTH_DONE)) == AUTH_STARTED) { + pcb->chap_client.flags &= ~AUTH_STARTED; + ppp_error("CHAP authentication failed due to protocol-reject"); + auth_withpeer_fail(pcb, PPP_CHAP); + } +} + +#if PRINTPKT_SUPPORT +/* + * chap_print_pkt - 
print the contents of a CHAP packet. + */ +static const char* const chap_code_names[] = { + "Challenge", "Response", "Success", "Failure" +}; + +static int chap_print_pkt(const unsigned char *p, int plen, + void (*printer) (void *, const char *, ...), void *arg) { + int code, id, len; + int clen, nlen; + unsigned char x; + + if (plen < CHAP_HDRLEN) + return 0; + GETCHAR(code, p); + GETCHAR(id, p); + GETSHORT(len, p); + if (len < CHAP_HDRLEN || len > plen) + return 0; + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(chap_code_names)) + printer(arg, " %s", chap_code_names[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= CHAP_HDRLEN; + switch (code) { + case CHAP_CHALLENGE: + case CHAP_RESPONSE: + if (len < 1) + break; + clen = p[0]; + if (len < clen + 1) + break; + ++p; + nlen = len - clen - 1; + printer(arg, " <"); + for (; clen > 0; --clen) { + GETCHAR(x, p); + printer(arg, "%.2x", x); + } + printer(arg, ">, name = "); + ppp_print_string(p, nlen, printer, arg); + break; + case CHAP_FAILURE: + case CHAP_SUCCESS: + printer(arg, " "); + ppp_print_string(p, len, printer, arg); + break; + default: + for (clen = len; clen > 0; --clen) { + GETCHAR(x, p); + printer(arg, " %.2x", x); + } + /* no break */ + } + + return len + CHAP_HDRLEN; +} +#endif /* PRINTPKT_SUPPORT */ + +const struct protent chap_protent = { + PPP_CHAP, + chap_init, + chap_input, + chap_protrej, + chap_lowerup, + chap_lowerdown, + NULL, /* open */ + NULL, /* close */ +#if PRINTPKT_SUPPORT + chap_print_pkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, /* datainput */ +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "CHAP", /* name */ + NULL, /* data_name */ +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + chap_option_list, + NULL, /* check_options */ +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + NULL, + NULL +#endif /* DEMAND_SUPPORT */ +}; + +#endif /* PPP_SUPPORT && CHAP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/chap_ms.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/chap_ms.c new file mode 100644 index 0000000..5a989c9 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/chap_ms.c @@ -0,0 +1,962 @@ +/* + * chap_ms.c - Microsoft MS-CHAP compatible implementation. + * + * Copyright (c) 1995 Eric Rosenquist. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/* + * Modifications by Lauri Pesonen / lpesonen@clinet.fi, april 1997 + * + * Implemented LANManager type password response to MS-CHAP challenges. + * Now pppd provides both NT style and LANMan style blocks, and the + * prefered is set by option "ms-lanman". Default is to use NT. + * The hash text (StdText) was taken from Win95 RASAPI32.DLL. + * + * You should also use DOMAIN\\USERNAME as described in README.MSCHAP80 + */ + +/* + * Modifications by Frank Cusack, frank@google.com, March 2002. + * + * Implemented MS-CHAPv2 functionality, heavily based on sample + * implementation in RFC 2759. Implemented MPPE functionality, + * heavily based on sample implementation in RFC 3079. + * + * Copyright (c) 2002 Google, Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && MSCHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#include +#include +#include +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/chap-new.h" +#include "netif/ppp/chap_ms.h" +#include "netif/ppp/pppcrypt.h" +#include "netif/ppp/magic.h" +#if MPPE_SUPPORT +#include "netif/ppp/mppe.h" /* For mppe_sha1_pad*, mppe_set_key() */ +#endif /* MPPE_SUPPORT */ + +#define SHA1_SIGNATURE_SIZE 20 +#define MD4_SIGNATURE_SIZE 16 /* 16 bytes in a MD4 message digest */ +#define MAX_NT_PASSWORD 256 /* Max (Unicode) chars in an NT pass */ + +#define MS_CHAP_RESPONSE_LEN 49 /* Response length for MS-CHAP */ +#define MS_CHAP2_RESPONSE_LEN 49 /* Response length for MS-CHAPv2 */ +#define MS_AUTH_RESPONSE_LENGTH 40 /* MS-CHAPv2 authenticator response, */ + /* as ASCII */ + +/* Error codes for MS-CHAP failure messages. 
*/ +#define MS_CHAP_ERROR_RESTRICTED_LOGON_HOURS 646 +#define MS_CHAP_ERROR_ACCT_DISABLED 647 +#define MS_CHAP_ERROR_PASSWD_EXPIRED 648 +#define MS_CHAP_ERROR_NO_DIALIN_PERMISSION 649 +#define MS_CHAP_ERROR_AUTHENTICATION_FAILURE 691 +#define MS_CHAP_ERROR_CHANGING_PASSWORD 709 + +/* + * Offsets within the response field for MS-CHAP + */ +#define MS_CHAP_LANMANRESP 0 +#define MS_CHAP_LANMANRESP_LEN 24 +#define MS_CHAP_NTRESP 24 +#define MS_CHAP_NTRESP_LEN 24 +#define MS_CHAP_USENT 48 + +/* + * Offsets within the response field for MS-CHAP2 + */ +#define MS_CHAP2_PEER_CHALLENGE 0 +#define MS_CHAP2_PEER_CHAL_LEN 16 +#define MS_CHAP2_RESERVED_LEN 8 +#define MS_CHAP2_NTRESP 24 +#define MS_CHAP2_NTRESP_LEN 24 +#define MS_CHAP2_FLAGS 48 + +#if MPPE_SUPPORT +#if 0 /* UNUSED */ +/* These values are the RADIUS attribute values--see RFC 2548. */ +#define MPPE_ENC_POL_ENC_ALLOWED 1 +#define MPPE_ENC_POL_ENC_REQUIRED 2 +#define MPPE_ENC_TYPES_RC4_40 2 +#define MPPE_ENC_TYPES_RC4_128 4 + +/* used by plugins (using above values) */ +extern void set_mppe_enc_types(int, int); +#endif /* UNUSED */ +#endif /* MPPE_SUPPORT */ + +/* Are we the authenticator or authenticatee? For MS-CHAPv2 key derivation. */ +#define MS_CHAP2_AUTHENTICATEE 0 +#define MS_CHAP2_AUTHENTICATOR 1 + +static void ascii2unicode (const char[], int, u_char[]); +static void NTPasswordHash (u_char *, int, u_char[MD4_SIGNATURE_SIZE]); +static void ChallengeResponse (const u_char *, const u_char *, u_char[24]); +static void ChallengeHash (const u_char[16], const u_char *, const char *, u_char[8]); +static void ChapMS_NT (const u_char *, const char *, int, u_char[24]); +static void ChapMS2_NT (const u_char *, const u_char[16], const char *, const char *, int, + u_char[24]); +static void GenerateAuthenticatorResponsePlain + (const char*, int, u_char[24], const u_char[16], const u_char *, + const char *, u_char[41]); +#ifdef MSLANMAN +static void ChapMS_LANMan (u_char *, char *, int, u_char *); +#endif + +static void GenerateAuthenticatorResponse(const u_char PasswordHashHash[MD4_SIGNATURE_SIZE], + u_char NTResponse[24], const u_char PeerChallenge[16], + const u_char *rchallenge, const char *username, + u_char authResponse[MS_AUTH_RESPONSE_LENGTH+1]); + +#if MPPE_SUPPORT +static void Set_Start_Key (ppp_pcb *pcb, const u_char *, const char *, int); +static void SetMasterKeys (ppp_pcb *pcb, const char *, int, u_char[24], int); +#endif /* MPPE_SUPPORT */ + +static void ChapMS (ppp_pcb *pcb, const u_char *, const char *, int, u_char *); +static void ChapMS2 (ppp_pcb *pcb, const u_char *, const u_char *, const char *, const char *, int, + u_char *, u_char[MS_AUTH_RESPONSE_LENGTH+1], int); + +#ifdef MSLANMAN +bool ms_lanman = 0; /* Use LanMan password instead of NT */ + /* Has meaning only with MS-CHAP challenges */ +#endif + +#if MPPE_SUPPORT +#ifdef DEBUGMPPEKEY +/* For MPPE debug */ +/* Use "[]|}{?/><,`!2&&(" (sans quotes) for RFC 3079 MS-CHAPv2 test value */ +static char *mschap_challenge = NULL; +/* Use "!@\#$%^&*()_+:3|~" (sans quotes, backslash is to escape #) for ... */ +static char *mschap2_peer_challenge = NULL; +#endif + +#include "netif/ppp/fsm.h" /* Need to poke MPPE options */ +#include "netif/ppp/ccp.h" +#endif /* MPPE_SUPPORT */ + +#if PPP_OPTIONS +/* + * Command-line options. 
+ */ +static option_t chapms_option_list[] = { +#ifdef MSLANMAN + { "ms-lanman", o_bool, &ms_lanman, + "Use LanMan passwd when using MS-CHAP", 1 }, +#endif +#ifdef DEBUGMPPEKEY + { "mschap-challenge", o_string, &mschap_challenge, + "specify CHAP challenge" }, + { "mschap2-peer-challenge", o_string, &mschap2_peer_challenge, + "specify CHAP peer challenge" }, +#endif + { NULL } +}; +#endif /* PPP_OPTIONS */ + +#if PPP_SERVER +/* + * chapms_generate_challenge - generate a challenge for MS-CHAP. + * For MS-CHAP the challenge length is fixed at 8 bytes. + * The length goes in challenge[0] and the actual challenge starts + * at challenge[1]. + */ +static void chapms_generate_challenge(ppp_pcb *pcb, unsigned char *challenge) { + LWIP_UNUSED_ARG(pcb); + + *challenge++ = 8; +#ifdef DEBUGMPPEKEY + if (mschap_challenge && strlen(mschap_challenge) == 8) + memcpy(challenge, mschap_challenge, 8); + else +#endif + magic_random_bytes(challenge, 8); +} + +static void chapms2_generate_challenge(ppp_pcb *pcb, unsigned char *challenge) { + LWIP_UNUSED_ARG(pcb); + + *challenge++ = 16; +#ifdef DEBUGMPPEKEY + if (mschap_challenge && strlen(mschap_challenge) == 16) + memcpy(challenge, mschap_challenge, 16); + else +#endif + magic_random_bytes(challenge, 16); +} + +static int chapms_verify_response(ppp_pcb *pcb, int id, const char *name, + const unsigned char *secret, int secret_len, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space) { + unsigned char md[MS_CHAP_RESPONSE_LEN]; + int diff; + int challenge_len, response_len; + LWIP_UNUSED_ARG(id); + LWIP_UNUSED_ARG(name); + + challenge_len = *challenge++; /* skip length, is 8 */ + response_len = *response++; + if (response_len != MS_CHAP_RESPONSE_LEN) + goto bad; + +#ifndef MSLANMAN + if (!response[MS_CHAP_USENT]) { + /* Should really propagate this into the error packet. */ + ppp_notice("Peer request for LANMAN auth not supported"); + goto bad; + } +#endif + + /* Generate the expected response. */ + ChapMS(pcb, (const u_char *)challenge, (const char *)secret, secret_len, md); + +#ifdef MSLANMAN + /* Determine which part of response to verify against */ + if (!response[MS_CHAP_USENT]) + diff = memcmp(&response[MS_CHAP_LANMANRESP], + &md[MS_CHAP_LANMANRESP], MS_CHAP_LANMANRESP_LEN); + else +#endif + diff = memcmp(&response[MS_CHAP_NTRESP], &md[MS_CHAP_NTRESP], + MS_CHAP_NTRESP_LEN); + + if (diff == 0) { + ppp_slprintf(message, message_space, "Access granted"); + return 1; + } + + bad: + /* See comments below for MS-CHAP V2 */ + ppp_slprintf(message, message_space, "E=691 R=1 C=%0.*B V=0", + challenge_len, challenge); + return 0; +} + +static int chapms2_verify_response(ppp_pcb *pcb, int id, const char *name, + const unsigned char *secret, int secret_len, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space) { + unsigned char md[MS_CHAP2_RESPONSE_LEN]; + char saresponse[MS_AUTH_RESPONSE_LENGTH+1]; + int challenge_len, response_len; + LWIP_UNUSED_ARG(id); + + challenge_len = *challenge++; /* skip length, is 16 */ + response_len = *response++; + if (response_len != MS_CHAP2_RESPONSE_LEN) + goto bad; /* not even the right length */ + + /* Generate the expected response and our mutual auth. 
*/ + ChapMS2(pcb, (const u_char*)challenge, (const u_char*)&response[MS_CHAP2_PEER_CHALLENGE], name, + (const char *)secret, secret_len, md, + (unsigned char *)saresponse, MS_CHAP2_AUTHENTICATOR); + + /* compare MDs and send the appropriate status */ + /* + * Per RFC 2759, success message must be formatted as + * "S= M=" + * where + * is the Authenticator Response (mutual auth) + * is a text message + * + * However, some versions of Windows (win98 tested) do not know + * about the M= part (required per RFC 2759) and flag + * it as an error (reported incorrectly as an encryption error + * to the user). Since the RFC requires it, and it can be + * useful information, we supply it if the peer is a conforming + * system. Luckily (?), win98 sets the Flags field to 0x04 + * (contrary to RFC requirements) so we can use that to + * distinguish between conforming and non-conforming systems. + * + * Special thanks to Alex Swiridov for + * help debugging this. + */ + if (memcmp(&md[MS_CHAP2_NTRESP], &response[MS_CHAP2_NTRESP], + MS_CHAP2_NTRESP_LEN) == 0) { + if (response[MS_CHAP2_FLAGS]) + ppp_slprintf(message, message_space, "S=%s", saresponse); + else + ppp_slprintf(message, message_space, "S=%s M=%s", + saresponse, "Access granted"); + return 1; + } + + bad: + /* + * Failure message must be formatted as + * "E=e R=r C=c V=v M=m" + * where + * e = error code (we use 691, ERROR_AUTHENTICATION_FAILURE) + * r = retry (we use 1, ok to retry) + * c = challenge to use for next response, we reuse previous + * v = Change Password version supported, we use 0 + * m = text message + * + * The M=m part is only for MS-CHAPv2. Neither win2k nor + * win98 (others untested) display the message to the user anyway. + * They also both ignore the E=e code. + * + * Note that it's safe to reuse the same challenge as we don't + * actually accept another response based on the error message + * (and no clients try to resend a response anyway). + * + * Basically, this whole bit is useless code, even the small + * implementation here is only because of overspecification. 
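Concretely, the failure string described in the comment above comes out as, e.g., "E=691 R=1 C=<hex challenge> V=0 M=Access denied". The sketch below is a rough standalone equivalent of that formatting using plain snprintf instead of ppp_slprintf's %B buffer-dump conversion; it is illustrative only (not part of chap_ms.c), and the ex_ names and 16-byte-challenge assumption are mine.

#include <stdio.h>
#include <stddef.h>

static void ex_mschap2_failure_msg(char *msg, size_t msg_space,
                                   const unsigned char *challenge, int challenge_len)
{
    char hex[2 * 16 + 1];   /* MS-CHAPv2 challenges are 16 bytes */
    int i, n = 0;

    hex[0] = '\0';
    for (i = 0; i < challenge_len && i < 16; i++)
        n += snprintf(hex + n, sizeof(hex) - (size_t)n, "%02x", challenge[i]);
    /* e.g. "E=691 R=1 C=00112233445566778899aabbccddeeff V=0 M=Access denied" */
    snprintf(msg, msg_space, "E=691 R=1 C=%s V=0 M=%s", hex, "Access denied");
}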
+ */ + ppp_slprintf(message, message_space, "E=691 R=1 C=%0.*B V=0 M=%s", + challenge_len, challenge, "Access denied"); + return 0; +} +#endif /* PPP_SERVER */ + +static void chapms_make_response(ppp_pcb *pcb, unsigned char *response, int id, const char *our_name, + const unsigned char *challenge, const char *secret, int secret_len, + unsigned char *private_) { + LWIP_UNUSED_ARG(id); + LWIP_UNUSED_ARG(our_name); + LWIP_UNUSED_ARG(private_); + challenge++; /* skip length, should be 8 */ + *response++ = MS_CHAP_RESPONSE_LEN; + ChapMS(pcb, challenge, secret, secret_len, response); +} + +static void chapms2_make_response(ppp_pcb *pcb, unsigned char *response, int id, const char *our_name, + const unsigned char *challenge, const char *secret, int secret_len, + unsigned char *private_) { + LWIP_UNUSED_ARG(id); + challenge++; /* skip length, should be 16 */ + *response++ = MS_CHAP2_RESPONSE_LEN; + ChapMS2(pcb, challenge, +#ifdef DEBUGMPPEKEY + mschap2_peer_challenge, +#else + NULL, +#endif + our_name, secret, secret_len, response, private_, + MS_CHAP2_AUTHENTICATEE); +} + +static int chapms2_check_success(ppp_pcb *pcb, unsigned char *msg, int len, unsigned char *private_) { + LWIP_UNUSED_ARG(pcb); + + if ((len < MS_AUTH_RESPONSE_LENGTH + 2) || + strncmp((char *)msg, "S=", 2) != 0) { + /* Packet does not start with "S=" */ + ppp_error("MS-CHAPv2 Success packet is badly formed."); + return 0; + } + msg += 2; + len -= 2; + if (len < MS_AUTH_RESPONSE_LENGTH + || memcmp(msg, private_, MS_AUTH_RESPONSE_LENGTH)) { + /* Authenticator Response did not match expected. */ + ppp_error("MS-CHAPv2 mutual authentication failed."); + return 0; + } + /* Authenticator Response matches. */ + msg += MS_AUTH_RESPONSE_LENGTH; /* Eat it */ + len -= MS_AUTH_RESPONSE_LENGTH; + if ((len >= 3) && !strncmp((char *)msg, " M=", 3)) { + msg += 3; /* Eat the delimiter */ + } else if (len) { + /* Packet has extra text which does not begin " M=" */ + ppp_error("MS-CHAPv2 Success packet is badly formed."); + return 0; + } + return 1; +} + +static void chapms_handle_failure(ppp_pcb *pcb, unsigned char *inp, int len) { + int err; + const char *p; + char msg[64]; + LWIP_UNUSED_ARG(pcb); + + /* We want a null-terminated string for strxxx(). */ + len = LWIP_MIN(len, 63); + MEMCPY(msg, inp, len); + msg[len] = 0; + p = msg; + + /* + * Deal with MS-CHAP formatted failure messages; just print the + * M= part (if any). For MS-CHAP we're not really supposed + * to use M=, but it shouldn't hurt. See + * chapms[2]_verify_response. + */ + if (!strncmp(p, "E=", 2)) + err = strtol(p+2, NULL, 10); /* Remember the error code. */ + else + goto print_msg; /* Message is badly formatted. */ + + if (len && ((p = strstr(p, " M=")) != NULL)) { + /* M= field found. */ + p += 3; + } else { + /* No M=; use the error code. */ + switch (err) { + case MS_CHAP_ERROR_RESTRICTED_LOGON_HOURS: + p = "E=646 Restricted logon hours"; + break; + + case MS_CHAP_ERROR_ACCT_DISABLED: + p = "E=647 Account disabled"; + break; + + case MS_CHAP_ERROR_PASSWD_EXPIRED: + p = "E=648 Password expired"; + break; + + case MS_CHAP_ERROR_NO_DIALIN_PERMISSION: + p = "E=649 No dialin permission"; + break; + + case MS_CHAP_ERROR_AUTHENTICATION_FAILURE: + p = "E=691 Authentication failure"; + break; + + case MS_CHAP_ERROR_CHANGING_PASSWORD: + /* Should never see this, we don't support Change Password. 
*/ + p = "E=709 Error changing password"; + break; + + default: + ppp_error("Unknown MS-CHAP authentication failure: %.*v", + len, inp); + return; + } + } +print_msg: + if (p != NULL) + ppp_error("MS-CHAP authentication failed: %v", p); +} + +static void ChallengeResponse(const u_char *challenge, + const u_char PasswordHash[MD4_SIGNATURE_SIZE], + u_char response[24]) { + u_char ZPasswordHash[21]; + lwip_des_context des; + u_char des_key[8]; + + BZERO(ZPasswordHash, sizeof(ZPasswordHash)); + MEMCPY(ZPasswordHash, PasswordHash, MD4_SIGNATURE_SIZE); + +#if 0 + dbglog("ChallengeResponse - ZPasswordHash %.*B", + sizeof(ZPasswordHash), ZPasswordHash); +#endif + + pppcrypt_56_to_64_bit_key(ZPasswordHash + 0, des_key); + lwip_des_init(&des); + lwip_des_setkey_enc(&des, des_key); + lwip_des_crypt_ecb(&des, challenge, response +0); + lwip_des_free(&des); + + pppcrypt_56_to_64_bit_key(ZPasswordHash + 7, des_key); + lwip_des_init(&des); + lwip_des_setkey_enc(&des, des_key); + lwip_des_crypt_ecb(&des, challenge, response +8); + lwip_des_free(&des); + + pppcrypt_56_to_64_bit_key(ZPasswordHash + 14, des_key); + lwip_des_init(&des); + lwip_des_setkey_enc(&des, des_key); + lwip_des_crypt_ecb(&des, challenge, response +16); + lwip_des_free(&des); + +#if 0 + dbglog("ChallengeResponse - response %.24B", response); +#endif +} + +static void ChallengeHash(const u_char PeerChallenge[16], const u_char *rchallenge, + const char *username, u_char Challenge[8]) { + lwip_sha1_context sha1Context; + u_char sha1Hash[SHA1_SIGNATURE_SIZE]; + const char *user; + + /* remove domain from "domain\username" */ + if ((user = strrchr(username, '\\')) != NULL) + ++user; + else + user = username; + + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, PeerChallenge, 16); + lwip_sha1_update(&sha1Context, rchallenge, 16); + lwip_sha1_update(&sha1Context, (const unsigned char*)user, strlen(user)); + lwip_sha1_finish(&sha1Context, sha1Hash); + lwip_sha1_free(&sha1Context); + + MEMCPY(Challenge, sha1Hash, 8); +} + +/* + * Convert the ASCII version of the password to Unicode. + * This implicitly supports 8-bit ISO8859/1 characters. + * This gives us the little-endian representation, which + * is assumed by all M$ CHAP RFCs. (Unicode byte ordering + * is machine-dependent.) + */ +static void ascii2unicode(const char ascii[], int ascii_len, u_char unicode[]) { + int i; + + BZERO(unicode, ascii_len * 2); + for (i = 0; i < ascii_len; i++) + unicode[i * 2] = (u_char) ascii[i]; +} + +static void NTPasswordHash(u_char *secret, int secret_len, u_char hash[MD4_SIGNATURE_SIZE]) { + lwip_md4_context md4Context; + + lwip_md4_init(&md4Context); + lwip_md4_starts(&md4Context); + lwip_md4_update(&md4Context, secret, secret_len); + lwip_md4_finish(&md4Context, hash); + lwip_md4_free(&md4Context); +} + +static void ChapMS_NT(const u_char *rchallenge, const char *secret, int secret_len, + u_char NTResponse[24]) { + u_char unicodePassword[MAX_NT_PASSWORD * 2]; + u_char PasswordHash[MD4_SIGNATURE_SIZE]; + + /* Hash the Unicode version of the secret (== password). 
*/ + ascii2unicode(secret, secret_len, unicodePassword); + NTPasswordHash(unicodePassword, secret_len * 2, PasswordHash); + + ChallengeResponse(rchallenge, PasswordHash, NTResponse); +} + +static void ChapMS2_NT(const u_char *rchallenge, const u_char PeerChallenge[16], const char *username, + const char *secret, int secret_len, u_char NTResponse[24]) { + u_char unicodePassword[MAX_NT_PASSWORD * 2]; + u_char PasswordHash[MD4_SIGNATURE_SIZE]; + u_char Challenge[8]; + + ChallengeHash(PeerChallenge, rchallenge, username, Challenge); + + /* Hash the Unicode version of the secret (== password). */ + ascii2unicode(secret, secret_len, unicodePassword); + NTPasswordHash(unicodePassword, secret_len * 2, PasswordHash); + + ChallengeResponse(Challenge, PasswordHash, NTResponse); +} + +#ifdef MSLANMAN +static u_char *StdText = (u_char *)"KGS!@#$%"; /* key from rasapi32.dll */ + +static void ChapMS_LANMan(u_char *rchallenge, char *secret, int secret_len, + unsigned char *response) { + int i; + u_char UcasePassword[MAX_NT_PASSWORD]; /* max is actually 14 */ + u_char PasswordHash[MD4_SIGNATURE_SIZE]; + lwip_des_context des; + u_char des_key[8]; + + /* LANMan password is case insensitive */ + BZERO(UcasePassword, sizeof(UcasePassword)); + for (i = 0; i < secret_len; i++) + UcasePassword[i] = (u_char)toupper(secret[i]); + + pppcrypt_56_to_64_bit_key(UcasePassword +0, des_key); + lwip_des_init(&des); + lwip_des_setkey_enc(&des, des_key); + lwip_des_crypt_ecb(&des, StdText, PasswordHash +0); + lwip_des_free(&des); + + pppcrypt_56_to_64_bit_key(UcasePassword +7, des_key); + lwip_des_init(&des); + lwip_des_setkey_enc(&des, des_key); + lwip_des_crypt_ecb(&des, StdText, PasswordHash +8); + lwip_des_free(&des); + + ChallengeResponse(rchallenge, PasswordHash, &response[MS_CHAP_LANMANRESP]); +} +#endif + + +static void GenerateAuthenticatorResponse(const u_char PasswordHashHash[MD4_SIGNATURE_SIZE], + u_char NTResponse[24], const u_char PeerChallenge[16], + const u_char *rchallenge, const char *username, + u_char authResponse[MS_AUTH_RESPONSE_LENGTH+1]) { + /* + * "Magic" constants used in response generation, from RFC 2759. 
+ */ + static const u_char Magic1[39] = /* "Magic server to client signing constant" */ + { 0x4D, 0x61, 0x67, 0x69, 0x63, 0x20, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x20, 0x74, 0x6F, 0x20, 0x63, 0x6C, 0x69, 0x65, + 0x6E, 0x74, 0x20, 0x73, 0x69, 0x67, 0x6E, 0x69, 0x6E, 0x67, + 0x20, 0x63, 0x6F, 0x6E, 0x73, 0x74, 0x61, 0x6E, 0x74 }; + static const u_char Magic2[41] = /* "Pad to make it do more than one iteration" */ + { 0x50, 0x61, 0x64, 0x20, 0x74, 0x6F, 0x20, 0x6D, 0x61, 0x6B, + 0x65, 0x20, 0x69, 0x74, 0x20, 0x64, 0x6F, 0x20, 0x6D, 0x6F, + 0x72, 0x65, 0x20, 0x74, 0x68, 0x61, 0x6E, 0x20, 0x6F, 0x6E, + 0x65, 0x20, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6F, + 0x6E }; + + int i; + lwip_sha1_context sha1Context; + u_char Digest[SHA1_SIGNATURE_SIZE]; + u_char Challenge[8]; + + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, PasswordHashHash, MD4_SIGNATURE_SIZE); + lwip_sha1_update(&sha1Context, NTResponse, 24); + lwip_sha1_update(&sha1Context, Magic1, sizeof(Magic1)); + lwip_sha1_finish(&sha1Context, Digest); + lwip_sha1_free(&sha1Context); + + ChallengeHash(PeerChallenge, rchallenge, username, Challenge); + + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, Digest, sizeof(Digest)); + lwip_sha1_update(&sha1Context, Challenge, sizeof(Challenge)); + lwip_sha1_update(&sha1Context, Magic2, sizeof(Magic2)); + lwip_sha1_finish(&sha1Context, Digest); + lwip_sha1_free(&sha1Context); + + /* Convert to ASCII hex string. */ + for (i = 0; i < LWIP_MAX((MS_AUTH_RESPONSE_LENGTH / 2), (int)sizeof(Digest)); i++) + sprintf((char *)&authResponse[i * 2], "%02X", Digest[i]); +} + + +static void GenerateAuthenticatorResponsePlain( + const char *secret, int secret_len, + u_char NTResponse[24], const u_char PeerChallenge[16], + const u_char *rchallenge, const char *username, + u_char authResponse[MS_AUTH_RESPONSE_LENGTH+1]) { + u_char unicodePassword[MAX_NT_PASSWORD * 2]; + u_char PasswordHash[MD4_SIGNATURE_SIZE]; + u_char PasswordHashHash[MD4_SIGNATURE_SIZE]; + + /* Hash (x2) the Unicode version of the secret (== password). */ + ascii2unicode(secret, secret_len, unicodePassword); + NTPasswordHash(unicodePassword, secret_len * 2, PasswordHash); + NTPasswordHash(PasswordHash, sizeof(PasswordHash), + PasswordHashHash); + + GenerateAuthenticatorResponse(PasswordHashHash, NTResponse, PeerChallenge, + rchallenge, username, authResponse); +} + + +#if MPPE_SUPPORT +/* + * Set mppe_xxxx_key from MS-CHAP credentials. (see RFC 3079) + */ +static void Set_Start_Key(ppp_pcb *pcb, const u_char *rchallenge, const char *secret, int secret_len) { + u_char unicodePassword[MAX_NT_PASSWORD * 2]; + u_char PasswordHash[MD4_SIGNATURE_SIZE]; + u_char PasswordHashHash[MD4_SIGNATURE_SIZE]; + lwip_sha1_context sha1Context; + u_char Digest[SHA1_SIGNATURE_SIZE]; /* >= MPPE_MAX_KEY_LEN */ + + /* Hash (x2) the Unicode version of the secret (== password). */ + ascii2unicode(secret, secret_len, unicodePassword); + NTPasswordHash(unicodePassword, secret_len * 2, PasswordHash); + NTPasswordHash(PasswordHash, sizeof(PasswordHash), PasswordHashHash); + + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, PasswordHashHash, MD4_SIGNATURE_SIZE); + lwip_sha1_update(&sha1Context, PasswordHashHash, MD4_SIGNATURE_SIZE); + lwip_sha1_update(&sha1Context, rchallenge, 8); + lwip_sha1_finish(&sha1Context, Digest); + lwip_sha1_free(&sha1Context); + + /* Same key in both directions. 
*/ + mppe_set_key(pcb, &pcb->mppe_comp, Digest); + mppe_set_key(pcb, &pcb->mppe_decomp, Digest); + + pcb->mppe_keys_set = 1; +} + +/* + * Set mppe_xxxx_key from MS-CHAPv2 credentials. (see RFC 3079) + */ +static void SetMasterKeys(ppp_pcb *pcb, const char *secret, int secret_len, u_char NTResponse[24], int IsServer) { + u_char unicodePassword[MAX_NT_PASSWORD * 2]; + u_char PasswordHash[MD4_SIGNATURE_SIZE]; + u_char PasswordHashHash[MD4_SIGNATURE_SIZE]; + lwip_sha1_context sha1Context; + u_char MasterKey[SHA1_SIGNATURE_SIZE]; /* >= MPPE_MAX_KEY_LEN */ + u_char Digest[SHA1_SIGNATURE_SIZE]; /* >= MPPE_MAX_KEY_LEN */ + const u_char *s; + + /* "This is the MPPE Master Key" */ + static const u_char Magic1[27] = + { 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x4d, 0x50, 0x50, 0x45, 0x20, 0x4d, + 0x61, 0x73, 0x74, 0x65, 0x72, 0x20, 0x4b, 0x65, 0x79 }; + /* "On the client side, this is the send key; " + "on the server side, it is the receive key." */ + static const u_char Magic2[84] = + { 0x4f, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x20, 0x73, 0x69, 0x64, 0x65, 0x2c, 0x20, + 0x74, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x73, 0x65, 0x6e, 0x64, 0x20, 0x6b, 0x65, 0x79, + 0x3b, 0x20, 0x6f, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x20, 0x73, 0x69, 0x64, 0x65, + 0x2c, 0x20, 0x69, 0x74, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x20, + 0x6b, 0x65, 0x79, 0x2e }; + /* "On the client side, this is the receive key; " + "on the server side, it is the send key." */ + static const u_char Magic3[84] = + { 0x4f, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x20, 0x73, 0x69, 0x64, 0x65, 0x2c, 0x20, + 0x74, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x20, + 0x6b, 0x65, 0x79, 0x3b, 0x20, 0x6f, 0x6e, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x20, 0x73, + 0x69, 0x64, 0x65, 0x2c, 0x20, 0x69, 0x74, 0x20, 0x69, 0x73, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x65, 0x6e, 0x64, 0x20, + 0x6b, 0x65, 0x79, 0x2e }; + + /* Hash (x2) the Unicode version of the secret (== password). 
*/ + ascii2unicode(secret, secret_len, unicodePassword); + NTPasswordHash(unicodePassword, secret_len * 2, PasswordHash); + NTPasswordHash(PasswordHash, sizeof(PasswordHash), PasswordHashHash); + + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, PasswordHashHash, MD4_SIGNATURE_SIZE); + lwip_sha1_update(&sha1Context, NTResponse, 24); + lwip_sha1_update(&sha1Context, Magic1, sizeof(Magic1)); + lwip_sha1_finish(&sha1Context, MasterKey); + lwip_sha1_free(&sha1Context); + + /* + * generate send key + */ + if (IsServer) + s = Magic3; + else + s = Magic2; + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, MasterKey, 16); + lwip_sha1_update(&sha1Context, mppe_sha1_pad1, SHA1_PAD_SIZE); + lwip_sha1_update(&sha1Context, s, 84); + lwip_sha1_update(&sha1Context, mppe_sha1_pad2, SHA1_PAD_SIZE); + lwip_sha1_finish(&sha1Context, Digest); + lwip_sha1_free(&sha1Context); + + mppe_set_key(pcb, &pcb->mppe_comp, Digest); + + /* + * generate recv key + */ + if (IsServer) + s = Magic2; + else + s = Magic3; + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, MasterKey, 16); + lwip_sha1_update(&sha1Context, mppe_sha1_pad1, SHA1_PAD_SIZE); + lwip_sha1_update(&sha1Context, s, 84); + lwip_sha1_update(&sha1Context, mppe_sha1_pad2, SHA1_PAD_SIZE); + lwip_sha1_finish(&sha1Context, Digest); + lwip_sha1_free(&sha1Context); + + mppe_set_key(pcb, &pcb->mppe_decomp, Digest); + + pcb->mppe_keys_set = 1; +} + +#endif /* MPPE_SUPPORT */ + + +static void ChapMS(ppp_pcb *pcb, const u_char *rchallenge, const char *secret, int secret_len, + unsigned char *response) { +#if !MPPE_SUPPORT + LWIP_UNUSED_ARG(pcb); +#endif /* !MPPE_SUPPORT */ + BZERO(response, MS_CHAP_RESPONSE_LEN); + + ChapMS_NT(rchallenge, secret, secret_len, &response[MS_CHAP_NTRESP]); + +#ifdef MSLANMAN + ChapMS_LANMan(rchallenge, secret, secret_len, + &response[MS_CHAP_LANMANRESP]); + + /* preferred method is set by option */ + response[MS_CHAP_USENT] = !ms_lanman; +#else + response[MS_CHAP_USENT] = 1; +#endif + +#if MPPE_SUPPORT + Set_Start_Key(pcb, rchallenge, secret, secret_len); +#endif /* MPPE_SUPPORT */ +} + + +/* + * If PeerChallenge is NULL, one is generated and the PeerChallenge + * field of response is filled in. Call this way when generating a response. + * If PeerChallenge is supplied, it is copied into the PeerChallenge field. + * Call this way when verifying a response (or debugging). + * Do not call with PeerChallenge = response. + * + * The PeerChallenge field of response is then used for calculation of the + * Authenticator Response. + */ +static void ChapMS2(ppp_pcb *pcb, const u_char *rchallenge, const u_char *PeerChallenge, + const char *user, const char *secret, int secret_len, unsigned char *response, + u_char authResponse[], int authenticator) { + /* ARGSUSED */ + LWIP_UNUSED_ARG(authenticator); +#if !MPPE_SUPPORT + LWIP_UNUSED_ARG(pcb); +#endif /* !MPPE_SUPPORT */ + + BZERO(response, MS_CHAP2_RESPONSE_LEN); + + /* Generate the Peer-Challenge if requested, or copy it if supplied. */ + if (!PeerChallenge) + magic_random_bytes(&response[MS_CHAP2_PEER_CHALLENGE], MS_CHAP2_PEER_CHAL_LEN); + else + MEMCPY(&response[MS_CHAP2_PEER_CHALLENGE], PeerChallenge, + MS_CHAP2_PEER_CHAL_LEN); + + /* Generate the NT-Response */ + ChapMS2_NT(rchallenge, &response[MS_CHAP2_PEER_CHALLENGE], user, + secret, secret_len, &response[MS_CHAP2_NTRESP]); + + /* Generate the Authenticator Response. 
*/ + GenerateAuthenticatorResponsePlain(secret, secret_len, + &response[MS_CHAP2_NTRESP], + &response[MS_CHAP2_PEER_CHALLENGE], + rchallenge, user, authResponse); + +#if MPPE_SUPPORT + SetMasterKeys(pcb, secret, secret_len, + &response[MS_CHAP2_NTRESP], authenticator); +#endif /* MPPE_SUPPORT */ +} + +#if 0 /* UNUSED */ +#if MPPE_SUPPORT +/* + * Set MPPE options from plugins. + */ +void set_mppe_enc_types(int policy, int types) { + /* Early exit for unknown policies. */ + if (policy != MPPE_ENC_POL_ENC_ALLOWED || + policy != MPPE_ENC_POL_ENC_REQUIRED) + return; + + /* Don't modify MPPE if it's optional and wasn't already configured. */ + if (policy == MPPE_ENC_POL_ENC_ALLOWED && !ccp_wantoptions[0].mppe) + return; + + /* + * Disable undesirable encryption types. Note that we don't ENABLE + * any encryption types, to avoid overriding manual configuration. + */ + switch(types) { + case MPPE_ENC_TYPES_RC4_40: + ccp_wantoptions[0].mppe &= ~MPPE_OPT_128; /* disable 128-bit */ + break; + case MPPE_ENC_TYPES_RC4_128: + ccp_wantoptions[0].mppe &= ~MPPE_OPT_40; /* disable 40-bit */ + break; + default: + break; + } +} +#endif /* MPPE_SUPPORT */ +#endif /* UNUSED */ + +const struct chap_digest_type chapms_digest = { + CHAP_MICROSOFT, /* code */ +#if PPP_SERVER + chapms_generate_challenge, + chapms_verify_response, +#endif /* PPP_SERVER */ + chapms_make_response, + NULL, /* check_success */ + chapms_handle_failure, +}; + +const struct chap_digest_type chapms2_digest = { + CHAP_MICROSOFT_V2, /* code */ +#if PPP_SERVER + chapms2_generate_challenge, + chapms2_verify_response, +#endif /* PPP_SERVER */ + chapms2_make_response, + chapms2_check_success, + chapms_handle_failure, +}; + +#endif /* PPP_SUPPORT && MSCHAP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/demand.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/demand.c new file mode 100644 index 0000000..26c6c30 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/demand.c @@ -0,0 +1,465 @@ +/* + * demand.c - Support routines for demand-dialling. + * + * Copyright (c) 1996-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && DEMAND_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef PPP_FILTER +#include +#endif + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/ipcp.h" +#include "netif/ppp/lcp.h" + +char *frame; +int framelen; +int framemax; +int escape_flag; +int flush_flag; +int fcs; + +struct packet { + int length; + struct packet *next; + unsigned char data[1]; +}; + +struct packet *pend_q; +struct packet *pend_qtail; + +static int active_packet (unsigned char *, int); + +/* + * demand_conf - configure the interface for doing dial-on-demand. + */ +void +demand_conf() +{ + int i; + const struct protent *protp; + +/* framemax = lcp_allowoptions[0].mru; + if (framemax < PPP_MRU) */ + framemax = PPP_MRU; + framemax += PPP_HDRLEN + PPP_FCSLEN; + frame = malloc(framemax); + if (frame == NULL) + novm("demand frame"); + framelen = 0; + pend_q = NULL; + escape_flag = 0; + flush_flag = 0; + fcs = PPP_INITFCS; + + netif_set_mtu(pcb, LWIP_MIN(lcp_allowoptions[0].mru, PPP_MRU)); + if (ppp_send_config(pcb, PPP_MRU, (u32_t) 0, 0, 0) < 0 + || ppp_recv_config(pcb, PPP_MRU, (u32_t) 0, 0, 0) < 0) + fatal("Couldn't set up demand-dialled PPP interface: %m"); + +#ifdef PPP_FILTER + set_filters(&pass_filter, &active_filter); +#endif + + /* + * Call the demand_conf procedure for each protocol that's got one. + */ + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->demand_conf != NULL) + ((*protp->demand_conf)(pcb)); +/* FIXME: find a way to die() here */ +#if 0 + if (!((*protp->demand_conf)(pcb))) + die(1); +#endif +} + + +/* + * demand_block - set each network protocol to block further packets. + */ +void +demand_block() +{ + int i; + const struct protent *protp; + + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->demand_conf != NULL) + sifnpmode(pcb, protp->protocol & ~0x8000, NPMODE_QUEUE); + get_loop_output(); +} + +/* + * demand_discard - set each network protocol to discard packets + * with an error. + */ +void +demand_discard() +{ + struct packet *pkt, *nextpkt; + int i; + const struct protent *protp; + + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->demand_conf != NULL) + sifnpmode(pcb, protp->protocol & ~0x8000, NPMODE_ERROR); + get_loop_output(); + + /* discard all saved packets */ + for (pkt = pend_q; pkt != NULL; pkt = nextpkt) { + nextpkt = pkt->next; + free(pkt); + } + pend_q = NULL; + framelen = 0; + flush_flag = 0; + escape_flag = 0; + fcs = PPP_INITFCS; +} + +/* + * demand_unblock - set each enabled network protocol to pass packets. + */ +void +demand_unblock() +{ + int i; + const struct protent *protp; + + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->demand_conf != NULL) + sifnpmode(pcb, protp->protocol & ~0x8000, NPMODE_PASS); +} + +/* + * FCS lookup table as calculated by genfcstab. 
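The table that follows implements the RFC 1662 FCS-16 (polynomial x^16 + x^12 + x^5 + 1, bit-reflected as 0x8408), which loop_chars() below updates one byte at a time via PPP_FCS() after HDLC un-escaping. As a cross-check, here is a bit-at-a-time sketch that produces the same values; this is an illustrative aside, not part of demand.c. PPP_INITFCS (0xffff) and PPP_GOODFCS (0xf0b8) are the standard initial and "good" residues, and the sender appends the one's complement of the FCS, least significant byte first.

#include <stdint.h>
#include <stddef.h>

static uint16_t ex_fcs16(uint16_t fcs, const uint8_t *buf, size_t len)
{
    int bit;

    while (len--) {
        fcs ^= *buf++;
        for (bit = 0; bit < 8; bit++)
            fcs = (fcs & 1) ? (uint16_t)((fcs >> 1) ^ 0x8408) : (uint16_t)(fcs >> 1);
    }
    return fcs;
}

/* Receive-side check: running ex_fcs16(0xffff, frame, len) over the payload
 * plus its two trailing FCS bytes yields 0xf0b8 when the frame is intact,
 * which is the PPP_GOODFCS comparison made in loop_chars() below. */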
+ */ +static u_short fcstab[256] = { + 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf, + 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7, + 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e, + 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876, + 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd, + 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5, + 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c, + 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974, + 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb, + 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3, + 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a, + 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72, + 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9, + 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1, + 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738, + 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70, + 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7, + 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff, + 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036, + 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e, + 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5, + 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd, + 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134, + 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c, + 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3, + 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb, + 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232, + 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a, + 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1, + 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9, + 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330, + 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78 +}; + +/* + * loop_chars - process characters received from the loopback. + * Calls loop_frame when a complete frame has been accumulated. + * Return value is 1 if we need to bring up the link, 0 otherwise. + */ +int +loop_chars(p, n) + unsigned char *p; + int n; +{ + int c, rv; + + rv = 0; + +/* check for synchronous connection... */ + + if ( (p[0] == 0xFF) && (p[1] == 0x03) ) { + rv = loop_frame(p,n); + return rv; + } + + for (; n > 0; --n) { + c = *p++; + if (c == PPP_FLAG) { + if (!escape_flag && !flush_flag + && framelen > 2 && fcs == PPP_GOODFCS) { + framelen -= 2; + if (loop_frame((unsigned char *)frame, framelen)) + rv = 1; + } + framelen = 0; + flush_flag = 0; + escape_flag = 0; + fcs = PPP_INITFCS; + continue; + } + if (flush_flag) + continue; + if (escape_flag) { + c ^= PPP_TRANS; + escape_flag = 0; + } else if (c == PPP_ESCAPE) { + escape_flag = 1; + continue; + } + if (framelen >= framemax) { + flush_flag = 1; + continue; + } + frame[framelen++] = c; + fcs = PPP_FCS(fcs, c); + } + return rv; +} + +/* + * loop_frame - given a frame obtained from the loopback, + * decide whether to bring up the link or not, and, if we want + * to transmit this frame later, put it on the pending queue. + * Return value is 1 if we need to bring up the link, 0 otherwise. + * We assume that the kernel driver has already applied the + * pass_filter, so we won't get packets it rejected. 
+ * We apply the active_filter to see if we want this packet to + * bring up the link. + */ +int +loop_frame(frame, len) + unsigned char *frame; + int len; +{ + struct packet *pkt; + + /* dbglog("from loop: %P", frame, len); */ + if (len < PPP_HDRLEN) + return 0; + if ((PPP_PROTOCOL(frame) & 0x8000) != 0) + return 0; /* shouldn't get any of these anyway */ + if (!active_packet(frame, len)) + return 0; + + pkt = (struct packet *) malloc(sizeof(struct packet) + len); + if (pkt != NULL) { + pkt->length = len; + pkt->next = NULL; + memcpy(pkt->data, frame, len); + if (pend_q == NULL) + pend_q = pkt; + else + pend_qtail->next = pkt; + pend_qtail = pkt; + } + return 1; +} + +/* + * demand_rexmit - Resend all those frames which we got via the + * loopback, now that the real serial link is up. + */ +void +demand_rexmit(proto, newip) + int proto; + u32_t newip; +{ + struct packet *pkt, *prev, *nextpkt; + unsigned short checksum; + unsigned short pkt_checksum = 0; + unsigned iphdr; + struct timeval tv; + char cv = 0; + char ipstr[16]; + + prev = NULL; + pkt = pend_q; + pend_q = NULL; + tv.tv_sec = 1; + tv.tv_usec = 0; + select(0,NULL,NULL,NULL,&tv); /* Sleep for 1 Seconds */ + for (; pkt != NULL; pkt = nextpkt) { + nextpkt = pkt->next; + if (PPP_PROTOCOL(pkt->data) == proto) { + if ( (proto == PPP_IP) && newip ) { + /* Get old checksum */ + + iphdr = (pkt->data[4] & 15) << 2; + checksum = *((unsigned short *) (pkt->data+14)); + if (checksum == 0xFFFF) { + checksum = 0; + } + + + if (pkt->data[13] == 17) { + pkt_checksum = *((unsigned short *) (pkt->data+10+iphdr)); + if (pkt_checksum) { + cv = 1; + if (pkt_checksum == 0xFFFF) { + pkt_checksum = 0; + } + } + else { + cv = 0; + } + } + + if (pkt->data[13] == 6) { + pkt_checksum = *((unsigned short *) (pkt->data+20+iphdr)); + cv = 1; + if (pkt_checksum == 0xFFFF) { + pkt_checksum = 0; + } + } + + /* Delete old Source-IP-Address */ + checksum -= *((unsigned short *) (pkt->data+16)) ^ 0xFFFF; + checksum -= *((unsigned short *) (pkt->data+18)) ^ 0xFFFF; + + pkt_checksum -= *((unsigned short *) (pkt->data+16)) ^ 0xFFFF; + pkt_checksum -= *((unsigned short *) (pkt->data+18)) ^ 0xFFFF; + + /* Change Source-IP-Address */ + * ((u32_t *) (pkt->data + 16)) = newip; + + /* Add new Source-IP-Address */ + checksum += *((unsigned short *) (pkt->data+16)) ^ 0xFFFF; + checksum += *((unsigned short *) (pkt->data+18)) ^ 0xFFFF; + + pkt_checksum += *((unsigned short *) (pkt->data+16)) ^ 0xFFFF; + pkt_checksum += *((unsigned short *) (pkt->data+18)) ^ 0xFFFF; + + /* Write new checksum */ + if (!checksum) { + checksum = 0xFFFF; + } + *((unsigned short *) (pkt->data+14)) = checksum; + if (pkt->data[13] == 6) { + *((unsigned short *) (pkt->data+20+iphdr)) = pkt_checksum; + } + if (cv && (pkt->data[13] == 17) ) { + *((unsigned short *) (pkt->data+10+iphdr)) = pkt_checksum; + } + + /* Log Packet */ + strcpy(ipstr,inet_ntoa(*( (struct in_addr *) (pkt->data+16)))); + if (pkt->data[13] == 1) { + syslog(LOG_INFO,"Open ICMP %s -> %s\n", + ipstr, + inet_ntoa(*( (struct in_addr *) (pkt->data+20)))); + } else { + syslog(LOG_INFO,"Open %s %s:%d -> %s:%d\n", + pkt->data[13] == 6 ? 
"TCP" : "UDP", + ipstr, + ntohs(*( (short *) (pkt->data+iphdr+4))), + inet_ntoa(*( (struct in_addr *) (pkt->data+20))), + ntohs(*( (short *) (pkt->data+iphdr+6)))); + } + } + output(pcb, pkt->data, pkt->length); + free(pkt); + } else { + if (prev == NULL) + pend_q = pkt; + else + prev->next = pkt; + prev = pkt; + } + } + pend_qtail = prev; + if (prev != NULL) + prev->next = NULL; +} + +/* + * Scan a packet to decide whether it is an "active" packet, + * that is, whether it is worth bringing up the link for. + */ +static int +active_packet(p, len) + unsigned char *p; + int len; +{ + int proto, i; + const struct protent *protp; + + if (len < PPP_HDRLEN) + return 0; + proto = PPP_PROTOCOL(p); +#ifdef PPP_FILTER + p[0] = 1; /* outbound packet indicator */ + if ((pass_filter.bf_len != 0 + && bpf_filter(pass_filter.bf_insns, p, len, len) == 0) + || (active_filter.bf_len != 0 + && bpf_filter(active_filter.bf_insns, p, len, len) == 0)) { + p[0] = 0xff; + return 0; + } + p[0] = 0xff; +#endif + for (i = 0; (protp = protocols[i]) != NULL; ++i) { + if (protp->protocol < 0xC000 && (protp->protocol & ~0x8000) == proto) { + if (protp->active_pkt == NULL) + return 1; + return (*protp->active_pkt)(p, len); + } + } + return 0; /* not a supported protocol !!?? */ +} + +#endif /* PPP_SUPPORT && DEMAND_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/eap.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/eap.c new file mode 100644 index 0000000..8fb5636 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/eap.c @@ -0,0 +1,2423 @@ +/* + * eap.c - Extensible Authentication Protocol for PPP (RFC 2284) + * + * Copyright (c) 2001 by Sun Microsystems, Inc. + * All rights reserved. + * + * Non-exclusive rights to redistribute, modify, translate, and use + * this software in source and binary forms, in whole or in part, is + * hereby granted, provided that the above copyright notice is + * duplicated in any source form, and that neither the name of the + * copyright holder nor the author is used to endorse or promote + * products derived from this software. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + * Original version by James Carlson + * + * This implementation of EAP supports MD5-Challenge and SRP-SHA1 + * authentication styles. Note that support of MD5-Challenge is a + * requirement of RFC 2284, and that it's essentially just a + * reimplementation of regular RFC 1994 CHAP using EAP messages. + * + * As an authenticator ("server"), there are multiple phases for each + * style. In the first phase of each style, the unauthenticated peer + * name is queried using the EAP Identity request type. If the + * "remotename" option is used, then this phase is skipped, because + * the peer's name is presumed to be known. + * + * For MD5-Challenge, there are two phases, and the second phase + * consists of sending the challenge itself and handling the + * associated response. + * + * For SRP-SHA1, there are four phases. The second sends 's', 'N', + * and 'g'. The reply contains 'A'. The third sends 'B', and the + * reply contains 'M1'. The forth sends the 'M2' value. + * + * As an authenticatee ("client"), there's just a single phase -- + * responding to the queries generated by the peer. EAP is an + * authenticator-driven protocol. + * + * Based on draft-ietf-pppext-eap-srp-03.txt. 
+ */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && EAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/eap.h" +#include "netif/ppp/magic.h" +#include "netif/ppp/pppcrypt.h" + +#ifdef USE_SRP +#include +#include +#include +#endif /* USE_SRP */ + +#ifndef SHA_DIGESTSIZE +#define SHA_DIGESTSIZE 20 +#endif + +#ifdef USE_SRP +static char *pn_secret = NULL; /* Pseudonym generating secret */ +#endif + +#if PPP_OPTIONS +/* + * Command-line options. + */ +static option_t eap_option_list[] = { + { "eap-restart", o_int, &eap_states[0].es_server.ea_timeout, + "Set retransmit timeout for EAP Requests (server)" }, + { "eap-max-sreq", o_int, &eap_states[0].es_server.ea_maxrequests, + "Set max number of EAP Requests sent (server)" }, + { "eap-timeout", o_int, &eap_states[0].es_client.ea_timeout, + "Set time limit for peer EAP authentication" }, + { "eap-max-rreq", o_int, &eap_states[0].es_client.ea_maxrequests, + "Set max number of EAP Requests allows (client)" }, + { "eap-interval", o_int, &eap_states[0].es_rechallenge, + "Set interval for EAP rechallenge" }, +#ifdef USE_SRP + { "srp-interval", o_int, &eap_states[0].es_lwrechallenge, + "Set interval for SRP lightweight rechallenge" }, + { "srp-pn-secret", o_string, &pn_secret, + "Long term pseudonym generation secret" }, + { "srp-use-pseudonym", o_bool, &eap_states[0].es_usepseudo, + "Use pseudonym if offered one by server", 1 }, +#endif + { NULL } +}; +#endif /* PPP_OPTIONS */ + +/* + * Protocol entry points. + */ +static void eap_init(ppp_pcb *pcb); +static void eap_input(ppp_pcb *pcb, u_char *inp, int inlen); +static void eap_protrej(ppp_pcb *pcb); +static void eap_lowerup(ppp_pcb *pcb); +static void eap_lowerdown(ppp_pcb *pcb); +#if PRINTPKT_SUPPORT +static int eap_printpkt(const u_char *inp, int inlen, + void (*)(void *arg, const char *fmt, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ + +const struct protent eap_protent = { + PPP_EAP, /* protocol number */ + eap_init, /* initialization procedure */ + eap_input, /* process a received packet */ + eap_protrej, /* process a received protocol-reject */ + eap_lowerup, /* lower layer has gone up */ + eap_lowerdown, /* lower layer has gone down */ + NULL, /* open the protocol */ + NULL, /* close the protocol */ +#if PRINTPKT_SUPPORT + eap_printpkt, /* print a packet in readable form */ +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, /* process a received data packet */ +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "EAP", /* text name of protocol */ + NULL, /* text name of corresponding data protocol */ +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + eap_option_list, /* list of command-line options */ + NULL, /* check requested options; assign defaults */ +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + NULL, /* configure interface for demand-dial */ + NULL /* say whether to bring up link for this pkt */ +#endif /* DEMAND_SUPPORT */ +}; + +#ifdef USE_SRP +/* + * A well-known 2048 bit modulus. 
+ */ +static const u_char wkmodulus[] = { + 0xAC, 0x6B, 0xDB, 0x41, 0x32, 0x4A, 0x9A, 0x9B, + 0xF1, 0x66, 0xDE, 0x5E, 0x13, 0x89, 0x58, 0x2F, + 0xAF, 0x72, 0xB6, 0x65, 0x19, 0x87, 0xEE, 0x07, + 0xFC, 0x31, 0x92, 0x94, 0x3D, 0xB5, 0x60, 0x50, + 0xA3, 0x73, 0x29, 0xCB, 0xB4, 0xA0, 0x99, 0xED, + 0x81, 0x93, 0xE0, 0x75, 0x77, 0x67, 0xA1, 0x3D, + 0xD5, 0x23, 0x12, 0xAB, 0x4B, 0x03, 0x31, 0x0D, + 0xCD, 0x7F, 0x48, 0xA9, 0xDA, 0x04, 0xFD, 0x50, + 0xE8, 0x08, 0x39, 0x69, 0xED, 0xB7, 0x67, 0xB0, + 0xCF, 0x60, 0x95, 0x17, 0x9A, 0x16, 0x3A, 0xB3, + 0x66, 0x1A, 0x05, 0xFB, 0xD5, 0xFA, 0xAA, 0xE8, + 0x29, 0x18, 0xA9, 0x96, 0x2F, 0x0B, 0x93, 0xB8, + 0x55, 0xF9, 0x79, 0x93, 0xEC, 0x97, 0x5E, 0xEA, + 0xA8, 0x0D, 0x74, 0x0A, 0xDB, 0xF4, 0xFF, 0x74, + 0x73, 0x59, 0xD0, 0x41, 0xD5, 0xC3, 0x3E, 0xA7, + 0x1D, 0x28, 0x1E, 0x44, 0x6B, 0x14, 0x77, 0x3B, + 0xCA, 0x97, 0xB4, 0x3A, 0x23, 0xFB, 0x80, 0x16, + 0x76, 0xBD, 0x20, 0x7A, 0x43, 0x6C, 0x64, 0x81, + 0xF1, 0xD2, 0xB9, 0x07, 0x87, 0x17, 0x46, 0x1A, + 0x5B, 0x9D, 0x32, 0xE6, 0x88, 0xF8, 0x77, 0x48, + 0x54, 0x45, 0x23, 0xB5, 0x24, 0xB0, 0xD5, 0x7D, + 0x5E, 0xA7, 0x7A, 0x27, 0x75, 0xD2, 0xEC, 0xFA, + 0x03, 0x2C, 0xFB, 0xDB, 0xF5, 0x2F, 0xB3, 0x78, + 0x61, 0x60, 0x27, 0x90, 0x04, 0xE5, 0x7A, 0xE6, + 0xAF, 0x87, 0x4E, 0x73, 0x03, 0xCE, 0x53, 0x29, + 0x9C, 0xCC, 0x04, 0x1C, 0x7B, 0xC3, 0x08, 0xD8, + 0x2A, 0x56, 0x98, 0xF3, 0xA8, 0xD0, 0xC3, 0x82, + 0x71, 0xAE, 0x35, 0xF8, 0xE9, 0xDB, 0xFB, 0xB6, + 0x94, 0xB5, 0xC8, 0x03, 0xD8, 0x9F, 0x7A, 0xE4, + 0x35, 0xDE, 0x23, 0x6D, 0x52, 0x5F, 0x54, 0x75, + 0x9B, 0x65, 0xE3, 0x72, 0xFC, 0xD6, 0x8E, 0xF2, + 0x0F, 0xA7, 0x11, 0x1F, 0x9E, 0x4A, 0xFF, 0x73 +}; +#endif + +#if PPP_SERVER +/* Local forward declarations. */ +static void eap_server_timeout(void *arg); +#endif /* PPP_SERVER */ + +/* + * Convert EAP state code to printable string for debug. + */ +static const char * eap_state_name(enum eap_state_code esc) +{ + static const char *state_names[] = { EAP_STATES }; + + return (state_names[(int)esc]); +} + +/* + * eap_init - Initialize state for an EAP user. This is currently + * called once by main() during start-up. + */ +static void eap_init(ppp_pcb *pcb) { + + BZERO(&pcb->eap, sizeof(eap_state)); +#if PPP_SERVER + pcb->eap.es_server.ea_id = magic(); +#endif /* PPP_SERVER */ +} + +/* + * eap_client_timeout - Give up waiting for the peer to send any + * Request messages. + */ +static void eap_client_timeout(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + if (!eap_client_active(pcb)) + return; + + ppp_error("EAP: timeout waiting for Request from peer"); + auth_withpeer_fail(pcb, PPP_EAP); + pcb->eap.es_client.ea_state = eapBadAuth; +} + +/* + * eap_authwithpeer - Authenticate to our peer (behave as client). + * + * Start client state and wait for requests. This is called only + * after eap_lowerup. + */ +void eap_authwithpeer(ppp_pcb *pcb, const char *localname) { + + if(NULL == localname) + return; + + /* Save the peer name we're given */ + pcb->eap.es_client.ea_name = localname; + pcb->eap.es_client.ea_namelen = strlen(localname); + + pcb->eap.es_client.ea_state = eapListen; + + /* + * Start a timer so that if the other end just goes + * silent, we don't sit here waiting forever. + */ + if (pcb->settings.eap_req_time > 0) + TIMEOUT(eap_client_timeout, pcb, + pcb->settings.eap_req_time); +} + +#if PPP_SERVER +/* + * Format a standard EAP Failure message and send it to the peer. 
+ * (Server operation) + */ +static void eap_send_failure(ppp_pcb *pcb) { + struct pbuf *p; + u_char *outp; + + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + EAP_HEADERLEN), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_FAILURE, outp); + pcb->eap.es_server.ea_id++; + PUTCHAR(pcb->eap.es_server.ea_id, outp); + PUTSHORT(EAP_HEADERLEN, outp); + + ppp_write(pcb, p); + + pcb->eap.es_server.ea_state = eapBadAuth; + auth_peer_fail(pcb, PPP_EAP); +} + +/* + * Format a standard EAP Success message and send it to the peer. + * (Server operation) + */ +static void eap_send_success(ppp_pcb *pcb) { + struct pbuf *p; + u_char *outp; + + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + EAP_HEADERLEN), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_SUCCESS, outp); + pcb->eap.es_server.ea_id++; + PUTCHAR(pcb->eap.es_server.ea_id, outp); + PUTSHORT(EAP_HEADERLEN, outp); + + ppp_write(pcb, p); + + auth_peer_success(pcb, PPP_EAP, 0, + pcb->eap.es_server.ea_peer, pcb->eap.es_server.ea_peerlen); +} +#endif /* PPP_SERVER */ + +#ifdef USE_SRP +/* + * Set DES key according to pseudonym-generating secret and current + * date. + */ +static bool +pncrypt_setkey(int timeoffs) +{ + struct tm *tp; + char tbuf[9]; + SHA1_CTX ctxt; + u_char dig[SHA_DIGESTSIZE]; + time_t reftime; + + if (pn_secret == NULL) + return (0); + reftime = time(NULL) + timeoffs; + tp = localtime(&reftime); + SHA1Init(&ctxt); + SHA1Update(&ctxt, pn_secret, strlen(pn_secret)); + strftime(tbuf, sizeof (tbuf), "%Y%m%d", tp); + SHA1Update(&ctxt, tbuf, strlen(tbuf)); + SHA1Final(dig, &ctxt); + /* FIXME: if we want to do SRP, we need to find a way to pass the PolarSSL des_context instead of using static memory */ + return (DesSetkey(dig)); +} + +static char base64[] = +"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + +struct b64state { + u32_t bs_bits; + int bs_offs; +}; + +static int +b64enc(bs, inp, inlen, outp) +struct b64state *bs; +u_char *inp; +int inlen; +u_char *outp; +{ + int outlen = 0; + + while (inlen > 0) { + bs->bs_bits = (bs->bs_bits << 8) | *inp++; + inlen--; + bs->bs_offs += 8; + if (bs->bs_offs >= 24) { + *outp++ = base64[(bs->bs_bits >> 18) & 0x3F]; + *outp++ = base64[(bs->bs_bits >> 12) & 0x3F]; + *outp++ = base64[(bs->bs_bits >> 6) & 0x3F]; + *outp++ = base64[bs->bs_bits & 0x3F]; + outlen += 4; + bs->bs_offs = 0; + bs->bs_bits = 0; + } + } + return (outlen); +} + +static int +b64flush(bs, outp) +struct b64state *bs; +u_char *outp; +{ + int outlen = 0; + + if (bs->bs_offs == 8) { + *outp++ = base64[(bs->bs_bits >> 2) & 0x3F]; + *outp++ = base64[(bs->bs_bits << 4) & 0x3F]; + outlen = 2; + } else if (bs->bs_offs == 16) { + *outp++ = base64[(bs->bs_bits >> 10) & 0x3F]; + *outp++ = base64[(bs->bs_bits >> 4) & 0x3F]; + *outp++ = base64[(bs->bs_bits << 2) & 0x3F]; + outlen = 3; + } + bs->bs_offs = 0; + bs->bs_bits = 0; + return (outlen); +} + +static int +b64dec(bs, inp, inlen, outp) +struct b64state *bs; +u_char *inp; +int inlen; +u_char *outp; +{ + int outlen = 0; + char *cp; + + while (inlen > 0) { + if ((cp = strchr(base64, *inp++)) == NULL) + break; + bs->bs_bits = (bs->bs_bits << 6) | (cp - base64); + inlen--; + bs->bs_offs += 6; + if (bs->bs_offs >= 8) { + *outp++ = bs->bs_bits >> (bs->bs_offs - 8); + outlen++; + bs->bs_offs -= 8; + } + } + 
return (outlen); +} +#endif /* USE_SRP */ + +#if PPP_SERVER +/* + * Assume that current waiting server state is complete and figure + * next state to use based on available authentication data. 'status' + * indicates if there was an error in handling the last query. It is + * 0 for success and non-zero for failure. + */ +static void eap_figure_next_state(ppp_pcb *pcb, int status) { +#ifdef USE_SRP + unsigned char secbuf[MAXSECRETLEN], clear[8], *sp, *dp; + struct t_pw tpw; + struct t_confent *tce, mytce; + char *cp, *cp2; + struct t_server *ts; + int id, i, plen, toffs; + u_char vals[2]; + struct b64state bs; +#endif /* USE_SRP */ + + pcb->settings.eap_timeout_time = pcb->eap.es_savedtime; + switch (pcb->eap.es_server.ea_state) { + case eapBadAuth: + return; + + case eapIdentify: +#ifdef USE_SRP + /* Discard any previous session. */ + ts = (struct t_server *)pcb->eap.es_server.ea_session; + if (ts != NULL) { + t_serverclose(ts); + pcb->eap.es_server.ea_session = NULL; + pcb->eap.es_server.ea_skey = NULL; + } +#endif /* USE_SRP */ + if (status != 0) { + pcb->eap.es_server.ea_state = eapBadAuth; + break; + } +#ifdef USE_SRP + /* If we've got a pseudonym, try to decode to real name. */ + if (pcb->eap.es_server.ea_peerlen > SRP_PSEUDO_LEN && + strncmp(pcb->eap.es_server.ea_peer, SRP_PSEUDO_ID, + SRP_PSEUDO_LEN) == 0 && + (pcb->eap.es_server.ea_peerlen - SRP_PSEUDO_LEN) * 3 / 4 < + sizeof (secbuf)) { + BZERO(&bs, sizeof (bs)); + plen = b64dec(&bs, + pcb->eap.es_server.ea_peer + SRP_PSEUDO_LEN, + pcb->eap.es_server.ea_peerlen - SRP_PSEUDO_LEN, + secbuf); + toffs = 0; + for (i = 0; i < 5; i++) { + pncrypt_setkey(toffs); + toffs -= 86400; + /* FIXME: if we want to do SRP, we need to find a way to pass the PolarSSL des_context instead of using static memory */ + if (!DesDecrypt(secbuf, clear)) { + ppp_dbglog("no DES here; cannot decode " + "pseudonym"); + return; + } + id = *(unsigned char *)clear; + if (id + 1 <= plen && id + 9 > plen) + break; + } + if (plen % 8 == 0 && i < 5) { + /* + * Note that this is always shorter than the + * original stored string, so there's no need + * to realloc. + */ + if ((i = plen = *(unsigned char *)clear) > 7) + i = 7; + pcb->eap.es_server.ea_peerlen = plen; + dp = (unsigned char *)pcb->eap.es_server.ea_peer; + MEMCPY(dp, clear + 1, i); + plen -= i; + dp += i; + sp = secbuf + 8; + while (plen > 0) { + /* FIXME: if we want to do SRP, we need to find a way to pass the PolarSSL des_context instead of using static memory */ + (void) DesDecrypt(sp, dp); + sp += 8; + dp += 8; + plen -= 8; + } + pcb->eap.es_server.ea_peer[ + pcb->eap.es_server.ea_peerlen] = '\0'; + ppp_dbglog("decoded pseudonym to \"%.*q\"", + pcb->eap.es_server.ea_peerlen, + pcb->eap.es_server.ea_peer); + } else { + ppp_dbglog("failed to decode real name"); + /* Stay in eapIdentfy state; requery */ + break; + } + } + /* Look up user in secrets database. 
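+	 * If a usable SRP entry is found the exchange moves on to state
+	 * eapSRP1; if the entry is absent or malformed (or USE_SRP is not
+	 * compiled in) the server falls back to MD5-Challenge below.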
*/ + if (get_srp_secret(pcb->eap.es_unit, pcb->eap.es_server.ea_peer, + pcb->eap.es_server.ea_name, (char *)secbuf, 1) != 0) { + /* Set up default in case SRP entry is bad */ + pcb->eap.es_server.ea_state = eapMD5Chall; + /* Get t_confent based on index in srp-secrets */ + id = strtol((char *)secbuf, &cp, 10); + if (*cp++ != ':' || id < 0) + break; + if (id == 0) { + mytce.index = 0; + mytce.modulus.data = (u_char *)wkmodulus; + mytce.modulus.len = sizeof (wkmodulus); + mytce.generator.data = (u_char *)"\002"; + mytce.generator.len = 1; + tce = &mytce; + } else if ((tce = gettcid(id)) != NULL) { + /* + * Client will have to verify this modulus/ + * generator combination, and that will take + * a while. Lengthen the timeout here. + */ + if (pcb->settings.eap_timeout_time > 0 && + pcb->settings.eap_timeout_time < 30) + pcb->settings.eap_timeout_time = 30; + } else { + break; + } + if ((cp2 = strchr(cp, ':')) == NULL) + break; + *cp2++ = '\0'; + tpw.pebuf.name = pcb->eap.es_server.ea_peer; + tpw.pebuf.password.len = t_fromb64((char *)tpw.pwbuf, + cp); + tpw.pebuf.password.data = tpw.pwbuf; + tpw.pebuf.salt.len = t_fromb64((char *)tpw.saltbuf, + cp2); + tpw.pebuf.salt.data = tpw.saltbuf; + if ((ts = t_serveropenraw(&tpw.pebuf, tce)) == NULL) + break; + pcb->eap.es_server.ea_session = (void *)ts; + pcb->eap.es_server.ea_state = eapSRP1; + vals[0] = pcb->eap.es_server.ea_id + 1; + vals[1] = EAPT_SRP; + t_serveraddexdata(ts, vals, 2); + /* Generate B; must call before t_servergetkey() */ + t_servergenexp(ts); + break; + } +#endif /* USE_SRP */ + pcb->eap.es_server.ea_state = eapMD5Chall; + break; + + case eapSRP1: +#ifdef USE_SRP + ts = (struct t_server *)pcb->eap.es_server.ea_session; + if (ts != NULL && status != 0) { + t_serverclose(ts); + pcb->eap.es_server.ea_session = NULL; + pcb->eap.es_server.ea_skey = NULL; + } +#endif /* USE_SRP */ + if (status == 1) { + pcb->eap.es_server.ea_state = eapMD5Chall; + } else if (status != 0 || pcb->eap.es_server.ea_session == NULL) { + pcb->eap.es_server.ea_state = eapBadAuth; + } else { + pcb->eap.es_server.ea_state = eapSRP2; + } + break; + + case eapSRP2: +#ifdef USE_SRP + ts = (struct t_server *)pcb->eap.es_server.ea_session; + if (ts != NULL && status != 0) { + t_serverclose(ts); + pcb->eap.es_server.ea_session = NULL; + pcb->eap.es_server.ea_skey = NULL; + } +#endif /* USE_SRP */ + if (status != 0 || pcb->eap.es_server.ea_session == NULL) { + pcb->eap.es_server.ea_state = eapBadAuth; + } else { + pcb->eap.es_server.ea_state = eapSRP3; + } + break; + + case eapSRP3: + case eapSRP4: +#ifdef USE_SRP + ts = (struct t_server *)pcb->eap.es_server.ea_session; + if (ts != NULL && status != 0) { + t_serverclose(ts); + pcb->eap.es_server.ea_session = NULL; + pcb->eap.es_server.ea_skey = NULL; + } +#endif /* USE_SRP */ + if (status != 0 || pcb->eap.es_server.ea_session == NULL) { + pcb->eap.es_server.ea_state = eapBadAuth; + } else { + pcb->eap.es_server.ea_state = eapOpen; + } + break; + + case eapMD5Chall: + if (status != 0) { + pcb->eap.es_server.ea_state = eapBadAuth; + } else { + pcb->eap.es_server.ea_state = eapOpen; + } + break; + + default: + pcb->eap.es_server.ea_state = eapBadAuth; + break; + } + if (pcb->eap.es_server.ea_state == eapBadAuth) + eap_send_failure(pcb); +} + +/* + * Format an EAP Request message and send it to the peer. Message + * type depends on current state. 
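+ *
+ * The request built below depends on that state: eapIdentify sends an
+ * Identity request, eapMD5Chall sends an MD5-Challenge carrying a
+ * freshly generated random challenge plus our name, and (when USE_SRP
+ * is defined) eapSRP1..eapSRP4 send the SRP-SHA1 Challenge,
+ * Server-Key, Server-Validator and Lightweight-Rechallenge subtypes.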
(Server operation) + */ +static void eap_send_request(ppp_pcb *pcb) { + struct pbuf *p; + u_char *outp; + u_char *lenloc; + int outlen; + int len; + const char *str; +#ifdef USE_SRP + struct t_server *ts; + u_char clear[8], cipher[8], dig[SHA_DIGESTSIZE], *optr, *cp; + int i, j; + struct b64state b64; + SHA1_CTX ctxt; +#endif /* USE_SRP */ + + /* Handle both initial auth and restart */ + if (pcb->eap.es_server.ea_state < eapIdentify && + pcb->eap.es_server.ea_state != eapInitial) { + pcb->eap.es_server.ea_state = eapIdentify; +#if PPP_REMOTENAME + if (pcb->settings.explicit_remote && pcb->remote_name) { + /* + * If we already know the peer's + * unauthenticated name, then there's no + * reason to ask. Go to next state instead. + */ + int len = (int)strlen(pcb->remote_name); + if (len > MAXNAMELEN) { + len = MAXNAMELEN; + } + MEMCPY(pcb->eap.es_server.ea_peer, pcb->remote_name, len); + pcb->eap.es_server.ea_peer[len] = '\0'; + pcb->eap.es_server.ea_peerlen = len; + eap_figure_next_state(pcb, 0); + } +#endif /* PPP_REMOTENAME */ + } + + if (pcb->settings.eap_max_transmits > 0 && + pcb->eap.es_server.ea_requests >= pcb->settings.eap_max_transmits) { + if (pcb->eap.es_server.ea_responses > 0) + ppp_error("EAP: too many Requests sent"); + else + ppp_error("EAP: no response to Requests"); + eap_send_failure(pcb); + return; + } + + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_CTRL_PBUF_MAX_SIZE), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_REQUEST, outp); + PUTCHAR(pcb->eap.es_server.ea_id, outp); + lenloc = outp; + INCPTR(2, outp); + + switch (pcb->eap.es_server.ea_state) { + case eapIdentify: + PUTCHAR(EAPT_IDENTITY, outp); + str = "Name"; + len = strlen(str); + MEMCPY(outp, str, len); + INCPTR(len, outp); + break; + + case eapMD5Chall: + PUTCHAR(EAPT_MD5CHAP, outp); + /* + * pick a random challenge length between + * EAP_MIN_CHALLENGE_LENGTH and EAP_MAX_CHALLENGE_LENGTH + */ + pcb->eap.es_challen = EAP_MIN_CHALLENGE_LENGTH + + magic_pow(EAP_MIN_MAX_POWER_OF_TWO_CHALLENGE_LENGTH); + PUTCHAR(pcb->eap.es_challen, outp); + magic_random_bytes(pcb->eap.es_challenge, pcb->eap.es_challen); + MEMCPY(outp, pcb->eap.es_challenge, pcb->eap.es_challen); + INCPTR(pcb->eap.es_challen, outp); + MEMCPY(outp, pcb->eap.es_server.ea_name, pcb->eap.es_server.ea_namelen); + INCPTR(pcb->eap.es_server.ea_namelen, outp); + break; + +#ifdef USE_SRP + case eapSRP1: + PUTCHAR(EAPT_SRP, outp); + PUTCHAR(EAPSRP_CHALLENGE, outp); + + PUTCHAR(pcb->eap.es_server.ea_namelen, outp); + MEMCPY(outp, pcb->eap.es_server.ea_name, pcb->eap.es_server.ea_namelen); + INCPTR(pcb->eap.es_server.ea_namelen, outp); + + ts = (struct t_server *)pcb->eap.es_server.ea_session; + assert(ts != NULL); + PUTCHAR(ts->s.len, outp); + MEMCPY(outp, ts->s.data, ts->s.len); + INCPTR(ts->s.len, outp); + + if (ts->g.len == 1 && ts->g.data[0] == 2) { + PUTCHAR(0, outp); + } else { + PUTCHAR(ts->g.len, outp); + MEMCPY(outp, ts->g.data, ts->g.len); + INCPTR(ts->g.len, outp); + } + + if (ts->n.len != sizeof (wkmodulus) || + BCMP(ts->n.data, wkmodulus, sizeof (wkmodulus)) != 0) { + MEMCPY(outp, ts->n.data, ts->n.len); + INCPTR(ts->n.len, outp); + } + break; + + case eapSRP2: + PUTCHAR(EAPT_SRP, outp); + PUTCHAR(EAPSRP_SKEY, outp); + + ts = (struct t_server *)pcb->eap.es_server.ea_session; + assert(ts != NULL); + MEMCPY(outp, ts->B.data, ts->B.len); + INCPTR(ts->B.len, outp); + break; + + case eapSRP3: + PUTCHAR(EAPT_SRP, outp); + 
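+		/* Server-Validator request: a 32-bit flags word, the server's
+		   M2 proof from t_serverresponse(), then (optionally) an
+		   obscured pseudonym for the peer to use next time. */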
PUTCHAR(EAPSRP_SVALIDATOR, outp); + PUTLONG(SRPVAL_EBIT, outp); + ts = (struct t_server *)pcb->eap.es_server.ea_session; + assert(ts != NULL); + MEMCPY(outp, t_serverresponse(ts), SHA_DIGESTSIZE); + INCPTR(SHA_DIGESTSIZE, outp); + + if (pncrypt_setkey(0)) { + /* Generate pseudonym */ + optr = outp; + cp = (unsigned char *)pcb->eap.es_server.ea_peer; + if ((j = i = pcb->eap.es_server.ea_peerlen) > 7) + j = 7; + clear[0] = i; + MEMCPY(clear + 1, cp, j); + i -= j; + cp += j; + /* FIXME: if we want to do SRP, we need to find a way to pass the PolarSSL des_context instead of using static memory */ + if (!DesEncrypt(clear, cipher)) { + ppp_dbglog("no DES here; not generating pseudonym"); + break; + } + BZERO(&b64, sizeof (b64)); + outp++; /* space for pseudonym length */ + outp += b64enc(&b64, cipher, 8, outp); + while (i >= 8) { + /* FIXME: if we want to do SRP, we need to find a way to pass the PolarSSL des_context instead of using static memory */ + (void) DesEncrypt(cp, cipher); + outp += b64enc(&b64, cipher, 8, outp); + cp += 8; + i -= 8; + } + if (i > 0) { + MEMCPY(clear, cp, i); + cp += i; + magic_random_bytes(cp, 8-i); + /* FIXME: if we want to do SRP, we need to find a way to pass the PolarSSL des_context instead of using static memory */ + (void) DesEncrypt(clear, cipher); + outp += b64enc(&b64, cipher, 8, outp); + } + outp += b64flush(&b64, outp); + + /* Set length and pad out to next 20 octet boundary */ + i = outp - optr - 1; + *optr = i; + i %= SHA_DIGESTSIZE; + if (i != 0) { + magic_random_bytes(outp, SHA_DIGESTSIZE-i); + INCPTR(SHA_DIGESTSIZE-i, outp); + } + + /* Obscure the pseudonym with SHA1 hash */ + SHA1Init(&ctxt); + SHA1Update(&ctxt, &pcb->eap.es_server.ea_id, 1); + SHA1Update(&ctxt, pcb->eap.es_server.ea_skey, + SESSION_KEY_LEN); + SHA1Update(&ctxt, pcb->eap.es_server.ea_peer, + pcb->eap.es_server.ea_peerlen); + while (optr < outp) { + SHA1Final(dig, &ctxt); + cp = dig; + while (cp < dig + SHA_DIGESTSIZE) + *optr++ ^= *cp++; + SHA1Init(&ctxt); + SHA1Update(&ctxt, &pcb->eap.es_server.ea_id, 1); + SHA1Update(&ctxt, pcb->eap.es_server.ea_skey, + SESSION_KEY_LEN); + SHA1Update(&ctxt, optr - SHA_DIGESTSIZE, + SHA_DIGESTSIZE); + } + } + break; + + case eapSRP4: + PUTCHAR(EAPT_SRP, outp); + PUTCHAR(EAPSRP_LWRECHALLENGE, outp); + pcb->eap.es_challen = EAP_MIN_CHALLENGE_LENGTH + + magic_pow(EAP_MIN_MAX_POWER_OF_TWO_CHALLENGE_LENGTH); + magic_random_bytes(pcb->eap.es_challenge, pcb->eap.es_challen); + MEMCPY(outp, pcb->eap.es_challenge, pcb->eap.es_challen); + INCPTR(pcb->eap.es_challen, outp); + break; +#endif /* USE_SRP */ + + default: + return; + } + + outlen = (outp - (unsigned char*)p->payload) - PPP_HDRLEN; + PUTSHORT(outlen, lenloc); + + pbuf_realloc(p, outlen + PPP_HDRLEN); + ppp_write(pcb, p); + + pcb->eap.es_server.ea_requests++; + + if (pcb->settings.eap_timeout_time > 0) + TIMEOUT(eap_server_timeout, pcb, pcb->settings.eap_timeout_time); +} + +/* + * eap_authpeer - Authenticate our peer (behave as server). + * + * Start server state and send first request. This is called only + * after eap_lowerup. + */ +void eap_authpeer(ppp_pcb *pcb, const char *localname) { + + /* Save the name we're given. */ + pcb->eap.es_server.ea_name = localname; + pcb->eap.es_server.ea_namelen = strlen(localname); + + pcb->eap.es_savedtime = pcb->settings.eap_timeout_time; + + /* Lower layer up yet? 
*/ + if (pcb->eap.es_server.ea_state == eapInitial || + pcb->eap.es_server.ea_state == eapPending) { + pcb->eap.es_server.ea_state = eapPending; + return; + } + + pcb->eap.es_server.ea_state = eapPending; + + /* ID number not updated here intentionally; hashed into M1 */ + eap_send_request(pcb); +} + +/* + * eap_server_timeout - Retransmission timer for sending Requests + * expired. + */ +static void eap_server_timeout(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + if (!eap_server_active(pcb)) + return; + + /* EAP ID number must not change on timeout. */ + eap_send_request(pcb); +} + +/* + * When it's time to send rechallenge the peer, this timeout is + * called. Once the rechallenge is successful, the response handler + * will restart the timer. If it fails, then the link is dropped. + */ +static void eap_rechallenge(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + if (pcb->eap.es_server.ea_state != eapOpen && + pcb->eap.es_server.ea_state != eapSRP4) + return; + + pcb->eap.es_server.ea_requests = 0; + pcb->eap.es_server.ea_state = eapIdentify; + eap_figure_next_state(pcb, 0); + pcb->eap.es_server.ea_id++; + eap_send_request(pcb); +} + +static void srp_lwrechallenge(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + if (pcb->eap.es_server.ea_state != eapOpen || + pcb->eap.es_server.ea_type != EAPT_SRP) + return; + + pcb->eap.es_server.ea_requests = 0; + pcb->eap.es_server.ea_state = eapSRP4; + pcb->eap.es_server.ea_id++; + eap_send_request(pcb); +} +#endif /* PPP_SERVER */ + +/* + * eap_lowerup - The lower layer is now up. + * + * This is called before either eap_authpeer or eap_authwithpeer. See + * link_established() in auth.c. All that's necessary here is to + * return to closed state so that those two routines will do the right + * thing. + */ +static void eap_lowerup(ppp_pcb *pcb) { + pcb->eap.es_client.ea_state = eapClosed; +#if PPP_SERVER + pcb->eap.es_server.ea_state = eapClosed; +#endif /* PPP_SERVER */ +} + +/* + * eap_lowerdown - The lower layer is now down. + * + * Cancel all timeouts and return to initial state. + */ +static void eap_lowerdown(ppp_pcb *pcb) { + + if (eap_client_active(pcb) && pcb->settings.eap_req_time > 0) { + UNTIMEOUT(eap_client_timeout, pcb); + } +#if PPP_SERVER + if (eap_server_active(pcb)) { + if (pcb->settings.eap_timeout_time > 0) { + UNTIMEOUT(eap_server_timeout, pcb); + } + } else { + if ((pcb->eap.es_server.ea_state == eapOpen || + pcb->eap.es_server.ea_state == eapSRP4) && + pcb->eap.es_rechallenge > 0) { + UNTIMEOUT(eap_rechallenge, (void *)pcb); + } + if (pcb->eap.es_server.ea_state == eapOpen && + pcb->eap.es_lwrechallenge > 0) { + UNTIMEOUT(srp_lwrechallenge, (void *)pcb); + } + } + + pcb->eap.es_client.ea_state = pcb->eap.es_server.ea_state = eapInitial; + pcb->eap.es_client.ea_requests = pcb->eap.es_server.ea_requests = 0; +#endif /* PPP_SERVER */ +} + +/* + * eap_protrej - Peer doesn't speak this protocol. + * + * This shouldn't happen. If it does, it represents authentication + * failure. + */ +static void eap_protrej(ppp_pcb *pcb) { + + if (eap_client_active(pcb)) { + ppp_error("EAP authentication failed due to Protocol-Reject"); + auth_withpeer_fail(pcb, PPP_EAP); + } +#if PPP_SERVER + if (eap_server_active(pcb)) { + ppp_error("EAP authentication of peer failed on Protocol-Reject"); + auth_peer_fail(pcb, PPP_EAP); + } +#endif /* PPP_SERVER */ + eap_lowerdown(pcb); +} + +/* + * Format and send a regular EAP Response message. 
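+ *
+ * The packet follows RFC 2284: a one-octet Code (EAP_RESPONSE), a
+ * one-octet Identifier echoed from the Request, a two-octet Length
+ * covering the whole EAP packet, a one-octet Type, and the caller's
+ * Type-Data.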
+ */ +static void eap_send_response(ppp_pcb *pcb, u_char id, u_char typenum, const u_char *str, int lenstr) { + struct pbuf *p; + u_char *outp; + int msglen; + + msglen = EAP_HEADERLEN + sizeof (u_char) + lenstr; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + msglen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_RESPONSE, outp); + PUTCHAR(id, outp); + pcb->eap.es_client.ea_id = id; + PUTSHORT(msglen, outp); + PUTCHAR(typenum, outp); + if (lenstr > 0) { + MEMCPY(outp, str, lenstr); + } + + ppp_write(pcb, p); +} + +/* + * Format and send an MD5-Challenge EAP Response message. + */ +static void eap_chap_response(ppp_pcb *pcb, u_char id, u_char *hash, const char *name, int namelen) { + struct pbuf *p; + u_char *outp; + int msglen; + + msglen = EAP_HEADERLEN + 2 * sizeof (u_char) + MD5_SIGNATURE_SIZE + + namelen; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + msglen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_RESPONSE, outp); + PUTCHAR(id, outp); + pcb->eap.es_client.ea_id = id; + PUTSHORT(msglen, outp); + PUTCHAR(EAPT_MD5CHAP, outp); + PUTCHAR(MD5_SIGNATURE_SIZE, outp); + MEMCPY(outp, hash, MD5_SIGNATURE_SIZE); + INCPTR(MD5_SIGNATURE_SIZE, outp); + if (namelen > 0) { + MEMCPY(outp, name, namelen); + } + + ppp_write(pcb, p); +} + +#ifdef USE_SRP +/* + * Format and send a SRP EAP Response message. + */ +static void +eap_srp_response(esp, id, subtypenum, str, lenstr) +eap_state *esp; +u_char id; +u_char subtypenum; +u_char *str; +int lenstr; +{ + ppp_pcb *pcb = &ppp_pcb_list[pcb->eap.es_unit]; + struct pbuf *p; + u_char *outp; + int msglen; + + msglen = EAP_HEADERLEN + 2 * sizeof (u_char) + lenstr; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + msglen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_RESPONSE, outp); + PUTCHAR(id, outp); + pcb->eap.es_client.ea_id = id; + PUTSHORT(msglen, outp); + PUTCHAR(EAPT_SRP, outp); + PUTCHAR(subtypenum, outp); + if (lenstr > 0) { + MEMCPY(outp, str, lenstr); + } + + ppp_write(pcb, p); +} + +/* + * Format and send a SRP EAP Client Validator Response message. 
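+ *
+ * The body is a 32-bit flags word followed by the client's SHA-1
+ * validator (M1), always SHA_DIGESTSIZE octets.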
+ */ +static void +eap_srpval_response(esp, id, flags, str) +eap_state *esp; +u_char id; +u32_t flags; +u_char *str; +{ + ppp_pcb *pcb = &ppp_pcb_list[pcb->eap.es_unit]; + struct pbuf *p; + u_char *outp; + int msglen; + + msglen = EAP_HEADERLEN + 2 * sizeof (u_char) + sizeof (u32_t) + + SHA_DIGESTSIZE; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + msglen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_RESPONSE, outp); + PUTCHAR(id, outp); + pcb->eap.es_client.ea_id = id; + PUTSHORT(msglen, outp); + PUTCHAR(EAPT_SRP, outp); + PUTCHAR(EAPSRP_CVALIDATOR, outp); + PUTLONG(flags, outp); + MEMCPY(outp, str, SHA_DIGESTSIZE); + + ppp_write(pcb, p); +} +#endif /* USE_SRP */ + +static void eap_send_nak(ppp_pcb *pcb, u_char id, u_char type) { + struct pbuf *p; + u_char *outp; + int msglen; + + msglen = EAP_HEADERLEN + 2 * sizeof (u_char); + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + msglen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_RESPONSE, outp); + PUTCHAR(id, outp); + pcb->eap.es_client.ea_id = id; + PUTSHORT(msglen, outp); + PUTCHAR(EAPT_NAK, outp); + PUTCHAR(type, outp); + + ppp_write(pcb, p); +} + +#ifdef USE_SRP +static char * +name_of_pn_file() +{ + char *user, *path, *file; + struct passwd *pw; + size_t pl; + static bool pnlogged = 0; + + pw = getpwuid(getuid()); + if (pw == NULL || (user = pw->pw_dir) == NULL || user[0] == 0) { + errno = EINVAL; + return (NULL); + } + file = _PATH_PSEUDONYM; + pl = strlen(user) + strlen(file) + 2; + path = malloc(pl); + if (path == NULL) + return (NULL); + (void) slprintf(path, pl, "%s/%s", user, file); + if (!pnlogged) { + ppp_dbglog("pseudonym file: %s", path); + pnlogged = 1; + } + return (path); +} + +static int +open_pn_file(modebits) +mode_t modebits; +{ + char *path; + int fd, err; + + if ((path = name_of_pn_file()) == NULL) + return (-1); + fd = open(path, modebits, S_IRUSR | S_IWUSR); + err = errno; + free(path); + errno = err; + return (fd); +} + +static void +remove_pn_file() +{ + char *path; + + if ((path = name_of_pn_file()) != NULL) { + (void) unlink(path); + (void) free(path); + } +} + +static void +write_pseudonym(esp, inp, len, id) +eap_state *esp; +u_char *inp; +int len, id; +{ + u_char val; + u_char *datp, *digp; + SHA1_CTX ctxt; + u_char dig[SHA_DIGESTSIZE]; + int dsize, fd, olen = len; + + /* + * Do the decoding by working backwards. This eliminates the need + * to save the decoded output in a separate buffer. 
+ */ + val = id; + while (len > 0) { + if ((dsize = len % SHA_DIGESTSIZE) == 0) + dsize = SHA_DIGESTSIZE; + len -= dsize; + datp = inp + len; + SHA1Init(&ctxt); + SHA1Update(&ctxt, &val, 1); + SHA1Update(&ctxt, pcb->eap.es_client.ea_skey, SESSION_KEY_LEN); + if (len > 0) { + SHA1Update(&ctxt, datp, SHA_DIGESTSIZE); + } else { + SHA1Update(&ctxt, pcb->eap.es_client.ea_name, + pcb->eap.es_client.ea_namelen); + } + SHA1Final(dig, &ctxt); + for (digp = dig; digp < dig + SHA_DIGESTSIZE; digp++) + *datp++ ^= *digp; + } + + /* Now check that the result is sane */ + if (olen <= 0 || *inp + 1 > olen) { + ppp_dbglog("EAP: decoded pseudonym is unusable <%.*B>", olen, inp); + return; + } + + /* Save it away */ + fd = open_pn_file(O_WRONLY | O_CREAT | O_TRUNC); + if (fd < 0) { + ppp_dbglog("EAP: error saving pseudonym: %m"); + return; + } + len = write(fd, inp + 1, *inp); + if (close(fd) != -1 && len == *inp) { + ppp_dbglog("EAP: saved pseudonym"); + pcb->eap.es_usedpseudo = 0; + } else { + ppp_dbglog("EAP: failed to save pseudonym"); + remove_pn_file(); + } +} +#endif /* USE_SRP */ + +/* + * eap_request - Receive EAP Request message (client mode). + */ +static void eap_request(ppp_pcb *pcb, u_char *inp, int id, int len) { + u_char typenum; + u_char vallen; + int secret_len; + char secret[MAXSECRETLEN]; + char rhostname[MAXNAMELEN]; + lwip_md5_context mdContext; + u_char hash[MD5_SIGNATURE_SIZE]; +#ifdef USE_SRP + struct t_client *tc; + struct t_num sval, gval, Nval, *Ap, Bval; + u_char vals[2]; + SHA1_CTX ctxt; + u_char dig[SHA_DIGESTSIZE]; + int fd; +#endif /* USE_SRP */ + + /* + * Note: we update es_client.ea_id *only if* a Response + * message is being generated. Otherwise, we leave it the + * same for duplicate detection purposes. + */ + + pcb->eap.es_client.ea_requests++; + if (pcb->settings.eap_allow_req != 0 && + pcb->eap.es_client.ea_requests > pcb->settings.eap_allow_req) { + ppp_info("EAP: received too many Request messages"); + if (pcb->settings.eap_req_time > 0) { + UNTIMEOUT(eap_client_timeout, pcb); + } + auth_withpeer_fail(pcb, PPP_EAP); + return; + } + + if (len <= 0) { + ppp_error("EAP: empty Request message discarded"); + return; + } + + GETCHAR(typenum, inp); + len--; + + switch (typenum) { + case EAPT_IDENTITY: + if (len > 0) + ppp_info("EAP: Identity prompt \"%.*q\"", len, inp); +#ifdef USE_SRP + if (pcb->eap.es_usepseudo && + (pcb->eap.es_usedpseudo == 0 || + (pcb->eap.es_usedpseudo == 1 && + id == pcb->eap.es_client.ea_id))) { + pcb->eap.es_usedpseudo = 1; + /* Try to get a pseudonym */ + if ((fd = open_pn_file(O_RDONLY)) >= 0) { + strcpy(rhostname, SRP_PSEUDO_ID); + len = read(fd, rhostname + SRP_PSEUDO_LEN, + sizeof (rhostname) - SRP_PSEUDO_LEN); + /* XXX NAI unsupported */ + if (len > 0) { + eap_send_response(pcb, id, typenum, + rhostname, len + SRP_PSEUDO_LEN); + } + (void) close(fd); + if (len > 0) + break; + } + } + /* Stop using pseudonym now. */ + if (pcb->eap.es_usepseudo && pcb->eap.es_usedpseudo != 2) { + remove_pn_file(); + pcb->eap.es_usedpseudo = 2; + } +#endif /* USE_SRP */ + eap_send_response(pcb, id, typenum, (const u_char*)pcb->eap.es_client.ea_name, + pcb->eap.es_client.ea_namelen); + break; + + case EAPT_NOTIFICATION: + if (len > 0) + ppp_info("EAP: Notification \"%.*q\"", len, inp); + eap_send_response(pcb, id, typenum, NULL, 0); + break; + + case EAPT_NAK: + /* + * Avoid the temptation to send Response Nak in reply + * to Request Nak here. It can only lead to trouble. 
+ */ + ppp_warn("EAP: unexpected Nak in Request; ignored"); + /* Return because we're waiting for something real. */ + return; + + case EAPT_MD5CHAP: + if (len < 1) { + ppp_error("EAP: received MD5-Challenge with no data"); + /* Bogus request; wait for something real. */ + return; + } + GETCHAR(vallen, inp); + len--; + if (vallen < 8 || vallen > len) { + ppp_error("EAP: MD5-Challenge with bad length %d (8..%d)", + vallen, len); + /* Try something better. */ + eap_send_nak(pcb, id, EAPT_SRP); + break; + } + + /* Not so likely to happen. */ + if (vallen >= len + sizeof (rhostname)) { + ppp_dbglog("EAP: trimming really long peer name down"); + MEMCPY(rhostname, inp + vallen, sizeof (rhostname) - 1); + rhostname[sizeof (rhostname) - 1] = '\0'; + } else { + MEMCPY(rhostname, inp + vallen, len - vallen); + rhostname[len - vallen] = '\0'; + } + +#if PPP_REMOTENAME + /* In case the remote doesn't give us his name. */ + if (pcb->settings.explicit_remote || + (pcb->settings.remote_name[0] != '\0' && vallen == len)) + strlcpy(rhostname, pcb->settings.remote_name, sizeof (rhostname)); +#endif /* PPP_REMOTENAME */ + + /* + * Get the secret for authenticating ourselves with + * the specified host. + */ + if (!get_secret(pcb, pcb->eap.es_client.ea_name, + rhostname, secret, &secret_len, 0)) { + ppp_dbglog("EAP: no MD5 secret for auth to %q", rhostname); + eap_send_nak(pcb, id, EAPT_SRP); + break; + } + lwip_md5_init(&mdContext); + lwip_md5_starts(&mdContext); + typenum = id; + lwip_md5_update(&mdContext, &typenum, 1); + lwip_md5_update(&mdContext, (u_char *)secret, secret_len); + BZERO(secret, sizeof (secret)); + lwip_md5_update(&mdContext, inp, vallen); + lwip_md5_finish(&mdContext, hash); + lwip_md5_free(&mdContext); + eap_chap_response(pcb, id, hash, pcb->eap.es_client.ea_name, + pcb->eap.es_client.ea_namelen); + break; + +#ifdef USE_SRP + case EAPT_SRP: + if (len < 1) { + ppp_error("EAP: received empty SRP Request"); + /* Bogus request; wait for something real. */ + return; + } + + /* Get subtype */ + GETCHAR(vallen, inp); + len--; + switch (vallen) { + case EAPSRP_CHALLENGE: + tc = NULL; + if (pcb->eap.es_client.ea_session != NULL) { + tc = (struct t_client *)pcb->eap.es_client. + ea_session; + /* + * If this is a new challenge, then start + * over with a new client session context. + * Otherwise, just resend last response. + */ + if (id != pcb->eap.es_client.ea_id) { + t_clientclose(tc); + pcb->eap.es_client.ea_session = NULL; + tc = NULL; + } + } + /* No session key just yet */ + pcb->eap.es_client.ea_skey = NULL; + if (tc == NULL) { + int rhostnamelen; + + GETCHAR(vallen, inp); + len--; + if (vallen >= len) { + ppp_error("EAP: badly-formed SRP Challenge" + " (name)"); + /* Ignore badly-formed messages */ + return; + } + MEMCPY(rhostname, inp, vallen); + rhostname[vallen] = '\0'; + INCPTR(vallen, inp); + len -= vallen; + + /* + * In case the remote doesn't give us his name, + * use configured name. 
+ */ + if (explicit_remote || + (remote_name[0] != '\0' && vallen == 0)) { + strlcpy(rhostname, remote_name, + sizeof (rhostname)); + } + + rhostnamelen = (int)strlen(rhostname); + if (rhostnamelen > MAXNAMELEN) { + rhostnamelen = MAXNAMELEN; + } + MEMCPY(pcb->eap.es_client.ea_peer, rhostname, rhostnamelen); + pcb->eap.es_client.ea_peer[rhostnamelen] = '\0'; + pcb->eap.es_client.ea_peerlen = rhostnamelen; + + GETCHAR(vallen, inp); + len--; + if (vallen >= len) { + ppp_error("EAP: badly-formed SRP Challenge" + " (s)"); + /* Ignore badly-formed messages */ + return; + } + sval.data = inp; + sval.len = vallen; + INCPTR(vallen, inp); + len -= vallen; + + GETCHAR(vallen, inp); + len--; + if (vallen > len) { + ppp_error("EAP: badly-formed SRP Challenge" + " (g)"); + /* Ignore badly-formed messages */ + return; + } + /* If no generator present, then use value 2 */ + if (vallen == 0) { + gval.data = (u_char *)"\002"; + gval.len = 1; + } else { + gval.data = inp; + gval.len = vallen; + } + INCPTR(vallen, inp); + len -= vallen; + + /* + * If no modulus present, then use well-known + * value. + */ + if (len == 0) { + Nval.data = (u_char *)wkmodulus; + Nval.len = sizeof (wkmodulus); + } else { + Nval.data = inp; + Nval.len = len; + } + tc = t_clientopen(pcb->eap.es_client.ea_name, + &Nval, &gval, &sval); + if (tc == NULL) { + eap_send_nak(pcb, id, EAPT_MD5CHAP); + break; + } + pcb->eap.es_client.ea_session = (void *)tc; + + /* Add Challenge ID & type to verifier */ + vals[0] = id; + vals[1] = EAPT_SRP; + t_clientaddexdata(tc, vals, 2); + } + Ap = t_clientgenexp(tc); + eap_srp_response(esp, id, EAPSRP_CKEY, Ap->data, + Ap->len); + break; + + case EAPSRP_SKEY: + tc = (struct t_client *)pcb->eap.es_client.ea_session; + if (tc == NULL) { + ppp_warn("EAP: peer sent Subtype 2 without 1"); + eap_send_nak(pcb, id, EAPT_MD5CHAP); + break; + } + if (pcb->eap.es_client.ea_skey != NULL) { + /* + * ID number should not change here. Warn + * if it does (but otherwise ignore). + */ + if (id != pcb->eap.es_client.ea_id) { + ppp_warn("EAP: ID changed from %d to %d " + "in SRP Subtype 2 rexmit", + pcb->eap.es_client.ea_id, id); + } + } else { + if (get_srp_secret(pcb->eap.es_unit, + pcb->eap.es_client.ea_name, + pcb->eap.es_client.ea_peer, secret, 0) == 0) { + /* + * Can't work with this peer because + * the secret is missing. Just give + * up. + */ + eap_send_nak(pcb, id, EAPT_MD5CHAP); + break; + } + Bval.data = inp; + Bval.len = len; + t_clientpasswd(tc, secret); + BZERO(secret, sizeof (secret)); + pcb->eap.es_client.ea_skey = + t_clientgetkey(tc, &Bval); + if (pcb->eap.es_client.ea_skey == NULL) { + /* Server is rogue; stop now */ + ppp_error("EAP: SRP server is rogue"); + goto client_failure; + } + } + eap_srpval_response(esp, id, SRPVAL_EBIT, + t_clientresponse(tc)); + break; + + case EAPSRP_SVALIDATOR: + tc = (struct t_client *)pcb->eap.es_client.ea_session; + if (tc == NULL || pcb->eap.es_client.ea_skey == NULL) { + ppp_warn("EAP: peer sent Subtype 3 without 1/2"); + eap_send_nak(pcb, id, EAPT_MD5CHAP); + break; + } + /* + * If we're already open, then this ought to be a + * duplicate. Otherwise, check that the server is + * who we think it is. 
+ */ + if (pcb->eap.es_client.ea_state == eapOpen) { + if (id != pcb->eap.es_client.ea_id) { + ppp_warn("EAP: ID changed from %d to %d " + "in SRP Subtype 3 rexmit", + pcb->eap.es_client.ea_id, id); + } + } else { + len -= sizeof (u32_t) + SHA_DIGESTSIZE; + if (len < 0 || t_clientverify(tc, inp + + sizeof (u32_t)) != 0) { + ppp_error("EAP: SRP server verification " + "failed"); + goto client_failure; + } + GETLONG(pcb->eap.es_client.ea_keyflags, inp); + /* Save pseudonym if user wants it. */ + if (len > 0 && pcb->eap.es_usepseudo) { + INCPTR(SHA_DIGESTSIZE, inp); + write_pseudonym(esp, inp, len, id); + } + } + /* + * We've verified our peer. We're now mostly done, + * except for waiting on the regular EAP Success + * message. + */ + eap_srp_response(esp, id, EAPSRP_ACK, NULL, 0); + break; + + case EAPSRP_LWRECHALLENGE: + if (len < 4) { + ppp_warn("EAP: malformed Lightweight rechallenge"); + return; + } + SHA1Init(&ctxt); + vals[0] = id; + SHA1Update(&ctxt, vals, 1); + SHA1Update(&ctxt, pcb->eap.es_client.ea_skey, + SESSION_KEY_LEN); + SHA1Update(&ctxt, inp, len); + SHA1Update(&ctxt, pcb->eap.es_client.ea_name, + pcb->eap.es_client.ea_namelen); + SHA1Final(dig, &ctxt); + eap_srp_response(esp, id, EAPSRP_LWRECHALLENGE, dig, + SHA_DIGESTSIZE); + break; + + default: + ppp_error("EAP: unknown SRP Subtype %d", vallen); + eap_send_nak(pcb, id, EAPT_MD5CHAP); + break; + } + break; +#endif /* USE_SRP */ + + default: + ppp_info("EAP: unknown authentication type %d; Naking", typenum); + eap_send_nak(pcb, id, EAPT_SRP); + break; + } + + if (pcb->settings.eap_req_time > 0) { + UNTIMEOUT(eap_client_timeout, pcb); + TIMEOUT(eap_client_timeout, pcb, + pcb->settings.eap_req_time); + } + return; + +#ifdef USE_SRP +client_failure: + pcb->eap.es_client.ea_state = eapBadAuth; + if (pcb->settings.eap_req_time > 0) { + UNTIMEOUT(eap_client_timeout, (void *)esp); + } + pcb->eap.es_client.ea_session = NULL; + t_clientclose(tc); + auth_withpeer_fail(pcb, PPP_EAP); +#endif /* USE_SRP */ +} + +#if PPP_SERVER +/* + * eap_response - Receive EAP Response message (server mode). 
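+ *
+ * A Response whose Identifier does not match the outstanding Request
+ * is discarded.  Otherwise the server state machine is advanced
+ * through eap_figure_next_state() and, if authentication is not yet
+ * complete, the next Request is sent.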
+ */ +static void eap_response(ppp_pcb *pcb, u_char *inp, int id, int len) { + u_char typenum; + u_char vallen; + int secret_len; + char secret[MAXSECRETLEN]; + char rhostname[MAXNAMELEN]; + lwip_md5_context mdContext; + u_char hash[MD5_SIGNATURE_SIZE]; +#ifdef USE_SRP + struct t_server *ts; + struct t_num A; + SHA1_CTX ctxt; + u_char dig[SHA_DIGESTSIZE]; +#endif /* USE_SRP */ + + if (pcb->eap.es_server.ea_id != id) { + ppp_dbglog("EAP: discarding Response %d; expected ID %d", id, + pcb->eap.es_server.ea_id); + return; + } + + pcb->eap.es_server.ea_responses++; + + if (len <= 0) { + ppp_error("EAP: empty Response message discarded"); + return; + } + + GETCHAR(typenum, inp); + len--; + + switch (typenum) { + case EAPT_IDENTITY: + if (pcb->eap.es_server.ea_state != eapIdentify) { + ppp_dbglog("EAP discarding unwanted Identify \"%.q\"", len, + inp); + break; + } + ppp_info("EAP: unauthenticated peer name \"%.*q\"", len, inp); + if (len > MAXNAMELEN) { + len = MAXNAMELEN; + } + MEMCPY(pcb->eap.es_server.ea_peer, inp, len); + pcb->eap.es_server.ea_peer[len] = '\0'; + pcb->eap.es_server.ea_peerlen = len; + eap_figure_next_state(pcb, 0); + break; + + case EAPT_NOTIFICATION: + ppp_dbglog("EAP unexpected Notification; response discarded"); + break; + + case EAPT_NAK: + if (len < 1) { + ppp_info("EAP: Nak Response with no suggested protocol"); + eap_figure_next_state(pcb, 1); + break; + } + + GETCHAR(vallen, inp); + len--; + + if ( +#if PPP_REMOTENAME + !pcb->explicit_remote && +#endif /* PPP_REMOTENAME */ + pcb->eap.es_server.ea_state == eapIdentify){ + /* Peer cannot Nak Identify Request */ + eap_figure_next_state(pcb, 1); + break; + } + + switch (vallen) { + case EAPT_SRP: + /* Run through SRP validator selection again. */ + pcb->eap.es_server.ea_state = eapIdentify; + eap_figure_next_state(pcb, 0); + break; + + case EAPT_MD5CHAP: + pcb->eap.es_server.ea_state = eapMD5Chall; + break; + + default: + ppp_dbglog("EAP: peer requesting unknown Type %d", vallen); + switch (pcb->eap.es_server.ea_state) { + case eapSRP1: + case eapSRP2: + case eapSRP3: + pcb->eap.es_server.ea_state = eapMD5Chall; + break; + case eapMD5Chall: + case eapSRP4: + pcb->eap.es_server.ea_state = eapIdentify; + eap_figure_next_state(pcb, 0); + break; + default: + break; + } + break; + } + break; + + case EAPT_MD5CHAP: + if (pcb->eap.es_server.ea_state != eapMD5Chall) { + ppp_error("EAP: unexpected MD5-Response"); + eap_figure_next_state(pcb, 1); + break; + } + if (len < 1) { + ppp_error("EAP: received MD5-Response with no data"); + eap_figure_next_state(pcb, 1); + break; + } + GETCHAR(vallen, inp); + len--; + if (vallen != 16 || vallen > len) { + ppp_error("EAP: MD5-Response with bad length %d", vallen); + eap_figure_next_state(pcb, 1); + break; + } + + /* Not so likely to happen. */ + if (vallen >= len + sizeof (rhostname)) { + ppp_dbglog("EAP: trimming really long peer name down"); + MEMCPY(rhostname, inp + vallen, sizeof (rhostname) - 1); + rhostname[sizeof (rhostname) - 1] = '\0'; + } else { + MEMCPY(rhostname, inp + vallen, len - vallen); + rhostname[len - vallen] = '\0'; + } + +#if PPP_REMOTENAME + /* In case the remote doesn't give us his name. */ + if (explicit_remote || + (remote_name[0] != '\0' && vallen == len)) + strlcpy(rhostname, remote_name, sizeof (rhostname)); +#endif /* PPP_REMOTENAME */ + + /* + * Get the secret for authenticating the specified + * host. 
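+	 * The peer's answer is then checked against MD5(Identifier |
+	 * secret | challenge), the same digest an RFC 1994 CHAP peer
+	 * would compute.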
+ */ + if (!get_secret(pcb, rhostname, + pcb->eap.es_server.ea_name, secret, &secret_len, 1)) { + ppp_dbglog("EAP: no MD5 secret for auth of %q", rhostname); + eap_send_failure(pcb); + break; + } + lwip_md5_init(&mdContext); + lwip_md5_starts(&mdContext); + lwip_md5_update(&mdContext, &pcb->eap.es_server.ea_id, 1); + lwip_md5_update(&mdContext, (u_char *)secret, secret_len); + BZERO(secret, sizeof (secret)); + lwip_md5_update(&mdContext, pcb->eap.es_challenge, pcb->eap.es_challen); + lwip_md5_finish(&mdContext, hash); + lwip_md5_free(&mdContext); + if (BCMP(hash, inp, MD5_SIGNATURE_SIZE) != 0) { + eap_send_failure(pcb); + break; + } + pcb->eap.es_server.ea_type = EAPT_MD5CHAP; + eap_send_success(pcb); + eap_figure_next_state(pcb, 0); + if (pcb->eap.es_rechallenge != 0) + TIMEOUT(eap_rechallenge, pcb, pcb->eap.es_rechallenge); + break; + +#ifdef USE_SRP + case EAPT_SRP: + if (len < 1) { + ppp_error("EAP: empty SRP Response"); + eap_figure_next_state(pcb, 1); + break; + } + GETCHAR(typenum, inp); + len--; + switch (typenum) { + case EAPSRP_CKEY: + if (pcb->eap.es_server.ea_state != eapSRP1) { + ppp_error("EAP: unexpected SRP Subtype 1 Response"); + eap_figure_next_state(pcb, 1); + break; + } + A.data = inp; + A.len = len; + ts = (struct t_server *)pcb->eap.es_server.ea_session; + assert(ts != NULL); + pcb->eap.es_server.ea_skey = t_servergetkey(ts, &A); + if (pcb->eap.es_server.ea_skey == NULL) { + /* Client's A value is bogus; terminate now */ + ppp_error("EAP: bogus A value from client"); + eap_send_failure(pcb); + } else { + eap_figure_next_state(pcb, 0); + } + break; + + case EAPSRP_CVALIDATOR: + if (pcb->eap.es_server.ea_state != eapSRP2) { + ppp_error("EAP: unexpected SRP Subtype 2 Response"); + eap_figure_next_state(pcb, 1); + break; + } + if (len < sizeof (u32_t) + SHA_DIGESTSIZE) { + ppp_error("EAP: M1 length %d < %d", len, + sizeof (u32_t) + SHA_DIGESTSIZE); + eap_figure_next_state(pcb, 1); + break; + } + GETLONG(pcb->eap.es_server.ea_keyflags, inp); + ts = (struct t_server *)pcb->eap.es_server.ea_session; + assert(ts != NULL); + if (t_serververify(ts, inp)) { + ppp_info("EAP: unable to validate client identity"); + eap_send_failure(pcb); + break; + } + eap_figure_next_state(pcb, 0); + break; + + case EAPSRP_ACK: + if (pcb->eap.es_server.ea_state != eapSRP3) { + ppp_error("EAP: unexpected SRP Subtype 3 Response"); + eap_send_failure(esp); + break; + } + pcb->eap.es_server.ea_type = EAPT_SRP; + eap_send_success(pcb, esp); + eap_figure_next_state(pcb, 0); + if (pcb->eap.es_rechallenge != 0) + TIMEOUT(eap_rechallenge, pcb, + pcb->eap.es_rechallenge); + if (pcb->eap.es_lwrechallenge != 0) + TIMEOUT(srp_lwrechallenge, pcb, + pcb->eap.es_lwrechallenge); + break; + + case EAPSRP_LWRECHALLENGE: + if (pcb->eap.es_server.ea_state != eapSRP4) { + ppp_info("EAP: unexpected SRP Subtype 4 Response"); + return; + } + if (len != SHA_DIGESTSIZE) { + ppp_error("EAP: bad Lightweight rechallenge " + "response"); + return; + } + SHA1Init(&ctxt); + vallen = id; + SHA1Update(&ctxt, &vallen, 1); + SHA1Update(&ctxt, pcb->eap.es_server.ea_skey, + SESSION_KEY_LEN); + SHA1Update(&ctxt, pcb->eap.es_challenge, pcb->eap.es_challen); + SHA1Update(&ctxt, pcb->eap.es_server.ea_peer, + pcb->eap.es_server.ea_peerlen); + SHA1Final(dig, &ctxt); + if (BCMP(dig, inp, SHA_DIGESTSIZE) != 0) { + ppp_error("EAP: failed Lightweight rechallenge"); + eap_send_failure(pcb); + break; + } + pcb->eap.es_server.ea_state = eapOpen; + if (pcb->eap.es_lwrechallenge != 0) + TIMEOUT(srp_lwrechallenge, esp, + pcb->eap.es_lwrechallenge); + 
break; + } + break; +#endif /* USE_SRP */ + + default: + /* This can't happen. */ + ppp_error("EAP: unknown Response type %d; ignored", typenum); + return; + } + + if (pcb->settings.eap_timeout_time > 0) { + UNTIMEOUT(eap_server_timeout, pcb); + } + + if (pcb->eap.es_server.ea_state != eapBadAuth && + pcb->eap.es_server.ea_state != eapOpen) { + pcb->eap.es_server.ea_id++; + eap_send_request(pcb); + } +} +#endif /* PPP_SERVER */ + +/* + * eap_success - Receive EAP Success message (client mode). + */ +static void eap_success(ppp_pcb *pcb, u_char *inp, int id, int len) { + LWIP_UNUSED_ARG(id); + + if (pcb->eap.es_client.ea_state != eapOpen && !eap_client_active(pcb)) { + ppp_dbglog("EAP unexpected success message in state %s (%d)", + eap_state_name(pcb->eap.es_client.ea_state), + pcb->eap.es_client.ea_state); + return; + } + + if (pcb->settings.eap_req_time > 0) { + UNTIMEOUT(eap_client_timeout, pcb); + } + + if (len > 0) { + /* This is odd. The spec doesn't allow for this. */ + PRINTMSG(inp, len); + } + + pcb->eap.es_client.ea_state = eapOpen; + auth_withpeer_success(pcb, PPP_EAP, 0); +} + +/* + * eap_failure - Receive EAP Failure message (client mode). + */ +static void eap_failure(ppp_pcb *pcb, u_char *inp, int id, int len) { + LWIP_UNUSED_ARG(id); + + if (!eap_client_active(pcb)) { + ppp_dbglog("EAP unexpected failure message in state %s (%d)", + eap_state_name(pcb->eap.es_client.ea_state), + pcb->eap.es_client.ea_state); + } + + if (pcb->settings.eap_req_time > 0) { + UNTIMEOUT(eap_client_timeout, pcb); + } + + if (len > 0) { + /* This is odd. The spec doesn't allow for this. */ + PRINTMSG(inp, len); + } + + pcb->eap.es_client.ea_state = eapBadAuth; + + ppp_error("EAP: peer reports authentication failure"); + auth_withpeer_fail(pcb, PPP_EAP); +} + +/* + * eap_input - Handle received EAP message. + */ +static void eap_input(ppp_pcb *pcb, u_char *inp, int inlen) { + u_char code, id; + int len; + + /* + * Parse header (code, id and length). If packet too short, + * drop it. + */ + if (inlen < EAP_HEADERLEN) { + ppp_error("EAP: packet too short: %d < %d", inlen, EAP_HEADERLEN); + return; + } + GETCHAR(code, inp); + GETCHAR(id, inp); + GETSHORT(len, inp); + if (len < EAP_HEADERLEN || len > inlen) { + ppp_error("EAP: packet has illegal length field %d (%d..%d)", len, + EAP_HEADERLEN, inlen); + return; + } + len -= EAP_HEADERLEN; + + /* Dispatch based on message code */ + switch (code) { + case EAP_REQUEST: + eap_request(pcb, inp, id, len); + break; + +#if PPP_SERVER + case EAP_RESPONSE: + eap_response(pcb, inp, id, len); + break; +#endif /* PPP_SERVER */ + + case EAP_SUCCESS: + eap_success(pcb, inp, id, len); + break; + + case EAP_FAILURE: + eap_failure(pcb, inp, id, len); + break; + + default: /* XXX Need code reject */ + /* Note: it's not legal to send EAP Nak here. */ + ppp_warn("EAP: unknown code %d received", code); + break; + } +} + +#if PRINTPKT_SUPPORT +/* + * eap_printpkt - print the contents of an EAP packet. 
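+ *
+ * Returns the number of octets consumed from the input buffer, or 0
+ * if the EAP header itself is malformed.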
+ */ +static const char* const eap_codenames[] = { + "Request", "Response", "Success", "Failure" +}; + +static const char* const eap_typenames[] = { + "Identity", "Notification", "Nak", "MD5-Challenge", + "OTP", "Generic-Token", NULL, NULL, + "RSA", "DSS", "KEA", "KEA-Validate", + "TLS", "Defender", "Windows 2000", "Arcot", + "Cisco", "Nokia", "SRP" +}; + +static int eap_printpkt(const u_char *inp, int inlen, void (*printer) (void *, const char *, ...), void *arg) { + int code, id, len, rtype, vallen; + const u_char *pstart; + u32_t uval; + + if (inlen < EAP_HEADERLEN) + return (0); + pstart = inp; + GETCHAR(code, inp); + GETCHAR(id, inp); + GETSHORT(len, inp); + if (len < EAP_HEADERLEN || len > inlen) + return (0); + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(eap_codenames)) + printer(arg, " %s", eap_codenames[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= EAP_HEADERLEN; + switch (code) { + case EAP_REQUEST: + if (len < 1) { + printer(arg, " "); + break; + } + GETCHAR(rtype, inp); + len--; + if (rtype >= 1 && rtype <= (int)LWIP_ARRAYSIZE(eap_typenames)) + printer(arg, " %s", eap_typenames[rtype-1]); + else + printer(arg, " type=0x%x", rtype); + switch (rtype) { + case EAPT_IDENTITY: + case EAPT_NOTIFICATION: + if (len > 0) { + printer(arg, " "); + INCPTR(len, inp); + len = 0; + } else { + printer(arg, " "); + } + break; + + case EAPT_MD5CHAP: + if (len <= 0) + break; + GETCHAR(vallen, inp); + len--; + if (vallen > len) + goto truncated; + printer(arg, " ", vallen, inp); + INCPTR(vallen, inp); + len -= vallen; + if (len > 0) { + printer(arg, " "); + INCPTR(len, inp); + len = 0; + } else { + printer(arg, " "); + } + break; + + case EAPT_SRP: + if (len < 3) + goto truncated; + GETCHAR(vallen, inp); + len--; + printer(arg, "-%d", vallen); + switch (vallen) { + case EAPSRP_CHALLENGE: + GETCHAR(vallen, inp); + len--; + if (vallen >= len) + goto truncated; + if (vallen > 0) { + printer(arg, " "); + } else { + printer(arg, " "); + } + INCPTR(vallen, inp); + len -= vallen; + GETCHAR(vallen, inp); + len--; + if (vallen >= len) + goto truncated; + printer(arg, " ", vallen, inp); + INCPTR(vallen, inp); + len -= vallen; + GETCHAR(vallen, inp); + len--; + if (vallen > len) + goto truncated; + if (vallen == 0) { + printer(arg, " "); + } else { + printer(arg, " ", vallen, inp); + } + INCPTR(vallen, inp); + len -= vallen; + if (len == 0) { + printer(arg, " "); + } else { + printer(arg, " ", len, inp); + INCPTR(len, inp); + len = 0; + } + break; + + case EAPSRP_SKEY: + printer(arg, " ", len, inp); + INCPTR(len, inp); + len = 0; + break; + + case EAPSRP_SVALIDATOR: + if (len < (int)sizeof (u32_t)) + break; + GETLONG(uval, inp); + len -= sizeof (u32_t); + if (uval & SRPVAL_EBIT) { + printer(arg, " E"); + uval &= ~SRPVAL_EBIT; + } + if (uval != 0) { + printer(arg, " f<%X>", uval); + } + if ((vallen = len) > SHA_DIGESTSIZE) + vallen = SHA_DIGESTSIZE; + printer(arg, " ", len, inp, + len < SHA_DIGESTSIZE ? "?" 
: ""); + INCPTR(vallen, inp); + len -= vallen; + if (len > 0) { + printer(arg, " ", len, inp); + INCPTR(len, inp); + len = 0; + } + break; + + case EAPSRP_LWRECHALLENGE: + printer(arg, " ", len, inp); + INCPTR(len, inp); + len = 0; + break; + default: + break; + } + break; + default: + break; + } + break; + + case EAP_RESPONSE: + if (len < 1) + break; + GETCHAR(rtype, inp); + len--; + if (rtype >= 1 && rtype <= (int)LWIP_ARRAYSIZE(eap_typenames)) + printer(arg, " %s", eap_typenames[rtype-1]); + else + printer(arg, " type=0x%x", rtype); + switch (rtype) { + case EAPT_IDENTITY: + if (len > 0) { + printer(arg, " "); + INCPTR(len, inp); + len = 0; + } + break; + + case EAPT_NAK: + if (len <= 0) { + printer(arg, " "); + break; + } + GETCHAR(rtype, inp); + len--; + printer(arg, " = 1 && rtype < (int)LWIP_ARRAYSIZE(eap_typenames)) + printer(arg, " (%s)", eap_typenames[rtype-1]); + printer(arg, ">"); + break; + + case EAPT_MD5CHAP: + if (len <= 0) { + printer(arg, " "); + break; + } + GETCHAR(vallen, inp); + len--; + if (vallen > len) + goto truncated; + printer(arg, " ", vallen, inp); + INCPTR(vallen, inp); + len -= vallen; + if (len > 0) { + printer(arg, " "); + INCPTR(len, inp); + len = 0; + } else { + printer(arg, " "); + } + break; + + case EAPT_SRP: + if (len < 1) + goto truncated; + GETCHAR(vallen, inp); + len--; + printer(arg, "-%d", vallen); + switch (vallen) { + case EAPSRP_CKEY: + printer(arg, " ", len, inp); + INCPTR(len, inp); + len = 0; + break; + + case EAPSRP_CVALIDATOR: + if (len < (int)sizeof (u32_t)) + break; + GETLONG(uval, inp); + len -= sizeof (u32_t); + if (uval & SRPVAL_EBIT) { + printer(arg, " E"); + uval &= ~SRPVAL_EBIT; + } + if (uval != 0) { + printer(arg, " f<%X>", uval); + } + printer(arg, " ", len, inp, + len == SHA_DIGESTSIZE ? "" : "?"); + INCPTR(len, inp); + len = 0; + break; + + case EAPSRP_ACK: + break; + + case EAPSRP_LWRECHALLENGE: + printer(arg, " ", len, inp, + len == SHA_DIGESTSIZE ? "" : "?"); + if ((vallen = len) > SHA_DIGESTSIZE) + vallen = SHA_DIGESTSIZE; + INCPTR(vallen, inp); + len -= vallen; + break; + default: + break; + } + break; + default: + break; + } + break; + + case EAP_SUCCESS: /* No payload expected for these! */ + case EAP_FAILURE: + default: + break; + + truncated: + printer(arg, " "); + break; + } + + if (len > 8) + printer(arg, "%8B...", inp); + else if (len > 0) + printer(arg, "%.*B", len, inp); + INCPTR(len, inp); + + return (inp - pstart); +} +#endif /* PRINTPKT_SUPPORT */ + +#endif /* PPP_SUPPORT && EAP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/ecp.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/ecp.c new file mode 100644 index 0000000..4d84f60 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/ecp.c @@ -0,0 +1,191 @@ +/* + * ecp.c - PPP Encryption Control Protocol. + * + * Copyright (c) 2002 Google, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. 
The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * Derived from ccp.c, which is: + * + * Copyright (c) 1994-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && ECP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/ecp.h" + +#if PPP_OPTIONS +static option_t ecp_option_list[] = { + { "noecp", o_bool, &ecp_protent.enabled_flag, + "Disable ECP negotiation" }, + { "-ecp", o_bool, &ecp_protent.enabled_flag, + "Disable ECP negotiation", OPT_ALIAS }, + + { NULL } +}; +#endif /* PPP_OPTIONS */ + +/* + * Protocol entry points from main code. 
+ */ +static void ecp_init (int unit); +/* +static void ecp_open (int unit); +static void ecp_close (int unit, char *); +static void ecp_lowerup (int unit); +static void ecp_lowerdown (int); +static void ecp_input (int unit, u_char *pkt, int len); +static void ecp_protrej (int unit); +*/ +#if PRINTPKT_SUPPORT +static int ecp_printpkt (const u_char *pkt, int len, + void (*printer) (void *, char *, ...), + void *arg); +#endif /* PRINTPKT_SUPPORT */ +/* +static void ecp_datainput (int unit, u_char *pkt, int len); +*/ + +const struct protent ecp_protent = { + PPP_ECP, + ecp_init, + NULL, /* ecp_input, */ + NULL, /* ecp_protrej, */ + NULL, /* ecp_lowerup, */ + NULL, /* ecp_lowerdown, */ + NULL, /* ecp_open, */ + NULL, /* ecp_close, */ +#if PRINTPKT_SUPPORT + ecp_printpkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, /* ecp_datainput, */ +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "ECP", + "Encrypted", +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + ecp_option_list, + NULL, +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + NULL, + NULL +#endif /* DEMAND_SUPPORT */ +}; + +fsm ecp_fsm[NUM_PPP]; +ecp_options ecp_wantoptions[NUM_PPP]; /* what to request the peer to use */ +ecp_options ecp_gotoptions[NUM_PPP]; /* what the peer agreed to do */ +ecp_options ecp_allowoptions[NUM_PPP]; /* what we'll agree to do */ +ecp_options ecp_hisoptions[NUM_PPP]; /* what we agreed to do */ + +static const fsm_callbacks ecp_callbacks = { + NULL, /* ecp_resetci, */ + NULL, /* ecp_cilen, */ + NULL, /* ecp_addci, */ + NULL, /* ecp_ackci, */ + NULL, /* ecp_nakci, */ + NULL, /* ecp_rejci, */ + NULL, /* ecp_reqci, */ + NULL, /* ecp_up, */ + NULL, /* ecp_down, */ + NULL, + NULL, + NULL, + NULL, + NULL, /* ecp_extcode, */ + "ECP" +}; + +/* + * ecp_init - initialize ECP. + */ +static void +ecp_init(unit) + int unit; +{ + fsm *f = &ecp_fsm[unit]; + + f->unit = unit; + f->protocol = PPP_ECP; + f->callbacks = &ecp_callbacks; + fsm_init(f); + +#if 0 /* Not necessary, everything is cleared in ppp_new() */ + memset(&ecp_wantoptions[unit], 0, sizeof(ecp_options)); + memset(&ecp_gotoptions[unit], 0, sizeof(ecp_options)); + memset(&ecp_allowoptions[unit], 0, sizeof(ecp_options)); + memset(&ecp_hisoptions[unit], 0, sizeof(ecp_options)); +#endif /* 0 */ + +} + + +#if PRINTPKT_SUPPORT +static int +ecp_printpkt(p, plen, printer, arg) + const u_char *p; + int plen; + void (*printer) (void *, char *, ...); + void *arg; +{ + return 0; +} +#endif /* PRINTPKT_SUPPORT */ + +#endif /* PPP_SUPPORT && ECP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/eui64.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/eui64.c new file mode 100644 index 0000000..01493bc --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/eui64.c @@ -0,0 +1,56 @@ +/* + * eui64.c - EUI64 routines for IPv6CP. + * + * Copyright (c) 1999 Tommi Komulainen. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. 
The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Tommi Komulainen + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: eui64.c,v 1.6 2002/12/04 23:03:32 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPP_IPV6_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/eui64.h" + +/* + * eui64_ntoa - Make an ascii representation of an interface identifier + */ +char *eui64_ntoa(eui64_t e) { + static char buf[20]; + + sprintf(buf, "%02x%02x:%02x%02x:%02x%02x:%02x%02x", + e.e8[0], e.e8[1], e.e8[2], e.e8[3], + e.e8[4], e.e8[5], e.e8[6], e.e8[7]); + return buf; +} + +#endif /* PPP_SUPPORT && PPP_IPV6_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/fsm.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/fsm.c new file mode 100644 index 0000000..b1f08af --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/fsm.c @@ -0,0 +1,799 @@ +/* + * fsm.c - {Link, IP} Control Protocol Finite State Machine. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +/* + * @todo: + * Randomize fsm id on link/init. + * Deal with variable outgoing MTU. + */ + +#if 0 /* UNUSED */ +#include +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" + +static void fsm_timeout (void *); +static void fsm_rconfreq(fsm *f, u_char id, u_char *inp, int len); +static void fsm_rconfack(fsm *f, int id, u_char *inp, int len); +static void fsm_rconfnakrej(fsm *f, int code, int id, u_char *inp, int len); +static void fsm_rtermreq(fsm *f, int id, u_char *p, int len); +static void fsm_rtermack(fsm *f); +static void fsm_rcoderej(fsm *f, u_char *inp, int len); +static void fsm_sconfreq(fsm *f, int retransmit); + +#define PROTO_NAME(f) ((f)->callbacks->proto_name) + +/* + * fsm_init - Initialize fsm. + * + * Initialize fsm state. + */ +void fsm_init(fsm *f) { + ppp_pcb *pcb = f->pcb; + f->state = PPP_FSM_INITIAL; + f->flags = 0; + f->id = 0; /* XXX Start with random id? */ + f->maxnakloops = pcb->settings.fsm_max_nak_loops; + f->term_reason_len = 0; +} + + +/* + * fsm_lowerup - The lower layer is up. + */ +void fsm_lowerup(fsm *f) { + switch( f->state ){ + case PPP_FSM_INITIAL: + f->state = PPP_FSM_CLOSED; + break; + + case PPP_FSM_STARTING: + if( f->flags & OPT_SILENT ) + f->state = PPP_FSM_STOPPED; + else { + /* Send an initial configure-request */ + fsm_sconfreq(f, 0); + f->state = PPP_FSM_REQSENT; + } + break; + + default: + FSMDEBUG(("%s: Up event in state %d!", PROTO_NAME(f), f->state)); + /* no break */ + } +} + + +/* + * fsm_lowerdown - The lower layer is down. + * + * Cancel all timeouts and inform upper layers. + */ +void fsm_lowerdown(fsm *f) { + switch( f->state ){ + case PPP_FSM_CLOSED: + f->state = PPP_FSM_INITIAL; + break; + + case PPP_FSM_STOPPED: + f->state = PPP_FSM_STARTING; + if( f->callbacks->starting ) + (*f->callbacks->starting)(f); + break; + + case PPP_FSM_CLOSING: + f->state = PPP_FSM_INITIAL; + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + break; + + case PPP_FSM_STOPPING: + case PPP_FSM_REQSENT: + case PPP_FSM_ACKRCVD: + case PPP_FSM_ACKSENT: + f->state = PPP_FSM_STARTING; + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + break; + + case PPP_FSM_OPENED: + if( f->callbacks->down ) + (*f->callbacks->down)(f); + f->state = PPP_FSM_STARTING; + break; + + default: + FSMDEBUG(("%s: Down event in state %d!", PROTO_NAME(f), f->state)); + /* no break */ + } +} + + +/* + * fsm_open - Link is allowed to come up. + */ +void fsm_open(fsm *f) { + switch( f->state ){ + case PPP_FSM_INITIAL: + f->state = PPP_FSM_STARTING; + if( f->callbacks->starting ) + (*f->callbacks->starting)(f); + break; + + case PPP_FSM_CLOSED: + if( f->flags & OPT_SILENT ) + f->state = PPP_FSM_STOPPED; + else { + /* Send an initial configure-request */ + fsm_sconfreq(f, 0); + f->state = PPP_FSM_REQSENT; + } + break; + + case PPP_FSM_CLOSING: + f->state = PPP_FSM_STOPPING; + /* fall through */ + /* no break */ + case PPP_FSM_STOPPED: + case PPP_FSM_OPENED: + if( f->flags & OPT_RESTART ){ + fsm_lowerdown(f); + fsm_lowerup(f); + } + break; + default: + break; + } +} + +/* + * terminate_layer - Start process of shutting down the FSM + * + * Cancel any timeout running, notify upper layers we're done, and + * send a terminate-request message as configured. 
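A minimal sketch of how one control protocol drives the entry points above; my_fsm, my_callbacks, my_proto_init/my_proto_start and the protocol number 0x8001 are placeholders for illustration, not lwIP identifiers:

    static fsm my_fsm;                              /* one fsm per protocol instance */
    static const fsm_callbacks my_callbacks = {
        NULL, NULL, NULL, NULL, NULL, NULL, NULL,   /* resetci .. reqci: unused in this sketch */
        NULL, NULL, NULL, NULL, NULL, NULL, NULL,   /* up .. extcode: unused in this sketch */
        "MYPROTO"                                   /* proto_name used in debug traces */
    };

    static void my_proto_init(ppp_pcb *pcb) {
        my_fsm.pcb = pcb;
        my_fsm.protocol = 0x8001;                   /* placeholder PPP protocol number */
        my_fsm.callbacks = &my_callbacks;
        fsm_init(&my_fsm);                          /* state = PPP_FSM_INITIAL */
    }

    static void my_proto_start(void) {
        fsm_lowerup(&my_fsm);                       /* lower layer up: INITIAL -> CLOSED */
        fsm_open(&my_fsm);                          /* allowed to negotiate: sends a Configure-Request, -> REQSENT */
    }
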
+ */ +static void terminate_layer(fsm *f, int nextstate) { + ppp_pcb *pcb = f->pcb; + + if( f->state != PPP_FSM_OPENED ) + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + else if( f->callbacks->down ) + (*f->callbacks->down)(f); /* Inform upper layers we're down */ + + /* Init restart counter and send Terminate-Request */ + f->retransmits = pcb->settings.fsm_max_term_transmits; + fsm_sdata(f, TERMREQ, f->reqid = ++f->id, + (const u_char *) f->term_reason, f->term_reason_len); + + if (f->retransmits == 0) { + /* + * User asked for no terminate requests at all; just close it. + * We've already fired off one Terminate-Request just to be nice + * to the peer, but we're not going to wait for a reply. + */ + f->state = nextstate == PPP_FSM_CLOSING ? PPP_FSM_CLOSED : PPP_FSM_STOPPED; + if( f->callbacks->finished ) + (*f->callbacks->finished)(f); + return; + } + + TIMEOUT(fsm_timeout, f, pcb->settings.fsm_timeout_time); + --f->retransmits; + + f->state = nextstate; +} + +/* + * fsm_close - Start closing connection. + * + * Cancel timeouts and either initiate close or possibly go directly to + * the PPP_FSM_CLOSED state. + */ +void fsm_close(fsm *f, const char *reason) { + f->term_reason = reason; + f->term_reason_len = (reason == NULL? 0: (u8_t)LWIP_MIN(strlen(reason), 0xFF) ); + switch( f->state ){ + case PPP_FSM_STARTING: + f->state = PPP_FSM_INITIAL; + break; + case PPP_FSM_STOPPED: + f->state = PPP_FSM_CLOSED; + break; + case PPP_FSM_STOPPING: + f->state = PPP_FSM_CLOSING; + break; + + case PPP_FSM_REQSENT: + case PPP_FSM_ACKRCVD: + case PPP_FSM_ACKSENT: + case PPP_FSM_OPENED: + terminate_layer(f, PPP_FSM_CLOSING); + break; + default: + break; + } +} + + +/* + * fsm_timeout - Timeout expired. + */ +static void fsm_timeout(void *arg) { + fsm *f = (fsm *) arg; + ppp_pcb *pcb = f->pcb; + + switch (f->state) { + case PPP_FSM_CLOSING: + case PPP_FSM_STOPPING: + if( f->retransmits <= 0 ){ + /* + * We've waited for an ack long enough. Peer probably heard us. + */ + f->state = (f->state == PPP_FSM_CLOSING)? PPP_FSM_CLOSED: PPP_FSM_STOPPED; + if( f->callbacks->finished ) + (*f->callbacks->finished)(f); + } else { + /* Send Terminate-Request */ + fsm_sdata(f, TERMREQ, f->reqid = ++f->id, + (const u_char *) f->term_reason, f->term_reason_len); + TIMEOUT(fsm_timeout, f, pcb->settings.fsm_timeout_time); + --f->retransmits; + } + break; + + case PPP_FSM_REQSENT: + case PPP_FSM_ACKRCVD: + case PPP_FSM_ACKSENT: + if (f->retransmits <= 0) { + ppp_warn("%s: timeout sending Config-Requests", PROTO_NAME(f)); + f->state = PPP_FSM_STOPPED; + if( (f->flags & OPT_PASSIVE) == 0 && f->callbacks->finished ) + (*f->callbacks->finished)(f); + + } else { + /* Retransmit the configure-request */ + if (f->callbacks->retransmit) + (*f->callbacks->retransmit)(f); + fsm_sconfreq(f, 1); /* Re-send Configure-Request */ + if( f->state == PPP_FSM_ACKRCVD ) + f->state = PPP_FSM_REQSENT; + } + break; + + default: + FSMDEBUG(("%s: Timeout event in state %d!", PROTO_NAME(f), f->state)); + /* no break */ + } +} + + +/* + * fsm_input - Input packet. + */ +void fsm_input(fsm *f, u_char *inpacket, int l) { + u_char *inp; + u_char code, id; + int len; + + /* + * Parse header (code, id and length). + * If packet too short, drop it. 
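For reference, the header that fsm_input() parses below is the standard code/id/length layout from RFC 1661, with the 16-bit length counting the header itself. A self-contained sketch of the same GETCHAR/GETSHORT sequence on invented example bytes:

    static int parse_header_example(void) {
        /* Configure-Request (code 1), id 1, length 8: 4-byte header plus one 4-byte option. */
        u_char pkt[] = { 0x01, 0x01, 0x00, 0x08, 0x02, 0x04, 0x00, 0x2d };
        u_char *q = pkt;
        u_char code, id;
        int len;
        GETCHAR(code, q);       /* code = 1 (CONFREQ) */
        GETCHAR(id, q);         /* id   = 1 */
        GETSHORT(len, q);       /* len  = 8, header included */
        return len - HEADERLEN; /* 4 bytes of options are left for the reqci callback */
    }
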
+ */ + inp = inpacket; + if (l < HEADERLEN) { + FSMDEBUG(("fsm_input(%x): Rcvd short header.", f->protocol)); + return; + } + GETCHAR(code, inp); + GETCHAR(id, inp); + GETSHORT(len, inp); + if (len < HEADERLEN) { + FSMDEBUG(("fsm_input(%x): Rcvd illegal length.", f->protocol)); + return; + } + if (len > l) { + FSMDEBUG(("fsm_input(%x): Rcvd short packet.", f->protocol)); + return; + } + len -= HEADERLEN; /* subtract header length */ + + if( f->state == PPP_FSM_INITIAL || f->state == PPP_FSM_STARTING ){ + FSMDEBUG(("fsm_input(%x): Rcvd packet in state %d.", + f->protocol, f->state)); + return; + } + + /* + * Action depends on code. + */ + switch (code) { + case CONFREQ: + fsm_rconfreq(f, id, inp, len); + break; + + case CONFACK: + fsm_rconfack(f, id, inp, len); + break; + + case CONFNAK: + case CONFREJ: + fsm_rconfnakrej(f, code, id, inp, len); + break; + + case TERMREQ: + fsm_rtermreq(f, id, inp, len); + break; + + case TERMACK: + fsm_rtermack(f); + break; + + case CODEREJ: + fsm_rcoderej(f, inp, len); + break; + + default: + if( !f->callbacks->extcode + || !(*f->callbacks->extcode)(f, code, id, inp, len) ) + fsm_sdata(f, CODEREJ, ++f->id, inpacket, len + HEADERLEN); + break; + } +} + + +/* + * fsm_rconfreq - Receive Configure-Request. + */ +static void fsm_rconfreq(fsm *f, u_char id, u_char *inp, int len) { + int code, reject_if_disagree; + + switch( f->state ){ + case PPP_FSM_CLOSED: + /* Go away, we're closed */ + fsm_sdata(f, TERMACK, id, NULL, 0); + return; + case PPP_FSM_CLOSING: + case PPP_FSM_STOPPING: + return; + + case PPP_FSM_OPENED: + /* Go down and restart negotiation */ + if( f->callbacks->down ) + (*f->callbacks->down)(f); /* Inform upper layers */ + fsm_sconfreq(f, 0); /* Send initial Configure-Request */ + f->state = PPP_FSM_REQSENT; + break; + + case PPP_FSM_STOPPED: + /* Negotiation started by our peer */ + fsm_sconfreq(f, 0); /* Send initial Configure-Request */ + f->state = PPP_FSM_REQSENT; + break; + default: + break; + } + + /* + * Pass the requested configuration options + * to protocol-specific code for checking. + */ + if (f->callbacks->reqci){ /* Check CI */ + reject_if_disagree = (f->nakloops >= f->maxnakloops); + code = (*f->callbacks->reqci)(f, inp, &len, reject_if_disagree); + } else if (len) + code = CONFREJ; /* Reject all CI */ + else + code = CONFACK; + + /* send the Ack, Nak or Rej to the peer */ + fsm_sdata(f, code, id, inp, len); + + if (code == CONFACK) { + if (f->state == PPP_FSM_ACKRCVD) { + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + f->state = PPP_FSM_OPENED; + if (f->callbacks->up) + (*f->callbacks->up)(f); /* Inform upper layers */ + } else + f->state = PPP_FSM_ACKSENT; + f->nakloops = 0; + + } else { + /* we sent CONFACK or CONFREJ */ + if (f->state != PPP_FSM_ACKRCVD) + f->state = PPP_FSM_REQSENT; + if( code == CONFNAK ) + ++f->nakloops; + } +} + + +/* + * fsm_rconfack - Receive Configure-Ack. + */ +static void fsm_rconfack(fsm *f, int id, u_char *inp, int len) { + ppp_pcb *pcb = f->pcb; + + if (id != f->reqid || f->seen_ack) /* Expected id? */ + return; /* Nope, toss... */ + if( !(f->callbacks->ackci? 
(*f->callbacks->ackci)(f, inp, len): + (len == 0)) ){ + /* Ack is bad - ignore it */ + ppp_error("Received bad configure-ack: %P", inp, len); + return; + } + f->seen_ack = 1; + f->rnakloops = 0; + + switch (f->state) { + case PPP_FSM_CLOSED: + case PPP_FSM_STOPPED: + fsm_sdata(f, TERMACK, id, NULL, 0); + break; + + case PPP_FSM_REQSENT: + f->state = PPP_FSM_ACKRCVD; + f->retransmits = pcb->settings.fsm_max_conf_req_transmits; + break; + + case PPP_FSM_ACKRCVD: + /* Huh? an extra valid Ack? oh well... */ + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + fsm_sconfreq(f, 0); + f->state = PPP_FSM_REQSENT; + break; + + case PPP_FSM_ACKSENT: + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + f->state = PPP_FSM_OPENED; + f->retransmits = pcb->settings.fsm_max_conf_req_transmits; + if (f->callbacks->up) + (*f->callbacks->up)(f); /* Inform upper layers */ + break; + + case PPP_FSM_OPENED: + /* Go down and restart negotiation */ + if (f->callbacks->down) + (*f->callbacks->down)(f); /* Inform upper layers */ + fsm_sconfreq(f, 0); /* Send initial Configure-Request */ + f->state = PPP_FSM_REQSENT; + break; + default: + break; + } +} + + +/* + * fsm_rconfnakrej - Receive Configure-Nak or Configure-Reject. + */ +static void fsm_rconfnakrej(fsm *f, int code, int id, u_char *inp, int len) { + int ret; + int treat_as_reject; + + if (id != f->reqid || f->seen_ack) /* Expected id? */ + return; /* Nope, toss... */ + + if (code == CONFNAK) { + ++f->rnakloops; + treat_as_reject = (f->rnakloops >= f->maxnakloops); + if (f->callbacks->nakci == NULL + || !(ret = f->callbacks->nakci(f, inp, len, treat_as_reject))) { + ppp_error("Received bad configure-nak: %P", inp, len); + return; + } + } else { + f->rnakloops = 0; + if (f->callbacks->rejci == NULL + || !(ret = f->callbacks->rejci(f, inp, len))) { + ppp_error("Received bad configure-rej: %P", inp, len); + return; + } + } + + f->seen_ack = 1; + + switch (f->state) { + case PPP_FSM_CLOSED: + case PPP_FSM_STOPPED: + fsm_sdata(f, TERMACK, id, NULL, 0); + break; + + case PPP_FSM_REQSENT: + case PPP_FSM_ACKSENT: + /* They didn't agree to what we wanted - try another request */ + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + if (ret < 0) + f->state = PPP_FSM_STOPPED; /* kludge for stopping CCP */ + else + fsm_sconfreq(f, 0); /* Send Configure-Request */ + break; + + case PPP_FSM_ACKRCVD: + /* Got a Nak/reject when we had already had an Ack?? oh well... */ + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + fsm_sconfreq(f, 0); + f->state = PPP_FSM_REQSENT; + break; + + case PPP_FSM_OPENED: + /* Go down and restart negotiation */ + if (f->callbacks->down) + (*f->callbacks->down)(f); /* Inform upper layers */ + fsm_sconfreq(f, 0); /* Send initial Configure-Request */ + f->state = PPP_FSM_REQSENT; + break; + default: + break; + } +} + + +/* + * fsm_rtermreq - Receive Terminate-Req. 
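fsm_rconfnakrej() above leaves the interpretation of a Nak to the protocol's nakci callback. A hedged sketch of that contract (my_nakci is a placeholder, not an lwIP function): return 0 for a malformed Nak, a positive value when it was handled, and a negative value to force the FSM into STOPPED (the "kludge for stopping CCP" case above).

    static int my_nakci(fsm *f, u_char *p, int len, int treat_as_reject) {
        LWIP_UNUSED_ARG(f);
        LWIP_UNUSED_ARG(p);
        if (len < 2)
            return 0;           /* too short for an option header: fsm_rconfnakrej() ignores it */
        if (treat_as_reject) {
            /* too many Nak loops: drop the contested options instead of retrying them */
        }
        return 1;               /* handled: the FSM sends a fresh Configure-Request */
    }
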
+ */ +static void fsm_rtermreq(fsm *f, int id, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + + switch (f->state) { + case PPP_FSM_ACKRCVD: + case PPP_FSM_ACKSENT: + f->state = PPP_FSM_REQSENT; /* Start over but keep trying */ + break; + + case PPP_FSM_OPENED: + if (len > 0) { + ppp_info("%s terminated by peer (%0.*v)", PROTO_NAME(f), len, p); + } else + ppp_info("%s terminated by peer", PROTO_NAME(f)); + f->retransmits = 0; + f->state = PPP_FSM_STOPPING; + if (f->callbacks->down) + (*f->callbacks->down)(f); /* Inform upper layers */ + TIMEOUT(fsm_timeout, f, pcb->settings.fsm_timeout_time); + break; + default: + break; + } + + fsm_sdata(f, TERMACK, id, NULL, 0); +} + + +/* + * fsm_rtermack - Receive Terminate-Ack. + */ +static void fsm_rtermack(fsm *f) { + switch (f->state) { + case PPP_FSM_CLOSING: + UNTIMEOUT(fsm_timeout, f); + f->state = PPP_FSM_CLOSED; + if( f->callbacks->finished ) + (*f->callbacks->finished)(f); + break; + case PPP_FSM_STOPPING: + UNTIMEOUT(fsm_timeout, f); + f->state = PPP_FSM_STOPPED; + if( f->callbacks->finished ) + (*f->callbacks->finished)(f); + break; + + case PPP_FSM_ACKRCVD: + f->state = PPP_FSM_REQSENT; + break; + + case PPP_FSM_OPENED: + if (f->callbacks->down) + (*f->callbacks->down)(f); /* Inform upper layers */ + fsm_sconfreq(f, 0); + f->state = PPP_FSM_REQSENT; + break; + default: + break; + } +} + + +/* + * fsm_rcoderej - Receive an Code-Reject. + */ +static void fsm_rcoderej(fsm *f, u_char *inp, int len) { + u_char code, id; + + if (len < HEADERLEN) { + FSMDEBUG(("fsm_rcoderej: Rcvd short Code-Reject packet!")); + return; + } + GETCHAR(code, inp); + GETCHAR(id, inp); + ppp_warn("%s: Rcvd Code-Reject for code %d, id %d", PROTO_NAME(f), code, id); + + if( f->state == PPP_FSM_ACKRCVD ) + f->state = PPP_FSM_REQSENT; +} + + +/* + * fsm_protreject - Peer doesn't speak this protocol. + * + * Treat this as a catastrophic error (RXJ-). + */ +void fsm_protreject(fsm *f) { + switch( f->state ){ + case PPP_FSM_CLOSING: + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + /* fall through */ + /* no break */ + case PPP_FSM_CLOSED: + f->state = PPP_FSM_CLOSED; + if( f->callbacks->finished ) + (*f->callbacks->finished)(f); + break; + + case PPP_FSM_STOPPING: + case PPP_FSM_REQSENT: + case PPP_FSM_ACKRCVD: + case PPP_FSM_ACKSENT: + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + /* fall through */ + /* no break */ + case PPP_FSM_STOPPED: + f->state = PPP_FSM_STOPPED; + if( f->callbacks->finished ) + (*f->callbacks->finished)(f); + break; + + case PPP_FSM_OPENED: + terminate_layer(f, PPP_FSM_STOPPING); + break; + + default: + FSMDEBUG(("%s: Protocol-reject event in state %d!", + PROTO_NAME(f), f->state)); + /* no break */ + } +} + + +/* + * fsm_sconfreq - Send a Configure-Request. 
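The function below builds the request in a single pbuf: PPP framing first, then the code/id/length header, then the options supplied by the protocol's addci callback. A sketch of the sizes involved (cilen stands for the value returned by the cilen callback, already clamped to peer_mru - HEADERLEN):

    static u16_t confreq_pbuf_len(int cilen) {
        /* [ PPP header, PPP_HDRLEN ][ code | id | length, HEADERLEN = 4 ][ cilen option bytes ]
         * The on-wire length field is cilen + HEADERLEN; the PPP header is not counted. */
        return (u16_t)(cilen + HEADERLEN + PPP_HDRLEN);   /* size passed to pbuf_alloc() */
    }
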
+ */ +static void fsm_sconfreq(fsm *f, int retransmit) { + ppp_pcb *pcb = f->pcb; + struct pbuf *p; + u_char *outp; + int cilen; + + if( f->state != PPP_FSM_REQSENT && f->state != PPP_FSM_ACKRCVD && f->state != PPP_FSM_ACKSENT ){ + /* Not currently negotiating - reset options */ + if( f->callbacks->resetci ) + (*f->callbacks->resetci)(f); + f->nakloops = 0; + f->rnakloops = 0; + } + + if( !retransmit ){ + /* New request - reset retransmission counter, use new ID */ + f->retransmits = pcb->settings.fsm_max_conf_req_transmits; + f->reqid = ++f->id; + } + + f->seen_ack = 0; + + /* + * Make up the request packet + */ + if( f->callbacks->cilen && f->callbacks->addci ){ + cilen = (*f->callbacks->cilen)(f); + if( cilen > pcb->peer_mru - HEADERLEN ) + cilen = pcb->peer_mru - HEADERLEN; + } else + cilen = 0; + + p = pbuf_alloc(PBUF_RAW, (u16_t)(cilen + HEADERLEN + PPP_HDRLEN), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + /* send the request to our peer */ + outp = (u_char*)p->payload; + MAKEHEADER(outp, f->protocol); + PUTCHAR(CONFREQ, outp); + PUTCHAR(f->reqid, outp); + PUTSHORT(cilen + HEADERLEN, outp); + if (cilen != 0) { + (*f->callbacks->addci)(f, outp, &cilen); + LWIP_ASSERT("cilen == p->len - HEADERLEN - PPP_HDRLEN", cilen == p->len - HEADERLEN - PPP_HDRLEN); + } + + ppp_write(pcb, p); + + /* start the retransmit timer */ + --f->retransmits; + TIMEOUT(fsm_timeout, f, pcb->settings.fsm_timeout_time); +} + + +/* + * fsm_sdata - Send some data. + * + * Used for all packets sent to our peer by this module. + */ +void fsm_sdata(fsm *f, u_char code, u_char id, const u_char *data, int datalen) { + ppp_pcb *pcb = f->pcb; + struct pbuf *p; + u_char *outp; + int outlen; + + /* Adjust length to be smaller than MTU */ + if (datalen > pcb->peer_mru - HEADERLEN) + datalen = pcb->peer_mru - HEADERLEN; + outlen = datalen + HEADERLEN; + + p = pbuf_alloc(PBUF_RAW, (u16_t)(outlen + PPP_HDRLEN), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + if (datalen) /* && data != outp + PPP_HDRLEN + HEADERLEN) -- was only for fsm_sconfreq() */ + MEMCPY(outp + PPP_HDRLEN + HEADERLEN, data, datalen); + MAKEHEADER(outp, f->protocol); + PUTCHAR(code, outp); + PUTCHAR(id, outp); + PUTSHORT(outlen, outp); + ppp_write(pcb, p); +} + +#endif /* PPP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/ipcp.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/ipcp.c new file mode 100644 index 0000000..b7c766e --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/ipcp.c @@ -0,0 +1,2418 @@ +/* + * ipcp.c - PPP IP Control Protocol. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. 
For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPP_IPV4_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +/* + * @todo: + */ + +#if 0 /* UNUSED */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/ipcp.h" + +#if 0 /* UNUSED */ +/* global vars */ +u32_t netmask = 0; /* IP netmask to set on interface */ +#endif /* UNUSED */ + +#if 0 /* UNUSED */ +bool disable_defaultip = 0; /* Don't use hostname for default IP adrs */ +#endif /* UNUSED */ + +#if 0 /* moved to ppp_settings */ +bool noremoteip = 0; /* Let him have no IP address */ +#endif /* moved to ppp_setting */ + +#if 0 /* UNUSED */ +/* Hook for a plugin to know when IP protocol has come up */ +void (*ip_up_hook) (void) = NULL; + +/* Hook for a plugin to know when IP protocol has come down */ +void (*ip_down_hook) (void) = NULL; + +/* Hook for a plugin to choose the remote IP address */ +void (*ip_choose_hook) (u32_t *) = NULL; +#endif /* UNUSED */ + +#if PPP_NOTIFY +/* Notifiers for when IPCP goes up and down */ +struct notifier *ip_up_notifier = NULL; +struct notifier *ip_down_notifier = NULL; +#endif /* PPP_NOTIFY */ + +/* local vars */ +#if 0 /* moved to ppp_pcb */ +static int default_route_set[NUM_PPP]; /* Have set up a default route */ +static int proxy_arp_set[NUM_PPP]; /* Have created proxy arp entry */ +static int ipcp_is_up; /* have called np_up() */ +static int ipcp_is_open; /* haven't called np_finished() */ +static bool ask_for_local; /* request our address from peer */ +#endif /* moved to ppp_pcb */ +#if 0 /* UNUSED */ +static char vj_value[8]; /* string form of vj option value */ +static char netmask_str[20]; /* string form of netmask value */ +#endif /* UNUSED */ + +/* + * Callbacks for fsm code. 
(CI = Configuration Information) + */ +static void ipcp_resetci(fsm *f); /* Reset our CI */ +static int ipcp_cilen(fsm *f); /* Return length of our CI */ +static void ipcp_addci(fsm *f, u_char *ucp, int *lenp); /* Add our CI */ +static int ipcp_ackci(fsm *f, u_char *p, int len); /* Peer ack'd our CI */ +static int ipcp_nakci(fsm *f, u_char *p, int len, int treat_as_reject);/* Peer nak'd our CI */ +static int ipcp_rejci(fsm *f, u_char *p, int len); /* Peer rej'd our CI */ +static int ipcp_reqci(fsm *f, u_char *inp, int *len, int reject_if_disagree); /* Rcv CI */ +static void ipcp_up(fsm *f); /* We're UP */ +static void ipcp_down(fsm *f); /* We're DOWN */ +static void ipcp_finished(fsm *f); /* Don't need lower layer */ + +static const fsm_callbacks ipcp_callbacks = { /* IPCP callback routines */ + ipcp_resetci, /* Reset our Configuration Information */ + ipcp_cilen, /* Length of our Configuration Information */ + ipcp_addci, /* Add our Configuration Information */ + ipcp_ackci, /* ACK our Configuration Information */ + ipcp_nakci, /* NAK our Configuration Information */ + ipcp_rejci, /* Reject our Configuration Information */ + ipcp_reqci, /* Request peer's Configuration Information */ + ipcp_up, /* Called when fsm reaches OPENED state */ + ipcp_down, /* Called when fsm leaves OPENED state */ + NULL, /* Called when we want the lower layer up */ + ipcp_finished, /* Called when we want the lower layer down */ + NULL, /* Called when Protocol-Reject received */ + NULL, /* Retransmission is necessary */ + NULL, /* Called to handle protocol-specific codes */ + "IPCP" /* String name of protocol */ +}; + +/* + * Command-line options. + */ +#if PPP_OPTIONS +static int setvjslots (char **); +static int setdnsaddr (char **); +static int setwinsaddr (char **); +static int setnetmask (char **); +int setipaddr (char *, char **, int); + +static void printipaddr (option_t *, void (*)(void *, char *,...),void *); + +static option_t ipcp_option_list[] = { + { "noip", o_bool, &ipcp_protent.enabled_flag, + "Disable IP and IPCP" }, + { "-ip", o_bool, &ipcp_protent.enabled_flag, + "Disable IP and IPCP", OPT_ALIAS }, + + { "novj", o_bool, &ipcp_wantoptions[0].neg_vj, + "Disable VJ compression", OPT_A2CLR, &ipcp_allowoptions[0].neg_vj }, + { "-vj", o_bool, &ipcp_wantoptions[0].neg_vj, + "Disable VJ compression", OPT_ALIAS | OPT_A2CLR, + &ipcp_allowoptions[0].neg_vj }, + + { "novjccomp", o_bool, &ipcp_wantoptions[0].cflag, + "Disable VJ connection-ID compression", OPT_A2CLR, + &ipcp_allowoptions[0].cflag }, + { "-vjccomp", o_bool, &ipcp_wantoptions[0].cflag, + "Disable VJ connection-ID compression", OPT_ALIAS | OPT_A2CLR, + &ipcp_allowoptions[0].cflag }, + + { "vj-max-slots", o_special, (void *)setvjslots, + "Set maximum VJ header slots", + OPT_PRIO | OPT_A2STRVAL | OPT_STATIC, vj_value }, + + { "ipcp-accept-local", o_bool, &ipcp_wantoptions[0].accept_local, + "Accept peer's address for us", 1 }, + { "ipcp-accept-remote", o_bool, &ipcp_wantoptions[0].accept_remote, + "Accept peer's address for it", 1 }, + + { "ipparam", o_string, &ipparam, + "Set ip script parameter", OPT_PRIO }, + + { "noipdefault", o_bool, &disable_defaultip, + "Don't use name for default IP adrs", 1 }, + + { "ms-dns", 1, (void *)setdnsaddr, + "DNS address for the peer's use" }, + { "ms-wins", 1, (void *)setwinsaddr, + "Nameserver for SMB over TCP/IP for peer" }, + + { "ipcp-restart", o_int, &ipcp_fsm[0].timeouttime, + "Set timeout for IPCP", OPT_PRIO }, + { "ipcp-max-terminate", o_int, &ipcp_fsm[0].maxtermtransmits, + "Set max #xmits for 
term-reqs", OPT_PRIO }, + { "ipcp-max-configure", o_int, &ipcp_fsm[0].maxconfreqtransmits, + "Set max #xmits for conf-reqs", OPT_PRIO }, + { "ipcp-max-failure", o_int, &ipcp_fsm[0].maxnakloops, + "Set max #conf-naks for IPCP", OPT_PRIO }, + + { "defaultroute", o_bool, &ipcp_wantoptions[0].default_route, + "Add default route", OPT_ENABLE|1, &ipcp_allowoptions[0].default_route }, + { "nodefaultroute", o_bool, &ipcp_allowoptions[0].default_route, + "disable defaultroute option", OPT_A2CLR, + &ipcp_wantoptions[0].default_route }, + { "-defaultroute", o_bool, &ipcp_allowoptions[0].default_route, + "disable defaultroute option", OPT_ALIAS | OPT_A2CLR, + &ipcp_wantoptions[0].default_route }, + + { "replacedefaultroute", o_bool, + &ipcp_wantoptions[0].replace_default_route, + "Replace default route", 1 + }, + { "noreplacedefaultroute", o_bool, + &ipcp_allowoptions[0].replace_default_route, + "Never replace default route", OPT_A2COPY, + &ipcp_wantoptions[0].replace_default_route }, + { "proxyarp", o_bool, &ipcp_wantoptions[0].proxy_arp, + "Add proxy ARP entry", OPT_ENABLE|1, &ipcp_allowoptions[0].proxy_arp }, + { "noproxyarp", o_bool, &ipcp_allowoptions[0].proxy_arp, + "disable proxyarp option", OPT_A2CLR, + &ipcp_wantoptions[0].proxy_arp }, + { "-proxyarp", o_bool, &ipcp_allowoptions[0].proxy_arp, + "disable proxyarp option", OPT_ALIAS | OPT_A2CLR, + &ipcp_wantoptions[0].proxy_arp }, + + { "usepeerdns", o_bool, &usepeerdns, + "Ask peer for DNS address(es)", 1 }, + + { "netmask", o_special, (void *)setnetmask, + "set netmask", OPT_PRIO | OPT_A2STRVAL | OPT_STATIC, netmask_str }, + + { "ipcp-no-addresses", o_bool, &ipcp_wantoptions[0].old_addrs, + "Disable old-style IP-Addresses usage", OPT_A2CLR, + &ipcp_allowoptions[0].old_addrs }, + { "ipcp-no-address", o_bool, &ipcp_wantoptions[0].neg_addr, + "Disable IP-Address usage", OPT_A2CLR, + &ipcp_allowoptions[0].neg_addr }, + + { "noremoteip", o_bool, &noremoteip, + "Allow peer to have no IP address", 1 }, + + { "nosendip", o_bool, &ipcp_wantoptions[0].neg_addr, + "Don't send our IP address to peer", OPT_A2CLR, + &ipcp_wantoptions[0].old_addrs}, + + { "IP addresses", o_wild, (void *) &setipaddr, + "set local and remote IP addresses", + OPT_NOARG | OPT_A2PRINTER, (void *) &printipaddr }, + + { NULL } +}; +#endif /* PPP_OPTIONS */ + +/* + * Protocol entry points from main code. 
+ */ +static void ipcp_init(ppp_pcb *pcb); +static void ipcp_open(ppp_pcb *pcb); +static void ipcp_close(ppp_pcb *pcb, const char *reason); +static void ipcp_lowerup(ppp_pcb *pcb); +static void ipcp_lowerdown(ppp_pcb *pcb); +static void ipcp_input(ppp_pcb *pcb, u_char *p, int len); +static void ipcp_protrej(ppp_pcb *pcb); +#if PRINTPKT_SUPPORT +static int ipcp_printpkt(const u_char *p, int plen, + void (*printer) (void *, const char *, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS +static void ip_check_options (void); +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT +static int ip_demand_conf (int); +static int ip_active_pkt (u_char *, int); +#endif /* DEMAND_SUPPORT */ +#if 0 /* UNUSED */ +static void create_resolv (u32_t, u32_t); +#endif /* UNUSED */ + +const struct protent ipcp_protent = { + PPP_IPCP, + ipcp_init, + ipcp_input, + ipcp_protrej, + ipcp_lowerup, + ipcp_lowerdown, + ipcp_open, + ipcp_close, +#if PRINTPKT_SUPPORT + ipcp_printpkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "IPCP", + "IP", +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + ipcp_option_list, + ip_check_options, +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + ip_demand_conf, + ip_active_pkt +#endif /* DEMAND_SUPPORT */ +}; + +static void ipcp_clear_addrs(ppp_pcb *pcb, u32_t ouraddr, u32_t hisaddr, u8_t replacedefaultroute); + +/* + * Lengths of configuration options. + */ +#define CILEN_VOID 2 +#define CILEN_COMPRESS 4 /* min length for compression protocol opt. */ +#define CILEN_VJ 6 /* length for RFC1332 Van-Jacobson opt. */ +#define CILEN_ADDR 6 /* new-style single address option */ +#define CILEN_ADDRS 10 /* old-style dual address option */ + + +#define CODENAME(x) ((x) == CONFACK ? "ACK" : \ + (x) == CONFNAK ? "NAK" : "REJ") + +#if 0 /* UNUSED, already defined by lwIP */ +/* + * Make a string representation of a network IP address. + */ +char * +ip_ntoa(ipaddr) +u32_t ipaddr; +{ + static char b[64]; + + slprintf(b, sizeof(b), "%I", ipaddr); + return b; +} +#endif /* UNUSED, already defined by lwIP */ + +/* + * Option parsing. + */ +#if PPP_OPTIONS +/* + * setvjslots - set maximum number of connection slots for VJ compression + */ +static int +setvjslots(argv) + char **argv; +{ + int value; + + if (!int_option(*argv, &value)) + return 0; + + if (value < 2 || value > 16) { + option_error("vj-max-slots value must be between 2 and 16"); + return 0; + } + ipcp_wantoptions [0].maxslotindex = + ipcp_allowoptions[0].maxslotindex = value - 1; + slprintf(vj_value, sizeof(vj_value), "%d", value); + return 1; +} + +/* + * setdnsaddr - set the dns address(es) + */ +static int +setdnsaddr(argv) + char **argv; +{ + u32_t dns; + struct hostent *hp; + + dns = inet_addr(*argv); + if (dns == (u32_t) -1) { + if ((hp = gethostbyname(*argv)) == NULL) { + option_error("invalid address parameter '%s' for ms-dns option", + *argv); + return 0; + } + dns = *(u32_t *)hp->h_addr; + } + + /* We take the last 2 values given, the 2nd-last as the primary + and the last as the secondary. If only one is given it + becomes both primary and secondary. */ + if (ipcp_allowoptions[0].dnsaddr[1] == 0) + ipcp_allowoptions[0].dnsaddr[0] = dns; + else + ipcp_allowoptions[0].dnsaddr[0] = ipcp_allowoptions[0].dnsaddr[1]; + + /* always set the secondary address value. 
*/ + ipcp_allowoptions[0].dnsaddr[1] = dns; + + return (1); +} + +/* + * setwinsaddr - set the wins address(es) + * This is primrarly used with the Samba package under UNIX or for pointing + * the caller to the existing WINS server on a Windows NT platform. + */ +static int +setwinsaddr(argv) + char **argv; +{ + u32_t wins; + struct hostent *hp; + + wins = inet_addr(*argv); + if (wins == (u32_t) -1) { + if ((hp = gethostbyname(*argv)) == NULL) { + option_error("invalid address parameter '%s' for ms-wins option", + *argv); + return 0; + } + wins = *(u32_t *)hp->h_addr; + } + + /* We take the last 2 values given, the 2nd-last as the primary + and the last as the secondary. If only one is given it + becomes both primary and secondary. */ + if (ipcp_allowoptions[0].winsaddr[1] == 0) + ipcp_allowoptions[0].winsaddr[0] = wins; + else + ipcp_allowoptions[0].winsaddr[0] = ipcp_allowoptions[0].winsaddr[1]; + + /* always set the secondary address value. */ + ipcp_allowoptions[0].winsaddr[1] = wins; + + return (1); +} + +/* + * setipaddr - Set the IP address + * If doit is 0, the call is to check whether this option is + * potentially an IP address specification. + * Not static so that plugins can call it to set the addresses + */ +int +setipaddr(arg, argv, doit) + char *arg; + char **argv; + int doit; +{ + struct hostent *hp; + char *colon; + u32_t local, remote; + ipcp_options *wo = &ipcp_wantoptions[0]; + static int prio_local = 0, prio_remote = 0; + + /* + * IP address pair separated by ":". + */ + if ((colon = strchr(arg, ':')) == NULL) + return 0; + if (!doit) + return 1; + + /* + * If colon first character, then no local addr. + */ + if (colon != arg && option_priority >= prio_local) { + *colon = '\0'; + if ((local = inet_addr(arg)) == (u32_t) -1) { + if ((hp = gethostbyname(arg)) == NULL) { + option_error("unknown host: %s", arg); + return 0; + } + local = *(u32_t *)hp->h_addr; + } + if (bad_ip_adrs(local)) { + option_error("bad local IP address %s", ip_ntoa(local)); + return 0; + } + if (local != 0) + wo->ouraddr = local; + *colon = ':'; + prio_local = option_priority; + } + + /* + * If colon last character, then no remote addr. + */ + if (*++colon != '\0' && option_priority >= prio_remote) { + if ((remote = inet_addr(colon)) == (u32_t) -1) { + if ((hp = gethostbyname(colon)) == NULL) { + option_error("unknown host: %s", colon); + return 0; + } + remote = *(u32_t *)hp->h_addr; + if (remote_name[0] == 0) + strlcpy(remote_name, colon, sizeof(remote_name)); + } + if (bad_ip_adrs(remote)) { + option_error("bad remote IP address %s", ip_ntoa(remote)); + return 0; + } + if (remote != 0) + wo->hisaddr = remote; + prio_remote = option_priority; + } + + return 1; +} + +static void +printipaddr(opt, printer, arg) + option_t *opt; + void (*printer) (void *, char *, ...); + void *arg; +{ + ipcp_options *wo = &ipcp_wantoptions[0]; + + if (wo->ouraddr != 0) + printer(arg, "%I", wo->ouraddr); + printer(arg, ":"); + if (wo->hisaddr != 0) + printer(arg, "%I", wo->hisaddr); +} + +/* + * setnetmask - set the netmask to be used on the interface. + */ +static int +setnetmask(argv) + char **argv; +{ + u32_t mask; + int n; + char *p; + + /* + * Unfortunately, if we use inet_addr, we can't tell whether + * a result of all 1s is an error or a valid 255.255.255.255. 
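Tying the pieces together, setipaddr() above accepts a writable "local:remote" pair. A sketch of a caller, only meaningful when PPP_OPTIONS is enabled (ip_pair_example and the addresses are illustrative, not part of the file):

    static void ip_pair_example(void) {
        char pair[] = "10.0.0.1:10.0.0.2";   /* must be writable: the ':' is patched in place */
        char *argv_unused = NULL;
        if (setipaddr(pair, &argv_unused, 1)) {
            /* ipcp_wantoptions[0].ouraddr is now 10.0.0.1 and .hisaddr 10.0.0.2;
             * ":10.0.0.2" or "10.0.0.1:" would set only one side of the pair. */
        }
    }
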
+ */ + p = *argv; + n = parse_dotted_ip(p, &mask); + + mask = lwip_htonl(mask); + + if (n == 0 || p[n] != 0 || (netmask & ~mask) != 0) { + option_error("invalid netmask value '%s'", *argv); + return 0; + } + + netmask = mask; + slprintf(netmask_str, sizeof(netmask_str), "%I", mask); + + return (1); +} + +int +parse_dotted_ip(p, vp) + char *p; + u32_t *vp; +{ + int n; + u32_t v, b; + char *endp, *p0 = p; + + v = 0; + for (n = 3;; --n) { + b = strtoul(p, &endp, 0); + if (endp == p) + return 0; + if (b > 255) { + if (n < 3) + return 0; + /* accept e.g. 0xffffff00 */ + *vp = b; + return endp - p0; + } + v |= b << (n * 8); + p = endp; + if (n == 0) + break; + if (*p != '.') + return 0; + ++p; + } + *vp = v; + return p - p0; +} +#endif /* PPP_OPTIONS */ + +/* + * ipcp_init - Initialize IPCP. + */ +static void ipcp_init(ppp_pcb *pcb) { + fsm *f = &pcb->ipcp_fsm; + + ipcp_options *wo = &pcb->ipcp_wantoptions; + ipcp_options *ao = &pcb->ipcp_allowoptions; + + f->pcb = pcb; + f->protocol = PPP_IPCP; + f->callbacks = &ipcp_callbacks; + fsm_init(f); + + /* + * Some 3G modems use repeated IPCP NAKs as a way of stalling + * until they can contact a server on the network, so we increase + * the default number of NAKs we accept before we start treating + * them as rejects. + */ + f->maxnakloops = 100; + +#if 0 /* Not necessary, everything is cleared in ppp_new() */ + memset(wo, 0, sizeof(*wo)); + memset(ao, 0, sizeof(*ao)); +#endif /* 0 */ + + wo->neg_addr = wo->old_addrs = 1; +#if VJ_SUPPORT + wo->neg_vj = 1; + wo->vj_protocol = IPCP_VJ_COMP; + wo->maxslotindex = MAX_STATES - 1; /* really max index */ + wo->cflag = 1; +#endif /* VJ_SUPPORT */ + +#if 0 /* UNUSED */ + /* wanting default route by default */ + wo->default_route = 1; +#endif /* UNUSED */ + + ao->neg_addr = ao->old_addrs = 1; +#if VJ_SUPPORT + /* max slots and slot-id compression are currently hardwired in */ + /* ppp_if.c to 16 and 1, this needs to be changed (among other */ + /* things) gmc */ + + ao->neg_vj = 1; + ao->maxslotindex = MAX_STATES - 1; + ao->cflag = 1; +#endif /* #if VJ_SUPPORT */ + +#if 0 /* UNUSED */ + /* + * XXX These control whether the user may use the proxyarp + * and defaultroute options. + */ + ao->proxy_arp = 1; + ao->default_route = 1; +#endif /* UNUSED */ +} + + +/* + * ipcp_open - IPCP is allowed to come up. + */ +static void ipcp_open(ppp_pcb *pcb) { + fsm *f = &pcb->ipcp_fsm; + fsm_open(f); + pcb->ipcp_is_open = 1; +} + + +/* + * ipcp_close - Take IPCP down. + */ +static void ipcp_close(ppp_pcb *pcb, const char *reason) { + fsm *f = &pcb->ipcp_fsm; + fsm_close(f, reason); +} + + +/* + * ipcp_lowerup - The lower layer is up. + */ +static void ipcp_lowerup(ppp_pcb *pcb) { + fsm *f = &pcb->ipcp_fsm; + fsm_lowerup(f); +} + + +/* + * ipcp_lowerdown - The lower layer is down. + */ +static void ipcp_lowerdown(ppp_pcb *pcb) { + fsm *f = &pcb->ipcp_fsm; + fsm_lowerdown(f); +} + + +/* + * ipcp_input - Input IPCP packet. + */ +static void ipcp_input(ppp_pcb *pcb, u_char *p, int len) { + fsm *f = &pcb->ipcp_fsm; + fsm_input(f, p, len); +} + + +/* + * ipcp_protrej - A Protocol-Reject was received for IPCP. + * + * Pretend the lower layer went down, so we shut up. + */ +static void ipcp_protrej(ppp_pcb *pcb) { + fsm *f = &pcb->ipcp_fsm; + fsm_lowerdown(f); +} + + +/* + * ipcp_resetci - Reset our CI. + * Called by fsm_sconfreq, Send Configure Request. 
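A worked example for parse_dotted_ip() above, which setnetmask() relies on precisely because inet_addr() cannot distinguish an error from a valid 255.255.255.255 (netmask_parse_example is illustrative and, like the rest of this option code, only compiles when PPP_OPTIONS is enabled):

    static void netmask_parse_example(void) {
        char mask_str[] = "255.255.255.0";
        u32_t v;
        int n = parse_dotted_ip(mask_str, &v);
        /* n == 13 characters consumed, v == 0xffffff00 in host byte order;
         * setnetmask() then applies lwip_htonl() before storing the result.
         * A single literal above 255, e.g. "0xffffff00", is accepted as-is. */
        LWIP_UNUSED_ARG(n);
        LWIP_UNUSED_ARG(v);
    }
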
+ */ +static void ipcp_resetci(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipcp_options *wo = &pcb->ipcp_wantoptions; + ipcp_options *go = &pcb->ipcp_gotoptions; + ipcp_options *ao = &pcb->ipcp_allowoptions; + + wo->req_addr = (wo->neg_addr || wo->old_addrs) && + (ao->neg_addr || ao->old_addrs); + if (wo->ouraddr == 0) + wo->accept_local = 1; + if (wo->hisaddr == 0) + wo->accept_remote = 1; +#if LWIP_DNS + wo->req_dns1 = wo->req_dns2 = pcb->settings.usepeerdns; /* Request DNS addresses from the peer */ +#endif /* LWIP_DNS */ + *go = *wo; + if (!pcb->ask_for_local) + go->ouraddr = 0; +#if 0 /* UNUSED */ + if (ip_choose_hook) { + ip_choose_hook(&wo->hisaddr); + if (wo->hisaddr) { + wo->accept_remote = 0; + } + } +#endif /* UNUSED */ + BZERO(&pcb->ipcp_hisoptions, sizeof(ipcp_options)); +} + + +/* + * ipcp_cilen - Return length of our CI. + * Called by fsm_sconfreq, Send Configure Request. + */ +static int ipcp_cilen(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipcp_options *go = &pcb->ipcp_gotoptions; +#if VJ_SUPPORT + ipcp_options *wo = &pcb->ipcp_wantoptions; +#endif /* VJ_SUPPORT */ + ipcp_options *ho = &pcb->ipcp_hisoptions; + +#define LENCIADDRS(neg) (neg ? CILEN_ADDRS : 0) +#if VJ_SUPPORT +#define LENCIVJ(neg, old) (neg ? (old? CILEN_COMPRESS : CILEN_VJ) : 0) +#endif /* VJ_SUPPORT */ +#define LENCIADDR(neg) (neg ? CILEN_ADDR : 0) +#if LWIP_DNS +#define LENCIDNS(neg) LENCIADDR(neg) +#endif /* LWIP_DNS */ +#if 0 /* UNUSED - WINS */ +#define LENCIWINS(neg) LENCIADDR(neg) +#endif /* UNUSED - WINS */ + + /* + * First see if we want to change our options to the old + * forms because we have received old forms from the peer. + */ + if (go->neg_addr && go->old_addrs && !ho->neg_addr && ho->old_addrs) + go->neg_addr = 0; + +#if VJ_SUPPORT + if (wo->neg_vj && !go->neg_vj && !go->old_vj) { + /* try an older style of VJ negotiation */ + /* use the old style only if the peer did */ + if (ho->neg_vj && ho->old_vj) { + go->neg_vj = 1; + go->old_vj = 1; + go->vj_protocol = ho->vj_protocol; + } + } +#endif /* VJ_SUPPORT */ + + return (LENCIADDRS(!go->neg_addr && go->old_addrs) + +#if VJ_SUPPORT + LENCIVJ(go->neg_vj, go->old_vj) + +#endif /* VJ_SUPPORT */ + LENCIADDR(go->neg_addr) + +#if LWIP_DNS + LENCIDNS(go->req_dns1) + + LENCIDNS(go->req_dns2) + +#endif /* LWIP_DNS */ +#if 0 /* UNUSED - WINS */ + LENCIWINS(go->winsaddr[0]) + + LENCIWINS(go->winsaddr[1]) + +#endif /* UNUSED - WINS */ + 0); +} + + +/* + * ipcp_addci - Add our desired CIs to a packet. + * Called by fsm_sconfreq, Send Configure Request. + */ +static void ipcp_addci(fsm *f, u_char *ucp, int *lenp) { + ppp_pcb *pcb = f->pcb; + ipcp_options *go = &pcb->ipcp_gotoptions; + int len = *lenp; + +#define ADDCIADDRS(opt, neg, val1, val2) \ + if (neg) { \ + if (len >= CILEN_ADDRS) { \ + u32_t l; \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_ADDRS, ucp); \ + l = lwip_ntohl(val1); \ + PUTLONG(l, ucp); \ + l = lwip_ntohl(val2); \ + PUTLONG(l, ucp); \ + len -= CILEN_ADDRS; \ + } else \ + go->old_addrs = 0; \ + } + +#if VJ_SUPPORT +#define ADDCIVJ(opt, neg, val, old, maxslotindex, cflag) \ + if (neg) { \ + int vjlen = old? 
CILEN_COMPRESS : CILEN_VJ; \ + if (len >= vjlen) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(vjlen, ucp); \ + PUTSHORT(val, ucp); \ + if (!old) { \ + PUTCHAR(maxslotindex, ucp); \ + PUTCHAR(cflag, ucp); \ + } \ + len -= vjlen; \ + } else \ + neg = 0; \ + } +#endif /* VJ_SUPPORT */ + +#define ADDCIADDR(opt, neg, val) \ + if (neg) { \ + if (len >= CILEN_ADDR) { \ + u32_t l; \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_ADDR, ucp); \ + l = lwip_ntohl(val); \ + PUTLONG(l, ucp); \ + len -= CILEN_ADDR; \ + } else \ + neg = 0; \ + } + +#if LWIP_DNS +#define ADDCIDNS(opt, neg, addr) \ + if (neg) { \ + if (len >= CILEN_ADDR) { \ + u32_t l; \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_ADDR, ucp); \ + l = lwip_ntohl(addr); \ + PUTLONG(l, ucp); \ + len -= CILEN_ADDR; \ + } else \ + neg = 0; \ + } +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ +#define ADDCIWINS(opt, addr) \ + if (addr) { \ + if (len >= CILEN_ADDR) { \ + u32_t l; \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_ADDR, ucp); \ + l = lwip_ntohl(addr); \ + PUTLONG(l, ucp); \ + len -= CILEN_ADDR; \ + } else \ + addr = 0; \ + } +#endif /* UNUSED - WINS */ + + ADDCIADDRS(CI_ADDRS, !go->neg_addr && go->old_addrs, go->ouraddr, + go->hisaddr); + +#if VJ_SUPPORT + ADDCIVJ(CI_COMPRESSTYPE, go->neg_vj, go->vj_protocol, go->old_vj, + go->maxslotindex, go->cflag); +#endif /* VJ_SUPPORT */ + + ADDCIADDR(CI_ADDR, go->neg_addr, go->ouraddr); + +#if LWIP_DNS + ADDCIDNS(CI_MS_DNS1, go->req_dns1, go->dnsaddr[0]); + + ADDCIDNS(CI_MS_DNS2, go->req_dns2, go->dnsaddr[1]); +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ + ADDCIWINS(CI_MS_WINS1, go->winsaddr[0]); + + ADDCIWINS(CI_MS_WINS2, go->winsaddr[1]); +#endif /* UNUSED - WINS */ + + *lenp -= len; +} + + +/* + * ipcp_ackci - Ack our CIs. + * Called by fsm_rconfack, Receive Configure ACK. + * + * Returns: + * 0 - Ack was bad. + * 1 - Ack was good. + */ +static int ipcp_ackci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + ipcp_options *go = &pcb->ipcp_gotoptions; + u_short cilen, citype; + u32_t cilong; +#if VJ_SUPPORT + u_short cishort; + u_char cimaxslotindex, cicflag; +#endif /* VJ_SUPPORT */ + + /* + * CIs must be in exactly the same order that we sent... + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ + +#define ACKCIADDRS(opt, neg, val1, val2) \ + if (neg) { \ + u32_t l; \ + if ((len -= CILEN_ADDRS) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_ADDRS || \ + citype != opt) \ + goto bad; \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + if (val1 != cilong) \ + goto bad; \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + if (val2 != cilong) \ + goto bad; \ + } + +#if VJ_SUPPORT +#define ACKCIVJ(opt, neg, val, old, maxslotindex, cflag) \ + if (neg) { \ + int vjlen = old? 
CILEN_COMPRESS : CILEN_VJ; \ + if ((len -= vjlen) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != vjlen || \ + citype != opt) \ + goto bad; \ + GETSHORT(cishort, p); \ + if (cishort != val) \ + goto bad; \ + if (!old) { \ + GETCHAR(cimaxslotindex, p); \ + if (cimaxslotindex != maxslotindex) \ + goto bad; \ + GETCHAR(cicflag, p); \ + if (cicflag != cflag) \ + goto bad; \ + } \ + } +#endif /* VJ_SUPPORT */ + +#define ACKCIADDR(opt, neg, val) \ + if (neg) { \ + u32_t l; \ + if ((len -= CILEN_ADDR) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_ADDR || \ + citype != opt) \ + goto bad; \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + if (val != cilong) \ + goto bad; \ + } + +#if LWIP_DNS +#define ACKCIDNS(opt, neg, addr) \ + if (neg) { \ + u32_t l; \ + if ((len -= CILEN_ADDR) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_ADDR || citype != opt) \ + goto bad; \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + if (addr != cilong) \ + goto bad; \ + } +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ +#define ACKCIWINS(opt, addr) \ + if (addr) { \ + u32_t l; \ + if ((len -= CILEN_ADDR) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_ADDR || citype != opt) \ + goto bad; \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + if (addr != cilong) \ + goto bad; \ + } +#endif /* UNUSED - WINS */ + + ACKCIADDRS(CI_ADDRS, !go->neg_addr && go->old_addrs, go->ouraddr, + go->hisaddr); + +#if VJ_SUPPORT + ACKCIVJ(CI_COMPRESSTYPE, go->neg_vj, go->vj_protocol, go->old_vj, + go->maxslotindex, go->cflag); +#endif /* VJ_SUPPORT */ + + ACKCIADDR(CI_ADDR, go->neg_addr, go->ouraddr); + +#if LWIP_DNS + ACKCIDNS(CI_MS_DNS1, go->req_dns1, go->dnsaddr[0]); + + ACKCIDNS(CI_MS_DNS2, go->req_dns2, go->dnsaddr[1]); +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ + ACKCIWINS(CI_MS_WINS1, go->winsaddr[0]); + + ACKCIWINS(CI_MS_WINS2, go->winsaddr[1]); +#endif /* UNUSED - WINS */ + + /* + * If there are any remaining CIs, then this packet is bad. + */ + if (len != 0) + goto bad; + return (1); + +bad: + IPCPDEBUG(("ipcp_ackci: received bad Ack!")); + return (0); +} + +/* + * ipcp_nakci - Peer has sent a NAK for some of our CIs. + * This should not modify any state if the Nak is bad + * or if IPCP is in the OPENED state. + * Calback from fsm_rconfnakrej - Receive Configure-Nak or Configure-Reject. + * + * Returns: + * 0 - Nak was bad. + * 1 - Nak was good. + */ +static int ipcp_nakci(fsm *f, u_char *p, int len, int treat_as_reject) { + ppp_pcb *pcb = f->pcb; + ipcp_options *go = &pcb->ipcp_gotoptions; + u_char citype, cilen, *next; +#if VJ_SUPPORT + u_char cimaxslotindex, cicflag; + u_short cishort; +#endif /* VJ_SUPPORT */ + u32_t ciaddr1, ciaddr2, l; +#if LWIP_DNS + u32_t cidnsaddr; +#endif /* LWIP_DNS */ + ipcp_options no; /* options we've seen Naks for */ + ipcp_options try_; /* options to request next time */ + + BZERO(&no, sizeof(no)); + try_ = *go; + + /* + * Any Nak'd CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. 
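For orientation, the options these macros walk are plain type/length/value triples, with the length byte counting its own two-byte header. A sketch with invented address bytes:

    /* CI_ADDR exactly as ipcp_addci() emits it and ipcp_ackci()/ipcp_nakci() re-parse it. */
    static const u_char ci_addr_example[CILEN_ADDR] = {
        CI_ADDR, CILEN_ADDR, 10, 0, 0, 1            /* IP-Address 10.0.0.1 */
    };
    /* A VJ option has the same shape: CI_COMPRESSTYPE (2), CILEN_VJ (6),
     * 0x00 0x2d (IPCP_VJ_COMP), maxslotindex, cflag. */
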
+ */ +#define NAKCIADDRS(opt, neg, code) \ + if ((neg) && \ + (cilen = p[1]) == CILEN_ADDRS && \ + len >= cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + ciaddr1 = lwip_htonl(l); \ + GETLONG(l, p); \ + ciaddr2 = lwip_htonl(l); \ + no.old_addrs = 1; \ + code \ + } + +#if VJ_SUPPORT +#define NAKCIVJ(opt, neg, code) \ + if (go->neg && \ + ((cilen = p[1]) == CILEN_COMPRESS || cilen == CILEN_VJ) && \ + len >= cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + no.neg = 1; \ + code \ + } +#endif /* VJ_SUPPORT */ + +#define NAKCIADDR(opt, neg, code) \ + if (go->neg && \ + (cilen = p[1]) == CILEN_ADDR && \ + len >= cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + ciaddr1 = lwip_htonl(l); \ + no.neg = 1; \ + code \ + } + +#if LWIP_DNS +#define NAKCIDNS(opt, neg, code) \ + if (go->neg && \ + ((cilen = p[1]) == CILEN_ADDR) && \ + len >= cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + cidnsaddr = lwip_htonl(l); \ + no.neg = 1; \ + code \ + } +#endif /* LWIP_DNS */ + + /* + * Accept the peer's idea of {our,his} address, if different + * from our idea, only if the accept_{local,remote} flag is set. + */ + NAKCIADDRS(CI_ADDRS, !go->neg_addr && go->old_addrs, + if (treat_as_reject) { + try_.old_addrs = 0; + } else { + if (go->accept_local && ciaddr1) { + /* take his idea of our address */ + try_.ouraddr = ciaddr1; + } + if (go->accept_remote && ciaddr2) { + /* take his idea of his address */ + try_.hisaddr = ciaddr2; + } + } + ); + +#if VJ_SUPPORT + /* + * Accept the peer's value of maxslotindex provided that it + * is less than what we asked for. Turn off slot-ID compression + * if the peer wants. Send old-style compress-type option if + * the peer wants. + */ + NAKCIVJ(CI_COMPRESSTYPE, neg_vj, + if (treat_as_reject) { + try_.neg_vj = 0; + } else if (cilen == CILEN_VJ) { + GETCHAR(cimaxslotindex, p); + GETCHAR(cicflag, p); + if (cishort == IPCP_VJ_COMP) { + try_.old_vj = 0; + if (cimaxslotindex < go->maxslotindex) + try_.maxslotindex = cimaxslotindex; + if (!cicflag) + try_.cflag = 0; + } else { + try_.neg_vj = 0; + } + } else { + if (cishort == IPCP_VJ_COMP || cishort == IPCP_VJ_COMP_OLD) { + try_.old_vj = 1; + try_.vj_protocol = cishort; + } else { + try_.neg_vj = 0; + } + } + ); +#endif /* VJ_SUPPORT */ + + NAKCIADDR(CI_ADDR, neg_addr, + if (treat_as_reject) { + try_.neg_addr = 0; + try_.old_addrs = 0; + } else if (go->accept_local && ciaddr1) { + /* take his idea of our address */ + try_.ouraddr = ciaddr1; + } + ); + +#if LWIP_DNS + NAKCIDNS(CI_MS_DNS1, req_dns1, + if (treat_as_reject) { + try_.req_dns1 = 0; + } else { + try_.dnsaddr[0] = cidnsaddr; + } + ); + + NAKCIDNS(CI_MS_DNS2, req_dns2, + if (treat_as_reject) { + try_.req_dns2 = 0; + } else { + try_.dnsaddr[1] = cidnsaddr; + } + ); +#endif /* #if LWIP_DNS */ + + /* + * There may be remaining CIs, if the peer is requesting negotiation + * on an option that we didn't include in our request packet. + * If they want to negotiate about IP addresses, we comply. + * If they want us to ask for compression, we refuse. + * If they want us to ask for ms-dns, we do that, since some + * peers get huffy if we don't. 
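A concrete instance of that last case, with invented bytes: the peer appends a DNS option we never requested to its Configure-Nak, and the loop below adopts it (this path requires LWIP_DNS).

    /* 0x81 0x06 0xc0 0xa8 0x01 0x01 = CI_MS_DNS1, length 6, 192.168.1.1.
     * With go->req_dns1 still 0, the CI_MS_DNS1 case below stores the address in
     * try_.dnsaddr[0] and sets try_.req_dns1, so the next Configure-Request asks for it. */
    static const u_char unsolicited_dns_nak[] = {
        CI_MS_DNS1, CILEN_ADDR, 192, 168, 1, 1
    };
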
+ */ + while (len >= CILEN_VOID) { + GETCHAR(citype, p); + GETCHAR(cilen, p); + if ( cilen < CILEN_VOID || (len -= cilen) < 0 ) + goto bad; + next = p + cilen - 2; + + switch (citype) { +#if VJ_SUPPORT + case CI_COMPRESSTYPE: + if (go->neg_vj || no.neg_vj || + (cilen != CILEN_VJ && cilen != CILEN_COMPRESS)) + goto bad; + no.neg_vj = 1; + break; +#endif /* VJ_SUPPORT */ + case CI_ADDRS: + if ((!go->neg_addr && go->old_addrs) || no.old_addrs + || cilen != CILEN_ADDRS) + goto bad; + try_.neg_addr = 0; + GETLONG(l, p); + ciaddr1 = lwip_htonl(l); + if (ciaddr1 && go->accept_local) + try_.ouraddr = ciaddr1; + GETLONG(l, p); + ciaddr2 = lwip_htonl(l); + if (ciaddr2 && go->accept_remote) + try_.hisaddr = ciaddr2; + no.old_addrs = 1; + break; + case CI_ADDR: + if (go->neg_addr || no.neg_addr || cilen != CILEN_ADDR) + goto bad; + try_.old_addrs = 0; + GETLONG(l, p); + ciaddr1 = lwip_htonl(l); + if (ciaddr1 && go->accept_local) + try_.ouraddr = ciaddr1; + if (try_.ouraddr != 0) + try_.neg_addr = 1; + no.neg_addr = 1; + break; +#if LWIP_DNS + case CI_MS_DNS1: + if (go->req_dns1 || no.req_dns1 || cilen != CILEN_ADDR) + goto bad; + GETLONG(l, p); + try_.dnsaddr[0] = lwip_htonl(l); + try_.req_dns1 = 1; + no.req_dns1 = 1; + break; + case CI_MS_DNS2: + if (go->req_dns2 || no.req_dns2 || cilen != CILEN_ADDR) + goto bad; + GETLONG(l, p); + try_.dnsaddr[1] = lwip_htonl(l); + try_.req_dns2 = 1; + no.req_dns2 = 1; + break; +#endif /* LWIP_DNS */ +#if 0 /* UNUSED - WINS */ + case CI_MS_WINS1: + case CI_MS_WINS2: + if (cilen != CILEN_ADDR) + goto bad; + GETLONG(l, p); + ciaddr1 = lwip_htonl(l); + if (ciaddr1) + try_.winsaddr[citype == CI_MS_WINS2] = ciaddr1; + break; +#endif /* UNUSED - WINS */ + default: + break; + } + p = next; + } + + /* + * OK, the Nak is good. Now we can update state. + * If there are any remaining options, we ignore them. + */ + if (f->state != PPP_FSM_OPENED) + *go = try_; + + return 1; + +bad: + IPCPDEBUG(("ipcp_nakci: received bad Nak!")); + return 0; +} + + +/* + * ipcp_rejci - Reject some of our CIs. + * Callback from fsm_rconfnakrej. + */ +static int ipcp_rejci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + ipcp_options *go = &pcb->ipcp_gotoptions; + u_char cilen; +#if VJ_SUPPORT + u_char cimaxslotindex, ciflag; + u_short cishort; +#endif /* VJ_SUPPORT */ + u32_t cilong; + ipcp_options try_; /* options to request next time */ + + try_ = *go; + /* + * Any Rejected CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ +#define REJCIADDRS(opt, neg, val1, val2) \ + if ((neg) && \ + (cilen = p[1]) == CILEN_ADDRS && \ + len >= cilen && \ + p[0] == opt) { \ + u32_t l; \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + /* Check rejected value. */ \ + if (cilong != val1) \ + goto bad; \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + /* Check rejected value. */ \ + if (cilong != val2) \ + goto bad; \ + try_.old_addrs = 0; \ + } + +#if VJ_SUPPORT +#define REJCIVJ(opt, neg, val, old, maxslot, cflag) \ + if (go->neg && \ + p[1] == (old? CILEN_COMPRESS : CILEN_VJ) && \ + len >= p[1] && \ + p[0] == opt) { \ + len -= p[1]; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + /* Check rejected value. 
*/ \ + if (cishort != val) \ + goto bad; \ + if (!old) { \ + GETCHAR(cimaxslotindex, p); \ + if (cimaxslotindex != maxslot) \ + goto bad; \ + GETCHAR(ciflag, p); \ + if (ciflag != cflag) \ + goto bad; \ + } \ + try_.neg = 0; \ + } +#endif /* VJ_SUPPORT */ + +#define REJCIADDR(opt, neg, val) \ + if (go->neg && \ + (cilen = p[1]) == CILEN_ADDR && \ + len >= cilen && \ + p[0] == opt) { \ + u32_t l; \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + /* Check rejected value. */ \ + if (cilong != val) \ + goto bad; \ + try_.neg = 0; \ + } + +#if LWIP_DNS +#define REJCIDNS(opt, neg, dnsaddr) \ + if (go->neg && \ + ((cilen = p[1]) == CILEN_ADDR) && \ + len >= cilen && \ + p[0] == opt) { \ + u32_t l; \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + /* Check rejected value. */ \ + if (cilong != dnsaddr) \ + goto bad; \ + try_.neg = 0; \ + } +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ +#define REJCIWINS(opt, addr) \ + if (addr && \ + ((cilen = p[1]) == CILEN_ADDR) && \ + len >= cilen && \ + p[0] == opt) { \ + u32_t l; \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + /* Check rejected value. */ \ + if (cilong != addr) \ + goto bad; \ + try_.winsaddr[opt == CI_MS_WINS2] = 0; \ + } +#endif /* UNUSED - WINS */ + + REJCIADDRS(CI_ADDRS, !go->neg_addr && go->old_addrs, + go->ouraddr, go->hisaddr); + +#if VJ_SUPPORT + REJCIVJ(CI_COMPRESSTYPE, neg_vj, go->vj_protocol, go->old_vj, + go->maxslotindex, go->cflag); +#endif /* VJ_SUPPORT */ + + REJCIADDR(CI_ADDR, neg_addr, go->ouraddr); + +#if LWIP_DNS + REJCIDNS(CI_MS_DNS1, req_dns1, go->dnsaddr[0]); + + REJCIDNS(CI_MS_DNS2, req_dns2, go->dnsaddr[1]); +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ + REJCIWINS(CI_MS_WINS1, go->winsaddr[0]); + + REJCIWINS(CI_MS_WINS2, go->winsaddr[1]); +#endif /* UNUSED - WINS */ + + /* + * If there are any remaining CIs, then this packet is bad. + */ + if (len != 0) + goto bad; + /* + * Now we can update state. + */ + if (f->state != PPP_FSM_OPENED) + *go = try_; + return 1; + +bad: + IPCPDEBUG(("ipcp_rejci: received bad Reject!")); + return 0; +} + + +/* + * ipcp_reqci - Check the peer's requested CIs and send appropriate response. + * Callback from fsm_rconfreq, Receive Configure Request + * + * Returns: CONFACK, CONFNAK or CONFREJ and input packet modified + * appropriately. If reject_if_disagree is non-zero, doesn't return + * CONFNAK; returns CONFREJ if it can't return CONFACK. + * + * inp = Requested CIs + * len = Length of requested CIs + */ +static int ipcp_reqci(fsm *f, u_char *inp, int *len, int reject_if_disagree) { + ppp_pcb *pcb = f->pcb; + ipcp_options *wo = &pcb->ipcp_wantoptions; + ipcp_options *ho = &pcb->ipcp_hisoptions; + ipcp_options *ao = &pcb->ipcp_allowoptions; + u_char *cip, *next; /* Pointer to current and next CIs */ + u_short cilen, citype; /* Parsed len, type */ +#if VJ_SUPPORT + u_short cishort; /* Parsed short value */ +#endif /* VJ_SUPPORT */ + u32_t tl, ciaddr1, ciaddr2;/* Parsed address values */ + int rc = CONFACK; /* Final packet return code */ + int orc; /* Individual option return code */ + u_char *p; /* Pointer to next char to parse */ + u_char *ucp = inp; /* Pointer to current output char */ + int l = *len; /* Length left */ +#if VJ_SUPPORT + u_char maxslotindex, cflag; +#endif /* VJ_SUPPORT */ +#if LWIP_DNS + int d; +#endif /* LWIP_DNS */ + + /* + * Reset all his options. + */ + BZERO(ho, sizeof(*ho)); + + /* + * Process all his options. 
+ */ + next = inp; + while (l) { + orc = CONFACK; /* Assume success */ + cip = p = next; /* Remember begining of CI */ + if (l < 2 || /* Not enough data for CI header or */ + p[1] < 2 || /* CI length too small or */ + p[1] > l) { /* CI length too big? */ + IPCPDEBUG(("ipcp_reqci: bad CI length!")); + orc = CONFREJ; /* Reject bad CI */ + cilen = l; /* Reject till end of packet */ + l = 0; /* Don't loop again */ + goto endswitch; + } + GETCHAR(citype, p); /* Parse CI type */ + GETCHAR(cilen, p); /* Parse CI length */ + l -= cilen; /* Adjust remaining length */ + next += cilen; /* Step to next CI */ + + switch (citype) { /* Check CI type */ + case CI_ADDRS: + if (!ao->old_addrs || ho->neg_addr || + cilen != CILEN_ADDRS) { /* Check CI length */ + orc = CONFREJ; /* Reject CI */ + break; + } + + /* + * If he has no address, or if we both have his address but + * disagree about it, then NAK it with our idea. + * In particular, if we don't know his address, but he does, + * then accept it. + */ + GETLONG(tl, p); /* Parse source address (his) */ + ciaddr1 = lwip_htonl(tl); + if (ciaddr1 != wo->hisaddr + && (ciaddr1 == 0 || !wo->accept_remote)) { + orc = CONFNAK; + if (!reject_if_disagree) { + DECPTR(sizeof(u32_t), p); + tl = lwip_ntohl(wo->hisaddr); + PUTLONG(tl, p); + } + } else if (ciaddr1 == 0 && wo->hisaddr == 0) { + /* + * If neither we nor he knows his address, reject the option. + */ + orc = CONFREJ; + wo->req_addr = 0; /* don't NAK with 0.0.0.0 later */ + break; + } + + /* + * If he doesn't know our address, or if we both have our address + * but disagree about it, then NAK it with our idea. + */ + GETLONG(tl, p); /* Parse desination address (ours) */ + ciaddr2 = lwip_htonl(tl); + if (ciaddr2 != wo->ouraddr) { + if (ciaddr2 == 0 || !wo->accept_local) { + orc = CONFNAK; + if (!reject_if_disagree) { + DECPTR(sizeof(u32_t), p); + tl = lwip_ntohl(wo->ouraddr); + PUTLONG(tl, p); + } + } else { + wo->ouraddr = ciaddr2; /* accept peer's idea */ + } + } + + ho->old_addrs = 1; + ho->hisaddr = ciaddr1; + ho->ouraddr = ciaddr2; + break; + + case CI_ADDR: + if (!ao->neg_addr || ho->old_addrs || + cilen != CILEN_ADDR) { /* Check CI length */ + orc = CONFREJ; /* Reject CI */ + break; + } + + /* + * If he has no address, or if we both have his address but + * disagree about it, then NAK it with our idea. + * In particular, if we don't know his address, but he does, + * then accept it. + */ + GETLONG(tl, p); /* Parse source address (his) */ + ciaddr1 = lwip_htonl(tl); + if (ciaddr1 != wo->hisaddr + && (ciaddr1 == 0 || !wo->accept_remote)) { + orc = CONFNAK; + if (!reject_if_disagree) { + DECPTR(sizeof(u32_t), p); + tl = lwip_ntohl(wo->hisaddr); + PUTLONG(tl, p); + } + } else if (ciaddr1 == 0 && wo->hisaddr == 0) { + /* + * Don't ACK an address of 0.0.0.0 - reject it instead. 
+ */ + orc = CONFREJ; + wo->req_addr = 0; /* don't NAK with 0.0.0.0 later */ + break; + } + + ho->neg_addr = 1; + ho->hisaddr = ciaddr1; + break; + +#if LWIP_DNS + case CI_MS_DNS1: + case CI_MS_DNS2: + /* Microsoft primary or secondary DNS request */ + d = citype == CI_MS_DNS2; + + /* If we do not have a DNS address then we cannot send it */ + if (ao->dnsaddr[d] == 0 || + cilen != CILEN_ADDR) { /* Check CI length */ + orc = CONFREJ; /* Reject CI */ + break; + } + GETLONG(tl, p); + if (lwip_htonl(tl) != ao->dnsaddr[d]) { + DECPTR(sizeof(u32_t), p); + tl = lwip_ntohl(ao->dnsaddr[d]); + PUTLONG(tl, p); + orc = CONFNAK; + } + break; +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ + case CI_MS_WINS1: + case CI_MS_WINS2: + /* Microsoft primary or secondary WINS request */ + d = citype == CI_MS_WINS2; + + /* If we do not have a DNS address then we cannot send it */ + if (ao->winsaddr[d] == 0 || + cilen != CILEN_ADDR) { /* Check CI length */ + orc = CONFREJ; /* Reject CI */ + break; + } + GETLONG(tl, p); + if (lwip_htonl(tl) != ao->winsaddr[d]) { + DECPTR(sizeof(u32_t), p); + tl = lwip_ntohl(ao->winsaddr[d]); + PUTLONG(tl, p); + orc = CONFNAK; + } + break; +#endif /* UNUSED - WINS */ + +#if VJ_SUPPORT + case CI_COMPRESSTYPE: + if (!ao->neg_vj || + (cilen != CILEN_VJ && cilen != CILEN_COMPRESS)) { + orc = CONFREJ; + break; + } + GETSHORT(cishort, p); + + if (!(cishort == IPCP_VJ_COMP || + (cishort == IPCP_VJ_COMP_OLD && cilen == CILEN_COMPRESS))) { + orc = CONFREJ; + break; + } + + ho->neg_vj = 1; + ho->vj_protocol = cishort; + if (cilen == CILEN_VJ) { + GETCHAR(maxslotindex, p); + if (maxslotindex > ao->maxslotindex) { + orc = CONFNAK; + if (!reject_if_disagree){ + DECPTR(1, p); + PUTCHAR(ao->maxslotindex, p); + } + } + GETCHAR(cflag, p); + if (cflag && !ao->cflag) { + orc = CONFNAK; + if (!reject_if_disagree){ + DECPTR(1, p); + PUTCHAR(wo->cflag, p); + } + } + ho->maxslotindex = maxslotindex; + ho->cflag = cflag; + } else { + ho->old_vj = 1; + ho->maxslotindex = MAX_STATES - 1; + ho->cflag = 1; + } + break; +#endif /* VJ_SUPPORT */ + + default: + orc = CONFREJ; + break; + } +endswitch: + if (orc == CONFACK && /* Good CI */ + rc != CONFACK) /* but prior CI wasnt? */ + continue; /* Don't send this one */ + + if (orc == CONFNAK) { /* Nak this CI? */ + if (reject_if_disagree) /* Getting fed up with sending NAKs? */ + orc = CONFREJ; /* Get tough if so */ + else { + if (rc == CONFREJ) /* Rejecting prior CI? */ + continue; /* Don't send this one */ + if (rc == CONFACK) { /* Ack'd all prior CIs? */ + rc = CONFNAK; /* Not anymore... */ + ucp = inp; /* Backup */ + } + } + } + + if (orc == CONFREJ && /* Reject this CI */ + rc != CONFREJ) { /* but no prior ones? */ + rc = CONFREJ; + ucp = inp; /* Backup */ + } + + /* Need to move CI? */ + if (ucp != cip) + MEMCPY(ucp, cip, cilen); /* Move it */ + + /* Update output pointer */ + INCPTR(cilen, ucp); + } + + /* + * If we aren't rejecting this packet, and we want to negotiate + * their address, and they didn't send their address, then we + * send a NAK with a CI_ADDR option appended. We assume the + * input buffer is long enough that we can append the extra + * option safely. 
+ */ + if (rc != CONFREJ && !ho->neg_addr && !ho->old_addrs && + wo->req_addr && !reject_if_disagree && !pcb->settings.noremoteip) { + if (rc == CONFACK) { + rc = CONFNAK; + ucp = inp; /* reset pointer */ + wo->req_addr = 0; /* don't ask again */ + } + PUTCHAR(CI_ADDR, ucp); + PUTCHAR(CILEN_ADDR, ucp); + tl = lwip_ntohl(wo->hisaddr); + PUTLONG(tl, ucp); + } + + *len = ucp - inp; /* Compute output length */ + IPCPDEBUG(("ipcp: returning Configure-%s", CODENAME(rc))); + return (rc); /* Return final code */ +} + + +#if 0 /* UNUSED */ +/* + * ip_check_options - check that any IP-related options are OK, + * and assign appropriate defaults. + */ +static void +ip_check_options() +{ + struct hostent *hp; + u32_t local; + ipcp_options *wo = &ipcp_wantoptions[0]; + + /* + * Default our local IP address based on our hostname. + * If local IP address already given, don't bother. + */ + if (wo->ouraddr == 0 && !disable_defaultip) { + /* + * Look up our hostname (possibly with domain name appended) + * and take the first IP address as our local IP address. + * If there isn't an IP address for our hostname, too bad. + */ + wo->accept_local = 1; /* don't insist on this default value */ + if ((hp = gethostbyname(hostname)) != NULL) { + local = *(u32_t *)hp->h_addr; + if (local != 0 && !bad_ip_adrs(local)) + wo->ouraddr = local; + } + } + ask_for_local = wo->ouraddr != 0 || !disable_defaultip; +} +#endif /* UNUSED */ + +#if DEMAND_SUPPORT +/* + * ip_demand_conf - configure the interface as though + * IPCP were up, for use with dial-on-demand. + */ +static int +ip_demand_conf(u) + int u; +{ + ppp_pcb *pcb = &ppp_pcb_list[u]; + ipcp_options *wo = &ipcp_wantoptions[u]; + + if (wo->hisaddr == 0 && !pcb->settings.noremoteip) { + /* make up an arbitrary address for the peer */ + wo->hisaddr = lwip_htonl(0x0a707070 + ifunit); + wo->accept_remote = 1; + } + if (wo->ouraddr == 0) { + /* make up an arbitrary address for us */ + wo->ouraddr = lwip_htonl(0x0a404040 + ifunit); + wo->accept_local = 1; + ask_for_local = 0; /* don't tell the peer this address */ + } + if (!sifaddr(pcb, wo->ouraddr, wo->hisaddr, get_mask(wo->ouraddr))) + return 0; + if (!sifup(pcb)) + return 0; + if (!sifnpmode(pcb, PPP_IP, NPMODE_QUEUE)) + return 0; +#if 0 /* UNUSED */ + if (wo->default_route) + if (sifdefaultroute(pcb, wo->ouraddr, wo->hisaddr, + wo->replace_default_route)) + default_route_set[u] = 1; +#endif /* UNUSED */ +#if 0 /* UNUSED - PROXY ARP */ + if (wo->proxy_arp) + if (sifproxyarp(pcb, wo->hisaddr)) + proxy_arp_set[u] = 1; +#endif /* UNUSED - PROXY ARP */ + + ppp_notice("local IP address %I", wo->ouraddr); + if (wo->hisaddr) + ppp_notice("remote IP address %I", wo->hisaddr); + + return 1; +} +#endif /* DEMAND_SUPPORT */ + +/* + * ipcp_up - IPCP has come UP. + * + * Configure the IP network interface appropriately and bring it up. + */ +static void ipcp_up(fsm *f) { + ppp_pcb *pcb = f->pcb; + u32_t mask; + ipcp_options *ho = &pcb->ipcp_hisoptions; + ipcp_options *go = &pcb->ipcp_gotoptions; + ipcp_options *wo = &pcb->ipcp_wantoptions; + + IPCPDEBUG(("ipcp: up")); + + /* + * We must have a non-zero IP address for both ends of the link. 
+ */ + if (!ho->neg_addr && !ho->old_addrs) + ho->hisaddr = wo->hisaddr; + + if (!(go->neg_addr || go->old_addrs) && (wo->neg_addr || wo->old_addrs) + && wo->ouraddr != 0) { + ppp_error("Peer refused to agree to our IP address"); + ipcp_close(f->pcb, "Refused our IP address"); + return; + } + if (go->ouraddr == 0) { + ppp_error("Could not determine local IP address"); + ipcp_close(f->pcb, "Could not determine local IP address"); + return; + } + if (ho->hisaddr == 0 && !pcb->settings.noremoteip) { + ho->hisaddr = lwip_htonl(0x0a404040); + ppp_warn("Could not determine remote IP address: defaulting to %I", + ho->hisaddr); + } +#if 0 /* UNUSED */ + script_setenv("IPLOCAL", ip_ntoa(go->ouraddr), 0); + if (ho->hisaddr != 0) + script_setenv("IPREMOTE", ip_ntoa(ho->hisaddr), 1); +#endif /* UNUSED */ + +#if LWIP_DNS + if (!go->req_dns1) + go->dnsaddr[0] = 0; + if (!go->req_dns2) + go->dnsaddr[1] = 0; +#if 0 /* UNUSED */ + if (go->dnsaddr[0]) + script_setenv("DNS1", ip_ntoa(go->dnsaddr[0]), 0); + if (go->dnsaddr[1]) + script_setenv("DNS2", ip_ntoa(go->dnsaddr[1]), 0); +#endif /* UNUSED */ + if (pcb->settings.usepeerdns && (go->dnsaddr[0] || go->dnsaddr[1])) { + sdns(pcb, go->dnsaddr[0], go->dnsaddr[1]); +#if 0 /* UNUSED */ + script_setenv("USEPEERDNS", "1", 0); + create_resolv(go->dnsaddr[0], go->dnsaddr[1]); +#endif /* UNUSED */ + } +#endif /* LWIP_DNS */ + + /* + * Check that the peer is allowed to use the IP address it wants. + */ + if (ho->hisaddr != 0) { + u32_t addr = lwip_ntohl(ho->hisaddr); + if ((addr >> IP_CLASSA_NSHIFT) == IP_LOOPBACKNET + || IP_MULTICAST(addr) || IP_BADCLASS(addr) + /* + * For now, consider that PPP in server mode with peer required + * to authenticate must provide the peer IP address, reject any + * IP address wanted by peer different than the one we wanted. + */ +#if PPP_SERVER && PPP_AUTH_SUPPORT + || (pcb->settings.auth_required && wo->hisaddr != ho->hisaddr) +#endif /* PPP_SERVER && PPP_AUTH_SUPPORT */ + ) { + ppp_error("Peer is not authorized to use remote address %I", ho->hisaddr); + ipcp_close(pcb, "Unauthorized remote IP address"); + return; + } + } +#if 0 /* Unused */ + /* Upstream checking code */ + if (ho->hisaddr != 0 && !auth_ip_addr(f->unit, ho->hisaddr)) { + ppp_error("Peer is not authorized to use remote address %I", ho->hisaddr); + ipcp_close(f->unit, "Unauthorized remote IP address"); + return; + } +#endif /* Unused */ + +#if VJ_SUPPORT + /* set tcp compression */ + sifvjcomp(pcb, ho->neg_vj, ho->cflag, ho->maxslotindex); +#endif /* VJ_SUPPORT */ + +#if DEMAND_SUPPORT + /* + * If we are doing dial-on-demand, the interface is already + * configured, so we put out any saved-up packets, then set the + * interface to pass IP packets. 
+ */ + if (demand) { + if (go->ouraddr != wo->ouraddr || ho->hisaddr != wo->hisaddr) { + ipcp_clear_addrs(f->unit, wo->ouraddr, wo->hisaddr, + wo->replace_default_route); + if (go->ouraddr != wo->ouraddr) { + ppp_warn("Local IP address changed to %I", go->ouraddr); + script_setenv("OLDIPLOCAL", ip_ntoa(wo->ouraddr), 0); + wo->ouraddr = go->ouraddr; + } else + script_unsetenv("OLDIPLOCAL"); + if (ho->hisaddr != wo->hisaddr && wo->hisaddr != 0) { + ppp_warn("Remote IP address changed to %I", ho->hisaddr); + script_setenv("OLDIPREMOTE", ip_ntoa(wo->hisaddr), 0); + wo->hisaddr = ho->hisaddr; + } else + script_unsetenv("OLDIPREMOTE"); + + /* Set the interface to the new addresses */ + mask = get_mask(go->ouraddr); + if (!sifaddr(pcb, go->ouraddr, ho->hisaddr, mask)) { +#if PPP_DEBUG + ppp_warn("Interface configuration failed"); +#endif /* PPP_DEBUG */ + ipcp_close(f->unit, "Interface configuration failed"); + return; + } + + /* assign a default route through the interface if required */ + if (ipcp_wantoptions[f->unit].default_route) + if (sifdefaultroute(pcb, go->ouraddr, ho->hisaddr, + wo->replace_default_route)) + default_route_set[f->unit] = 1; + +#if 0 /* UNUSED - PROXY ARP */ + /* Make a proxy ARP entry if requested. */ + if (ho->hisaddr != 0 && ipcp_wantoptions[f->unit].proxy_arp) + if (sifproxyarp(pcb, ho->hisaddr)) + proxy_arp_set[f->unit] = 1; +#endif /* UNUSED - PROXY ARP */ + + } + demand_rexmit(PPP_IP,go->ouraddr); + sifnpmode(pcb, PPP_IP, NPMODE_PASS); + + } else +#endif /* DEMAND_SUPPORT */ + { + /* + * Set IP addresses and (if specified) netmask. + */ + mask = get_mask(go->ouraddr); + +#if !(defined(SVR4) && (defined(SNI) || defined(__USLC__))) + if (!sifaddr(pcb, go->ouraddr, ho->hisaddr, mask)) { +#if PPP_DEBUG + ppp_warn("Interface configuration failed"); +#endif /* PPP_DEBUG */ + ipcp_close(f->pcb, "Interface configuration failed"); + return; + } +#endif + + /* bring the interface up for IP */ + if (!sifup(pcb)) { +#if PPP_DEBUG + ppp_warn("Interface failed to come up"); +#endif /* PPP_DEBUG */ + ipcp_close(f->pcb, "Interface configuration failed"); + return; + } + +#if (defined(SVR4) && (defined(SNI) || defined(__USLC__))) + if (!sifaddr(pcb, go->ouraddr, ho->hisaddr, mask)) { +#if PPP_DEBUG + ppp_warn("Interface configuration failed"); +#endif /* PPP_DEBUG */ + ipcp_close(f->unit, "Interface configuration failed"); + return; + } +#endif +#if DEMAND_SUPPORT + sifnpmode(pcb, PPP_IP, NPMODE_PASS); +#endif /* DEMAND_SUPPORT */ + +#if 0 /* UNUSED */ + /* assign a default route through the interface if required */ + if (wo->default_route) + if (sifdefaultroute(pcb, go->ouraddr, ho->hisaddr, + wo->replace_default_route)) + pcb->default_route_set = 1; +#endif /* UNUSED */ + +#if 0 /* UNUSED - PROXY ARP */ + /* Make a proxy ARP entry if requested. 
*/ + if (ho->hisaddr != 0 && wo->proxy_arp) + if (sifproxyarp(pcb, ho->hisaddr)) + pcb->proxy_arp_set = 1; +#endif /* UNUSED - PROXY ARP */ + + wo->ouraddr = go->ouraddr; + + ppp_notice("local IP address %I", go->ouraddr); + if (ho->hisaddr != 0) + ppp_notice("remote IP address %I", ho->hisaddr); +#if LWIP_DNS + if (go->dnsaddr[0]) + ppp_notice("primary DNS address %I", go->dnsaddr[0]); + if (go->dnsaddr[1]) + ppp_notice("secondary DNS address %I", go->dnsaddr[1]); +#endif /* LWIP_DNS */ + } + +#if PPP_STATS_SUPPORT + reset_link_stats(f->unit); +#endif /* PPP_STATS_SUPPORT */ + + np_up(pcb, PPP_IP); + pcb->ipcp_is_up = 1; + +#if PPP_NOTIFY + notify(ip_up_notifier, 0); +#endif /* PPP_NOTIFY */ +#if 0 /* UNUSED */ + if (ip_up_hook) + ip_up_hook(); +#endif /* UNUSED */ +} + + +/* + * ipcp_down - IPCP has gone DOWN. + * + * Take the IP network interface down, clear its addresses + * and delete routes through it. + */ +static void ipcp_down(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipcp_options *ho = &pcb->ipcp_hisoptions; + ipcp_options *go = &pcb->ipcp_gotoptions; + + IPCPDEBUG(("ipcp: down")); +#if PPP_STATS_SUPPORT + /* XXX a bit IPv4-centric here, we only need to get the stats + * before the interface is marked down. */ + /* XXX more correct: we must get the stats before running the notifiers, + * at least for the radius plugin */ + update_link_stats(f->unit); +#endif /* PPP_STATS_SUPPORT */ +#if PPP_NOTIFY + notify(ip_down_notifier, 0); +#endif /* PPP_NOTIFY */ +#if 0 /* UNUSED */ + if (ip_down_hook) + ip_down_hook(); +#endif /* UNUSED */ + if (pcb->ipcp_is_up) { + pcb->ipcp_is_up = 0; + np_down(pcb, PPP_IP); + } +#if VJ_SUPPORT + sifvjcomp(pcb, 0, 0, 0); +#endif /* VJ_SUPPORT */ + +#if PPP_STATS_SUPPORT + print_link_stats(); /* _after_ running the notifiers and ip_down_hook(), + * because print_link_stats() sets link_stats_valid + * to 0 (zero) */ +#endif /* PPP_STATS_SUPPORT */ + +#if DEMAND_SUPPORT + /* + * If we are doing dial-on-demand, set the interface + * to queue up outgoing packets (for now). + */ + if (demand) { + sifnpmode(pcb, PPP_IP, NPMODE_QUEUE); + } else +#endif /* DEMAND_SUPPORT */ + { +#if DEMAND_SUPPORT + sifnpmode(pcb, PPP_IP, NPMODE_DROP); +#endif /* DEMAND_SUPPORT */ + sifdown(pcb); + ipcp_clear_addrs(pcb, go->ouraddr, + ho->hisaddr, 0); +#if LWIP_DNS + cdns(pcb, go->dnsaddr[0], go->dnsaddr[1]); +#endif /* LWIP_DNS */ + } +} + + +/* + * ipcp_clear_addrs() - clear the interface addresses, routes, + * proxy arp entries, etc. + */ +static void ipcp_clear_addrs(ppp_pcb *pcb, u32_t ouraddr, u32_t hisaddr, u8_t replacedefaultroute) { + LWIP_UNUSED_ARG(replacedefaultroute); + +#if 0 /* UNUSED - PROXY ARP */ + if (pcb->proxy_arp_set) { + cifproxyarp(pcb, hisaddr); + pcb->proxy_arp_set = 0; + } +#endif /* UNUSED - PROXY ARP */ +#if 0 /* UNUSED */ + /* If replacedefaultroute, sifdefaultroute will be called soon + * with replacedefaultroute set and that will overwrite the current + * default route. This is the case only when doing demand, otherwise + * during demand, this cifdefaultroute would restore the old default + * route which is not what we want in this case. In the non-demand + * case, we'll delete the default route and restore the old if there + * is one saved by an sifdefaultroute with replacedefaultroute. + */ + if (!replacedefaultroute && pcb->default_route_set) { + cifdefaultroute(pcb, ouraddr, hisaddr); + pcb->default_route_set = 0; + } +#endif /* UNUSED */ + cifaddr(pcb, ouraddr, hisaddr); +} + + +/* + * ipcp_finished - possibly shut down the lower layers. 
+ */ +static void ipcp_finished(fsm *f) { + ppp_pcb *pcb = f->pcb; + if (pcb->ipcp_is_open) { + pcb->ipcp_is_open = 0; + np_finished(pcb, PPP_IP); + } +} + + +#if 0 /* UNUSED */ +/* + * create_resolv - create the replacement resolv.conf file + */ +static void +create_resolv(peerdns1, peerdns2) + u32_t peerdns1, peerdns2; +{ + +} +#endif /* UNUSED */ + +#if PRINTPKT_SUPPORT +/* + * ipcp_printpkt - print the contents of an IPCP packet. + */ +static const char* const ipcp_codenames[] = { + "ConfReq", "ConfAck", "ConfNak", "ConfRej", + "TermReq", "TermAck", "CodeRej" +}; + +static int ipcp_printpkt(const u_char *p, int plen, + void (*printer) (void *, const char *, ...), void *arg) { + int code, id, len, olen; + const u_char *pstart, *optend; +#if VJ_SUPPORT + u_short cishort; +#endif /* VJ_SUPPORT */ + u32_t cilong; + + if (plen < HEADERLEN) + return 0; + pstart = p; + GETCHAR(code, p); + GETCHAR(id, p); + GETSHORT(len, p); + if (len < HEADERLEN || len > plen) + return 0; + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(ipcp_codenames)) + printer(arg, " %s", ipcp_codenames[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= HEADERLEN; + switch (code) { + case CONFREQ: + case CONFACK: + case CONFNAK: + case CONFREJ: + /* print option list */ + while (len >= 2) { + GETCHAR(code, p); + GETCHAR(olen, p); + p -= 2; + if (olen < 2 || olen > len) { + break; + } + printer(arg, " <"); + len -= olen; + optend = p + olen; + switch (code) { + case CI_ADDRS: + if (olen == CILEN_ADDRS) { + p += 2; + GETLONG(cilong, p); + printer(arg, "addrs %I", lwip_htonl(cilong)); + GETLONG(cilong, p); + printer(arg, " %I", lwip_htonl(cilong)); + } + break; +#if VJ_SUPPORT + case CI_COMPRESSTYPE: + if (olen >= CILEN_COMPRESS) { + p += 2; + GETSHORT(cishort, p); + printer(arg, "compress "); + switch (cishort) { + case IPCP_VJ_COMP: + printer(arg, "VJ"); + break; + case IPCP_VJ_COMP_OLD: + printer(arg, "old-VJ"); + break; + default: + printer(arg, "0x%x", cishort); + } + } + break; +#endif /* VJ_SUPPORT */ + case CI_ADDR: + if (olen == CILEN_ADDR) { + p += 2; + GETLONG(cilong, p); + printer(arg, "addr %I", lwip_htonl(cilong)); + } + break; +#if LWIP_DNS + case CI_MS_DNS1: + case CI_MS_DNS2: + p += 2; + GETLONG(cilong, p); + printer(arg, "ms-dns%d %I", (code == CI_MS_DNS1? 1: 2), + htonl(cilong)); + break; +#endif /* LWIP_DNS */ +#if 0 /* UNUSED - WINS */ + case CI_MS_WINS1: + case CI_MS_WINS2: + p += 2; + GETLONG(cilong, p); + printer(arg, "ms-wins %I", lwip_htonl(cilong)); + break; +#endif /* UNUSED - WINS */ + default: + break; + } + while (p < optend) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + printer(arg, ">"); + } + break; + + case TERMACK: + case TERMREQ: + if (len > 0 && *p >= ' ' && *p < 0x7f) { + printer(arg, " "); + ppp_print_string(p, len, printer, arg); + p += len; + len = 0; + } + break; + default: + break; + } + + /* print the rest of the bytes in the packet */ + for (; len > 0; --len) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + + return p - pstart; +} +#endif /* PRINTPKT_SUPPORT */ + +#if DEMAND_SUPPORT +/* + * ip_active_pkt - see if this IP packet is worth bringing the link up for. + * We don't bring the link up for IP fragments or for TCP FIN packets + * with no data. 
+ */ +#define IP_HDRLEN 20 /* bytes */ +#define IP_OFFMASK 0x1fff +#ifndef IPPROTO_TCP +#define IPPROTO_TCP 6 +#endif +#define TCP_HDRLEN 20 +#define TH_FIN 0x01 + +/* + * We use these macros because the IP header may be at an odd address, + * and some compilers might use word loads to get th_off or ip_hl. + */ + +#define net_short(x) (((x)[0] << 8) + (x)[1]) +#define get_iphl(x) (((unsigned char *)(x))[0] & 0xF) +#define get_ipoff(x) net_short((unsigned char *)(x) + 6) +#define get_ipproto(x) (((unsigned char *)(x))[9]) +#define get_tcpoff(x) (((unsigned char *)(x))[12] >> 4) +#define get_tcpflags(x) (((unsigned char *)(x))[13]) + +static int +ip_active_pkt(pkt, len) + u_char *pkt; + int len; +{ + u_char *tcp; + int hlen; + + len -= PPP_HDRLEN; + pkt += PPP_HDRLEN; + if (len < IP_HDRLEN) + return 0; + if ((get_ipoff(pkt) & IP_OFFMASK) != 0) + return 0; + if (get_ipproto(pkt) != IPPROTO_TCP) + return 1; + hlen = get_iphl(pkt) * 4; + if (len < hlen + TCP_HDRLEN) + return 0; + tcp = pkt + hlen; + if ((get_tcpflags(tcp) & TH_FIN) != 0 && len == hlen + get_tcpoff(tcp) * 4) + return 0; + return 1; +} +#endif /* DEMAND_SUPPORT */ + +#endif /* PPP_SUPPORT && PPP_IPV4_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/ipv6cp.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/ipv6cp.c new file mode 100644 index 0000000..11c18df --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/ipv6cp.c @@ -0,0 +1,1533 @@ +/* + * ipv6cp.c - PPP IPV6 Control Protocol. + * + * Copyright (c) 1999 Tommi Komulainen. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Tommi Komulainen + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +/* Original version, based on RFC2023 : + + Copyright (c) 1995, 1996, 1997 Francis.Dupont@inria.fr, INRIA Rocquencourt, + Alain.Durand@imag.fr, IMAG, + Jean-Luc.Richier@imag.fr, IMAG-LSR. + + Copyright (c) 1998, 1999 Francis.Dupont@inria.fr, GIE DYADE, + Alain.Durand@imag.fr, IMAG, + Jean-Luc.Richier@imag.fr, IMAG-LSR. + + Ce travail a été fait au sein du GIE DYADE (Groupement d'Intérêt + Économique ayant pour membres BULL S.A. et l'INRIA). 
+ + Ce logiciel informatique est disponible aux conditions + usuelles dans la recherche, c'est-à-dire qu'il peut + être utilisé, copié, modifié, distribué à l'unique + condition que ce texte soit conservé afin que + l'origine de ce logiciel soit reconnue. + + Le nom de l'Institut National de Recherche en Informatique + et en Automatique (INRIA), de l'IMAG, ou d'une personne morale + ou physique ayant participé à l'élaboration de ce logiciel ne peut + être utilisé sans son accord préalable explicite. + + Ce logiciel est fourni tel quel sans aucune garantie, + support ou responsabilité d'aucune sorte. + Ce logiciel est dérivé de sources d'origine + "University of California at Berkeley" et + "Digital Equipment Corporation" couvertes par des copyrights. + + L'Institut d'Informatique et de Mathématiques Appliquées de Grenoble (IMAG) + est une fédération d'unités mixtes de recherche du CNRS, de l'Institut National + Polytechnique de Grenoble et de l'Université Joseph Fourier regroupant + sept laboratoires dont le laboratoire Logiciels, Systèmes, Réseaux (LSR). + + This work has been done in the context of GIE DYADE (joint R & D venture + between BULL S.A. and INRIA). + + This software is available with usual "research" terms + with the aim of retain credits of the software. + Permission to use, copy, modify and distribute this software for any + purpose and without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies, + and the name of INRIA, IMAG, or any contributor not be used in advertising + or publicity pertaining to this material without the prior explicit + permission. The software is provided "as is" without any + warranties, support or liabilities of any kind. + This software is derived from source code from + "University of California at Berkeley" and + "Digital Equipment Corporation" protected by copyrights. + + Grenoble's Institute of Computer Science and Applied Mathematics (IMAG) + is a federation of seven research units funded by the CNRS, National + Polytechnic Institute of Grenoble and University Joseph Fourier. + The research unit in Software, Systems, Networks (LSR) is member of IMAG. +*/ + +/* + * Derived from : + * + * + * ipcp.c - PPP IP Control Protocol. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." 
+ * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: ipv6cp.c,v 1.21 2005/08/25 23:59:34 paulus Exp $ + */ + +/* + * @todo: + * + * Proxy Neighbour Discovery. + * + * Better defines for selecting the ordering of + * interface up / set address. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPP_IPV6_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/fsm.h" +#include "netif/ppp/ipcp.h" +#include "netif/ppp/ipv6cp.h" +#include "netif/ppp/magic.h" + +/* global vars */ +#if 0 /* UNUSED */ +int no_ifaceid_neg = 0; +#endif /* UNUSED */ + +/* + * Callbacks for fsm code. (CI = Configuration Information) + */ +static void ipv6cp_resetci(fsm *f); /* Reset our CI */ +static int ipv6cp_cilen(fsm *f); /* Return length of our CI */ +static void ipv6cp_addci(fsm *f, u_char *ucp, int *lenp); /* Add our CI */ +static int ipv6cp_ackci(fsm *f, u_char *p, int len); /* Peer ack'd our CI */ +static int ipv6cp_nakci(fsm *f, u_char *p, int len, int treat_as_reject); /* Peer nak'd our CI */ +static int ipv6cp_rejci(fsm *f, u_char *p, int len); /* Peer rej'd our CI */ +static int ipv6cp_reqci(fsm *f, u_char *inp, int *len, int reject_if_disagree); /* Rcv CI */ +static void ipv6cp_up(fsm *f); /* We're UP */ +static void ipv6cp_down(fsm *f); /* We're DOWN */ +static void ipv6cp_finished(fsm *f); /* Don't need lower layer */ + +static const fsm_callbacks ipv6cp_callbacks = { /* IPV6CP callback routines */ + ipv6cp_resetci, /* Reset our Configuration Information */ + ipv6cp_cilen, /* Length of our Configuration Information */ + ipv6cp_addci, /* Add our Configuration Information */ + ipv6cp_ackci, /* ACK our Configuration Information */ + ipv6cp_nakci, /* NAK our Configuration Information */ + ipv6cp_rejci, /* Reject our Configuration Information */ + ipv6cp_reqci, /* Request peer's Configuration Information */ + ipv6cp_up, /* Called when fsm reaches OPENED state */ + ipv6cp_down, /* Called when fsm leaves OPENED state */ + NULL, /* Called when we want the lower layer up */ + ipv6cp_finished, /* Called when we want the lower layer down */ + NULL, /* Called when Protocol-Reject received */ + NULL, /* Retransmission is necessary */ + NULL, /* Called to handle protocol-specific codes */ + "IPV6CP" /* String name of protocol */ +}; + +#if PPP_OPTIONS +/* + * Command-line options. 
+ */ +static int setifaceid(char **arg)); +static void printifaceid(option_t *, + void (*)(void *, char *, ...), void *)); + +static option_t ipv6cp_option_list[] = { + { "ipv6", o_special, (void *)setifaceid, + "Set interface identifiers for IPV6", + OPT_A2PRINTER, (void *)printifaceid }, + + { "+ipv6", o_bool, &ipv6cp_protent.enabled_flag, + "Enable IPv6 and IPv6CP", OPT_PRIO | 1 }, + { "noipv6", o_bool, &ipv6cp_protent.enabled_flag, + "Disable IPv6 and IPv6CP", OPT_PRIOSUB }, + { "-ipv6", o_bool, &ipv6cp_protent.enabled_flag, + "Disable IPv6 and IPv6CP", OPT_PRIOSUB | OPT_ALIAS }, + + { "ipv6cp-accept-local", o_bool, &ipv6cp_allowoptions[0].accept_local, + "Accept peer's interface identifier for us", 1 }, + + { "ipv6cp-use-ipaddr", o_bool, &ipv6cp_allowoptions[0].use_ip, + "Use (default) IPv4 address as interface identifier", 1 }, + + { "ipv6cp-use-persistent", o_bool, &ipv6cp_wantoptions[0].use_persistent, + "Use uniquely-available persistent value for link local address", 1 }, + + { "ipv6cp-restart", o_int, &ipv6cp_fsm[0].timeouttime, + "Set timeout for IPv6CP", OPT_PRIO }, + { "ipv6cp-max-terminate", o_int, &ipv6cp_fsm[0].maxtermtransmits, + "Set max #xmits for term-reqs", OPT_PRIO }, + { "ipv6cp-max-configure", o_int, &ipv6cp_fsm[0].maxconfreqtransmits, + "Set max #xmits for conf-reqs", OPT_PRIO }, + { "ipv6cp-max-failure", o_int, &ipv6cp_fsm[0].maxnakloops, + "Set max #conf-naks for IPv6CP", OPT_PRIO }, + + { NULL } +}; +#endif /* PPP_OPTIONS */ + +/* + * Protocol entry points from main code. + */ +static void ipv6cp_init(ppp_pcb *pcb); +static void ipv6cp_open(ppp_pcb *pcb); +static void ipv6cp_close(ppp_pcb *pcb, const char *reason); +static void ipv6cp_lowerup(ppp_pcb *pcb); +static void ipv6cp_lowerdown(ppp_pcb *pcb); +static void ipv6cp_input(ppp_pcb *pcb, u_char *p, int len); +static void ipv6cp_protrej(ppp_pcb *pcb); +#if PPP_OPTIONS +static void ipv6_check_options(void); +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT +static int ipv6_demand_conf(int u); +#endif /* DEMAND_SUPPORT */ +#if PRINTPKT_SUPPORT +static int ipv6cp_printpkt(const u_char *p, int plen, + void (*printer)(void *, const char *, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ +#if DEMAND_SUPPORT +static int ipv6_active_pkt(u_char *pkt, int len); +#endif /* DEMAND_SUPPORT */ + +const struct protent ipv6cp_protent = { + PPP_IPV6CP, + ipv6cp_init, + ipv6cp_input, + ipv6cp_protrej, + ipv6cp_lowerup, + ipv6cp_lowerdown, + ipv6cp_open, + ipv6cp_close, +#if PRINTPKT_SUPPORT + ipv6cp_printpkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "IPV6CP", + "IPV6", +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + ipv6cp_option_list, + ipv6_check_options, +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + ipv6_demand_conf, + ipv6_active_pkt +#endif /* DEMAND_SUPPORT */ +}; + +static void ipv6cp_clear_addrs(ppp_pcb *pcb, eui64_t ourid, eui64_t hisid); +#if 0 /* UNUSED */ +static void ipv6cp_script(char *)); +static void ipv6cp_script_done(void *)); +#endif /* UNUSED */ + +/* + * Lengths of configuration options. + */ +#define CILEN_VOID 2 +#define CILEN_COMPRESS 4 /* length for RFC2023 compress opt. */ +#define CILEN_IFACEID 10 /* RFC2472, interface identifier */ + +#define CODENAME(x) ((x) == CONFACK ? "ACK" : \ + (x) == CONFNAK ? "NAK" : "REJ") + +#if 0 /* UNUSED */ +/* + * This state variable is used to ensure that we don't + * run an ipcp-up/down script while one is already running. 
+ */ +static enum script_state { + s_down, + s_up, +} ipv6cp_script_state; +static pid_t ipv6cp_script_pid; +#endif /* UNUSED */ + +static char *llv6_ntoa(eui64_t ifaceid); + +#if PPP_OPTIONS +/* + * setifaceid - set the interface identifiers manually + */ +static int +setifaceid(argv) + char **argv; +{ + char *comma, *arg, c; + ipv6cp_options *wo = &ipv6cp_wantoptions[0]; + struct in6_addr addr; + static int prio_local, prio_remote; + +#define VALIDID(a) ( (((a).s6_addr32[0] == 0) && ((a).s6_addr32[1] == 0)) && \ + (((a).s6_addr32[2] != 0) || ((a).s6_addr32[3] != 0)) ) + + arg = *argv; + if ((comma = strchr(arg, ',')) == NULL) + comma = arg + strlen(arg); + + /* + * If comma first character, then no local identifier + */ + if (comma != arg) { + c = *comma; + *comma = '\0'; + + if (inet_pton(AF_INET6, arg, &addr) == 0 || !VALIDID(addr)) { + option_error("Illegal interface identifier (local): %s", arg); + return 0; + } + + if (option_priority >= prio_local) { + eui64_copy(addr.s6_addr32[2], wo->ourid); + wo->opt_local = 1; + prio_local = option_priority; + } + *comma = c; + } + + /* + * If comma last character, the no remote identifier + */ + if (*comma != 0 && *++comma != '\0') { + if (inet_pton(AF_INET6, comma, &addr) == 0 || !VALIDID(addr)) { + option_error("Illegal interface identifier (remote): %s", comma); + return 0; + } + if (option_priority >= prio_remote) { + eui64_copy(addr.s6_addr32[2], wo->hisid); + wo->opt_remote = 1; + prio_remote = option_priority; + } + } + + if (override_value("+ipv6", option_priority, option_source)) + ipv6cp_protent.enabled_flag = 1; + return 1; +} + +static void +printifaceid(opt, printer, arg) + option_t *opt; + void (*printer)(void *, char *, ...)); + void *arg; +{ + ipv6cp_options *wo = &ipv6cp_wantoptions[0]; + + if (wo->opt_local) + printer(arg, "%s", llv6_ntoa(wo->ourid)); + printer(arg, ","); + if (wo->opt_remote) + printer(arg, "%s", llv6_ntoa(wo->hisid)); +} +#endif /* PPP_OPTIONS */ + +/* + * Make a string representation of a network address. + */ +static char * +llv6_ntoa(eui64_t ifaceid) +{ + static char b[26]; + + sprintf(b, "fe80::%02x%02x:%02x%02x:%02x%02x:%02x%02x", + ifaceid.e8[0], ifaceid.e8[1], ifaceid.e8[2], ifaceid.e8[3], + ifaceid.e8[4], ifaceid.e8[5], ifaceid.e8[6], ifaceid.e8[7]); + + return b; +} + + +/* + * ipv6cp_init - Initialize IPV6CP. + */ +static void ipv6cp_init(ppp_pcb *pcb) { + fsm *f = &pcb->ipv6cp_fsm; + ipv6cp_options *wo = &pcb->ipv6cp_wantoptions; + ipv6cp_options *ao = &pcb->ipv6cp_allowoptions; + + f->pcb = pcb; + f->protocol = PPP_IPV6CP; + f->callbacks = &ipv6cp_callbacks; + fsm_init(f); + +#if 0 /* Not necessary, everything is cleared in ppp_new() */ + memset(wo, 0, sizeof(*wo)); + memset(ao, 0, sizeof(*ao)); +#endif /* 0 */ + + wo->accept_local = 1; + wo->neg_ifaceid = 1; + ao->neg_ifaceid = 1; + +#ifdef IPV6CP_COMP + wo->neg_vj = 1; + ao->neg_vj = 1; + wo->vj_protocol = IPV6CP_COMP; +#endif + +} + + +/* + * ipv6cp_open - IPV6CP is allowed to come up. + */ +static void ipv6cp_open(ppp_pcb *pcb) { + fsm_open(&pcb->ipv6cp_fsm); +} + + +/* + * ipv6cp_close - Take IPV6CP down. + */ +static void ipv6cp_close(ppp_pcb *pcb, const char *reason) { + fsm_close(&pcb->ipv6cp_fsm, reason); +} + + +/* + * ipv6cp_lowerup - The lower layer is up. + */ +static void ipv6cp_lowerup(ppp_pcb *pcb) { + fsm_lowerup(&pcb->ipv6cp_fsm); +} + + +/* + * ipv6cp_lowerdown - The lower layer is down. + */ +static void ipv6cp_lowerdown(ppp_pcb *pcb) { + fsm_lowerdown(&pcb->ipv6cp_fsm); +} + + +/* + * ipv6cp_input - Input IPV6CP packet. 
+ */ +static void ipv6cp_input(ppp_pcb *pcb, u_char *p, int len) { + fsm_input(&pcb->ipv6cp_fsm, p, len); +} + + +/* + * ipv6cp_protrej - A Protocol-Reject was received for IPV6CP. + * + * Pretend the lower layer went down, so we shut up. + */ +static void ipv6cp_protrej(ppp_pcb *pcb) { + fsm_lowerdown(&pcb->ipv6cp_fsm); +} + + +/* + * ipv6cp_resetci - Reset our CI. + */ +static void ipv6cp_resetci(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *wo = &pcb->ipv6cp_wantoptions; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + ipv6cp_options *ao = &pcb->ipv6cp_allowoptions; + + wo->req_ifaceid = wo->neg_ifaceid && ao->neg_ifaceid; + + if (!wo->opt_local) { + eui64_magic_nz(wo->ourid); + } + + *go = *wo; + eui64_zero(go->hisid); /* last proposed interface identifier */ +} + + +/* + * ipv6cp_cilen - Return length of our CI. + */ +static int ipv6cp_cilen(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + +#ifdef IPV6CP_COMP +#define LENCIVJ(neg) (neg ? CILEN_COMPRESS : 0) +#endif /* IPV6CP_COMP */ +#define LENCIIFACEID(neg) (neg ? CILEN_IFACEID : 0) + + return (LENCIIFACEID(go->neg_ifaceid) + +#ifdef IPV6CP_COMP + LENCIVJ(go->neg_vj) + +#endif /* IPV6CP_COMP */ + 0); +} + + +/* + * ipv6cp_addci - Add our desired CIs to a packet. + */ +static void ipv6cp_addci(fsm *f, u_char *ucp, int *lenp) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + int len = *lenp; + +#ifdef IPV6CP_COMP +#define ADDCIVJ(opt, neg, val) \ + if (neg) { \ + int vjlen = CILEN_COMPRESS; \ + if (len >= vjlen) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(vjlen, ucp); \ + PUTSHORT(val, ucp); \ + len -= vjlen; \ + } else \ + neg = 0; \ + } +#endif /* IPV6CP_COMP */ + +#define ADDCIIFACEID(opt, neg, val1) \ + if (neg) { \ + int idlen = CILEN_IFACEID; \ + if (len >= idlen) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(idlen, ucp); \ + eui64_put(val1, ucp); \ + len -= idlen; \ + } else \ + neg = 0; \ + } + + ADDCIIFACEID(CI_IFACEID, go->neg_ifaceid, go->ourid); + +#ifdef IPV6CP_COMP + ADDCIVJ(CI_COMPRESSTYPE, go->neg_vj, go->vj_protocol); +#endif /* IPV6CP_COMP */ + + *lenp -= len; +} + + +/* + * ipv6cp_ackci - Ack our CIs. + * + * Returns: + * 0 - Ack was bad. + * 1 - Ack was good. + */ +static int ipv6cp_ackci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + u_short cilen, citype; +#ifdef IPV6CP_COMP + u_short cishort; +#endif /* IPV6CP_COMP */ + eui64_t ifaceid; + + /* + * CIs must be in exactly the same order that we sent... + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ + +#ifdef IPV6CP_COMP +#define ACKCIVJ(opt, neg, val) \ + if (neg) { \ + int vjlen = CILEN_COMPRESS; \ + if ((len -= vjlen) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != vjlen || \ + citype != opt) \ + goto bad; \ + GETSHORT(cishort, p); \ + if (cishort != val) \ + goto bad; \ + } +#endif /* IPV6CP_COMP */ + +#define ACKCIIFACEID(opt, neg, val1) \ + if (neg) { \ + int idlen = CILEN_IFACEID; \ + if ((len -= idlen) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != idlen || \ + citype != opt) \ + goto bad; \ + eui64_get(ifaceid, p); \ + if (! eui64_equals(val1, ifaceid)) \ + goto bad; \ + } + + ACKCIIFACEID(CI_IFACEID, go->neg_ifaceid, go->ourid); + +#ifdef IPV6CP_COMP + ACKCIVJ(CI_COMPRESSTYPE, go->neg_vj, go->vj_protocol); +#endif /* IPV6CP_COMP */ + + /* + * If there are any remaining CIs, then this packet is bad. 
+ */ + if (len != 0) + goto bad; + return (1); + +bad: + IPV6CPDEBUG(("ipv6cp_ackci: received bad Ack!")); + return (0); +} + +/* + * ipv6cp_nakci - Peer has sent a NAK for some of our CIs. + * This should not modify any state if the Nak is bad + * or if IPV6CP is in the OPENED state. + * + * Returns: + * 0 - Nak was bad. + * 1 - Nak was good. + */ +static int ipv6cp_nakci(fsm *f, u_char *p, int len, int treat_as_reject) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + u_char citype, cilen, *next; +#ifdef IPV6CP_COMP + u_short cishort; +#endif /* IPV6CP_COMP */ + eui64_t ifaceid; + ipv6cp_options no; /* options we've seen Naks for */ + ipv6cp_options try_; /* options to request next time */ + + BZERO(&no, sizeof(no)); + try_ = *go; + + /* + * Any Nak'd CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ +#define NAKCIIFACEID(opt, neg, code) \ + if (go->neg && \ + len >= (cilen = CILEN_IFACEID) && \ + p[1] == cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + eui64_get(ifaceid, p); \ + no.neg = 1; \ + code \ + } + +#ifdef IPV6CP_COMP +#define NAKCIVJ(opt, neg, code) \ + if (go->neg && \ + ((cilen = p[1]) == CILEN_COMPRESS) && \ + len >= cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + no.neg = 1; \ + code \ + } +#endif /* IPV6CP_COMP */ + + /* + * Accept the peer's idea of {our,his} interface identifier, if different + * from our idea, only if the accept_{local,remote} flag is set. + */ + NAKCIIFACEID(CI_IFACEID, neg_ifaceid, + if (treat_as_reject) { + try_.neg_ifaceid = 0; + } else if (go->accept_local) { + while (eui64_iszero(ifaceid) || + eui64_equals(ifaceid, go->hisid)) /* bad luck */ + eui64_magic(ifaceid); + try_.ourid = ifaceid; + IPV6CPDEBUG(("local LL address %s", llv6_ntoa(ifaceid))); + } + ); + +#ifdef IPV6CP_COMP + NAKCIVJ(CI_COMPRESSTYPE, neg_vj, + { + if (cishort == IPV6CP_COMP && !treat_as_reject) { + try_.vj_protocol = cishort; + } else { + try_.neg_vj = 0; + } + } + ); +#endif /* IPV6CP_COMP */ + + /* + * There may be remaining CIs, if the peer is requesting negotiation + * on an option that we didn't include in our request packet. + * If they want to negotiate about interface identifier, we comply. + * If they want us to ask for compression, we refuse. + */ + while (len >= CILEN_VOID) { + GETCHAR(citype, p); + GETCHAR(cilen, p); + if ( cilen < CILEN_VOID || (len -= cilen) < 0 ) + goto bad; + next = p + cilen - 2; + + switch (citype) { +#ifdef IPV6CP_COMP + case CI_COMPRESSTYPE: + if (go->neg_vj || no.neg_vj || + (cilen != CILEN_COMPRESS)) + goto bad; + no.neg_vj = 1; + break; +#endif /* IPV6CP_COMP */ + case CI_IFACEID: + if (go->neg_ifaceid || no.neg_ifaceid || cilen != CILEN_IFACEID) + goto bad; + try_.neg_ifaceid = 1; + eui64_get(ifaceid, p); + if (go->accept_local) { + while (eui64_iszero(ifaceid) || + eui64_equals(ifaceid, go->hisid)) /* bad luck */ + eui64_magic(ifaceid); + try_.ourid = ifaceid; + } + no.neg_ifaceid = 1; + break; + default: + break; + } + p = next; + } + + /* If there is still anything left, this packet is bad. */ + if (len != 0) + goto bad; + + /* + * OK, the Nak is good. Now we can update state. + */ + if (f->state != PPP_FSM_OPENED) + *go = try_; + + return 1; + +bad: + IPV6CPDEBUG(("ipv6cp_nakci: received bad Nak!")); + return 0; +} + + +/* + * ipv6cp_rejci - Reject some of our CIs. 
+ */ +static int ipv6cp_rejci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + u_char cilen; +#ifdef IPV6CP_COMP + u_short cishort; +#endif /* IPV6CP_COMP */ + eui64_t ifaceid; + ipv6cp_options try_; /* options to request next time */ + + try_ = *go; + /* + * Any Rejected CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ +#define REJCIIFACEID(opt, neg, val1) \ + if (go->neg && \ + len >= (cilen = CILEN_IFACEID) && \ + p[1] == cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + eui64_get(ifaceid, p); \ + /* Check rejected value. */ \ + if (! eui64_equals(ifaceid, val1)) \ + goto bad; \ + try_.neg = 0; \ + } + +#ifdef IPV6CP_COMP +#define REJCIVJ(opt, neg, val) \ + if (go->neg && \ + p[1] == CILEN_COMPRESS && \ + len >= p[1] && \ + p[0] == opt) { \ + len -= p[1]; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + /* Check rejected value. */ \ + if (cishort != val) \ + goto bad; \ + try_.neg = 0; \ + } +#endif /* IPV6CP_COMP */ + + REJCIIFACEID(CI_IFACEID, neg_ifaceid, go->ourid); + +#ifdef IPV6CP_COMP + REJCIVJ(CI_COMPRESSTYPE, neg_vj, go->vj_protocol); +#endif /* IPV6CP_COMP */ + + /* + * If there are any remaining CIs, then this packet is bad. + */ + if (len != 0) + goto bad; + /* + * Now we can update state. + */ + if (f->state != PPP_FSM_OPENED) + *go = try_; + return 1; + +bad: + IPV6CPDEBUG(("ipv6cp_rejci: received bad Reject!")); + return 0; +} + + +/* + * ipv6cp_reqci - Check the peer's requested CIs and send appropriate response. + * + * Returns: CONFACK, CONFNAK or CONFREJ and input packet modified + * appropriately. If reject_if_disagree is non-zero, doesn't return + * CONFNAK; returns CONFREJ if it can't return CONFACK. + * + * inp = Requested CIs + * len = Length of requested CIs + * + */ +static int ipv6cp_reqci(fsm *f, u_char *inp, int *len, int reject_if_disagree) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *wo = &pcb->ipv6cp_wantoptions; + ipv6cp_options *ho = &pcb->ipv6cp_hisoptions; + ipv6cp_options *ao = &pcb->ipv6cp_allowoptions; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + u_char *cip, *next; /* Pointer to current and next CIs */ + u_short cilen, citype; /* Parsed len, type */ +#ifdef IPV6CP_COMP + u_short cishort; /* Parsed short value */ +#endif /* IPV6CP_COMP */ + eui64_t ifaceid; /* Parsed interface identifier */ + int rc = CONFACK; /* Final packet return code */ + int orc; /* Individual option return code */ + u_char *p; /* Pointer to next char to parse */ + u_char *ucp = inp; /* Pointer to current output char */ + int l = *len; /* Length left */ + + /* + * Reset all his options. + */ + BZERO(ho, sizeof(*ho)); + + /* + * Process all his options. + */ + next = inp; + while (l) { + orc = CONFACK; /* Assume success */ + cip = p = next; /* Remember begining of CI */ + if (l < 2 || /* Not enough data for CI header or */ + p[1] < 2 || /* CI length too small or */ + p[1] > l) { /* CI length too big? 
*/ + IPV6CPDEBUG(("ipv6cp_reqci: bad CI length!")); + orc = CONFREJ; /* Reject bad CI */ + cilen = l; /* Reject till end of packet */ + l = 0; /* Don't loop again */ + goto endswitch; + } + GETCHAR(citype, p); /* Parse CI type */ + GETCHAR(cilen, p); /* Parse CI length */ + l -= cilen; /* Adjust remaining length */ + next += cilen; /* Step to next CI */ + + switch (citype) { /* Check CI type */ + case CI_IFACEID: + IPV6CPDEBUG(("ipv6cp: received interface identifier ")); + + if (!ao->neg_ifaceid || + cilen != CILEN_IFACEID) { /* Check CI length */ + orc = CONFREJ; /* Reject CI */ + break; + } + + /* + * If he has no interface identifier, or if we both have same + * identifier then NAK it with new idea. + * In particular, if we don't know his identifier, but he does, + * then accept it. + */ + eui64_get(ifaceid, p); + IPV6CPDEBUG(("(%s)", llv6_ntoa(ifaceid))); + if (eui64_iszero(ifaceid) && eui64_iszero(go->ourid)) { + orc = CONFREJ; /* Reject CI */ + break; + } + if (!eui64_iszero(wo->hisid) && + !eui64_equals(ifaceid, wo->hisid) && + eui64_iszero(go->hisid)) { + + orc = CONFNAK; + ifaceid = wo->hisid; + go->hisid = ifaceid; + DECPTR(sizeof(ifaceid), p); + eui64_put(ifaceid, p); + } else + if (eui64_iszero(ifaceid) || eui64_equals(ifaceid, go->ourid)) { + orc = CONFNAK; + if (eui64_iszero(go->hisid)) /* first time, try option */ + ifaceid = wo->hisid; + while (eui64_iszero(ifaceid) || + eui64_equals(ifaceid, go->ourid)) /* bad luck */ + eui64_magic(ifaceid); + go->hisid = ifaceid; + DECPTR(sizeof(ifaceid), p); + eui64_put(ifaceid, p); + } + + ho->neg_ifaceid = 1; + ho->hisid = ifaceid; + break; + +#ifdef IPV6CP_COMP + case CI_COMPRESSTYPE: + IPV6CPDEBUG(("ipv6cp: received COMPRESSTYPE ")); + if (!ao->neg_vj || + (cilen != CILEN_COMPRESS)) { + orc = CONFREJ; + break; + } + GETSHORT(cishort, p); + IPV6CPDEBUG(("(%d)", cishort)); + + if (!(cishort == IPV6CP_COMP)) { + orc = CONFREJ; + break; + } + + ho->neg_vj = 1; + ho->vj_protocol = cishort; + break; +#endif /* IPV6CP_COMP */ + + default: + orc = CONFREJ; + break; + } + +endswitch: + IPV6CPDEBUG((" (%s)\n", CODENAME(orc))); + + if (orc == CONFACK && /* Good CI */ + rc != CONFACK) /* but prior CI wasnt? */ + continue; /* Don't send this one */ + + if (orc == CONFNAK) { /* Nak this CI? */ + if (reject_if_disagree) /* Getting fed up with sending NAKs? */ + orc = CONFREJ; /* Get tough if so */ + else { + if (rc == CONFREJ) /* Rejecting prior CI? */ + continue; /* Don't send this one */ + if (rc == CONFACK) { /* Ack'd all prior CIs? */ + rc = CONFNAK; /* Not anymore... */ + ucp = inp; /* Backup */ + } + } + } + + if (orc == CONFREJ && /* Reject this CI */ + rc != CONFREJ) { /* but no prior ones? */ + rc = CONFREJ; + ucp = inp; /* Backup */ + } + + /* Need to move CI? */ + if (ucp != cip) + MEMCPY(ucp, cip, cilen); /* Move it */ + + /* Update output pointer */ + INCPTR(cilen, ucp); + } + + /* + * If we aren't rejecting this packet, and we want to negotiate + * their identifier and they didn't send their identifier, then we + * send a NAK with a CI_IFACEID option appended. We assume the + * input buffer is long enough that we can append the extra + * option safely. 
+ */ + if (rc != CONFREJ && !ho->neg_ifaceid && + wo->req_ifaceid && !reject_if_disagree) { + if (rc == CONFACK) { + rc = CONFNAK; + ucp = inp; /* reset pointer */ + wo->req_ifaceid = 0; /* don't ask again */ + } + PUTCHAR(CI_IFACEID, ucp); + PUTCHAR(CILEN_IFACEID, ucp); + eui64_put(wo->hisid, ucp); + } + + *len = ucp - inp; /* Compute output length */ + IPV6CPDEBUG(("ipv6cp: returning Configure-%s", CODENAME(rc))); + return (rc); /* Return final code */ +} + +#if PPP_OPTIONS +/* + * ipv6_check_options - check that any IP-related options are OK, + * and assign appropriate defaults. + */ +static void ipv6_check_options() { + ipv6cp_options *wo = &ipv6cp_wantoptions[0]; + + if (!ipv6cp_protent.enabled_flag) + return; + + /* + * Persistent link-local id is only used when user has not explicitly + * configure/hard-code the id + */ + if ((wo->use_persistent) && (!wo->opt_local) && (!wo->opt_remote)) { + + /* + * On systems where there are no Ethernet interfaces used, there + * may be other ways to obtain a persistent id. Right now, it + * will fall back to using magic [see eui64_magic] below when + * an EUI-48 from MAC address can't be obtained. Other possibilities + * include obtaining EEPROM serial numbers, or some other unique + * yet persistent number. On Sparc platforms, this is possible, + * but too bad there's no standards yet for x86 machines. + */ + if (ether_to_eui64(&wo->ourid)) { + wo->opt_local = 1; + } + } + + if (!wo->opt_local) { /* init interface identifier */ + if (wo->use_ip && eui64_iszero(wo->ourid)) { + eui64_setlo32(wo->ourid, lwip_ntohl(ipcp_wantoptions[0].ouraddr)); + if (!eui64_iszero(wo->ourid)) + wo->opt_local = 1; + } + + while (eui64_iszero(wo->ourid)) + eui64_magic(wo->ourid); + } + + if (!wo->opt_remote) { + if (wo->use_ip && eui64_iszero(wo->hisid)) { + eui64_setlo32(wo->hisid, lwip_ntohl(ipcp_wantoptions[0].hisaddr)); + if (!eui64_iszero(wo->hisid)) + wo->opt_remote = 1; + } + } + + if (demand && (eui64_iszero(wo->ourid) || eui64_iszero(wo->hisid))) { + option_error("local/remote LL address required for demand-dialling\n"); + exit(1); + } +} +#endif /* PPP_OPTIONS */ + +#if DEMAND_SUPPORT +/* + * ipv6_demand_conf - configure the interface as though + * IPV6CP were up, for use with dial-on-demand. + */ +static int ipv6_demand_conf(int u) { + ipv6cp_options *wo = &ipv6cp_wantoptions[u]; + + if (!sif6up(u)) + return 0; + + if (!sif6addr(u, wo->ourid, wo->hisid)) + return 0; + + if (!sifnpmode(u, PPP_IPV6, NPMODE_QUEUE)) + return 0; + + ppp_notice("ipv6_demand_conf"); + ppp_notice("local LL address %s", llv6_ntoa(wo->ourid)); + ppp_notice("remote LL address %s", llv6_ntoa(wo->hisid)); + + return 1; +} +#endif /* DEMAND_SUPPORT */ + + +/* + * ipv6cp_up - IPV6CP has come UP. + * + * Configure the IPv6 network interface appropriately and bring it up. + */ +static void ipv6cp_up(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *wo = &pcb->ipv6cp_wantoptions; + ipv6cp_options *ho = &pcb->ipv6cp_hisoptions; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + + IPV6CPDEBUG(("ipv6cp: up")); + + /* + * We must have a non-zero LL address for both ends of the link. 
+ */ + if (!ho->neg_ifaceid) + ho->hisid = wo->hisid; + +#if 0 /* UNUSED */ + if(!no_ifaceid_neg) { +#endif /* UNUSED */ + if (eui64_iszero(ho->hisid)) { + ppp_error("Could not determine remote LL address"); + ipv6cp_close(f->pcb, "Could not determine remote LL address"); + return; + } + if (eui64_iszero(go->ourid)) { + ppp_error("Could not determine local LL address"); + ipv6cp_close(f->pcb, "Could not determine local LL address"); + return; + } + if (eui64_equals(go->ourid, ho->hisid)) { + ppp_error("local and remote LL addresses are equal"); + ipv6cp_close(f->pcb, "local and remote LL addresses are equal"); + return; + } +#if 0 /* UNUSED */ + } +#endif /* UNUSED */ +#if 0 /* UNUSED */ + script_setenv("LLLOCAL", llv6_ntoa(go->ourid), 0); + script_setenv("LLREMOTE", llv6_ntoa(ho->hisid), 0); +#endif /* UNUSED */ + +#ifdef IPV6CP_COMP + /* set tcp compression */ + sif6comp(f->unit, ho->neg_vj); +#endif + +#if DEMAND_SUPPORT + /* + * If we are doing dial-on-demand, the interface is already + * configured, so we put out any saved-up packets, then set the + * interface to pass IPv6 packets. + */ + if (demand) { + if (! eui64_equals(go->ourid, wo->ourid) || + ! eui64_equals(ho->hisid, wo->hisid)) { + if (! eui64_equals(go->ourid, wo->ourid)) + warn("Local LL address changed to %s", + llv6_ntoa(go->ourid)); + if (! eui64_equals(ho->hisid, wo->hisid)) + warn("Remote LL address changed to %s", + llv6_ntoa(ho->hisid)); + ipv6cp_clear_addrs(f->pcb, go->ourid, ho->hisid); + + /* Set the interface to the new addresses */ + if (!sif6addr(f->pcb, go->ourid, ho->hisid)) { + if (debug) + warn("sif6addr failed"); + ipv6cp_close(f->unit, "Interface configuration failed"); + return; + } + + } + demand_rexmit(PPP_IPV6); + sifnpmode(f->unit, PPP_IPV6, NPMODE_PASS); + + } else +#endif /* DEMAND_SUPPORT */ + { + /* + * Set LL addresses + */ + if (!sif6addr(f->pcb, go->ourid, ho->hisid)) { + PPPDEBUG(LOG_DEBUG, ("sif6addr failed")); + ipv6cp_close(f->pcb, "Interface configuration failed"); + return; + } + + /* bring the interface up for IPv6 */ + if (!sif6up(f->pcb)) { + PPPDEBUG(LOG_DEBUG, ("sif6up failed (IPV6)")); + ipv6cp_close(f->pcb, "Interface configuration failed"); + return; + } +#if DEMAND_SUPPORT + sifnpmode(f->pcb, PPP_IPV6, NPMODE_PASS); +#endif /* DEMAND_SUPPORT */ + + ppp_notice("local LL address %s", llv6_ntoa(go->ourid)); + ppp_notice("remote LL address %s", llv6_ntoa(ho->hisid)); + } + + np_up(f->pcb, PPP_IPV6); + pcb->ipv6cp_is_up = 1; + +#if 0 /* UNUSED */ + /* + * Execute the ipv6-up script, like this: + * /etc/ppp/ipv6-up interface tty speed local-LL remote-LL + */ + if (ipv6cp_script_state == s_down && ipv6cp_script_pid == 0) { + ipv6cp_script_state = s_up; + ipv6cp_script(_PATH_IPV6UP); + } +#endif /* UNUSED */ +} + + +/* + * ipv6cp_down - IPV6CP has gone DOWN. + * + * Take the IPv6 network interface down, clear its addresses + * and delete routes through it. + */ +static void ipv6cp_down(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + ipv6cp_options *ho = &pcb->ipv6cp_hisoptions; + + IPV6CPDEBUG(("ipv6cp: down")); +#if PPP_STATS_SUPPORT + update_link_stats(f->unit); +#endif /* PPP_STATS_SUPPORT */ + if (pcb->ipv6cp_is_up) { + pcb->ipv6cp_is_up = 0; + np_down(f->pcb, PPP_IPV6); + } +#ifdef IPV6CP_COMP + sif6comp(f->unit, 0); +#endif + +#if DEMAND_SUPPORT + /* + * If we are doing dial-on-demand, set the interface + * to queue up outgoing packets (for now). 
+ */ + if (demand) { + sifnpmode(f->pcb, PPP_IPV6, NPMODE_QUEUE); + } else +#endif /* DEMAND_SUPPORT */ + { +#if DEMAND_SUPPORT + sifnpmode(f->pcb, PPP_IPV6, NPMODE_DROP); +#endif /* DEMAND_SUPPORT */ + ipv6cp_clear_addrs(f->pcb, + go->ourid, + ho->hisid); + sif6down(f->pcb); + } + +#if 0 /* UNUSED */ + /* Execute the ipv6-down script */ + if (ipv6cp_script_state == s_up && ipv6cp_script_pid == 0) { + ipv6cp_script_state = s_down; + ipv6cp_script(_PATH_IPV6DOWN); + } +#endif /* UNUSED */ +} + + +/* + * ipv6cp_clear_addrs() - clear the interface addresses, routes, + * proxy neighbour discovery entries, etc. + */ +static void ipv6cp_clear_addrs(ppp_pcb *pcb, eui64_t ourid, eui64_t hisid) { + cif6addr(pcb, ourid, hisid); +} + + +/* + * ipv6cp_finished - possibly shut down the lower layers. + */ +static void ipv6cp_finished(fsm *f) { + np_finished(f->pcb, PPP_IPV6); +} + + +#if 0 /* UNUSED */ +/* + * ipv6cp_script_done - called when the ipv6-up or ipv6-down script + * has finished. + */ +static void +ipv6cp_script_done(arg) + void *arg; +{ + ipv6cp_script_pid = 0; + switch (ipv6cp_script_state) { + case s_up: + if (ipv6cp_fsm[0].state != PPP_FSM_OPENED) { + ipv6cp_script_state = s_down; + ipv6cp_script(_PATH_IPV6DOWN); + } + break; + case s_down: + if (ipv6cp_fsm[0].state == PPP_FSM_OPENED) { + ipv6cp_script_state = s_up; + ipv6cp_script(_PATH_IPV6UP); + } + break; + } +} + + +/* + * ipv6cp_script - Execute a script with arguments + * interface-name tty-name speed local-LL remote-LL. + */ +static void +ipv6cp_script(script) + char *script; +{ + char strspeed[32], strlocal[32], strremote[32]; + char *argv[8]; + + sprintf(strspeed, "%d", baud_rate); + strcpy(strlocal, llv6_ntoa(ipv6cp_gotoptions[0].ourid)); + strcpy(strremote, llv6_ntoa(ipv6cp_hisoptions[0].hisid)); + + argv[0] = script; + argv[1] = ifname; + argv[2] = devnam; + argv[3] = strspeed; + argv[4] = strlocal; + argv[5] = strremote; + argv[6] = ipparam; + argv[7] = NULL; + + ipv6cp_script_pid = run_program(script, argv, 0, ipv6cp_script_done, + NULL, 0); +} +#endif /* UNUSED */ + +#if PRINTPKT_SUPPORT +/* + * ipv6cp_printpkt - print the contents of an IPV6CP packet. 
+ */ +static const char* const ipv6cp_codenames[] = { + "ConfReq", "ConfAck", "ConfNak", "ConfRej", + "TermReq", "TermAck", "CodeRej" +}; + +static int ipv6cp_printpkt(const u_char *p, int plen, + void (*printer)(void *, const char *, ...), void *arg) { + int code, id, len, olen; + const u_char *pstart, *optend; +#ifdef IPV6CP_COMP + u_short cishort; +#endif /* IPV6CP_COMP */ + eui64_t ifaceid; + + if (plen < HEADERLEN) + return 0; + pstart = p; + GETCHAR(code, p); + GETCHAR(id, p); + GETSHORT(len, p); + if (len < HEADERLEN || len > plen) + return 0; + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(ipv6cp_codenames)) + printer(arg, " %s", ipv6cp_codenames[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= HEADERLEN; + switch (code) { + case CONFREQ: + case CONFACK: + case CONFNAK: + case CONFREJ: + /* print option list */ + while (len >= 2) { + GETCHAR(code, p); + GETCHAR(olen, p); + p -= 2; + if (olen < 2 || olen > len) { + break; + } + printer(arg, " <"); + len -= olen; + optend = p + olen; + switch (code) { +#ifdef IPV6CP_COMP + case CI_COMPRESSTYPE: + if (olen >= CILEN_COMPRESS) { + p += 2; + GETSHORT(cishort, p); + printer(arg, "compress "); + printer(arg, "0x%x", cishort); + } + break; +#endif /* IPV6CP_COMP */ + case CI_IFACEID: + if (olen == CILEN_IFACEID) { + p += 2; + eui64_get(ifaceid, p); + printer(arg, "addr %s", llv6_ntoa(ifaceid)); + } + break; + default: + break; + } + while (p < optend) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + printer(arg, ">"); + } + break; + + case TERMACK: + case TERMREQ: + if (len > 0 && *p >= ' ' && *p < 0x7f) { + printer(arg, " "); + ppp_print_string(p, len, printer, arg); + p += len; + len = 0; + } + break; + default: + break; + } + + /* print the rest of the bytes in the packet */ + for (; len > 0; --len) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + + return p - pstart; +} +#endif /* PRINTPKT_SUPPORT */ + +#if DEMAND_SUPPORT +/* + * ipv6_active_pkt - see if this IP packet is worth bringing the link up for. + * We don't bring the link up for IP fragments or for TCP FIN packets + * with no data. + */ +#define IP6_HDRLEN 40 /* bytes */ +#define IP6_NHDR_FRAG 44 /* fragment IPv6 header */ +#define TCP_HDRLEN 20 +#define TH_FIN 0x01 + +/* + * We use these macros because the IP header may be at an odd address, + * and some compilers might use word loads to get th_off or ip_hl. + */ + +#define get_ip6nh(x) (((unsigned char *)(x))[6]) +#define get_tcpoff(x) (((unsigned char *)(x))[12] >> 4) +#define get_tcpflags(x) (((unsigned char *)(x))[13]) + +static int ipv6_active_pkt(u_char *pkt, int len) { + u_char *tcp; + + len -= PPP_HDRLEN; + pkt += PPP_HDRLEN; + if (len < IP6_HDRLEN) + return 0; + if (get_ip6nh(pkt) == IP6_NHDR_FRAG) + return 0; + if (get_ip6nh(pkt) != IPPROTO_TCP) + return 1; + if (len < IP6_HDRLEN + TCP_HDRLEN) + return 0; + tcp = pkt + IP6_HDRLEN; + if ((get_tcpflags(tcp) & TH_FIN) != 0 && len == IP6_HDRLEN + get_tcpoff(tcp) * 4) + return 0; + return 1; +} +#endif /* DEMAND_SUPPORT */ + +#endif /* PPP_SUPPORT && PPP_IPV6_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/lcp.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/lcp.c new file mode 100644 index 0000000..90ed183 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/lcp.c @@ -0,0 +1,2790 @@ +/* + * lcp.c - PPP Link Control Protocol. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +/* + * @todo: + */ + +#if 0 /* UNUSED */ +#include +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/lcp.h" +#if CHAP_SUPPORT +#include "netif/ppp/chap-new.h" +#endif /* CHAP_SUPPORT */ +#include "netif/ppp/magic.h" + +/* + * When the link comes up we want to be able to wait for a short while, + * or until seeing some input from the peer, before starting to send + * configure-requests. We do this by delaying the fsm_lowerup call. + */ +/* steal a bit in fsm flags word */ +#define DELAYED_UP 0x80 + +static void lcp_delayed_up(void *arg); + +/* + * LCP-related command-line options. 
+ */ +#if 0 /* UNUSED */ +int lcp_echo_interval = 0; /* Interval between LCP echo-requests */ +int lcp_echo_fails = 0; /* Tolerance to unanswered echo-requests */ +#endif /* UNUSED */ + +#if 0 /* UNUSED */ +/* options */ +static u_int lcp_echo_interval = LCP_ECHOINTERVAL; /* Interval between LCP echo-requests */ +static u_int lcp_echo_fails = LCP_MAXECHOFAILS; /* Tolerance to unanswered echo-requests */ +#endif /* UNUSED */ + +#if 0 /* UNUSED */ +#if PPP_LCP_ADAPTIVE +bool lcp_echo_adaptive = 0; /* request echo only if the link was idle */ +#endif +bool lax_recv = 0; /* accept control chars in asyncmap */ +bool noendpoint = 0; /* don't send/accept endpoint discriminator */ +#endif /* UNUSED */ + +#if PPP_OPTIONS +static int noopt (char **); +#endif /* PPP_OPTIONS */ + +#ifdef HAVE_MULTILINK +static int setendpoint (char **); +static void printendpoint (option_t *, void (*)(void *, char *, ...), + void *); +#endif /* HAVE_MULTILINK */ + +#if PPP_OPTIONS +static option_t lcp_option_list[] = { + /* LCP options */ + { "-all", o_special_noarg, (void *)noopt, + "Don't request/allow any LCP options" }, + + { "noaccomp", o_bool, &lcp_wantoptions[0].neg_accompression, + "Disable address/control compression", + OPT_A2CLR, &lcp_allowoptions[0].neg_accompression }, + { "-ac", o_bool, &lcp_wantoptions[0].neg_accompression, + "Disable address/control compression", + OPT_ALIAS | OPT_A2CLR, &lcp_allowoptions[0].neg_accompression }, + + { "asyncmap", o_uint32, &lcp_wantoptions[0].asyncmap, + "Set asyncmap (for received packets)", + OPT_OR, &lcp_wantoptions[0].neg_asyncmap }, + { "-as", o_uint32, &lcp_wantoptions[0].asyncmap, + "Set asyncmap (for received packets)", + OPT_ALIAS | OPT_OR, &lcp_wantoptions[0].neg_asyncmap }, + { "default-asyncmap", o_uint32, &lcp_wantoptions[0].asyncmap, + "Disable asyncmap negotiation", + OPT_OR | OPT_NOARG | OPT_VAL(~0U) | OPT_A2CLR, + &lcp_allowoptions[0].neg_asyncmap }, + { "-am", o_uint32, &lcp_wantoptions[0].asyncmap, + "Disable asyncmap negotiation", + OPT_ALIAS | OPT_OR | OPT_NOARG | OPT_VAL(~0U) | OPT_A2CLR, + &lcp_allowoptions[0].neg_asyncmap }, + + { "nomagic", o_bool, &lcp_wantoptions[0].neg_magicnumber, + "Disable magic number negotiation (looped-back line detection)", + OPT_A2CLR, &lcp_allowoptions[0].neg_magicnumber }, + { "-mn", o_bool, &lcp_wantoptions[0].neg_magicnumber, + "Disable magic number negotiation (looped-back line detection)", + OPT_ALIAS | OPT_A2CLR, &lcp_allowoptions[0].neg_magicnumber }, + + { "mru", o_int, &lcp_wantoptions[0].mru, + "Set MRU (maximum received packet size) for negotiation", + OPT_PRIO, &lcp_wantoptions[0].neg_mru }, + { "default-mru", o_bool, &lcp_wantoptions[0].neg_mru, + "Disable MRU negotiation (use default 1500)", + OPT_PRIOSUB | OPT_A2CLR, &lcp_allowoptions[0].neg_mru }, + { "-mru", o_bool, &lcp_wantoptions[0].neg_mru, + "Disable MRU negotiation (use default 1500)", + OPT_ALIAS | OPT_PRIOSUB | OPT_A2CLR, &lcp_allowoptions[0].neg_mru }, + + { "mtu", o_int, &lcp_allowoptions[0].mru, + "Set our MTU", OPT_LIMITS, NULL, MAXMRU, MINMRU }, + + { "nopcomp", o_bool, &lcp_wantoptions[0].neg_pcompression, + "Disable protocol field compression", + OPT_A2CLR, &lcp_allowoptions[0].neg_pcompression }, + { "-pc", o_bool, &lcp_wantoptions[0].neg_pcompression, + "Disable protocol field compression", + OPT_ALIAS | OPT_A2CLR, &lcp_allowoptions[0].neg_pcompression }, + + { "passive", o_bool, &lcp_wantoptions[0].passive, + "Set passive mode", 1 }, + { "-p", o_bool, &lcp_wantoptions[0].passive, + "Set passive mode", OPT_ALIAS | 1 }, + + { 
"silent", o_bool, &lcp_wantoptions[0].silent, + "Set silent mode", 1 }, + + { "lcp-echo-failure", o_int, &lcp_echo_fails, + "Set number of consecutive echo failures to indicate link failure", + OPT_PRIO }, + { "lcp-echo-interval", o_int, &lcp_echo_interval, + "Set time in seconds between LCP echo requests", OPT_PRIO }, +#if PPP_LCP_ADAPTIVE + { "lcp-echo-adaptive", o_bool, &lcp_echo_adaptive, + "Suppress LCP echo requests if traffic was received", 1 }, +#endif + { "lcp-restart", o_int, &lcp_fsm[0].timeouttime, + "Set time in seconds between LCP retransmissions", OPT_PRIO }, + { "lcp-max-terminate", o_int, &lcp_fsm[0].maxtermtransmits, + "Set maximum number of LCP terminate-request transmissions", OPT_PRIO }, + { "lcp-max-configure", o_int, &lcp_fsm[0].maxconfreqtransmits, + "Set maximum number of LCP configure-request transmissions", OPT_PRIO }, + { "lcp-max-failure", o_int, &lcp_fsm[0].maxnakloops, + "Set limit on number of LCP configure-naks", OPT_PRIO }, + + { "receive-all", o_bool, &lax_recv, + "Accept all received control characters", 1 }, + +#ifdef HAVE_MULTILINK + { "mrru", o_int, &lcp_wantoptions[0].mrru, + "Maximum received packet size for multilink bundle", + OPT_PRIO, &lcp_wantoptions[0].neg_mrru }, + + { "mpshortseq", o_bool, &lcp_wantoptions[0].neg_ssnhf, + "Use short sequence numbers in multilink headers", + OPT_PRIO | 1, &lcp_allowoptions[0].neg_ssnhf }, + { "nompshortseq", o_bool, &lcp_wantoptions[0].neg_ssnhf, + "Don't use short sequence numbers in multilink headers", + OPT_PRIOSUB | OPT_A2CLR, &lcp_allowoptions[0].neg_ssnhf }, + + { "endpoint", o_special, (void *) setendpoint, + "Endpoint discriminator for multilink", + OPT_PRIO | OPT_A2PRINTER, (void *) printendpoint }, +#endif /* HAVE_MULTILINK */ + + { "noendpoint", o_bool, &noendpoint, + "Don't send or accept multilink endpoint discriminator", 1 }, + + {NULL} +}; +#endif /* PPP_OPTIONS */ + +/* + * Callbacks for fsm code. 
(CI = Configuration Information) + */ +static void lcp_resetci(fsm *f); /* Reset our CI */ +static int lcp_cilen(fsm *f); /* Return length of our CI */ +static void lcp_addci(fsm *f, u_char *ucp, int *lenp); /* Add our CI to pkt */ +static int lcp_ackci(fsm *f, u_char *p, int len); /* Peer ack'd our CI */ +static int lcp_nakci(fsm *f, u_char *p, int len, int treat_as_reject); /* Peer nak'd our CI */ +static int lcp_rejci(fsm *f, u_char *p, int len); /* Peer rej'd our CI */ +static int lcp_reqci(fsm *f, u_char *inp, int *lenp, int reject_if_disagree); /* Rcv peer CI */ +static void lcp_up(fsm *f); /* We're UP */ +static void lcp_down(fsm *f); /* We're DOWN */ +static void lcp_starting (fsm *); /* We need lower layer up */ +static void lcp_finished (fsm *); /* We need lower layer down */ +static int lcp_extcode(fsm *f, int code, int id, u_char *inp, int len); +static void lcp_rprotrej(fsm *f, u_char *inp, int len); + +/* + * routines to send LCP echos to peer + */ + +static void lcp_echo_lowerup(ppp_pcb *pcb); +static void lcp_echo_lowerdown(ppp_pcb *pcb); +static void LcpEchoTimeout(void *arg); +static void lcp_received_echo_reply(fsm *f, int id, u_char *inp, int len); +static void LcpSendEchoRequest(fsm *f); +static void LcpLinkFailure(fsm *f); +static void LcpEchoCheck(fsm *f); + +static const fsm_callbacks lcp_callbacks = { /* LCP callback routines */ + lcp_resetci, /* Reset our Configuration Information */ + lcp_cilen, /* Length of our Configuration Information */ + lcp_addci, /* Add our Configuration Information */ + lcp_ackci, /* ACK our Configuration Information */ + lcp_nakci, /* NAK our Configuration Information */ + lcp_rejci, /* Reject our Configuration Information */ + lcp_reqci, /* Request peer's Configuration Information */ + lcp_up, /* Called when fsm reaches OPENED state */ + lcp_down, /* Called when fsm leaves OPENED state */ + lcp_starting, /* Called when we want the lower layer up */ + lcp_finished, /* Called when we want the lower layer down */ + NULL, /* Called when Protocol-Reject received */ + NULL, /* Retransmission is necessary */ + lcp_extcode, /* Called to handle LCP-specific codes */ + "LCP" /* String name of protocol */ +}; + +/* + * Protocol entry points. + * Some of these are called directly. + */ + +static void lcp_init(ppp_pcb *pcb); +static void lcp_input(ppp_pcb *pcb, u_char *p, int len); +static void lcp_protrej(ppp_pcb *pcb); +#if PRINTPKT_SUPPORT +static int lcp_printpkt(const u_char *p, int plen, + void (*printer) (void *, const char *, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ + +const struct protent lcp_protent = { + PPP_LCP, + lcp_init, + lcp_input, + lcp_protrej, + lcp_lowerup, + lcp_lowerdown, + lcp_open, + lcp_close, +#if PRINTPKT_SUPPORT + lcp_printpkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "LCP", + NULL, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + lcp_option_list, + NULL, +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + NULL, + NULL +#endif /* DEMAND_SUPPORT */ +}; + +/* + * Length of each type of configuration option (in octets) + */ +#define CILEN_VOID 2 +#define CILEN_CHAR 3 +#define CILEN_SHORT 4 /* CILEN_VOID + 2 */ +#if CHAP_SUPPORT +#define CILEN_CHAP 5 /* CILEN_VOID + 2 + 1 */ +#endif /* CHAP_SUPPORT */ +#define CILEN_LONG 6 /* CILEN_VOID + 4 */ +#if LQR_SUPPORT +#define CILEN_LQR 8 /* CILEN_VOID + 2 + 4 */ +#endif /* LQR_SUPPORT */ +#define CILEN_CBCP 3 + +#define CODENAME(x) ((x) == CONFACK ? "ACK" : \ + (x) == CONFNAK ? 
"NAK" : "REJ") + +#if PPP_OPTIONS +/* + * noopt - Disable all options (why?). + */ +static int +noopt(argv) + char **argv; +{ + BZERO((char *) &lcp_wantoptions[0], sizeof (struct lcp_options)); + BZERO((char *) &lcp_allowoptions[0], sizeof (struct lcp_options)); + + return (1); +} +#endif /* PPP_OPTIONS */ + +#ifdef HAVE_MULTILINK +static int +setendpoint(argv) + char **argv; +{ + if (str_to_epdisc(&lcp_wantoptions[0].endpoint, *argv)) { + lcp_wantoptions[0].neg_endpoint = 1; + return 1; + } + option_error("Can't parse '%s' as an endpoint discriminator", *argv); + return 0; +} + +static void +printendpoint(opt, printer, arg) + option_t *opt; + void (*printer) (void *, char *, ...); + void *arg; +{ + printer(arg, "%s", epdisc_to_str(&lcp_wantoptions[0].endpoint)); +} +#endif /* HAVE_MULTILINK */ + +/* + * lcp_init - Initialize LCP. + */ +static void lcp_init(ppp_pcb *pcb) { + fsm *f = &pcb->lcp_fsm; + lcp_options *wo = &pcb->lcp_wantoptions; + lcp_options *ao = &pcb->lcp_allowoptions; + + f->pcb = pcb; + f->protocol = PPP_LCP; + f->callbacks = &lcp_callbacks; + + fsm_init(f); + + BZERO(wo, sizeof(*wo)); + wo->neg_mru = 1; + wo->mru = PPP_DEFMRU; + wo->neg_asyncmap = 1; + wo->neg_magicnumber = 1; + wo->neg_pcompression = 1; + wo->neg_accompression = 1; + + BZERO(ao, sizeof(*ao)); + ao->neg_mru = 1; + ao->mru = PPP_MAXMRU; + ao->neg_asyncmap = 1; +#if CHAP_SUPPORT + ao->neg_chap = 1; + ao->chap_mdtype = CHAP_MDTYPE_SUPPORTED; +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + ao->neg_upap = 1; +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + ao->neg_eap = 1; +#endif /* EAP_SUPPORT */ + ao->neg_magicnumber = 1; + ao->neg_pcompression = 1; + ao->neg_accompression = 1; + ao->neg_endpoint = 1; +} + + +/* + * lcp_open - LCP is allowed to come up. + */ +void lcp_open(ppp_pcb *pcb) { + fsm *f = &pcb->lcp_fsm; + lcp_options *wo = &pcb->lcp_wantoptions; + + f->flags &= ~(OPT_PASSIVE | OPT_SILENT); + if (wo->passive) + f->flags |= OPT_PASSIVE; + if (wo->silent) + f->flags |= OPT_SILENT; + fsm_open(f); +} + + +/* + * lcp_close - Take LCP down. + */ +void lcp_close(ppp_pcb *pcb, const char *reason) { + fsm *f = &pcb->lcp_fsm; + int oldstate; + + if (pcb->phase != PPP_PHASE_DEAD +#ifdef HAVE_MULTILINK + && pcb->phase != PPP_PHASE_MASTER +#endif /* HAVE_MULTILINK */ + ) + new_phase(pcb, PPP_PHASE_TERMINATE); + + if (f->flags & DELAYED_UP) { + UNTIMEOUT(lcp_delayed_up, f); + f->state = PPP_FSM_STOPPED; + } + oldstate = f->state; + + fsm_close(f, reason); + if (oldstate == PPP_FSM_STOPPED && (f->flags & (OPT_PASSIVE|OPT_SILENT|DELAYED_UP))) { + /* + * This action is not strictly according to the FSM in RFC1548, + * but it does mean that the program terminates if you do a + * lcp_close() when a connection hasn't been established + * because we are in passive/silent mode or because we have + * delayed the fsm_lowerup() call and it hasn't happened yet. + */ + f->flags &= ~DELAYED_UP; + lcp_finished(f); + } +} + + +/* + * lcp_lowerup - The lower layer is up. + */ +void lcp_lowerup(ppp_pcb *pcb) { + lcp_options *wo = &pcb->lcp_wantoptions; + fsm *f = &pcb->lcp_fsm; + /* + * Don't use A/C or protocol compression on transmission, + * but accept A/C and protocol compressed packets + * if we are going to ask for A/C and protocol compression. + */ + if (ppp_send_config(pcb, PPP_MRU, 0xffffffff, 0, 0) < 0 + || ppp_recv_config(pcb, PPP_MRU, (pcb->settings.lax_recv? 
0: 0xffffffff), + wo->neg_pcompression, wo->neg_accompression) < 0) + return; + pcb->peer_mru = PPP_MRU; + + if (pcb->settings.listen_time != 0) { + f->flags |= DELAYED_UP; + TIMEOUTMS(lcp_delayed_up, f, pcb->settings.listen_time); + } else + fsm_lowerup(f); +} + + +/* + * lcp_lowerdown - The lower layer is down. + */ +void lcp_lowerdown(ppp_pcb *pcb) { + fsm *f = &pcb->lcp_fsm; + + if (f->flags & DELAYED_UP) { + f->flags &= ~DELAYED_UP; + UNTIMEOUT(lcp_delayed_up, f); + } else + fsm_lowerdown(f); +} + + +/* + * lcp_delayed_up - Bring the lower layer up now. + */ +static void lcp_delayed_up(void *arg) { + fsm *f = (fsm*)arg; + + if (f->flags & DELAYED_UP) { + f->flags &= ~DELAYED_UP; + fsm_lowerup(f); + } +} + + +/* + * lcp_input - Input LCP packet. + */ +static void lcp_input(ppp_pcb *pcb, u_char *p, int len) { + fsm *f = &pcb->lcp_fsm; + + if (f->flags & DELAYED_UP) { + f->flags &= ~DELAYED_UP; + UNTIMEOUT(lcp_delayed_up, f); + fsm_lowerup(f); + } + fsm_input(f, p, len); +} + +/* + * lcp_extcode - Handle a LCP-specific code. + */ +static int lcp_extcode(fsm *f, int code, int id, u_char *inp, int len) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + u_char *magp; + + switch( code ){ + case PROTREJ: + lcp_rprotrej(f, inp, len); + break; + + case ECHOREQ: + if (f->state != PPP_FSM_OPENED) + break; + magp = inp; + PUTLONG(go->magicnumber, magp); + fsm_sdata(f, ECHOREP, id, inp, len); + break; + + case ECHOREP: + lcp_received_echo_reply(f, id, inp, len); + break; + + case DISCREQ: + case IDENTIF: + case TIMEREM: + break; + + default: + return 0; + } + return 1; +} + + +/* + * lcp_rprotrej - Receive an Protocol-Reject. + * + * Figure out which protocol is rejected and inform it. + */ +static void lcp_rprotrej(fsm *f, u_char *inp, int len) { + int i; + const struct protent *protp; + u_short prot; +#if PPP_PROTOCOLNAME + const char *pname; +#endif /* PPP_PROTOCOLNAME */ + + if (len < 2) { + LCPDEBUG(("lcp_rprotrej: Rcvd short Protocol-Reject packet!")); + return; + } + + GETSHORT(prot, inp); + + /* + * Protocol-Reject packets received in any state other than the LCP + * OPENED state SHOULD be silently discarded. + */ + if( f->state != PPP_FSM_OPENED ){ + LCPDEBUG(("Protocol-Reject discarded: LCP in state %d", f->state)); + return; + } + +#if PPP_PROTOCOLNAME + pname = protocol_name(prot); +#endif /* PPP_PROTOCOLNAME */ + + /* + * Upcall the proper Protocol-Reject routine. + */ + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->protocol == prot) { +#if PPP_PROTOCOLNAME + if (pname != NULL) + ppp_dbglog("Protocol-Reject for '%s' (0x%x) received", pname, + prot); + else +#endif /* PPP_PROTOCOLNAME */ + ppp_dbglog("Protocol-Reject for 0x%x received", prot); + (*protp->protrej)(f->pcb); + return; + } + +#if PPP_PROTOCOLNAME + if (pname != NULL) + ppp_warn("Protocol-Reject for unsupported protocol '%s' (0x%x)", pname, + prot); + else +#endif /* #if PPP_PROTOCOLNAME */ + ppp_warn("Protocol-Reject for unsupported protocol 0x%x", prot); +} + + +/* + * lcp_protrej - A Protocol-Reject was received. + */ +/*ARGSUSED*/ +static void lcp_protrej(ppp_pcb *pcb) { + /* + * Can't reject LCP! + */ + ppp_error("Received Protocol-Reject for LCP!"); + fsm_protreject(&pcb->lcp_fsm); +} + + +/* + * lcp_sprotrej - Send a Protocol-Reject for some protocol. + */ +void lcp_sprotrej(ppp_pcb *pcb, u_char *p, int len) { + fsm *f = &pcb->lcp_fsm; + /* + * Send back the protocol and the information field of the + * rejected packet. We only get here if LCP is in the OPENED state. 
+ */ +#if 0 + p += 2; + len -= 2; +#endif + + fsm_sdata(f, PROTREJ, ++f->id, + p, len); +} + + +/* + * lcp_resetci - Reset our CI. + */ +static void lcp_resetci(fsm *f) { + ppp_pcb *pcb = f->pcb; + lcp_options *wo = &pcb->lcp_wantoptions; + lcp_options *go = &pcb->lcp_gotoptions; + lcp_options *ao = &pcb->lcp_allowoptions; + +#if PPP_AUTH_SUPPORT + + /* note: default value is true for allow options */ + if (pcb->settings.user && pcb->settings.passwd) { +#if PAP_SUPPORT + if (pcb->settings.refuse_pap) { + ao->neg_upap = 0; + } +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + if (pcb->settings.refuse_chap) { + ao->chap_mdtype &= ~MDTYPE_MD5; + } +#if MSCHAP_SUPPORT + if (pcb->settings.refuse_mschap) { + ao->chap_mdtype &= ~MDTYPE_MICROSOFT; + } + if (pcb->settings.refuse_mschap_v2) { + ao->chap_mdtype &= ~MDTYPE_MICROSOFT_V2; + } +#endif /* MSCHAP_SUPPORT */ + ao->neg_chap = (ao->chap_mdtype != MDTYPE_NONE); +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + if (pcb->settings.refuse_eap) { + ao->neg_eap = 0; + } +#endif /* EAP_SUPPORT */ + +#if PPP_SERVER + /* note: default value is false for wanted options */ + if (pcb->settings.auth_required) { +#if PAP_SUPPORT + if (!pcb->settings.refuse_pap) { + wo->neg_upap = 1; + } +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + if (!pcb->settings.refuse_chap) { + wo->chap_mdtype |= MDTYPE_MD5; + } +#if MSCHAP_SUPPORT + if (!pcb->settings.refuse_mschap) { + wo->chap_mdtype |= MDTYPE_MICROSOFT; + } + if (!pcb->settings.refuse_mschap_v2) { + wo->chap_mdtype |= MDTYPE_MICROSOFT_V2; + } +#endif /* MSCHAP_SUPPORT */ + wo->neg_chap = (wo->chap_mdtype != MDTYPE_NONE); +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + if (!pcb->settings.refuse_eap) { + wo->neg_eap = 1; + } +#endif /* EAP_SUPPORT */ + } +#endif /* PPP_SERVER */ + + } else { +#if PAP_SUPPORT + ao->neg_upap = 0; +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + ao->neg_chap = 0; + ao->chap_mdtype = MDTYPE_NONE; +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + ao->neg_eap = 0; +#endif /* EAP_SUPPORT */ + } + + PPPDEBUG(LOG_DEBUG, ("ppp: auth protocols:")); +#if PAP_SUPPORT + PPPDEBUG(LOG_DEBUG, (" PAP=%d", ao->neg_upap)); +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + PPPDEBUG(LOG_DEBUG, (" CHAP=%d CHAP_MD5=%d", ao->neg_chap, !!(ao->chap_mdtype&MDTYPE_MD5))); +#if MSCHAP_SUPPORT + PPPDEBUG(LOG_DEBUG, (" CHAP_MS=%d CHAP_MS2=%d", !!(ao->chap_mdtype&MDTYPE_MICROSOFT), !!(ao->chap_mdtype&MDTYPE_MICROSOFT_V2))); +#endif /* MSCHAP_SUPPORT */ +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + PPPDEBUG(LOG_DEBUG, (" EAP=%d", ao->neg_eap)); +#endif /* EAP_SUPPORT */ + PPPDEBUG(LOG_DEBUG, ("\n")); + +#endif /* PPP_AUTH_SUPPORT */ + + wo->magicnumber = magic(); + wo->numloops = 0; + *go = *wo; +#ifdef HAVE_MULTILINK + if (!multilink) { + go->neg_mrru = 0; +#endif /* HAVE_MULTILINK */ + go->neg_ssnhf = 0; + go->neg_endpoint = 0; +#ifdef HAVE_MULTILINK + } +#endif /* HAVE_MULTILINK */ + if (pcb->settings.noendpoint) + ao->neg_endpoint = 0; + pcb->peer_mru = PPP_MRU; +#if 0 /* UNUSED */ + auth_reset(pcb); +#endif /* UNUSED */ +} + + +/* + * lcp_cilen - Return length of our CI. + */ +static int lcp_cilen(fsm *f) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + +#define LENCIVOID(neg) ((neg) ? CILEN_VOID : 0) +#if CHAP_SUPPORT +#define LENCICHAP(neg) ((neg) ? CILEN_CHAP : 0) +#endif /* CHAP_SUPPORT */ +#define LENCISHORT(neg) ((neg) ? CILEN_SHORT : 0) +#define LENCILONG(neg) ((neg) ? CILEN_LONG : 0) +#if LQR_SUPPORT +#define LENCILQR(neg) ((neg) ? 
CILEN_LQR: 0) +#endif /* LQR_SUPPORT */ +#define LENCICBCP(neg) ((neg) ? CILEN_CBCP: 0) + /* + * NB: we only ask for one of CHAP, UPAP, or EAP, even if we will + * accept more than one. We prefer EAP first, then CHAP, then + * PAP. + */ + return (LENCISHORT(go->neg_mru && go->mru != PPP_DEFMRU) + + LENCILONG(go->neg_asyncmap && go->asyncmap != 0xFFFFFFFF) + +#if EAP_SUPPORT + LENCISHORT(go->neg_eap) + +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT /* cannot be improved, embedding a directive within macro arguments is not portable */ +#if EAP_SUPPORT + LENCICHAP(!go->neg_eap && go->neg_chap) + +#endif /* EAP_SUPPORT */ +#if !EAP_SUPPORT + LENCICHAP(go->neg_chap) + +#endif /* !EAP_SUPPORT */ +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT /* cannot be improved, embedding a directive within macro arguments is not portable */ +#if EAP_SUPPORT && CHAP_SUPPORT + LENCISHORT(!go->neg_eap && !go->neg_chap && go->neg_upap) + +#endif /* EAP_SUPPORT && CHAP_SUPPORT */ +#if EAP_SUPPORT && !CHAP_SUPPORT + LENCISHORT(!go->neg_eap && go->neg_upap) + +#endif /* EAP_SUPPORT && !CHAP_SUPPORT */ +#if !EAP_SUPPORT && CHAP_SUPPORT + LENCISHORT(!go->neg_chap && go->neg_upap) + +#endif /* !EAP_SUPPORT && CHAP_SUPPORT */ +#if !EAP_SUPPORT && !CHAP_SUPPORT + LENCISHORT(go->neg_upap) + +#endif /* !EAP_SUPPORT && !CHAP_SUPPORT */ +#endif /* PAP_SUPPORT */ +#if LQR_SUPPORT + LENCILQR(go->neg_lqr) + +#endif /* LQR_SUPPORT */ + LENCICBCP(go->neg_cbcp) + + LENCILONG(go->neg_magicnumber) + + LENCIVOID(go->neg_pcompression) + + LENCIVOID(go->neg_accompression) + +#ifdef HAVE_MULTILINK + LENCISHORT(go->neg_mrru) + +#endif /* HAVE_MULTILINK */ + LENCIVOID(go->neg_ssnhf) + + (go->neg_endpoint? CILEN_CHAR + go->endpoint.length: 0)); +} + + +/* + * lcp_addci - Add our desired CIs to a packet. + */ +static void lcp_addci(fsm *f, u_char *ucp, int *lenp) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + u_char *start_ucp = ucp; + +#define ADDCIVOID(opt, neg) \ + if (neg) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_VOID, ucp); \ + } +#define ADDCISHORT(opt, neg, val) \ + if (neg) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_SHORT, ucp); \ + PUTSHORT(val, ucp); \ + } +#if CHAP_SUPPORT +#define ADDCICHAP(opt, neg, val) \ + if (neg) { \ + PUTCHAR((opt), ucp); \ + PUTCHAR(CILEN_CHAP, ucp); \ + PUTSHORT(PPP_CHAP, ucp); \ + PUTCHAR((CHAP_DIGEST(val)), ucp); \ + } +#endif /* CHAP_SUPPORT */ +#define ADDCILONG(opt, neg, val) \ + if (neg) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_LONG, ucp); \ + PUTLONG(val, ucp); \ + } +#if LQR_SUPPORT +#define ADDCILQR(opt, neg, val) \ + if (neg) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_LQR, ucp); \ + PUTSHORT(PPP_LQR, ucp); \ + PUTLONG(val, ucp); \ + } +#endif /* LQR_SUPPORT */ +#define ADDCICHAR(opt, neg, val) \ + if (neg) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_CHAR, ucp); \ + PUTCHAR(val, ucp); \ + } +#define ADDCIENDP(opt, neg, class, val, len) \ + if (neg) { \ + int i; \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_CHAR + len, ucp); \ + PUTCHAR(class, ucp); \ + for (i = 0; i < len; ++i) \ + PUTCHAR(val[i], ucp); \ + } + + ADDCISHORT(CI_MRU, go->neg_mru && go->mru != PPP_DEFMRU, go->mru); + ADDCILONG(CI_ASYNCMAP, go->neg_asyncmap && go->asyncmap != 0xFFFFFFFF, + go->asyncmap); +#if EAP_SUPPORT + ADDCISHORT(CI_AUTHTYPE, go->neg_eap, PPP_EAP); +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT /* cannot be improved, embedding a directive within macro arguments is not portable */ +#if EAP_SUPPORT + ADDCICHAP(CI_AUTHTYPE, !go->neg_eap && go->neg_chap, go->chap_mdtype); +#endif /* EAP_SUPPORT */ +#if 
!EAP_SUPPORT + ADDCICHAP(CI_AUTHTYPE, go->neg_chap, go->chap_mdtype); +#endif /* !EAP_SUPPORT */ +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT /* cannot be improved, embedding a directive within macro arguments is not portable */ +#if EAP_SUPPORT && CHAP_SUPPORT + ADDCISHORT(CI_AUTHTYPE, !go->neg_eap && !go->neg_chap && go->neg_upap, PPP_PAP); +#endif /* EAP_SUPPORT && CHAP_SUPPORT */ +#if EAP_SUPPORT && !CHAP_SUPPORT + ADDCISHORT(CI_AUTHTYPE, !go->neg_eap && go->neg_upap, PPP_PAP); +#endif /* EAP_SUPPORT && !CHAP_SUPPORT */ +#if !EAP_SUPPORT && CHAP_SUPPORT + ADDCISHORT(CI_AUTHTYPE, !go->neg_chap && go->neg_upap, PPP_PAP); +#endif /* !EAP_SUPPORT && CHAP_SUPPORT */ +#if !EAP_SUPPORT && !CHAP_SUPPORT + ADDCISHORT(CI_AUTHTYPE, go->neg_upap, PPP_PAP); +#endif /* !EAP_SUPPORT && !CHAP_SUPPORT */ +#endif /* PAP_SUPPORT */ +#if LQR_SUPPORT + ADDCILQR(CI_QUALITY, go->neg_lqr, go->lqr_period); +#endif /* LQR_SUPPORT */ + ADDCICHAR(CI_CALLBACK, go->neg_cbcp, CBCP_OPT); + ADDCILONG(CI_MAGICNUMBER, go->neg_magicnumber, go->magicnumber); + ADDCIVOID(CI_PCOMPRESSION, go->neg_pcompression); + ADDCIVOID(CI_ACCOMPRESSION, go->neg_accompression); +#ifdef HAVE_MULTILINK + ADDCISHORT(CI_MRRU, go->neg_mrru, go->mrru); +#endif + ADDCIVOID(CI_SSNHF, go->neg_ssnhf); + ADDCIENDP(CI_EPDISC, go->neg_endpoint, go->endpoint.class_, + go->endpoint.value, go->endpoint.length); + + if (ucp - start_ucp != *lenp) { + /* this should never happen, because peer_mtu should be 1500 */ + ppp_error("Bug in lcp_addci: wrong length"); + } +} + + +/* + * lcp_ackci - Ack our CIs. + * This should not modify any state if the Ack is bad. + * + * Returns: + * 0 - Ack was bad. + * 1 - Ack was good. + */ +static int lcp_ackci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + u_char cilen, citype, cichar; + u_short cishort; + u32_t cilong; + + /* + * CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. 
+ */ +#define ACKCIVOID(opt, neg) \ + if (neg) { \ + if ((len -= CILEN_VOID) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_VOID || \ + citype != opt) \ + goto bad; \ + } +#define ACKCISHORT(opt, neg, val) \ + if (neg) { \ + if ((len -= CILEN_SHORT) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_SHORT || \ + citype != opt) \ + goto bad; \ + GETSHORT(cishort, p); \ + if (cishort != val) \ + goto bad; \ + } +#define ACKCICHAR(opt, neg, val) \ + if (neg) { \ + if ((len -= CILEN_CHAR) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_CHAR || \ + citype != opt) \ + goto bad; \ + GETCHAR(cichar, p); \ + if (cichar != val) \ + goto bad; \ + } +#if CHAP_SUPPORT +#define ACKCICHAP(opt, neg, val) \ + if (neg) { \ + if ((len -= CILEN_CHAP) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_CHAP || \ + citype != (opt)) \ + goto bad; \ + GETSHORT(cishort, p); \ + if (cishort != PPP_CHAP) \ + goto bad; \ + GETCHAR(cichar, p); \ + if (cichar != (CHAP_DIGEST(val))) \ + goto bad; \ + } +#endif /* CHAP_SUPPORT */ +#define ACKCILONG(opt, neg, val) \ + if (neg) { \ + if ((len -= CILEN_LONG) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_LONG || \ + citype != opt) \ + goto bad; \ + GETLONG(cilong, p); \ + if (cilong != val) \ + goto bad; \ + } +#if LQR_SUPPORT +#define ACKCILQR(opt, neg, val) \ + if (neg) { \ + if ((len -= CILEN_LQR) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_LQR || \ + citype != opt) \ + goto bad; \ + GETSHORT(cishort, p); \ + if (cishort != PPP_LQR) \ + goto bad; \ + GETLONG(cilong, p); \ + if (cilong != val) \ + goto bad; \ + } +#endif /* LQR_SUPPORT */ +#define ACKCIENDP(opt, neg, class, val, vlen) \ + if (neg) { \ + int i; \ + if ((len -= CILEN_CHAR + vlen) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_CHAR + vlen || \ + citype != opt) \ + goto bad; \ + GETCHAR(cichar, p); \ + if (cichar != class) \ + goto bad; \ + for (i = 0; i < vlen; ++i) { \ + GETCHAR(cichar, p); \ + if (cichar != val[i]) \ + goto bad; \ + } \ + } + + ACKCISHORT(CI_MRU, go->neg_mru && go->mru != PPP_DEFMRU, go->mru); + ACKCILONG(CI_ASYNCMAP, go->neg_asyncmap && go->asyncmap != 0xFFFFFFFF, + go->asyncmap); +#if EAP_SUPPORT + ACKCISHORT(CI_AUTHTYPE, go->neg_eap, PPP_EAP); +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT /* cannot be improved, embedding a directive within macro arguments is not portable */ +#if EAP_SUPPORT + ACKCICHAP(CI_AUTHTYPE, !go->neg_eap && go->neg_chap, go->chap_mdtype); +#endif /* EAP_SUPPORT */ +#if !EAP_SUPPORT + ACKCICHAP(CI_AUTHTYPE, go->neg_chap, go->chap_mdtype); +#endif /* !EAP_SUPPORT */ +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT /* cannot be improved, embedding a directive within macro arguments is not portable */ +#if EAP_SUPPORT && CHAP_SUPPORT + ACKCISHORT(CI_AUTHTYPE, !go->neg_eap && !go->neg_chap && go->neg_upap, PPP_PAP); +#endif /* EAP_SUPPORT && CHAP_SUPPORT */ +#if EAP_SUPPORT && !CHAP_SUPPORT + ACKCISHORT(CI_AUTHTYPE, !go->neg_eap && go->neg_upap, PPP_PAP); +#endif /* EAP_SUPPORT && !CHAP_SUPPORT */ +#if !EAP_SUPPORT && CHAP_SUPPORT + ACKCISHORT(CI_AUTHTYPE, !go->neg_chap && go->neg_upap, PPP_PAP); +#endif /* !EAP_SUPPORT && CHAP_SUPPORT */ +#if !EAP_SUPPORT && !CHAP_SUPPORT + ACKCISHORT(CI_AUTHTYPE, go->neg_upap, PPP_PAP); +#endif /* !EAP_SUPPORT && !CHAP_SUPPORT */ +#endif /* PAP_SUPPORT */ 
+#if LQR_SUPPORT + ACKCILQR(CI_QUALITY, go->neg_lqr, go->lqr_period); +#endif /* LQR_SUPPORT */ + ACKCICHAR(CI_CALLBACK, go->neg_cbcp, CBCP_OPT); + ACKCILONG(CI_MAGICNUMBER, go->neg_magicnumber, go->magicnumber); + ACKCIVOID(CI_PCOMPRESSION, go->neg_pcompression); + ACKCIVOID(CI_ACCOMPRESSION, go->neg_accompression); +#ifdef HAVE_MULTILINK + ACKCISHORT(CI_MRRU, go->neg_mrru, go->mrru); +#endif /* HAVE_MULTILINK */ + ACKCIVOID(CI_SSNHF, go->neg_ssnhf); + ACKCIENDP(CI_EPDISC, go->neg_endpoint, go->endpoint.class_, + go->endpoint.value, go->endpoint.length); + + /* + * If there are any remaining CIs, then this packet is bad. + */ + if (len != 0) + goto bad; + return (1); +bad: + LCPDEBUG(("lcp_acki: received bad Ack!")); + return (0); +} + + +/* + * lcp_nakci - Peer has sent a NAK for some of our CIs. + * This should not modify any state if the Nak is bad + * or if LCP is in the OPENED state. + * + * Returns: + * 0 - Nak was bad. + * 1 - Nak was good. + */ +static int lcp_nakci(fsm *f, u_char *p, int len, int treat_as_reject) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + lcp_options *wo = &pcb->lcp_wantoptions; + u_char citype, cichar, *next; + u_short cishort; + u32_t cilong; + lcp_options no; /* options we've seen Naks for */ + lcp_options try_; /* options to request next time */ + int looped_back = 0; + int cilen; + + BZERO(&no, sizeof(no)); + try_ = *go; + + /* + * Any Nak'd CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ +#define NAKCIVOID(opt, neg) \ + if (go->neg && \ + len >= CILEN_VOID && \ + p[1] == CILEN_VOID && \ + p[0] == opt) { \ + len -= CILEN_VOID; \ + INCPTR(CILEN_VOID, p); \ + no.neg = 1; \ + try_.neg = 0; \ + } +#if CHAP_SUPPORT +#define NAKCICHAP(opt, neg, code) \ + if (go->neg && \ + len >= CILEN_CHAP && \ + p[1] == CILEN_CHAP && \ + p[0] == opt) { \ + len -= CILEN_CHAP; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETCHAR(cichar, p); \ + no.neg = 1; \ + code \ + } +#endif /* CHAP_SUPPORT */ +#define NAKCICHAR(opt, neg, code) \ + if (go->neg && \ + len >= CILEN_CHAR && \ + p[1] == CILEN_CHAR && \ + p[0] == opt) { \ + len -= CILEN_CHAR; \ + INCPTR(2, p); \ + GETCHAR(cichar, p); \ + no.neg = 1; \ + code \ + } +#define NAKCISHORT(opt, neg, code) \ + if (go->neg && \ + len >= CILEN_SHORT && \ + p[1] == CILEN_SHORT && \ + p[0] == opt) { \ + len -= CILEN_SHORT; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + no.neg = 1; \ + code \ + } +#define NAKCILONG(opt, neg, code) \ + if (go->neg && \ + len >= CILEN_LONG && \ + p[1] == CILEN_LONG && \ + p[0] == opt) { \ + len -= CILEN_LONG; \ + INCPTR(2, p); \ + GETLONG(cilong, p); \ + no.neg = 1; \ + code \ + } +#if LQR_SUPPORT +#define NAKCILQR(opt, neg, code) \ + if (go->neg && \ + len >= CILEN_LQR && \ + p[1] == CILEN_LQR && \ + p[0] == opt) { \ + len -= CILEN_LQR; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETLONG(cilong, p); \ + no.neg = 1; \ + code \ + } +#endif /* LQR_SUPPORT */ +#define NAKCIENDP(opt, neg) \ + if (go->neg && \ + len >= CILEN_CHAR && \ + p[0] == opt && \ + p[1] >= CILEN_CHAR && \ + p[1] <= len) { \ + len -= p[1]; \ + INCPTR(p[1], p); \ + no.neg = 1; \ + try_.neg = 0; \ + } + + /* + * NOTE! There must be no assignments to individual fields of *go in + * the code below. Any such assignment is a BUG! + */ + /* + * We don't care if they want to send us smaller packets than + * we want. 
Therefore, accept any MRU less than what we asked for, + * but then ignore the new value when setting the MRU in the kernel. + * If they send us a bigger MRU than what we asked, accept it, up to + * the limit of the default MRU we'd get if we didn't negotiate. + */ + if (go->neg_mru && go->mru != PPP_DEFMRU) { + NAKCISHORT(CI_MRU, neg_mru, + if (cishort <= wo->mru || cishort <= PPP_DEFMRU) + try_.mru = cishort; + ); + } + + /* + * Add any characters they want to our (receive-side) asyncmap. + */ + if (go->neg_asyncmap && go->asyncmap != 0xFFFFFFFF) { + NAKCILONG(CI_ASYNCMAP, neg_asyncmap, + try_.asyncmap = go->asyncmap | cilong; + ); + } + + /* + * If they've nak'd our authentication-protocol, check whether + * they are proposing a different protocol, or a different + * hash algorithm for CHAP. + */ + if ((0 +#if CHAP_SUPPORT + || go->neg_chap +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + || go->neg_upap +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + || go->neg_eap +#endif /* EAP_SUPPORT */ + ) + && len >= CILEN_SHORT + && p[0] == CI_AUTHTYPE && p[1] >= CILEN_SHORT && p[1] <= len) { + cilen = p[1]; + len -= cilen; +#if CHAP_SUPPORT + no.neg_chap = go->neg_chap; +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + no.neg_upap = go->neg_upap; +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + no.neg_eap = go->neg_eap; +#endif /* EAP_SUPPORT */ + INCPTR(2, p); + GETSHORT(cishort, p); + +#if PAP_SUPPORT + if (cishort == PPP_PAP && cilen == CILEN_SHORT) { +#if EAP_SUPPORT + /* If we were asking for EAP, then we need to stop that. */ + if (go->neg_eap) + try_.neg_eap = 0; + else +#endif /* EAP_SUPPORT */ + +#if CHAP_SUPPORT + /* If we were asking for CHAP, then we need to stop that. */ + if (go->neg_chap) + try_.neg_chap = 0; + else +#endif /* CHAP_SUPPORT */ + + /* + * If we weren't asking for CHAP or EAP, then we were asking for + * PAP, in which case this Nak is bad. + */ + goto bad; + } else +#endif /* PAP_SUPPORT */ + +#if CHAP_SUPPORT + if (cishort == PPP_CHAP && cilen == CILEN_CHAP) { + GETCHAR(cichar, p); +#if EAP_SUPPORT + /* Stop asking for EAP, if we were. */ + if (go->neg_eap) { + try_.neg_eap = 0; + /* Try to set up to use their suggestion, if possible */ + if (CHAP_CANDIGEST(go->chap_mdtype, cichar)) + try_.chap_mdtype = CHAP_MDTYPE_D(cichar); + } else +#endif /* EAP_SUPPORT */ + if (go->neg_chap) { + /* + * We were asking for our preferred algorithm, they must + * want something different. + */ + if (cichar != CHAP_DIGEST(go->chap_mdtype)) { + if (CHAP_CANDIGEST(go->chap_mdtype, cichar)) { + /* Use their suggestion if we support it ... */ + try_.chap_mdtype = CHAP_MDTYPE_D(cichar); + } else { + /* ... otherwise, try our next-preferred algorithm. */ + try_.chap_mdtype &= ~(CHAP_MDTYPE(try_.chap_mdtype)); + if (try_.chap_mdtype == MDTYPE_NONE) /* out of algos */ + try_.neg_chap = 0; + } + } else { + /* + * Whoops, they Nak'd our algorithm of choice + * but then suggested it back to us. + */ + goto bad; + } + } else { + /* + * Stop asking for PAP if we were asking for it. + */ +#if PAP_SUPPORT + try_.neg_upap = 0; +#endif /* PAP_SUPPORT */ + } + + } else +#endif /* CHAP_SUPPORT */ + { + +#if EAP_SUPPORT + /* + * If we were asking for EAP, and they're Conf-Naking EAP, + * well, that's just strange. Nobody should do that. + */ + if (cishort == PPP_EAP && cilen == CILEN_SHORT && go->neg_eap) + ppp_dbglog("Unexpected Conf-Nak for EAP"); + + /* + * We don't recognize what they're suggesting. + * Stop asking for what we were asking for. 
+ */ + if (go->neg_eap) + try_.neg_eap = 0; + else +#endif /* EAP_SUPPORT */ + +#if CHAP_SUPPORT + if (go->neg_chap) + try_.neg_chap = 0; + else +#endif /* CHAP_SUPPORT */ + +#if PAP_SUPPORT + if(1) + try_.neg_upap = 0; + else +#endif /* PAP_SUPPORT */ + {} + + p += cilen - CILEN_SHORT; + } + } + +#if LQR_SUPPORT + /* + * If they can't cope with our link quality protocol, we'll have + * to stop asking for LQR. We haven't got any other protocol. + * If they Nak the reporting period, take their value XXX ? + */ + NAKCILQR(CI_QUALITY, neg_lqr, + if (cishort != PPP_LQR) + try_.neg_lqr = 0; + else + try_.lqr_period = cilong; + ); +#endif /* LQR_SUPPORT */ + + /* + * Only implementing CBCP...not the rest of the callback options + */ + NAKCICHAR(CI_CALLBACK, neg_cbcp, + try_.neg_cbcp = 0; + (void)cichar; /* if CHAP support is not compiled, cichar is set but not used, which makes some compilers complaining */ + ); + + /* + * Check for a looped-back line. + */ + NAKCILONG(CI_MAGICNUMBER, neg_magicnumber, + try_.magicnumber = magic(); + looped_back = 1; + ); + + /* + * Peer shouldn't send Nak for protocol compression or + * address/control compression requests; they should send + * a Reject instead. If they send a Nak, treat it as a Reject. + */ + NAKCIVOID(CI_PCOMPRESSION, neg_pcompression); + NAKCIVOID(CI_ACCOMPRESSION, neg_accompression); + +#ifdef HAVE_MULTILINK + /* + * Nak for MRRU option - accept their value if it is smaller + * than the one we want. + */ + if (go->neg_mrru) { + NAKCISHORT(CI_MRRU, neg_mrru, + if (treat_as_reject) + try_.neg_mrru = 0; + else if (cishort <= wo->mrru) + try_.mrru = cishort; + ); + } +#else /* HAVE_MULTILINK */ + LWIP_UNUSED_ARG(treat_as_reject); +#endif /* HAVE_MULTILINK */ + + /* + * Nak for short sequence numbers shouldn't be sent, treat it + * like a reject. + */ + NAKCIVOID(CI_SSNHF, neg_ssnhf); + + /* + * Nak of the endpoint discriminator option is not permitted, + * treat it like a reject. + */ + NAKCIENDP(CI_EPDISC, neg_endpoint); + + /* + * There may be remaining CIs, if the peer is requesting negotiation + * on an option that we didn't include in our request packet. + * If we see an option that we requested, or one we've already seen + * in this packet, then this packet is bad. + * If we wanted to respond by starting to negotiate on the requested + * option(s), we could, but we don't, because except for the + * authentication type and quality protocol, if we are not negotiating + * an option, it is because we were told not to. + * For the authentication type, the Nak from the peer means + * `let me authenticate myself with you' which is a bit pointless. + * For the quality protocol, the Nak means `ask me to send you quality + * reports', but if we didn't ask for them, we don't want them. + * An option we don't recognize represents the peer asking to + * negotiate some option we don't support, so ignore it. 
+ */ + while (len >= CILEN_VOID) { + GETCHAR(citype, p); + GETCHAR(cilen, p); + if (cilen < CILEN_VOID || (len -= cilen) < 0) + goto bad; + next = p + cilen - 2; + + switch (citype) { + case CI_MRU: + if ((go->neg_mru && go->mru != PPP_DEFMRU) + || no.neg_mru || cilen != CILEN_SHORT) + goto bad; + GETSHORT(cishort, p); + if (cishort < PPP_DEFMRU) { + try_.neg_mru = 1; + try_.mru = cishort; + } + break; + case CI_ASYNCMAP: + if ((go->neg_asyncmap && go->asyncmap != 0xFFFFFFFF) + || no.neg_asyncmap || cilen != CILEN_LONG) + goto bad; + break; + case CI_AUTHTYPE: + if (0 +#if CHAP_SUPPORT + || go->neg_chap || no.neg_chap +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + || go->neg_upap || no.neg_upap +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + || go->neg_eap || no.neg_eap +#endif /* EAP_SUPPORT */ + ) + goto bad; + break; + case CI_MAGICNUMBER: + if (go->neg_magicnumber || no.neg_magicnumber || + cilen != CILEN_LONG) + goto bad; + break; + case CI_PCOMPRESSION: + if (go->neg_pcompression || no.neg_pcompression + || cilen != CILEN_VOID) + goto bad; + break; + case CI_ACCOMPRESSION: + if (go->neg_accompression || no.neg_accompression + || cilen != CILEN_VOID) + goto bad; + break; +#if LQR_SUPPORT + case CI_QUALITY: + if (go->neg_lqr || no.neg_lqr || cilen != CILEN_LQR) + goto bad; + break; +#endif /* LQR_SUPPORT */ +#ifdef HAVE_MULTILINK + case CI_MRRU: + if (go->neg_mrru || no.neg_mrru || cilen != CILEN_SHORT) + goto bad; + break; +#endif /* HAVE_MULTILINK */ + case CI_SSNHF: + if (go->neg_ssnhf || no.neg_ssnhf || cilen != CILEN_VOID) + goto bad; + try_.neg_ssnhf = 1; + break; + case CI_EPDISC: + if (go->neg_endpoint || no.neg_endpoint || cilen < CILEN_CHAR) + goto bad; + break; + default: + break; + } + p = next; + } + + /* + * OK, the Nak is good. Now we can update state. + * If there are any options left we ignore them. + */ + if (f->state != PPP_FSM_OPENED) { + if (looped_back) { + if (++try_.numloops >= pcb->settings.lcp_loopbackfail) { + ppp_notice("Serial line is looped back."); + pcb->err_code = PPPERR_LOOPBACK; + lcp_close(f->pcb, "Loopback detected"); + } + } else + try_.numloops = 0; + *go = try_; + } + + return 1; + +bad: + LCPDEBUG(("lcp_nakci: received bad Nak!")); + return 0; +} + + +/* + * lcp_rejci - Peer has Rejected some of our CIs. + * This should not modify any state if the Reject is bad + * or if LCP is in the OPENED state. + * + * Returns: + * 0 - Reject was bad. + * 1 - Reject was good. + */ +static int lcp_rejci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + u_char cichar; + u_short cishort; + u32_t cilong; + lcp_options try_; /* options to request next time */ + + try_ = *go; + + /* + * Any Rejected CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ +#define REJCIVOID(opt, neg) \ + if (go->neg && \ + len >= CILEN_VOID && \ + p[1] == CILEN_VOID && \ + p[0] == opt) { \ + len -= CILEN_VOID; \ + INCPTR(CILEN_VOID, p); \ + try_.neg = 0; \ + } +#define REJCISHORT(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_SHORT && \ + p[1] == CILEN_SHORT && \ + p[0] == opt) { \ + len -= CILEN_SHORT; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + /* Check rejected value. 
*/ \ + if (cishort != val) \ + goto bad; \ + try_.neg = 0; \ + } + +#if CHAP_SUPPORT && EAP_SUPPORT && PAP_SUPPORT +#define REJCICHAP(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_CHAP && \ + p[1] == CILEN_CHAP && \ + p[0] == opt) { \ + len -= CILEN_CHAP; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETCHAR(cichar, p); \ + /* Check rejected value. */ \ + if ((cishort != PPP_CHAP) || (cichar != (CHAP_DIGEST(val)))) \ + goto bad; \ + try_.neg = 0; \ + try_.neg_eap = try_.neg_upap = 0; \ + } +#endif /* CHAP_SUPPORT && EAP_SUPPORT && PAP_SUPPORT */ + +#if CHAP_SUPPORT && !EAP_SUPPORT && PAP_SUPPORT +#define REJCICHAP(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_CHAP && \ + p[1] == CILEN_CHAP && \ + p[0] == opt) { \ + len -= CILEN_CHAP; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETCHAR(cichar, p); \ + /* Check rejected value. */ \ + if ((cishort != PPP_CHAP) || (cichar != (CHAP_DIGEST(val)))) \ + goto bad; \ + try_.neg = 0; \ + try_.neg_upap = 0; \ + } +#endif /* CHAP_SUPPORT && !EAP_SUPPORT && PAP_SUPPORT */ + +#if CHAP_SUPPORT && EAP_SUPPORT && !PAP_SUPPORT +#define REJCICHAP(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_CHAP && \ + p[1] == CILEN_CHAP && \ + p[0] == opt) { \ + len -= CILEN_CHAP; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETCHAR(cichar, p); \ + /* Check rejected value. */ \ + if ((cishort != PPP_CHAP) || (cichar != (CHAP_DIGEST(val)))) \ + goto bad; \ + try_.neg = 0; \ + try_.neg_eap = 0; \ + } +#endif /* CHAP_SUPPORT && EAP_SUPPORT && !PAP_SUPPORT */ + +#if CHAP_SUPPORT && !EAP_SUPPORT && !PAP_SUPPORT +#define REJCICHAP(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_CHAP && \ + p[1] == CILEN_CHAP && \ + p[0] == opt) { \ + len -= CILEN_CHAP; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETCHAR(cichar, p); \ + /* Check rejected value. */ \ + if ((cishort != PPP_CHAP) || (cichar != (CHAP_DIGEST(val)))) \ + goto bad; \ + try_.neg = 0; \ + } +#endif /* CHAP_SUPPORT && !EAP_SUPPORT && !PAP_SUPPORT */ + +#define REJCILONG(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_LONG && \ + p[1] == CILEN_LONG && \ + p[0] == opt) { \ + len -= CILEN_LONG; \ + INCPTR(2, p); \ + GETLONG(cilong, p); \ + /* Check rejected value. */ \ + if (cilong != val) \ + goto bad; \ + try_.neg = 0; \ + } +#if LQR_SUPPORT +#define REJCILQR(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_LQR && \ + p[1] == CILEN_LQR && \ + p[0] == opt) { \ + len -= CILEN_LQR; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETLONG(cilong, p); \ + /* Check rejected value. */ \ + if (cishort != PPP_LQR || cilong != val) \ + goto bad; \ + try_.neg = 0; \ + } +#endif /* LQR_SUPPORT */ +#define REJCICBCP(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_CBCP && \ + p[1] == CILEN_CBCP && \ + p[0] == opt) { \ + len -= CILEN_CBCP; \ + INCPTR(2, p); \ + GETCHAR(cichar, p); \ + /* Check rejected value. 
*/ \ + if (cichar != val) \ + goto bad; \ + try_.neg = 0; \ + } +#define REJCIENDP(opt, neg, class, val, vlen) \ + if (go->neg && \ + len >= CILEN_CHAR + vlen && \ + p[0] == opt && \ + p[1] == CILEN_CHAR + vlen) { \ + int i; \ + len -= CILEN_CHAR + vlen; \ + INCPTR(2, p); \ + GETCHAR(cichar, p); \ + if (cichar != class) \ + goto bad; \ + for (i = 0; i < vlen; ++i) { \ + GETCHAR(cichar, p); \ + if (cichar != val[i]) \ + goto bad; \ + } \ + try_.neg = 0; \ + } + + REJCISHORT(CI_MRU, neg_mru, go->mru); + REJCILONG(CI_ASYNCMAP, neg_asyncmap, go->asyncmap); +#if EAP_SUPPORT + REJCISHORT(CI_AUTHTYPE, neg_eap, PPP_EAP); + if (!go->neg_eap) { +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT + REJCICHAP(CI_AUTHTYPE, neg_chap, go->chap_mdtype); + if (!go->neg_chap) { +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + REJCISHORT(CI_AUTHTYPE, neg_upap, PPP_PAP); +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + } +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + } +#endif /* EAP_SUPPORT */ +#if LQR_SUPPORT + REJCILQR(CI_QUALITY, neg_lqr, go->lqr_period); +#endif /* LQR_SUPPORT */ + REJCICBCP(CI_CALLBACK, neg_cbcp, CBCP_OPT); + REJCILONG(CI_MAGICNUMBER, neg_magicnumber, go->magicnumber); + REJCIVOID(CI_PCOMPRESSION, neg_pcompression); + REJCIVOID(CI_ACCOMPRESSION, neg_accompression); +#ifdef HAVE_MULTILINK + REJCISHORT(CI_MRRU, neg_mrru, go->mrru); +#endif /* HAVE_MULTILINK */ + REJCIVOID(CI_SSNHF, neg_ssnhf); + REJCIENDP(CI_EPDISC, neg_endpoint, go->endpoint.class_, + go->endpoint.value, go->endpoint.length); + + /* + * If there are any remaining CIs, then this packet is bad. + */ + if (len != 0) + goto bad; + /* + * Now we can update state. + */ + if (f->state != PPP_FSM_OPENED) + *go = try_; + return 1; + +bad: + LCPDEBUG(("lcp_rejci: received bad Reject!")); + return 0; +} + + +/* + * lcp_reqci - Check the peer's requested CIs and send appropriate response. + * + * Returns: CONFACK, CONFNAK or CONFREJ and input packet modified + * appropriately. If reject_if_disagree is non-zero, doesn't return + * CONFNAK; returns CONFREJ if it can't return CONFACK. + * + * inp = Requested CIs + * lenp = Length of requested CIs + */ +static int lcp_reqci(fsm *f, u_char *inp, int *lenp, int reject_if_disagree) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + lcp_options *ho = &pcb->lcp_hisoptions; + lcp_options *ao = &pcb->lcp_allowoptions; + u_char *cip, *next; /* Pointer to current and next CIs */ + int cilen, citype, cichar; /* Parsed len, type, char value */ + u_short cishort; /* Parsed short value */ + u32_t cilong; /* Parse long value */ + int rc = CONFACK; /* Final packet return code */ + int orc; /* Individual option return code */ + u_char *p; /* Pointer to next char to parse */ + u_char *rejp; /* Pointer to next char in reject frame */ + struct pbuf *nakp; /* Nak buffer */ + u_char *nakoutp; /* Pointer to next char in Nak frame */ + int l = *lenp; /* Length left */ + + /* + * Reset all his options. + */ + BZERO(ho, sizeof(*ho)); + + /* + * Process all his options. + */ + next = inp; + nakp = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_CTRL_PBUF_MAX_SIZE), PPP_CTRL_PBUF_TYPE); + if(NULL == nakp) + return 0; + if(nakp->tot_len != nakp->len) { + pbuf_free(nakp); + return 0; + } + + nakoutp = (u_char*)nakp->payload; + rejp = inp; + while (l) { + orc = CONFACK; /* Assume success */ + cip = p = next; /* Remember begining of CI */ + if (l < 2 || /* Not enough data for CI header or */ + p[1] < 2 || /* CI length too small or */ + p[1] > l) { /* CI length too big? 
*/ + LCPDEBUG(("lcp_reqci: bad CI length!")); + orc = CONFREJ; /* Reject bad CI */ + cilen = l; /* Reject till end of packet */ + l = 0; /* Don't loop again */ + citype = 0; + goto endswitch; + } + GETCHAR(citype, p); /* Parse CI type */ + GETCHAR(cilen, p); /* Parse CI length */ + l -= cilen; /* Adjust remaining length */ + next += cilen; /* Step to next CI */ + + switch (citype) { /* Check CI type */ + case CI_MRU: + if (!ao->neg_mru || /* Allow option? */ + cilen != CILEN_SHORT) { /* Check CI length */ + orc = CONFREJ; /* Reject CI */ + break; + } + GETSHORT(cishort, p); /* Parse MRU */ + + /* + * He must be able to receive at least our minimum. + * No need to check a maximum. If he sends a large number, + * we'll just ignore it. + */ + if (cishort < PPP_MINMRU) { + orc = CONFNAK; /* Nak CI */ + PUTCHAR(CI_MRU, nakoutp); + PUTCHAR(CILEN_SHORT, nakoutp); + PUTSHORT(PPP_MINMRU, nakoutp); /* Give him a hint */ + break; + } + ho->neg_mru = 1; /* Remember he sent MRU */ + ho->mru = cishort; /* And remember value */ + break; + + case CI_ASYNCMAP: + if (!ao->neg_asyncmap || + cilen != CILEN_LONG) { + orc = CONFREJ; + break; + } + GETLONG(cilong, p); + + /* + * Asyncmap must have set at least the bits + * which are set in lcp_allowoptions[unit].asyncmap. + */ + if ((ao->asyncmap & ~cilong) != 0) { + orc = CONFNAK; + PUTCHAR(CI_ASYNCMAP, nakoutp); + PUTCHAR(CILEN_LONG, nakoutp); + PUTLONG(ao->asyncmap | cilong, nakoutp); + break; + } + ho->neg_asyncmap = 1; + ho->asyncmap = cilong; + break; + + case CI_AUTHTYPE: + if (cilen < CILEN_SHORT || + !(0 +#if PAP_SUPPORT + || ao->neg_upap +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + || ao->neg_chap +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + || ao->neg_eap +#endif /* EAP_SUPPORT */ + )) { + /* + * Reject the option if we're not willing to authenticate. + */ + ppp_dbglog("No auth is possible"); + orc = CONFREJ; + break; + } + GETSHORT(cishort, p); + + /* + * Authtype must be PAP, CHAP, or EAP. + * + * Note: if more than one of ao->neg_upap, ao->neg_chap, and + * ao->neg_eap are set, and the peer sends a Configure-Request + * with two or more authenticate-protocol requests, then we will + * reject the second request. + * Whether we end up doing CHAP, UPAP, or EAP depends then on + * the ordering of the CIs in the peer's Configure-Request. 
+ */ + +#if PAP_SUPPORT + if (cishort == PPP_PAP) { + /* we've already accepted CHAP or EAP */ + if (0 +#if CHAP_SUPPORT + || ho->neg_chap +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + || ho->neg_eap +#endif /* EAP_SUPPORT */ + || cilen != CILEN_SHORT) { + LCPDEBUG(("lcp_reqci: rcvd AUTHTYPE PAP, rejecting...")); + orc = CONFREJ; + break; + } + if (!ao->neg_upap) { /* we don't want to do PAP */ + orc = CONFNAK; /* NAK it and suggest CHAP or EAP */ + PUTCHAR(CI_AUTHTYPE, nakoutp); +#if EAP_SUPPORT + if (ao->neg_eap) { + PUTCHAR(CILEN_SHORT, nakoutp); + PUTSHORT(PPP_EAP, nakoutp); + } else { +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT + PUTCHAR(CILEN_CHAP, nakoutp); + PUTSHORT(PPP_CHAP, nakoutp); + PUTCHAR(CHAP_DIGEST(ao->chap_mdtype), nakoutp); +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + } +#endif /* EAP_SUPPORT */ + break; + } + ho->neg_upap = 1; + break; + } +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + if (cishort == PPP_CHAP) { + /* we've already accepted PAP or EAP */ + if ( +#if PAP_SUPPORT + ho->neg_upap || +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + ho->neg_eap || +#endif /* EAP_SUPPORT */ + cilen != CILEN_CHAP) { + LCPDEBUG(("lcp_reqci: rcvd AUTHTYPE CHAP, rejecting...")); + orc = CONFREJ; + break; + } + if (!ao->neg_chap) { /* we don't want to do CHAP */ + orc = CONFNAK; /* NAK it and suggest EAP or PAP */ + PUTCHAR(CI_AUTHTYPE, nakoutp); + PUTCHAR(CILEN_SHORT, nakoutp); +#if EAP_SUPPORT + if (ao->neg_eap) { + PUTSHORT(PPP_EAP, nakoutp); + } else +#endif /* EAP_SUPPORT */ +#if PAP_SUPPORT + if(1) { + PUTSHORT(PPP_PAP, nakoutp); + } + else +#endif /* PAP_SUPPORT */ + {} + break; + } + GETCHAR(cichar, p); /* get digest type */ + if (!(CHAP_CANDIGEST(ao->chap_mdtype, cichar))) { + /* + * We can't/won't do the requested type, + * suggest something else. + */ + orc = CONFNAK; + PUTCHAR(CI_AUTHTYPE, nakoutp); + PUTCHAR(CILEN_CHAP, nakoutp); + PUTSHORT(PPP_CHAP, nakoutp); + PUTCHAR(CHAP_DIGEST(ao->chap_mdtype), nakoutp); + break; + } + ho->chap_mdtype = CHAP_MDTYPE_D(cichar); /* save md type */ + ho->neg_chap = 1; + break; + } +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + if (cishort == PPP_EAP) { + /* we've already accepted CHAP or PAP */ + if ( +#if CHAP_SUPPORT + ho->neg_chap || +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + ho->neg_upap || +#endif /* PAP_SUPPORT */ + cilen != CILEN_SHORT) { + LCPDEBUG(("lcp_reqci: rcvd AUTHTYPE EAP, rejecting...")); + orc = CONFREJ; + break; + } + if (!ao->neg_eap) { /* we don't want to do EAP */ + orc = CONFNAK; /* NAK it and suggest CHAP or PAP */ + PUTCHAR(CI_AUTHTYPE, nakoutp); +#if CHAP_SUPPORT + if (ao->neg_chap) { + PUTCHAR(CILEN_CHAP, nakoutp); + PUTSHORT(PPP_CHAP, nakoutp); + PUTCHAR(CHAP_DIGEST(ao->chap_mdtype), nakoutp); + } else +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + if(1) { + PUTCHAR(CILEN_SHORT, nakoutp); + PUTSHORT(PPP_PAP, nakoutp); + } else +#endif /* PAP_SUPPORT */ + {} + break; + } + ho->neg_eap = 1; + break; + } +#endif /* EAP_SUPPORT */ + + /* + * We don't recognize the protocol they're asking for. + * Nak it with something we're willing to do. + * (At this point we know ao->neg_upap || ao->neg_chap || + * ao->neg_eap.) 
+ */ + orc = CONFNAK; + PUTCHAR(CI_AUTHTYPE, nakoutp); + +#if EAP_SUPPORT + if (ao->neg_eap) { + PUTCHAR(CILEN_SHORT, nakoutp); + PUTSHORT(PPP_EAP, nakoutp); + } else +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT + if (ao->neg_chap) { + PUTCHAR(CILEN_CHAP, nakoutp); + PUTSHORT(PPP_CHAP, nakoutp); + PUTCHAR(CHAP_DIGEST(ao->chap_mdtype), nakoutp); + } else +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + if(1) { + PUTCHAR(CILEN_SHORT, nakoutp); + PUTSHORT(PPP_PAP, nakoutp); + } else +#endif /* PAP_SUPPORT */ + {} + break; + +#if LQR_SUPPORT + case CI_QUALITY: + if (!ao->neg_lqr || + cilen != CILEN_LQR) { + orc = CONFREJ; + break; + } + + GETSHORT(cishort, p); + GETLONG(cilong, p); + + /* + * Check the protocol and the reporting period. + * XXX When should we Nak this, and what with? + */ + if (cishort != PPP_LQR) { + orc = CONFNAK; + PUTCHAR(CI_QUALITY, nakoutp); + PUTCHAR(CILEN_LQR, nakoutp); + PUTSHORT(PPP_LQR, nakoutp); + PUTLONG(ao->lqr_period, nakoutp); + break; + } + break; +#endif /* LQR_SUPPORT */ + + case CI_MAGICNUMBER: + if (!(ao->neg_magicnumber || go->neg_magicnumber) || + cilen != CILEN_LONG) { + orc = CONFREJ; + break; + } + GETLONG(cilong, p); + + /* + * He must have a different magic number. + */ + if (go->neg_magicnumber && + cilong == go->magicnumber) { + cilong = magic(); /* Don't put magic() inside macro! */ + orc = CONFNAK; + PUTCHAR(CI_MAGICNUMBER, nakoutp); + PUTCHAR(CILEN_LONG, nakoutp); + PUTLONG(cilong, nakoutp); + break; + } + ho->neg_magicnumber = 1; + ho->magicnumber = cilong; + break; + + + case CI_PCOMPRESSION: + if (!ao->neg_pcompression || + cilen != CILEN_VOID) { + orc = CONFREJ; + break; + } + ho->neg_pcompression = 1; + break; + + case CI_ACCOMPRESSION: + if (!ao->neg_accompression || + cilen != CILEN_VOID) { + orc = CONFREJ; + break; + } + ho->neg_accompression = 1; + break; + +#ifdef HAVE_MULTILINK + case CI_MRRU: + if (!ao->neg_mrru + || !multilink + || cilen != CILEN_SHORT) { + orc = CONFREJ; + break; + } + + GETSHORT(cishort, p); + /* possibly should insist on a minimum/maximum MRRU here */ + ho->neg_mrru = 1; + ho->mrru = cishort; + break; +#endif /* HAVE_MULTILINK */ + + case CI_SSNHF: + if (!ao->neg_ssnhf +#ifdef HAVE_MULTILINK + || !multilink +#endif /* HAVE_MULTILINK */ + || cilen != CILEN_VOID) { + orc = CONFREJ; + break; + } + ho->neg_ssnhf = 1; + break; + + case CI_EPDISC: + if (!ao->neg_endpoint || + cilen < CILEN_CHAR || + cilen > CILEN_CHAR + MAX_ENDP_LEN) { + orc = CONFREJ; + break; + } + GETCHAR(cichar, p); + cilen -= CILEN_CHAR; + ho->neg_endpoint = 1; + ho->endpoint.class_ = cichar; + ho->endpoint.length = cilen; + MEMCPY(ho->endpoint.value, p, cilen); + INCPTR(cilen, p); + break; + + default: + LCPDEBUG(("lcp_reqci: rcvd unknown option %d", citype)); + orc = CONFREJ; + break; + } + +endswitch: + if (orc == CONFACK && /* Good CI */ + rc != CONFACK) /* but prior CI wasnt? */ + continue; /* Don't send this one */ + + if (orc == CONFNAK) { /* Nak this CI? */ + if (reject_if_disagree /* Getting fed up with sending NAKs? */ + && citype != CI_MAGICNUMBER) { + orc = CONFREJ; /* Get tough if so */ + } else { + if (rc == CONFREJ) /* Rejecting prior CI? */ + continue; /* Don't send this one */ + rc = CONFNAK; + } + } + if (orc == CONFREJ) { /* Reject this CI */ + rc = CONFREJ; + if (cip != rejp) /* Need to move rejected CI? */ + MEMCPY(rejp, cip, cilen); /* Move it */ + INCPTR(cilen, rejp); /* Update output pointer */ + } + } + + /* + * If we wanted to send additional NAKs (for unsent CIs), the + * code would go here. 
The extra NAKs would go at *nakoutp. + * At present there are no cases where we want to ask the + * peer to negotiate an option. + */ + + switch (rc) { + case CONFACK: + *lenp = next - inp; + break; + case CONFNAK: + /* + * Copy the Nak'd options from the nak buffer to the caller's buffer. + */ + *lenp = nakoutp - (u_char*)nakp->payload; + MEMCPY(inp, nakp->payload, *lenp); + break; + case CONFREJ: + *lenp = rejp - inp; + break; + default: + break; + } + + pbuf_free(nakp); + LCPDEBUG(("lcp_reqci: returning CONF%s.", CODENAME(rc))); + return (rc); /* Return final code */ +} + + +/* + * lcp_up - LCP has come UP. + */ +static void lcp_up(fsm *f) { + ppp_pcb *pcb = f->pcb; + lcp_options *wo = &pcb->lcp_wantoptions; + lcp_options *ho = &pcb->lcp_hisoptions; + lcp_options *go = &pcb->lcp_gotoptions; + lcp_options *ao = &pcb->lcp_allowoptions; + int mtu, mru; + + if (!go->neg_magicnumber) + go->magicnumber = 0; + if (!ho->neg_magicnumber) + ho->magicnumber = 0; + + /* + * Set our MTU to the smaller of the MTU we wanted and + * the MRU our peer wanted. If we negotiated an MRU, + * set our MRU to the larger of value we wanted and + * the value we got in the negotiation. + * Note on the MTU: the link MTU can be the MRU the peer wanted, + * the interface MTU is set to the lowest of that, the + * MTU we want to use, and our link MRU. + */ + mtu = ho->neg_mru? ho->mru: PPP_MRU; + mru = go->neg_mru? LWIP_MAX(wo->mru, go->mru): PPP_MRU; +#ifdef HAVE_MULTILINK + if (!(multilink && go->neg_mrru && ho->neg_mrru)) +#endif /* HAVE_MULTILINK */ + netif_set_mtu(pcb, LWIP_MIN(LWIP_MIN(mtu, mru), ao->mru)); + ppp_send_config(pcb, mtu, + (ho->neg_asyncmap? ho->asyncmap: 0xffffffff), + ho->neg_pcompression, ho->neg_accompression); + ppp_recv_config(pcb, mru, + (pcb->settings.lax_recv? 0: go->neg_asyncmap? go->asyncmap: 0xffffffff), + go->neg_pcompression, go->neg_accompression); + + if (ho->neg_mru) + pcb->peer_mru = ho->mru; + + lcp_echo_lowerup(f->pcb); /* Enable echo messages */ + + link_established(pcb); +} + + +/* + * lcp_down - LCP has gone DOWN. + * + * Alert other protocols. + */ +static void lcp_down(fsm *f) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + + lcp_echo_lowerdown(f->pcb); + + link_down(pcb); + + ppp_send_config(pcb, PPP_MRU, 0xffffffff, 0, 0); + ppp_recv_config(pcb, PPP_MRU, + (go->neg_asyncmap? go->asyncmap: 0xffffffff), + go->neg_pcompression, go->neg_accompression); + pcb->peer_mru = PPP_MRU; +} + + +/* + * lcp_starting - LCP needs the lower layer up. + */ +static void lcp_starting(fsm *f) { + ppp_pcb *pcb = f->pcb; + link_required(pcb); +} + + +/* + * lcp_finished - LCP has finished with the lower layer. + */ +static void lcp_finished(fsm *f) { + ppp_pcb *pcb = f->pcb; + link_terminated(pcb); +} + + +#if PRINTPKT_SUPPORT +/* + * lcp_printpkt - print the contents of an LCP packet. 
+ */ +static const char* const lcp_codenames[] = { + "ConfReq", "ConfAck", "ConfNak", "ConfRej", + "TermReq", "TermAck", "CodeRej", "ProtRej", + "EchoReq", "EchoRep", "DiscReq", "Ident", + "TimeRem" +}; + +static int lcp_printpkt(const u_char *p, int plen, + void (*printer) (void *, const char *, ...), void *arg) { + int code, id, len, olen, i; + const u_char *pstart, *optend; + u_short cishort; + u32_t cilong; + + if (plen < HEADERLEN) + return 0; + pstart = p; + GETCHAR(code, p); + GETCHAR(id, p); + GETSHORT(len, p); + if (len < HEADERLEN || len > plen) + return 0; + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(lcp_codenames)) + printer(arg, " %s", lcp_codenames[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= HEADERLEN; + switch (code) { + case CONFREQ: + case CONFACK: + case CONFNAK: + case CONFREJ: + /* print option list */ + while (len >= 2) { + GETCHAR(code, p); + GETCHAR(olen, p); + p -= 2; + if (olen < 2 || olen > len) { + break; + } + printer(arg, " <"); + len -= olen; + optend = p + olen; + switch (code) { + case CI_MRU: + if (olen == CILEN_SHORT) { + p += 2; + GETSHORT(cishort, p); + printer(arg, "mru %d", cishort); + } + break; + case CI_ASYNCMAP: + if (olen == CILEN_LONG) { + p += 2; + GETLONG(cilong, p); + printer(arg, "asyncmap 0x%x", cilong); + } + break; + case CI_AUTHTYPE: + if (olen >= CILEN_SHORT) { + p += 2; + printer(arg, "auth "); + GETSHORT(cishort, p); + switch (cishort) { +#if PAP_SUPPORT + case PPP_PAP: + printer(arg, "pap"); + break; +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + case PPP_CHAP: + printer(arg, "chap"); + if (p < optend) { + switch (*p) { + case CHAP_MD5: + printer(arg, " MD5"); + ++p; + break; +#if MSCHAP_SUPPORT + case CHAP_MICROSOFT: + printer(arg, " MS"); + ++p; + break; + + case CHAP_MICROSOFT_V2: + printer(arg, " MS-v2"); + ++p; + break; +#endif /* MSCHAP_SUPPORT */ + default: + break; + } + } + break; +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + case PPP_EAP: + printer(arg, "eap"); + break; +#endif /* EAP_SUPPORT */ + default: + printer(arg, "0x%x", cishort); + } + } + break; +#if LQR_SUPPORT + case CI_QUALITY: + if (olen >= CILEN_SHORT) { + p += 2; + printer(arg, "quality "); + GETSHORT(cishort, p); + switch (cishort) { + case PPP_LQR: + printer(arg, "lqr"); + break; + default: + printer(arg, "0x%x", cishort); + } + } + break; +#endif /* LQR_SUPPORT */ + case CI_CALLBACK: + if (olen >= CILEN_CHAR) { + p += 2; + printer(arg, "callback "); + GETCHAR(cishort, p); + switch (cishort) { + case CBCP_OPT: + printer(arg, "CBCP"); + break; + default: + printer(arg, "0x%x", cishort); + } + } + break; + case CI_MAGICNUMBER: + if (olen == CILEN_LONG) { + p += 2; + GETLONG(cilong, p); + printer(arg, "magic 0x%x", cilong); + } + break; + case CI_PCOMPRESSION: + if (olen == CILEN_VOID) { + p += 2; + printer(arg, "pcomp"); + } + break; + case CI_ACCOMPRESSION: + if (olen == CILEN_VOID) { + p += 2; + printer(arg, "accomp"); + } + break; + case CI_MRRU: + if (olen == CILEN_SHORT) { + p += 2; + GETSHORT(cishort, p); + printer(arg, "mrru %d", cishort); + } + break; + case CI_SSNHF: + if (olen == CILEN_VOID) { + p += 2; + printer(arg, "ssnhf"); + } + break; + case CI_EPDISC: +#ifdef HAVE_MULTILINK + if (olen >= CILEN_CHAR) { + struct epdisc epd; + p += 2; + GETCHAR(epd.class, p); + epd.length = olen - CILEN_CHAR; + if (epd.length > MAX_ENDP_LEN) + epd.length = MAX_ENDP_LEN; + if (epd.length > 0) { + MEMCPY(epd.value, p, epd.length); + p += epd.length; + } + printer(arg, "endpoint [%s]", epdisc_to_str(&epd)); + } 
+#else + printer(arg, "endpoint"); +#endif + break; + default: + break; + } + while (p < optend) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + printer(arg, ">"); + } + break; + + case TERMACK: + case TERMREQ: + if (len > 0 && *p >= ' ' && *p < 0x7f) { + printer(arg, " "); + ppp_print_string(p, len, printer, arg); + p += len; + len = 0; + } + break; + + case ECHOREQ: + case ECHOREP: + case DISCREQ: + if (len >= 4) { + GETLONG(cilong, p); + printer(arg, " magic=0x%x", cilong); + len -= 4; + } + break; + + case IDENTIF: + case TIMEREM: + if (len >= 4) { + GETLONG(cilong, p); + printer(arg, " magic=0x%x", cilong); + len -= 4; + } + if (code == TIMEREM) { + if (len < 4) + break; + GETLONG(cilong, p); + printer(arg, " seconds=%u", cilong); + len -= 4; + } + if (len > 0) { + printer(arg, " "); + ppp_print_string(p, len, printer, arg); + p += len; + len = 0; + } + break; + default: + break; + } + + /* print the rest of the bytes in the packet */ + for (i = 0; i < len && i < 32; ++i) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + if (i < len) { + printer(arg, " ..."); + p += len - i; + } + + return p - pstart; +} +#endif /* PRINTPKT_SUPPORT */ + +/* + * Time to shut down the link because there is nothing out there. + */ + +static void LcpLinkFailure(fsm *f) { + ppp_pcb *pcb = f->pcb; + if (f->state == PPP_FSM_OPENED) { + ppp_info("No response to %d echo-requests", pcb->lcp_echos_pending); + ppp_notice("Serial link appears to be disconnected."); + pcb->err_code = PPPERR_PEERDEAD; + lcp_close(pcb, "Peer not responding"); + } +} + +/* + * Timer expired for the LCP echo requests from this process. + */ + +static void LcpEchoCheck(fsm *f) { + ppp_pcb *pcb = f->pcb; + + LcpSendEchoRequest (f); + if (f->state != PPP_FSM_OPENED) + return; + + /* + * Start the timer for the next interval. + */ + if (pcb->lcp_echo_timer_running) + ppp_warn("assertion lcp_echo_timer_running==0 failed"); + TIMEOUT (LcpEchoTimeout, f, pcb->settings.lcp_echo_interval); + pcb->lcp_echo_timer_running = 1; +} + +/* + * LcpEchoTimeout - Timer expired on the LCP echo + */ + +static void LcpEchoTimeout(void *arg) { + fsm *f = (fsm*)arg; + ppp_pcb *pcb = f->pcb; + if (pcb->lcp_echo_timer_running != 0) { + pcb->lcp_echo_timer_running = 0; + LcpEchoCheck ((fsm *) arg); + } +} + +/* + * LcpEchoReply - LCP has received a reply to the echo + */ + +static void lcp_received_echo_reply(fsm *f, int id, u_char *inp, int len) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + u32_t magic_val; + LWIP_UNUSED_ARG(id); + + /* Check the magic number - don't count replies from ourselves. */ + if (len < 4) { + ppp_dbglog("lcp: received short Echo-Reply, length %d", len); + return; + } + GETLONG(magic_val, inp); + if (go->neg_magicnumber + && magic_val == go->magicnumber) { + ppp_warn("appear to have received our own echo-reply!"); + return; + } + + /* Reset the number of outstanding echo frames */ + pcb->lcp_echos_pending = 0; +} + +/* + * LcpSendEchoRequest - Send an echo request frame to the peer + */ + +static void LcpSendEchoRequest(fsm *f) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + u32_t lcp_magic; + u_char pkt[4], *pktp; + + /* + * Detect the failure of the peer at this point. 
+ */ + if (pcb->settings.lcp_echo_fails != 0) { + if (pcb->lcp_echos_pending >= pcb->settings.lcp_echo_fails) { + LcpLinkFailure(f); + pcb->lcp_echos_pending = 0; + } + } + +#if PPP_LCP_ADAPTIVE + /* + * If adaptive echos have been enabled, only send the echo request if + * no traffic was received since the last one. + */ + if (pcb->settings.lcp_echo_adaptive) { + static unsigned int last_pkts_in = 0; + +#if PPP_STATS_SUPPORT + update_link_stats(f->unit); + link_stats_valid = 0; +#endif /* PPP_STATS_SUPPORT */ + + if (link_stats.pkts_in != last_pkts_in) { + last_pkts_in = link_stats.pkts_in; + return; + } + } +#endif + + /* + * Make and send the echo request frame. + */ + if (f->state == PPP_FSM_OPENED) { + lcp_magic = go->magicnumber; + pktp = pkt; + PUTLONG(lcp_magic, pktp); + fsm_sdata(f, ECHOREQ, pcb->lcp_echo_number++, pkt, pktp - pkt); + ++pcb->lcp_echos_pending; + } +} + +/* + * lcp_echo_lowerup - Start the timer for the LCP frame + */ + +static void lcp_echo_lowerup(ppp_pcb *pcb) { + fsm *f = &pcb->lcp_fsm; + + /* Clear the parameters for generating echo frames */ + pcb->lcp_echos_pending = 0; + pcb->lcp_echo_number = 0; + pcb->lcp_echo_timer_running = 0; + + /* If a timeout interval is specified then start the timer */ + if (pcb->settings.lcp_echo_interval != 0) + LcpEchoCheck (f); +} + +/* + * lcp_echo_lowerdown - Stop the timer for the LCP frame + */ + +static void lcp_echo_lowerdown(ppp_pcb *pcb) { + fsm *f = &pcb->lcp_fsm; + + if (pcb->lcp_echo_timer_running != 0) { + UNTIMEOUT (LcpEchoTimeout, f); + pcb->lcp_echo_timer_running = 0; + } +} + +#endif /* PPP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/magic.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/magic.c new file mode 100644 index 0000000..d0d87c5 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/magic.c @@ -0,0 +1,294 @@ +/* + * magic.c - PPP Magic Number routines. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." 
+ * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +/***************************************************************************** +* randm.c - Random number generator program file. +* +* Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. +* Copyright (c) 1998 by Global Election Systems Inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 03-01-01 Marc Boucher +* Ported to lwIP. +* 98-06-03 Guy Lancaster , Global Election Systems Inc. +* Extracted from avos. +*****************************************************************************/ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/magic.h" + +#if PPP_MD5_RANDM /* Using MD5 for better randomness if enabled */ + +#include "netif/ppp/pppcrypt.h" + +#define MD5_HASH_SIZE 16 +static char magic_randpool[MD5_HASH_SIZE]; /* Pool of randomness. */ +static long magic_randcount; /* Pseudo-random incrementer */ +static u32_t magic_randomseed; /* Seed used for random number generation. */ + +/* + * Churn the randomness pool on a random event. Call this early and often + * on random and semi-random system events to build randomness in time for + * usage. For randomly timed events, pass a null pointer and a zero length + * and this will use the system timer and other sources to add randomness. + * If new random data is available, pass a pointer to that and it will be + * included. + * + * Ref: Applied Cryptography 2nd Ed. by Bruce Schneier p. 
427 + */ +static void magic_churnrand(char *rand_data, u32_t rand_len) { + lwip_md5_context md5_ctx; + + /* LWIP_DEBUGF(LOG_INFO, ("magic_churnrand: %u@%P\n", rand_len, rand_data)); */ + lwip_md5_init(&md5_ctx); + lwip_md5_starts(&md5_ctx); + lwip_md5_update(&md5_ctx, (u_char *)magic_randpool, sizeof(magic_randpool)); + if (rand_data) { + lwip_md5_update(&md5_ctx, (u_char *)rand_data, rand_len); + } else { + struct { + /* INCLUDE fields for any system sources of randomness */ + u32_t jiffies; +#ifdef LWIP_RAND + u32_t rand; +#endif /* LWIP_RAND */ + } sys_data; + magic_randomseed += sys_jiffies(); + sys_data.jiffies = magic_randomseed; +#ifdef LWIP_RAND + sys_data.rand = LWIP_RAND(); +#endif /* LWIP_RAND */ + /* Load sys_data fields here. */ + lwip_md5_update(&md5_ctx, (u_char *)&sys_data, sizeof(sys_data)); + } + lwip_md5_finish(&md5_ctx, (u_char *)magic_randpool); + lwip_md5_free(&md5_ctx); +/* LWIP_DEBUGF(LOG_INFO, ("magic_churnrand: -> 0\n")); */ +} + +/* + * Initialize the random number generator. + */ +void magic_init(void) { + magic_churnrand(NULL, 0); +} + +/* + * Randomize our random seed value. + */ +void magic_randomize(void) { + magic_churnrand(NULL, 0); +} + +/* + * magic_random_bytes - Fill a buffer with random bytes. + * + * Use the random pool to generate random data. This degrades to pseudo + * random when used faster than randomness is supplied using magic_churnrand(). + * Note: It's important that there be sufficient randomness in magic_randpool + * before this is called for otherwise the range of the result may be + * narrow enough to make a search feasible. + * + * Ref: Applied Cryptography 2nd Ed. by Bruce Schneier p. 427 + * + * XXX Why does he not just call magic_churnrand() for each block? Probably + * so that you don't ever publish the seed which could possibly help + * predict future values. + * XXX Why don't we preserve md5 between blocks and just update it with + * magic_randcount each time? Probably there is a weakness but I wish that + * it was documented. + */ +void magic_random_bytes(unsigned char *buf, u32_t buf_len) { + lwip_md5_context md5_ctx; + u_char tmp[MD5_HASH_SIZE]; + u32_t n; + + while (buf_len > 0) { + lwip_md5_init(&md5_ctx); + lwip_md5_starts(&md5_ctx); + lwip_md5_update(&md5_ctx, (u_char *)magic_randpool, sizeof(magic_randpool)); + lwip_md5_update(&md5_ctx, (u_char *)&magic_randcount, sizeof(magic_randcount)); + lwip_md5_finish(&md5_ctx, tmp); + lwip_md5_free(&md5_ctx); + magic_randcount++; + n = LWIP_MIN(buf_len, MD5_HASH_SIZE); + MEMCPY(buf, tmp, n); + buf += n; + buf_len -= n; + } +} + +/* + * Return a new random number. + */ +u32_t magic(void) { + u32_t new_rand; + + magic_random_bytes((unsigned char *)&new_rand, sizeof(new_rand)); + + return new_rand; +} + +#else /* PPP_MD5_RANDM */ + +/*****************************/ +/*** LOCAL DATA STRUCTURES ***/ +/*****************************/ +#ifndef LWIP_RAND +static int magic_randomized; /* Set when truely randomized. */ +#endif /* LWIP_RAND */ +static u32_t magic_randomseed; /* Seed used for random number generation. */ + + +/***********************************/ +/*** PUBLIC FUNCTION DEFINITIONS ***/ +/***********************************/ + +/* + * Initialize the random number generator. + * + * Here we attempt to compute a random number seed but even if + * it isn't random, we'll randomize it later. + * + * The current method uses the fields from the real time clock, + * the idle process counter, the millisecond counter, and the + * hardware timer tick counter. 
When this is invoked + * in startup(), then the idle counter and timer values may + * repeat after each boot and the real time clock may not be + * operational. Thus we call it again on the first random + * event. + */ +void magic_init(void) { + magic_randomseed += sys_jiffies(); +#ifndef LWIP_RAND + /* Initialize the Borland random number generator. */ + srand((unsigned)magic_randomseed); +#endif /* LWIP_RAND */ +} + +/* + * magic_init - Initialize the magic number generator. + * + * Randomize our random seed value. Here we use the fact that + * this function is called at *truely random* times by the polling + * and network functions. Here we only get 16 bits of new random + * value but we use the previous value to randomize the other 16 + * bits. + */ +void magic_randomize(void) { +#ifndef LWIP_RAND + if (!magic_randomized) { + magic_randomized = !0; + magic_init(); + /* The initialization function also updates the seed. */ + } else { +#endif /* LWIP_RAND */ + magic_randomseed += sys_jiffies(); +#ifndef LWIP_RAND + } +#endif /* LWIP_RAND */ +} + +/* + * Return a new random number. + * + * Here we use the Borland rand() function to supply a pseudo random + * number which we make truely random by combining it with our own + * seed which is randomized by truely random events. + * Thus the numbers will be truely random unless there have been no + * operator or network events in which case it will be pseudo random + * seeded by the real time clock. + */ +u32_t magic(void) { +#ifdef LWIP_RAND + return LWIP_RAND() + magic_randomseed; +#else /* LWIP_RAND */ + return ((u32_t)rand() << 16) + (u32_t)rand() + magic_randomseed; +#endif /* LWIP_RAND */ +} + +/* + * magic_random_bytes - Fill a buffer with random bytes. + */ +void magic_random_bytes(unsigned char *buf, u32_t buf_len) { + u32_t new_rand, n; + + while (buf_len > 0) { + new_rand = magic(); + n = LWIP_MIN(buf_len, sizeof(new_rand)); + MEMCPY(buf, &new_rand, n); + buf += n; + buf_len -= n; + } +} +#endif /* PPP_MD5_RANDM */ + +/* + * Return a new random number between 0 and (2^pow)-1 included. + */ +u32_t magic_pow(u8_t pow) { + return magic() & ~(~0UL<<pow); +} + +#endif /* PPP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/mppe.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/mppe.c new file mode 100644 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/mppe.c +/* + * mppe.c - interface MPPE to the PPP code. + * + * By Frank Cusack <fcusack@fcusack.com>. + * Copyright (c) 2002,2003,2004 Google, Inc. + * All rights reserved. + * + * License: + * Permission to use, copy, modify, and distribute this software and its + * documentation is hereby granted, provided that the above copyright + * notice appears in all copies. This software is provided without any + * warranty, express or implied. + * + * Changelog: + * 08/12/05 - Matt Domsch + * Only need extra skb padding on transmit, not receive. + * 06/18/04 - Matt Domsch , Oleg Makarenko + * Use Linux kernel 2.6 arc4 and sha1 routines rather than + * providing our own.
+ * 2/15/04 - TS: added #include and testing for Kernel + * version before using + * MOD_DEC_USAGE_COUNT/MOD_INC_USAGE_COUNT which are + * deprecated in 2.6 + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && MPPE_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include + +#include "lwip/err.h" + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/ccp.h" +#include "netif/ppp/mppe.h" +#include "netif/ppp/pppdebug.h" +#include "netif/ppp/pppcrypt.h" + +#define SHA1_SIGNATURE_SIZE 20 + +/* ppp_mppe_state.bits definitions */ +#define MPPE_BIT_A 0x80 /* Encryption table were (re)inititalized */ +#define MPPE_BIT_B 0x40 /* MPPC only (not implemented) */ +#define MPPE_BIT_C 0x20 /* MPPC only (not implemented) */ +#define MPPE_BIT_D 0x10 /* This is an encrypted frame */ + +#define MPPE_BIT_FLUSHED MPPE_BIT_A +#define MPPE_BIT_ENCRYPTED MPPE_BIT_D + +#define MPPE_BITS(p) ((p)[0] & 0xf0) +#define MPPE_CCOUNT(p) ((((p)[0] & 0x0f) << 8) + (p)[1]) +#define MPPE_CCOUNT_SPACE 0x1000 /* The size of the ccount space */ + +#define MPPE_OVHD 2 /* MPPE overhead/packet */ +#define SANITY_MAX 1600 /* Max bogon factor we will tolerate */ + +/* + * Perform the MPPE rekey algorithm, from RFC 3078, sec. 7.3. + * Well, not what's written there, but rather what they meant. + */ +static void mppe_rekey(ppp_mppe_state * state, int initial_key) +{ + lwip_sha1_context sha1_ctx; + u8_t sha1_digest[SHA1_SIGNATURE_SIZE]; + + /* + * Key Derivation, from RFC 3078, RFC 3079. + * Equivalent to Get_Key() for MS-CHAP as described in RFC 3079. + */ + lwip_sha1_init(&sha1_ctx); + lwip_sha1_starts(&sha1_ctx); + lwip_sha1_update(&sha1_ctx, state->master_key, state->keylen); + lwip_sha1_update(&sha1_ctx, mppe_sha1_pad1, SHA1_PAD_SIZE); + lwip_sha1_update(&sha1_ctx, state->session_key, state->keylen); + lwip_sha1_update(&sha1_ctx, mppe_sha1_pad2, SHA1_PAD_SIZE); + lwip_sha1_finish(&sha1_ctx, sha1_digest); + lwip_sha1_free(&sha1_ctx); + MEMCPY(state->session_key, sha1_digest, state->keylen); + + if (!initial_key) { + lwip_arc4_init(&state->arc4); + lwip_arc4_setup(&state->arc4, sha1_digest, state->keylen); + lwip_arc4_crypt(&state->arc4, state->session_key, state->keylen); + lwip_arc4_free(&state->arc4); + } + if (state->keylen == 8) { + /* See RFC 3078 */ + state->session_key[0] = 0xd1; + state->session_key[1] = 0x26; + state->session_key[2] = 0x9e; + } + lwip_arc4_init(&state->arc4); + lwip_arc4_setup(&state->arc4, state->session_key, state->keylen); +} + +/* + * Set key, used by MSCHAP before mppe_init() is actually called by CCP so we + * don't have to keep multiple copies of keys. + */ +void mppe_set_key(ppp_pcb *pcb, ppp_mppe_state *state, u8_t *key) { + LWIP_UNUSED_ARG(pcb); + MEMCPY(state->master_key, key, MPPE_MAX_KEY_LEN); +} + +/* + * Initialize (de)compressor state. + */ +void +mppe_init(ppp_pcb *pcb, ppp_mppe_state *state, u8_t options) +{ +#if PPP_DEBUG + const u8_t *debugstr = (const u8_t*)"mppe_comp_init"; + if (&pcb->mppe_decomp == state) { + debugstr = (const u8_t*)"mppe_decomp_init"; + } +#endif /* PPP_DEBUG */ + + /* Save keys. */ + MEMCPY(state->session_key, state->master_key, sizeof(state->master_key)); + + if (options & MPPE_OPT_128) + state->keylen = 16; + else if (options & MPPE_OPT_40) + state->keylen = 8; + else { + PPPDEBUG(LOG_DEBUG, ("%s[%d]: unknown key length\n", debugstr, + pcb->netif->num)); + lcp_close(pcb, "MPPE required but peer negotiation failed"); + return; + } + if (options & MPPE_OPT_STATEFUL) + state->stateful = 1; + + /* Generate the initial session key. 
*/ + mppe_rekey(state, 1); + +#if PPP_DEBUG + { + int i; + char mkey[sizeof(state->master_key) * 2 + 1]; + char skey[sizeof(state->session_key) * 2 + 1]; + + PPPDEBUG(LOG_DEBUG, ("%s[%d]: initialized with %d-bit %s mode\n", + debugstr, pcb->netif->num, (state->keylen == 16) ? 128 : 40, + (state->stateful) ? "stateful" : "stateless")); + + for (i = 0; i < (int)sizeof(state->master_key); i++) + sprintf(mkey + i * 2, "%02x", state->master_key[i]); + for (i = 0; i < (int)sizeof(state->session_key); i++) + sprintf(skey + i * 2, "%02x", state->session_key[i]); + PPPDEBUG(LOG_DEBUG, + ("%s[%d]: keys: master: %s initial session: %s\n", + debugstr, pcb->netif->num, mkey, skey)); + } +#endif /* PPP_DEBUG */ + + /* + * Initialize the coherency count. The initial value is not specified + * in RFC 3078, but we can make a reasonable assumption that it will + * start at 0. Setting it to the max here makes the comp/decomp code + * do the right thing (determined through experiment). + */ + state->ccount = MPPE_CCOUNT_SPACE - 1; + + /* + * Note that even though we have initialized the key table, we don't + * set the FLUSHED bit. This is contrary to RFC 3078, sec. 3.1. + */ + state->bits = MPPE_BIT_ENCRYPTED; +} + +/* + * We received a CCP Reset-Request (actually, we are sending a Reset-Ack), + * tell the compressor to rekey. Note that we MUST NOT rekey for + * every CCP Reset-Request; we only rekey on the next xmit packet. + * We might get multiple CCP Reset-Requests if our CCP Reset-Ack is lost. + * So, rekeying for every CCP Reset-Request is broken as the peer will not + * know how many times we've rekeyed. (If we rekey and THEN get another + * CCP Reset-Request, we must rekey again.) + */ +void mppe_comp_reset(ppp_pcb *pcb, ppp_mppe_state *state) +{ + LWIP_UNUSED_ARG(pcb); + state->bits |= MPPE_BIT_FLUSHED; +} + +/* + * Compress (encrypt) a packet. + * It's strange to call this a compressor, since the output is always + * MPPE_OVHD + 2 bytes larger than the input. + */ +err_t +mppe_compress(ppp_pcb *pcb, ppp_mppe_state *state, struct pbuf **pb, u16_t protocol) +{ + struct pbuf *n, *np; + u8_t *pl; + err_t err; + + LWIP_UNUSED_ARG(pcb); + + /* TCP stack requires that we don't change the packet payload, therefore we copy + * the whole packet before encryption. 
+ */ + np = pbuf_alloc(PBUF_RAW, MPPE_OVHD + sizeof(protocol) + (*pb)->tot_len, PBUF_RAM); + if (!np) { + return ERR_MEM; + } + + /* Hide MPPE header + protocol */ + pbuf_remove_header(np, MPPE_OVHD + sizeof(protocol)); + + if ((err = pbuf_copy(np, *pb)) != ERR_OK) { + pbuf_free(np); + return err; + } + + /* Reveal MPPE header + protocol */ + pbuf_add_header(np, MPPE_OVHD + sizeof(protocol)); + + *pb = np; + pl = (u8_t*)np->payload; + + state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; + PPPDEBUG(LOG_DEBUG, ("mppe_compress[%d]: ccount %d\n", pcb->netif->num, state->ccount)); + /* FIXME: use PUT* macros */ + pl[0] = state->ccount>>8; + pl[1] = state->ccount; + + if (!state->stateful || /* stateless mode */ + ((state->ccount & 0xff) == 0xff) || /* "flag" packet */ + (state->bits & MPPE_BIT_FLUSHED)) { /* CCP Reset-Request */ + /* We must rekey */ + if (state->stateful) { + PPPDEBUG(LOG_DEBUG, ("mppe_compress[%d]: rekeying\n", pcb->netif->num)); + } + mppe_rekey(state, 0); + state->bits |= MPPE_BIT_FLUSHED; + } + pl[0] |= state->bits; + state->bits &= ~MPPE_BIT_FLUSHED; /* reset for next xmit */ + pl += MPPE_OVHD; + + /* Add protocol */ + /* FIXME: add PFC support */ + pl[0] = protocol >> 8; + pl[1] = protocol; + + /* Hide MPPE header */ + pbuf_remove_header(np, MPPE_OVHD); + + /* Encrypt packet */ + for (n = np; n != NULL; n = n->next) { + lwip_arc4_crypt(&state->arc4, (u8_t*)n->payload, n->len); + if (n->tot_len == n->len) { + break; + } + } + + /* Reveal MPPE header */ + pbuf_add_header(np, MPPE_OVHD); + + return ERR_OK; +} + +/* + * We received a CCP Reset-Ack. Just ignore it. + */ +void mppe_decomp_reset(ppp_pcb *pcb, ppp_mppe_state *state) +{ + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(state); + return; +} + +/* + * Decompress (decrypt) an MPPE packet. + */ +err_t +mppe_decompress(ppp_pcb *pcb, ppp_mppe_state *state, struct pbuf **pb) +{ + struct pbuf *n0 = *pb, *n; + u8_t *pl; + u16_t ccount; + u8_t flushed; + + /* MPPE Header */ + if (n0->len < MPPE_OVHD) { + PPPDEBUG(LOG_DEBUG, + ("mppe_decompress[%d]: short pkt (%d)\n", + pcb->netif->num, n0->len)); + state->sanity_errors += 100; + goto sanity_error; + } + + pl = (u8_t*)n0->payload; + flushed = MPPE_BITS(pl) & MPPE_BIT_FLUSHED; + ccount = MPPE_CCOUNT(pl); + PPPDEBUG(LOG_DEBUG, ("mppe_decompress[%d]: ccount %d\n", + pcb->netif->num, ccount)); + + /* sanity checks -- terminate with extreme prejudice */ + if (!(MPPE_BITS(pl) & MPPE_BIT_ENCRYPTED)) { + PPPDEBUG(LOG_DEBUG, + ("mppe_decompress[%d]: ENCRYPTED bit not set!\n", + pcb->netif->num)); + state->sanity_errors += 100; + goto sanity_error; + } + if (!state->stateful && !flushed) { + PPPDEBUG(LOG_DEBUG, ("mppe_decompress[%d]: FLUSHED bit not set in " + "stateless mode!\n", pcb->netif->num)); + state->sanity_errors += 100; + goto sanity_error; + } + if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) { + PPPDEBUG(LOG_DEBUG, ("mppe_decompress[%d]: FLUSHED bit not set on " + "flag packet!\n", pcb->netif->num)); + state->sanity_errors += 100; + goto sanity_error; + } + + /* + * Check the coherency count. + */ + + if (!state->stateful) { + /* Discard late packet */ + if ((ccount - state->ccount) % MPPE_CCOUNT_SPACE > MPPE_CCOUNT_SPACE / 2) { + state->sanity_errors++; + goto sanity_error; + } + + /* RFC 3078, sec 8.1. Rekey for every packet. */ + while (state->ccount != ccount) { + mppe_rekey(state, 0); + state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; + } + } else { + /* RFC 3078, sec 8.2. 
*/ + if (!state->discard) { + /* normal state */ + state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; + if (ccount != state->ccount) { + /* + * (ccount > state->ccount) + * Packet loss detected, enter the discard state. + * Signal the peer to rekey (by sending a CCP Reset-Request). + */ + state->discard = 1; + ccp_resetrequest(pcb); + return ERR_BUF; + } + } else { + /* discard state */ + if (!flushed) { + /* ccp.c will be silent (no additional CCP Reset-Requests). */ + return ERR_BUF; + } else { + /* Rekey for every missed "flag" packet. */ + while ((ccount & ~0xff) != + (state->ccount & ~0xff)) { + mppe_rekey(state, 0); + state->ccount = + (state->ccount + + 256) % MPPE_CCOUNT_SPACE; + } + + /* reset */ + state->discard = 0; + state->ccount = ccount; + /* + * Another problem with RFC 3078 here. It implies that the + * peer need not send a Reset-Ack packet. But RFC 1962 + * requires it. Hopefully, M$ does send a Reset-Ack; even + * though it isn't required for MPPE synchronization, it is + * required to reset CCP state. + */ + } + } + if (flushed) + mppe_rekey(state, 0); + } + + /* Hide MPPE header */ + pbuf_remove_header(n0, MPPE_OVHD); + + /* Decrypt the packet. */ + for (n = n0; n != NULL; n = n->next) { + lwip_arc4_crypt(&state->arc4, (u8_t*)n->payload, n->len); + if (n->tot_len == n->len) { + break; + } + } + + /* good packet credit */ + state->sanity_errors >>= 1; + + return ERR_OK; + +sanity_error: + if (state->sanity_errors >= SANITY_MAX) { + /* + * Take LCP down if the peer is sending too many bogons. + * We don't want to do this for a single or just a few + * instances since it could just be due to packet corruption. + */ + lcp_close(pcb, "Too many MPPE errors"); + } + return ERR_BUF; +} + +#endif /* PPP_SUPPORT && MPPE_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/multilink.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/multilink.c new file mode 100644 index 0000000..62014e8 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/multilink.c @@ -0,0 +1,609 @@ +/* + * multilink.c - support routines for multilink. + * + * Copyright (c) 2000-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && defined(HAVE_MULTILINK) /* don't build if not configured for use in lwipopts.h */ + +/* Multilink support + * + * Multilink uses Samba TDB (Trivial Database Library), which + * we cannot port, because it needs a filesystem. + * + * We have to choose between doing a memory-shared TDB-clone, + * or dropping multilink support at all. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/lcp.h" +#include "netif/ppp/tdb.h" + +bool endpoint_specified; /* user gave explicit endpoint discriminator */ +char *bundle_id; /* identifier for our bundle */ +char *blinks_id; /* key for the list of links */ +bool doing_multilink; /* multilink was enabled and agreed to */ +bool multilink_master; /* we own the multilink bundle */ + +extern TDB_CONTEXT *pppdb; +extern char db_key[]; + +static void make_bundle_links (int append); +static void remove_bundle_link (void); +static void iterate_bundle_links (void (*func) (char *)); + +static int get_default_epdisc (struct epdisc *); +static int parse_num (char *str, const char *key, int *valp); +static int owns_unit (TDB_DATA pid, int unit); + +#define set_ip_epdisc(ep, addr) do { \ + ep->length = 4; \ + ep->value[0] = addr >> 24; \ + ep->value[1] = addr >> 16; \ + ep->value[2] = addr >> 8; \ + ep->value[3] = addr; \ +} while (0) + +#define LOCAL_IP_ADDR(addr) \ + (((addr) & 0xff000000) == 0x0a000000 /* 10.x.x.x */ \ + || ((addr) & 0xfff00000) == 0xac100000 /* 172.16.x.x */ \ + || ((addr) & 0xffff0000) == 0xc0a80000) /* 192.168.x.x */ + +#define process_exists(n) (kill((n), 0) == 0 || errno != ESRCH) + +void +mp_check_options() +{ + lcp_options *wo = &lcp_wantoptions[0]; + lcp_options *ao = &lcp_allowoptions[0]; + + doing_multilink = 0; + if (!multilink) + return; + /* if we're doing multilink, we have to negotiate MRRU */ + if (!wo->neg_mrru) { + /* mrru not specified, default to mru */ + wo->mrru = wo->mru; + wo->neg_mrru = 1; + } + ao->mrru = ao->mru; + ao->neg_mrru = 1; + + if (!wo->neg_endpoint && !noendpoint) { + /* get a default endpoint value */ + wo->neg_endpoint = get_default_epdisc(&wo->endpoint); + } +} + +/* + * Make a new bundle or join us to an existing bundle + * if we are doing multilink. + */ +int +mp_join_bundle() +{ + lcp_options *go = &lcp_gotoptions[0]; + lcp_options *ho = &lcp_hisoptions[0]; + lcp_options *ao = &lcp_allowoptions[0]; + int unit, pppd_pid; + int l, mtu; + char *p; + TDB_DATA key, pid, rec; + + if (doing_multilink) { + /* have previously joined a bundle */ + if (!go->neg_mrru || !ho->neg_mrru) { + notice("oops, didn't get multilink on renegotiation"); + lcp_close(pcb, "multilink required"); + return 0; + } + /* XXX should check the peer_authname and ho->endpoint + are the same as previously */ + return 0; + } + + if (!go->neg_mrru || !ho->neg_mrru) { + /* not doing multilink */ + if (go->neg_mrru) + notice("oops, multilink negotiated only for receive"); + mtu = ho->neg_mru? ho->mru: PPP_MRU; + if (mtu > ao->mru) + mtu = ao->mru; + if (demand) { + /* already have a bundle */ + cfg_bundle(0, 0, 0, 0); + netif_set_mtu(pcb, mtu); + return 0; + } + make_new_bundle(0, 0, 0, 0); + set_ifunit(1); + netif_set_mtu(pcb, mtu); + return 0; + } + + doing_multilink = 1; + + /* + * Find the appropriate bundle or join a new one. + * First we make up a name for the bundle. + * The length estimate is worst-case assuming every + * character has to be quoted. 
+ */ + l = 4 * strlen(peer_authname) + 10; + if (ho->neg_endpoint) + l += 3 * ho->endpoint.length + 8; + if (bundle_name) + l += 3 * strlen(bundle_name) + 2; + bundle_id = malloc(l); + if (bundle_id == 0) + novm("bundle identifier"); + + p = bundle_id; + p += slprintf(p, l-1, "BUNDLE=\"%q\"", peer_authname); + if (ho->neg_endpoint || bundle_name) + *p++ = '/'; + if (ho->neg_endpoint) + p += slprintf(p, bundle_id+l-p, "%s", + epdisc_to_str(&ho->endpoint)); + if (bundle_name) + p += slprintf(p, bundle_id+l-p, "/%v", bundle_name); + + /* Make the key for the list of links belonging to the bundle */ + l = p - bundle_id; + blinks_id = malloc(l + 7); + if (blinks_id == NULL) + novm("bundle links key"); + slprintf(blinks_id, l + 7, "BUNDLE_LINKS=%s", bundle_id + 7); + + /* + * For demand mode, we only need to configure the bundle + * and attach the link. + */ + mtu = LWIP_MIN(ho->mrru, ao->mru); + if (demand) { + cfg_bundle(go->mrru, ho->mrru, go->neg_ssnhf, ho->neg_ssnhf); + netif_set_mtu(pcb, mtu); + script_setenv("BUNDLE", bundle_id + 7, 1); + return 0; + } + + /* + * Check if the bundle ID is already in the database. + */ + unit = -1; + lock_db(); + key.dptr = bundle_id; + key.dsize = p - bundle_id; + pid = tdb_fetch(pppdb, key); + if (pid.dptr != NULL) { + /* bundle ID exists, see if the pppd record exists */ + rec = tdb_fetch(pppdb, pid); + if (rec.dptr != NULL && rec.dsize > 0) { + /* make sure the string is null-terminated */ + rec.dptr[rec.dsize-1] = 0; + /* parse the interface number */ + parse_num(rec.dptr, "IFNAME=ppp", &unit); + /* check the pid value */ + if (!parse_num(rec.dptr, "PPPD_PID=", &pppd_pid) + || !process_exists(pppd_pid) + || !owns_unit(pid, unit)) + unit = -1; + free(rec.dptr); + } + free(pid.dptr); + } + + if (unit >= 0) { + /* attach to existing unit */ + if (bundle_attach(unit)) { + set_ifunit(0); + script_setenv("BUNDLE", bundle_id + 7, 0); + make_bundle_links(1); + unlock_db(); + info("Link attached to %s", ifname); + return 1; + } + /* attach failed because bundle doesn't exist */ + } + + /* we have to make a new bundle */ + make_new_bundle(go->mrru, ho->mrru, go->neg_ssnhf, ho->neg_ssnhf); + set_ifunit(1); + netif_set_mtu(pcb, mtu); + script_setenv("BUNDLE", bundle_id + 7, 1); + make_bundle_links(pcb); + unlock_db(); + info("New bundle %s created", ifname); + multilink_master = 1; + return 0; +} + +void mp_exit_bundle() +{ + lock_db(); + remove_bundle_link(); + unlock_db(); +} + +static void sendhup(char *str) +{ + int pid; + + if (parse_num(str, "PPPD_PID=", &pid) && pid != getpid()) { + if (debug) + dbglog("sending SIGHUP to process %d", pid); + kill(pid, SIGHUP); + } +} + +void mp_bundle_terminated() +{ + TDB_DATA key; + + bundle_terminating = 1; + upper_layers_down(pcb); + notice("Connection terminated."); +#if PPP_STATS_SUPPORT + print_link_stats(); +#endif /* PPP_STATS_SUPPORT */ + if (!demand) { + remove_pidfiles(); + script_unsetenv("IFNAME"); + } + + lock_db(); + destroy_bundle(); + iterate_bundle_links(sendhup); + key.dptr = blinks_id; + key.dsize = strlen(blinks_id); + tdb_delete(pppdb, key); + unlock_db(); + + new_phase(PPP_PHASE_DEAD); + + doing_multilink = 0; + multilink_master = 0; +} + +static void make_bundle_links(int append) +{ + TDB_DATA key, rec; + char *p; + char entry[32]; + int l; + + key.dptr = blinks_id; + key.dsize = strlen(blinks_id); + slprintf(entry, sizeof(entry), "%s;", db_key); + p = entry; + if (append) { + rec = tdb_fetch(pppdb, key); + if (rec.dptr != NULL && rec.dsize > 0) { + rec.dptr[rec.dsize-1] = 0; + if (strstr(rec.dptr, 
db_key) != NULL) { + /* already in there? strange */ + warn("link entry already exists in tdb"); + return; + } + l = rec.dsize + strlen(entry); + p = malloc(l); + if (p == NULL) + novm("bundle link list"); + slprintf(p, l, "%s%s", rec.dptr, entry); + } else { + warn("bundle link list not found"); + } + if (rec.dptr != NULL) + free(rec.dptr); + } + rec.dptr = p; + rec.dsize = strlen(p) + 1; + if (tdb_store(pppdb, key, rec, TDB_REPLACE)) + error("couldn't %s bundle link list", + append? "update": "create"); + if (p != entry) + free(p); +} + +static void remove_bundle_link() +{ + TDB_DATA key, rec; + char entry[32]; + char *p, *q; + int l; + + key.dptr = blinks_id; + key.dsize = strlen(blinks_id); + slprintf(entry, sizeof(entry), "%s;", db_key); + + rec = tdb_fetch(pppdb, key); + if (rec.dptr == NULL || rec.dsize <= 0) { + if (rec.dptr != NULL) + free(rec.dptr); + return; + } + rec.dptr[rec.dsize-1] = 0; + p = strstr(rec.dptr, entry); + if (p != NULL) { + q = p + strlen(entry); + l = strlen(q) + 1; + memmove(p, q, l); + rec.dsize = p - rec.dptr + l; + if (tdb_store(pppdb, key, rec, TDB_REPLACE)) + error("couldn't update bundle link list (removal)"); + } + free(rec.dptr); +} + +static void iterate_bundle_links(void (*func)(char *)) +{ + TDB_DATA key, rec, pp; + char *p, *q; + + key.dptr = blinks_id; + key.dsize = strlen(blinks_id); + rec = tdb_fetch(pppdb, key); + if (rec.dptr == NULL || rec.dsize <= 0) { + error("bundle link list not found (iterating list)"); + if (rec.dptr != NULL) + free(rec.dptr); + return; + } + p = rec.dptr; + p[rec.dsize-1] = 0; + while ((q = strchr(p, ';')) != NULL) { + *q = 0; + key.dptr = p; + key.dsize = q - p; + pp = tdb_fetch(pppdb, key); + if (pp.dptr != NULL && pp.dsize > 0) { + pp.dptr[pp.dsize-1] = 0; + func(pp.dptr); + } + if (pp.dptr != NULL) + free(pp.dptr); + p = q + 1; + } + free(rec.dptr); +} + +static int +parse_num(str, key, valp) + char *str; + const char *key; + int *valp; +{ + char *p, *endp; + int i; + + p = strstr(str, key); + if (p != 0) { + p += strlen(key); + i = strtol(p, &endp, 10); + if (endp != p && (*endp == 0 || *endp == ';')) { + *valp = i; + return 1; + } + } + return 0; +} + +/* + * Check whether the pppd identified by `key' still owns ppp unit `unit'. + */ +static int +owns_unit(key, unit) + TDB_DATA key; + int unit; +{ + char ifkey[32]; + TDB_DATA kd, vd; + int ret = 0; + + slprintf(ifkey, sizeof(ifkey), "IFNAME=ppp%d", unit); + kd.dptr = ifkey; + kd.dsize = strlen(ifkey); + vd = tdb_fetch(pppdb, kd); + if (vd.dptr != NULL) { + ret = vd.dsize == key.dsize + && memcmp(vd.dptr, key.dptr, vd.dsize) == 0; + free(vd.dptr); + } + return ret; +} + +static int +get_default_epdisc(ep) + struct epdisc *ep; +{ + char *p; + struct hostent *hp; + u32_t addr; + + /* First try for an ethernet MAC address */ + p = get_first_ethernet(); + if (p != 0 && get_if_hwaddr(ep->value, p) >= 0) { + ep->class = EPD_MAC; + ep->length = 6; + return 1; + } + + /* see if our hostname corresponds to a reasonable IP address */ + hp = gethostbyname(hostname); + if (hp != NULL) { + addr = *(u32_t *)hp->h_addr; + if (!bad_ip_adrs(addr)) { + addr = lwip_ntohl(addr); + if (!LOCAL_IP_ADDR(addr)) { + ep->class = EPD_IP; + set_ip_epdisc(ep, addr); + return 1; + } + } + } + + return 0; +} + +/* + * epdisc_to_str - make a printable string from an endpoint discriminator. 
+ */ + +static char *endp_class_names[] = { + "null", "local", "IP", "MAC", "magic", "phone" +}; + +char * +epdisc_to_str(ep) + struct epdisc *ep; +{ + static char str[MAX_ENDP_LEN*3+8]; + u_char *p = ep->value; + int i, mask = 0; + char *q, c, c2; + + if (ep->class == EPD_NULL && ep->length == 0) + return "null"; + if (ep->class == EPD_IP && ep->length == 4) { + u32_t addr; + + GETLONG(addr, p); + slprintf(str, sizeof(str), "IP:%I", lwip_htonl(addr)); + return str; + } + + c = ':'; + c2 = '.'; + if (ep->class == EPD_MAC && ep->length == 6) + c2 = ':'; + else if (ep->class == EPD_MAGIC && (ep->length % 4) == 0) + mask = 3; + q = str; + if (ep->class <= EPD_PHONENUM) + q += slprintf(q, sizeof(str)-1, "%s", + endp_class_names[ep->class]); + else + q += slprintf(q, sizeof(str)-1, "%d", ep->class); + c = ':'; + for (i = 0; i < ep->length && i < MAX_ENDP_LEN; ++i) { + if ((i & mask) == 0) { + *q++ = c; + c = c2; + } + q += slprintf(q, str + sizeof(str) - q, "%.2x", ep->value[i]); + } + return str; +} + +static int hexc_val(int c) +{ + if (c >= 'a') + return c - 'a' + 10; + if (c >= 'A') + return c - 'A' + 10; + return c - '0'; +} + +int +str_to_epdisc(ep, str) + struct epdisc *ep; + char *str; +{ + int i, l; + char *p, *endp; + + for (i = EPD_NULL; i <= EPD_PHONENUM; ++i) { + int sl = strlen(endp_class_names[i]); + if (strncasecmp(str, endp_class_names[i], sl) == 0) { + str += sl; + break; + } + } + if (i > EPD_PHONENUM) { + /* not a class name, try a decimal class number */ + i = strtol(str, &endp, 10); + if (endp == str) + return 0; /* can't parse class number */ + str = endp; + } + ep->class = i; + if (*str == 0) { + ep->length = 0; + return 1; + } + if (*str != ':' && *str != '.') + return 0; + ++str; + + if (i == EPD_IP) { + u32_t addr; + i = parse_dotted_ip(str, &addr); + if (i == 0 || str[i] != 0) + return 0; + set_ip_epdisc(ep, addr); + return 1; + } + if (i == EPD_MAC && get_if_hwaddr(ep->value, str) >= 0) { + ep->length = 6; + return 1; + } + + p = str; + for (l = 0; l < MAX_ENDP_LEN; ++l) { + if (*str == 0) + break; + if (p <= str) + for (p = str; isxdigit(*p); ++p) + ; + i = p - str; + if (i == 0) + return 0; + ep->value[l] = hexc_val(*str++); + if ((i & 1) == 0) + ep->value[l] = (ep->value[l] << 4) + hexc_val(*str++); + if (*str == ':' || *str == '.') + ++str; + } + if (*str != 0 || (ep->class == EPD_MAC && l != 6)) + return 0; + ep->length = l; + return 1; +} + +#endif /* PPP_SUPPORT && HAVE_MULTILINK */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/ppp.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/ppp.c new file mode 100644 index 0000000..a9c18e3 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/ppp.c @@ -0,0 +1,1628 @@ +/***************************************************************************** +* ppp.c - Network Point to Point Protocol program file. +* +* Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. +* portions Copyright (c) 1997 by Global Election Systems Inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. 
+* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 03-01-01 Marc Boucher +* Ported to lwIP. +* 97-11-05 Guy Lancaster , Global Election Systems Inc. +* Original. +*****************************************************************************/ + +/* + * ppp_defs.h - PPP definitions. + * + * if_pppvar.h - private structures and declarations for PPP. + * + * Copyright (c) 1994 The Australian National University. + * All rights reserved. + * + * Permission to use, copy, modify, and distribute this software and its + * documentation is hereby granted, provided that the above copyright + * notice appears in all copies. This software is provided without any + * warranty, express or implied. The Australian National University + * makes no representations about the suitability of this software for + * any purpose. + * + * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY + * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES + * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF + * THE AUSTRALIAN NATIONAL UNIVERSITY HAVE BEEN ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO + * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, + * OR MODIFICATIONS. + */ + +/* + * if_ppp.h - Point-to-Point Protocol definitions. + * + * Copyright (c) 1989 Carnegie Mellon University. + * All rights reserved. + * + * Redistribution and use in source and binary forms are permitted + * provided that the above copyright notice and this paragraph are + * duplicated in all such forms and that any documentation, + * advertising materials, and other materials related to such + * distribution and use acknowledge that the software was developed + * by Carnegie Mellon University. The name of the + * University may not be used to endorse or promote products derived + * from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
+ */ + +/** + * @defgroup ppp PPP + * @ingroup netifs + * @verbinclude "ppp.txt" + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/stats.h" +#include "lwip/sys.h" +#include "lwip/tcpip.h" +#include "lwip/api.h" +#include "lwip/snmp.h" +#include "lwip/ip4.h" /* for ip4_input() */ +#if PPP_IPV6_SUPPORT +#include "lwip/ip6.h" /* for ip6_input() */ +#endif /* PPP_IPV6_SUPPORT */ +#include "lwip/dns.h" + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/pppos.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/lcp.h" +#include "netif/ppp/magic.h" + +#if PAP_SUPPORT +#include "netif/ppp/upap.h" +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT +#include "netif/ppp/chap-new.h" +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT +#include "netif/ppp/eap.h" +#endif /* EAP_SUPPORT */ +#if CCP_SUPPORT +#include "netif/ppp/ccp.h" +#endif /* CCP_SUPPORT */ +#if MPPE_SUPPORT +#include "netif/ppp/mppe.h" +#endif /* MPPE_SUPPORT */ +#if ECP_SUPPORT +#include "netif/ppp/ecp.h" +#endif /* EAP_SUPPORT */ +#if VJ_SUPPORT +#include "netif/ppp/vj.h" +#endif /* VJ_SUPPORT */ +#if PPP_IPV4_SUPPORT +#include "netif/ppp/ipcp.h" +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT +#include "netif/ppp/ipv6cp.h" +#endif /* PPP_IPV6_SUPPORT */ + +/*************************/ +/*** LOCAL DEFINITIONS ***/ +/*************************/ + +/* Memory pools */ +#if PPPOS_SUPPORT +LWIP_MEMPOOL_PROTOTYPE(PPPOS_PCB); +#endif +#if PPPOE_SUPPORT +LWIP_MEMPOOL_PROTOTYPE(PPPOE_IF); +#endif +#if PPPOL2TP_SUPPORT +LWIP_MEMPOOL_PROTOTYPE(PPPOL2TP_PCB); +#endif +#if LWIP_PPP_API && LWIP_MPU_COMPATIBLE +LWIP_MEMPOOL_PROTOTYPE(PPPAPI_MSG); +#endif +LWIP_MEMPOOL_DECLARE(PPP_PCB, MEMP_NUM_PPP_PCB, sizeof(ppp_pcb), "PPP_PCB") + +/* FIXME: add stats per PPP session */ +#if PPP_STATS_SUPPORT +static struct timeval start_time; /* Time when link was started. */ +static struct pppd_stats old_link_stats; +struct pppd_stats link_stats; +unsigned link_connect_time; +int link_stats_valid; +#endif /* PPP_STATS_SUPPORT */ + +/* + * PPP Data Link Layer "protocol" table. + * One entry per supported protocol. + * The last entry must be NULL. + */ +const struct protent* const protocols[] = { + &lcp_protent, +#if PAP_SUPPORT + &pap_protent, +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + &chap_protent, +#endif /* CHAP_SUPPORT */ +#if CBCP_SUPPORT + &cbcp_protent, +#endif /* CBCP_SUPPORT */ +#if PPP_IPV4_SUPPORT + &ipcp_protent, +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT + &ipv6cp_protent, +#endif /* PPP_IPV6_SUPPORT */ +#if CCP_SUPPORT + &ccp_protent, +#endif /* CCP_SUPPORT */ +#if ECP_SUPPORT + &ecp_protent, +#endif /* ECP_SUPPORT */ +#ifdef AT_CHANGE + &atcp_protent, +#endif /* AT_CHANGE */ +#if EAP_SUPPORT + &eap_protent, +#endif /* EAP_SUPPORT */ + NULL +}; + +/* Prototypes for procedures local to this file. 
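
Every entry in the protocols[] table above is compiled in or out purely by the option switches pulled in through ppp_opts.h. The following lwipopts.h excerpt is a minimal sketch for a serial PPP client with PAP/CHAP authentication; the particular options chosen and their values are illustrative assumptions, not part of these sources:

    /* lwipopts.h excerpt -- illustrative values only */
    #define PPP_SUPPORT        1   /* build the PPP stack at all            */
    #define PPPOS_SUPPORT      1   /* PPP over serial framing               */
    #define PAP_SUPPORT        1   /* adds pap_protent to protocols[]       */
    #define CHAP_SUPPORT       1   /* adds chap_protent to protocols[]      */
    #define VJ_SUPPORT         1   /* Van Jacobson TCP header compression   */
    #define PPP_IPV4_SUPPORT   1   /* adds ipcp_protent                     */
    #define PPP_IPV6_SUPPORT   0
    #define MEMP_NUM_PPP_PCB   1   /* one PPP control block (one session)   */
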
*/ +static void ppp_do_connect(void *arg); +static err_t ppp_netif_init_cb(struct netif *netif); +#if PPP_IPV4_SUPPORT +static err_t ppp_netif_output_ip4(struct netif *netif, struct pbuf *pb, const ip4_addr_t *ipaddr); +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT +static err_t ppp_netif_output_ip6(struct netif *netif, struct pbuf *pb, const ip6_addr_t *ipaddr); +#endif /* PPP_IPV6_SUPPORT */ +static err_t ppp_netif_output(struct netif *netif, struct pbuf *pb, u16_t protocol); + +/***********************************/ +/*** PUBLIC FUNCTION DEFINITIONS ***/ +/***********************************/ +#if PPP_AUTH_SUPPORT +void ppp_set_auth(ppp_pcb *pcb, u8_t authtype, const char *user, const char *passwd) { + LWIP_ASSERT_CORE_LOCKED(); +#if PAP_SUPPORT + pcb->settings.refuse_pap = !(authtype & PPPAUTHTYPE_PAP); +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + pcb->settings.refuse_chap = !(authtype & PPPAUTHTYPE_CHAP); +#if MSCHAP_SUPPORT + pcb->settings.refuse_mschap = !(authtype & PPPAUTHTYPE_MSCHAP); + pcb->settings.refuse_mschap_v2 = !(authtype & PPPAUTHTYPE_MSCHAP_V2); +#endif /* MSCHAP_SUPPORT */ +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + pcb->settings.refuse_eap = !(authtype & PPPAUTHTYPE_EAP); +#endif /* EAP_SUPPORT */ + pcb->settings.user = user; + pcb->settings.passwd = passwd; +} +#endif /* PPP_AUTH_SUPPORT */ + +#if MPPE_SUPPORT +/* Set MPPE configuration */ +void ppp_set_mppe(ppp_pcb *pcb, u8_t flags) { + if (flags == PPP_MPPE_DISABLE) { + pcb->settings.require_mppe = 0; + return; + } + + pcb->settings.require_mppe = 1; + pcb->settings.refuse_mppe_stateful = !(flags & PPP_MPPE_ALLOW_STATEFUL); + pcb->settings.refuse_mppe_40 = !!(flags & PPP_MPPE_REFUSE_40); + pcb->settings.refuse_mppe_128 = !!(flags & PPP_MPPE_REFUSE_128); +} +#endif /* MPPE_SUPPORT */ + +#if PPP_NOTIFY_PHASE +void ppp_set_notify_phase_callback(ppp_pcb *pcb, ppp_notify_phase_cb_fn notify_phase_cb) { + pcb->notify_phase_cb = notify_phase_cb; + notify_phase_cb(pcb, pcb->phase, pcb->ctx_cb); +} +#endif /* PPP_NOTIFY_PHASE */ + +/* + * Initiate a PPP connection. + * + * This can only be called if PPP is in the dead phase. + * + * Holdoff is the time to wait (in seconds) before initiating + * the connection. + * + * If this port connects to a modem, the modem connection must be + * established before calling this. + */ +err_t ppp_connect(ppp_pcb *pcb, u16_t holdoff) { + LWIP_ASSERT_CORE_LOCKED(); + if (pcb->phase != PPP_PHASE_DEAD) { + return ERR_ALREADY; + } + + PPPDEBUG(LOG_DEBUG, ("ppp_connect[%d]: holdoff=%d\n", pcb->netif->num, holdoff)); + + magic_randomize(); + + if (holdoff == 0) { + ppp_do_connect(pcb); + return ERR_OK; + } + + new_phase(pcb, PPP_PHASE_HOLDOFF); + sys_timeout((u32_t)(holdoff*1000), ppp_do_connect, pcb); + return ERR_OK; +} + +#if PPP_SERVER +/* + * Listen for an incoming PPP connection. + * + * This can only be called if PPP is in the dead phase. + * + * If this port connects to a modem, the modem connection must be + * established before calling this. + */ +err_t ppp_listen(ppp_pcb *pcb) { + LWIP_ASSERT_CORE_LOCKED(); + if (pcb->phase != PPP_PHASE_DEAD) { + return ERR_ALREADY; + } + + PPPDEBUG(LOG_DEBUG, ("ppp_listen[%d]\n", pcb->netif->num)); + + magic_randomize(); + + if (pcb->link_cb->listen) { + new_phase(pcb, PPP_PHASE_INITIALIZE); + pcb->link_cb->listen(pcb, pcb->link_ctx_cb); + return ERR_OK; + } + return ERR_IF; +} +#endif /* PPP_SERVER */ + +/* + * Initiate the end of a PPP connection. + * Any outstanding packets in the queues are dropped. 
+ * + * Setting nocarrier to 1 close the PPP connection without initiating the + * shutdown procedure. Always using nocarrier = 0 is still recommended, + * this is going to take a little longer time if your link is down, but + * is a safer choice for the PPP state machine. + * + * Return 0 on success, an error code on failure. + */ +err_t +ppp_close(ppp_pcb *pcb, u8_t nocarrier) +{ + LWIP_ASSERT_CORE_LOCKED(); + + pcb->err_code = PPPERR_USER; + + /* holdoff phase, cancel the reconnection */ + if (pcb->phase == PPP_PHASE_HOLDOFF) { + sys_untimeout(ppp_do_connect, pcb); + new_phase(pcb, PPP_PHASE_DEAD); + } + + /* dead phase, nothing to do, call the status callback to be consistent */ + if (pcb->phase == PPP_PHASE_DEAD) { + pcb->link_status_cb(pcb, pcb->err_code, pcb->ctx_cb); + return ERR_OK; + } + + /* Already terminating, nothing to do */ + if (pcb->phase >= PPP_PHASE_TERMINATE) { + return ERR_INPROGRESS; + } + + /* LCP not open, close link protocol */ + if (pcb->phase < PPP_PHASE_ESTABLISH) { + new_phase(pcb, PPP_PHASE_DISCONNECT); + ppp_link_terminated(pcb); + return ERR_OK; + } + + /* + * Only accept carrier lost signal on the stable running phase in order + * to prevent changing the PPP phase FSM in transition phases. + * + * Always using nocarrier = 0 is still recommended, this is going to + * take a little longer time, but is a safer choice from FSM point of view. + */ + if (nocarrier && pcb->phase == PPP_PHASE_RUNNING) { + PPPDEBUG(LOG_DEBUG, ("ppp_close[%d]: carrier lost -> lcp_lowerdown\n", pcb->netif->num)); + lcp_lowerdown(pcb); + /* forced link termination, this will force link protocol to disconnect. */ + link_terminated(pcb); + return ERR_OK; + } + + /* Disconnect */ + PPPDEBUG(LOG_DEBUG, ("ppp_close[%d]: kill_link -> lcp_close\n", pcb->netif->num)); + /* LCP soft close request. */ + lcp_close(pcb, "User request"); + return ERR_OK; +} + +/* + * Release the control block. + * + * This can only be called if PPP is in the dead phase. + * + * You must use ppp_close() before if you wish to terminate + * an established PPP session. + * + * Return 0 on success, an error code on failure. + */ +err_t ppp_free(ppp_pcb *pcb) { + err_t err; + LWIP_ASSERT_CORE_LOCKED(); + if (pcb->phase != PPP_PHASE_DEAD) { + return ERR_CONN; + } + + PPPDEBUG(LOG_DEBUG, ("ppp_free[%d]\n", pcb->netif->num)); + + netif_remove(pcb->netif); + + err = pcb->link_cb->free(pcb, pcb->link_ctx_cb); + + LWIP_MEMPOOL_FREE(PPP_PCB, pcb); + return err; +} + +/* Get and set parameters for the given connection. + * Return 0 on success, an error code on failure. */ +err_t +ppp_ioctl(ppp_pcb *pcb, u8_t cmd, void *arg) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (pcb == NULL) { + return ERR_VAL; + } + + switch(cmd) { + case PPPCTLG_UPSTATUS: /* Get the PPP up status. */ + if (!arg) { + goto fail; + } + *(int *)arg = (int)(0 +#if PPP_IPV4_SUPPORT + || pcb->if4_up +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT + || pcb->if6_up +#endif /* PPP_IPV6_SUPPORT */ + ); + return ERR_OK; + + case PPPCTLG_ERRCODE: /* Get the PPP error code. 
*/ + if (!arg) { + goto fail; + } + *(int *)arg = (int)(pcb->err_code); + return ERR_OK; + + default: + goto fail; + } + +fail: + return ERR_VAL; +} + + +/**********************************/ +/*** LOCAL FUNCTION DEFINITIONS ***/ +/**********************************/ + +static void ppp_do_connect(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + LWIP_ASSERT("pcb->phase == PPP_PHASE_DEAD || pcb->phase == PPP_PHASE_HOLDOFF", pcb->phase == PPP_PHASE_DEAD || pcb->phase == PPP_PHASE_HOLDOFF); + + new_phase(pcb, PPP_PHASE_INITIALIZE); + pcb->link_cb->connect(pcb, pcb->link_ctx_cb); +} + +/* + * ppp_netif_init_cb - netif init callback + */ +static err_t ppp_netif_init_cb(struct netif *netif) { + netif->name[0] = 'p'; + netif->name[1] = 'p'; +#if PPP_IPV4_SUPPORT + netif->output = ppp_netif_output_ip4; +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT + netif->output_ip6 = ppp_netif_output_ip6; +#endif /* PPP_IPV6_SUPPORT */ + netif->flags = NETIF_FLAG_UP; +#if LWIP_NETIF_HOSTNAME + /* @todo: Initialize interface hostname */ + /* netif_set_hostname(netif, "lwip"); */ +#endif /* LWIP_NETIF_HOSTNAME */ + return ERR_OK; +} + +#if PPP_IPV4_SUPPORT +/* + * Send an IPv4 packet on the given connection. + */ +static err_t ppp_netif_output_ip4(struct netif *netif, struct pbuf *pb, const ip4_addr_t *ipaddr) { + LWIP_UNUSED_ARG(ipaddr); + return ppp_netif_output(netif, pb, PPP_IP); +} +#endif /* PPP_IPV4_SUPPORT */ + +#if PPP_IPV6_SUPPORT +/* + * Send an IPv6 packet on the given connection. + */ +static err_t ppp_netif_output_ip6(struct netif *netif, struct pbuf *pb, const ip6_addr_t *ipaddr) { + LWIP_UNUSED_ARG(ipaddr); + return ppp_netif_output(netif, pb, PPP_IPV6); +} +#endif /* PPP_IPV6_SUPPORT */ + +static err_t ppp_netif_output(struct netif *netif, struct pbuf *pb, u16_t protocol) { + ppp_pcb *pcb = (ppp_pcb*)netif->state; + err_t err; + struct pbuf *fpb = NULL; + + /* Check that the link is up. */ + if (0 +#if PPP_IPV4_SUPPORT + || (protocol == PPP_IP && !pcb->if4_up) +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT + || (protocol == PPP_IPV6 && !pcb->if6_up) +#endif /* PPP_IPV6_SUPPORT */ + ) { + PPPDEBUG(LOG_ERR, ("ppp_netif_output[%d]: link not up\n", pcb->netif->num)); + goto err_rte_drop; + } + +#if MPPE_SUPPORT + /* If MPPE is required, refuse any IP packet until we are able to crypt them. */ + if (pcb->settings.require_mppe && pcb->ccp_transmit_method != CI_MPPE) { + PPPDEBUG(LOG_ERR, ("ppp_netif_output[%d]: MPPE required, not up\n", pcb->netif->num)); + goto err_rte_drop; + } +#endif /* MPPE_SUPPORT */ + +#if VJ_SUPPORT + /* + * Attempt Van Jacobson header compression if VJ is configured and + * this is an IP packet. + */ + if (protocol == PPP_IP && pcb->vj_enabled) { + switch (vj_compress_tcp(&pcb->vj_comp, &pb)) { + case TYPE_IP: + /* No change... 
+ protocol = PPP_IP; */ + break; + case TYPE_COMPRESSED_TCP: + /* vj_compress_tcp() returns a new allocated pbuf, indicate we should free + * our duplicated pbuf later */ + fpb = pb; + protocol = PPP_VJC_COMP; + break; + case TYPE_UNCOMPRESSED_TCP: + /* vj_compress_tcp() returns a new allocated pbuf, indicate we should free + * our duplicated pbuf later */ + fpb = pb; + protocol = PPP_VJC_UNCOMP; + break; + default: + PPPDEBUG(LOG_WARNING, ("ppp_netif_output[%d]: bad IP packet\n", pcb->netif->num)); + LINK_STATS_INC(link.proterr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(pcb->netif, ifoutdiscards); + return ERR_VAL; + } + } +#endif /* VJ_SUPPORT */ + +#if CCP_SUPPORT + switch (pcb->ccp_transmit_method) { + case 0: + break; /* Don't compress */ +#if MPPE_SUPPORT + case CI_MPPE: + if ((err = mppe_compress(pcb, &pcb->mppe_comp, &pb, protocol)) != ERR_OK) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + goto err; + } + /* if VJ compressor returned a new allocated pbuf, free it */ + if (fpb) { + pbuf_free(fpb); + } + /* mppe_compress() returns a new allocated pbuf, indicate we should free + * our duplicated pbuf later */ + fpb = pb; + protocol = PPP_COMP; + break; +#endif /* MPPE_SUPPORT */ + default: + PPPDEBUG(LOG_ERR, ("ppp_netif_output[%d]: bad CCP transmit method\n", pcb->netif->num)); + goto err_rte_drop; /* Cannot really happen, we only negotiate what we are able to do */ + } +#endif /* CCP_SUPPORT */ + + err = pcb->link_cb->netif_output(pcb, pcb->link_ctx_cb, pb, protocol); + goto err; + +err_rte_drop: + err = ERR_RTE; + LINK_STATS_INC(link.rterr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); +err: + if (fpb) { + pbuf_free(fpb); + } + return err; +} + +/************************************/ +/*** PRIVATE FUNCTION DEFINITIONS ***/ +/************************************/ + +/* Initialize the PPP subsystem. */ +int ppp_init(void) +{ +#if PPPOS_SUPPORT + LWIP_MEMPOOL_INIT(PPPOS_PCB); +#endif +#if PPPOE_SUPPORT + LWIP_MEMPOOL_INIT(PPPOE_IF); +#endif +#if PPPOL2TP_SUPPORT + LWIP_MEMPOOL_INIT(PPPOL2TP_PCB); +#endif +#if LWIP_PPP_API && LWIP_MPU_COMPATIBLE + LWIP_MEMPOOL_INIT(PPPAPI_MSG); +#endif + + LWIP_MEMPOOL_INIT(PPP_PCB); + + /* + * Initialize magic number generator now so that protocols may + * use magic numbers in initialization. + */ + magic_init(); + + return 0; +} + +/* + * Create a new PPP control block. + * + * This initializes the PPP control block but does not + * attempt to negotiate the LCP session. + * + * Return a new PPP connection control block pointer + * on success or a null pointer on failure. + */ +ppp_pcb *ppp_new(struct netif *pppif, const struct link_callbacks *callbacks, void *link_ctx_cb, ppp_link_status_cb_fn link_status_cb, void *ctx_cb) { + ppp_pcb *pcb; + const struct protent *protp; + int i; + + /* PPP is single-threaded: without a callback, + * there is no way to know when the link is up. 
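
A minimal sketch of how a port typically supplies that mandatory status callback when creating a PPPoS session. uart_write(), app_start_ppp() and the static objects are assumptions of this example, and in a threaded build (NO_SYS == 0) the pppapi_* wrappers added later in this commit would normally be used instead of the raw calls shown here:

    #include "netif/ppp/pppos.h"

    /* hypothetical UART driver used by the output callback */
    extern u32_t uart_write(const u8_t *data, u32_t len);

    static struct netif ppp_netif;
    static ppp_pcb *ppp;

    static u32_t ppp_output_cb(ppp_pcb *pcb, u8_t *data, u32_t len, void *ctx) {
      LWIP_UNUSED_ARG(pcb);
      LWIP_UNUSED_ARG(ctx);
      return uart_write(data, len);          /* push raw PPP bytes to the modem */
    }

    static void ppp_status_cb(ppp_pcb *pcb, int err_code, void *ctx) {
      LWIP_UNUSED_ARG(ctx);
      if (err_code == PPPERR_NONE) {
        /* link is up, addresses have been negotiated */
      } else if (err_code == PPPERR_USER) {
        ppp_free(pcb);                       /* shutdown we asked for has finished */
      }
    }

    void app_start_ppp(void) {
      ppp = pppos_create(&ppp_netif, ppp_output_cb, ppp_status_cb, NULL);
      ppp_set_auth(ppp, PPPAUTHTYPE_CHAP, "user", "secret");
      ppp_connect(ppp, 0);                   /* holdoff = 0: start LCP immediately */
    }
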
*/ + if (link_status_cb == NULL) { + return NULL; + } + + pcb = (ppp_pcb*)LWIP_MEMPOOL_ALLOC(PPP_PCB); + if (pcb == NULL) { + return NULL; + } + + memset(pcb, 0, sizeof(ppp_pcb)); + + /* default configuration */ +#if PAP_SUPPORT + pcb->settings.pap_timeout_time = UPAP_DEFTIMEOUT; + pcb->settings.pap_max_transmits = UPAP_DEFTRANSMITS; +#if PPP_SERVER + pcb->settings.pap_req_timeout = UPAP_DEFREQTIME; +#endif /* PPP_SERVER */ +#endif /* PAP_SUPPORT */ + +#if CHAP_SUPPORT + pcb->settings.chap_timeout_time = CHAP_DEFTIMEOUT; + pcb->settings.chap_max_transmits = CHAP_DEFTRANSMITS; +#if PPP_SERVER + pcb->settings.chap_rechallenge_time = CHAP_DEFRECHALLENGETIME; +#endif /* PPP_SERVER */ +#endif /* CHAP_SUPPPORT */ + +#if EAP_SUPPORT + pcb->settings.eap_req_time = EAP_DEFREQTIME; + pcb->settings.eap_allow_req = EAP_DEFALLOWREQ; +#if PPP_SERVER + pcb->settings.eap_timeout_time = EAP_DEFTIMEOUT; + pcb->settings.eap_max_transmits = EAP_DEFTRANSMITS; +#endif /* PPP_SERVER */ +#endif /* EAP_SUPPORT */ + + pcb->settings.lcp_loopbackfail = LCP_DEFLOOPBACKFAIL; + pcb->settings.lcp_echo_interval = LCP_ECHOINTERVAL; + pcb->settings.lcp_echo_fails = LCP_MAXECHOFAILS; + + pcb->settings.fsm_timeout_time = FSM_DEFTIMEOUT; + pcb->settings.fsm_max_conf_req_transmits = FSM_DEFMAXCONFREQS; + pcb->settings.fsm_max_term_transmits = FSM_DEFMAXTERMREQS; + pcb->settings.fsm_max_nak_loops = FSM_DEFMAXNAKLOOPS; + + pcb->netif = pppif; + MIB2_INIT_NETIF(pppif, snmp_ifType_ppp, 0); + if (!netif_add(pcb->netif, +#if LWIP_IPV4 + IP4_ADDR_ANY4, IP4_ADDR_BROADCAST, IP4_ADDR_ANY4, +#endif /* LWIP_IPV4 */ + (void *)pcb, ppp_netif_init_cb, NULL)) { + LWIP_MEMPOOL_FREE(PPP_PCB, pcb); + PPPDEBUG(LOG_ERR, ("ppp_new: netif_add failed\n")); + return NULL; + } + + pcb->link_cb = callbacks; + pcb->link_ctx_cb = link_ctx_cb; + pcb->link_status_cb = link_status_cb; + pcb->ctx_cb = ctx_cb; + + /* + * Initialize each protocol. + */ + for (i = 0; (protp = protocols[i]) != NULL; ++i) { + (*protp->init)(pcb); + } + + new_phase(pcb, PPP_PHASE_DEAD); + return pcb; +} + +/** Initiate LCP open request */ +void ppp_start(ppp_pcb *pcb) { + PPPDEBUG(LOG_DEBUG, ("ppp_start[%d]\n", pcb->netif->num)); + + /* Clean data not taken care by anything else, mostly shared data. */ +#if PPP_STATS_SUPPORT + link_stats_valid = 0; +#endif /* PPP_STATS_SUPPORT */ +#if MPPE_SUPPORT + pcb->mppe_keys_set = 0; + memset(&pcb->mppe_comp, 0, sizeof(pcb->mppe_comp)); + memset(&pcb->mppe_decomp, 0, sizeof(pcb->mppe_decomp)); +#endif /* MPPE_SUPPORT */ +#if VJ_SUPPORT + vj_compress_init(&pcb->vj_comp); +#endif /* VJ_SUPPORT */ + + /* Start protocol */ + new_phase(pcb, PPP_PHASE_ESTABLISH); + lcp_open(pcb); + lcp_lowerup(pcb); + PPPDEBUG(LOG_DEBUG, ("ppp_start[%d]: finished\n", pcb->netif->num)); +} + +/** Called when link failed to setup */ +void ppp_link_failed(ppp_pcb *pcb) { + PPPDEBUG(LOG_DEBUG, ("ppp_link_failed[%d]\n", pcb->netif->num)); + new_phase(pcb, PPP_PHASE_DEAD); + pcb->err_code = PPPERR_OPEN; + pcb->link_status_cb(pcb, pcb->err_code, pcb->ctx_cb); +} + +/** Called when link is normally down (i.e. it was asked to end) */ +void ppp_link_end(ppp_pcb *pcb) { + PPPDEBUG(LOG_DEBUG, ("ppp_link_end[%d]\n", pcb->netif->num)); + new_phase(pcb, PPP_PHASE_DEAD); + if (pcb->err_code == PPPERR_NONE) { + pcb->err_code = PPPERR_CONNECT; + } + pcb->link_status_cb(pcb, pcb->err_code, pcb->ctx_cb); +} + +/* + * Pass the processed input packet to the appropriate handler. 
+ * This function and all handlers run in the context of the tcpip_thread + */ +void ppp_input(ppp_pcb *pcb, struct pbuf *pb) { + u16_t protocol; +#if PPP_DEBUG && PPP_PROTOCOLNAME + const char *pname; +#endif /* PPP_DEBUG && PPP_PROTOCOLNAME */ + + magic_randomize(); + + if (pb->len < 2) { + PPPDEBUG(LOG_ERR, ("ppp_input[%d]: packet too short\n", pcb->netif->num)); + goto drop; + } + protocol = (((u8_t *)pb->payload)[0] << 8) | ((u8_t*)pb->payload)[1]; + +#if PRINTPKT_SUPPORT + ppp_dump_packet(pcb, "rcvd", (unsigned char *)pb->payload, pb->len); +#endif /* PRINTPKT_SUPPORT */ + + pbuf_remove_header(pb, sizeof(protocol)); + + LINK_STATS_INC(link.recv); + MIB2_STATS_NETIF_INC(pcb->netif, ifinucastpkts); + MIB2_STATS_NETIF_ADD(pcb->netif, ifinoctets, pb->tot_len); + + /* + * Toss all non-LCP packets unless LCP is OPEN. + */ + if (protocol != PPP_LCP && pcb->lcp_fsm.state != PPP_FSM_OPENED) { + ppp_dbglog("Discarded non-LCP packet when LCP not open"); + goto drop; + } + + /* + * Until we get past the authentication phase, toss all packets + * except LCP, LQR and authentication packets. + */ + if (pcb->phase <= PPP_PHASE_AUTHENTICATE + && !(protocol == PPP_LCP +#if LQR_SUPPORT + || protocol == PPP_LQR +#endif /* LQR_SUPPORT */ +#if PAP_SUPPORT + || protocol == PPP_PAP +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + || protocol == PPP_CHAP +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + || protocol == PPP_EAP +#endif /* EAP_SUPPORT */ + )) { + ppp_dbglog("discarding proto 0x%x in phase %d", protocol, pcb->phase); + goto drop; + } + +#if CCP_SUPPORT +#if MPPE_SUPPORT + /* + * MPPE is required and unencrypted data has arrived (this + * should never happen!). We should probably drop the link if + * the protocol is in the range of what should be encrypted. + * At the least, we drop this packet. + */ + if (pcb->settings.require_mppe && protocol != PPP_COMP && protocol < 0x8000) { + PPPDEBUG(LOG_ERR, ("ppp_input[%d]: MPPE required, received unencrypted data!\n", pcb->netif->num)); + goto drop; + } +#endif /* MPPE_SUPPORT */ + + if (protocol == PPP_COMP) { + u8_t *pl; + + switch (pcb->ccp_receive_method) { +#if MPPE_SUPPORT + case CI_MPPE: + if (mppe_decompress(pcb, &pcb->mppe_decomp, &pb) != ERR_OK) { + goto drop; + } + break; +#endif /* MPPE_SUPPORT */ + default: + PPPDEBUG(LOG_ERR, ("ppp_input[%d]: bad CCP receive method\n", pcb->netif->num)); + goto drop; /* Cannot really happen, we only negotiate what we are able to do */ + } + + /* Assume no PFC */ + if (pb->len < 2) { + goto drop; + } + + /* Extract and hide protocol (do PFC decompression if necessary) */ + pl = (u8_t*)pb->payload; + if (pl[0] & 0x01) { + protocol = pl[0]; + pbuf_remove_header(pb, 1); + } else { + protocol = (pl[0] << 8) | pl[1]; + pbuf_remove_header(pb, 2); + } + } +#endif /* CCP_SUPPORT */ + + switch(protocol) { + +#if PPP_IPV4_SUPPORT + case PPP_IP: /* Internet Protocol */ + PPPDEBUG(LOG_INFO, ("ppp_input[%d]: ip in pbuf len=%d\n", pcb->netif->num, pb->tot_len)); + ip4_input(pb, pcb->netif); + return; +#endif /* PPP_IPV4_SUPPORT */ + +#if PPP_IPV6_SUPPORT + case PPP_IPV6: /* Internet Protocol Version 6 */ + PPPDEBUG(LOG_INFO, ("ppp_input[%d]: ip6 in pbuf len=%d\n", pcb->netif->num, pb->tot_len)); + ip6_input(pb, pcb->netif); + return; +#endif /* PPP_IPV6_SUPPORT */ + +#if VJ_SUPPORT + case PPP_VJC_COMP: /* VJ compressed TCP */ + /* + * Clip off the VJ header and prepend the rebuilt TCP/IP header and + * pass the result to IP. 
+ */ + PPPDEBUG(LOG_INFO, ("ppp_input[%d]: vj_comp in pbuf len=%d\n", pcb->netif->num, pb->tot_len)); + if (pcb->vj_enabled && vj_uncompress_tcp(&pb, &pcb->vj_comp) >= 0) { + ip4_input(pb, pcb->netif); + return; + } + /* Something's wrong so drop it. */ + PPPDEBUG(LOG_WARNING, ("ppp_input[%d]: Dropping VJ compressed\n", pcb->netif->num)); + break; + + case PPP_VJC_UNCOMP: /* VJ uncompressed TCP */ + /* + * Process the TCP/IP header for VJ header compression and then pass + * the packet to IP. + */ + PPPDEBUG(LOG_INFO, ("ppp_input[%d]: vj_un in pbuf len=%d\n", pcb->netif->num, pb->tot_len)); + if (pcb->vj_enabled && vj_uncompress_uncomp(pb, &pcb->vj_comp) >= 0) { + ip4_input(pb, pcb->netif); + return; + } + /* Something's wrong so drop it. */ + PPPDEBUG(LOG_WARNING, ("ppp_input[%d]: Dropping VJ uncompressed\n", pcb->netif->num)); + break; +#endif /* VJ_SUPPORT */ + + default: { + int i; + const struct protent *protp; + + /* + * Upcall the proper protocol input routine. + */ + for (i = 0; (protp = protocols[i]) != NULL; ++i) { + if (protp->protocol == protocol) { + pb = pbuf_coalesce(pb, PBUF_RAW); + (*protp->input)(pcb, (u8_t*)pb->payload, pb->len); + goto out; + } +#if 0 /* UNUSED + * + * This is actually a (hacked?) way for the Linux kernel to pass a data + * packet to pppd. pppd in normal condition only do signaling + * (LCP, PAP, CHAP, IPCP, ...) and does not handle any data packet at all. + * + * We don't even need this interface, which is only there because of PPP + * interface limitation between Linux kernel and pppd. For MPPE, which uses + * CCP to negotiate although it is not really a (de)compressor, we added + * ccp_resetrequest() in CCP and MPPE input data flow is calling either + * ccp_resetrequest() or lcp_close() if the issue is, respectively, non-fatal + * or fatal, this is what ccp_datainput() really do. + */ + if (protocol == (protp->protocol & ~0x8000) + && protp->datainput != NULL) { + (*protp->datainput)(pcb, pb->payload, pb->len); + goto out; + } +#endif /* UNUSED */ + } + +#if PPP_DEBUG +#if PPP_PROTOCOLNAME + pname = protocol_name(protocol); + if (pname != NULL) { + ppp_warn("Unsupported protocol '%s' (0x%x) received", pname, protocol); + } else +#endif /* PPP_PROTOCOLNAME */ + ppp_warn("Unsupported protocol 0x%x received", protocol); +#endif /* PPP_DEBUG */ + if (pbuf_add_header(pb, sizeof(protocol))) { + PPPDEBUG(LOG_WARNING, ("ppp_input[%d]: Dropping (pbuf_add_header failed)\n", pcb->netif->num)); + goto drop; + } + lcp_sprotrej(pcb, (u8_t*)pb->payload, pb->len); + } + break; + } + +drop: + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(pcb->netif, ifindiscards); + +out: + pbuf_free(pb); +} + +/* + * Write a pbuf to a ppp link, only used from PPP functions + * to send PPP packets. + * + * IPv4 and IPv6 packets from lwIP are sent, respectively, + * with ppp_netif_output_ip4() and ppp_netif_output_ip6() + * functions (which are callbacks of the netif PPP interface). 
+ */ +err_t ppp_write(ppp_pcb *pcb, struct pbuf *p) { +#if PRINTPKT_SUPPORT + ppp_dump_packet(pcb, "sent", (unsigned char *)p->payload+2, p->len-2); +#endif /* PRINTPKT_SUPPORT */ + return pcb->link_cb->write(pcb, pcb->link_ctx_cb, p); +} + +void ppp_link_terminated(ppp_pcb *pcb) { + PPPDEBUG(LOG_DEBUG, ("ppp_link_terminated[%d]\n", pcb->netif->num)); + pcb->link_cb->disconnect(pcb, pcb->link_ctx_cb); + PPPDEBUG(LOG_DEBUG, ("ppp_link_terminated[%d]: finished.\n", pcb->netif->num)); +} + + +/************************************************************************ + * Functions called by various PPP subsystems to configure + * the PPP interface or change the PPP phase. + */ + +/* + * new_phase - signal the start of a new phase of pppd's operation. + */ +void new_phase(ppp_pcb *pcb, int p) { + pcb->phase = p; + PPPDEBUG(LOG_DEBUG, ("ppp phase changed[%d]: phase=%d\n", pcb->netif->num, pcb->phase)); +#if PPP_NOTIFY_PHASE + if (pcb->notify_phase_cb != NULL) { + pcb->notify_phase_cb(pcb, p, pcb->ctx_cb); + } +#endif /* PPP_NOTIFY_PHASE */ +} + +/* + * ppp_send_config - configure the transmit-side characteristics of + * the ppp interface. + */ +int ppp_send_config(ppp_pcb *pcb, int mtu, u32_t accm, int pcomp, int accomp) { + LWIP_UNUSED_ARG(mtu); + /* pcb->mtu = mtu; -- set correctly with netif_set_mtu */ + + if (pcb->link_cb->send_config) { + pcb->link_cb->send_config(pcb, pcb->link_ctx_cb, accm, pcomp, accomp); + } + + PPPDEBUG(LOG_INFO, ("ppp_send_config[%d]\n", pcb->netif->num) ); + return 0; +} + +/* + * ppp_recv_config - configure the receive-side characteristics of + * the ppp interface. + */ +int ppp_recv_config(ppp_pcb *pcb, int mru, u32_t accm, int pcomp, int accomp) { + LWIP_UNUSED_ARG(mru); + + if (pcb->link_cb->recv_config) { + pcb->link_cb->recv_config(pcb, pcb->link_ctx_cb, accm, pcomp, accomp); + } + + PPPDEBUG(LOG_INFO, ("ppp_recv_config[%d]\n", pcb->netif->num)); + return 0; +} + +#if PPP_IPV4_SUPPORT +/* + * sifaddr - Config the interface IP addresses and netmask. + */ +int sifaddr(ppp_pcb *pcb, u32_t our_adr, u32_t his_adr, u32_t netmask) { + ip4_addr_t ip, nm, gw; + + ip4_addr_set_u32(&ip, our_adr); + ip4_addr_set_u32(&nm, netmask); + ip4_addr_set_u32(&gw, his_adr); + netif_set_addr(pcb->netif, &ip, &nm, &gw); + return 1; +} + +/******************************************************************** + * + * cifaddr - Clear the interface IP addresses, and delete routes + * through the interface if possible. + */ +int cifaddr(ppp_pcb *pcb, u32_t our_adr, u32_t his_adr) { + LWIP_UNUSED_ARG(our_adr); + LWIP_UNUSED_ARG(his_adr); + + netif_set_addr(pcb->netif, IP4_ADDR_ANY4, IP4_ADDR_BROADCAST, IP4_ADDR_ANY4); + return 1; +} + +#if 0 /* UNUSED - PROXY ARP */ +/******************************************************************** + * + * sifproxyarp - Make a proxy ARP entry for the peer. + */ + +int sifproxyarp(ppp_pcb *pcb, u32_t his_adr) { + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(his_adr); + return 0; +} + +/******************************************************************** + * + * cifproxyarp - Delete the proxy ARP entry for the peer. 
+ */ + +int cifproxyarp(ppp_pcb *pcb, u32_t his_adr) { + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(his_adr); + return 0; +} +#endif /* UNUSED - PROXY ARP */ + +#if LWIP_DNS +/* + * sdns - Config the DNS servers + */ +int sdns(ppp_pcb *pcb, u32_t ns1, u32_t ns2) { + ip_addr_t ns; + LWIP_UNUSED_ARG(pcb); + + ip_addr_set_ip4_u32_val(ns, ns1); + dns_setserver(0, &ns); + ip_addr_set_ip4_u32_val(ns, ns2); + dns_setserver(1, &ns); + return 1; +} + +/******************************************************************** + * + * cdns - Clear the DNS servers + */ +int cdns(ppp_pcb *pcb, u32_t ns1, u32_t ns2) { + const ip_addr_t *nsa; + ip_addr_t nsb; + LWIP_UNUSED_ARG(pcb); + + nsa = dns_getserver(0); + ip_addr_set_ip4_u32_val(nsb, ns1); + if (ip_addr_cmp(nsa, &nsb)) { + dns_setserver(0, IP_ADDR_ANY); + } + nsa = dns_getserver(1); + ip_addr_set_ip4_u32_val(nsb, ns2); + if (ip_addr_cmp(nsa, &nsb)) { + dns_setserver(1, IP_ADDR_ANY); + } + return 1; +} +#endif /* LWIP_DNS */ + +#if VJ_SUPPORT +/******************************************************************** + * + * sifvjcomp - config tcp header compression + */ +int sifvjcomp(ppp_pcb *pcb, int vjcomp, int cidcomp, int maxcid) { + pcb->vj_enabled = vjcomp; + pcb->vj_comp.compressSlot = cidcomp; + pcb->vj_comp.maxSlotIndex = maxcid; + PPPDEBUG(LOG_INFO, ("sifvjcomp[%d]: VJ compress enable=%d slot=%d max slot=%d\n", + pcb->netif->num, vjcomp, cidcomp, maxcid)); + return 0; +} +#endif /* VJ_SUPPORT */ + +/* + * sifup - Config the interface up and enable IP packets to pass. + */ +int sifup(ppp_pcb *pcb) { + pcb->if4_up = 1; + pcb->err_code = PPPERR_NONE; + netif_set_link_up(pcb->netif); + + PPPDEBUG(LOG_DEBUG, ("sifup[%d]: err_code=%d\n", pcb->netif->num, pcb->err_code)); + pcb->link_status_cb(pcb, pcb->err_code, pcb->ctx_cb); + return 1; +} + +/******************************************************************** + * + * sifdown - Disable the indicated protocol and config the interface + * down if there are no remaining protocols. + */ +int sifdown(ppp_pcb *pcb) { + + pcb->if4_up = 0; + + if (1 +#if PPP_IPV6_SUPPORT + /* set the interface down if IPv6 is down as well */ + && !pcb->if6_up +#endif /* PPP_IPV6_SUPPORT */ + ) { + /* make sure the netif link callback is called */ + netif_set_link_down(pcb->netif); + } + PPPDEBUG(LOG_DEBUG, ("sifdown[%d]: err_code=%d\n", pcb->netif->num, pcb->err_code)); + return 1; +} + +/******************************************************************** + * + * Return user specified netmask, modified by any mask we might determine + * for address `addr' (in network byte order). + * Here we scan through the system's list of interfaces, looking for + * any non-point-to-point interfaces which might appear to be on the same + * network as `addr'. If we find any, we OR in their netmask to the + * user-specified netmask. + */ +u32_t get_mask(u32_t addr) { +#if 0 + u32_t mask, nmask; + + addr = lwip_htonl(addr); + if (IP_CLASSA(addr)) { /* determine network mask for address class */ + nmask = IP_CLASSA_NET; + } else if (IP_CLASSB(addr)) { + nmask = IP_CLASSB_NET; + } else { + nmask = IP_CLASSC_NET; + } + + /* class D nets are disallowed by bad_ip_adrs */ + mask = PP_HTONL(0xffffff00UL) | lwip_htonl(nmask); + + /* XXX + * Scan through the system's network interfaces. + * Get each netmask and OR them into our mask. 
+ */ + /* return mask; */ + return mask; +#endif /* 0 */ + LWIP_UNUSED_ARG(addr); + return IPADDR_BROADCAST; +} +#endif /* PPP_IPV4_SUPPORT */ + +#if PPP_IPV6_SUPPORT +#define IN6_LLADDR_FROM_EUI64(ip6, eui64) do { \ + ip6.addr[0] = PP_HTONL(0xfe800000); \ + ip6.addr[1] = 0; \ + eui64_copy(eui64, ip6.addr[2]); \ + } while (0) + +/******************************************************************** + * + * sif6addr - Config the interface with an IPv6 link-local address + */ +int sif6addr(ppp_pcb *pcb, eui64_t our_eui64, eui64_t his_eui64) { + ip6_addr_t ip6; + LWIP_UNUSED_ARG(his_eui64); + + IN6_LLADDR_FROM_EUI64(ip6, our_eui64); + netif_ip6_addr_set(pcb->netif, 0, &ip6); + netif_ip6_addr_set_state(pcb->netif, 0, IP6_ADDR_PREFERRED); + /* FIXME: should we add an IPv6 static neighbor using his_eui64 ? */ + return 1; +} + +/******************************************************************** + * + * cif6addr - Remove IPv6 address from interface + */ +int cif6addr(ppp_pcb *pcb, eui64_t our_eui64, eui64_t his_eui64) { + LWIP_UNUSED_ARG(our_eui64); + LWIP_UNUSED_ARG(his_eui64); + + netif_ip6_addr_set_state(pcb->netif, 0, IP6_ADDR_INVALID); + netif_ip6_addr_set(pcb->netif, 0, IP6_ADDR_ANY6); + return 1; +} + +/* + * sif6up - Config the interface up and enable IPv6 packets to pass. + */ +int sif6up(ppp_pcb *pcb) { + + pcb->if6_up = 1; + pcb->err_code = PPPERR_NONE; + netif_set_link_up(pcb->netif); + + PPPDEBUG(LOG_DEBUG, ("sif6up[%d]: err_code=%d\n", pcb->netif->num, pcb->err_code)); + pcb->link_status_cb(pcb, pcb->err_code, pcb->ctx_cb); + return 1; +} + +/******************************************************************** + * + * sif6down - Disable the indicated protocol and config the interface + * down if there are no remaining protocols. + */ +int sif6down(ppp_pcb *pcb) { + + pcb->if6_up = 0; + + if (1 +#if PPP_IPV4_SUPPORT + /* set the interface down if IPv4 is down as well */ + && !pcb->if4_up +#endif /* PPP_IPV4_SUPPORT */ + ) { + /* make sure the netif link callback is called */ + netif_set_link_down(pcb->netif); + } + PPPDEBUG(LOG_DEBUG, ("sif6down[%d]: err_code=%d\n", pcb->netif->num, pcb->err_code)); + return 1; +} +#endif /* PPP_IPV6_SUPPORT */ + +#if DEMAND_SUPPORT +/* + * sifnpmode - Set the mode for handling packets for a given NP. + */ +int sifnpmode(ppp_pcb *pcb, int proto, enum NPmode mode) { + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(proto); + LWIP_UNUSED_ARG(mode); + return 0; +} +#endif /* DEMAND_SUPPORT */ + +/* + * netif_set_mtu - set the MTU on the PPP network interface. + */ +void netif_set_mtu(ppp_pcb *pcb, int mtu) { + + pcb->netif->mtu = mtu; + PPPDEBUG(LOG_INFO, ("netif_set_mtu[%d]: mtu=%d\n", pcb->netif->num, mtu)); +} + +/* + * netif_get_mtu - get PPP interface MTU + */ +int netif_get_mtu(ppp_pcb *pcb) { + + return pcb->netif->mtu; +} + +#if CCP_SUPPORT +#if 0 /* unused */ +/* + * ccp_test - whether a given compression method is acceptable for use. + */ +int +ccp_test(ppp_pcb *pcb, u_char *opt_ptr, int opt_len, int for_transmit) +{ + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(opt_ptr); + LWIP_UNUSED_ARG(opt_len); + LWIP_UNUSED_ARG(for_transmit); + return -1; +} +#endif /* unused */ + +/* + * ccp_set - inform about the current state of CCP. 
+ */ +void +ccp_set(ppp_pcb *pcb, u8_t isopen, u8_t isup, u8_t receive_method, u8_t transmit_method) +{ + LWIP_UNUSED_ARG(isopen); + LWIP_UNUSED_ARG(isup); + pcb->ccp_receive_method = receive_method; + pcb->ccp_transmit_method = transmit_method; + PPPDEBUG(LOG_DEBUG, ("ccp_set[%d]: is_open=%d, is_up=%d, receive_method=%u, transmit_method=%u\n", + pcb->netif->num, isopen, isup, receive_method, transmit_method)); +} + +void +ccp_reset_comp(ppp_pcb *pcb) +{ + switch (pcb->ccp_transmit_method) { +#if MPPE_SUPPORT + case CI_MPPE: + mppe_comp_reset(pcb, &pcb->mppe_comp); + break; +#endif /* MPPE_SUPPORT */ + default: + break; + } +} + +void +ccp_reset_decomp(ppp_pcb *pcb) +{ + switch (pcb->ccp_receive_method) { +#if MPPE_SUPPORT + case CI_MPPE: + mppe_decomp_reset(pcb, &pcb->mppe_decomp); + break; +#endif /* MPPE_SUPPORT */ + default: + break; + } +} + +#if 0 /* unused */ +/* + * ccp_fatal_error - returns 1 if decompression was disabled as a + * result of an error detected after decompression of a packet, + * 0 otherwise. This is necessary because of patent nonsense. + */ +int +ccp_fatal_error(ppp_pcb *pcb) +{ + LWIP_UNUSED_ARG(pcb); + return 1; +} +#endif /* unused */ +#endif /* CCP_SUPPORT */ + +#if PPP_IDLETIMELIMIT +/******************************************************************** + * + * get_idle_time - return how long the link has been idle. + */ +int get_idle_time(ppp_pcb *pcb, struct ppp_idle *ip) { + /* FIXME: add idle time support and make it optional */ + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(ip); + return 1; +} +#endif /* PPP_IDLETIMELIMIT */ + +#if DEMAND_SUPPORT +/******************************************************************** + * + * get_loop_output - get outgoing packets from the ppp device, + * and detect when we want to bring the real link up. + * Return value is 1 if we need to bring up the link, 0 otherwise. + */ +int get_loop_output(void) { + return 0; +} +#endif /* DEMAND_SUPPORT */ + +#if PPP_PROTOCOLNAME +/* List of protocol names, to make our messages a little more informative. 
*/ +struct protocol_list { + u_short proto; + const char *name; +} const protocol_list[] = { + { 0x21, "IP" }, + { 0x23, "OSI Network Layer" }, + { 0x25, "Xerox NS IDP" }, + { 0x27, "DECnet Phase IV" }, + { 0x29, "Appletalk" }, + { 0x2b, "Novell IPX" }, + { 0x2d, "VJ compressed TCP/IP" }, + { 0x2f, "VJ uncompressed TCP/IP" }, + { 0x31, "Bridging PDU" }, + { 0x33, "Stream Protocol ST-II" }, + { 0x35, "Banyan Vines" }, + { 0x39, "AppleTalk EDDP" }, + { 0x3b, "AppleTalk SmartBuffered" }, + { 0x3d, "Multi-Link" }, + { 0x3f, "NETBIOS Framing" }, + { 0x41, "Cisco Systems" }, + { 0x43, "Ascom Timeplex" }, + { 0x45, "Fujitsu Link Backup and Load Balancing (LBLB)" }, + { 0x47, "DCA Remote Lan" }, + { 0x49, "Serial Data Transport Protocol (PPP-SDTP)" }, + { 0x4b, "SNA over 802.2" }, + { 0x4d, "SNA" }, + { 0x4f, "IP6 Header Compression" }, + { 0x51, "KNX Bridging Data" }, + { 0x53, "Encryption" }, + { 0x55, "Individual Link Encryption" }, + { 0x57, "IPv6" }, + { 0x59, "PPP Muxing" }, + { 0x5b, "Vendor-Specific Network Protocol" }, + { 0x61, "RTP IPHC Full Header" }, + { 0x63, "RTP IPHC Compressed TCP" }, + { 0x65, "RTP IPHC Compressed non-TCP" }, + { 0x67, "RTP IPHC Compressed UDP 8" }, + { 0x69, "RTP IPHC Compressed RTP 8" }, + { 0x6f, "Stampede Bridging" }, + { 0x73, "MP+" }, + { 0xc1, "NTCITS IPI" }, + { 0xfb, "single-link compression" }, + { 0xfd, "Compressed Datagram" }, + { 0x0201, "802.1d Hello Packets" }, + { 0x0203, "IBM Source Routing BPDU" }, + { 0x0205, "DEC LANBridge100 Spanning Tree" }, + { 0x0207, "Cisco Discovery Protocol" }, + { 0x0209, "Netcs Twin Routing" }, + { 0x020b, "STP - Scheduled Transfer Protocol" }, + { 0x020d, "EDP - Extreme Discovery Protocol" }, + { 0x0211, "Optical Supervisory Channel Protocol" }, + { 0x0213, "Optical Supervisory Channel Protocol" }, + { 0x0231, "Luxcom" }, + { 0x0233, "Sigma Network Systems" }, + { 0x0235, "Apple Client Server Protocol" }, + { 0x0281, "MPLS Unicast" }, + { 0x0283, "MPLS Multicast" }, + { 0x0285, "IEEE p1284.4 standard - data packets" }, + { 0x0287, "ETSI TETRA Network Protocol Type 1" }, + { 0x0289, "Multichannel Flow Treatment Protocol" }, + { 0x2063, "RTP IPHC Compressed TCP No Delta" }, + { 0x2065, "RTP IPHC Context State" }, + { 0x2067, "RTP IPHC Compressed UDP 16" }, + { 0x2069, "RTP IPHC Compressed RTP 16" }, + { 0x4001, "Cray Communications Control Protocol" }, + { 0x4003, "CDPD Mobile Network Registration Protocol" }, + { 0x4005, "Expand accelerator protocol" }, + { 0x4007, "ODSICP NCP" }, + { 0x4009, "DOCSIS DLL" }, + { 0x400B, "Cetacean Network Detection Protocol" }, + { 0x4021, "Stacker LZS" }, + { 0x4023, "RefTek Protocol" }, + { 0x4025, "Fibre Channel" }, + { 0x4027, "EMIT Protocols" }, + { 0x405b, "Vendor-Specific Protocol (VSP)" }, + { 0x8021, "Internet Protocol Control Protocol" }, + { 0x8023, "OSI Network Layer Control Protocol" }, + { 0x8025, "Xerox NS IDP Control Protocol" }, + { 0x8027, "DECnet Phase IV Control Protocol" }, + { 0x8029, "Appletalk Control Protocol" }, + { 0x802b, "Novell IPX Control Protocol" }, + { 0x8031, "Bridging NCP" }, + { 0x8033, "Stream Protocol Control Protocol" }, + { 0x8035, "Banyan Vines Control Protocol" }, + { 0x803d, "Multi-Link Control Protocol" }, + { 0x803f, "NETBIOS Framing Control Protocol" }, + { 0x8041, "Cisco Systems Control Protocol" }, + { 0x8043, "Ascom Timeplex" }, + { 0x8045, "Fujitsu LBLB Control Protocol" }, + { 0x8047, "DCA Remote Lan Network Control Protocol (RLNCP)" }, + { 0x8049, "Serial Data Control Protocol (PPP-SDCP)" }, + { 0x804b, "SNA over 802.2 Control 
Protocol" }, + { 0x804d, "SNA Control Protocol" }, + { 0x804f, "IP6 Header Compression Control Protocol" }, + { 0x8051, "KNX Bridging Control Protocol" }, + { 0x8053, "Encryption Control Protocol" }, + { 0x8055, "Individual Link Encryption Control Protocol" }, + { 0x8057, "IPv6 Control Protocol" }, + { 0x8059, "PPP Muxing Control Protocol" }, + { 0x805b, "Vendor-Specific Network Control Protocol (VSNCP)" }, + { 0x806f, "Stampede Bridging Control Protocol" }, + { 0x8073, "MP+ Control Protocol" }, + { 0x80c1, "NTCITS IPI Control Protocol" }, + { 0x80fb, "Single Link Compression Control Protocol" }, + { 0x80fd, "Compression Control Protocol" }, + { 0x8207, "Cisco Discovery Protocol Control" }, + { 0x8209, "Netcs Twin Routing" }, + { 0x820b, "STP - Control Protocol" }, + { 0x820d, "EDPCP - Extreme Discovery Protocol Ctrl Prtcl" }, + { 0x8235, "Apple Client Server Protocol Control" }, + { 0x8281, "MPLSCP" }, + { 0x8285, "IEEE p1284.4 standard - Protocol Control" }, + { 0x8287, "ETSI TETRA TNP1 Control Protocol" }, + { 0x8289, "Multichannel Flow Treatment Protocol" }, + { 0xc021, "Link Control Protocol" }, + { 0xc023, "Password Authentication Protocol" }, + { 0xc025, "Link Quality Report" }, + { 0xc027, "Shiva Password Authentication Protocol" }, + { 0xc029, "CallBack Control Protocol (CBCP)" }, + { 0xc02b, "BACP Bandwidth Allocation Control Protocol" }, + { 0xc02d, "BAP" }, + { 0xc05b, "Vendor-Specific Authentication Protocol (VSAP)" }, + { 0xc081, "Container Control Protocol" }, + { 0xc223, "Challenge Handshake Authentication Protocol" }, + { 0xc225, "RSA Authentication Protocol" }, + { 0xc227, "Extensible Authentication Protocol" }, + { 0xc229, "Mitsubishi Security Info Exch Ptcl (SIEP)" }, + { 0xc26f, "Stampede Bridging Authorization Protocol" }, + { 0xc281, "Proprietary Authentication Protocol" }, + { 0xc283, "Proprietary Authentication Protocol" }, + { 0xc481, "Proprietary Node ID Authentication Protocol" }, + { 0, NULL }, +}; + +/* + * protocol_name - find a name for a PPP protocol. + */ +const char * protocol_name(int proto) { + const struct protocol_list *lp; + + for (lp = protocol_list; lp->proto != 0; ++lp) { + if (proto == lp->proto) { + return lp->name; + } + } + return NULL; +} +#endif /* PPP_PROTOCOLNAME */ + +#if PPP_STATS_SUPPORT + +/* ---- Note on PPP Stats support ---- + * + * The one willing link stats support should add the get_ppp_stats() + * to fetch statistics from lwIP. + */ + +/* + * reset_link_stats - "reset" stats when link goes up. + */ +void reset_link_stats(int u) { + if (!get_ppp_stats(u, &old_link_stats)) { + return; + } + gettimeofday(&start_time, NULL); +} + +/* + * update_link_stats - get stats at link termination. + */ +void update_link_stats(int u) { + struct timeval now; + char numbuf[32]; + + if (!get_ppp_stats(u, &link_stats) || gettimeofday(&now, NULL) < 0) { + return; + } + link_connect_time = now.tv_sec - start_time.tv_sec; + link_stats_valid = 1; + + link_stats.bytes_in -= old_link_stats.bytes_in; + link_stats.bytes_out -= old_link_stats.bytes_out; + link_stats.pkts_in -= old_link_stats.pkts_in; + link_stats.pkts_out -= old_link_stats.pkts_out; +} + +void print_link_stats() { + /* + * Print connect time and statistics. 
+ */ + if (link_stats_valid) { + int t = (link_connect_time + 5) / 6; /* 1/10ths of minutes */ + info("Connect time %d.%d minutes.", t/10, t%10); + info("Sent %u bytes, received %u bytes.", link_stats.bytes_out, link_stats.bytes_in); + link_stats_valid = 0; + } +} +#endif /* PPP_STATS_SUPPORT */ + +#endif /* PPP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/pppapi.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/pppapi.c new file mode 100644 index 0000000..947f7ba --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/pppapi.c @@ -0,0 +1,427 @@ +/** + * @file + * Point To Point Protocol Sequential API module + * + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "netif/ppp/ppp_opts.h" + +#if LWIP_PPP_API /* don't build if not configured for use in lwipopts.h */ + +#include "netif/ppp/pppapi.h" +#include "lwip/priv/tcpip_priv.h" +#include "netif/ppp/pppoe.h" +#include "netif/ppp/pppol2tp.h" +#include "netif/ppp/pppos.h" + +#if LWIP_MPU_COMPATIBLE +LWIP_MEMPOOL_DECLARE(PPPAPI_MSG, MEMP_NUM_PPP_API_MSG, sizeof(struct pppapi_msg), "PPPAPI_MSG") +#endif + +#define PPPAPI_VAR_REF(name) API_VAR_REF(name) +#define PPPAPI_VAR_DECLARE(name) API_VAR_DECLARE(struct pppapi_msg, name) +#define PPPAPI_VAR_ALLOC(name) API_VAR_ALLOC_POOL(struct pppapi_msg, PPPAPI_MSG, name, ERR_MEM) +#define PPPAPI_VAR_ALLOC_RETURN_NULL(name) API_VAR_ALLOC_POOL(struct pppapi_msg, PPPAPI_MSG, name, NULL) +#define PPPAPI_VAR_FREE(name) API_VAR_FREE_POOL(PPPAPI_MSG, name) + +/** + * Call ppp_set_default() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_set_default(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + ppp_set_default(msg->msg.ppp); + return ERR_OK; +} + +/** + * Call ppp_set_default() in a thread-safe way by running that function inside the + * tcpip_thread context. 
+ */ +err_t +pppapi_set_default(ppp_pcb *pcb) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + err = tcpip_api_call(pppapi_do_ppp_set_default, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} + + +#if PPP_NOTIFY_PHASE +/** + * Call ppp_set_notify_phase_callback() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_set_notify_phase_callback(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + ppp_set_notify_phase_callback(msg->msg.ppp, msg->msg.msg.setnotifyphasecb.notify_phase_cb); + return ERR_OK; +} + +/** + * Call ppp_set_notify_phase_callback() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +err_t +pppapi_set_notify_phase_callback(ppp_pcb *pcb, ppp_notify_phase_cb_fn notify_phase_cb) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + PPPAPI_VAR_REF(msg).msg.msg.setnotifyphasecb.notify_phase_cb = notify_phase_cb; + err = tcpip_api_call(pppapi_do_ppp_set_notify_phase_callback, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} +#endif /* PPP_NOTIFY_PHASE */ + + +#if PPPOS_SUPPORT +/** + * Call pppos_create() inside the tcpip_thread context. + */ +static err_t +pppapi_do_pppos_create(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + msg->msg.ppp = pppos_create(msg->msg.msg.serialcreate.pppif, msg->msg.msg.serialcreate.output_cb, + msg->msg.msg.serialcreate.link_status_cb, msg->msg.msg.serialcreate.ctx_cb); + return ERR_OK; +} + +/** + * Call pppos_create() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +ppp_pcb* +pppapi_pppos_create(struct netif *pppif, pppos_output_cb_fn output_cb, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb) +{ + ppp_pcb* result; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC_RETURN_NULL(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = NULL; + PPPAPI_VAR_REF(msg).msg.msg.serialcreate.pppif = pppif; + PPPAPI_VAR_REF(msg).msg.msg.serialcreate.output_cb = output_cb; + PPPAPI_VAR_REF(msg).msg.msg.serialcreate.link_status_cb = link_status_cb; + PPPAPI_VAR_REF(msg).msg.msg.serialcreate.ctx_cb = ctx_cb; + tcpip_api_call(pppapi_do_pppos_create, &PPPAPI_VAR_REF(msg).call); + result = PPPAPI_VAR_REF(msg).msg.ppp; + PPPAPI_VAR_FREE(msg); + return result; +} +#endif /* PPPOS_SUPPORT */ + + +#if PPPOE_SUPPORT +/** + * Call pppoe_create() inside the tcpip_thread context. + */ +static err_t +pppapi_do_pppoe_create(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + msg->msg.ppp = pppoe_create(msg->msg.msg.ethernetcreate.pppif, msg->msg.msg.ethernetcreate.ethif, + msg->msg.msg.ethernetcreate.service_name, msg->msg.msg.ethernetcreate.concentrator_name, + msg->msg.msg.ethernetcreate.link_status_cb, msg->msg.msg.ethernetcreate.ctx_cb); + return ERR_OK; +} + +/** + * Call pppoe_create() in a thread-safe way by running that function inside the + * tcpip_thread context. 
+ */ +ppp_pcb* +pppapi_pppoe_create(struct netif *pppif, struct netif *ethif, const char *service_name, + const char *concentrator_name, ppp_link_status_cb_fn link_status_cb, + void *ctx_cb) +{ + ppp_pcb* result; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC_RETURN_NULL(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = NULL; + PPPAPI_VAR_REF(msg).msg.msg.ethernetcreate.pppif = pppif; + PPPAPI_VAR_REF(msg).msg.msg.ethernetcreate.ethif = ethif; + PPPAPI_VAR_REF(msg).msg.msg.ethernetcreate.service_name = service_name; + PPPAPI_VAR_REF(msg).msg.msg.ethernetcreate.concentrator_name = concentrator_name; + PPPAPI_VAR_REF(msg).msg.msg.ethernetcreate.link_status_cb = link_status_cb; + PPPAPI_VAR_REF(msg).msg.msg.ethernetcreate.ctx_cb = ctx_cb; + tcpip_api_call(pppapi_do_pppoe_create, &PPPAPI_VAR_REF(msg).call); + result = PPPAPI_VAR_REF(msg).msg.ppp; + PPPAPI_VAR_FREE(msg); + return result; +} +#endif /* PPPOE_SUPPORT */ + + +#if PPPOL2TP_SUPPORT +/** + * Call pppol2tp_create() inside the tcpip_thread context. + */ +static err_t +pppapi_do_pppol2tp_create(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + msg->msg.ppp = pppol2tp_create(msg->msg.msg.l2tpcreate.pppif, + msg->msg.msg.l2tpcreate.netif, API_EXPR_REF(msg->msg.msg.l2tpcreate.ipaddr), msg->msg.msg.l2tpcreate.port, +#if PPPOL2TP_AUTH_SUPPORT + msg->msg.msg.l2tpcreate.secret, + msg->msg.msg.l2tpcreate.secret_len, +#else /* PPPOL2TP_AUTH_SUPPORT */ + NULL, + 0, +#endif /* PPPOL2TP_AUTH_SUPPORT */ + msg->msg.msg.l2tpcreate.link_status_cb, msg->msg.msg.l2tpcreate.ctx_cb); + return ERR_OK; +} + +/** + * Call pppol2tp_create() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +ppp_pcb* +pppapi_pppol2tp_create(struct netif *pppif, struct netif *netif, ip_addr_t *ipaddr, u16_t port, + const u8_t *secret, u8_t secret_len, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb) +{ + ppp_pcb* result; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC_RETURN_NULL(msg); +#if !PPPOL2TP_AUTH_SUPPORT + LWIP_UNUSED_ARG(secret); + LWIP_UNUSED_ARG(secret_len); +#endif /* !PPPOL2TP_AUTH_SUPPORT */ + + PPPAPI_VAR_REF(msg).msg.ppp = NULL; + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.pppif = pppif; + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.netif = netif; + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.ipaddr = PPPAPI_VAR_REF(ipaddr); + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.port = port; +#if PPPOL2TP_AUTH_SUPPORT + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.secret = secret; + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.secret_len = secret_len; +#endif /* PPPOL2TP_AUTH_SUPPORT */ + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.link_status_cb = link_status_cb; + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.ctx_cb = ctx_cb; + tcpip_api_call(pppapi_do_pppol2tp_create, &PPPAPI_VAR_REF(msg).call); + result = PPPAPI_VAR_REF(msg).msg.ppp; + PPPAPI_VAR_FREE(msg); + return result; +} +#endif /* PPPOL2TP_SUPPORT */ + + +/** + * Call ppp_connect() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_connect(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. 
+ * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + return ppp_connect(msg->msg.ppp, msg->msg.msg.connect.holdoff); +} + +/** + * Call ppp_connect() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +err_t +pppapi_connect(ppp_pcb *pcb, u16_t holdoff) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + PPPAPI_VAR_REF(msg).msg.msg.connect.holdoff = holdoff; + err = tcpip_api_call(pppapi_do_ppp_connect, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} + + +#if PPP_SERVER +/** + * Call ppp_listen() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_listen(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + return ppp_listen(msg->msg.ppp); +} + +/** + * Call ppp_listen() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +err_t +pppapi_listen(ppp_pcb *pcb) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + err = tcpip_api_call(pppapi_do_ppp_listen, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} +#endif /* PPP_SERVER */ + + +/** + * Call ppp_close() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_close(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + return ppp_close(msg->msg.ppp, msg->msg.msg.close.nocarrier); +} + +/** + * Call ppp_close() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +err_t +pppapi_close(ppp_pcb *pcb, u8_t nocarrier) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + PPPAPI_VAR_REF(msg).msg.msg.close.nocarrier = nocarrier; + err = tcpip_api_call(pppapi_do_ppp_close, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} + + +/** + * Call ppp_free() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_free(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + return ppp_free(msg->msg.ppp); +} + +/** + * Call ppp_free() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +err_t +pppapi_free(ppp_pcb *pcb) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + err = tcpip_api_call(pppapi_do_ppp_free, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} + + +/** + * Call ppp_ioctl() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_ioctl(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. 
+ * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + return ppp_ioctl(msg->msg.ppp, msg->msg.msg.ioctl.cmd, msg->msg.msg.ioctl.arg); +} + +/** + * Call ppp_ioctl() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +err_t +pppapi_ioctl(ppp_pcb *pcb, u8_t cmd, void *arg) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + PPPAPI_VAR_REF(msg).msg.msg.ioctl.cmd = cmd; + PPPAPI_VAR_REF(msg).msg.msg.ioctl.arg = arg; + err = tcpip_api_call(pppapi_do_ppp_ioctl, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} + +#endif /* LWIP_PPP_API */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/pppcrypt.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/pppcrypt.c new file mode 100644 index 0000000..82d78c1 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/pppcrypt.c @@ -0,0 +1,66 @@ +/* + * pppcrypt.c - PPP/DES linkage for MS-CHAP and EAP SRP-SHA1 + * + * Extracted from chap_ms.c by James Carlson. + * + * Copyright (c) 1995 Eric Rosenquist. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && MSCHAP_SUPPORT /* don't build if not necessary */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/pppcrypt.h" + + +static u_char pppcrypt_get_7bits(u_char *input, int startBit) { + unsigned int word; + + word = (unsigned)input[startBit / 8] << 8; + word |= (unsigned)input[startBit / 8 + 1]; + + word >>= 15 - (startBit % 8 + 7); + + return word & 0xFE; +} + +/* IN 56 bit DES key missing parity bits + * OUT 64 bit DES key with parity bits added + */ +void pppcrypt_56_to_64_bit_key(u_char *key, u_char * des_key) { + des_key[0] = pppcrypt_get_7bits(key, 0); + des_key[1] = pppcrypt_get_7bits(key, 7); + des_key[2] = pppcrypt_get_7bits(key, 14); + des_key[3] = pppcrypt_get_7bits(key, 21); + des_key[4] = pppcrypt_get_7bits(key, 28); + des_key[5] = pppcrypt_get_7bits(key, 35); + des_key[6] = pppcrypt_get_7bits(key, 42); + des_key[7] = pppcrypt_get_7bits(key, 49); +} + +#endif /* PPP_SUPPORT && MSCHAP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/pppoe.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/pppoe.c new file mode 100644 index 0000000..8ed2d63 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/pppoe.c @@ -0,0 +1,1201 @@ +/***************************************************************************** +* pppoe.c - PPP Over Ethernet implementation for lwIP. +* +* Copyright (c) 2006 by Marc Boucher, Services Informatiques (MBSI) inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 06-01-01 Marc Boucher +* Ported to lwIP. +*****************************************************************************/ + + + +/* based on NetBSD: if_pppoe.c,v 1.64 2006/01/31 23:50:15 martin Exp */ + +/*- + * Copyright (c) 2002 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Martin Husemann . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPPOE_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#endif /* UNUSED */ + +#include "lwip/timeouts.h" +#include "lwip/memp.h" +#include "lwip/stats.h" +#include "lwip/snmp.h" + +#include "netif/ethernet.h" +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/lcp.h" +#include "netif/ppp/ipcp.h" +#include "netif/ppp/pppoe.h" + +/* Memory pool */ +LWIP_MEMPOOL_DECLARE(PPPOE_IF, MEMP_NUM_PPPOE_INTERFACES, sizeof(struct pppoe_softc), "PPPOE_IF") + +/* Add a 16 bit unsigned value to a buffer pointed to by PTR */ +#define PPPOE_ADD_16(PTR, VAL) \ + *(PTR)++ = (u8_t)((VAL) / 256); \ + *(PTR)++ = (u8_t)((VAL) % 256) + +/* Add a complete PPPoE header to the buffer pointed to by PTR */ +#define PPPOE_ADD_HEADER(PTR, CODE, SESS, LEN) \ + *(PTR)++ = PPPOE_VERTYPE; \ + *(PTR)++ = (CODE); \ + PPPOE_ADD_16(PTR, SESS); \ + PPPOE_ADD_16(PTR, LEN) + +#define PPPOE_DISC_TIMEOUT (5*1000) /* base for quick timeout calculation */ +#define PPPOE_SLOW_RETRY (60*1000) /* persistent retry interval */ +#define PPPOE_DISC_MAXPADI 4 /* retry PADI four times (quickly) */ +#define PPPOE_DISC_MAXPADR 2 /* retry PADR twice */ + +#ifdef PPPOE_SERVER +#error "PPPOE_SERVER is not yet supported under lwIP!" 
+/* from if_spppsubr.c */ +#define IFF_PASSIVE IFF_LINK0 /* wait passively for connection */ +#endif + +#define PPPOE_ERRORSTRING_LEN 64 + + +/* callbacks called from PPP core */ +static err_t pppoe_write(ppp_pcb *ppp, void *ctx, struct pbuf *p); +static err_t pppoe_netif_output(ppp_pcb *ppp, void *ctx, struct pbuf *p, u_short protocol); +static void pppoe_connect(ppp_pcb *ppp, void *ctx); +static void pppoe_disconnect(ppp_pcb *ppp, void *ctx); +static err_t pppoe_destroy(ppp_pcb *ppp, void *ctx); + +/* management routines */ +static void pppoe_abort_connect(struct pppoe_softc *); +#if 0 /* UNUSED */ +static void pppoe_clear_softc(struct pppoe_softc *, const char *); +#endif /* UNUSED */ + +/* internal timeout handling */ +static void pppoe_timeout(void *); + +/* sending actual protocol controll packets */ +static err_t pppoe_send_padi(struct pppoe_softc *); +static err_t pppoe_send_padr(struct pppoe_softc *); +#ifdef PPPOE_SERVER +static err_t pppoe_send_pado(struct pppoe_softc *); +static err_t pppoe_send_pads(struct pppoe_softc *); +#endif +static err_t pppoe_send_padt(struct netif *, u_int, const u8_t *); + +/* internal helper functions */ +static err_t pppoe_xmit(struct pppoe_softc *sc, struct pbuf *pb); +static struct pppoe_softc* pppoe_find_softc_by_session(u_int session, struct netif *rcvif); +static struct pppoe_softc* pppoe_find_softc_by_hunique(u8_t *token, size_t len, struct netif *rcvif); + +/** linked list of created pppoe interfaces */ +static struct pppoe_softc *pppoe_softc_list; + +/* Callbacks structure for PPP core */ +static const struct link_callbacks pppoe_callbacks = { + pppoe_connect, +#if PPP_SERVER + NULL, +#endif /* PPP_SERVER */ + pppoe_disconnect, + pppoe_destroy, + pppoe_write, + pppoe_netif_output, + NULL, + NULL +}; + +/* + * Create a new PPP Over Ethernet (PPPoE) connection. + * + * Return 0 on success, an error code on failure. 
+ */ +ppp_pcb *pppoe_create(struct netif *pppif, + struct netif *ethif, + const char *service_name, const char *concentrator_name, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb) +{ + ppp_pcb *ppp; + struct pppoe_softc *sc; + LWIP_UNUSED_ARG(service_name); + LWIP_UNUSED_ARG(concentrator_name); + LWIP_ASSERT_CORE_LOCKED(); + + sc = (struct pppoe_softc *)LWIP_MEMPOOL_ALLOC(PPPOE_IF); + if (sc == NULL) { + return NULL; + } + + ppp = ppp_new(pppif, &pppoe_callbacks, sc, link_status_cb, ctx_cb); + if (ppp == NULL) { + LWIP_MEMPOOL_FREE(PPPOE_IF, sc); + return NULL; + } + + memset(sc, 0, sizeof(struct pppoe_softc)); + sc->pcb = ppp; + sc->sc_ethif = ethif; + /* put the new interface at the head of the list */ + sc->next = pppoe_softc_list; + pppoe_softc_list = sc; + return ppp; +} + +/* Called by PPP core */ +static err_t pppoe_write(ppp_pcb *ppp, void *ctx, struct pbuf *p) { + struct pppoe_softc *sc = (struct pppoe_softc *)ctx; + struct pbuf *ph; /* Ethernet + PPPoE header */ + err_t ret; +#if MIB2_STATS + u16_t tot_len; +#else /* MIB2_STATS */ + LWIP_UNUSED_ARG(ppp); +#endif /* MIB2_STATS */ + + /* skip address & flags */ + pbuf_remove_header(p, 2); + + ph = pbuf_alloc(PBUF_LINK, (u16_t)(PPPOE_HEADERLEN), PBUF_RAM); + if(!ph) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.proterr); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + pbuf_free(p); + return ERR_MEM; + } + + pbuf_remove_header(ph, PPPOE_HEADERLEN); /* hide PPPoE header */ + pbuf_cat(ph, p); +#if MIB2_STATS + tot_len = ph->tot_len; +#endif /* MIB2_STATS */ + + ret = pppoe_xmit(sc, ph); + if (ret != ERR_OK) { + LINK_STATS_INC(link.err); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return ret; + } + + MIB2_STATS_NETIF_ADD(ppp->netif, ifoutoctets, (u16_t)tot_len); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutucastpkts); + LINK_STATS_INC(link.xmit); + return ERR_OK; +} + +/* Called by PPP core */ +static err_t pppoe_netif_output(ppp_pcb *ppp, void *ctx, struct pbuf *p, u_short protocol) { + struct pppoe_softc *sc = (struct pppoe_softc *)ctx; + struct pbuf *pb; + u8_t *pl; + err_t err; +#if MIB2_STATS + u16_t tot_len; +#else /* MIB2_STATS */ + LWIP_UNUSED_ARG(ppp); +#endif /* MIB2_STATS */ + + /* @todo: try to use pbuf_header() here! 
*/ + pb = pbuf_alloc(PBUF_LINK, PPPOE_HEADERLEN + sizeof(protocol), PBUF_RAM); + if(!pb) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.proterr); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return ERR_MEM; + } + + pbuf_remove_header(pb, PPPOE_HEADERLEN); + + pl = (u8_t*)pb->payload; + PUTSHORT(protocol, pl); + + pbuf_chain(pb, p); +#if MIB2_STATS + tot_len = pb->tot_len; +#endif /* MIB2_STATS */ + + if( (err = pppoe_xmit(sc, pb)) != ERR_OK) { + LINK_STATS_INC(link.err); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return err; + } + + MIB2_STATS_NETIF_ADD(ppp->netif, ifoutoctets, tot_len); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutucastpkts); + LINK_STATS_INC(link.xmit); + return ERR_OK; +} + +static err_t +pppoe_destroy(ppp_pcb *ppp, void *ctx) +{ + struct pppoe_softc *sc = (struct pppoe_softc *)ctx; + struct pppoe_softc **copp, *freep; + LWIP_UNUSED_ARG(ppp); + + sys_untimeout(pppoe_timeout, sc); + + /* remove interface from list */ + for (copp = &pppoe_softc_list; (freep = *copp); copp = &freep->next) { + if (freep == sc) { + *copp = freep->next; + break; + } + } + +#ifdef PPPOE_TODO + if (sc->sc_concentrator_name) { + mem_free(sc->sc_concentrator_name); + } + if (sc->sc_service_name) { + mem_free(sc->sc_service_name); + } +#endif /* PPPOE_TODO */ + LWIP_MEMPOOL_FREE(PPPOE_IF, sc); + + return ERR_OK; +} + +/* + * Find the interface handling the specified session. + * Note: O(number of sessions open), this is a client-side only, mean + * and lean implementation, so number of open sessions typically should + * be 1. + */ +static struct pppoe_softc* pppoe_find_softc_by_session(u_int session, struct netif *rcvif) { + struct pppoe_softc *sc; + + for (sc = pppoe_softc_list; sc != NULL; sc = sc->next) { + if (sc->sc_state == PPPOE_STATE_SESSION + && sc->sc_session == session + && sc->sc_ethif == rcvif) { + return sc; + } + } + return NULL; +} + +/* Check host unique token passed and return appropriate softc pointer, + * or NULL if token is bogus. 
*/ +static struct pppoe_softc* pppoe_find_softc_by_hunique(u8_t *token, size_t len, struct netif *rcvif) { + struct pppoe_softc *sc, *t; + + if (len != sizeof sc) { + return NULL; + } + MEMCPY(&t, token, len); + + for (sc = pppoe_softc_list; sc != NULL; sc = sc->next) { + if (sc == t) { + break; + } + } + + if (sc == NULL) { + PPPDEBUG(LOG_DEBUG, ("pppoe: alien host unique tag, no session found\n")); + return NULL; + } + + /* should be safe to access *sc now */ + if (sc->sc_state < PPPOE_STATE_PADI_SENT || sc->sc_state >= PPPOE_STATE_SESSION) { + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": host unique tag found, but it belongs to a connection in state %d\n", + sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, sc->sc_state)); + return NULL; + } + if (sc->sc_ethif != rcvif) { + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": wrong interface, not accepting host unique\n", + sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + return NULL; + } + return sc; +} + +/* analyze and handle a single received packet while not in session state */ +void +pppoe_disc_input(struct netif *netif, struct pbuf *pb) +{ + u16_t tag, len, off; + u16_t session, plen; + struct pppoe_softc *sc; +#if PPP_DEBUG + const char *err_msg = NULL; +#endif /* PPP_DEBUG */ + u8_t *ac_cookie; + u16_t ac_cookie_len; +#ifdef PPPOE_SERVER + u8_t *hunique; + size_t hunique_len; +#endif + struct pppoehdr *ph; + struct pppoetag pt; + int err; + struct eth_hdr *ethhdr; + + /* don't do anything if there is not a single PPPoE instance */ + if (pppoe_softc_list == NULL) { + pbuf_free(pb); + return; + } + + pb = pbuf_coalesce(pb, PBUF_RAW); + + ethhdr = (struct eth_hdr *)pb->payload; + + ac_cookie = NULL; + ac_cookie_len = 0; +#ifdef PPPOE_SERVER + hunique = NULL; + hunique_len = 0; +#endif + session = 0; + off = sizeof(struct eth_hdr) + sizeof(struct pppoehdr); + if (pb->len < off) { + PPPDEBUG(LOG_DEBUG, ("pppoe: packet too short: %d\n", pb->len)); + goto done; + } + + ph = (struct pppoehdr *) (ethhdr + 1); + if (ph->vertype != PPPOE_VERTYPE) { + PPPDEBUG(LOG_DEBUG, ("pppoe: unknown version/type packet: 0x%x\n", ph->vertype)); + goto done; + } + session = lwip_ntohs(ph->session); + plen = lwip_ntohs(ph->plen); + + if (plen > (pb->len - off)) { + PPPDEBUG(LOG_DEBUG, ("pppoe: packet content does not fit: data available = %d, packet size = %u\n", + pb->len - off, plen)); + goto done; + } + if(pb->tot_len == pb->len) { + u16_t framelen = off + plen; + if (framelen < pb->len) { + /* ignore trailing garbage */ + pb->tot_len = pb->len = framelen; + } + } + tag = 0; + len = 0; + sc = NULL; + while (off + sizeof(pt) <= pb->len) { + MEMCPY(&pt, (u8_t*)pb->payload + off, sizeof(pt)); + tag = lwip_ntohs(pt.tag); + len = lwip_ntohs(pt.len); + if (off + sizeof(pt) + len > pb->len) { + PPPDEBUG(LOG_DEBUG, ("pppoe: tag 0x%x len 0x%x is too long\n", tag, len)); + goto done; + } + switch (tag) { + case PPPOE_TAG_EOL: + goto breakbreak; + case PPPOE_TAG_SNAME: + break; /* ignored */ + case PPPOE_TAG_ACNAME: + break; /* ignored */ + case PPPOE_TAG_HUNIQUE: + if (sc != NULL) { + break; + } +#ifdef PPPOE_SERVER + hunique = (u8_t*)pb->payload + off + sizeof(pt); + hunique_len = len; +#endif + sc = pppoe_find_softc_by_hunique((u8_t*)pb->payload + off + sizeof(pt), len, netif); + break; + case PPPOE_TAG_ACCOOKIE: + if (ac_cookie == NULL) { + if (len > PPPOE_MAX_AC_COOKIE_LEN) { + PPPDEBUG(LOG_DEBUG, ("pppoe: AC cookie is too long: len = %d, max = %d\n", len, PPPOE_MAX_AC_COOKIE_LEN)); + goto done; + } + ac_cookie = (u8_t*)pb->payload + off + 
sizeof(pt); + ac_cookie_len = len; + } + break; +#if PPP_DEBUG + case PPPOE_TAG_SNAME_ERR: + err_msg = "SERVICE NAME ERROR"; + break; + case PPPOE_TAG_ACSYS_ERR: + err_msg = "AC SYSTEM ERROR"; + break; + case PPPOE_TAG_GENERIC_ERR: + err_msg = "GENERIC ERROR"; + break; +#endif /* PPP_DEBUG */ + default: + break; + } +#if PPP_DEBUG + if (err_msg != NULL) { + char error_tmp[PPPOE_ERRORSTRING_LEN]; + u16_t error_len = LWIP_MIN(len, sizeof(error_tmp)-1); + strncpy(error_tmp, (char*)pb->payload + off + sizeof(pt), error_len); + error_tmp[error_len] = '\0'; + if (sc) { + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": %s: %s\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, err_msg, error_tmp)); + } else { + PPPDEBUG(LOG_DEBUG, ("pppoe: %s: %s\n", err_msg, error_tmp)); + } + } +#endif /* PPP_DEBUG */ + off += sizeof(pt) + len; + } + +breakbreak:; + switch (ph->code) { + case PPPOE_CODE_PADI: +#ifdef PPPOE_SERVER + /* + * got service name, concentrator name, and/or host unique. + * ignore if we have no interfaces with IFF_PASSIVE|IFF_UP. + */ + if (LIST_EMPTY(&pppoe_softc_list)) { + goto done; + } + LIST_FOREACH(sc, &pppoe_softc_list, sc_list) { + if (!(sc->sc_sppp.pp_if.if_flags & IFF_UP)) { + continue; + } + if (!(sc->sc_sppp.pp_if.if_flags & IFF_PASSIVE)) { + continue; + } + if (sc->sc_state == PPPOE_STATE_INITIAL) { + break; + } + } + if (sc == NULL) { + /* PPPDEBUG(LOG_DEBUG, ("pppoe: free passive interface is not found\n")); */ + goto done; + } + if (hunique) { + if (sc->sc_hunique) { + mem_free(sc->sc_hunique); + } + sc->sc_hunique = mem_malloc(hunique_len); + if (sc->sc_hunique == NULL) { + goto done; + } + sc->sc_hunique_len = hunique_len; + MEMCPY(sc->sc_hunique, hunique, hunique_len); + } + MEMCPY(&sc->sc_dest, eh->ether_shost, sizeof sc->sc_dest); + sc->sc_state = PPPOE_STATE_PADO_SENT; + pppoe_send_pado(sc); + break; +#endif /* PPPOE_SERVER */ + case PPPOE_CODE_PADR: +#ifdef PPPOE_SERVER + /* + * get sc from ac_cookie if IFF_PASSIVE + */ + if (ac_cookie == NULL) { + /* be quiet if there is not a single pppoe instance */ + PPPDEBUG(LOG_DEBUG, ("pppoe: received PADR but not includes ac_cookie\n")); + goto done; + } + sc = pppoe_find_softc_by_hunique(ac_cookie, ac_cookie_len, netif); + if (sc == NULL) { + /* be quiet if there is not a single pppoe instance */ + if (!LIST_EMPTY(&pppoe_softc_list)) { + PPPDEBUG(LOG_DEBUG, ("pppoe: received PADR but could not find request for it\n")); + } + goto done; + } + if (sc->sc_state != PPPOE_STATE_PADO_SENT) { + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": received unexpected PADR\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + goto done; + } + if (hunique) { + if (sc->sc_hunique) { + mem_free(sc->sc_hunique); + } + sc->sc_hunique = mem_malloc(hunique_len); + if (sc->sc_hunique == NULL) { + goto done; + } + sc->sc_hunique_len = hunique_len; + MEMCPY(sc->sc_hunique, hunique, hunique_len); + } + pppoe_send_pads(sc); + sc->sc_state = PPPOE_STATE_SESSION; + ppp_start(sc->pcb); /* notify upper layers */ + break; +#else + /* ignore, we are no access concentrator */ + goto done; +#endif /* PPPOE_SERVER */ + case PPPOE_CODE_PADO: + if (sc == NULL) { + /* be quiet if there is not a single pppoe instance */ + if (pppoe_softc_list != NULL) { + PPPDEBUG(LOG_DEBUG, ("pppoe: received PADO but could not find request for it\n")); + } + goto done; + } + if (sc->sc_state != PPPOE_STATE_PADI_SENT) { + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": received unexpected PADO\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + goto 
done; + } + if (ac_cookie) { + sc->sc_ac_cookie_len = ac_cookie_len; + MEMCPY(sc->sc_ac_cookie, ac_cookie, ac_cookie_len); + } + MEMCPY(&sc->sc_dest, ethhdr->src.addr, sizeof(sc->sc_dest.addr)); + sys_untimeout(pppoe_timeout, sc); + sc->sc_padr_retried = 0; + sc->sc_state = PPPOE_STATE_PADR_SENT; + if ((err = pppoe_send_padr(sc)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": failed to send PADR, error=%d\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(PPPOE_DISC_TIMEOUT * (1 + sc->sc_padr_retried), pppoe_timeout, sc); + break; + case PPPOE_CODE_PADS: + if (sc == NULL) { + goto done; + } + sc->sc_session = session; + sys_untimeout(pppoe_timeout, sc); + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": session 0x%x connected\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, session)); + sc->sc_state = PPPOE_STATE_SESSION; + ppp_start(sc->pcb); /* notify upper layers */ + break; + case PPPOE_CODE_PADT: + /* Don't disconnect here, we let the LCP Echo/Reply find the fact + * that PPP session is down. Asking the PPP stack to end the session + * require strict checking about the PPP phase to prevent endless + * disconnection loops. + */ +#if 0 /* UNUSED */ + if (sc == NULL) { /* PADT frames are rarely sent with a hunique tag, this is actually almost always true */ + goto done; + } + pppoe_clear_softc(sc, "received PADT"); +#endif /* UNUSED */ + break; + default: + if(sc) { + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": unknown code (0x%"X16_F") session = 0x%"X16_F"\n", + sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, + (u16_t)ph->code, session)); + } else { + PPPDEBUG(LOG_DEBUG, ("pppoe: unknown code (0x%"X16_F") session = 0x%"X16_F"\n", (u16_t)ph->code, session)); + } + break; + } + +done: + pbuf_free(pb); + return; +} + +void +pppoe_data_input(struct netif *netif, struct pbuf *pb) +{ + u16_t session, plen; + struct pppoe_softc *sc; + struct pppoehdr *ph; +#ifdef PPPOE_TERM_UNKNOWN_SESSIONS + u8_t shost[ETHER_ADDR_LEN]; +#endif + +#ifdef PPPOE_TERM_UNKNOWN_SESSIONS + MEMCPY(shost, ((struct eth_hdr *)pb->payload)->src.addr, sizeof(shost)); +#endif + if (pbuf_remove_header(pb, sizeof(struct eth_hdr)) != 0) { + /* bail out */ + PPPDEBUG(LOG_ERR, ("pppoe_data_input: pbuf_remove_header failed\n")); + LINK_STATS_INC(link.lenerr); + goto drop; + } + + if (pb->len < sizeof(*ph)) { + PPPDEBUG(LOG_DEBUG, ("pppoe_data_input: could not get PPPoE header\n")); + goto drop; + } + ph = (struct pppoehdr *)pb->payload; + + if (ph->vertype != PPPOE_VERTYPE) { + PPPDEBUG(LOG_DEBUG, ("pppoe (data): unknown version/type packet: 0x%x\n", ph->vertype)); + goto drop; + } + if (ph->code != 0) { + goto drop; + } + + session = lwip_ntohs(ph->session); + sc = pppoe_find_softc_by_session(session, netif); + if (sc == NULL) { +#ifdef PPPOE_TERM_UNKNOWN_SESSIONS + PPPDEBUG(LOG_DEBUG, ("pppoe: input for unknown session 0x%x, sending PADT\n", session)); + pppoe_send_padt(netif, session, shost); +#endif + goto drop; + } + + plen = lwip_ntohs(ph->plen); + + if (pbuf_remove_header(pb, PPPOE_HEADERLEN) != 0) { + /* bail out */ + PPPDEBUG(LOG_ERR, ("pppoe_data_input: pbuf_remove_header PPPOE_HEADERLEN failed\n")); + LINK_STATS_INC(link.lenerr); + goto drop; + } + + PPPDEBUG(LOG_DEBUG, ("pppoe_data_input: %c%c%"U16_F": pkthdr.len=%d, pppoe.len=%d\n", + sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, + pb->len, plen)); + + if (pb->tot_len < plen) { + goto drop; + } + + /* Dispatch the packet 
thereby consuming it. */ + ppp_input(sc->pcb, pb); + return; + +drop: + pbuf_free(pb); +} + +static err_t +pppoe_output(struct pppoe_softc *sc, struct pbuf *pb) +{ + struct eth_hdr *ethhdr; + u16_t etype; + err_t res; + + /* make room for Ethernet header - should not fail */ + if (pbuf_add_header(pb, sizeof(struct eth_hdr)) != 0) { + /* bail out */ + PPPDEBUG(LOG_ERR, ("pppoe: %c%c%"U16_F": pppoe_output: could not allocate room for Ethernet header\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + LINK_STATS_INC(link.lenerr); + pbuf_free(pb); + return ERR_BUF; + } + ethhdr = (struct eth_hdr *)pb->payload; + etype = sc->sc_state == PPPOE_STATE_SESSION ? ETHTYPE_PPPOE : ETHTYPE_PPPOEDISC; + ethhdr->type = lwip_htons(etype); + MEMCPY(ðhdr->dest.addr, &sc->sc_dest.addr, sizeof(ethhdr->dest.addr)); + MEMCPY(ðhdr->src.addr, &sc->sc_ethif->hwaddr, sizeof(ethhdr->src.addr)); + + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F" (%x) state=%d, session=0x%x output -> %02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F", len=%d\n", + sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, etype, + sc->sc_state, sc->sc_session, + sc->sc_dest.addr[0], sc->sc_dest.addr[1], sc->sc_dest.addr[2], sc->sc_dest.addr[3], sc->sc_dest.addr[4], sc->sc_dest.addr[5], + pb->tot_len)); + + res = sc->sc_ethif->linkoutput(sc->sc_ethif, pb); + + pbuf_free(pb); + + return res; +} + +static err_t +pppoe_send_padi(struct pppoe_softc *sc) +{ + struct pbuf *pb; + u8_t *p; + int len; +#ifdef PPPOE_TODO + int l1 = 0, l2 = 0; /* XXX: gcc */ +#endif /* PPPOE_TODO */ + + /* calculate length of frame (excluding ethernet header + pppoe header) */ + len = 2 + 2 + 2 + 2 + sizeof sc; /* service name tag is required, host unique is send too */ +#ifdef PPPOE_TODO + if (sc->sc_service_name != NULL) { + l1 = (int)strlen(sc->sc_service_name); + len += l1; + } + if (sc->sc_concentrator_name != NULL) { + l2 = (int)strlen(sc->sc_concentrator_name); + len += 2 + 2 + l2; + } +#endif /* PPPOE_TODO */ + LWIP_ASSERT("sizeof(struct eth_hdr) + PPPOE_HEADERLEN + len <= 0xffff", + sizeof(struct eth_hdr) + PPPOE_HEADERLEN + len <= 0xffff); + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_LINK, (u16_t)(PPPOE_HEADERLEN + len), PBUF_RAM); + if (!pb) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + PPPOE_ADD_HEADER(p, PPPOE_CODE_PADI, 0, (u16_t)len); + PPPOE_ADD_16(p, PPPOE_TAG_SNAME); +#ifdef PPPOE_TODO + if (sc->sc_service_name != NULL) { + PPPOE_ADD_16(p, l1); + MEMCPY(p, sc->sc_service_name, l1); + p += l1; + } else +#endif /* PPPOE_TODO */ + { + PPPOE_ADD_16(p, 0); + } +#ifdef PPPOE_TODO + if (sc->sc_concentrator_name != NULL) { + PPPOE_ADD_16(p, PPPOE_TAG_ACNAME); + PPPOE_ADD_16(p, l2); + MEMCPY(p, sc->sc_concentrator_name, l2); + p += l2; + } +#endif /* PPPOE_TODO */ + PPPOE_ADD_16(p, PPPOE_TAG_HUNIQUE); + PPPOE_ADD_16(p, sizeof(sc)); + MEMCPY(p, &sc, sizeof sc); + + /* send pkt */ + return pppoe_output(sc, pb); +} + +static void +pppoe_timeout(void *arg) +{ + u32_t retry_wait; + int err; + struct pppoe_softc *sc = (struct pppoe_softc*)arg; + + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": timeout\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + + switch (sc->sc_state) { + case PPPOE_STATE_PADI_SENT: + /* + * We have two basic ways of retrying: + * - Quick retry mode: try a few times in short sequence + * - Slow retry mode: we already had a connection successfully + * established and will try infinitely 
(without user + * intervention) + * We only enter slow retry mode if IFF_LINK1 (aka autodial) + * is not set. + */ + if (sc->sc_padi_retried < 0xff) { + sc->sc_padi_retried++; + } + if (!sc->pcb->settings.persist && sc->sc_padi_retried >= PPPOE_DISC_MAXPADI) { +#if 0 + if ((sc->sc_sppp.pp_if.if_flags & IFF_LINK1) == 0) { + /* slow retry mode */ + retry_wait = PPPOE_SLOW_RETRY; + } else +#endif + { + pppoe_abort_connect(sc); + return; + } + } + /* initialize for quick retry mode */ + retry_wait = LWIP_MIN(PPPOE_DISC_TIMEOUT * sc->sc_padi_retried, PPPOE_SLOW_RETRY); + if ((err = pppoe_send_padi(sc)) != 0) { + sc->sc_padi_retried--; + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": failed to transmit PADI, error=%d\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(retry_wait, pppoe_timeout, sc); + break; + + case PPPOE_STATE_PADR_SENT: + sc->sc_padr_retried++; + if (sc->sc_padr_retried >= PPPOE_DISC_MAXPADR) { + MEMCPY(&sc->sc_dest, ethbroadcast.addr, sizeof(sc->sc_dest)); + sc->sc_state = PPPOE_STATE_PADI_SENT; + sc->sc_padr_retried = 0; + if ((err = pppoe_send_padi(sc)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": failed to send PADI, error=%d\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(PPPOE_DISC_TIMEOUT * (1 + sc->sc_padi_retried), pppoe_timeout, sc); + return; + } + if ((err = pppoe_send_padr(sc)) != 0) { + sc->sc_padr_retried--; + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": failed to send PADR, error=%d\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(PPPOE_DISC_TIMEOUT * (1 + sc->sc_padr_retried), pppoe_timeout, sc); + break; + default: + return; /* all done, work in peace */ + } +} + +/* Start a connection (i.e. 
initiate discovery phase) */ +static void +pppoe_connect(ppp_pcb *ppp, void *ctx) +{ + err_t err; + struct pppoe_softc *sc = (struct pppoe_softc *)ctx; + lcp_options *lcp_wo; + lcp_options *lcp_ao; +#if PPP_IPV4_SUPPORT && VJ_SUPPORT + ipcp_options *ipcp_wo; + ipcp_options *ipcp_ao; +#endif /* PPP_IPV4_SUPPORT && VJ_SUPPORT */ + + sc->sc_session = 0; + sc->sc_ac_cookie_len = 0; + sc->sc_padi_retried = 0; + sc->sc_padr_retried = 0; + /* changed to real address later */ + MEMCPY(&sc->sc_dest, ethbroadcast.addr, sizeof(sc->sc_dest)); +#ifdef PPPOE_SERVER + /* wait PADI if IFF_PASSIVE */ + if ((sc->sc_sppp.pp_if.if_flags & IFF_PASSIVE)) { + return 0; + } +#endif + + lcp_wo = &ppp->lcp_wantoptions; + lcp_wo->mru = sc->sc_ethif->mtu-PPPOE_HEADERLEN-2; /* two byte PPP protocol discriminator, then IP data */ + lcp_wo->neg_asyncmap = 0; + lcp_wo->neg_pcompression = 0; + lcp_wo->neg_accompression = 0; + lcp_wo->passive = 0; + lcp_wo->silent = 0; + + lcp_ao = &ppp->lcp_allowoptions; + lcp_ao->mru = sc->sc_ethif->mtu-PPPOE_HEADERLEN-2; /* two byte PPP protocol discriminator, then IP data */ + lcp_ao->neg_asyncmap = 0; + lcp_ao->neg_pcompression = 0; + lcp_ao->neg_accompression = 0; + +#if PPP_IPV4_SUPPORT && VJ_SUPPORT + ipcp_wo = &ppp->ipcp_wantoptions; + ipcp_wo->neg_vj = 0; + ipcp_wo->old_vj = 0; + + ipcp_ao = &ppp->ipcp_allowoptions; + ipcp_ao->neg_vj = 0; + ipcp_ao->old_vj = 0; +#endif /* PPP_IPV4_SUPPORT && VJ_SUPPORT */ + + /* save state, in case we fail to send PADI */ + sc->sc_state = PPPOE_STATE_PADI_SENT; + if ((err = pppoe_send_padi(sc)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": failed to send PADI, error=%d\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, err)); + } + sys_timeout(PPPOE_DISC_TIMEOUT, pppoe_timeout, sc); +} + +/* disconnect */ +static void +pppoe_disconnect(ppp_pcb *ppp, void *ctx) +{ + struct pppoe_softc *sc = (struct pppoe_softc *)ctx; + + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": disconnecting\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + if (sc->sc_state == PPPOE_STATE_SESSION) { + pppoe_send_padt(sc->sc_ethif, sc->sc_session, (const u8_t *)&sc->sc_dest); + } + + /* stop any timer, disconnect can be called while initiating is in progress */ + sys_untimeout(pppoe_timeout, sc); + sc->sc_state = PPPOE_STATE_INITIAL; +#ifdef PPPOE_SERVER + if (sc->sc_hunique) { + mem_free(sc->sc_hunique); + sc->sc_hunique = NULL; /* probably not necessary, if state is initial we shouldn't have to access hunique anyway */ + } + sc->sc_hunique_len = 0; /* probably not necessary, if state is initial we shouldn't have to access hunique anyway */ +#endif + ppp_link_end(ppp); /* notify upper layers */ + return; +} + +/* Connection attempt aborted */ +static void +pppoe_abort_connect(struct pppoe_softc *sc) +{ + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": could not establish connection\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + sc->sc_state = PPPOE_STATE_INITIAL; + ppp_link_failed(sc->pcb); /* notify upper layers */ +} + +/* Send a PADR packet */ +static err_t +pppoe_send_padr(struct pppoe_softc *sc) +{ + struct pbuf *pb; + u8_t *p; + size_t len; +#ifdef PPPOE_TODO + size_t l1 = 0; /* XXX: gcc */ +#endif /* PPPOE_TODO */ + + len = 2 + 2 + 2 + 2 + sizeof(sc); /* service name, host unique */ +#ifdef PPPOE_TODO + if (sc->sc_service_name != NULL) { /* service name tag maybe empty */ + l1 = strlen(sc->sc_service_name); + len += l1; + } +#endif /* PPPOE_TODO */ + if (sc->sc_ac_cookie_len > 0) { + len += 2 + 2 + 
sc->sc_ac_cookie_len; /* AC cookie */ + } + LWIP_ASSERT("sizeof(struct eth_hdr) + PPPOE_HEADERLEN + len <= 0xffff", + sizeof(struct eth_hdr) + PPPOE_HEADERLEN + len <= 0xffff); + pb = pbuf_alloc(PBUF_LINK, (u16_t)(PPPOE_HEADERLEN + len), PBUF_RAM); + if (!pb) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + p = (u8_t*)pb->payload; + PPPOE_ADD_HEADER(p, PPPOE_CODE_PADR, 0, len); + PPPOE_ADD_16(p, PPPOE_TAG_SNAME); +#ifdef PPPOE_TODO + if (sc->sc_service_name != NULL) { + PPPOE_ADD_16(p, l1); + MEMCPY(p, sc->sc_service_name, l1); + p += l1; + } else +#endif /* PPPOE_TODO */ + { + PPPOE_ADD_16(p, 0); + } + if (sc->sc_ac_cookie_len > 0) { + PPPOE_ADD_16(p, PPPOE_TAG_ACCOOKIE); + PPPOE_ADD_16(p, sc->sc_ac_cookie_len); + MEMCPY(p, sc->sc_ac_cookie, sc->sc_ac_cookie_len); + p += sc->sc_ac_cookie_len; + } + PPPOE_ADD_16(p, PPPOE_TAG_HUNIQUE); + PPPOE_ADD_16(p, sizeof(sc)); + MEMCPY(p, &sc, sizeof sc); + + return pppoe_output(sc, pb); +} + +/* send a PADT packet */ +static err_t +pppoe_send_padt(struct netif *outgoing_if, u_int session, const u8_t *dest) +{ + struct pbuf *pb; + struct eth_hdr *ethhdr; + err_t res; + u8_t *p; + + pb = pbuf_alloc(PBUF_LINK, (u16_t)(PPPOE_HEADERLEN), PBUF_RAM); + if (!pb) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + if (pbuf_add_header(pb, sizeof(struct eth_hdr))) { + PPPDEBUG(LOG_ERR, ("pppoe: pppoe_send_padt: could not allocate room for PPPoE header\n")); + LINK_STATS_INC(link.lenerr); + pbuf_free(pb); + return ERR_BUF; + } + ethhdr = (struct eth_hdr *)pb->payload; + ethhdr->type = PP_HTONS(ETHTYPE_PPPOEDISC); + MEMCPY(ðhdr->dest.addr, dest, sizeof(ethhdr->dest.addr)); + MEMCPY(ðhdr->src.addr, &outgoing_if->hwaddr, sizeof(ethhdr->src.addr)); + + p = (u8_t*)(ethhdr + 1); + PPPOE_ADD_HEADER(p, PPPOE_CODE_PADT, session, 0); + + res = outgoing_if->linkoutput(outgoing_if, pb); + + pbuf_free(pb); + + return res; +} + +#ifdef PPPOE_SERVER +static err_t +pppoe_send_pado(struct pppoe_softc *sc) +{ + struct pbuf *pb; + u8_t *p; + size_t len; + + /* calc length */ + len = 0; + /* include ac_cookie */ + len += 2 + 2 + sizeof(sc); + /* include hunique */ + len += 2 + 2 + sc->sc_hunique_len; + pb = pbuf_alloc(PBUF_LINK, (u16_t)(PPPOE_HEADERLEN + len), PBUF_RAM); + if (!pb) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + p = (u8_t*)pb->payload; + PPPOE_ADD_HEADER(p, PPPOE_CODE_PADO, 0, len); + PPPOE_ADD_16(p, PPPOE_TAG_ACCOOKIE); + PPPOE_ADD_16(p, sizeof(sc)); + MEMCPY(p, &sc, sizeof(sc)); + p += sizeof(sc); + PPPOE_ADD_16(p, PPPOE_TAG_HUNIQUE); + PPPOE_ADD_16(p, sc->sc_hunique_len); + MEMCPY(p, sc->sc_hunique, sc->sc_hunique_len); + return pppoe_output(sc, pb); +} + +static err_t +pppoe_send_pads(struct pppoe_softc *sc) +{ + struct pbuf *pb; + u8_t *p; + size_t len, l1 = 0; /* XXX: gcc */ + + sc->sc_session = mono_time.tv_sec % 0xff + 1; + /* calc length */ + len = 0; + /* include hunique */ + len += 2 + 2 + 2 + 2 + sc->sc_hunique_len; /* service name, host unique*/ + if (sc->sc_service_name != NULL) { /* service name tag maybe empty */ + l1 = strlen(sc->sc_service_name); + len += l1; + } + pb = pbuf_alloc(PBUF_LINK, (u16_t)(PPPOE_HEADERLEN + len), PBUF_RAM); + if (!pb) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + p = (u8_t*)pb->payload; + PPPOE_ADD_HEADER(p, PPPOE_CODE_PADS, sc->sc_session, len); + PPPOE_ADD_16(p, PPPOE_TAG_SNAME); + if (sc->sc_service_name != NULL) { + PPPOE_ADD_16(p, l1); + MEMCPY(p, 
sc->sc_service_name, l1); + p += l1; + } else { + PPPOE_ADD_16(p, 0); + } + PPPOE_ADD_16(p, PPPOE_TAG_HUNIQUE); + PPPOE_ADD_16(p, sc->sc_hunique_len); + MEMCPY(p, sc->sc_hunique, sc->sc_hunique_len); + return pppoe_output(sc, pb); +} +#endif + +static err_t +pppoe_xmit(struct pppoe_softc *sc, struct pbuf *pb) +{ + u8_t *p; + size_t len; + + len = pb->tot_len; + + /* make room for PPPoE header - should not fail */ + if (pbuf_add_header(pb, PPPOE_HEADERLEN) != 0) { + /* bail out */ + PPPDEBUG(LOG_ERR, ("pppoe: %c%c%"U16_F": pppoe_xmit: could not allocate room for PPPoE header\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + LINK_STATS_INC(link.lenerr); + pbuf_free(pb); + return ERR_BUF; + } + + p = (u8_t*)pb->payload; + PPPOE_ADD_HEADER(p, 0, sc->sc_session, len); + + return pppoe_output(sc, pb); +} + +#if 0 /*def PFIL_HOOKS*/ +static int +pppoe_ifattach_hook(void *arg, struct pbuf **mp, struct netif *ifp, int dir) +{ + struct pppoe_softc *sc; + int s; + + if (mp != (struct pbuf **)PFIL_IFNET_DETACH) { + return 0; + } + + LIST_FOREACH(sc, &pppoe_softc_list, sc_list) { + if (sc->sc_ethif != ifp) { + continue; + } + if (sc->sc_sppp.pp_if.if_flags & IFF_UP) { + sc->sc_sppp.pp_if.if_flags &= ~(IFF_UP|IFF_RUNNING); + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": ethernet interface detached, going down\n", + sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + } + sc->sc_ethif = NULL; + pppoe_clear_softc(sc, "ethernet interface detached"); + } + + return 0; +} +#endif + +#if 0 /* UNUSED */ +static void +pppoe_clear_softc(struct pppoe_softc *sc, const char *message) +{ + LWIP_UNUSED_ARG(message); + + /* stop timer */ + sys_untimeout(pppoe_timeout, sc); + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": session 0x%x terminated, %s\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, sc->sc_session, message)); + sc->sc_state = PPPOE_STATE_INITIAL; + ppp_link_end(sc->pcb); /* notify upper layers - /!\ dangerous /!\ - see pppoe_disc_input() */ +} +#endif /* UNUSED */ +#endif /* PPP_SUPPORT && PPPOE_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/pppol2tp.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/pppol2tp.c new file mode 100644 index 0000000..4c4557f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/pppol2tp.c @@ -0,0 +1,1159 @@ +/** + * @file + * Network Point to Point Protocol over Layer 2 Tunneling Protocol program file. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +/* + * L2TP Support status: + * + * Supported: + * - L2TPv2 (PPP over L2TP, a.k.a. UDP tunnels) + * - LAC + * + * Not supported: + * - LNS (require PPP server support) + * - L2TPv3 ethernet pseudowires + * - L2TPv3 VLAN pseudowire + * - L2TPv3 PPP pseudowires + * - L2TPv3 IP encapsulation + * - L2TPv3 IP pseudowire + * - L2TP tunnel switching - http://tools.ietf.org/html/draft-ietf-l2tpext-tunnel-switching-08 + * - Multiple tunnels per UDP socket, as well as multiple sessions per tunnel + * - Hidden AVPs + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPPOL2TP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/err.h" +#include "lwip/memp.h" +#include "lwip/netif.h" +#include "lwip/udp.h" +#include "lwip/snmp.h" + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/lcp.h" +#include "netif/ppp/ipcp.h" +#include "netif/ppp/pppol2tp.h" +#include "netif/ppp/pppcrypt.h" +#include "netif/ppp/magic.h" + +/* Memory pool */ +LWIP_MEMPOOL_DECLARE(PPPOL2TP_PCB, MEMP_NUM_PPPOL2TP_INTERFACES, sizeof(pppol2tp_pcb), "PPPOL2TP_PCB") + +/* callbacks called from PPP core */ +static err_t pppol2tp_write(ppp_pcb *ppp, void *ctx, struct pbuf *p); +static err_t pppol2tp_netif_output(ppp_pcb *ppp, void *ctx, struct pbuf *p, u_short protocol); +static err_t pppol2tp_destroy(ppp_pcb *ppp, void *ctx); /* Destroy a L2TP control block */ +static void pppol2tp_connect(ppp_pcb *ppp, void *ctx); /* Be a LAC, connect to a LNS. */ +static void pppol2tp_disconnect(ppp_pcb *ppp, void *ctx); /* Disconnect */ + + /* Prototypes for procedures local to this file. */ +static void pppol2tp_input(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port); +static void pppol2tp_dispatch_control_packet(pppol2tp_pcb *l2tp, u16_t port, struct pbuf *p, u16_t ns, u16_t nr); +static void pppol2tp_timeout(void *arg); +static void pppol2tp_abort_connect(pppol2tp_pcb *l2tp); +static err_t pppol2tp_send_sccrq(pppol2tp_pcb *l2tp); +static err_t pppol2tp_send_scccn(pppol2tp_pcb *l2tp, u16_t ns); +static err_t pppol2tp_send_icrq(pppol2tp_pcb *l2tp, u16_t ns); +static err_t pppol2tp_send_iccn(pppol2tp_pcb *l2tp, u16_t ns); +static err_t pppol2tp_send_zlb(pppol2tp_pcb *l2tp, u16_t ns, u16_t nr); +static err_t pppol2tp_send_stopccn(pppol2tp_pcb *l2tp, u16_t ns); +static err_t pppol2tp_xmit(pppol2tp_pcb *l2tp, struct pbuf *pb); +static err_t pppol2tp_udp_send(pppol2tp_pcb *l2tp, struct pbuf *pb); + +/* Callbacks structure for PPP core */ +static const struct link_callbacks pppol2tp_callbacks = { + pppol2tp_connect, +#if PPP_SERVER + NULL, +#endif /* PPP_SERVER */ + pppol2tp_disconnect, + pppol2tp_destroy, + pppol2tp_write, + pppol2tp_netif_output, + NULL, + NULL +}; + + +/* Create a new L2TP session. 
*/ +ppp_pcb *pppol2tp_create(struct netif *pppif, + struct netif *netif, const ip_addr_t *ipaddr, u16_t port, + const u8_t *secret, u8_t secret_len, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb) { + ppp_pcb *ppp; + pppol2tp_pcb *l2tp; + struct udp_pcb *udp; +#if !PPPOL2TP_AUTH_SUPPORT + LWIP_UNUSED_ARG(secret); + LWIP_UNUSED_ARG(secret_len); +#endif /* !PPPOL2TP_AUTH_SUPPORT */ + + if (ipaddr == NULL) { + goto ipaddr_check_failed; + } + + l2tp = (pppol2tp_pcb *)LWIP_MEMPOOL_ALLOC(PPPOL2TP_PCB); + if (l2tp == NULL) { + goto memp_malloc_l2tp_failed; + } + + udp = udp_new_ip_type(IP_GET_TYPE(ipaddr)); + if (udp == NULL) { + goto udp_new_failed; + } + udp_recv(udp, pppol2tp_input, l2tp); + + ppp = ppp_new(pppif, &pppol2tp_callbacks, l2tp, link_status_cb, ctx_cb); + if (ppp == NULL) { + goto ppp_new_failed; + } + + memset(l2tp, 0, sizeof(pppol2tp_pcb)); + l2tp->phase = PPPOL2TP_STATE_INITIAL; + l2tp->ppp = ppp; + l2tp->udp = udp; + l2tp->netif = netif; + ip_addr_copy(l2tp->remote_ip, *ipaddr); + l2tp->remote_port = port; +#if PPPOL2TP_AUTH_SUPPORT + l2tp->secret = secret; + l2tp->secret_len = secret_len; +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + return ppp; + +ppp_new_failed: + udp_remove(udp); +udp_new_failed: + LWIP_MEMPOOL_FREE(PPPOL2TP_PCB, l2tp); +memp_malloc_l2tp_failed: +ipaddr_check_failed: + return NULL; +} + +/* Called by PPP core */ +static err_t pppol2tp_write(ppp_pcb *ppp, void *ctx, struct pbuf *p) { + pppol2tp_pcb *l2tp = (pppol2tp_pcb *)ctx; + struct pbuf *ph; /* UDP + L2TP header */ + err_t ret; +#if MIB2_STATS + u16_t tot_len; +#else /* MIB2_STATS */ + LWIP_UNUSED_ARG(ppp); +#endif /* MIB2_STATS */ + + ph = pbuf_alloc(PBUF_TRANSPORT, (u16_t)(PPPOL2TP_OUTPUT_DATA_HEADER_LEN), PBUF_RAM); + if(!ph) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.proterr); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + pbuf_free(p); + return ERR_MEM; + } + + pbuf_remove_header(ph, PPPOL2TP_OUTPUT_DATA_HEADER_LEN); /* hide L2TP header */ + pbuf_cat(ph, p); +#if MIB2_STATS + tot_len = ph->tot_len; +#endif /* MIB2_STATS */ + + ret = pppol2tp_xmit(l2tp, ph); + if (ret != ERR_OK) { + LINK_STATS_INC(link.err); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return ret; + } + + MIB2_STATS_NETIF_ADD(ppp->netif, ifoutoctets, (u16_t)tot_len); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutucastpkts); + LINK_STATS_INC(link.xmit); + return ERR_OK; +} + +/* Called by PPP core */ +static err_t pppol2tp_netif_output(ppp_pcb *ppp, void *ctx, struct pbuf *p, u_short protocol) { + pppol2tp_pcb *l2tp = (pppol2tp_pcb *)ctx; + struct pbuf *pb; + u8_t *pl; + err_t err; +#if MIB2_STATS + u16_t tot_len; +#else /* MIB2_STATS */ + LWIP_UNUSED_ARG(ppp); +#endif /* MIB2_STATS */ + + /* @todo: try to use pbuf_header() here! 
*/ + pb = pbuf_alloc(PBUF_TRANSPORT, PPPOL2TP_OUTPUT_DATA_HEADER_LEN + sizeof(protocol), PBUF_RAM); + if(!pb) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.proterr); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return ERR_MEM; + } + + pbuf_remove_header(pb, PPPOL2TP_OUTPUT_DATA_HEADER_LEN); + + pl = (u8_t*)pb->payload; + PUTSHORT(protocol, pl); + + pbuf_chain(pb, p); +#if MIB2_STATS + tot_len = pb->tot_len; +#endif /* MIB2_STATS */ + + if( (err = pppol2tp_xmit(l2tp, pb)) != ERR_OK) { + LINK_STATS_INC(link.err); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return err; + } + + MIB2_STATS_NETIF_ADD(ppp->netif, ifoutoctets, tot_len); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutucastpkts); + LINK_STATS_INC(link.xmit); + return ERR_OK; +} + +/* Destroy a L2TP control block */ +static err_t pppol2tp_destroy(ppp_pcb *ppp, void *ctx) { + pppol2tp_pcb *l2tp = (pppol2tp_pcb *)ctx; + LWIP_UNUSED_ARG(ppp); + + sys_untimeout(pppol2tp_timeout, l2tp); + udp_remove(l2tp->udp); + LWIP_MEMPOOL_FREE(PPPOL2TP_PCB, l2tp); + return ERR_OK; +} + +/* Be a LAC, connect to a LNS. */ +static void pppol2tp_connect(ppp_pcb *ppp, void *ctx) { + err_t err; + pppol2tp_pcb *l2tp = (pppol2tp_pcb *)ctx; + lcp_options *lcp_wo; + lcp_options *lcp_ao; +#if PPP_IPV4_SUPPORT && VJ_SUPPORT + ipcp_options *ipcp_wo; + ipcp_options *ipcp_ao; +#endif /* PPP_IPV4_SUPPORT && VJ_SUPPORT */ + + l2tp->tunnel_port = l2tp->remote_port; + l2tp->our_ns = 0; + l2tp->peer_nr = 0; + l2tp->peer_ns = 0; + l2tp->source_tunnel_id = 0; + l2tp->remote_tunnel_id = 0; + l2tp->source_session_id = 0; + l2tp->remote_session_id = 0; + /* l2tp->*_retried are cleared when used */ + + lcp_wo = &ppp->lcp_wantoptions; + lcp_wo->mru = PPPOL2TP_DEFMRU; + lcp_wo->neg_asyncmap = 0; + lcp_wo->neg_pcompression = 0; + lcp_wo->neg_accompression = 0; + lcp_wo->passive = 0; + lcp_wo->silent = 0; + + lcp_ao = &ppp->lcp_allowoptions; + lcp_ao->mru = PPPOL2TP_DEFMRU; + lcp_ao->neg_asyncmap = 0; + lcp_ao->neg_pcompression = 0; + lcp_ao->neg_accompression = 0; + +#if PPP_IPV4_SUPPORT && VJ_SUPPORT + ipcp_wo = &ppp->ipcp_wantoptions; + ipcp_wo->neg_vj = 0; + ipcp_wo->old_vj = 0; + + ipcp_ao = &ppp->ipcp_allowoptions; + ipcp_ao->neg_vj = 0; + ipcp_ao->old_vj = 0; +#endif /* PPP_IPV4_SUPPORT && VJ_SUPPORT */ + + /* Listen to a random source port, we need to do that instead of using udp_connect() + * because the L2TP LNS might answer with its own random source port (!= 1701) + */ +#if LWIP_IPV6 + if (IP_IS_V6_VAL(l2tp->udp->local_ip)) { + udp_bind(l2tp->udp, IP6_ADDR_ANY, 0); + } else +#endif /* LWIP_IPV6 */ + udp_bind(l2tp->udp, IP_ADDR_ANY, 0); + +#if PPPOL2TP_AUTH_SUPPORT + /* Generate random vector */ + if (l2tp->secret != NULL) { + magic_random_bytes(l2tp->secret_rv, sizeof(l2tp->secret_rv)); + } +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + do { + l2tp->remote_tunnel_id = magic(); + } while(l2tp->remote_tunnel_id == 0); + /* save state, in case we fail to send SCCRQ */ + l2tp->sccrq_retried = 0; + l2tp->phase = PPPOL2TP_STATE_SCCRQ_SENT; + if ((err = pppol2tp_send_sccrq(l2tp)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send SCCRQ, error=%d\n", err)); + } + sys_timeout(PPPOL2TP_CONTROL_TIMEOUT, pppol2tp_timeout, l2tp); +} + +/* Disconnect */ +static void pppol2tp_disconnect(ppp_pcb *ppp, void *ctx) { + pppol2tp_pcb *l2tp = (pppol2tp_pcb *)ctx; + + l2tp->our_ns++; + pppol2tp_send_stopccn(l2tp, l2tp->our_ns); + + /* stop any timer, disconnect can be called while initiating is in progress */ + sys_untimeout(pppol2tp_timeout, l2tp); + l2tp->phase = 
PPPOL2TP_STATE_INITIAL; + ppp_link_end(ppp); /* notify upper layers */ +} + +/* UDP Callback for incoming IPv4 L2TP frames */ +static void pppol2tp_input(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) { + pppol2tp_pcb *l2tp = (pppol2tp_pcb*)arg; + u16_t hflags, hlen, len=0, tunnel_id=0, session_id=0, ns=0, nr=0, offset=0; + u8_t *inp; + LWIP_UNUSED_ARG(pcb); + + /* we can't unbound a UDP pcb, thus we can still receive UDP frames after the link is closed */ + if (l2tp->phase < PPPOL2TP_STATE_SCCRQ_SENT) { + goto free_and_return; + } + + if (!ip_addr_cmp(&l2tp->remote_ip, addr)) { + goto free_and_return; + } + + /* discard packet if port mismatch, but only if we received a SCCRP */ + if (l2tp->phase > PPPOL2TP_STATE_SCCRQ_SENT && l2tp->tunnel_port != port) { + goto free_and_return; + } + + /* printf("-----------\nL2TP INPUT, %d\n", p->len); */ + + /* L2TP header */ + if (p->len < sizeof(hflags) + sizeof(tunnel_id) + sizeof(session_id) ) { + goto packet_too_short; + } + + inp = (u8_t*)p->payload; + GETSHORT(hflags, inp); + + if (hflags & PPPOL2TP_HEADERFLAG_CONTROL) { + /* check mandatory flags for a control packet */ + if ( (hflags & PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY) != PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY ) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: mandatory header flags for control packet not set\n")); + goto free_and_return; + } + /* check forbidden flags for a control packet */ + if (hflags & PPPOL2TP_HEADERFLAG_CONTROL_FORBIDDEN) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: forbidden header flags for control packet found\n")); + goto free_and_return; + } + } else { + /* check mandatory flags for a data packet */ + if ( (hflags & PPPOL2TP_HEADERFLAG_DATA_MANDATORY) != PPPOL2TP_HEADERFLAG_DATA_MANDATORY) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: mandatory header flags for data packet not set\n")); + goto free_and_return; + } + } + + /* Expected header size */ + hlen = sizeof(hflags) + sizeof(tunnel_id) + sizeof(session_id); + if (hflags & PPPOL2TP_HEADERFLAG_LENGTH) { + hlen += sizeof(len); + } + if (hflags & PPPOL2TP_HEADERFLAG_SEQUENCE) { + hlen += sizeof(ns) + sizeof(nr); + } + if (hflags & PPPOL2TP_HEADERFLAG_OFFSET) { + hlen += sizeof(offset); + } + if (p->len < hlen) { + goto packet_too_short; + } + + if (hflags & PPPOL2TP_HEADERFLAG_LENGTH) { + GETSHORT(len, inp); + if (p->len < len || len < hlen) { + goto packet_too_short; + } + } + GETSHORT(tunnel_id, inp); + GETSHORT(session_id, inp); + if (hflags & PPPOL2TP_HEADERFLAG_SEQUENCE) { + GETSHORT(ns, inp); + GETSHORT(nr, inp); + } + if (hflags & PPPOL2TP_HEADERFLAG_OFFSET) { + GETSHORT(offset, inp) + if (offset > 4096) { /* don't be fooled with large offset which might overflow hlen */ + PPPDEBUG(LOG_DEBUG, ("pppol2tp: strange packet received, offset=%d\n", offset)); + goto free_and_return; + } + hlen += offset; + if (p->len < hlen) { + goto packet_too_short; + } + INCPTR(offset, inp); + } + + /* printf("HLEN = %d\n", hlen); */ + + /* skip L2TP header */ + if (pbuf_remove_header(p, hlen) != 0) { + goto free_and_return; + } + + /* printf("LEN=%d, TUNNEL_ID=%d, SESSION_ID=%d, NS=%d, NR=%d, OFFSET=%d\n", len, tunnel_id, session_id, ns, nr, offset); */ + PPPDEBUG(LOG_DEBUG, ("pppol2tp: input packet, len=%"U16_F", tunnel=%"U16_F", session=%"U16_F", ns=%"U16_F", nr=%"U16_F"\n", + len, tunnel_id, session_id, ns, nr)); + + /* Control packet */ + if (hflags & PPPOL2TP_HEADERFLAG_CONTROL) { + pppol2tp_dispatch_control_packet(l2tp, port, p, ns, nr); + goto free_and_return; + } + + /* Data packet */ + if(l2tp->phase != 
PPPOL2TP_STATE_DATA) { + goto free_and_return; + } + if(tunnel_id != l2tp->remote_tunnel_id) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: tunnel ID mismatch, assigned=%d, received=%d\n", l2tp->remote_tunnel_id, tunnel_id)); + goto free_and_return; + } + if(session_id != l2tp->remote_session_id) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: session ID mismatch, assigned=%d, received=%d\n", l2tp->remote_session_id, session_id)); + goto free_and_return; + } + /* + * skip address & flags if necessary + * + * RFC 2661 does not specify whether the PPP frame in the L2TP payload should + * have a HDLC header or not. We handle both cases for compatibility. + */ + if (p->len >= 2) { + GETSHORT(hflags, inp); + if (hflags == 0xff03) { + pbuf_remove_header(p, 2); + } + } + /* Dispatch the packet thereby consuming it. */ + ppp_input(l2tp->ppp, p); + return; + +packet_too_short: + PPPDEBUG(LOG_DEBUG, ("pppol2tp: packet too short: %d\n", p->len)); +free_and_return: + pbuf_free(p); +} + +/* L2TP Control packet entry point */ +static void pppol2tp_dispatch_control_packet(pppol2tp_pcb *l2tp, u16_t port, struct pbuf *p, u16_t ns, u16_t nr) { + u8_t *inp; + u16_t avplen, avpflags, vendorid, attributetype, messagetype=0; + err_t err; +#if PPPOL2TP_AUTH_SUPPORT + lwip_md5_context md5_ctx; + u8_t md5_hash[16]; + u8_t challenge_id = 0; +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + /* printf("L2TP CTRL INPUT, ns=%d, nr=%d, len=%d\n", ns, nr, p->len); */ + + /* Drop unexpected packet */ + if (ns != l2tp->peer_ns) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: drop unexpected packet: received NS=%d, expected NS=%d\n", ns, l2tp->peer_ns)); + /* + * In order to ensure that all messages are acknowledged properly + * (particularly in the case of a lost ZLB ACK message), receipt + * of duplicate messages MUST be acknowledged. + * + * In this very special case we Ack a packet we previously received. + * Therefore our NS is the NR we just received. And our NR is the + * NS we just received plus one. 
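+ * (The L2TP control channel is reliable, see RFC 2661: a Zero-Length Body
+ * message, i.e. a header-only packet, is the explicit acknowledgement.
+ * Example: we expect NS=4 and the peer retransmits NS=3/NR=7; we answer
+ * with a ZLB carrying NS=7 and NR=4.)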
+ */ + if ((s16_t)(ns - l2tp->peer_ns) < 0) { + pppol2tp_send_zlb(l2tp, nr, ns+1); + } + return; + } + + l2tp->peer_nr = nr; + + /* Handle the special case of the ICCN acknowledge */ + if (l2tp->phase == PPPOL2TP_STATE_ICCN_SENT && (s16_t)(l2tp->peer_nr - l2tp->our_ns) > 0) { + l2tp->phase = PPPOL2TP_STATE_DATA; + sys_untimeout(pppol2tp_timeout, l2tp); + ppp_start(l2tp->ppp); /* notify upper layers */ + } + + /* ZLB packets */ + if (p->tot_len == 0) { + return; + } + /* A ZLB packet does not consume a NS slot thus we don't record the NS value for ZLB packets */ + l2tp->peer_ns = ns+1; + + p = pbuf_coalesce(p, PBUF_RAW); + inp = (u8_t*)p->payload; + /* Decode AVPs */ + while (p->len > 0) { + if (p->len < sizeof(avpflags) + sizeof(vendorid) + sizeof(attributetype) ) { + goto packet_too_short; + } + GETSHORT(avpflags, inp); + avplen = avpflags & PPPOL2TP_AVPHEADERFLAG_LENGTHMASK; + /* printf("AVPLEN = %d\n", avplen); */ + if (p->len < avplen || avplen < sizeof(avpflags) + sizeof(vendorid) + sizeof(attributetype)) { + goto packet_too_short; + } + GETSHORT(vendorid, inp); + GETSHORT(attributetype, inp); + avplen -= sizeof(avpflags) + sizeof(vendorid) + sizeof(attributetype); + + /* Message type must be the first AVP */ + if (messagetype == 0) { + if (attributetype != 0 || vendorid != 0 || avplen != sizeof(messagetype) ) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: message type must be the first AVP\n")); + return; + } + GETSHORT(messagetype, inp); + /* printf("Message type = %d\n", messagetype); */ + switch(messagetype) { + /* Start Control Connection Reply */ + case PPPOL2TP_MESSAGETYPE_SCCRP: + /* Only accept SCCRP packet if we sent a SCCRQ */ + if (l2tp->phase != PPPOL2TP_STATE_SCCRQ_SENT) { + goto send_zlb; + } + break; + /* Incoming Call Reply */ + case PPPOL2TP_MESSAGETYPE_ICRP: + /* Only accept ICRP packet if we sent a IRCQ */ + if (l2tp->phase != PPPOL2TP_STATE_ICRQ_SENT) { + goto send_zlb; + } + break; + /* Stop Control Connection Notification */ + case PPPOL2TP_MESSAGETYPE_STOPCCN: + pppol2tp_send_zlb(l2tp, l2tp->our_ns+1, l2tp->peer_ns); /* Ack the StopCCN before we switch to down state */ + if (l2tp->phase < PPPOL2TP_STATE_DATA) { + pppol2tp_abort_connect(l2tp); + } else if (l2tp->phase == PPPOL2TP_STATE_DATA) { + /* Don't disconnect here, we let the LCP Echo/Reply find the fact + * that PPP session is down. Asking the PPP stack to end the session + * require strict checking about the PPP phase to prevent endless + * disconnection loops. 
+ */ + } + return; + default: + break; + } + goto nextavp; + } + + /* Skip proprietary L2TP extensions */ + if (vendorid != 0) { + goto skipavp; + } + + switch (messagetype) { + /* Start Control Connection Reply */ + case PPPOL2TP_MESSAGETYPE_SCCRP: + switch (attributetype) { + case PPPOL2TP_AVPTYPE_TUNNELID: + if (avplen != sizeof(l2tp->source_tunnel_id) ) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: AVP Assign tunnel ID length check failed\n")); + return; + } + GETSHORT(l2tp->source_tunnel_id, inp); + PPPDEBUG(LOG_DEBUG, ("pppol2tp: Assigned tunnel ID %"U16_F"\n", l2tp->source_tunnel_id)); + goto nextavp; +#if PPPOL2TP_AUTH_SUPPORT + case PPPOL2TP_AVPTYPE_CHALLENGE: + if (avplen == 0) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: Challenge length check failed\n")); + return; + } + if (l2tp->secret == NULL) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: Received challenge from peer and no secret key available\n")); + pppol2tp_abort_connect(l2tp); + return; + } + /* Generate hash of ID, secret, challenge */ + lwip_md5_init(&md5_ctx); + lwip_md5_starts(&md5_ctx); + challenge_id = PPPOL2TP_MESSAGETYPE_SCCCN; + lwip_md5_update(&md5_ctx, &challenge_id, 1); + lwip_md5_update(&md5_ctx, l2tp->secret, l2tp->secret_len); + lwip_md5_update(&md5_ctx, inp, avplen); + lwip_md5_finish(&md5_ctx, l2tp->challenge_hash); + lwip_md5_free(&md5_ctx); + l2tp->send_challenge = 1; + goto skipavp; + case PPPOL2TP_AVPTYPE_CHALLENGERESPONSE: + if (avplen != PPPOL2TP_AVPTYPE_CHALLENGERESPONSE_SIZE) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: AVP Challenge Response length check failed\n")); + return; + } + /* Generate hash of ID, secret, challenge */ + lwip_md5_init(&md5_ctx); + lwip_md5_starts(&md5_ctx); + challenge_id = PPPOL2TP_MESSAGETYPE_SCCRP; + lwip_md5_update(&md5_ctx, &challenge_id, 1); + lwip_md5_update(&md5_ctx, l2tp->secret, l2tp->secret_len); + lwip_md5_update(&md5_ctx, l2tp->secret_rv, sizeof(l2tp->secret_rv)); + lwip_md5_finish(&md5_ctx, md5_hash); + lwip_md5_free(&md5_ctx); + if ( memcmp(inp, md5_hash, sizeof(md5_hash)) ) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: Received challenge response from peer and secret key do not match\n")); + pppol2tp_abort_connect(l2tp); + return; + } + goto skipavp; +#endif /* PPPOL2TP_AUTH_SUPPORT */ + default: + break; + } + break; + /* Incoming Call Reply */ + case PPPOL2TP_MESSAGETYPE_ICRP: + switch (attributetype) { + case PPPOL2TP_AVPTYPE_SESSIONID: + if (avplen != sizeof(l2tp->source_session_id) ) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: AVP Assign session ID length check failed\n")); + return; + } + GETSHORT(l2tp->source_session_id, inp); + PPPDEBUG(LOG_DEBUG, ("pppol2tp: Assigned session ID %"U16_F"\n", l2tp->source_session_id)); + goto nextavp; + default: + break; + } + break; + default: + break; + } + +skipavp: + INCPTR(avplen, inp); +nextavp: + /* printf("AVP Found, vendor=%d, attribute=%d, len=%d\n", vendorid, attributetype, avplen); */ + /* next AVP */ + if (pbuf_remove_header(p, avplen + sizeof(avpflags) + sizeof(vendorid) + sizeof(attributetype)) != 0) { + return; + } + } + + switch(messagetype) { + /* Start Control Connection Reply */ + case PPPOL2TP_MESSAGETYPE_SCCRP: + do { + l2tp->remote_session_id = magic(); + } while(l2tp->remote_session_id == 0); + l2tp->tunnel_port = port; /* LNS server might have chosen its own local port */ + l2tp->icrq_retried = 0; + l2tp->phase = PPPOL2TP_STATE_ICRQ_SENT; + l2tp->our_ns++; + if ((err = pppol2tp_send_scccn(l2tp, l2tp->our_ns)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send SCCCN, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ 
+ } + l2tp->our_ns++; + if ((err = pppol2tp_send_icrq(l2tp, l2tp->our_ns)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send ICRQ, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_untimeout(pppol2tp_timeout, l2tp); + sys_timeout(PPPOL2TP_CONTROL_TIMEOUT, pppol2tp_timeout, l2tp); + break; + /* Incoming Call Reply */ + case PPPOL2TP_MESSAGETYPE_ICRP: + l2tp->iccn_retried = 0; + l2tp->phase = PPPOL2TP_STATE_ICCN_SENT; + l2tp->our_ns++; + if ((err = pppol2tp_send_iccn(l2tp, l2tp->our_ns)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send ICCN, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_untimeout(pppol2tp_timeout, l2tp); + sys_timeout(PPPOL2TP_CONTROL_TIMEOUT, pppol2tp_timeout, l2tp); + break; + /* Unhandled packet, send ZLB ACK */ + default: + goto send_zlb; + } + return; + +send_zlb: + pppol2tp_send_zlb(l2tp, l2tp->our_ns+1, l2tp->peer_ns); + return; +packet_too_short: + PPPDEBUG(LOG_DEBUG, ("pppol2tp: packet too short: %d\n", p->len)); +} + +/* L2TP Timeout handler */ +static void pppol2tp_timeout(void *arg) { + pppol2tp_pcb *l2tp = (pppol2tp_pcb*)arg; + err_t err; + u32_t retry_wait; + + PPPDEBUG(LOG_DEBUG, ("pppol2tp: timeout\n")); + + switch (l2tp->phase) { + case PPPOL2TP_STATE_SCCRQ_SENT: + /* backoff wait */ + if (l2tp->sccrq_retried < 0xff) { + l2tp->sccrq_retried++; + } + if (!l2tp->ppp->settings.persist && l2tp->sccrq_retried >= PPPOL2TP_MAXSCCRQ) { + pppol2tp_abort_connect(l2tp); + return; + } + retry_wait = LWIP_MIN(PPPOL2TP_CONTROL_TIMEOUT * l2tp->sccrq_retried, PPPOL2TP_SLOW_RETRY); + PPPDEBUG(LOG_DEBUG, ("pppol2tp: sccrq_retried=%d\n", l2tp->sccrq_retried)); + if ((err = pppol2tp_send_sccrq(l2tp)) != 0) { + l2tp->sccrq_retried--; + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send SCCRQ, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(retry_wait, pppol2tp_timeout, l2tp); + break; + + case PPPOL2TP_STATE_ICRQ_SENT: + l2tp->icrq_retried++; + if (l2tp->icrq_retried >= PPPOL2TP_MAXICRQ) { + pppol2tp_abort_connect(l2tp); + return; + } + PPPDEBUG(LOG_DEBUG, ("pppol2tp: icrq_retried=%d\n", l2tp->icrq_retried)); + if ((s16_t)(l2tp->peer_nr - l2tp->our_ns) < 0) { /* the SCCCN was not acknowledged */ + if ((err = pppol2tp_send_scccn(l2tp, l2tp->our_ns -1)) != 0) { + l2tp->icrq_retried--; + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send SCCCN, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + sys_timeout(PPPOL2TP_CONTROL_TIMEOUT, pppol2tp_timeout, l2tp); + break; + } + } + if ((err = pppol2tp_send_icrq(l2tp, l2tp->our_ns)) != 0) { + l2tp->icrq_retried--; + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send ICRQ, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(PPPOL2TP_CONTROL_TIMEOUT, pppol2tp_timeout, l2tp); + break; + + case PPPOL2TP_STATE_ICCN_SENT: + l2tp->iccn_retried++; + if (l2tp->iccn_retried >= PPPOL2TP_MAXICCN) { + pppol2tp_abort_connect(l2tp); + return; + } + PPPDEBUG(LOG_DEBUG, ("pppol2tp: iccn_retried=%d\n", l2tp->iccn_retried)); + if ((err = pppol2tp_send_iccn(l2tp, l2tp->our_ns)) != 0) { + l2tp->iccn_retried--; + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send ICCN, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(PPPOL2TP_CONTROL_TIMEOUT, pppol2tp_timeout, l2tp); + break; + + default: + return; /* all done, work in peace */ + } +} + +/* Connection attempt aborted */ +static void pppol2tp_abort_connect(pppol2tp_pcb *l2tp) { + 
PPPDEBUG(LOG_DEBUG, ("pppol2tp: could not establish connection\n")); + l2tp->phase = PPPOL2TP_STATE_INITIAL; + ppp_link_failed(l2tp->ppp); /* notify upper layers */ +} + +/* Initiate a new tunnel */ +static err_t pppol2tp_send_sccrq(pppol2tp_pcb *l2tp) { + struct pbuf *pb; + u8_t *p; + u16_t len; + + /* calculate UDP packet length */ + len = 12 +8 +8 +10 +10 +6+sizeof(PPPOL2TP_HOSTNAME)-1 +6+sizeof(PPPOL2TP_VENDORNAME)-1 +8 +8; +#if PPPOL2TP_AUTH_SUPPORT + if (l2tp->secret != NULL) { + len += 6 + sizeof(l2tp->secret_rv); + } +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM); + if (pb == NULL) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + /* L2TP control header */ + PUTSHORT(PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY, p); + PUTSHORT(len, p); /* Length */ + PUTSHORT(0, p); /* Tunnel Id */ + PUTSHORT(0, p); /* Session Id */ + PUTSHORT(0, p); /* NS Sequence number - to peer */ + PUTSHORT(0, p); /* NR Sequence number - expected for peer */ + + /* AVP - Message type */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_MESSAGE, p); /* Attribute type: Message Type */ + PUTSHORT(PPPOL2TP_MESSAGETYPE_SCCRQ, p); /* Attribute value: Message type: SCCRQ */ + + /* AVP - L2TP Version */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_VERSION, p); /* Attribute type: Version */ + PUTSHORT(PPPOL2TP_VERSION, p); /* Attribute value: L2TP Version */ + + /* AVP - Framing capabilities */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 10, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_FRAMINGCAPABILITIES, p); /* Attribute type: Framing capabilities */ + PUTLONG(PPPOL2TP_FRAMINGCAPABILITIES, p); /* Attribute value: Framing capabilities */ + + /* AVP - Bearer capabilities */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 10, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_BEARERCAPABILITIES, p); /* Attribute type: Bearer capabilities */ + PUTLONG(PPPOL2TP_BEARERCAPABILITIES, p); /* Attribute value: Bearer capabilities */ + + /* AVP - Host name */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 6+sizeof(PPPOL2TP_HOSTNAME)-1, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_HOSTNAME, p); /* Attribute type: Hostname */ + MEMCPY(p, PPPOL2TP_HOSTNAME, sizeof(PPPOL2TP_HOSTNAME)-1); /* Attribute value: Hostname */ + INCPTR(sizeof(PPPOL2TP_HOSTNAME)-1, p); + + /* AVP - Vendor name */ + PUTSHORT(6+sizeof(PPPOL2TP_VENDORNAME)-1, p); /* len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_VENDORNAME, p); /* Attribute type: Vendor name */ + MEMCPY(p, PPPOL2TP_VENDORNAME, sizeof(PPPOL2TP_VENDORNAME)-1); /* Attribute value: Vendor name */ + INCPTR(sizeof(PPPOL2TP_VENDORNAME)-1, p); + + /* AVP - Assign tunnel ID */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_TUNNELID, p); /* Attribute type: Tunnel ID */ + PUTSHORT(l2tp->remote_tunnel_id, p); /* Attribute value: Tunnel ID */ + + /* AVP - Receive window size */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* 
Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_RECEIVEWINDOWSIZE, p); /* Attribute type: Receive window size */ + PUTSHORT(PPPOL2TP_RECEIVEWINDOWSIZE, p); /* Attribute value: Receive window size */ + +#if PPPOL2TP_AUTH_SUPPORT + /* AVP - Challenge */ + if (l2tp->secret != NULL) { + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 6 + sizeof(l2tp->secret_rv), p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_CHALLENGE, p); /* Attribute type: Challenge */ + MEMCPY(p, l2tp->secret_rv, sizeof(l2tp->secret_rv)); /* Attribute value: Random vector */ + INCPTR(sizeof(l2tp->secret_rv), p); + } +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + return pppol2tp_udp_send(l2tp, pb); +} + +/* Complete tunnel establishment */ +static err_t pppol2tp_send_scccn(pppol2tp_pcb *l2tp, u16_t ns) { + struct pbuf *pb; + u8_t *p; + u16_t len; + + /* calculate UDP packet length */ + len = 12 +8; +#if PPPOL2TP_AUTH_SUPPORT + if (l2tp->send_challenge) { + len += 6 + sizeof(l2tp->challenge_hash); + } +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM); + if (pb == NULL) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + /* L2TP control header */ + PUTSHORT(PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY, p); + PUTSHORT(len, p); /* Length */ + PUTSHORT(l2tp->source_tunnel_id, p); /* Tunnel Id */ + PUTSHORT(0, p); /* Session Id */ + PUTSHORT(ns, p); /* NS Sequence number - to peer */ + PUTSHORT(l2tp->peer_ns, p); /* NR Sequence number - expected for peer */ + + /* AVP - Message type */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_MESSAGE, p); /* Attribute type: Message Type */ + PUTSHORT(PPPOL2TP_MESSAGETYPE_SCCCN, p); /* Attribute value: Message type: SCCCN */ + +#if PPPOL2TP_AUTH_SUPPORT + /* AVP - Challenge response */ + if (l2tp->send_challenge) { + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 6 + sizeof(l2tp->challenge_hash), p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_CHALLENGERESPONSE, p); /* Attribute type: Challenge response */ + MEMCPY(p, l2tp->challenge_hash, sizeof(l2tp->challenge_hash)); /* Attribute value: Computed challenge */ + INCPTR(sizeof(l2tp->challenge_hash), p); + } +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + return pppol2tp_udp_send(l2tp, pb); +} + +/* Initiate a new session */ +static err_t pppol2tp_send_icrq(pppol2tp_pcb *l2tp, u16_t ns) { + struct pbuf *pb; + u8_t *p; + u16_t len; + u32_t serialnumber; + + /* calculate UDP packet length */ + len = 12 +8 +8 +10; + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM); + if (pb == NULL) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + /* L2TP control header */ + PUTSHORT(PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY, p); + PUTSHORT(len, p); /* Length */ + PUTSHORT(l2tp->source_tunnel_id, p); /* Tunnel Id */ + PUTSHORT(0, p); /* Session Id */ + PUTSHORT(ns, p); /* NS Sequence number - to peer */ + PUTSHORT(l2tp->peer_ns, p); /* NR Sequence number - expected for peer */ + + /* AVP - Message type */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_MESSAGE, p); /* Attribute type: Message Type */ + PUTSHORT(PPPOL2TP_MESSAGETYPE_ICRQ, p); /* 
Attribute value: Message type: ICRQ */ + + /* AVP - Assign session ID */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_SESSIONID, p); /* Attribute type: Session ID */ + PUTSHORT(l2tp->remote_session_id, p); /* Attribute value: Session ID */ + + /* AVP - Call Serial Number */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 10, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_CALLSERIALNUMBER, p); /* Attribute type: Serial number */ + serialnumber = magic(); + PUTLONG(serialnumber, p); /* Attribute value: Serial number */ + + return pppol2tp_udp_send(l2tp, pb); +} + +/* Complete tunnel establishment */ +static err_t pppol2tp_send_iccn(pppol2tp_pcb *l2tp, u16_t ns) { + struct pbuf *pb; + u8_t *p; + u16_t len; + + /* calculate UDP packet length */ + len = 12 +8 +10 +10; + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM); + if (pb == NULL) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + /* L2TP control header */ + PUTSHORT(PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY, p); + PUTSHORT(len, p); /* Length */ + PUTSHORT(l2tp->source_tunnel_id, p); /* Tunnel Id */ + PUTSHORT(l2tp->source_session_id, p); /* Session Id */ + PUTSHORT(ns, p); /* NS Sequence number - to peer */ + PUTSHORT(l2tp->peer_ns, p); /* NR Sequence number - expected for peer */ + + /* AVP - Message type */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_MESSAGE, p); /* Attribute type: Message Type */ + PUTSHORT(PPPOL2TP_MESSAGETYPE_ICCN, p); /* Attribute value: Message type: ICCN */ + + /* AVP - Framing type */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 10, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_FRAMINGTYPE, p); /* Attribute type: Framing type */ + PUTLONG(PPPOL2TP_FRAMINGTYPE, p); /* Attribute value: Framing type */ + + /* AVP - TX Connect speed */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 10, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_TXCONNECTSPEED, p); /* Attribute type: TX Connect speed */ + PUTLONG(PPPOL2TP_TXCONNECTSPEED, p); /* Attribute value: TX Connect speed */ + + return pppol2tp_udp_send(l2tp, pb); +} + +/* Send a ZLB ACK packet */ +static err_t pppol2tp_send_zlb(pppol2tp_pcb *l2tp, u16_t ns, u16_t nr) { + struct pbuf *pb; + u8_t *p; + u16_t len; + + /* calculate UDP packet length */ + len = 12; + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM); + if (pb == NULL) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + /* L2TP control header */ + PUTSHORT(PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY, p); + PUTSHORT(len, p); /* Length */ + PUTSHORT(l2tp->source_tunnel_id, p); /* Tunnel Id */ + PUTSHORT(0, p); /* Session Id */ + PUTSHORT(ns, p); /* NS Sequence number - to peer */ + PUTSHORT(nr, p); /* NR Sequence number - expected for peer */ + + return pppol2tp_udp_send(l2tp, pb); +} + +/* Send a StopCCN packet */ +static err_t pppol2tp_send_stopccn(pppol2tp_pcb *l2tp, u16_t ns) { + struct pbuf *pb; + u8_t *p; + u16_t len; + + /* calculate UDP packet length */ + len = 12 +8 +8 +8; + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_TRANSPORT, len, 
PBUF_RAM); + if (pb == NULL) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + /* L2TP control header */ + PUTSHORT(PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY, p); + PUTSHORT(len, p); /* Length */ + PUTSHORT(l2tp->source_tunnel_id, p); /* Tunnel Id */ + PUTSHORT(0, p); /* Session Id */ + PUTSHORT(ns, p); /* NS Sequence number - to peer */ + PUTSHORT(l2tp->peer_ns, p); /* NR Sequence number - expected for peer */ + + /* AVP - Message type */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_MESSAGE, p); /* Attribute type: Message Type */ + PUTSHORT(PPPOL2TP_MESSAGETYPE_STOPCCN, p); /* Attribute value: Message type: StopCCN */ + + /* AVP - Assign tunnel ID */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_TUNNELID, p); /* Attribute type: Tunnel ID */ + PUTSHORT(l2tp->remote_tunnel_id, p); /* Attribute value: Tunnel ID */ + + /* AVP - Result code */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_RESULTCODE, p); /* Attribute type: Result code */ + PUTSHORT(PPPOL2TP_RESULTCODE, p); /* Attribute value: Result code */ + + return pppol2tp_udp_send(l2tp, pb); +} + +static err_t pppol2tp_xmit(pppol2tp_pcb *l2tp, struct pbuf *pb) { + u8_t *p; + + /* make room for L2TP header - should not fail */ + if (pbuf_add_header(pb, PPPOL2TP_OUTPUT_DATA_HEADER_LEN) != 0) { + /* bail out */ + PPPDEBUG(LOG_ERR, ("pppol2tp: pppol2tp_pcb: could not allocate room for L2TP header\n")); + LINK_STATS_INC(link.lenerr); + pbuf_free(pb); + return ERR_BUF; + } + + p = (u8_t*)pb->payload; + PUTSHORT(PPPOL2TP_HEADERFLAG_DATA_MANDATORY, p); + PUTSHORT(l2tp->source_tunnel_id, p); /* Tunnel Id */ + PUTSHORT(l2tp->source_session_id, p); /* Session Id */ + + return pppol2tp_udp_send(l2tp, pb); +} + +static err_t pppol2tp_udp_send(pppol2tp_pcb *l2tp, struct pbuf *pb) { + err_t err; + if (l2tp->netif) { + err = udp_sendto_if(l2tp->udp, pb, &l2tp->remote_ip, l2tp->tunnel_port, l2tp->netif); + } else { + err = udp_sendto(l2tp->udp, pb, &l2tp->remote_ip, l2tp->tunnel_port); + } + pbuf_free(pb); + return err; +} + +#endif /* PPP_SUPPORT && PPPOL2TP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/pppos.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/pppos.c new file mode 100644 index 0000000..dff0255 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/pppos.c @@ -0,0 +1,895 @@ +/** + * @file + * Network Point to Point Protocol over Serial file. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPPOS_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include + +#include "lwip/arch.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" +#include "lwip/sys.h" +#include "lwip/memp.h" +#include "lwip/netif.h" +#include "lwip/snmp.h" +#include "lwip/priv/tcpip_priv.h" +#include "lwip/api.h" +#include "lwip/ip4.h" /* for ip4_input() */ + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/pppos.h" +#include "netif/ppp/vj.h" + +/* Memory pool */ +LWIP_MEMPOOL_DECLARE(PPPOS_PCB, MEMP_NUM_PPPOS_INTERFACES, sizeof(pppos_pcb), "PPPOS_PCB") + +/* callbacks called from PPP core */ +static err_t pppos_write(ppp_pcb *ppp, void *ctx, struct pbuf *p); +static err_t pppos_netif_output(ppp_pcb *ppp, void *ctx, struct pbuf *pb, u16_t protocol); +static void pppos_connect(ppp_pcb *ppp, void *ctx); +#if PPP_SERVER +static void pppos_listen(ppp_pcb *ppp, void *ctx); +#endif /* PPP_SERVER */ +static void pppos_disconnect(ppp_pcb *ppp, void *ctx); +static err_t pppos_destroy(ppp_pcb *ppp, void *ctx); +static void pppos_send_config(ppp_pcb *ppp, void *ctx, u32_t accm, int pcomp, int accomp); +static void pppos_recv_config(ppp_pcb *ppp, void *ctx, u32_t accm, int pcomp, int accomp); + +/* Prototypes for procedures local to this file. */ +#if PPP_INPROC_IRQ_SAFE +static void pppos_input_callback(void *arg); +#endif /* PPP_INPROC_IRQ_SAFE */ +static void pppos_input_free_current_packet(pppos_pcb *pppos); +static void pppos_input_drop(pppos_pcb *pppos); +static err_t pppos_output_append(pppos_pcb *pppos, err_t err, struct pbuf *nb, u8_t c, u8_t accm, u16_t *fcs); +static err_t pppos_output_last(pppos_pcb *pppos, err_t err, struct pbuf *nb, u16_t *fcs); + +/* Callbacks structure for PPP core */ +static const struct link_callbacks pppos_callbacks = { + pppos_connect, +#if PPP_SERVER + pppos_listen, +#endif /* PPP_SERVER */ + pppos_disconnect, + pppos_destroy, + pppos_write, + pppos_netif_output, + pppos_send_config, + pppos_recv_config +}; + +/* PPP's Asynchronous-Control-Character-Map. The mask array is used + * to select the specific bit for a character. */ +#define ESCAPE_P(accm, c) ((accm)[(c) >> 3] & 1 << (c & 0x07)) + +#if PPP_FCS_TABLE +/* + * FCS lookup table as calculated by genfcstab. 
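+ * This is the FCS-16 of RFC 1662 (PPP in HDLC-like framing): a reflected
+ * CRC-16 with polynomial x^16 + x^12 + x^5 + 1, one table entry per
+ * possible input byte.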
+ */ +static const u16_t fcstab[256] = { + 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf, + 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7, + 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e, + 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876, + 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd, + 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5, + 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c, + 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974, + 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb, + 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3, + 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a, + 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72, + 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9, + 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1, + 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738, + 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70, + 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7, + 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff, + 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036, + 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e, + 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5, + 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd, + 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134, + 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c, + 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3, + 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb, + 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232, + 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a, + 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1, + 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9, + 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330, + 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78 +}; +#define PPP_FCS(fcs, c) (((fcs) >> 8) ^ fcstab[((fcs) ^ (c)) & 0xff]) +#else /* PPP_FCS_TABLE */ +/* The HDLC polynomial: X**0 + X**5 + X**12 + X**16 (0x8408) */ +#define PPP_FCS_POLYNOMIAL 0x8408 +static u16_t +ppp_get_fcs(u8_t byte) +{ + unsigned int octet; + int bit; + octet = byte; + for (bit = 8; bit-- > 0; ) { + octet = (octet & 0x01) ? ((octet >> 1) ^ PPP_FCS_POLYNOMIAL) : (octet >> 1); + } + return octet & 0xffff; +} +#define PPP_FCS(fcs, c) (((fcs) >> 8) ^ ppp_get_fcs(((fcs) ^ (c)) & 0xff)) +#endif /* PPP_FCS_TABLE */ + +/* + * Values for FCS calculations. + */ +#define PPP_INITFCS 0xffff /* Initial FCS value */ +#define PPP_GOODFCS 0xf0b8 /* Good final FCS value */ + +#if PPP_INPROC_IRQ_SAFE +#define PPPOS_DECL_PROTECT(lev) SYS_ARCH_DECL_PROTECT(lev) +#define PPPOS_PROTECT(lev) SYS_ARCH_PROTECT(lev) +#define PPPOS_UNPROTECT(lev) SYS_ARCH_UNPROTECT(lev) +#else +#define PPPOS_DECL_PROTECT(lev) +#define PPPOS_PROTECT(lev) +#define PPPOS_UNPROTECT(lev) +#endif /* PPP_INPROC_IRQ_SAFE */ + + +/* + * Create a new PPP connection using the given serial I/O device. + * + * Return 0 on success, an error code on failure. 
+ */ +ppp_pcb *pppos_create(struct netif *pppif, pppos_output_cb_fn output_cb, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb) +{ + pppos_pcb *pppos; + ppp_pcb *ppp; + LWIP_ASSERT_CORE_LOCKED(); + + pppos = (pppos_pcb *)LWIP_MEMPOOL_ALLOC(PPPOS_PCB); + if (pppos == NULL) { + return NULL; + } + + ppp = ppp_new(pppif, &pppos_callbacks, pppos, link_status_cb, ctx_cb); + if (ppp == NULL) { + LWIP_MEMPOOL_FREE(PPPOS_PCB, pppos); + return NULL; + } + + memset(pppos, 0, sizeof(pppos_pcb)); + pppos->ppp = ppp; + pppos->output_cb = output_cb; + return ppp; +} + +/* Called by PPP core */ +static err_t +pppos_write(ppp_pcb *ppp, void *ctx, struct pbuf *p) +{ + pppos_pcb *pppos = (pppos_pcb *)ctx; + u8_t *s; + struct pbuf *nb; + u16_t n; + u16_t fcs_out; + err_t err; + LWIP_UNUSED_ARG(ppp); + + /* Grab an output buffer. Using PBUF_POOL here for tx is ok since the pbuf + gets freed by 'pppos_output_last' before this function returns and thus + cannot starve rx. */ + nb = pbuf_alloc(PBUF_RAW, 0, PBUF_POOL); + if (nb == NULL) { + PPPDEBUG(LOG_WARNING, ("pppos_write[%d]: alloc fail\n", ppp->netif->num)); + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + pbuf_free(p); + return ERR_MEM; + } + + /* Set nb->tot_len to actual payload length */ + nb->tot_len = p->len; + + /* If the link has been idle, we'll send a fresh flag character to + * flush any noise. */ + err = ERR_OK; + if ((sys_now() - pppos->last_xmit) >= PPP_MAXIDLEFLAG) { + err = pppos_output_append(pppos, err, nb, PPP_FLAG, 0, NULL); + } + + /* Load output buffer. */ + fcs_out = PPP_INITFCS; + s = (u8_t*)p->payload; + n = p->len; + while (n-- > 0) { + err = pppos_output_append(pppos, err, nb, *s++, 1, &fcs_out); + } + + err = pppos_output_last(pppos, err, nb, &fcs_out); + if (err == ERR_OK) { + PPPDEBUG(LOG_INFO, ("pppos_write[%d]: len=%d\n", ppp->netif->num, p->len)); + } else { + PPPDEBUG(LOG_WARNING, ("pppos_write[%d]: output failed len=%d\n", ppp->netif->num, p->len)); + } + pbuf_free(p); + return err; +} + +/* Called by PPP core */ +static err_t +pppos_netif_output(ppp_pcb *ppp, void *ctx, struct pbuf *pb, u16_t protocol) +{ + pppos_pcb *pppos = (pppos_pcb *)ctx; + struct pbuf *nb, *p; + u16_t fcs_out; + err_t err; + LWIP_UNUSED_ARG(ppp); + + /* Grab an output buffer. Using PBUF_POOL here for tx is ok since the pbuf + gets freed by 'pppos_output_last' before this function returns and thus + cannot starve rx. */ + nb = pbuf_alloc(PBUF_RAW, 0, PBUF_POOL); + if (nb == NULL) { + PPPDEBUG(LOG_WARNING, ("pppos_netif_output[%d]: alloc fail\n", ppp->netif->num)); + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return ERR_MEM; + } + + /* Set nb->tot_len to actual payload length */ + nb->tot_len = pb->tot_len; + + /* If the link has been idle, we'll send a fresh flag character to + * flush any noise. */ + err = ERR_OK; + if ((sys_now() - pppos->last_xmit) >= PPP_MAXIDLEFLAG) { + err = pppos_output_append(pppos, err, nb, PPP_FLAG, 0, NULL); + } + + fcs_out = PPP_INITFCS; + if (!pppos->accomp) { + err = pppos_output_append(pppos, err, nb, PPP_ALLSTATIONS, 1, &fcs_out); + err = pppos_output_append(pppos, err, nb, PPP_UI, 1, &fcs_out); + } + if (!pppos->pcomp || protocol > 0xFF) { + err = pppos_output_append(pppos, err, nb, (protocol >> 8) & 0xFF, 1, &fcs_out); + } + err = pppos_output_append(pppos, err, nb, protocol & 0xFF, 1, &fcs_out); + + /* Load packet. 
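+ * (Walk the entire pbuf chain and append every payload byte, escaping it
+ * against the negotiated ACCM and updating the FCS as we go.)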
*/ + for(p = pb; p; p = p->next) { + u16_t n = p->len; + u8_t *s = (u8_t*)p->payload; + + while (n-- > 0) { + err = pppos_output_append(pppos, err, nb, *s++, 1, &fcs_out); + } + } + + err = pppos_output_last(pppos, err, nb, &fcs_out); + if (err == ERR_OK) { + PPPDEBUG(LOG_INFO, ("pppos_netif_output[%d]: proto=0x%"X16_F", len = %d\n", ppp->netif->num, protocol, pb->tot_len)); + } else { + PPPDEBUG(LOG_WARNING, ("pppos_netif_output[%d]: output failed proto=0x%"X16_F", len = %d\n", ppp->netif->num, protocol, pb->tot_len)); + } + return err; +} + +static void +pppos_connect(ppp_pcb *ppp, void *ctx) +{ + pppos_pcb *pppos = (pppos_pcb *)ctx; + PPPOS_DECL_PROTECT(lev); + +#if PPP_INPROC_IRQ_SAFE + /* input pbuf left over from last session? */ + pppos_input_free_current_packet(pppos); +#endif /* PPP_INPROC_IRQ_SAFE */ + + /* reset PPPoS control block to its initial state */ + memset(&pppos->last_xmit, 0, sizeof(pppos_pcb) - offsetof(pppos_pcb, last_xmit)); + + /* + * Default the in and out accm so that escape and flag characters + * are always escaped. + */ + pppos->in_accm[15] = 0x60; /* no need to protect since RX is not running */ + pppos->out_accm[15] = 0x60; + PPPOS_PROTECT(lev); + pppos->open = 1; + PPPOS_UNPROTECT(lev); + + /* + * Start the connection and handle incoming events (packet or timeout). + */ + PPPDEBUG(LOG_INFO, ("pppos_connect: unit %d: connecting\n", ppp->netif->num)); + ppp_start(ppp); /* notify upper layers */ +} + +#if PPP_SERVER +static void +pppos_listen(ppp_pcb *ppp, void *ctx) +{ + pppos_pcb *pppos = (pppos_pcb *)ctx; + PPPOS_DECL_PROTECT(lev); + +#if PPP_INPROC_IRQ_SAFE + /* input pbuf left over from last session? */ + pppos_input_free_current_packet(pppos); +#endif /* PPP_INPROC_IRQ_SAFE */ + + /* reset PPPoS control block to its initial state */ + memset(&pppos->last_xmit, 0, sizeof(pppos_pcb) - offsetof(pppos_pcb, last_xmit)); + + /* + * Default the in and out accm so that escape and flag characters + * are always escaped. + */ + pppos->in_accm[15] = 0x60; /* no need to protect since RX is not running */ + pppos->out_accm[15] = 0x60; + PPPOS_PROTECT(lev); + pppos->open = 1; + PPPOS_UNPROTECT(lev); + + /* + * Wait for something to happen. + */ + PPPDEBUG(LOG_INFO, ("pppos_listen: unit %d: listening\n", ppp->netif->num)); + ppp_start(ppp); /* notify upper layers */ +} +#endif /* PPP_SERVER */ + +static void +pppos_disconnect(ppp_pcb *ppp, void *ctx) +{ + pppos_pcb *pppos = (pppos_pcb *)ctx; + PPPOS_DECL_PROTECT(lev); + + PPPOS_PROTECT(lev); + pppos->open = 0; + PPPOS_UNPROTECT(lev); + + /* If PPP_INPROC_IRQ_SAFE is used we cannot call + * pppos_input_free_current_packet() here because + * rx IRQ might still call pppos_input(). + */ +#if !PPP_INPROC_IRQ_SAFE + /* input pbuf left ? */ + pppos_input_free_current_packet(pppos); +#endif /* !PPP_INPROC_IRQ_SAFE */ + + ppp_link_end(ppp); /* notify upper layers */ +} + +static err_t +pppos_destroy(ppp_pcb *ppp, void *ctx) +{ + pppos_pcb *pppos = (pppos_pcb *)ctx; + LWIP_UNUSED_ARG(ppp); + +#if PPP_INPROC_IRQ_SAFE + /* input pbuf left ? */ + pppos_input_free_current_packet(pppos); +#endif /* PPP_INPROC_IRQ_SAFE */ + + LWIP_MEMPOOL_FREE(PPPOS_PCB, pppos); + return ERR_OK; +} + +#if !NO_SYS && !PPP_INPROC_IRQ_SAFE +/** Pass received raw characters to PPPoS to be decoded through lwIP TCPIP thread. + * + * This is one of the only functions that may be called outside of the TCPIP thread! 
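+ * It copies the received bytes into a freshly allocated pbuf and posts it
+ * to the TCPIP thread via tcpip_inpkt(), so the caller's buffer may be
+ * reused as soon as this call returns.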
+ * + * @param ppp PPP descriptor index, returned by pppos_create() + * @param s received data + * @param l length of received data + */ +err_t +pppos_input_tcpip(ppp_pcb *ppp, u8_t *s, int l) +{ + struct pbuf *p; + err_t err; + + p = pbuf_alloc(PBUF_RAW, l, PBUF_POOL); + if (!p) { + return ERR_MEM; + } + pbuf_take(p, s, l); + + err = tcpip_inpkt(p, ppp_netif(ppp), pppos_input_sys); + if (err != ERR_OK) { + pbuf_free(p); + } + return err; +} + +/* called from TCPIP thread */ +err_t pppos_input_sys(struct pbuf *p, struct netif *inp) { + ppp_pcb *ppp = (ppp_pcb*)inp->state; + struct pbuf *n; + LWIP_ASSERT_CORE_LOCKED(); + + for (n = p; n; n = n->next) { + pppos_input(ppp, (u8_t*)n->payload, n->len); + } + pbuf_free(p); + return ERR_OK; +} +#endif /* !NO_SYS && !PPP_INPROC_IRQ_SAFE */ + +/** PPPoS input helper struct, must be packed since it is stored + * to pbuf->payload, which might be unaligned. */ +#if PPP_INPROC_IRQ_SAFE +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct pppos_input_header { + PACK_STRUCT_FIELD(ppp_pcb *ppp); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#endif /* PPP_INPROC_IRQ_SAFE */ + +/** Pass received raw characters to PPPoS to be decoded. + * + * @param ppp PPP descriptor index, returned by pppos_create() + * @param s received data + * @param l length of received data + */ +void +pppos_input(ppp_pcb *ppp, u8_t *s, int l) +{ + pppos_pcb *pppos = (pppos_pcb *)ppp->link_ctx_cb; + struct pbuf *next_pbuf; + u8_t cur_char; + u8_t escaped; + PPPOS_DECL_PROTECT(lev); +#if !PPP_INPROC_IRQ_SAFE + LWIP_ASSERT_CORE_LOCKED(); +#endif + + PPPDEBUG(LOG_DEBUG, ("pppos_input[%d]: got %d bytes\n", ppp->netif->num, l)); + while (l-- > 0) { + cur_char = *s++; + + PPPOS_PROTECT(lev); + /* ppp_input can disconnect the interface, we need to abort to prevent a memory + * leak if there are remaining bytes because pppos_connect and pppos_listen + * functions expect input buffer to be free. Furthermore there are no real + * reason to continue reading bytes if we are disconnected. + */ + if (!pppos->open) { + PPPOS_UNPROTECT(lev); + return; + } + escaped = ESCAPE_P(pppos->in_accm, cur_char); + PPPOS_UNPROTECT(lev); + /* Handle special characters. */ + if (escaped) { + /* Check for escape sequences. */ + /* XXX Note that this does not handle an escaped 0x5d character which + * would appear as an escape character. Since this is an ASCII ']' + * and there is no reason that I know of to escape it, I won't complicate + * the code to handle this case. GLL */ + if (cur_char == PPP_ESCAPE) { + pppos->in_escaped = 1; + /* Check for the flag character. */ + } else if (cur_char == PPP_FLAG) { + /* If this is just an extra flag character, ignore it. */ + if (pppos->in_state <= PDADDRESS) { + /* ignore it */; + /* If we haven't received the packet header, drop what has come in. */ + } else if (pppos->in_state < PDDATA) { + PPPDEBUG(LOG_WARNING, + ("pppos_input[%d]: Dropping incomplete packet %d\n", + ppp->netif->num, pppos->in_state)); + LINK_STATS_INC(link.lenerr); + pppos_input_drop(pppos); + /* If the fcs is invalid, drop the packet. 
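+ * (The received FCS octets are themselves fed through the FCS, so a frame
+ * that arrived intact leaves the running FCS at the constant residue
+ * PPP_GOODFCS, 0xf0b8.)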
*/ + } else if (pppos->in_fcs != PPP_GOODFCS) { + PPPDEBUG(LOG_INFO, + ("pppos_input[%d]: Dropping bad fcs 0x%"X16_F" proto=0x%"X16_F"\n", + ppp->netif->num, pppos->in_fcs, pppos->in_protocol)); + /* Note: If you get lots of these, check for UART frame errors or try different baud rate */ + LINK_STATS_INC(link.chkerr); + pppos_input_drop(pppos); + /* Otherwise it's a good packet so pass it on. */ + } else { + struct pbuf *inp; + /* Trim off the checksum. */ + if(pppos->in_tail->len > 2) { + pppos->in_tail->len -= 2; + + pppos->in_tail->tot_len = pppos->in_tail->len; + if (pppos->in_tail != pppos->in_head) { + pbuf_cat(pppos->in_head, pppos->in_tail); + } + } else { + pppos->in_tail->tot_len = pppos->in_tail->len; + if (pppos->in_tail != pppos->in_head) { + pbuf_cat(pppos->in_head, pppos->in_tail); + } + + pbuf_realloc(pppos->in_head, pppos->in_head->tot_len - 2); + } + + /* Dispatch the packet thereby consuming it. */ + inp = pppos->in_head; + /* Packet consumed, release our references. */ + pppos->in_head = NULL; + pppos->in_tail = NULL; +#if IP_FORWARD || LWIP_IPV6_FORWARD + /* hide the room for Ethernet forwarding header */ + pbuf_remove_header(inp, PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN); +#endif /* IP_FORWARD || LWIP_IPV6_FORWARD */ +#if PPP_INPROC_IRQ_SAFE + if(tcpip_try_callback(pppos_input_callback, inp) != ERR_OK) { + PPPDEBUG(LOG_ERR, ("pppos_input[%d]: tcpip_callback() failed, dropping packet\n", ppp->netif->num)); + pbuf_free(inp); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(ppp->netif, ifindiscards); + } +#else /* PPP_INPROC_IRQ_SAFE */ + ppp_input(ppp, inp); +#endif /* PPP_INPROC_IRQ_SAFE */ + } + + /* Prepare for a new packet. */ + pppos->in_fcs = PPP_INITFCS; + pppos->in_state = PDADDRESS; + pppos->in_escaped = 0; + /* Other characters are usually control characters that may have + * been inserted by the physical layer so here we just drop them. */ + } else { + PPPDEBUG(LOG_WARNING, + ("pppos_input[%d]: Dropping ACCM char <%d>\n", ppp->netif->num, cur_char)); + } + /* Process other characters. */ + } else { + /* Unencode escaped characters. */ + if (pppos->in_escaped) { + pppos->in_escaped = 0; + cur_char ^= PPP_TRANS; + } + + /* Process character relative to current state. */ + switch(pppos->in_state) { + case PDIDLE: /* Idle state - waiting. */ + /* Drop the character if it's not 0xff + * we would have processed a flag character above. */ + if (cur_char != PPP_ALLSTATIONS) { + break; + } + /* no break */ + /* Fall through */ + + case PDSTART: /* Process start flag. */ + /* Prepare for a new packet. */ + pppos->in_fcs = PPP_INITFCS; + /* no break */ + /* Fall through */ + + case PDADDRESS: /* Process address field. */ + if (cur_char == PPP_ALLSTATIONS) { + pppos->in_state = PDCONTROL; + break; + } + /* no break */ + + /* Else assume compressed address and control fields so + * fall through to get the protocol... */ + /* Fall through */ + case PDCONTROL: /* Process control field. */ + /* If we don't get a valid control code, restart. */ + if (cur_char == PPP_UI) { + pppos->in_state = PDPROTOCOL1; + break; + } + /* no break */ + +#if 0 + else { + PPPDEBUG(LOG_WARNING, + ("pppos_input[%d]: Invalid control <%d>\n", ppp->netif->num, cur_char)); + pppos->in_state = PDSTART; + } +#endif + /* Fall through */ + + case PDPROTOCOL1: /* Process protocol field 1. */ + /* If the lower bit is set, this is the end of the protocol + * field. 
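+ * (RFC 1661 assigns protocol numbers with an even first octet and an odd
+ * second octet, so an odd byte here means the peer compressed the
+ * protocol field to a single octet.)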
*/ + if (cur_char & 1) { + pppos->in_protocol = cur_char; + pppos->in_state = PDDATA; + } else { + pppos->in_protocol = (u16_t)cur_char << 8; + pppos->in_state = PDPROTOCOL2; + } + break; + case PDPROTOCOL2: /* Process protocol field 2. */ + pppos->in_protocol |= cur_char; + pppos->in_state = PDDATA; + break; + case PDDATA: /* Process data byte. */ + /* Make space to receive processed data. */ + if (pppos->in_tail == NULL || pppos->in_tail->len == PBUF_POOL_BUFSIZE) { + u16_t pbuf_alloc_len; + if (pppos->in_tail != NULL) { + pppos->in_tail->tot_len = pppos->in_tail->len; + if (pppos->in_tail != pppos->in_head) { + pbuf_cat(pppos->in_head, pppos->in_tail); + /* give up the in_tail reference now */ + pppos->in_tail = NULL; + } + } + /* If we haven't started a packet, we need a packet header. */ + pbuf_alloc_len = 0; +#if IP_FORWARD || LWIP_IPV6_FORWARD + /* If IP forwarding is enabled we are reserving PBUF_LINK_ENCAPSULATION_HLEN + * + PBUF_LINK_HLEN bytes so the packet is being allocated with enough header + * space to be forwarded (to Ethernet for example). + */ + if (pppos->in_head == NULL) { + pbuf_alloc_len = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN; + } +#endif /* IP_FORWARD || LWIP_IPV6_FORWARD */ + next_pbuf = pbuf_alloc(PBUF_RAW, pbuf_alloc_len, PBUF_POOL); + if (next_pbuf == NULL) { + /* No free buffers. Drop the input packet and let the + * higher layers deal with it. Continue processing + * the received pbuf chain in case a new packet starts. */ + PPPDEBUG(LOG_ERR, ("pppos_input[%d]: NO FREE PBUFS!\n", ppp->netif->num)); + LINK_STATS_INC(link.memerr); + pppos_input_drop(pppos); + pppos->in_state = PDSTART; /* Wait for flag sequence. */ + break; + } + if (pppos->in_head == NULL) { + u8_t *payload = ((u8_t*)next_pbuf->payload) + pbuf_alloc_len; +#if PPP_INPROC_IRQ_SAFE + ((struct pppos_input_header*)payload)->ppp = ppp; + payload += sizeof(struct pppos_input_header); + next_pbuf->len += sizeof(struct pppos_input_header); +#endif /* PPP_INPROC_IRQ_SAFE */ + next_pbuf->len += sizeof(pppos->in_protocol); + *(payload++) = pppos->in_protocol >> 8; + *(payload) = pppos->in_protocol & 0xFF; + pppos->in_head = next_pbuf; + } + pppos->in_tail = next_pbuf; + } + /* Load character into buffer. */ + ((u8_t*)pppos->in_tail->payload)[pppos->in_tail->len++] = cur_char; + break; + default: + break; + } + + /* update the frame check sequence number. */ + pppos->in_fcs = PPP_FCS(pppos->in_fcs, cur_char); + } + } /* while (l-- > 0), all bytes processed */ +} + +#if PPP_INPROC_IRQ_SAFE +/* PPPoS input callback using one input pointer + */ +static void pppos_input_callback(void *arg) { + struct pbuf *pb = (struct pbuf*)arg; + ppp_pcb *ppp; + + ppp = ((struct pppos_input_header*)pb->payload)->ppp; + if(pbuf_remove_header(pb, sizeof(struct pppos_input_header))) { + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + goto drop; + } + + /* Dispatch the packet thereby consuming it. */ + ppp_input(ppp, pb); + return; + +drop: + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(ppp->netif, ifindiscards); + pbuf_free(pb); +} +#endif /* PPP_INPROC_IRQ_SAFE */ + +static void +pppos_send_config(ppp_pcb *ppp, void *ctx, u32_t accm, int pcomp, int accomp) +{ + int i; + pppos_pcb *pppos = (pppos_pcb *)ctx; + LWIP_UNUSED_ARG(ppp); + + pppos->pcomp = pcomp; + pppos->accomp = accomp; + + /* Load the ACCM bits for the 32 control codes. 
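+ * (A set bit in the negotiated ACCM means the corresponding control
+ * character 0x00..0x1f must be escaped on transmit as PPP_ESCAPE followed
+ * by the character XOR'ed with 0x20; see ESCAPE_P() above.)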
*/ + for (i = 0; i < 32/8; i++) { + pppos->out_accm[i] = (u8_t)((accm >> (8 * i)) & 0xFF); + } + + PPPDEBUG(LOG_INFO, ("pppos_send_config[%d]: out_accm=%X %X %X %X\n", + pppos->ppp->netif->num, + pppos->out_accm[0], pppos->out_accm[1], pppos->out_accm[2], pppos->out_accm[3])); +} + +static void +pppos_recv_config(ppp_pcb *ppp, void *ctx, u32_t accm, int pcomp, int accomp) +{ + int i; + pppos_pcb *pppos = (pppos_pcb *)ctx; + PPPOS_DECL_PROTECT(lev); + LWIP_UNUSED_ARG(ppp); + LWIP_UNUSED_ARG(pcomp); + LWIP_UNUSED_ARG(accomp); + + /* Load the ACCM bits for the 32 control codes. */ + PPPOS_PROTECT(lev); + for (i = 0; i < 32 / 8; i++) { + pppos->in_accm[i] = (u8_t)(accm >> (i * 8)); + } + PPPOS_UNPROTECT(lev); + + PPPDEBUG(LOG_INFO, ("pppos_recv_config[%d]: in_accm=%X %X %X %X\n", + pppos->ppp->netif->num, + pppos->in_accm[0], pppos->in_accm[1], pppos->in_accm[2], pppos->in_accm[3])); +} + +/* + * Drop the input packet. + */ +static void +pppos_input_free_current_packet(pppos_pcb *pppos) +{ + if (pppos->in_head != NULL) { + if (pppos->in_tail && (pppos->in_tail != pppos->in_head)) { + pbuf_free(pppos->in_tail); + } + pbuf_free(pppos->in_head); + pppos->in_head = NULL; + } + pppos->in_tail = NULL; +} + +/* + * Drop the input packet and increase error counters. + */ +static void +pppos_input_drop(pppos_pcb *pppos) +{ + if (pppos->in_head != NULL) { +#if 0 + PPPDEBUG(LOG_INFO, ("pppos_input_drop: %d:%.*H\n", pppos->in_head->len, min(60, pppos->in_head->len * 2), pppos->in_head->payload)); +#endif + PPPDEBUG(LOG_INFO, ("pppos_input_drop: pbuf len=%d, addr %p\n", pppos->in_head->len, (void*)pppos->in_head)); + } + pppos_input_free_current_packet(pppos); +#if VJ_SUPPORT + vj_uncompress_err(&pppos->ppp->vj_comp); +#endif /* VJ_SUPPORT */ + + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(pppos->ppp->netif, ifindiscards); +} + +/* + * pppos_output_append - append given character to end of given pbuf. + * If out_accm is not 0 and the character needs to be escaped, do so. + * If pbuf is full, send the pbuf and reuse it. + * Return the current pbuf. + */ +static err_t +pppos_output_append(pppos_pcb *pppos, err_t err, struct pbuf *nb, u8_t c, u8_t accm, u16_t *fcs) +{ + if (err != ERR_OK) { + return err; + } + + /* Make sure there is room for the character and an escape code. + * Sure we don't quite fill the buffer if the character doesn't + * get escaped but is one character worth complicating this? */ + if ((PBUF_POOL_BUFSIZE - nb->len) < 2) { + u32_t l = pppos->output_cb(pppos->ppp, (u8_t*)nb->payload, nb->len, pppos->ppp->ctx_cb); + if (l != nb->len) { + return ERR_IF; + } + nb->len = 0; + } + + /* Update FCS before checking for special characters. */ + if (fcs) { + *fcs = PPP_FCS(*fcs, c); + } + + /* Copy to output buffer escaping special characters. */ + if (accm && ESCAPE_P(pppos->out_accm, c)) { + *((u8_t*)nb->payload + nb->len++) = PPP_ESCAPE; + *((u8_t*)nb->payload + nb->len++) = c ^ PPP_TRANS; + } else { + *((u8_t*)nb->payload + nb->len++) = c; + } + + return ERR_OK; +} + +static err_t +pppos_output_last(pppos_pcb *pppos, err_t err, struct pbuf *nb, u16_t *fcs) +{ + ppp_pcb *ppp = pppos->ppp; + + /* Add FCS and trailing flag. 
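+ * (RFC 1662: the FCS is transmitted ones-complemented, least significant
+ * octet first; the FCS octets are escaped but not fed back into the FCS,
+ * hence the NULL fcs argument.)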
*/ + err = pppos_output_append(pppos, err, nb, ~(*fcs) & 0xFF, 1, NULL); + err = pppos_output_append(pppos, err, nb, (~(*fcs) >> 8) & 0xFF, 1, NULL); + err = pppos_output_append(pppos, err, nb, PPP_FLAG, 0, NULL); + + if (err != ERR_OK) { + goto failed; + } + + /* Send remaining buffer if not empty */ + if (nb->len > 0) { + u32_t l = pppos->output_cb(ppp, (u8_t*)nb->payload, nb->len, ppp->ctx_cb); + if (l != nb->len) { + err = ERR_IF; + goto failed; + } + } + + pppos->last_xmit = sys_now(); + MIB2_STATS_NETIF_ADD(ppp->netif, ifoutoctets, nb->tot_len); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutucastpkts); + LINK_STATS_INC(link.xmit); + pbuf_free(nb); + return ERR_OK; + +failed: + pppos->last_xmit = 0; /* prepend PPP_FLAG to next packet */ + LINK_STATS_INC(link.err); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + pbuf_free(nb); + return err; +} + +#endif /* PPP_SUPPORT && PPPOS_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/upap.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/upap.c new file mode 100644 index 0000000..3b2399d --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/upap.c @@ -0,0 +1,677 @@ +/* + * upap.c - User/Password Authentication Protocol. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +/* + * @todo: + */ + +#if 0 /* UNUSED */ +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/upap.h" + +#if PPP_OPTIONS +/* + * Command-line options. 
+ */ +static option_t pap_option_list[] = { + { "hide-password", o_bool, &hide_password, + "Don't output passwords to log", OPT_PRIO | 1 }, + { "show-password", o_bool, &hide_password, + "Show password string in debug log messages", OPT_PRIOSUB | 0 }, + + { "pap-restart", o_int, &upap[0].us_timeouttime, + "Set retransmit timeout for PAP", OPT_PRIO }, + { "pap-max-authreq", o_int, &upap[0].us_maxtransmits, + "Set max number of transmissions for auth-reqs", OPT_PRIO }, + { "pap-timeout", o_int, &upap[0].us_reqtimeout, + "Set time limit for peer PAP authentication", OPT_PRIO }, + + { NULL } +}; +#endif /* PPP_OPTIONS */ + +/* + * Protocol entry points. + */ +static void upap_init(ppp_pcb *pcb); +static void upap_lowerup(ppp_pcb *pcb); +static void upap_lowerdown(ppp_pcb *pcb); +static void upap_input(ppp_pcb *pcb, u_char *inpacket, int l); +static void upap_protrej(ppp_pcb *pcb); +#if PRINTPKT_SUPPORT +static int upap_printpkt(const u_char *p, int plen, void (*printer) (void *, const char *, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ + +const struct protent pap_protent = { + PPP_PAP, + upap_init, + upap_input, + upap_protrej, + upap_lowerup, + upap_lowerdown, + NULL, + NULL, +#if PRINTPKT_SUPPORT + upap_printpkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "PAP", + NULL, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + pap_option_list, + NULL, +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + NULL, + NULL +#endif /* DEMAND_SUPPORT */ +}; + +static void upap_timeout(void *arg); +#if PPP_SERVER +static void upap_reqtimeout(void *arg); +static void upap_rauthreq(ppp_pcb *pcb, u_char *inp, int id, int len); +#endif /* PPP_SERVER */ +static void upap_rauthack(ppp_pcb *pcb, u_char *inp, int id, int len); +static void upap_rauthnak(ppp_pcb *pcb, u_char *inp, int id, int len); +static void upap_sauthreq(ppp_pcb *pcb); +#if PPP_SERVER +static void upap_sresp(ppp_pcb *pcb, u_char code, u_char id, const char *msg, int msglen); +#endif /* PPP_SERVER */ + + +/* + * upap_init - Initialize a UPAP unit. + */ +static void upap_init(ppp_pcb *pcb) { + pcb->upap.us_user = NULL; + pcb->upap.us_userlen = 0; + pcb->upap.us_passwd = NULL; + pcb->upap.us_passwdlen = 0; + pcb->upap.us_clientstate = UPAPCS_INITIAL; +#if PPP_SERVER + pcb->upap.us_serverstate = UPAPSS_INITIAL; +#endif /* PPP_SERVER */ + pcb->upap.us_id = 0; +} + + +/* + * upap_authwithpeer - Authenticate us with our peer (start client). + * + * Set new state and send authenticate's. + */ +void upap_authwithpeer(ppp_pcb *pcb, const char *user, const char *password) { + + if(!user || !password) + return; + + /* Save the username and password we're given */ + pcb->upap.us_user = user; + pcb->upap.us_userlen = (u8_t)LWIP_MIN(strlen(user), 0xff); + pcb->upap.us_passwd = password; + pcb->upap.us_passwdlen = (u8_t)LWIP_MIN(strlen(password), 0xff); + pcb->upap.us_transmits = 0; + + /* Lower layer up yet? */ + if (pcb->upap.us_clientstate == UPAPCS_INITIAL || + pcb->upap.us_clientstate == UPAPCS_PENDING) { + pcb->upap.us_clientstate = UPAPCS_PENDING; + return; + } + + upap_sauthreq(pcb); /* Start protocol */ +} + +#if PPP_SERVER +/* + * upap_authpeer - Authenticate our peer (start server). + * + * Set new state. + */ +void upap_authpeer(ppp_pcb *pcb) { + + /* Lower layer up yet? 
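[Editor's note] Applications do not call upap_authwithpeer() directly; it is driven by the LCP/auth state machines once credentials are registered through the public PPP API. A minimal client-side sketch for a PPPoS link follows; uart_output_cb and link_status_cb are assumed application callbacks (their signatures match the output_cb used by pppos.c above), and PAP_SUPPORT must be enabled in lwipopts.h:

#include "lwip/netif.h"
#include "netif/ppp/ppp.h"
#include "netif/ppp/pppos.h"

static struct netif ppp_netif;

/* Application-provided: push len serial bytes out of the UART, return the count sent. */
static u32_t uart_output_cb(ppp_pcb *pcb, u8_t *data, u32_t len, void *ctx);
/* Application-provided: receives PPPERR_NONE when the link comes up, error codes otherwise. */
static void link_status_cb(ppp_pcb *pcb, int err_code, void *ctx);

void ppp_pap_client_start(void)
{
  ppp_pcb *ppp = pppos_create(&ppp_netif, uart_output_cb, link_status_cb, NULL);
  if (ppp == NULL) {
    return;
  }
  ppp_set_auth(ppp, PPPAUTHTYPE_PAP, "username", "password");
  ppp_connect(ppp, 0); /* LCP negotiates, then upap_authwithpeer() runs */
}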
*/ + if (pcb->upap.us_serverstate == UPAPSS_INITIAL || + pcb->upap.us_serverstate == UPAPSS_PENDING) { + pcb->upap.us_serverstate = UPAPSS_PENDING; + return; + } + + pcb->upap.us_serverstate = UPAPSS_LISTEN; + if (pcb->settings.pap_req_timeout > 0) + TIMEOUT(upap_reqtimeout, pcb, pcb->settings.pap_req_timeout); +} +#endif /* PPP_SERVER */ + +/* + * upap_timeout - Retransmission timer for sending auth-reqs expired. + */ +static void upap_timeout(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + if (pcb->upap.us_clientstate != UPAPCS_AUTHREQ) + return; + + if (pcb->upap.us_transmits >= pcb->settings.pap_max_transmits) { + /* give up in disgust */ + ppp_error("No response to PAP authenticate-requests"); + pcb->upap.us_clientstate = UPAPCS_BADAUTH; + auth_withpeer_fail(pcb, PPP_PAP); + return; + } + + upap_sauthreq(pcb); /* Send Authenticate-Request */ +} + + +#if PPP_SERVER +/* + * upap_reqtimeout - Give up waiting for the peer to send an auth-req. + */ +static void upap_reqtimeout(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + if (pcb->upap.us_serverstate != UPAPSS_LISTEN) + return; /* huh?? */ + + auth_peer_fail(pcb, PPP_PAP); + pcb->upap.us_serverstate = UPAPSS_BADAUTH; +} +#endif /* PPP_SERVER */ + + +/* + * upap_lowerup - The lower layer is up. + * + * Start authenticating if pending. + */ +static void upap_lowerup(ppp_pcb *pcb) { + + if (pcb->upap.us_clientstate == UPAPCS_INITIAL) + pcb->upap.us_clientstate = UPAPCS_CLOSED; + else if (pcb->upap.us_clientstate == UPAPCS_PENDING) { + upap_sauthreq(pcb); /* send an auth-request */ + } + +#if PPP_SERVER + if (pcb->upap.us_serverstate == UPAPSS_INITIAL) + pcb->upap.us_serverstate = UPAPSS_CLOSED; + else if (pcb->upap.us_serverstate == UPAPSS_PENDING) { + pcb->upap.us_serverstate = UPAPSS_LISTEN; + if (pcb->settings.pap_req_timeout > 0) + TIMEOUT(upap_reqtimeout, pcb, pcb->settings.pap_req_timeout); + } +#endif /* PPP_SERVER */ +} + + +/* + * upap_lowerdown - The lower layer is down. + * + * Cancel all timeouts. + */ +static void upap_lowerdown(ppp_pcb *pcb) { + + if (pcb->upap.us_clientstate == UPAPCS_AUTHREQ) /* Timeout pending? */ + UNTIMEOUT(upap_timeout, pcb); /* Cancel timeout */ +#if PPP_SERVER + if (pcb->upap.us_serverstate == UPAPSS_LISTEN && pcb->settings.pap_req_timeout > 0) + UNTIMEOUT(upap_reqtimeout, pcb); +#endif /* PPP_SERVER */ + + pcb->upap.us_clientstate = UPAPCS_INITIAL; +#if PPP_SERVER + pcb->upap.us_serverstate = UPAPSS_INITIAL; +#endif /* PPP_SERVER */ +} + + +/* + * upap_protrej - Peer doesn't speak this protocol. + * + * This shouldn't happen. In any case, pretend lower layer went down. + */ +static void upap_protrej(ppp_pcb *pcb) { + + if (pcb->upap.us_clientstate == UPAPCS_AUTHREQ) { + ppp_error("PAP authentication failed due to protocol-reject"); + auth_withpeer_fail(pcb, PPP_PAP); + } +#if PPP_SERVER + if (pcb->upap.us_serverstate == UPAPSS_LISTEN) { + ppp_error("PAP authentication of peer failed (protocol-reject)"); + auth_peer_fail(pcb, PPP_PAP); + } +#endif /* PPP_SERVER */ + upap_lowerdown(pcb); +} + + +/* + * upap_input - Input UPAP packet. + */ +static void upap_input(ppp_pcb *pcb, u_char *inpacket, int l) { + u_char *inp; + u_char code, id; + int len; + + /* + * Parse header (code, id and length). + * If packet too short, drop it. 
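[Editor's note] The header parsed below is the four-byte PAP header from RFC 1334: one code byte, one identifier byte and a two-byte length covering the whole packet. As a hedged illustration (example credentials, not project data), the Authenticate-Request that upap_sauthreq() builds for user "abc" and password "xy" looks like this on the wire:

static const unsigned char pap_authreq_example[] = {
  0x01,                 /* code   = UPAP_AUTHREQ                    */
  0x01,                 /* id     = first attempt                   */
  0x00, 0x0B,           /* length = 4 (header) + 1 + 3 + 1 + 2 = 11 */
  0x03, 'a', 'b', 'c',  /* peer-id length, peer-id                  */
  0x02, 'x', 'y'        /* password length, password                */
};

After GETSHORT() the code subtracts UPAP_HEADERLEN, leaving 7 payload bytes that upap_rauthreq() walks using the same length-prefixed layout.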
+ */ + inp = inpacket; + if (l < UPAP_HEADERLEN) { + UPAPDEBUG(("pap_input: rcvd short header.")); + return; + } + GETCHAR(code, inp); + GETCHAR(id, inp); + GETSHORT(len, inp); + if (len < UPAP_HEADERLEN) { + UPAPDEBUG(("pap_input: rcvd illegal length.")); + return; + } + if (len > l) { + UPAPDEBUG(("pap_input: rcvd short packet.")); + return; + } + len -= UPAP_HEADERLEN; + + /* + * Action depends on code. + */ + switch (code) { + case UPAP_AUTHREQ: +#if PPP_SERVER + upap_rauthreq(pcb, inp, id, len); +#endif /* PPP_SERVER */ + break; + + case UPAP_AUTHACK: + upap_rauthack(pcb, inp, id, len); + break; + + case UPAP_AUTHNAK: + upap_rauthnak(pcb, inp, id, len); + break; + + default: /* XXX Need code reject */ + break; + } +} + +#if PPP_SERVER +/* + * upap_rauth - Receive Authenticate. + */ +static void upap_rauthreq(ppp_pcb *pcb, u_char *inp, int id, int len) { + u_char ruserlen, rpasswdlen; + char *ruser; + char *rpasswd; + char rhostname[256]; + int retcode; + const char *msg; + int msglen; + + if (pcb->upap.us_serverstate < UPAPSS_LISTEN) + return; + + /* + * If we receive a duplicate authenticate-request, we are + * supposed to return the same status as for the first request. + */ + if (pcb->upap.us_serverstate == UPAPSS_OPEN) { + upap_sresp(pcb, UPAP_AUTHACK, id, "", 0); /* return auth-ack */ + return; + } + if (pcb->upap.us_serverstate == UPAPSS_BADAUTH) { + upap_sresp(pcb, UPAP_AUTHNAK, id, "", 0); /* return auth-nak */ + return; + } + + /* + * Parse user/passwd. + */ + if (len < 1) { + UPAPDEBUG(("pap_rauth: rcvd short packet.")); + return; + } + GETCHAR(ruserlen, inp); + len -= sizeof (u_char) + ruserlen + sizeof (u_char); + if (len < 0) { + UPAPDEBUG(("pap_rauth: rcvd short packet.")); + return; + } + ruser = (char *) inp; + INCPTR(ruserlen, inp); + GETCHAR(rpasswdlen, inp); + if (len < rpasswdlen) { + UPAPDEBUG(("pap_rauth: rcvd short packet.")); + return; + } + + rpasswd = (char *) inp; + + /* + * Check the username and password given. + */ + retcode = UPAP_AUTHNAK; + if (auth_check_passwd(pcb, ruser, ruserlen, rpasswd, rpasswdlen, &msg, &msglen)) { + retcode = UPAP_AUTHACK; + } + BZERO(rpasswd, rpasswdlen); + +#if 0 /* UNUSED */ + /* + * Check remote number authorization. A plugin may have filled in + * the remote number or added an allowed number, and rather than + * return an authenticate failure, is leaving it for us to verify. + */ + if (retcode == UPAP_AUTHACK) { + if (!auth_number()) { + /* We do not want to leak info about the pap result. */ + retcode = UPAP_AUTHNAK; /* XXX exit value will be "wrong" */ + warn("calling number %q is not authorized", remote_number); + } + } + + msglen = strlen(msg); + if (msglen > 255) + msglen = 255; +#endif /* UNUSED */ + + upap_sresp(pcb, retcode, id, msg, msglen); + + /* Null terminate and clean remote name. */ + ppp_slprintf(rhostname, sizeof(rhostname), "%.*v", ruserlen, ruser); + + if (retcode == UPAP_AUTHACK) { + pcb->upap.us_serverstate = UPAPSS_OPEN; + ppp_notice("PAP peer authentication succeeded for %q", rhostname); + auth_peer_success(pcb, PPP_PAP, 0, ruser, ruserlen); + } else { + pcb->upap.us_serverstate = UPAPSS_BADAUTH; + ppp_warn("PAP peer authentication failed for %q", rhostname); + auth_peer_fail(pcb, PPP_PAP); + } + + if (pcb->settings.pap_req_timeout > 0) + UNTIMEOUT(upap_reqtimeout, pcb); +} +#endif /* PPP_SERVER */ + +/* + * upap_rauthack - Receive Authenticate-Ack. 
+ */ +static void upap_rauthack(ppp_pcb *pcb, u_char *inp, int id, int len) { + u_char msglen; + char *msg; + LWIP_UNUSED_ARG(id); + + if (pcb->upap.us_clientstate != UPAPCS_AUTHREQ) /* XXX */ + return; + + /* + * Parse message. + */ + if (len < 1) { + UPAPDEBUG(("pap_rauthack: ignoring missing msg-length.")); + } else { + GETCHAR(msglen, inp); + if (msglen > 0) { + len -= sizeof (u_char); + if (len < msglen) { + UPAPDEBUG(("pap_rauthack: rcvd short packet.")); + return; + } + msg = (char *) inp; + PRINTMSG(msg, msglen); + } + } + + pcb->upap.us_clientstate = UPAPCS_OPEN; + + auth_withpeer_success(pcb, PPP_PAP, 0); +} + + +/* + * upap_rauthnak - Receive Authenticate-Nak. + */ +static void upap_rauthnak(ppp_pcb *pcb, u_char *inp, int id, int len) { + u_char msglen; + char *msg; + LWIP_UNUSED_ARG(id); + + if (pcb->upap.us_clientstate != UPAPCS_AUTHREQ) /* XXX */ + return; + + /* + * Parse message. + */ + if (len < 1) { + UPAPDEBUG(("pap_rauthnak: ignoring missing msg-length.")); + } else { + GETCHAR(msglen, inp); + if (msglen > 0) { + len -= sizeof (u_char); + if (len < msglen) { + UPAPDEBUG(("pap_rauthnak: rcvd short packet.")); + return; + } + msg = (char *) inp; + PRINTMSG(msg, msglen); + } + } + + pcb->upap.us_clientstate = UPAPCS_BADAUTH; + + ppp_error("PAP authentication failed"); + auth_withpeer_fail(pcb, PPP_PAP); +} + + +/* + * upap_sauthreq - Send an Authenticate-Request. + */ +static void upap_sauthreq(ppp_pcb *pcb) { + struct pbuf *p; + u_char *outp; + int outlen; + + outlen = UPAP_HEADERLEN + 2 * sizeof (u_char) + + pcb->upap.us_userlen + pcb->upap.us_passwdlen; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN +outlen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + MAKEHEADER(outp, PPP_PAP); + + PUTCHAR(UPAP_AUTHREQ, outp); + PUTCHAR(++pcb->upap.us_id, outp); + PUTSHORT(outlen, outp); + PUTCHAR(pcb->upap.us_userlen, outp); + MEMCPY(outp, pcb->upap.us_user, pcb->upap.us_userlen); + INCPTR(pcb->upap.us_userlen, outp); + PUTCHAR(pcb->upap.us_passwdlen, outp); + MEMCPY(outp, pcb->upap.us_passwd, pcb->upap.us_passwdlen); + + ppp_write(pcb, p); + + TIMEOUT(upap_timeout, pcb, pcb->settings.pap_timeout_time); + ++pcb->upap.us_transmits; + pcb->upap.us_clientstate = UPAPCS_AUTHREQ; +} + +#if PPP_SERVER +/* + * upap_sresp - Send a response (ack or nak). + */ +static void upap_sresp(ppp_pcb *pcb, u_char code, u_char id, const char *msg, int msglen) { + struct pbuf *p; + u_char *outp; + int outlen; + + outlen = UPAP_HEADERLEN + sizeof (u_char) + msglen; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN +outlen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + MAKEHEADER(outp, PPP_PAP); + + PUTCHAR(code, outp); + PUTCHAR(id, outp); + PUTSHORT(outlen, outp); + PUTCHAR(msglen, outp); + MEMCPY(outp, msg, msglen); + + ppp_write(pcb, p); +} +#endif /* PPP_SERVER */ + +#if PRINTPKT_SUPPORT +/* + * upap_printpkt - print the contents of a PAP packet. 
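[Editor's note] Responses carry the same header followed by a single msg-length byte and an optional text message, which is why reply text is capped at 255 characters. An illustrative Authenticate-Ack (assumed message text) matching what upap_sresp() emits and upap_rauthack() parses:

static const unsigned char pap_authack_example[] = {
  0x02,              /* code   = UPAP_AUTHACK            */
  0x01,              /* id     = copied from the request */
  0x00, 0x07,        /* length = 4 (header) + 1 + 2 = 7  */
  0x02, 'O', 'K'     /* msg-length, message              */
};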
+ */ +static const char* const upap_codenames[] = { + "AuthReq", "AuthAck", "AuthNak" +}; + +static int upap_printpkt(const u_char *p, int plen, void (*printer) (void *, const char *, ...), void *arg) { + int code, id, len; + int mlen, ulen, wlen; + const u_char *user, *pwd, *msg; + const u_char *pstart; + + if (plen < UPAP_HEADERLEN) + return 0; + pstart = p; + GETCHAR(code, p); + GETCHAR(id, p); + GETSHORT(len, p); + if (len < UPAP_HEADERLEN || len > plen) + return 0; + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(upap_codenames)) + printer(arg, " %s", upap_codenames[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= UPAP_HEADERLEN; + switch (code) { + case UPAP_AUTHREQ: + if (len < 1) + break; + ulen = p[0]; + if (len < ulen + 2) + break; + wlen = p[ulen + 1]; + if (len < ulen + wlen + 2) + break; + user = (const u_char *) (p + 1); + pwd = (const u_char *) (p + ulen + 2); + p += ulen + wlen + 2; + len -= ulen + wlen + 2; + printer(arg, " user="); + ppp_print_string(user, ulen, printer, arg); + printer(arg, " password="); +/* FIXME: require ppp_pcb struct as printpkt() argument */ +#if 0 + if (!pcb->settings.hide_password) +#endif + ppp_print_string(pwd, wlen, printer, arg); +#if 0 + else + printer(arg, ""); +#endif + break; + case UPAP_AUTHACK: + case UPAP_AUTHNAK: + if (len < 1) + break; + mlen = p[0]; + if (len < mlen + 1) + break; + msg = (const u_char *) (p + 1); + p += mlen + 1; + len -= mlen + 1; + printer(arg, " "); + ppp_print_string(msg, mlen, printer, arg); + break; + default: + break; + } + + /* print the rest of the bytes in the packet */ + for (; len > 0; --len) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + + return p - pstart; +} +#endif /* PRINTPKT_SUPPORT */ + +#endif /* PPP_SUPPORT && PAP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/utils.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/utils.c new file mode 100644 index 0000000..f1366da --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/utils.c @@ -0,0 +1,957 @@ +/* + * utils.c - various utility functions used in pppd. + * + * Copyright (c) 1999-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef SVR4 +#include +#endif +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/lcp.h" + +#if defined(SUNOS4) +extern char *strerror(); +#endif + +static void ppp_logit(int level, const char *fmt, va_list args); +static void ppp_log_write(int level, char *buf); +#if PRINTPKT_SUPPORT +static void ppp_vslp_printer(void *arg, const char *fmt, ...); +static void ppp_format_packet(const u_char *p, int len, + void (*printer) (void *, const char *, ...), void *arg); + +struct buffer_info { + char *ptr; + int len; +}; +#endif /* PRINTPKT_SUPPORT */ + +/* + * ppp_strlcpy - like strcpy/strncpy, doesn't overflow destination buffer, + * always leaves destination null-terminated (for len > 0). + */ +size_t ppp_strlcpy(char *dest, const char *src, size_t len) { + size_t ret = strlen(src); + + if (len != 0) { + if (ret < len) + strcpy(dest, src); + else { + strncpy(dest, src, len - 1); + dest[len-1] = 0; + } + } + return ret; +} + +/* + * ppp_strlcat - like strcat/strncat, doesn't overflow destination buffer, + * always leaves destination null-terminated (for len > 0). + */ +size_t ppp_strlcat(char *dest, const char *src, size_t len) { + size_t dlen = strlen(dest); + + return dlen + ppp_strlcpy(dest + dlen, src, (len > dlen? len - dlen: 0)); +} + + +/* + * ppp_slprintf - format a message into a buffer. Like sprintf except we + * also specify the length of the output buffer, and we handle + * %m (error message), %v (visible string), + * %q (quoted string), %t (current time) and %I (IP address) formats. + * Doesn't do floating-point formats. + * Returns the number of chars put into buf. + */ +int ppp_slprintf(char *buf, int buflen, const char *fmt, ...) { + va_list args; + int n; + + va_start(args, fmt); + n = ppp_vslprintf(buf, buflen, fmt, args); + va_end(args); + return n; +} + +/* + * ppp_vslprintf - like ppp_slprintf, takes a va_list instead of a list of args. + */ +#define OUTCHAR(c) (buflen > 0? 
(--buflen, *buf++ = (c)): 0) + +int ppp_vslprintf(char *buf, int buflen, const char *fmt, va_list args) { + int c, i, n; + int width, prec, fillch; + int base, len, neg, quoted; + unsigned long val = 0; + const char *f; + char *str, *buf0; + const unsigned char *p; + char num[32]; +#if 0 /* need port */ + time_t t; +#endif /* need port */ + u32_t ip; + static char hexchars[] = "0123456789abcdef"; +#if PRINTPKT_SUPPORT + struct buffer_info bufinfo; +#endif /* PRINTPKT_SUPPORT */ + + buf0 = buf; + --buflen; + while (buflen > 0) { + for (f = fmt; *f != '%' && *f != 0; ++f) + ; + if (f > fmt) { + len = f - fmt; + if (len > buflen) + len = buflen; + memcpy(buf, fmt, len); + buf += len; + buflen -= len; + fmt = f; + } + if (*fmt == 0) + break; + c = *++fmt; + width = 0; + prec = -1; + fillch = ' '; + if (c == '0') { + fillch = '0'; + c = *++fmt; + } + if (c == '*') { + width = va_arg(args, int); + c = *++fmt; + } else { + while (lwip_isdigit(c)) { + width = width * 10 + c - '0'; + c = *++fmt; + } + } + if (c == '.') { + c = *++fmt; + if (c == '*') { + prec = va_arg(args, int); + c = *++fmt; + } else { + prec = 0; + while (lwip_isdigit(c)) { + prec = prec * 10 + c - '0'; + c = *++fmt; + } + } + } + str = 0; + base = 0; + neg = 0; + ++fmt; + switch (c) { + case 'l': + c = *fmt++; + switch (c) { + case 'd': + val = va_arg(args, long); + if ((long)val < 0) { + neg = 1; + val = (unsigned long)-(long)val; + } + base = 10; + break; + case 'u': + val = va_arg(args, unsigned long); + base = 10; + break; + default: + OUTCHAR('%'); + OUTCHAR('l'); + --fmt; /* so %lz outputs %lz etc. */ + continue; + } + break; + case 'd': + i = va_arg(args, int); + if (i < 0) { + neg = 1; + val = -i; + } else + val = i; + base = 10; + break; + case 'u': + val = va_arg(args, unsigned int); + base = 10; + break; + case 'o': + val = va_arg(args, unsigned int); + base = 8; + break; + case 'x': + case 'X': + val = va_arg(args, unsigned int); + base = 16; + break; +#if 0 /* unused (and wrong on LLP64 systems) */ + case 'p': + val = (unsigned long) va_arg(args, void *); + base = 16; + neg = 2; + break; +#endif /* unused (and wrong on LLP64 systems) */ + case 's': + str = va_arg(args, char *); + break; + case 'c': + num[0] = va_arg(args, int); + num[1] = 0; + str = num; + break; +#if 0 /* do we always have strerror() in embedded ? */ + case 'm': + str = strerror(errno); + break; +#endif /* do we always have strerror() in embedded ? 
*/ + case 'I': + ip = va_arg(args, u32_t); + ip = lwip_ntohl(ip); + ppp_slprintf(num, sizeof(num), "%d.%d.%d.%d", (ip >> 24) & 0xff, + (ip >> 16) & 0xff, (ip >> 8) & 0xff, ip & 0xff); + str = num; + break; +#if 0 /* need port */ + case 't': + time(&t); + str = ctime(&t); + str += 4; /* chop off the day name */ + str[15] = 0; /* chop off year and newline */ + break; +#endif /* need port */ + case 'v': /* "visible" string */ + case 'q': /* quoted string */ + quoted = c == 'q'; + p = va_arg(args, unsigned char *); + if (p == NULL) + p = (const unsigned char *)""; + if (fillch == '0' && prec >= 0) { + n = prec; + } else { + n = strlen((const char *)p); + if (prec >= 0 && n > prec) + n = prec; + } + while (n > 0 && buflen > 0) { + c = *p++; + --n; + if (!quoted && c >= 0x80) { + OUTCHAR('M'); + OUTCHAR('-'); + c -= 0x80; + } + if (quoted && (c == '"' || c == '\\')) + OUTCHAR('\\'); + if (c < 0x20 || (0x7f <= c && c < 0xa0)) { + if (quoted) { + OUTCHAR('\\'); + switch (c) { + case '\t': OUTCHAR('t'); break; + case '\n': OUTCHAR('n'); break; + case '\b': OUTCHAR('b'); break; + case '\f': OUTCHAR('f'); break; + default: + OUTCHAR('x'); + OUTCHAR(hexchars[c >> 4]); + OUTCHAR(hexchars[c & 0xf]); + } + } else { + if (c == '\t') + OUTCHAR(c); + else { + OUTCHAR('^'); + OUTCHAR(c ^ 0x40); + } + } + } else + OUTCHAR(c); + } + continue; +#if PRINTPKT_SUPPORT + case 'P': /* print PPP packet */ + bufinfo.ptr = buf; + bufinfo.len = buflen + 1; + p = va_arg(args, unsigned char *); + n = va_arg(args, int); + ppp_format_packet(p, n, ppp_vslp_printer, &bufinfo); + buf = bufinfo.ptr; + buflen = bufinfo.len - 1; + continue; +#endif /* PRINTPKT_SUPPORT */ + case 'B': + p = va_arg(args, unsigned char *); + for (n = prec; n > 0; --n) { + c = *p++; + if (fillch == ' ') + OUTCHAR(' '); + OUTCHAR(hexchars[(c >> 4) & 0xf]); + OUTCHAR(hexchars[c & 0xf]); + } + continue; + default: + *buf++ = '%'; + if (c != '%') + --fmt; /* so %z outputs %z etc. */ + --buflen; + continue; + } + if (base != 0) { + str = num + sizeof(num); + *--str = 0; + while (str > num + neg) { + *--str = hexchars[val % base]; + val = val / base; + if (--prec <= 0 && val == 0) + break; + } + switch (neg) { + case 1: + *--str = '-'; + break; + case 2: + *--str = 'x'; + *--str = '0'; + break; + default: + break; + } + len = num + sizeof(num) - 1 - str; + } else { + len = strlen(str); + if (prec >= 0 && len > prec) + len = prec; + } + if (width > 0) { + if (width > buflen) + width = buflen; + if ((n = width - len) > 0) { + buflen -= n; + for (; n > 0; --n) + *buf++ = fillch; + } + } + if (len > buflen) + len = buflen; + memcpy(buf, str, len); + buf += len; + buflen -= len; + } + *buf = 0; + return buf - buf0; +} + +#if PRINTPKT_SUPPORT +/* + * vslp_printer - used in processing a %P format + */ +static void ppp_vslp_printer(void *arg, const char *fmt, ...) { + int n; + va_list pvar; + struct buffer_info *bi; + + va_start(pvar, fmt); + bi = (struct buffer_info *) arg; + n = ppp_vslprintf(bi->ptr, bi->len, fmt, pvar); + va_end(pvar); + + bi->ptr += n; + bi->len -= n; +} +#endif /* PRINTPKT_SUPPORT */ + +#if 0 /* UNUSED */ +/* + * log_packet - format a packet and log it. + */ + +void +log_packet(p, len, prefix, level) + u_char *p; + int len; + char *prefix; + int level; +{ + init_pr_log(prefix, level); + ppp_format_packet(p, len, pr_log, &level); + end_pr_log(); +} +#endif /* UNUSED */ + +#if PRINTPKT_SUPPORT +/* + * ppp_format_packet - make a readable representation of a packet, + * calling `printer(arg, format, ...)' to output it. 
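[Editor's note] Beyond the usual integer conversions, ppp_vslprintf() adds %I for a u32_t IPv4 address in network byte order, %v/%q for counted or quoted strings with unprintable bytes escaped, %B for hex dumps and %P for whole PPP packets. A hedged usage sketch (log_peer is an assumed helper, not lwIP API; the declarations live in the internal header):

#include "netif/ppp/ppp_impl.h" /* ppp_slprintf(), ppp_dbglog() */

static void log_peer(const char *peer_name, u32_t peer_ip_nbo)
{
  char line[64];

  /* %q quotes and escapes the name, %I prints a dotted-quad address. */
  ppp_slprintf(line, sizeof(line), "peer %q at %I", peer_name, peer_ip_nbo);
  ppp_dbglog("%s", line);  /* e.g. peer "modem" at 10.0.0.1 */
}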
+ */ +static void ppp_format_packet(const u_char *p, int len, + void (*printer) (void *, const char *, ...), void *arg) { + int i, n; + u_short proto; + const struct protent *protp; + + if (len >= 2) { + GETSHORT(proto, p); + len -= 2; + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (proto == protp->protocol) + break; + if (protp != NULL) { + printer(arg, "[%s", protp->name); + n = (*protp->printpkt)(p, len, printer, arg); + printer(arg, "]"); + p += n; + len -= n; + } else { + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (proto == (protp->protocol & ~0x8000)) + break; + if (protp != 0 && protp->data_name != 0) { + printer(arg, "[%s data]", protp->data_name); + if (len > 8) + printer(arg, "%.8B ...", p); + else + printer(arg, "%.*B", len, p); + len = 0; + } else + printer(arg, "[proto=0x%x]", proto); + } + } + + if (len > 32) + printer(arg, "%.32B ...", p); + else + printer(arg, "%.*B", len, p); +} +#endif /* PRINTPKT_SUPPORT */ + +#if 0 /* UNUSED */ +/* + * init_pr_log, end_pr_log - initialize and finish use of pr_log. + */ + +static char line[256]; /* line to be logged accumulated here */ +static char *linep; /* current pointer within line */ +static int llevel; /* level for logging */ + +void +init_pr_log(prefix, level) + const char *prefix; + int level; +{ + linep = line; + if (prefix != NULL) { + ppp_strlcpy(line, prefix, sizeof(line)); + linep = line + strlen(line); + } + llevel = level; +} + +void +end_pr_log() +{ + if (linep != line) { + *linep = 0; + ppp_log_write(llevel, line); + } +} + +/* + * pr_log - printer routine for outputting to log + */ +void +pr_log (void *arg, const char *fmt, ...) +{ + int l, n; + va_list pvar; + char *p, *eol; + char buf[256]; + + va_start(pvar, fmt); + n = ppp_vslprintf(buf, sizeof(buf), fmt, pvar); + va_end(pvar); + + p = buf; + eol = strchr(buf, '\n'); + if (linep != line) { + l = (eol == NULL)? n: eol - buf; + if (linep + l < line + sizeof(line)) { + if (l > 0) { + memcpy(linep, buf, l); + linep += l; + } + if (eol == NULL) + return; + p = eol + 1; + eol = strchr(p, '\n'); + } + *linep = 0; + ppp_log_write(llevel, line); + linep = line; + } + + while (eol != NULL) { + *eol = 0; + ppp_log_write(llevel, p); + p = eol + 1; + eol = strchr(p, '\n'); + } + + /* assumes sizeof(buf) <= sizeof(line) */ + l = buf + n - p; + if (l > 0) { + memcpy(line, p, n); + linep = line + l; + } +} +#endif /* UNUSED */ + +/* + * ppp_print_string - print a readable representation of a string using + * printer. + */ +void ppp_print_string(const u_char *p, int len, void (*printer) (void *, const char *, ...), void *arg) { + int c; + + printer(arg, "\""); + for (; len > 0; --len) { + c = *p++; + if (' ' <= c && c <= '~') { + if (c == '\\' || c == '"') + printer(arg, "\\"); + printer(arg, "%c", c); + } else { + switch (c) { + case '\n': + printer(arg, "\\n"); + break; + case '\r': + printer(arg, "\\r"); + break; + case '\t': + printer(arg, "\\t"); + break; + default: + printer(arg, "\\%.3o", (u8_t)c); + /* no break */ + } + } + } + printer(arg, "\""); +} + +/* + * ppp_logit - does the hard work for fatal et al. 
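[Editor's note] ppp_print_string() above (like upap_printpkt() earlier) emits its output through a caller-supplied printf-style callback rather than a FILE*. A minimal sketch of such a printer (console_printer is an assumed helper):

#include <stdarg.h>
#include <stdio.h>
#include "netif/ppp/ppp_impl.h" /* ppp_print_string() */

/* arg is an opaque context pointer (unused here); the rest is printf-style. */
static void console_printer(void *arg, const char *fmt, ...)
{
  va_list ap;
  (void)arg;
  va_start(ap, fmt);
  vprintf(fmt, ap);
  va_end(ap);
}

static void demo_print_string(void)
{
  /* Prints: "ab\ncd" -- the newline is rendered as the two characters \ and n. */
  ppp_print_string((const unsigned char *)"ab\ncd", 5, console_printer, NULL);
  printf("\n");
}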
+ */ +static void ppp_logit(int level, const char *fmt, va_list args) { + char buf[1024]; + + ppp_vslprintf(buf, sizeof(buf), fmt, args); + ppp_log_write(level, buf); +} + +static void ppp_log_write(int level, char *buf) { + LWIP_UNUSED_ARG(level); /* necessary if PPPDEBUG is defined to an empty function */ + LWIP_UNUSED_ARG(buf); + PPPDEBUG(level, ("%s\n", buf) ); +#if 0 + if (log_to_fd >= 0 && (level != LOG_DEBUG || debug)) { + int n = strlen(buf); + + if (n > 0 && buf[n-1] == '\n') + --n; + if (write(log_to_fd, buf, n) != n + || write(log_to_fd, "\n", 1) != 1) + log_to_fd = -1; + } +#endif +} + +/* + * ppp_fatal - log an error message and die horribly. + */ +void ppp_fatal(const char *fmt, ...) { + va_list pvar; + + va_start(pvar, fmt); + ppp_logit(LOG_ERR, fmt, pvar); + va_end(pvar); + + LWIP_ASSERT("ppp_fatal", 0); /* as promised */ +} + +/* + * ppp_error - log an error message. + */ +void ppp_error(const char *fmt, ...) { + va_list pvar; + + va_start(pvar, fmt); + ppp_logit(LOG_ERR, fmt, pvar); + va_end(pvar); +#if 0 /* UNUSED */ + ++error_count; +#endif /* UNUSED */ +} + +/* + * ppp_warn - log a warning message. + */ +void ppp_warn(const char *fmt, ...) { + va_list pvar; + + va_start(pvar, fmt); + ppp_logit(LOG_WARNING, fmt, pvar); + va_end(pvar); +} + +/* + * ppp_notice - log a notice-level message. + */ +void ppp_notice(const char *fmt, ...) { + va_list pvar; + + va_start(pvar, fmt); + ppp_logit(LOG_NOTICE, fmt, pvar); + va_end(pvar); +} + +/* + * ppp_info - log an informational message. + */ +void ppp_info(const char *fmt, ...) { + va_list pvar; + + va_start(pvar, fmt); + ppp_logit(LOG_INFO, fmt, pvar); + va_end(pvar); +} + +/* + * ppp_dbglog - log a debug message. + */ +void ppp_dbglog(const char *fmt, ...) { + va_list pvar; + + va_start(pvar, fmt); + ppp_logit(LOG_DEBUG, fmt, pvar); + va_end(pvar); +} + +#if PRINTPKT_SUPPORT +/* + * ppp_dump_packet - print out a packet in readable form if it is interesting. + * Assumes len >= PPP_HDRLEN. + */ +void ppp_dump_packet(ppp_pcb *pcb, const char *tag, unsigned char *p, int len) { + int proto; + + /* + * don't print data packets, i.e. IPv4, IPv6, VJ, and compressed packets. + */ + proto = (p[0] << 8) + p[1]; + if (proto < 0xC000 && (proto & ~0x8000) == proto) + return; + + /* + * don't print valid LCP echo request/reply packets if the link is up. + */ + if (proto == PPP_LCP && pcb->phase == PPP_PHASE_RUNNING && len >= 2 + HEADERLEN) { + unsigned char *lcp = p + 2; + int l = (lcp[2] << 8) + lcp[3]; + + if ((lcp[0] == ECHOREQ || lcp[0] == ECHOREP) + && l >= HEADERLEN && l <= len - 2) + return; + } + + ppp_dbglog("%s %P", tag, p, len); +} +#endif /* PRINTPKT_SUPPORT */ + +#if 0 /* Unused */ + +/* + * complete_read - read a full `count' bytes from fd, + * unless end-of-file or an error other than EINTR is encountered. + */ +ssize_t +complete_read(int fd, void *buf, size_t count) +{ + size_t done; + ssize_t nb; + char *ptr = buf; + + for (done = 0; done < count; ) { + nb = read(fd, ptr, count - done); + if (nb < 0) { + if (errno == EINTR) + continue; + return -1; + } + if (nb == 0) + break; + done += nb; + ptr += nb; + } + return done; +} + +/* Procedures for locking the serial device using a lock file. 
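[Editor's note] All of the ppp_fatal/ppp_error/ppp_warn/... wrappers above funnel into ppp_log_write(), which only produces output through PPPDEBUG. On a build like this one nothing is printed unless debugging is switched on in lwipopts.h; a sketch of the relevant options (assumed settings, adjust as needed):

/* lwipopts.h (excerpt) */
#define LWIP_DEBUG        1            /* master switch for LWIP_DEBUGF/PPPDEBUG     */
#define PPP_DEBUG         LWIP_DBG_ON  /* routes the ppp_* log calls to diag output  */
#define PRINTPKT_SUPPORT  1            /* compiles the %P / printpkt packet dumps    */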
*/ +#ifndef LOCK_DIR +#ifdef __linux__ +#define LOCK_DIR "/var/lock" +#else +#ifdef SVR4 +#define LOCK_DIR "/var/spool/locks" +#else +#define LOCK_DIR "/var/spool/lock" +#endif +#endif +#endif /* LOCK_DIR */ + +static char lock_file[MAXPATHLEN]; + +/* + * lock - create a lock file for the named device + */ +int +lock(dev) + char *dev; +{ +#ifdef LOCKLIB + int result; + + result = mklock (dev, (void *) 0); + if (result == 0) { + ppp_strlcpy(lock_file, dev, sizeof(lock_file)); + return 0; + } + + if (result > 0) + ppp_notice("Device %s is locked by pid %d", dev, result); + else + ppp_error("Can't create lock file %s", lock_file); + return -1; + +#else /* LOCKLIB */ + + char lock_buffer[12]; + int fd, pid, n; + +#ifdef SVR4 + struct stat sbuf; + + if (stat(dev, &sbuf) < 0) { + ppp_error("Can't get device number for %s: %m", dev); + return -1; + } + if ((sbuf.st_mode & S_IFMT) != S_IFCHR) { + ppp_error("Can't lock %s: not a character device", dev); + return -1; + } + ppp_slprintf(lock_file, sizeof(lock_file), "%s/LK.%03d.%03d.%03d", + LOCK_DIR, major(sbuf.st_dev), + major(sbuf.st_rdev), minor(sbuf.st_rdev)); +#else + char *p; + char lockdev[MAXPATHLEN]; + + if ((p = strstr(dev, "dev/")) != NULL) { + dev = p + 4; + strncpy(lockdev, dev, MAXPATHLEN-1); + lockdev[MAXPATHLEN-1] = 0; + while ((p = strrchr(lockdev, '/')) != NULL) { + *p = '_'; + } + dev = lockdev; + } else + if ((p = strrchr(dev, '/')) != NULL) + dev = p + 1; + + ppp_slprintf(lock_file, sizeof(lock_file), "%s/LCK..%s", LOCK_DIR, dev); +#endif + + while ((fd = open(lock_file, O_EXCL | O_CREAT | O_RDWR, 0644)) < 0) { + if (errno != EEXIST) { + ppp_error("Can't create lock file %s: %m", lock_file); + break; + } + + /* Read the lock file to find out who has the device locked. */ + fd = open(lock_file, O_RDONLY, 0); + if (fd < 0) { + if (errno == ENOENT) /* This is just a timing problem. */ + continue; + ppp_error("Can't open existing lock file %s: %m", lock_file); + break; + } +#ifndef LOCK_BINARY + n = read(fd, lock_buffer, 11); +#else + n = read(fd, &pid, sizeof(pid)); +#endif /* LOCK_BINARY */ + close(fd); + fd = -1; + if (n <= 0) { + ppp_error("Can't read pid from lock file %s", lock_file); + break; + } + + /* See if the process still exists. */ +#ifndef LOCK_BINARY + lock_buffer[n] = 0; + pid = atoi(lock_buffer); +#endif /* LOCK_BINARY */ + if (pid == getpid()) + return 1; /* somebody else locked it for us */ + if (pid == 0 + || (kill(pid, 0) == -1 && errno == ESRCH)) { + if (unlink (lock_file) == 0) { + ppp_notice("Removed stale lock on %s (pid %d)", dev, pid); + continue; + } + ppp_warn("Couldn't remove stale lock on %s", dev); + } else + ppp_notice("Device %s is locked by pid %d", dev, pid); + break; + } + + if (fd < 0) { + lock_file[0] = 0; + return -1; + } + + pid = getpid(); +#ifndef LOCK_BINARY + ppp_slprintf(lock_buffer, sizeof(lock_buffer), "%10d\n", pid); + write (fd, lock_buffer, 11); +#else + write(fd, &pid, sizeof (pid)); +#endif + close(fd); + return 0; + +#endif +} + +/* + * relock - called to update our lockfile when we are about to detach, + * thus changing our pid (we fork, the child carries on, and the parent dies). + * Note that this is called by the parent, with pid equal to the pid + * of the child. This avoids a potential race which would exist if + * we had the child rewrite the lockfile (the parent might die first, + * and another process could think the lock was stale if it checked + * between when the parent died and the child rewrote the lockfile). 
+ */ +int +relock(pid) + int pid; +{ +#ifdef LOCKLIB + /* XXX is there a way to do this? */ + return -1; +#else /* LOCKLIB */ + + int fd; + char lock_buffer[12]; + + if (lock_file[0] == 0) + return -1; + fd = open(lock_file, O_WRONLY, 0); + if (fd < 0) { + ppp_error("Couldn't reopen lock file %s: %m", lock_file); + lock_file[0] = 0; + return -1; + } + +#ifndef LOCK_BINARY + ppp_slprintf(lock_buffer, sizeof(lock_buffer), "%10d\n", pid); + write (fd, lock_buffer, 11); +#else + write(fd, &pid, sizeof(pid)); +#endif /* LOCK_BINARY */ + close(fd); + return 0; + +#endif /* LOCKLIB */ +} + +/* + * unlock - remove our lockfile + */ +void +unlock() +{ + if (lock_file[0]) { +#ifdef LOCKLIB + (void) rmlock(lock_file, (void *) 0); +#else + unlink(lock_file); +#endif + lock_file[0] = 0; + } +} + +#endif /* Unused */ + +#endif /* PPP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/vj.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/vj.c new file mode 100644 index 0000000..3fecba6 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/vj.c @@ -0,0 +1,685 @@ +/* + * Routines to compress and uncompess tcp packets (for transmission + * over low speed serial lines. + * + * Copyright (c) 1989 Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms are permitted + * provided that the above copyright notice and this paragraph are + * duplicated in all such forms and that any documentation, + * advertising materials, and other materials related to such + * distribution and use acknowledge that the software was developed + * by the University of California, Berkeley. The name of the + * University may not be used to endorse or promote products derived + * from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + * Van Jacobson (van@helios.ee.lbl.gov), Dec 31, 1989: + * Initial distribution. + * + * Modified June 1993 by Paul Mackerras, paulus@cs.anu.edu.au, + * so that the entire packet being decompressed doesn't have + * to be in contiguous memory (just the compressed header). + * + * Modified March 1998 by Guy Lancaster, glanca@gesn.com, + * for a 16 bit processor. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && VJ_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/pppdebug.h" + +#include "netif/ppp/vj.h" + +#include + +#if LINK_STATS +#define INCR(counter) ++comp->stats.counter +#else +#define INCR(counter) +#endif + +void +vj_compress_init(struct vjcompress *comp) +{ + u8_t i; + struct cstate *tstate = comp->tstate; + +#if MAX_SLOTS == 0 + memset((char *)comp, 0, sizeof(*comp)); +#endif + comp->maxSlotIndex = MAX_SLOTS - 1; + comp->compressSlot = 0; /* Disable slot ID compression by default. */ + for (i = MAX_SLOTS - 1; i > 0; --i) { + tstate[i].cs_id = i; + tstate[i].cs_next = &tstate[i - 1]; + } + tstate[0].cs_next = &tstate[MAX_SLOTS - 1]; + tstate[0].cs_id = 0; + comp->last_cs = &tstate[0]; + comp->last_recv = 255; + comp->last_xmit = 255; + comp->flags = VJF_TOSS; +} + + +/* ENCODE encodes a number that is known to be non-zero. ENCODEZ + * checks for zero (since zero has to be encoded in the long, 3 byte + * form). 
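[Editor's note] The macros defined just below implement the slcompress delta encoding: a difference in the range 1..255 is sent as a single byte, while zero or anything of 256 or more is sent as a zero escape byte followed by the 16-bit value, high byte first. A worked illustration (assumed deltas; cp must point into a buffer with at least 3 bytes free):

u8_t buf[3];
u8_t *cp;

cp = buf; ENCODE(5);    /* buf = { 0x05 }             : 1-byte form       */
cp = buf; ENCODE(300);  /* buf = { 0x00, 0x01, 0x2C } : 3-byte form       */
cp = buf; ENCODEZ(0);   /* buf = { 0x00, 0x00, 0x00 } : zero needs 3 bytes */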
+ */ +#define ENCODE(n) { \ + if ((u16_t)(n) >= 256) { \ + *cp++ = 0; \ + cp[1] = (u8_t)(n); \ + cp[0] = (u8_t)((n) >> 8); \ + cp += 2; \ + } else { \ + *cp++ = (u8_t)(n); \ + } \ +} +#define ENCODEZ(n) { \ + if ((u16_t)(n) >= 256 || (u16_t)(n) == 0) { \ + *cp++ = 0; \ + cp[1] = (u8_t)(n); \ + cp[0] = (u8_t)((n) >> 8); \ + cp += 2; \ + } else { \ + *cp++ = (u8_t)(n); \ + } \ +} + +#define DECODEL(f) { \ + if (*cp == 0) {\ + u32_t tmp_ = lwip_ntohl(f) + ((cp[1] << 8) | cp[2]); \ + (f) = lwip_htonl(tmp_); \ + cp += 3; \ + } else { \ + u32_t tmp_ = lwip_ntohl(f) + (u32_t)*cp++; \ + (f) = lwip_htonl(tmp_); \ + } \ +} + +#define DECODES(f) { \ + if (*cp == 0) {\ + u16_t tmp_ = lwip_ntohs(f) + (((u16_t)cp[1] << 8) | cp[2]); \ + (f) = lwip_htons(tmp_); \ + cp += 3; \ + } else { \ + u16_t tmp_ = lwip_ntohs(f) + (u16_t)*cp++; \ + (f) = lwip_htons(tmp_); \ + } \ +} + +#define DECODEU(f) { \ + if (*cp == 0) {\ + (f) = lwip_htons(((u16_t)cp[1] << 8) | cp[2]); \ + cp += 3; \ + } else { \ + (f) = lwip_htons((u16_t)*cp++); \ + } \ +} + +/* Helper structures for unaligned *u32_t and *u16_t accesses */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct vj_u32_t { + PACK_STRUCT_FIELD(u32_t v); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct vj_u16_t { + PACK_STRUCT_FIELD(u16_t v); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* + * vj_compress_tcp - Attempt to do Van Jacobson header compression on a + * packet. This assumes that nb and comp are not null and that the first + * buffer of the chain contains a valid IP header. + * Return the VJ type code indicating whether or not the packet was + * compressed. + */ +u8_t +vj_compress_tcp(struct vjcompress *comp, struct pbuf **pb) +{ + struct pbuf *np = *pb; + struct ip_hdr *ip = (struct ip_hdr *)np->payload; + struct cstate *cs = comp->last_cs->cs_next; + u16_t ilen = IPH_HL(ip); + u16_t hlen; + struct tcp_hdr *oth; + struct tcp_hdr *th; + u16_t deltaS, deltaA = 0; + u32_t deltaL; + u32_t changes = 0; + u8_t new_seq[16]; + u8_t *cp = new_seq; + + /* + * Check that the packet is IP proto TCP. + */ + if (IPH_PROTO(ip) != IP_PROTO_TCP) { + return (TYPE_IP); + } + + /* + * Bail if this is an IP fragment or if the TCP packet isn't + * `compressible' (i.e., ACK isn't set or some other control bit is + * set). + */ + if ((IPH_OFFSET(ip) & PP_HTONS(0x3fff)) || np->tot_len < 40) { + return (TYPE_IP); + } + th = (struct tcp_hdr *)&((struct vj_u32_t*)ip)[ilen]; + if ((TCPH_FLAGS(th) & (TCP_SYN|TCP_FIN|TCP_RST|TCP_ACK)) != TCP_ACK) { + return (TYPE_IP); + } + + /* Check that the TCP/IP headers are contained in the first buffer. */ + hlen = ilen + TCPH_HDRLEN(th); + hlen <<= 2; + if (np->len < hlen) { + PPPDEBUG(LOG_INFO, ("vj_compress_tcp: header len %d spans buffers\n", hlen)); + return (TYPE_IP); + } + + /* TCP stack requires that we don't change the packet payload, therefore we copy + * the whole packet before compression. */ + np = pbuf_clone(PBUF_RAW, PBUF_RAM, *pb); + if (!np) { + return (TYPE_IP); + } + + *pb = np; + ip = (struct ip_hdr *)np->payload; + + /* + * Packet is compressible -- we're going to send either a + * COMPRESSED_TCP or UNCOMPRESSED_TCP packet. Either way we need + * to locate (or create) the connection state. 
Special case the + * most recently used connection since it's most likely to be used + * again & we don't have to do any reordering if it's used. + */ + INCR(vjs_packets); + if (!ip4_addr_cmp(&ip->src, &cs->cs_ip.src) + || !ip4_addr_cmp(&ip->dest, &cs->cs_ip.dest) + || (*(struct vj_u32_t*)th).v != (((struct vj_u32_t*)&cs->cs_ip)[IPH_HL(&cs->cs_ip)]).v) { + /* + * Wasn't the first -- search for it. + * + * States are kept in a circularly linked list with + * last_cs pointing to the end of the list. The + * list is kept in lru order by moving a state to the + * head of the list whenever it is referenced. Since + * the list is short and, empirically, the connection + * we want is almost always near the front, we locate + * states via linear search. If we don't find a state + * for the datagram, the oldest state is (re-)used. + */ + struct cstate *lcs; + struct cstate *lastcs = comp->last_cs; + + do { + lcs = cs; cs = cs->cs_next; + INCR(vjs_searches); + if (ip4_addr_cmp(&ip->src, &cs->cs_ip.src) + && ip4_addr_cmp(&ip->dest, &cs->cs_ip.dest) + && (*(struct vj_u32_t*)th).v == (((struct vj_u32_t*)&cs->cs_ip)[IPH_HL(&cs->cs_ip)]).v) { + goto found; + } + } while (cs != lastcs); + + /* + * Didn't find it -- re-use oldest cstate. Send an + * uncompressed packet that tells the other side what + * connection number we're using for this conversation. + * Note that since the state list is circular, the oldest + * state points to the newest and we only need to set + * last_cs to update the lru linkage. + */ + INCR(vjs_misses); + comp->last_cs = lcs; + goto uncompressed; + + found: + /* + * Found it -- move to the front on the connection list. + */ + if (cs == lastcs) { + comp->last_cs = lcs; + } else { + lcs->cs_next = cs->cs_next; + cs->cs_next = lastcs->cs_next; + lastcs->cs_next = cs; + } + } + + oth = (struct tcp_hdr *)&((struct vj_u32_t*)&cs->cs_ip)[ilen]; + deltaS = ilen; + + /* + * Make sure that only what we expect to change changed. The first + * line of the `if' checks the IP protocol version, header length & + * type of service. The 2nd line checks the "Don't fragment" bit. + * The 3rd line checks the time-to-live and protocol (the protocol + * check is unnecessary but costless). The 4th line checks the TCP + * header length. The 5th line checks IP options, if any. The 6th + * line checks TCP options, if any. If any of these things are + * different between the previous & current datagram, we send the + * current datagram `uncompressed'. + */ + if ((((struct vj_u16_t*)ip)[0]).v != (((struct vj_u16_t*)&cs->cs_ip)[0]).v + || (((struct vj_u16_t*)ip)[3]).v != (((struct vj_u16_t*)&cs->cs_ip)[3]).v + || (((struct vj_u16_t*)ip)[4]).v != (((struct vj_u16_t*)&cs->cs_ip)[4]).v + || TCPH_HDRLEN(th) != TCPH_HDRLEN(oth) + || (deltaS > 5 && BCMP(ip + 1, &cs->cs_ip + 1, (deltaS - 5) << 2)) + || (TCPH_HDRLEN(th) > 5 && BCMP(th + 1, oth + 1, (TCPH_HDRLEN(th) - 5) << 2))) { + goto uncompressed; + } + + /* + * Figure out which of the changing fields changed. The + * receiver expects changes in the order: urgent, window, + * ack, seq (the order minimizes the number of temporaries + * needed in this section of code). + */ + if (TCPH_FLAGS(th) & TCP_URG) { + deltaS = lwip_ntohs(th->urgp); + ENCODEZ(deltaS); + changes |= NEW_U; + } else if (th->urgp != oth->urgp) { + /* argh! URG not set but urp changed -- a sensible + * implementation should never do this but RFC793 + * doesn't prohibit the change so we have to deal + * with it. 
*/ + goto uncompressed; + } + + if ((deltaS = (u16_t)(lwip_ntohs(th->wnd) - lwip_ntohs(oth->wnd))) != 0) { + ENCODE(deltaS); + changes |= NEW_W; + } + + if ((deltaL = lwip_ntohl(th->ackno) - lwip_ntohl(oth->ackno)) != 0) { + if (deltaL > 0xffff) { + goto uncompressed; + } + deltaA = (u16_t)deltaL; + ENCODE(deltaA); + changes |= NEW_A; + } + + if ((deltaL = lwip_ntohl(th->seqno) - lwip_ntohl(oth->seqno)) != 0) { + if (deltaL > 0xffff) { + goto uncompressed; + } + deltaS = (u16_t)deltaL; + ENCODE(deltaS); + changes |= NEW_S; + } + + switch(changes) { + case 0: + /* + * Nothing changed. If this packet contains data and the + * last one didn't, this is probably a data packet following + * an ack (normal on an interactive connection) and we send + * it compressed. Otherwise it's probably a retransmit, + * retransmitted ack or window probe. Send it uncompressed + * in case the other side missed the compressed version. + */ + if (IPH_LEN(ip) != IPH_LEN(&cs->cs_ip) && + lwip_ntohs(IPH_LEN(&cs->cs_ip)) == hlen) { + break; + } + /* no break */ + /* fall through */ + + case SPECIAL_I: + case SPECIAL_D: + /* + * actual changes match one of our special case encodings -- + * send packet uncompressed. + */ + goto uncompressed; + + case NEW_S|NEW_A: + if (deltaS == deltaA && deltaS == lwip_ntohs(IPH_LEN(&cs->cs_ip)) - hlen) { + /* special case for echoed terminal traffic */ + changes = SPECIAL_I; + cp = new_seq; + } + break; + + case NEW_S: + if (deltaS == lwip_ntohs(IPH_LEN(&cs->cs_ip)) - hlen) { + /* special case for data xfer */ + changes = SPECIAL_D; + cp = new_seq; + } + break; + default: + break; + } + + deltaS = (u16_t)(lwip_ntohs(IPH_ID(ip)) - lwip_ntohs(IPH_ID(&cs->cs_ip))); + if (deltaS != 1) { + ENCODEZ(deltaS); + changes |= NEW_I; + } + if (TCPH_FLAGS(th) & TCP_PSH) { + changes |= TCP_PUSH_BIT; + } + /* + * Grab the cksum before we overwrite it below. Then update our + * state with this packet's header. + */ + deltaA = lwip_ntohs(th->chksum); + MEMCPY(&cs->cs_ip, ip, hlen); + + /* + * We want to use the original packet as our compressed packet. + * (cp - new_seq) is the number of bytes we need for compressed + * sequence numbers. In addition we need one byte for the change + * mask, one for the connection id and two for the tcp checksum. + * So, (cp - new_seq) + 4 bytes of header are needed. hlen is how + * many bytes of the original packet to toss so subtract the two to + * get the new packet size. + */ + deltaS = (u16_t)(cp - new_seq); + if (!comp->compressSlot || comp->last_xmit != cs->cs_id) { + comp->last_xmit = cs->cs_id; + hlen -= deltaS + 4; + if (pbuf_remove_header(np, hlen)){ + /* Can we cope with this failing? Just assert for now */ + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + } + cp = (u8_t*)np->payload; + *cp++ = (u8_t)(changes | NEW_C); + *cp++ = cs->cs_id; + } else { + hlen -= deltaS + 3; + if (pbuf_remove_header(np, hlen)) { + /* Can we cope with this failing? Just assert for now */ + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + } + cp = (u8_t*)np->payload; + *cp++ = (u8_t)changes; + } + *cp++ = (u8_t)(deltaA >> 8); + *cp++ = (u8_t)deltaA; + MEMCPY(cp, new_seq, deltaS); + INCR(vjs_compressed); + return (TYPE_COMPRESSED_TCP); + + /* + * Update connection state cs & send uncompressed packet (that is, + * a regular ip/tcp packet but with the 'conversation id' we hope + * to use on future compressed packets in the protocol field). 
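[Editor's note] Putting the pieces together, the compressed header that replaces the 40-byte TCP/IP header is the change mask, an optional connection id (only when NEW_C is set), the original TCP checksum and then the encoded deltas in the order urgent, window, ack, seq, IP id. A hedged example (assumed traffic): a pure ACK on the connection used last, with the ACK number and the IP id each advancing by one, shrinks to four bytes:

static const unsigned char vj_compressed_hdr_example[] = {
  0x04,        /* change mask: NEW_A only; no connection id byte follows  */
  0x1A, 0x2B,  /* original TCP checksum, high byte first (example value)  */
  0x01         /* ENCODE(1): the ACK delta; an IP id delta of 1 is implicit */
};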
+ */ +uncompressed: + MEMCPY(&cs->cs_ip, ip, hlen); + IPH_PROTO_SET(ip, cs->cs_id); + comp->last_xmit = cs->cs_id; + return (TYPE_UNCOMPRESSED_TCP); +} + +/* + * Called when we may have missed a packet. + */ +void +vj_uncompress_err(struct vjcompress *comp) +{ + comp->flags |= VJF_TOSS; + INCR(vjs_errorin); +} + +/* + * "Uncompress" a packet of type TYPE_UNCOMPRESSED_TCP. + * Return 0 on success, -1 on failure. + */ +int +vj_uncompress_uncomp(struct pbuf *nb, struct vjcompress *comp) +{ + u32_t hlen; + struct cstate *cs; + struct ip_hdr *ip; + + ip = (struct ip_hdr *)nb->payload; + hlen = IPH_HL(ip) << 2; + if (IPH_PROTO(ip) >= MAX_SLOTS + || hlen + sizeof(struct tcp_hdr) > nb->len + || (hlen += TCPH_HDRLEN_BYTES((struct tcp_hdr *)&((char *)ip)[hlen])) + > nb->len + || hlen > MAX_HDR) { + PPPDEBUG(LOG_INFO, ("vj_uncompress_uncomp: bad cid=%d, hlen=%d buflen=%d\n", + IPH_PROTO(ip), hlen, nb->len)); + vj_uncompress_err(comp); + return -1; + } + cs = &comp->rstate[comp->last_recv = IPH_PROTO(ip)]; + comp->flags &=~ VJF_TOSS; + IPH_PROTO_SET(ip, IP_PROTO_TCP); + /* copy from/to bigger buffers checked above instead of cs->cs_ip and ip + just to help static code analysis to see this is correct ;-) */ + MEMCPY(&cs->cs_hdr, nb->payload, hlen); + cs->cs_hlen = (u16_t)hlen; + INCR(vjs_uncompressedin); + return 0; +} + +/* + * Uncompress a packet of type TYPE_COMPRESSED_TCP. + * The packet is composed of a buffer chain and the first buffer + * must contain an accurate chain length. + * The first buffer must include the entire compressed TCP/IP header. + * This procedure replaces the compressed header with the uncompressed + * header and returns the length of the VJ header. + */ +int +vj_uncompress_tcp(struct pbuf **nb, struct vjcompress *comp) +{ + u8_t *cp; + struct tcp_hdr *th; + struct cstate *cs; + struct vj_u16_t *bp; + struct pbuf *n0 = *nb; + u32_t tmp; + u32_t vjlen, hlen, changes; + + INCR(vjs_compressedin); + cp = (u8_t*)n0->payload; + changes = *cp++; + if (changes & NEW_C) { + /* + * Make sure the state index is in range, then grab the state. + * If we have a good state index, clear the 'discard' flag. + */ + if (*cp >= MAX_SLOTS) { + PPPDEBUG(LOG_INFO, ("vj_uncompress_tcp: bad cid=%d\n", *cp)); + goto bad; + } + + comp->flags &=~ VJF_TOSS; + comp->last_recv = *cp++; + } else { + /* + * this packet has an implicit state index. If we've + * had a line error since the last time we got an + * explicit state index, we have to toss the packet. + */ + if (comp->flags & VJF_TOSS) { + PPPDEBUG(LOG_INFO, ("vj_uncompress_tcp: tossing\n")); + INCR(vjs_tossed); + return (-1); + } + } + cs = &comp->rstate[comp->last_recv]; + hlen = IPH_HL(&cs->cs_ip) << 2; + th = (struct tcp_hdr *)&((u8_t*)&cs->cs_ip)[hlen]; + th->chksum = lwip_htons((*cp << 8) | cp[1]); + cp += 2; + if (changes & TCP_PUSH_BIT) { + TCPH_SET_FLAG(th, TCP_PSH); + } else { + TCPH_UNSET_FLAG(th, TCP_PSH); + } + + switch (changes & SPECIALS_MASK) { + case SPECIAL_I: + { + u32_t i = lwip_ntohs(IPH_LEN(&cs->cs_ip)) - cs->cs_hlen; + /* some compilers can't nest inline assembler.. */ + tmp = lwip_ntohl(th->ackno) + i; + th->ackno = lwip_htonl(tmp); + tmp = lwip_ntohl(th->seqno) + i; + th->seqno = lwip_htonl(tmp); + } + break; + + case SPECIAL_D: + /* some compilers can't nest inline assembler.. 
*/ + tmp = lwip_ntohl(th->seqno) + lwip_ntohs(IPH_LEN(&cs->cs_ip)) - cs->cs_hlen; + th->seqno = lwip_htonl(tmp); + break; + + default: + if (changes & NEW_U) { + TCPH_SET_FLAG(th, TCP_URG); + DECODEU(th->urgp); + } else { + TCPH_UNSET_FLAG(th, TCP_URG); + } + if (changes & NEW_W) { + DECODES(th->wnd); + } + if (changes & NEW_A) { + DECODEL(th->ackno); + } + if (changes & NEW_S) { + DECODEL(th->seqno); + } + break; + } + if (changes & NEW_I) { + DECODES(cs->cs_ip._id); + } else { + IPH_ID_SET(&cs->cs_ip, lwip_ntohs(IPH_ID(&cs->cs_ip)) + 1); + IPH_ID_SET(&cs->cs_ip, lwip_htons(IPH_ID(&cs->cs_ip))); + } + + /* + * At this point, cp points to the first byte of data in the + * packet. Fill in the IP total length and update the IP + * header checksum. + */ + vjlen = (u16_t)(cp - (u8_t*)n0->payload); + if (n0->len < vjlen) { + /* + * We must have dropped some characters (crc should detect + * this but the old slip framing won't) + */ + PPPDEBUG(LOG_INFO, ("vj_uncompress_tcp: head buffer %d too short %d\n", + n0->len, vjlen)); + goto bad; + } + +#if BYTE_ORDER == LITTLE_ENDIAN + tmp = n0->tot_len - vjlen + cs->cs_hlen; + IPH_LEN_SET(&cs->cs_ip, lwip_htons((u16_t)tmp)); +#else + IPH_LEN_SET(&cs->cs_ip, lwip_htons(n0->tot_len - vjlen + cs->cs_hlen)); +#endif + + /* recompute the ip header checksum */ + bp = (struct vj_u16_t*) &cs->cs_ip; + IPH_CHKSUM_SET(&cs->cs_ip, 0); + for (tmp = 0; hlen > 0; hlen -= 2) { + tmp += (*bp++).v; + } + tmp = (tmp & 0xffff) + (tmp >> 16); + tmp = (tmp & 0xffff) + (tmp >> 16); + IPH_CHKSUM_SET(&cs->cs_ip, (u16_t)(~tmp)); + + /* Remove the compressed header and prepend the uncompressed header. */ + if (pbuf_remove_header(n0, vjlen)) { + /* Can we cope with this failing? Just assert for now */ + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + goto bad; + } + + if(LWIP_MEM_ALIGN(n0->payload) != n0->payload) { + struct pbuf *np; + +#if IP_FORWARD + /* If IP forwarding is enabled we are using a PBUF_LINK packet type so + * the packet is being allocated with enough header space to be + * forwarded (to Ethernet for example). + */ + np = pbuf_alloc(PBUF_LINK, n0->len + cs->cs_hlen, PBUF_POOL); +#else /* IP_FORWARD */ + np = pbuf_alloc(PBUF_RAW, n0->len + cs->cs_hlen, PBUF_POOL); +#endif /* IP_FORWARD */ + if(!np) { + PPPDEBUG(LOG_WARNING, ("vj_uncompress_tcp: realign failed\n")); + goto bad; + } + + if (pbuf_remove_header(np, cs->cs_hlen)) { + /* Can we cope with this failing? 
Just assert for now */ + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + goto bad; + } + + pbuf_take(np, n0->payload, n0->len); + + if(n0->next) { + pbuf_chain(np, n0->next); + pbuf_dechain(n0); + } + pbuf_free(n0); + n0 = np; + } + + if (pbuf_add_header(n0, cs->cs_hlen)) { + struct pbuf *np; + + LWIP_ASSERT("vj_uncompress_tcp: cs->cs_hlen <= PBUF_POOL_BUFSIZE", cs->cs_hlen <= PBUF_POOL_BUFSIZE); + np = pbuf_alloc(PBUF_RAW, cs->cs_hlen, PBUF_POOL); + if(!np) { + PPPDEBUG(LOG_WARNING, ("vj_uncompress_tcp: prepend failed\n")); + goto bad; + } + pbuf_cat(np, n0); + n0 = np; + } + LWIP_ASSERT("n0->len >= cs->cs_hlen", n0->len >= cs->cs_hlen); + MEMCPY(n0->payload, &cs->cs_ip, cs->cs_hlen); + + *nb = n0; + + return vjlen; + +bad: + vj_uncompress_err(comp); + return (-1); +} + +#endif /* PPP_SUPPORT && VJ_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/slipif.c b/Middlewares/Third_Party/LwIP/src/netif/slipif.c new file mode 100644 index 0000000..9b175dc --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/slipif.c @@ -0,0 +1,558 @@ +/** + * @file + * SLIP Interface + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the Institute nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is built upon the file: src/arch/rtxc/netif/sioslip.c + * + * Author: Magnus Ivarsson + * Simon Goldschmidt + */ + + +/** + * @defgroup slipif SLIP + * @ingroup netifs + * + * This is an arch independent SLIP netif. The specific serial hooks must be + * provided by another file. They are sio_open, sio_read/sio_tryread and sio_send + * + * Usage: This netif can be used in three ways:\n + * 1) For NO_SYS==0, an RX thread can be used which blocks on sio_read() + * until data is received.\n + * 2) In your main loop, call slipif_poll() to check for new RX bytes, + * completed packets are fed into netif->input().\n + * 3) Call slipif_received_byte[s]() from your serial RX ISR and + * slipif_process_rxqueue() from your main loop. 
ISR level decodes + * packets and puts completed packets on a queue which is fed into + * the stack from the main loop (needs SYS_LIGHTWEIGHT_PROT for + * pbuf_alloc to work on ISR level!). + * + */ + +#include "netif/slipif.h" +#include "lwip/opt.h" + +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/stats.h" +#include "lwip/snmp.h" +#include "lwip/sys.h" +#include "lwip/sio.h" + +#define SLIP_END 0xC0 /* 0300: start and end of every packet */ +#define SLIP_ESC 0xDB /* 0333: escape start (one byte escaped data follows) */ +#define SLIP_ESC_END 0xDC /* 0334: following escape: original byte is 0xC0 (END) */ +#define SLIP_ESC_ESC 0xDD /* 0335: following escape: original byte is 0xDB (ESC) */ + +/** Maximum packet size that is received by this netif */ +#ifndef SLIP_MAX_SIZE +#define SLIP_MAX_SIZE 1500 +#endif + +/** Define this to the interface speed for SNMP + * (sio_fd is the sio_fd_t returned by sio_open). + * The default value of zero means 'unknown'. + */ +#ifndef SLIP_SIO_SPEED +#define SLIP_SIO_SPEED(sio_fd) 0 +#endif + +enum slipif_recv_state { + SLIP_RECV_NORMAL, + SLIP_RECV_ESCAPE +}; + +struct slipif_priv { + sio_fd_t sd; + /* q is the whole pbuf chain for a packet, p is the current pbuf in the chain */ + struct pbuf *p, *q; + u8_t state; + u16_t i, recved; +#if SLIP_RX_FROM_ISR + struct pbuf *rxpackets; +#endif +}; + +/** + * Send a pbuf doing the necessary SLIP encapsulation + * + * Uses the serial layer's sio_send() + * + * @param netif the lwip network interface structure for this slipif + * @param p the pbuf chain packet to send + * @return always returns ERR_OK since the serial layer does not provide return values + */ +static err_t +slipif_output(struct netif *netif, struct pbuf *p) +{ + struct slipif_priv *priv; + struct pbuf *q; + u16_t i; + u8_t c; + + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("netif->state != NULL", (netif->state != NULL)); + LWIP_ASSERT("p != NULL", (p != NULL)); + + LWIP_DEBUGF(SLIP_DEBUG, ("slipif_output: sending %"U16_F" bytes\n", p->tot_len)); + priv = (struct slipif_priv *)netif->state; + + /* Send pbuf out on the serial I/O device. */ + /* Start with packet delimiter. */ + sio_send(SLIP_END, priv->sd); + + for (q = p; q != NULL; q = q->next) { + for (i = 0; i < q->len; i++) { + c = ((u8_t *)q->payload)[i]; + switch (c) { + case SLIP_END: + /* need to escape this byte (0xC0 -> 0xDB, 0xDC) */ + sio_send(SLIP_ESC, priv->sd); + sio_send(SLIP_ESC_END, priv->sd); + break; + case SLIP_ESC: + /* need to escape this byte (0xDB -> 0xDB, 0xDD) */ + sio_send(SLIP_ESC, priv->sd); + sio_send(SLIP_ESC_ESC, priv->sd); + break; + default: + /* normal byte - no need for escaping */ + sio_send(c, priv->sd); + break; + } + } + } + /* End with packet delimiter. 
*/ + sio_send(SLIP_END, priv->sd); + return ERR_OK; +} + +#if LWIP_IPV4 +/** + * Send a pbuf doing the necessary SLIP encapsulation + * + * Uses the serial layer's sio_send() + * + * @param netif the lwip network interface structure for this slipif + * @param p the pbuf chain packet to send + * @param ipaddr the ip address to send the packet to (not used for slipif) + * @return always returns ERR_OK since the serial layer does not provide return values + */ +static err_t +slipif_output_v4(struct netif *netif, struct pbuf *p, const ip4_addr_t *ipaddr) +{ + LWIP_UNUSED_ARG(ipaddr); + return slipif_output(netif, p); +} +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +/** + * Send a pbuf doing the necessary SLIP encapsulation + * + * Uses the serial layer's sio_send() + * + * @param netif the lwip network interface structure for this slipif + * @param p the pbuf chain packet to send + * @param ipaddr the ip address to send the packet to (not used for slipif) + * @return always returns ERR_OK since the serial layer does not provide return values + */ +static err_t +slipif_output_v6(struct netif *netif, struct pbuf *p, const ip6_addr_t *ipaddr) +{ + LWIP_UNUSED_ARG(ipaddr); + return slipif_output(netif, p); +} +#endif /* LWIP_IPV6 */ + +/** + * Handle the incoming SLIP stream character by character + * + * @param netif the lwip network interface structure for this slipif + * @param c received character (multiple calls to this function will + * return a complete packet, NULL is returned before - used for polling) + * @return The IP packet when SLIP_END is received + */ +static struct pbuf * +slipif_rxbyte(struct netif *netif, u8_t c) +{ + struct slipif_priv *priv; + struct pbuf *t; + + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("netif->state != NULL", (netif->state != NULL)); + + priv = (struct slipif_priv *)netif->state; + + switch (priv->state) { + case SLIP_RECV_NORMAL: + switch (c) { + case SLIP_END: + if (priv->recved > 0) { + /* Received whole packet. */ + /* Trim the pbuf to the size of the received packet. */ + pbuf_realloc(priv->q, priv->recved); + + LINK_STATS_INC(link.recv); + + LWIP_DEBUGF(SLIP_DEBUG, ("slipif: Got packet (%"U16_F" bytes)\n", priv->recved)); + t = priv->q; + priv->p = priv->q = NULL; + priv->i = priv->recved = 0; + return t; + } + return NULL; + case SLIP_ESC: + priv->state = SLIP_RECV_ESCAPE; + return NULL; + default: + break; + } /* end switch (c) */ + break; + case SLIP_RECV_ESCAPE: + /* un-escape END or ESC bytes, leave other bytes + (although that would be a protocol error) */ + switch (c) { + case SLIP_ESC_END: + c = SLIP_END; + break; + case SLIP_ESC_ESC: + c = SLIP_ESC; + break; + default: + break; + } + priv->state = SLIP_RECV_NORMAL; + break; + default: + break; + } /* end switch (priv->state) */ + + /* byte received, packet not yet completely received */ + if (priv->p == NULL) { + /* allocate a new pbuf */ + LWIP_DEBUGF(SLIP_DEBUG, ("slipif_input: alloc\n")); + priv->p = pbuf_alloc(PBUF_LINK, (PBUF_POOL_BUFSIZE - PBUF_LINK_HLEN - PBUF_LINK_ENCAPSULATION_HLEN), PBUF_POOL); + + if (priv->p == NULL) { + LINK_STATS_INC(link.drop); + LWIP_DEBUGF(SLIP_DEBUG, ("slipif_input: no new pbuf! 
(DROP)\n")); + /* don't process any further since we got no pbuf to receive to */ + return NULL; + } + + if (priv->q != NULL) { + /* 'chain' the pbuf to the existing chain */ + pbuf_cat(priv->q, priv->p); + } else { + /* p is the first pbuf in the chain */ + priv->q = priv->p; + } + } + + /* this automatically drops bytes if > SLIP_MAX_SIZE */ + if ((priv->p != NULL) && (priv->recved <= SLIP_MAX_SIZE)) { + ((u8_t *)priv->p->payload)[priv->i] = c; + priv->recved++; + priv->i++; + if (priv->i >= priv->p->len) { + /* on to the next pbuf */ + priv->i = 0; + if (priv->p->next != NULL && priv->p->next->len > 0) { + /* p is a chain, on to the next in the chain */ + priv->p = priv->p->next; + } else { + /* p is a single pbuf, set it to NULL so next time a new + * pbuf is allocated */ + priv->p = NULL; + } + } + } + return NULL; +} + +/** Like slipif_rxbyte, but passes completed packets to netif->input + * + * @param netif The lwip network interface structure for this slipif + * @param c received character + */ +static void +slipif_rxbyte_input(struct netif *netif, u8_t c) +{ + struct pbuf *p; + p = slipif_rxbyte(netif, c); + if (p != NULL) { + if (netif->input(p, netif) != ERR_OK) { + pbuf_free(p); + } + } +} + +#if SLIP_USE_RX_THREAD +/** + * The SLIP input thread. + * + * Feed the IP layer with incoming packets + * + * @param nf the lwip network interface structure for this slipif + */ +static void +slipif_loop_thread(void *nf) +{ + u8_t c; + struct netif *netif = (struct netif *)nf; + struct slipif_priv *priv = (struct slipif_priv *)netif->state; + + while (1) { + if (sio_read(priv->sd, &c, 1) > 0) { + slipif_rxbyte_input(netif, c); + } + } +} +#endif /* SLIP_USE_RX_THREAD */ + +/** + * @ingroup slipif + * SLIP netif initialization + * + * Call the arch specific sio_open and remember + * the opened device in the state field of the netif. + * + * @param netif the lwip network interface structure for this slipif + * @return ERR_OK if serial line could be opened, + * ERR_MEM if no memory could be allocated, + * ERR_IF is serial line couldn't be opened + * + * @note If netif->state is interpreted as an u8_t serial port number. + * + */ +err_t +slipif_init(struct netif *netif) +{ + struct slipif_priv *priv; + u8_t sio_num; + + LWIP_ASSERT("slipif needs an input callback", netif->input != NULL); + + /* netif->state contains serial port number */ + sio_num = LWIP_PTR_NUMERIC_CAST(u8_t, netif->state); + + LWIP_DEBUGF(SLIP_DEBUG, ("slipif_init: netif->num=%"U16_F"\n", (u16_t)sio_num)); + + /* Allocate private data */ + priv = (struct slipif_priv *)mem_malloc(sizeof(struct slipif_priv)); + if (!priv) { + return ERR_MEM; + } + + netif->name[0] = 's'; + netif->name[1] = 'l'; +#if LWIP_IPV4 + netif->output = slipif_output_v4; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + netif->output_ip6 = slipif_output_v6; +#endif /* LWIP_IPV6 */ + netif->mtu = SLIP_MAX_SIZE; + + /* Try to open the serial port. */ + priv->sd = sio_open(sio_num); + if (!priv->sd) { + /* Opening the serial port failed. */ + mem_free(priv); + return ERR_IF; + } + + /* Initialize private data */ + priv->p = NULL; + priv->q = NULL; + priv->state = SLIP_RECV_NORMAL; + priv->i = 0; + priv->recved = 0; +#if SLIP_RX_FROM_ISR + priv->rxpackets = NULL; +#endif + + netif->state = priv; + + /* initialize the snmp variables and counters inside the struct netif */ + MIB2_INIT_NETIF(netif, snmp_ifType_slip, SLIP_SIO_SPEED(priv->sd)); + +#if SLIP_USE_RX_THREAD + /* Create a thread to poll the serial line. 
*/ + sys_thread_new(SLIPIF_THREAD_NAME, slipif_loop_thread, netif, + SLIPIF_THREAD_STACKSIZE, SLIPIF_THREAD_PRIO); +#endif /* SLIP_USE_RX_THREAD */ + return ERR_OK; +} + +/** + * @ingroup slipif + * Polls the serial device and feeds the IP layer with incoming packets. + * + * @param netif The lwip network interface structure for this slipif + */ +void +slipif_poll(struct netif *netif) +{ + u8_t c; + struct slipif_priv *priv; + + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("netif->state != NULL", (netif->state != NULL)); + + priv = (struct slipif_priv *)netif->state; + + while (sio_tryread(priv->sd, &c, 1) > 0) { + slipif_rxbyte_input(netif, c); + } +} + +#if SLIP_RX_FROM_ISR +/** + * @ingroup slipif + * Feeds the IP layer with incoming packets that were receive + * + * @param netif The lwip network interface structure for this slipif + */ +void +slipif_process_rxqueue(struct netif *netif) +{ + struct slipif_priv *priv; + SYS_ARCH_DECL_PROTECT(old_level); + + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("netif->state != NULL", (netif->state != NULL)); + + priv = (struct slipif_priv *)netif->state; + + SYS_ARCH_PROTECT(old_level); + while (priv->rxpackets != NULL) { + struct pbuf *p = priv->rxpackets; +#if SLIP_RX_QUEUE + /* dequeue packet */ + struct pbuf *q = p; + while ((q->len != q->tot_len) && (q->next != NULL)) { + q = q->next; + } + priv->rxpackets = q->next; + q->next = NULL; +#else /* SLIP_RX_QUEUE */ + priv->rxpackets = NULL; +#endif /* SLIP_RX_QUEUE */ + SYS_ARCH_UNPROTECT(old_level); + if (netif->input(p, netif) != ERR_OK) { + pbuf_free(p); + } + SYS_ARCH_PROTECT(old_level); + } + SYS_ARCH_UNPROTECT(old_level); +} + +/** Like slipif_rxbyte, but queues completed packets. + * + * @param netif The lwip network interface structure for this slipif + * @param data Received serial byte + */ +static void +slipif_rxbyte_enqueue(struct netif *netif, u8_t data) +{ + struct pbuf *p; + struct slipif_priv *priv = (struct slipif_priv *)netif->state; + SYS_ARCH_DECL_PROTECT(old_level); + + p = slipif_rxbyte(netif, data); + if (p != NULL) { + SYS_ARCH_PROTECT(old_level); + if (priv->rxpackets != NULL) { +#if SLIP_RX_QUEUE + /* queue multiple pbufs */ + struct pbuf *q = p; + while (q->next != NULL) { + q = q->next; + } + q->next = p; + } else { +#else /* SLIP_RX_QUEUE */ + pbuf_free(priv->rxpackets); + } + { +#endif /* SLIP_RX_QUEUE */ + priv->rxpackets = p; + } + SYS_ARCH_UNPROTECT(old_level); + } +} + +/** + * @ingroup slipif + * Process a received byte, completed packets are put on a queue that is + * fed into IP through slipif_process_rxqueue(). + * + * This function can be called from ISR if SYS_LIGHTWEIGHT_PROT is enabled. + * + * @param netif The lwip network interface structure for this slipif + * @param data received character + */ +void +slipif_received_byte(struct netif *netif, u8_t data) +{ + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("netif->state != NULL", (netif->state != NULL)); + slipif_rxbyte_enqueue(netif, data); +} + +/** + * @ingroup slipif + * Process multiple received byte, completed packets are put on a queue that is + * fed into IP through slipif_process_rxqueue(). + * + * This function can be called from ISR if SYS_LIGHTWEIGHT_PROT is enabled. 
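 *
 * A minimal usage sketch of this ISR-driven mode (illustrative only; it
 * assumes the port is built with SLIP_RX_FROM_ISR, and slip_netif,
 * my_uart_rx_isr and main_loop are example names, not part of lwIP):
 * @code{.c}
 * static struct netif slip_netif;   // added with netif_add(..., slipif_init, tcpip_input)
 *
 * void my_uart_rx_isr(u8_t *buf, u8_t len)   // serial RX interrupt handler
 * {
 *   slipif_received_bytes(&slip_netif, buf, len);
 * }
 *
 * void main_loop(void)
 * {
 *   for (;;) {
 *     slipif_process_rxqueue(&slip_netif);   // feed queued packets into the stack
 *     // ... other work ...
 *   }
 * }
 * @endcode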
+ * + * @param netif The lwip network interface structure for this slipif + * @param data received character + * @param len Number of received characters + */ +void +slipif_received_bytes(struct netif *netif, u8_t *data, u8_t len) +{ + u8_t i; + u8_t *rxdata = data; + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("netif->state != NULL", (netif->state != NULL)); + + for (i = 0; i < len; i++, rxdata++) { + slipif_rxbyte_enqueue(netif, *rxdata); + } +} +#endif /* SLIP_RX_FROM_ISR */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/zepif.c b/Middlewares/Third_Party/LwIP/src/netif/zepif.c new file mode 100644 index 0000000..b403303 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/zepif.c @@ -0,0 +1,300 @@ +/** + * @file + * + * @defgroup zepif ZEP - ZigBee Encapsulation Protocol + * @ingroup netifs + * A netif implementing the ZigBee Encapsulation Protocol (ZEP). + * This is used to tunnel 6LowPAN over UDP. + * + * Usage (there must be a default netif before!): + * @code{.c} + * netif_add(&zep_netif, NULL, NULL, NULL, NULL, zepif_init, tcpip_6lowpan_input); + * netif_create_ip6_linklocal_address(&zep_netif, 1); + * netif_set_up(&zep_netif); + * netif_set_link_up(&zep_netif); + * @endcode + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Simon Goldschmidt + * + */ + +#include "netif/zepif.h" + +#if LWIP_IPV6 && LWIP_UDP + +#include "netif/lowpan6.h" +#include "lwip/udp.h" +#include "lwip/timeouts.h" +#include + +/** Define this to 1 to loop back TX packets for testing */ +#ifndef ZEPIF_LOOPBACK +#define ZEPIF_LOOPBACK 0 +#endif + +#define ZEP_MAX_DATA_LEN 127 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct zep_hdr { + PACK_STRUCT_FLD_8(u8_t prot_id[2]); + PACK_STRUCT_FLD_8(u8_t prot_version); + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t channel_id); + PACK_STRUCT_FIELD(u16_t device_id); + PACK_STRUCT_FLD_8(u8_t crc_mode); + PACK_STRUCT_FLD_8(u8_t unknown_1); + PACK_STRUCT_FIELD(u32_t timestamp[2]); + PACK_STRUCT_FIELD(u32_t seq_num); + PACK_STRUCT_FLD_8(u8_t unknown_2[10]); + PACK_STRUCT_FLD_8(u8_t len); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +struct zepif_state { + struct zepif_init init; + struct udp_pcb *pcb; + u32_t seqno; +}; + +static u8_t zep_lowpan_timer_running; + +/* Helper function that calls the 6LoWPAN timer and reschedules itself */ +static void +zep_lowpan_timer(void *arg) +{ + lowpan6_tmr(); + if (zep_lowpan_timer_running) { + sys_timeout(LOWPAN6_TMR_INTERVAL, zep_lowpan_timer, arg); + } +} + +/* Pass received pbufs into 6LowPAN netif */ +static void +zepif_udp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr, u16_t port) +{ + err_t err; + struct netif *netif_lowpan6 = (struct netif *)arg; + struct zep_hdr *zep; + + LWIP_ASSERT("arg != NULL", arg != NULL); + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_UNUSED_ARG(pcb); /* for LWIP_NOASSERT */ + LWIP_UNUSED_ARG(addr); + LWIP_UNUSED_ARG(port); + if (p == NULL) { + return; + } + + /* Parse and hide the ZEP header */ + if (p->len < sizeof(struct zep_hdr)) { + /* need the zep_hdr in one piece */ + goto err_return; + } + zep = (struct zep_hdr *)p->payload; + if (zep->prot_id[0] != 'E') { + goto err_return; + } + if (zep->prot_id[1] != 'X') { + goto err_return; + } + if (zep->prot_version != 2) { + /* we only support this version for now */ + goto err_return; + } + if (zep->type != 1) { + goto err_return; + } + if (zep->crc_mode != 1) { + goto err_return; + } + if (zep->len != p->tot_len - sizeof(struct zep_hdr)) { + goto err_return; + } + /* everything seems to be OK, hide the ZEP header */ + if (pbuf_remove_header(p, sizeof(struct zep_hdr))) { + goto err_return; + } + /* TODO Check CRC? */ + /* remove CRC trailer */ + pbuf_realloc(p, p->tot_len - 2); + + /* Call into 6LoWPAN code. 
*/ + err = netif_lowpan6->input(p, netif_lowpan6); + if (err == ERR_OK) { + return; + } +err_return: + pbuf_free(p); +} + +/* Send 6LoWPAN TX packets as UDP broadcast */ +static err_t +zepif_linkoutput(struct netif *netif, struct pbuf *p) +{ + err_t err; + struct pbuf *q; + struct zep_hdr *zep; + struct zepif_state *state; + + LWIP_ASSERT("invalid netif", netif != NULL); + LWIP_ASSERT("invalid pbuf", p != NULL); + + if (p->tot_len > ZEP_MAX_DATA_LEN) { + return ERR_VAL; + } + LWIP_ASSERT("TODO: support chained pbufs", p->next == NULL); + + state = (struct zepif_state *)netif->state; + LWIP_ASSERT("state->pcb != NULL", state->pcb != NULL); + + q = pbuf_alloc(PBUF_TRANSPORT, sizeof(struct zep_hdr) + p->tot_len, PBUF_RAM); + if (q == NULL) { + return ERR_MEM; + } + zep = (struct zep_hdr *)q->payload; + memset(zep, 0, sizeof(struct zep_hdr)); + zep->prot_id[0] = 'E'; + zep->prot_id[1] = 'X'; + zep->prot_version = 2; + zep->type = 1; /* Data */ + zep->channel_id = 0; /* whatever */ + zep->device_id = lwip_htons(1); /* whatever */ + zep->crc_mode = 1; + zep->unknown_1 = 0xff; + zep->seq_num = lwip_htonl(state->seqno); + state->seqno++; + zep->len = (u8_t)p->tot_len; + + err = pbuf_take_at(q, p->payload, p->tot_len, sizeof(struct zep_hdr)); + if (err == ERR_OK) { +#if ZEPIF_LOOPBACK + zepif_udp_recv(netif, state->pcb, pbuf_clone(PBUF_RAW, PBUF_RAM, q), NULL, 0); +#endif + err = udp_sendto(state->pcb, q, state->init.zep_dst_ip_addr, state->init.zep_dst_udp_port); + } + pbuf_free(q); + + return err; +} + +/** + * @ingroup zepif + * Set up a raw 6LowPAN netif and surround it with input- and output + * functions for ZEP + */ +err_t +zepif_init(struct netif *netif) +{ + err_t err; + struct zepif_init *init_state = (struct zepif_init *)netif->state; + struct zepif_state *state = (struct zepif_state *)mem_malloc(sizeof(struct zepif_state)); + + LWIP_ASSERT("zepif needs an input callback", netif->input != NULL); + + if (state == NULL) { + return ERR_MEM; + } + memset(state, 0, sizeof(struct zepif_state)); + if (init_state != NULL) { + memcpy(&state->init, init_state, sizeof(struct zepif_init)); + } + if (state->init.zep_src_udp_port == 0) { + state->init.zep_src_udp_port = ZEPIF_DEFAULT_UDP_PORT; + } + if (state->init.zep_dst_udp_port == 0) { + state->init.zep_dst_udp_port = ZEPIF_DEFAULT_UDP_PORT; + } +#if LWIP_IPV4 + if (state->init.zep_dst_ip_addr == NULL) { + /* With IPv4 enabled, default to broadcasting packets if no address is set */ + state->init.zep_dst_ip_addr = IP_ADDR_BROADCAST; + } +#endif /* LWIP_IPV4 */ + + netif->state = NULL; + + state->pcb = udp_new_ip_type(IPADDR_TYPE_ANY); + if (state->pcb == NULL) { + err = ERR_MEM; + goto err_ret; + } + err = udp_bind(state->pcb, state->init.zep_src_ip_addr, state->init.zep_src_udp_port); + if (err != ERR_OK) { + goto err_ret; + } + if (state->init.zep_netif != NULL) { + udp_bind_netif(state->pcb, state->init.zep_netif); + } + LWIP_ASSERT("udp_bind(lowpan6_broadcast_pcb) failed", err == ERR_OK); + ip_set_option(state->pcb, SOF_BROADCAST); + udp_recv(state->pcb, zepif_udp_recv, netif); + + err = lowpan6_if_init(netif); + LWIP_ASSERT("lowpan6_if_init set a state", netif->state == NULL); + if (err == ERR_OK) { + netif->state = state; + netif->hwaddr_len = 6; + if (init_state != NULL) { + memcpy(netif->hwaddr, init_state->addr, 6); + } else { + u8_t i; + for (i = 0; i < 6; i++) { + netif->hwaddr[i] = i; + } + netif->hwaddr[0] &= 0xfc; + } + netif->linkoutput = zepif_linkoutput; + + if (!zep_lowpan_timer_running) { + sys_timeout(LOWPAN6_TMR_INTERVAL, 
zep_lowpan_timer, NULL); + zep_lowpan_timer_running = 1; + } + + return ERR_OK; + } + +err_ret: + if (state->pcb != NULL) { + udp_remove(state->pcb); + } + mem_free(state); + return err; +} + +#endif /* LWIP_IPV6 && LWIP_UDP */ diff --git a/Middlewares/Third_Party/LwIP/system/arch/bpstruct.h b/Middlewares/Third_Party/LwIP/system/arch/bpstruct.h new file mode 100644 index 0000000..177758c --- /dev/null +++ b/Middlewares/Third_Party/LwIP/system/arch/bpstruct.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#if defined(__IAR_SYSTEMS_ICC__) +#pragma pack(1) +#endif + diff --git a/Middlewares/Third_Party/LwIP/system/arch/cc.h b/Middlewares/Third_Party/LwIP/system/arch/cc.h new file mode 100644 index 0000000..3065318 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/system/arch/cc.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef __CC_H__ +#define __CC_H__ + +#include "cpu.h" +#include +#include + +typedef int sys_prot_t; + +#define LWIP_PROVIDE_ERRNO + +#if defined (__GNUC__) & !defined (__CC_ARM) + +#define LWIP_TIMEVAL_PRIVATE 0 +#include + +#endif + +/* define compiler specific symbols */ +#if defined (__ICCARM__) + +#define PACK_STRUCT_BEGIN +#define PACK_STRUCT_STRUCT +#define PACK_STRUCT_END +#define PACK_STRUCT_FIELD(x) x +#define PACK_STRUCT_USE_INCLUDES + +#elif defined (__GNUC__) + +#define PACK_STRUCT_BEGIN +#define PACK_STRUCT_STRUCT __attribute__ ((__packed__)) +#define PACK_STRUCT_END +#define PACK_STRUCT_FIELD(x) x + +#elif defined (__CC_ARM) + +#define PACK_STRUCT_BEGIN __packed +#define PACK_STRUCT_STRUCT +#define PACK_STRUCT_END +#define PACK_STRUCT_FIELD(x) x + +#elif defined (__TASKING__) + +#define PACK_STRUCT_BEGIN +#define PACK_STRUCT_STRUCT +#define PACK_STRUCT_END +#define PACK_STRUCT_FIELD(x) x + +#endif + +#define LWIP_PLATFORM_ASSERT(x) do {printf("Assertion \"%s\" failed at line %d in %s\n", \ + x, __LINE__, __FILE__); } while(0) + +/* Define random number generator function */ +#define LWIP_RAND() ((u32_t)rand()) + +#endif /* __CC_H__ */ diff --git a/Middlewares/Third_Party/LwIP/system/arch/cpu.h b/Middlewares/Third_Party/LwIP/system/arch/cpu.h new file mode 100644 index 0000000..bfdf881 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/system/arch/cpu.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. 
+ * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef __CPU_H__ +#define __CPU_H__ + +#ifndef BYTE_ORDER +#define BYTE_ORDER LITTLE_ENDIAN +#endif + +#endif /* __CPU_H__ */ diff --git a/Middlewares/Third_Party/LwIP/system/arch/epstruct.h b/Middlewares/Third_Party/LwIP/system/arch/epstruct.h new file mode 100644 index 0000000..1e1a049 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/system/arch/epstruct.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#if defined(__IAR_SYSTEMS_ICC__) +#pragma pack() +#endif + diff --git a/Middlewares/Third_Party/LwIP/system/arch/init.h b/Middlewares/Third_Party/LwIP/system/arch/init.h new file mode 100644 index 0000000..e622c73 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/system/arch/init.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef __ARCH_INIT_H__ +#define __ARCH_INIT_H__ + +#define TCPIP_INIT_DONE(arg) tcpip_init_done(arg) + +void tcpip_init_done(void *); +int wait_for_tcpip_init(void); + +#endif /* __ARCH_INIT_H__ */ + + + + diff --git a/Middlewares/Third_Party/LwIP/system/arch/lib.h b/Middlewares/Third_Party/LwIP/system/arch/lib.h new file mode 100644 index 0000000..378f25b --- /dev/null +++ b/Middlewares/Third_Party/LwIP/system/arch/lib.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef __LIB_H__ +#define __LIB_H__ + +#include + + +#endif /* __LIB_H__ */ diff --git a/Middlewares/Third_Party/LwIP/system/arch/perf.h b/Middlewares/Third_Party/LwIP/system/arch/perf.h new file mode 100644 index 0000000..334d42a --- /dev/null +++ b/Middlewares/Third_Party/LwIP/system/arch/perf.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef __PERF_H__ +#define __PERF_H__ + +#define PERF_START /* null definition */ +#define PERF_STOP(x) /* null definition */ + +#endif /* __PERF_H__ */ diff --git a/Middlewares/Third_Party/LwIP/system/arch/sys_arch.h b/Middlewares/Third_Party/LwIP/system/arch/sys_arch.h new file mode 100644 index 0000000..49bfd4e --- /dev/null +++ b/Middlewares/Third_Party/LwIP/system/arch/sys_arch.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef __SYS_ARCH_H__ +#define __SYS_ARCH_H__ + +#include "lwip/opt.h" + +#if (NO_SYS != 0) +#error "NO_SYS need to be set to 0 to use threaded API" +#endif + +#include "cmsis_os.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if (osCMSIS < 0x20000U) + +#define SYS_MBOX_NULL (osMessageQId)0 +#define SYS_SEM_NULL (osSemaphoreId)0 + +typedef osSemaphoreId sys_sem_t; +typedef osSemaphoreId sys_mutex_t; +typedef osMessageQId sys_mbox_t; +typedef osThreadId sys_thread_t; +#else + +#define SYS_MBOX_NULL (osMessageQueueId_t)0 +#define SYS_SEM_NULL (osSemaphoreId_t)0 + +typedef osSemaphoreId_t sys_sem_t; +typedef osSemaphoreId_t sys_mutex_t; +typedef osMessageQueueId_t sys_mbox_t; +typedef osThreadId_t sys_thread_t; +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* __SYS_ARCH_H__ */ + diff --git a/TEST2.ioc b/TEST2.ioc new file mode 100644 index 0000000..7c60032 --- /dev/null +++ b/TEST2.ioc @@ -0,0 +1,569 @@ +#MicroXplorer Configuration settings - do not modify +CAD.formats= +CAD.pinconfig= +CAD.provider= +Dma.Request0=USART6_RX +Dma.Request1=USART6_TX +Dma.Request2=UART4_RX +Dma.Request3=UART4_TX +Dma.Request4=USART3_RX +Dma.Request5=USART3_TX +Dma.RequestsNb=6 +Dma.UART4_RX.2.Direction=DMA_PERIPH_TO_MEMORY +Dma.UART4_RX.2.FIFOMode=DMA_FIFOMODE_DISABLE +Dma.UART4_RX.2.Instance=DMA1_Stream2 +Dma.UART4_RX.2.MemDataAlignment=DMA_MDATAALIGN_BYTE +Dma.UART4_RX.2.MemInc=DMA_MINC_ENABLE +Dma.UART4_RX.2.Mode=DMA_NORMAL +Dma.UART4_RX.2.PeriphDataAlignment=DMA_PDATAALIGN_BYTE +Dma.UART4_RX.2.PeriphInc=DMA_PINC_DISABLE +Dma.UART4_RX.2.Priority=DMA_PRIORITY_VERY_HIGH +Dma.UART4_RX.2.RequestParameters=Instance,Direction,PeriphInc,MemInc,PeriphDataAlignment,MemDataAlignment,Mode,Priority,FIFOMode +Dma.UART4_TX.3.Direction=DMA_MEMORY_TO_PERIPH +Dma.UART4_TX.3.FIFOMode=DMA_FIFOMODE_DISABLE +Dma.UART4_TX.3.Instance=DMA1_Stream4 +Dma.UART4_TX.3.MemDataAlignment=DMA_MDATAALIGN_BYTE +Dma.UART4_TX.3.MemInc=DMA_MINC_ENABLE +Dma.UART4_TX.3.Mode=DMA_NORMAL +Dma.UART4_TX.3.PeriphDataAlignment=DMA_PDATAALIGN_BYTE +Dma.UART4_TX.3.PeriphInc=DMA_PINC_DISABLE +Dma.UART4_TX.3.Priority=DMA_PRIORITY_VERY_HIGH +Dma.UART4_TX.3.RequestParameters=Instance,Direction,PeriphInc,MemInc,PeriphDataAlignment,MemDataAlignment,Mode,Priority,FIFOMode +Dma.USART3_RX.4.Direction=DMA_PERIPH_TO_MEMORY +Dma.USART3_RX.4.FIFOMode=DMA_FIFOMODE_DISABLE +Dma.USART3_RX.4.Instance=DMA1_Stream1 +Dma.USART3_RX.4.MemDataAlignment=DMA_MDATAALIGN_BYTE +Dma.USART3_RX.4.MemInc=DMA_MINC_ENABLE +Dma.USART3_RX.4.Mode=DMA_NORMAL +Dma.USART3_RX.4.PeriphDataAlignment=DMA_PDATAALIGN_BYTE +Dma.USART3_RX.4.PeriphInc=DMA_PINC_DISABLE +Dma.USART3_RX.4.Priority=DMA_PRIORITY_LOW +Dma.USART3_RX.4.RequestParameters=Instance,Direction,PeriphInc,MemInc,PeriphDataAlignment,MemDataAlignment,Mode,Priority,FIFOMode +Dma.USART3_TX.5.Direction=DMA_MEMORY_TO_PERIPH +Dma.USART3_TX.5.FIFOMode=DMA_FIFOMODE_DISABLE +Dma.USART3_TX.5.Instance=DMA1_Stream3 +Dma.USART3_TX.5.MemDataAlignment=DMA_MDATAALIGN_BYTE +Dma.USART3_TX.5.MemInc=DMA_MINC_ENABLE +Dma.USART3_TX.5.Mode=DMA_NORMAL +Dma.USART3_TX.5.PeriphDataAlignment=DMA_PDATAALIGN_BYTE +Dma.USART3_TX.5.PeriphInc=DMA_PINC_DISABLE +Dma.USART3_TX.5.Priority=DMA_PRIORITY_LOW +Dma.USART3_TX.5.RequestParameters=Instance,Direction,PeriphInc,MemInc,PeriphDataAlignment,MemDataAlignment,Mode,Priority,FIFOMode +Dma.USART6_RX.0.Direction=DMA_PERIPH_TO_MEMORY +Dma.USART6_RX.0.FIFOMode=DMA_FIFOMODE_DISABLE +Dma.USART6_RX.0.Instance=DMA2_Stream1 +Dma.USART6_RX.0.MemDataAlignment=DMA_MDATAALIGN_BYTE 
+Dma.USART6_RX.0.MemInc=DMA_MINC_ENABLE +Dma.USART6_RX.0.Mode=DMA_NORMAL +Dma.USART6_RX.0.PeriphDataAlignment=DMA_PDATAALIGN_BYTE +Dma.USART6_RX.0.PeriphInc=DMA_PINC_DISABLE +Dma.USART6_RX.0.Priority=DMA_PRIORITY_LOW +Dma.USART6_RX.0.RequestParameters=Instance,Direction,PeriphInc,MemInc,PeriphDataAlignment,MemDataAlignment,Mode,Priority,FIFOMode +Dma.USART6_TX.1.Direction=DMA_MEMORY_TO_PERIPH +Dma.USART6_TX.1.FIFOMode=DMA_FIFOMODE_DISABLE +Dma.USART6_TX.1.Instance=DMA2_Stream6 +Dma.USART6_TX.1.MemDataAlignment=DMA_MDATAALIGN_BYTE +Dma.USART6_TX.1.MemInc=DMA_MINC_ENABLE +Dma.USART6_TX.1.Mode=DMA_NORMAL +Dma.USART6_TX.1.PeriphDataAlignment=DMA_PDATAALIGN_BYTE +Dma.USART6_TX.1.PeriphInc=DMA_PINC_DISABLE +Dma.USART6_TX.1.Priority=DMA_PRIORITY_LOW +Dma.USART6_TX.1.RequestParameters=Instance,Direction,PeriphInc,MemInc,PeriphDataAlignment,MemDataAlignment,Mode,Priority,FIFOMode +ETH.IPParameters=MediaInterface +ETH.MediaInterface=HAL_ETH_RMII_MODE +File.Version=6 +GPIO.groupedBy=Group By Peripherals +KeepUserPlacement=false +LWIP.BSP.number=1 +LWIP.GATEWAY_ADDRESS=192.168.001.029 +LWIP.IPParameters=LWIP_DHCP,IP_ADDRESS,NETMASK_ADDRESS,GATEWAY_ADDRESS,LWIP_UDP +LWIP.IP_ADDRESS=192.168.001.100 +LWIP.LWIP_DHCP=0 +LWIP.LWIP_UDP=0 +LWIP.NETMASK_ADDRESS=255.255.255.000 +LWIP.Version=v2.1.2_Cube +LWIP0.BSP.STBoard=false +LWIP0.BSP.api=BSP_COMPONENT_DRIVER +LWIP0.BSP.component=LAN8742 +LWIP0.BSP.condition= +LWIP0.BSP.instance=LAN8742 +LWIP0.BSP.ip= +LWIP0.BSP.mode= +LWIP0.BSP.name=Driver_PHY +LWIP0.BSP.semaphore=S_LAN8742 +LWIP0.BSP.solution=LAN8742 +Mcu.CPN=STM32F407VGT6 +Mcu.Family=STM32F4 +Mcu.IP0=DMA +Mcu.IP1=ETH +Mcu.IP10=UART5 +Mcu.IP11=USART2 +Mcu.IP12=USART3 +Mcu.IP13=USART6 +Mcu.IP2=LWIP +Mcu.IP3=NVIC +Mcu.IP4=RCC +Mcu.IP5=SPI1 +Mcu.IP6=SYS +Mcu.IP7=TIM2 +Mcu.IP8=TIM3 +Mcu.IP9=UART4 +Mcu.IPNb=14 +Mcu.Name=STM32F407V(E-G)Tx +Mcu.Package=LQFP100 +Mcu.Pin0=PH0-OSC_IN +Mcu.Pin1=PH1-OSC_OUT +Mcu.Pin10=PB2 +Mcu.Pin11=PE7 +Mcu.Pin12=PE12 +Mcu.Pin13=PE13 +Mcu.Pin14=PE14 +Mcu.Pin15=PB11 +Mcu.Pin16=PB12 +Mcu.Pin17=PB13 +Mcu.Pin18=PD8 +Mcu.Pin19=PD9 +Mcu.Pin2=PC0 +Mcu.Pin20=PD11 +Mcu.Pin21=PD12 +Mcu.Pin22=PD13 +Mcu.Pin23=PD14 +Mcu.Pin24=PD15 +Mcu.Pin25=PC6 +Mcu.Pin26=PC7 +Mcu.Pin27=PC8 +Mcu.Pin28=PC9 +Mcu.Pin29=PA8 +Mcu.Pin3=PC1 +Mcu.Pin30=PA11 +Mcu.Pin31=PA12 +Mcu.Pin32=PA13 +Mcu.Pin33=PA14 +Mcu.Pin34=PA15 +Mcu.Pin35=PC10 +Mcu.Pin36=PC11 +Mcu.Pin37=PC12 +Mcu.Pin38=PD0 +Mcu.Pin39=PD1 +Mcu.Pin4=PA1 +Mcu.Pin40=PD2 +Mcu.Pin41=PD3 +Mcu.Pin42=PD4 +Mcu.Pin43=PD5 +Mcu.Pin44=PD6 +Mcu.Pin45=PD7 +Mcu.Pin46=PB3 +Mcu.Pin47=PB4 +Mcu.Pin48=PB5 +Mcu.Pin49=PB6 +Mcu.Pin5=PA2 +Mcu.Pin50=PB7 +Mcu.Pin51=PE0 +Mcu.Pin52=PE1 +Mcu.Pin53=VP_LWIP_VS_Enabled +Mcu.Pin54=VP_SYS_VS_Systick +Mcu.Pin55=VP_TIM2_VS_ClockSourceINT +Mcu.Pin56=VP_TIM3_VS_ClockSourceINT +Mcu.Pin6=PA7 +Mcu.Pin7=PC4 +Mcu.Pin8=PC5 +Mcu.Pin9=PB1 +Mcu.PinsNb=57 +Mcu.ThirdPartyNb=0 +Mcu.UserConstants= +Mcu.UserName=STM32F407VGTx +MxCube.Version=6.13.0 +MxDb.Version=DB.6.0.130 +NVIC.BusFault_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false +NVIC.DMA1_Stream1_IRQn=true\:0\:0\:false\:false\:true\:false\:true\:true +NVIC.DMA1_Stream2_IRQn=true\:0\:0\:false\:false\:true\:false\:true\:true +NVIC.DMA1_Stream3_IRQn=true\:0\:0\:false\:false\:true\:false\:true\:true +NVIC.DMA1_Stream4_IRQn=true\:0\:0\:false\:false\:true\:false\:true\:true +NVIC.DMA2_Stream1_IRQn=true\:0\:0\:false\:false\:true\:false\:true\:true +NVIC.DMA2_Stream6_IRQn=true\:0\:0\:false\:false\:true\:false\:true\:true +NVIC.DebugMonitor_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false 
+NVIC.ETH_IRQn=true\:0\:0\:false\:false\:true\:true\:true\:true +NVIC.EXTI1_IRQn=true\:0\:0\:false\:false\:true\:true\:true\:true +NVIC.EXTI3_IRQn=true\:0\:0\:false\:false\:true\:true\:true\:true +NVIC.ForceEnableDMAVector=true +NVIC.HardFault_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false +NVIC.MemoryManagement_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false +NVIC.NonMaskableInt_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false +NVIC.PendSV_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false +NVIC.PriorityGroup=NVIC_PRIORITYGROUP_4 +NVIC.SVCall_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false +NVIC.SysTick_IRQn=true\:15\:0\:false\:false\:true\:false\:true\:false +NVIC.TIM3_IRQn=true\:0\:0\:false\:false\:true\:true\:true\:true +NVIC.UART4_IRQn=true\:0\:0\:false\:false\:true\:true\:true\:true +NVIC.UART5_IRQn=true\:0\:0\:false\:false\:true\:true\:true\:true +NVIC.USART2_IRQn=true\:0\:0\:false\:false\:true\:true\:true\:true +NVIC.USART3_IRQn=true\:0\:0\:false\:false\:true\:true\:true\:true +NVIC.USART6_IRQn=true\:0\:0\:false\:false\:true\:true\:true\:true +NVIC.UsageFault_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false +PA1.Mode=RMII +PA1.Signal=ETH_REF_CLK +PA11.GPIOParameters=GPIO_PuPd,GPIO_Label +PA11.GPIO_Label=DI_CH3 +PA11.GPIO_PuPd=GPIO_PULLUP +PA11.Locked=true +PA11.Signal=GPIO_Input +PA12.GPIOParameters=GPIO_PuPd,GPIO_Label +PA12.GPIO_Label=DI_CH4 +PA12.GPIO_PuPd=GPIO_PULLUP +PA12.Locked=true +PA12.Signal=GPIO_Input +PA13.Mode=Serial_Wire +PA13.Signal=SYS_JTMS-SWDIO +PA14.Mode=Serial_Wire +PA14.Signal=SYS_JTCK-SWCLK +PA15.GPIOParameters=GPIO_Speed,GPIO_Label +PA15.GPIO_Label=HART_CLK +PA15.GPIO_Speed=GPIO_SPEED_FREQ_VERY_HIGH +PA15.Locked=true +PA15.Signal=S_TIM2_CH1_ETR +PA2.Mode=RMII +PA2.Signal=ETH_MDIO +PA7.Mode=RMII +PA7.Signal=ETH_CRS_DV +PA8.GPIOParameters=GPIO_Speed,PinState,GPIO_PuPd,GPIO_Label +PA8.GPIO_Label=HART1_RST +PA8.GPIO_PuPd=GPIO_PULLUP +PA8.GPIO_Speed=GPIO_SPEED_FREQ_VERY_HIGH +PA8.Locked=true +PA8.PinState=GPIO_PIN_SET +PA8.Signal=GPIO_Output +PB1.GPIOParameters=GPIO_Speed,PinState,GPIO_PuPd,GPIO_Label +PB1.GPIO_Label=LED3_R +PB1.GPIO_PuPd=GPIO_PULLUP +PB1.GPIO_Speed=GPIO_SPEED_FREQ_MEDIUM +PB1.Locked=true +PB1.PinState=GPIO_PIN_SET +PB1.Signal=GPIO_Output +PB11.Mode=RMII +PB11.Signal=ETH_TX_EN +PB12.Mode=RMII +PB12.Signal=ETH_TXD0 +PB13.Mode=RMII +PB13.Signal=ETH_TXD1 +PB2.GPIOParameters=GPIO_Speed,PinState,GPIO_PuPd,GPIO_Label,GPIO_ModeDefaultOutputPP +PB2.GPIO_Label=LED3_G +PB2.GPIO_ModeDefaultOutputPP=GPIO_MODE_OUTPUT_PP +PB2.GPIO_PuPd=GPIO_PULLUP +PB2.GPIO_Speed=GPIO_SPEED_FREQ_MEDIUM +PB2.Locked=true +PB2.PinState=GPIO_PIN_SET +PB2.Signal=GPIO_Output +PB3.Locked=true +PB3.Mode=Full_Duplex_Master +PB3.Signal=SPI1_SCK +PB4.Locked=true +PB4.Mode=Full_Duplex_Master +PB4.Signal=SPI1_MISO +PB5.Locked=true +PB5.Mode=Full_Duplex_Master +PB5.Signal=SPI1_MOSI +PB6.GPIOParameters=GPIO_Speed,PinState,GPIO_PuPd,GPIO_Label +PB6.GPIO_Label=DAC1_CS +PB6.GPIO_PuPd=GPIO_PULLUP +PB6.GPIO_Speed=GPIO_SPEED_FREQ_VERY_HIGH +PB6.Locked=true +PB6.PinState=GPIO_PIN_SET +PB6.Signal=GPIO_Output +PB7.GPIOParameters=GPIO_Speed,PinState,GPIO_PuPd,GPIO_Label,GPIO_ModeDefaultOutputPP +PB7.GPIO_Label=DAC2_CS +PB7.GPIO_ModeDefaultOutputPP=GPIO_MODE_OUTPUT_PP +PB7.GPIO_PuPd=GPIO_PULLUP +PB7.GPIO_Speed=GPIO_SPEED_FREQ_VERY_HIGH +PB7.Locked=true +PB7.PinState=GPIO_PIN_SET +PB7.Signal=GPIO_Output +PC0.GPIOParameters=GPIO_Speed,PinState,GPIO_Label +PC0.GPIO_Label=ETH_RESET +PC0.GPIO_Speed=GPIO_SPEED_FREQ_MEDIUM +PC0.Locked=true 
+PC0.PinState=GPIO_PIN_SET +PC0.Signal=GPIO_Output +PC1.Mode=RMII +PC1.Signal=ETH_MDC +PC10.GPIOParameters=GPIO_Label +PC10.GPIO_Label=LCD_TX +PC10.Locked=true +PC10.Mode=Asynchronous +PC10.Signal=UART4_TX +PC11.GPIOParameters=GPIO_Label +PC11.GPIO_Label=LCD_RX +PC11.Mode=Asynchronous +PC11.Signal=UART4_RX +PC12.GPIOParameters=GPIO_Label +PC12.GPIO_Label=HART1_TX +PC12.Mode=Asynchronous +PC12.Signal=UART5_TX +PC4.Mode=RMII +PC4.Signal=ETH_RXD0 +PC5.Mode=RMII +PC5.Signal=ETH_RXD1 +PC6.GPIOParameters=GPIO_Label +PC6.GPIO_Label=BLE1_TX +PC6.Mode=Asynchronous +PC6.Signal=USART6_TX +PC7.GPIOParameters=GPIO_Label +PC7.GPIO_Label=BLE1_RX +PC7.Mode=Asynchronous +PC7.Signal=USART6_RX +PC8.GPIOParameters=GPIO_PuPd,GPIO_Label +PC8.GPIO_Label=DI_CH1 +PC8.GPIO_PuPd=GPIO_PULLUP +PC8.Locked=true +PC8.Signal=GPIO_Input +PC9.GPIOParameters=GPIO_PuPd,GPIO_Label +PC9.GPIO_Label=DI_CH2 +PC9.GPIO_PuPd=GPIO_PULLUP +PC9.Locked=true +PC9.Signal=GPIO_Input +PD0.GPIOParameters=GPIO_Speed,PinState,GPIO_PuPd,GPIO_Label +PD0.GPIO_Label=HART1_RTS +PD0.GPIO_PuPd=GPIO_PULLUP +PD0.GPIO_Speed=GPIO_SPEED_FREQ_VERY_HIGH +PD0.Locked=true +PD0.PinState=GPIO_PIN_SET +PD0.Signal=GPIO_Output +PD1.GPIOParameters=GPIO_PuPd,GPIO_Label,GPIO_ModeDefaultEXTI +PD1.GPIO_Label=HART1_OCD +PD1.GPIO_ModeDefaultEXTI=GPIO_MODE_IT_FALLING +PD1.GPIO_PuPd=GPIO_PULLUP +PD1.Locked=true +PD1.Signal=GPXTI1 +PD11.GPIOParameters=GPIO_Speed,PinState,GPIO_Label,GPIO_ModeDefaultOutputPP +PD11.GPIO_Label=DO_CH4 +PD11.GPIO_ModeDefaultOutputPP=GPIO_MODE_OUTPUT_PP +PD11.GPIO_Speed=GPIO_SPEED_FREQ_VERY_HIGH +PD11.Locked=true +PD11.PinState=GPIO_PIN_SET +PD11.Signal=GPIO_Output +PD12.GPIOParameters=GPIO_Speed,PinState,GPIO_Label +PD12.GPIO_Label=DO_CH3 +PD12.GPIO_Speed=GPIO_SPEED_FREQ_VERY_HIGH +PD12.Locked=true +PD12.PinState=GPIO_PIN_SET +PD12.Signal=GPIO_Output +PD13.GPIOParameters=GPIO_Speed,PinState,GPIO_Label +PD13.GPIO_Label=DO_CH1 +PD13.GPIO_Speed=GPIO_SPEED_FREQ_VERY_HIGH +PD13.Locked=true +PD13.PinState=GPIO_PIN_SET +PD13.Signal=GPIO_Output +PD14.GPIOParameters=GPIO_Speed,PinState,GPIO_Label +PD14.GPIO_Label=DO_CH2 +PD14.GPIO_Speed=GPIO_SPEED_FREQ_VERY_HIGH +PD14.Locked=true +PD14.PinState=GPIO_PIN_SET +PD14.Signal=GPIO_Output +PD15.GPIOParameters=GPIO_Speed,PinState,GPIO_PuPd,GPIO_Label +PD15.GPIO_Label=DO_EN +PD15.GPIO_PuPd=GPIO_PULLUP +PD15.GPIO_Speed=GPIO_SPEED_FREQ_VERY_HIGH +PD15.Locked=true +PD15.PinState=GPIO_PIN_SET +PD15.Signal=GPIO_Output +PD2.GPIOParameters=GPIO_Label +PD2.GPIO_Label=HART1_RX +PD2.Mode=Asynchronous +PD2.Signal=UART5_RX +PD3.GPIOParameters=GPIO_PuPd,GPIO_Label,GPIO_ModeDefaultEXTI +PD3.GPIO_Label=HART2_OCD +PD3.GPIO_ModeDefaultEXTI=GPIO_MODE_IT_FALLING +PD3.GPIO_PuPd=GPIO_PULLUP +PD3.Locked=true +PD3.Signal=GPXTI3 +PD4.GPIOParameters=GPIO_Speed,PinState,GPIO_PuPd,GPIO_Label +PD4.GPIO_Label=HART2_RTS +PD4.GPIO_PuPd=GPIO_PULLUP +PD4.GPIO_Speed=GPIO_SPEED_FREQ_VERY_HIGH +PD4.Locked=true +PD4.PinState=GPIO_PIN_SET +PD4.Signal=GPIO_Output +PD5.GPIOParameters=GPIO_Label +PD5.GPIO_Label=HART2_TX +PD5.Mode=Asynchronous +PD5.Signal=USART2_TX +PD6.GPIOParameters=GPIO_Label +PD6.GPIO_Label=HART2_RX +PD6.Locked=true +PD6.Mode=Asynchronous +PD6.Signal=USART2_RX +PD7.GPIOParameters=GPIO_Speed,GPIO_PuPd,GPIO_Label +PD7.GPIO_Label=HART2_RST +PD7.GPIO_PuPd=GPIO_PULLUP +PD7.GPIO_Speed=GPIO_SPEED_FREQ_VERY_HIGH +PD7.Locked=true +PD7.Signal=GPIO_Output +PD8.GPIOParameters=GPIO_Label +PD8.GPIO_Label=BLE2_TX +PD8.Locked=true +PD8.Mode=Asynchronous +PD8.Signal=USART3_TX +PD9.GPIOParameters=GPIO_Label +PD9.GPIO_Label=BLE2_RX +PD9.Mode=Asynchronous 
+PD9.Signal=USART3_RX +PE0.GPIOParameters=GPIO_Speed,PinState,GPIO_PuPd,GPIO_Label +PE0.GPIO_Label=ADC_CS +PE0.GPIO_PuPd=GPIO_PULLUP +PE0.GPIO_Speed=GPIO_SPEED_FREQ_VERY_HIGH +PE0.Locked=true +PE0.PinState=GPIO_PIN_SET +PE0.Signal=GPIO_Output +PE1.GPIOParameters=GPIO_Speed,PinState,GPIO_PuPd,GPIO_Label,GPIO_ModeDefaultOutputPP +PE1.GPIO_Label=AD7124_SYNC +PE1.GPIO_ModeDefaultOutputPP=GPIO_MODE_OUTPUT_PP +PE1.GPIO_PuPd=GPIO_PULLDOWN +PE1.GPIO_Speed=GPIO_SPEED_FREQ_VERY_HIGH +PE1.Locked=true +PE1.PinState=GPIO_PIN_SET +PE1.Signal=GPIO_Output +PE12.GPIOParameters=GPIO_Speed,PinState,GPIO_PuPd,GPIO_Label,GPIO_ModeDefaultOutputPP +PE12.GPIO_Label=LED2_R +PE12.GPIO_ModeDefaultOutputPP=GPIO_MODE_OUTPUT_PP +PE12.GPIO_PuPd=GPIO_PULLUP +PE12.GPIO_Speed=GPIO_SPEED_FREQ_MEDIUM +PE12.Locked=true +PE12.PinState=GPIO_PIN_SET +PE12.Signal=GPIO_Output +PE13.GPIOParameters=GPIO_Speed,PinState,GPIO_PuPd,GPIO_Label +PE13.GPIO_Label=LED2_G +PE13.GPIO_PuPd=GPIO_PULLUP +PE13.GPIO_Speed=GPIO_SPEED_FREQ_MEDIUM +PE13.Locked=true +PE13.PinState=GPIO_PIN_SET +PE13.Signal=GPIO_Output +PE14.GPIOParameters=GPIO_Speed,PinState,GPIO_PuPd,GPIO_Label,GPIO_ModeDefaultOutputPP +PE14.GPIO_Label=LED2_Y +PE14.GPIO_ModeDefaultOutputPP=GPIO_MODE_OUTPUT_PP +PE14.GPIO_PuPd=GPIO_PULLUP +PE14.GPIO_Speed=GPIO_SPEED_FREQ_MEDIUM +PE14.Locked=true +PE14.PinState=GPIO_PIN_SET +PE14.Signal=GPIO_Output +PE7.GPIOParameters=GPIO_Speed,PinState,GPIO_PuPd,GPIO_Label +PE7.GPIO_Label=LED3_Y +PE7.GPIO_PuPd=GPIO_PULLUP +PE7.GPIO_Speed=GPIO_SPEED_FREQ_MEDIUM +PE7.Locked=true +PE7.PinState=GPIO_PIN_SET +PE7.Signal=GPIO_Output +PH0-OSC_IN.Mode=HSE-External-Oscillator +PH0-OSC_IN.Signal=RCC_OSC_IN +PH1-OSC_OUT.Mode=HSE-External-Oscillator +PH1-OSC_OUT.Signal=RCC_OSC_OUT +PinOutPanel.RotationAngle=0 +ProjectManager.AskForMigrate=true +ProjectManager.BackupPrevious=false +ProjectManager.CompilerOptimize=6 +ProjectManager.ComputerToolchain=false +ProjectManager.CoupleFile=true +ProjectManager.CustomerFirmwarePackage= +ProjectManager.DefaultFWLocation=true +ProjectManager.DeletePrevious=true +ProjectManager.DeviceId=STM32F407VGTx +ProjectManager.FirmwarePackage=STM32Cube FW_F4 V1.28.1 +ProjectManager.FreePins=false +ProjectManager.HalAssertFull=false +ProjectManager.HeapSize=0x200 +ProjectManager.KeepUserCode=true +ProjectManager.LastFirmware=true +ProjectManager.LibraryCopy=1 +ProjectManager.MainLocation=Core/Src +ProjectManager.NoMain=false +ProjectManager.PreviousToolchain= +ProjectManager.ProjectBuild=false +ProjectManager.ProjectFileName=TEST2.ioc +ProjectManager.ProjectName=TEST2 +ProjectManager.ProjectStructure= +ProjectManager.RegisterCallBack= +ProjectManager.StackSize=0x400 +ProjectManager.TargetToolchain=MDK-ARM V5.32 +ProjectManager.ToolChainLocation= +ProjectManager.UAScriptAfterPath= +ProjectManager.UAScriptBeforePath= +ProjectManager.UnderRoot=false +ProjectManager.functionlistsort=1-SystemClock_Config-RCC-false-HAL-false,2-MX_GPIO_Init-GPIO-false-HAL-true,3-MX_DMA_Init-DMA-false-HAL-true,4-MX_LWIP_Init-LWIP-false-HAL-false,5-MX_TIM3_Init-TIM3-false-HAL-true,6-MX_SPI1_Init-SPI1-false-HAL-true,7-MX_USART6_UART_Init-USART6-false-HAL-true,8-MX_UART4_Init-UART4-false-HAL-true,9-MX_TIM2_Init-TIM2-false-HAL-true,10-MX_UART5_Init-UART5-false-HAL-true,11-MX_USART2_UART_Init-USART2-false-HAL-true,12-MX_USART3_UART_Init-USART3-false-HAL-true +RCC.48MHZClocksFreq_Value=55296000 +RCC.AHBFreq_Value=110592000 +RCC.APB1CLKDivider=RCC_HCLK_DIV4 +RCC.APB1Freq_Value=27648000 +RCC.APB1TimFreq_Value=55296000 +RCC.APB2CLKDivider=RCC_HCLK_DIV4 
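# Cross-check of the clock tree implied by the PLL settings in this block
# (PLLP and PLLQ are not written out here, so the default PLLP = 2 and an
# implied PLLQ = 4 are assumed):
#   f_PLL_in  = HSE / PLLM      = 11,059,200 / 10   =   1,105,920 Hz  (VCOInputFreq_Value)
#   f_VCO_out = f_PLL_in * PLLN = 1,105,920 * 200   = 221,184,000 Hz  (VCOOutputFreq_Value)
#   SYSCLK    = f_VCO_out / PLLP = 221,184,000 / 2  = 110,592,000 Hz  (SYSCLKFreq_VALUE, AHB prescaler = 1)
#   48MHz dom = f_VCO_out / PLLQ = 221,184,000 / 4  =  55,296,000 Hz  (48MHZClocksFreq_Value)
#   APB1/APB2 = HCLK / 4 = 27,648,000 Hz; APB timer clocks = 2 * APB = 55,296,000 Hz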
+RCC.APB2Freq_Value=27648000 +RCC.APB2TimFreq_Value=55296000 +RCC.CortexFreq_Value=110592000 +RCC.EthernetFreq_Value=110592000 +RCC.FCLKCortexFreq_Value=110592000 +RCC.FamilyName=M +RCC.HCLKFreq_Value=110592000 +RCC.HSE_VALUE=11059200 +RCC.HSI_VALUE=16000000 +RCC.I2SClocksFreq_Value=106168320 +RCC.IPParameters=48MHZClocksFreq_Value,AHBFreq_Value,APB1CLKDivider,APB1Freq_Value,APB1TimFreq_Value,APB2CLKDivider,APB2Freq_Value,APB2TimFreq_Value,CortexFreq_Value,EthernetFreq_Value,FCLKCortexFreq_Value,FamilyName,HCLKFreq_Value,HSE_VALUE,HSI_VALUE,I2SClocksFreq_Value,LSE_VALUE,LSI_VALUE,MCO2PinFreq_Value,PLLCLKFreq_Value,PLLM,PLLN,PLLQCLKFreq_Value,PLLSourceVirtual,RTCFreq_Value,RTCHSEDivFreq_Value,SYSCLKFreq_VALUE,SYSCLKSource,VCOI2SOutputFreq_Value,VCOInputFreq_Value,VCOOutputFreq_Value,VcooutputI2S +RCC.LSE_VALUE=32768 +RCC.LSI_VALUE=32000 +RCC.MCO2PinFreq_Value=110592000 +RCC.PLLCLKFreq_Value=110592000 +RCC.PLLM=10 +RCC.PLLN=200 +RCC.PLLQCLKFreq_Value=55296000 +RCC.PLLSourceVirtual=RCC_PLLSOURCE_HSE +RCC.RTCFreq_Value=32000 +RCC.RTCHSEDivFreq_Value=5529600 +RCC.SYSCLKFreq_VALUE=110592000 +RCC.SYSCLKSource=RCC_SYSCLKSOURCE_PLLCLK +RCC.VCOI2SOutputFreq_Value=212336640 +RCC.VCOInputFreq_Value=1105920 +RCC.VCOOutputFreq_Value=221184000 +RCC.VcooutputI2S=106168320 +SH.GPXTI1.0=GPIO_EXTI1 +SH.GPXTI1.ConfNb=1 +SH.GPXTI3.0=GPIO_EXTI3 +SH.GPXTI3.ConfNb=1 +SH.S_TIM2_CH1_ETR.0=TIM2_CH1,PWM Generation1 CH1 +SH.S_TIM2_CH1_ETR.ConfNb=1 +SPI1.BaudRatePrescaler=SPI_BAUDRATEPRESCALER_256 +SPI1.CLKPhase=SPI_PHASE_2EDGE +SPI1.CLKPolarity=SPI_POLARITY_HIGH +SPI1.CalculateBaudRate=108.0 KBits/s +SPI1.Direction=SPI_DIRECTION_2LINES +SPI1.IPParameters=VirtualType,Mode,Direction,CalculateBaudRate,BaudRatePrescaler,CLKPolarity,CLKPhase +SPI1.Mode=SPI_MODE_MASTER +SPI1.VirtualType=VM_MASTER +TIM2.Channel-PWM\ Generation1\ CH1=TIM_CHANNEL_1 +TIM2.IPParameters=Channel-PWM Generation1 CH1,Period,Pulse-PWM Generation1 CH1 +TIM2.Period=119 +TIM2.Pulse-PWM\ Generation1\ CH1=60 +TIM3.AutoReloadPreload=TIM_AUTORELOAD_PRELOAD_ENABLE +TIM3.IPParameters=Prescaler,Period,AutoReloadPreload +TIM3.Period=999 +TIM3.Prescaler=55295 +UART4.IPParameters=VirtualMode +UART4.VirtualMode=Asynchronous +UART5.BaudRate=1200 +UART5.IPParameters=VirtualMode,BaudRate,WordLength,Parity +UART5.Parity=PARITY_ODD +UART5.VirtualMode=Asynchronous +UART5.WordLength=WORDLENGTH_9B +USART2.BaudRate=1200 +USART2.IPParameters=VirtualMode,BaudRate,WordLength,Parity +USART2.Parity=PARITY_ODD +USART2.VirtualMode=VM_ASYNC +USART2.WordLength=WORDLENGTH_9B +USART3.IPParameters=VirtualMode +USART3.VirtualMode=VM_ASYNC +USART6.IPParameters=VirtualMode +USART6.VirtualMode=VM_ASYNC +VP_LWIP_VS_Enabled.Mode=Enabled +VP_LWIP_VS_Enabled.Signal=LWIP_VS_Enabled +VP_SYS_VS_Systick.Mode=SysTick +VP_SYS_VS_Systick.Signal=SYS_VS_Systick +VP_TIM2_VS_ClockSourceINT.Mode=Internal +VP_TIM2_VS_ClockSourceINT.Signal=TIM2_VS_ClockSourceINT +VP_TIM3_VS_ClockSourceINT.Mode=Internal +VP_TIM3_VS_ClockSourceINT.Signal=TIM3_VS_ClockSourceINT +board=custom diff --git a/User/application/inc/ble_mx_02.h b/User/application/inc/ble_mx_02.h new file mode 100644 index 0000000..e69de29 diff --git a/User/application/inc/tcpclient.h b/User/application/inc/tcpclient.h new file mode 100644 index 0000000..0aab393 --- /dev/null +++ b/User/application/inc/tcpclient.h @@ -0,0 +1,8 @@ +#ifndef _TCPCLIENT_H_ +#define _TCPCLIENT_H_ + +#define TCP_CLIENT_PORT 5001 + +void TCP_Client_Init(void); + +#endif diff --git a/User/application/inc/tcpserverc.h b/User/application/inc/tcpserverc.h new file mode 100644 
index 0000000..34445bf
--- /dev/null
+++ b/User/application/inc/tcpserverc.h
@@ -0,0 +1,10 @@
+#ifndef _TCPECHO_H_
+#define _TCPECHO_H_
+
+#include "main.h"
+
+#define TCP_ECHO_PORT 5001
+
+extern void tcp_echo_init(void);
+extern void user_send_data(uint8_t *data, uint16_t len);
+#endif
diff --git a/User/application/src/ble_mx_02.c b/User/application/src/ble_mx_02.c
new file mode 100644
index 0000000..e69de29
diff --git a/User/application/src/tcpclient.c b/User/application/src/tcpclient.c
new file mode 100644
index 0000000..30ace26
--- /dev/null
+++ b/User/application/src/tcpclient.c
@@ -0,0 +1,104 @@
+#include "lwip/netif.h"
+#include "lwip/ip.h"
+#include "lwip/tcp.h"
+#include "lwip/init.h"
+#include "netif/etharp.h"
+#include "lwip/udp.h"
+#include "lwip/pbuf.h"
+#include <string.h>
+#include <stdio.h>
+#include "main.h"
+
+static void client_err(void *arg, err_t err) // Error callback: runs when the connection is reset or aborted
+{
+    // printf("Connection error!!\n");
+    // printf("Connection lost!!\n");
+
+    // // When the connection fails, release the TCP control block
+    // printf("Closing the connection, freeing the TCP PCB\n");
+    // // tcp_close(client_pcb);
+
+    // // Reconnect
+    // printf("Re-initializing the TCP client\n");
+    TCP_Client_Init();
+}
+
+static err_t client_send(void *arg, struct tcp_pcb *tpcb) // Send callback: queues data with tcp_write
+{
+    // Buffer holding the data to send
+    uint8_t send_buf[] = "be connected successfully\n";
+
+    // Queue the data for transmission
+    tcp_write(tpcb, send_buf, sizeof(send_buf), 1);
+
+    // Report success
+    return ERR_OK;
+}
+
+static err_t client_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
+{
+    if (p != NULL)
+    {
+        /* Acknowledge the received data (update the receive window) */
+        tcp_recved(tpcb, p->tot_len);
+
+        /* Echo the received data back */
+        tcp_write(tpcb, p->payload, p->tot_len, 1);
+
+        memset(p->payload, 0, p->tot_len);
+        pbuf_free(p);
+    }
+    else if (err == ERR_OK)
+    {
+        // The remote end closed the connection
+        tcp_close(tpcb);
+
+        // Reconnect
+        TCP_Client_Init();
+    }
+    return ERR_OK;
+}
+
+/**
+ * @brief Callback invoked when the client connection is established
+ *
+ * Called once the client has successfully connected to the server.
+ *
+ * @param arg user-supplied argument
+ * @param pcb pointer to the TCP connection control block
+ * @param err error code
+ *
+ * @return error code, ERR_OK on success
+ */
+static err_t client_connected(void *arg, struct tcp_pcb *pcb, err_t err)
+{
+    // Register a poll callback (fires every 2 coarse-grained timer intervals)
+    tcp_poll(pcb, client_send, 2);
+
+    // Register the receive callback
+    tcp_recv(pcb, client_recv);
+
+    return ERR_OK;
+}
+
+void TCP_Client_Init(void)
+{
+    struct tcp_pcb *client_pcb = NULL; // TCP control block for the client connection
+    ip4_addr_t server_ip;              // As a client we need the server's IP address in order to connect
+    /* Create a new TCP control block */
+    client_pcb = tcp_new();
+
+    IP4_ADDR(&server_ip, DEST_IP_ADDR0, DEST_IP_ADDR1, DEST_IP_ADDR2, DEST_IP_ADDR3); // Assemble the server IP address
+    // printf("Server IP address: %d.%d.%d.%d\n",
+    //        ip4_addr1(&server_ip),
+    //        ip4_addr2(&server_ip),
+    //        ip4_addr3(&server_ip),
+    //        ip4_addr4(&server_ip));
+    // printf("TCP client starting to connect!\n");
+
+    // Start the connection attempt
+    tcp_connect(client_pcb, &server_ip, TCP_CLIENT_PORT, client_connected);
+    ip_set_option(client_pcb, SOF_KEEPALIVE);
+    // Register the error callback
+    tcp_err(client_pcb, client_err);
+}
diff --git a/User/application/src/tcpserverc.c b/User/application/src/tcpserverc.c
new file mode 100644
index 0000000..5af44c4
--- /dev/null
+++ b/User/application/src/tcpserverc.c
@@ -0,0 +1,95 @@
+#include "tcpserverc.h"
+#include "lwip/netif.h"
+#include "lwip/ip.h"
+#include "lwip/tcp.h"
+#include "lwip/init.h"
+#include "netif/etharp.h"
+#include "lwip/udp.h"
+#include "lwip/pbuf.h"
+#include <string.h>
+#include <stdio.h>
+#include "usart.h"
+#include "main.h"
+#include "ht1200m.h"
+struct tcp_pcb *server_pcb1 = NULL;
+extern uint8_t tcp_echo_flags;
+/* Receive callback of the echo server */
+static err_t tcpecho_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
+{ // tpcb: control block of the accepted connection; p: the received data
+    if (p != NULL)
+    {
+        /* Data arrived: mark the connection as active */
+        tcp_echo_flags = 1;
+        tcp_recved(tpcb, p->tot_len); // Tell lwIP the data has been taken (update the receive window)
+        memcpy(&server_pcb1, &tpcb, sizeof(struct tcp_pcb *)); // Remember the connection so user_send_data() can use it
+/* Echo the received data back */
+// tcp_write(tpcb, p->payload, p->tot_len, 1);
+
+// uint8_t send_buf1[] = "I received your data:";
+// tcp_write(tpcb, send_buf1, sizeof(send_buf1), 1);
+// tcp_write(tpcb, p->payload, p->tot_len, 1);
+#if 0
+        memcpy(hart2_uart2.tx_data, (int *)p->payload, p->tot_len);
+        HART2_RTS_SEND;
+        HAL_UART_Transmit(&huart2, hart2_uart2.tx_data, p->tot_len, 1000);
+        HART2_RTS_RECEIVE;
+#endif
+#if 0
+        memcpy(hart1_uart5.tx_data, (int *)p->payload, p->tot_len);
+        HART1_RTS_SEND;
+        HAL_UART_Transmit(&huart5, hart1_uart5.tx_data, p->tot_len, 1000);
+        HART1_RTS_RECEIVE;
+#endif
+#if 0
+        memcpy(ble1_uart6.tx_data, (int *)p->payload, p->tot_len);
+        dma_usart_send(&huart6, ble1_uart6.tx_data, p->tot_len);
+#endif
+#if 1
+        memcpy(ble2_uart3.tx_data, (int *)p->payload, p->tot_len);
+        dma_usart_send(&huart3, ble2_uart3.tx_data, p->tot_len);
+#endif
+#if 0
+        memcpy(lcd_uart4.tx_data, (int *)p->payload, p->tot_len);
+        dma_usart_send(&huart4, lcd_uart4.tx_data, p->tot_len);
+#endif
+        memset(p->payload, 0, p->tot_len);
+        pbuf_free(p);
+    }
+    else if (err == ERR_OK) // The peer closed the connection; recv is then called with p == NULL
+    {
+        return tcp_close(tpcb);
+    }
+    return ERR_OK;
+}
+
+static err_t tcpecho_accept(void *arg, struct tcp_pcb *newpcb, err_t err) // Must match the tcp_accept_fn type:
+                                                                          // same parameter list and return type
+{
+    tcp_recv(newpcb, tcpecho_recv); // When data arrives, lwIP calls our tcpecho_recv callback
+
+    return ERR_OK;
+}
+
+void tcp_echo_init(void)
+{
+    struct tcp_pcb *server_pcb = NULL;
+    /* Create a new TCP control block */
+    server_pcb = tcp_new();
+
+    /* Bind the control block to the local address and port */
+    tcp_bind(server_pcb, IP_ADDR_ANY, TCP_ECHO_PORT);
+
+    /* Enter the LISTEN state */
+    server_pcb = tcp_listen(server_pcb);
+
+    /* Register the accept callback, invoked for each incoming connection */
+    tcp_accept(server_pcb, tcpecho_accept); // After a connection is accepted, lwIP calls our tcpecho_accept
+}
+
+void user_send_data(uint8_t *data, uint16_t len)
+{
+    if (tcp_echo_flags == 1)
+    {
+        tcp_write(server_pcb1, data, len, 1);
+    }
+}
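Illustrative usage sketch (not part of the commit): once a client has connected and tcpecho_recv has run at least once (tcp_echo_flags == 1), code running in the lwIP context, for example the main polling loop, can push data back over the same connection through user_send_data().

/* Sketch only; report_status() is a hypothetical caller. */
extern uint8_t tcp_echo_flags; /* defined by the application, see the extern above */

void report_status(void)
{
    static const uint8_t msg[] = "board alive\r\n";
    /* user_send_data() only queues the bytes with tcp_write(); lwIP actually
     * transmits them on the next tcp_output()/TCP timer run. */
    user_send_data((uint8_t *)msg, sizeof(msg) - 1);
}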
diff --git a/User/board/inc/leds.h b/User/board/inc/leds.h
new file mode 100644
index 0000000..fc0724b
--- /dev/null
+++ b/User/board/inc/leds.h
@@ -0,0 +1,30 @@
+#ifndef __LEDS_H__
+#define __LEDS_H__
+
+/* Includes ------------------------------------------------------------------*/
+#include "main.h"
+
+#define LED_STATE_OFF 0
+#define LED_STATE_ON 1
+
+#define LED_TIM TIM12
+
+typedef enum
+{
+    LEDS2_RED,
+    LEDS2_GREEN,
+    LEDS2_YELLOW,
+    LEDS3_RED,
+    LEDS3_GREEN,
+    LEDS3_YELLOW,
+    LEDS_MAX,
+} leds_e;
+
+extern void leds_on(leds_e io);
+extern void leds_off(leds_e io);
+extern void leds_on_all(void);
+extern void leds_off_all(void);
+
+extern unsigned char tim_led_state;
+// extern void lan8720_reset(void);
+#endif
diff --git a/User/board/src/leds.c b/User/board/src/leds.c
new file mode 100644
index 0000000..de277fb
--- /dev/null
+++ b/User/board/src/leds.c
@@ -0,0 +1,112 @@
+#include "leds.h"
+
+unsigned char tim_led_state = 0;
+
+/**
+ * @brief Turn on the specified LED
+ *
+ * Turns on the LED selected by the enum value (the LEDs are driven active-low).
+ *
+ * @param io enum value of the LED to turn on.
+ */
+void leds_on(leds_e io)
+{
+    switch (io)
+    {
+    case LEDS2_RED:
+        HAL_GPIO_WritePin(LED2_R_GPIO_Port, LED2_R_Pin, GPIO_PIN_RESET);
+        break;
+    case LEDS2_GREEN:
+        HAL_GPIO_WritePin(LED2_G_GPIO_Port, LED2_G_Pin, GPIO_PIN_RESET);
+        break;
+    case LEDS2_YELLOW:
+        HAL_GPIO_WritePin(LED2_Y_GPIO_Port, LED2_Y_Pin, GPIO_PIN_RESET);
+        break;
+    case LEDS3_RED:
+        HAL_GPIO_WritePin(LED3_R_GPIO_Port, LED3_R_Pin, GPIO_PIN_RESET);
+        break;
+    case LEDS3_GREEN:
+        HAL_GPIO_WritePin(LED3_G_GPIO_Port, LED3_G_Pin, GPIO_PIN_RESET);
+        break;
+    case LEDS3_YELLOW:
+        HAL_GPIO_WritePin(LED3_Y_GPIO_Port, LED3_Y_Pin, GPIO_PIN_RESET);
+        break;
+    default:
+        break;
+    }
+}
+
+/**
+ * @brief Turn off the specified LED
+ *
+ * Turns off the LED selected by the enum value.
+ *
+ * @param io enum value of the LED to turn off
+ */
+void leds_off(leds_e io)
+{
+    switch (io)
+    {
+    case LEDS2_RED:
+        HAL_GPIO_WritePin(LED2_R_GPIO_Port, LED2_R_Pin, GPIO_PIN_SET);
+        break;
+    case LEDS2_GREEN:
+        HAL_GPIO_WritePin(LED2_G_GPIO_Port, LED2_G_Pin, GPIO_PIN_SET);
+        break;
+    case LEDS2_YELLOW:
+        HAL_GPIO_WritePin(LED2_Y_GPIO_Port, LED2_Y_Pin, GPIO_PIN_SET);
+        break;
+    case LEDS3_RED:
+        HAL_GPIO_WritePin(LED3_R_GPIO_Port, LED3_R_Pin, GPIO_PIN_SET);
+        break;
+    case LEDS3_GREEN:
+        HAL_GPIO_WritePin(LED3_G_GPIO_Port, LED3_G_Pin, GPIO_PIN_SET);
+        break;
+    case LEDS3_YELLOW:
+        HAL_GPIO_WritePin(LED3_Y_GPIO_Port, LED3_Y_Pin, GPIO_PIN_SET);
+        break;
+    default:
+        break;
+    }
+}
+
+/**
+ * @brief Turn on all LEDs
+ *
+ * Turns every LED on by driving the corresponding GPIO pins low
+ * (GPIO_PIN_RESET).
+ */
+void leds_on_all(void)
+{
+    HAL_GPIO_WritePin(LED2_R_GPIO_Port, LED2_R_Pin, GPIO_PIN_RESET);
+    HAL_GPIO_WritePin(LED2_G_GPIO_Port, LED2_G_Pin, GPIO_PIN_RESET);
+    HAL_GPIO_WritePin(LED2_Y_GPIO_Port, LED2_Y_Pin, GPIO_PIN_RESET);
+    HAL_GPIO_WritePin(LED3_R_GPIO_Port, LED3_R_Pin, GPIO_PIN_RESET);
+    HAL_GPIO_WritePin(LED3_G_GPIO_Port, LED3_G_Pin, GPIO_PIN_RESET);
+    HAL_GPIO_WritePin(LED3_Y_GPIO_Port, LED3_Y_Pin, GPIO_PIN_RESET);
+}
+
+/**
+ * @brief Turn off all LEDs
+ *
+ * Turns every LED off.
+ */
+void leds_off_all(void)
+{
+    HAL_GPIO_WritePin(LED2_R_GPIO_Port, LED2_R_Pin, GPIO_PIN_SET);
+    HAL_GPIO_WritePin(LED2_G_GPIO_Port, LED2_G_Pin, GPIO_PIN_SET);
+    HAL_GPIO_WritePin(LED2_Y_GPIO_Port, LED2_Y_Pin, GPIO_PIN_SET);
+    HAL_GPIO_WritePin(LED3_R_GPIO_Port, LED3_R_Pin, GPIO_PIN_SET);
+    HAL_GPIO_WritePin(LED3_G_GPIO_Port, LED3_G_Pin, GPIO_PIN_SET);
+    HAL_GPIO_WritePin(LED3_Y_GPIO_Port, LED3_Y_Pin, GPIO_PIN_SET);
+}
+
+// void lan8720_reset(void)
+// {
+//     LL_GPIO_ResetOutputPin(ETH_RESET_GPIO_Port, ETH_RESET_Pin);
+//     HAL_Delay(55);
+//     LL_GPIO_SetOutputPin(ETH_RESET_GPIO_Port, ETH_RESET_Pin);
//     HAL_Delay(55);
+// }
diff --git a/User/driver/ad7124.c b/User/driver/ad7124.c
new file mode 100644
index 0000000..08e7ce4
--- /dev/null
+++ b/User/driver/ad7124.c
@@ -0,0 +1,455 @@
+#include "ad7124.h"
+/* Error codes */
+#define INVALID_VAL -1    /* Invalid argument */
+#define COMM_ERR -2       /* Communication error on receive */
+#define AD7124_TIMEOUT -3 /* A timeout has occurred */
+
+// Default AD7124 register values; adjust to the needs of the actual project
+static ad7124_st_reg_t ad7124_regs[AD7124_REG_NO] = {
+    {AD7124_STATUS, 0x00, AD7124_SIZE_1, AD7124_R},         /* AD7124_Status */
+    {AD7124_ADC_CONTROL, 0x0280, AD7124_SIZE_2, AD7124_RW}, /* AD7124_ADC_Control */
+    {AD7124_DATA, 0x000000, AD7124_SIZE_3, AD7124_R},       /* AD7124_Data */
+    {AD7124_IOCON1, 0x000000, AD7124_SIZE_3, AD7124_RW},    /* AD7124_IOCon1 */
+    {AD7124_IOCON2, 0x0000, AD7124_SIZE_2, AD7124_RW},      /* AD7124_IOCon2 */
+    {AD7124_ID, 0x02, AD7124_SIZE_1, AD7124_R},             /* AD7124_ID */
+    {AD7124_ERROR, 0x000000, AD7124_SIZE_3, AD7124_R},      /* AD7124_Error */
+    {AD7124_ERROR_EN, 0x000040, AD7124_SIZE_3, AD7124_RW},  /* AD7124_Error_En */
+    {AD7124_MCLK_COUNT, 0x00, AD7124_SIZE_1, AD7124_R},     /* AD7124_Mclk_Count */
+    {AD7124_CHANNEL_0, 0x0051, AD7124_SIZE_2, AD7124_RW},   /* AD7124_Channel_0 */
+    {AD7124_CHANNEL_1, 0x0071, AD7124_SIZE_2, AD7124_RW},   /* AD7124_Channel_1 */
+    {AD7124_CHANNEL_2, 0x0091, AD7124_SIZE_2, AD7124_RW},   /* AD7124_Channel_2 */
+    {AD7124_CHANNEL_3, 0x00B1, AD7124_SIZE_2, AD7124_RW},   /* AD7124_Channel_3 */
+    {AD7124_CHANNEL_4, 0x00D1, AD7124_SIZE_2, AD7124_RW},   /* AD7124_Channel_4 */
+    {AD7124_CHANNEL_5, 0x00F1, AD7124_SIZE_2, AD7124_RW},   /* AD7124_Channel_5 */
+    {AD7124_CHANNEL_6, 0x0111, AD7124_SIZE_2, AD7124_RW},   /* AD7124_Channel_6 */
+    {AD7124_CHANNEL_7, 0x0131, AD7124_SIZE_2, AD7124_RW},   /* AD7124_Channel_7 */
+    {AD7124_CHANNEL_8, 0x0151, AD7124_SIZE_2, AD7124_RW},   /* AD7124_Channel_8 */
+    {AD7124_CHANNEL_9,
0x0171, AD7124_SIZE_2, AD7124_RW}, /* AD7124_Channel_9 */ + {AD7124_CHANNEL_10, 0x0001, AD7124_SIZE_2, AD7124_RW}, /* AD7124_Channel_10 */ + {AD7124_CHANNEL_11, 0x0001, AD7124_SIZE_2, AD7124_RW}, /* AD7124_Channel_11 */ + {AD7124_CHANNEL_12, 0x0001, AD7124_SIZE_2, AD7124_RW}, /* AD7124_Channel_12 */ + {AD7124_CHANNEL_13, 0x0001, AD7124_SIZE_2, AD7124_RW}, /* AD7124_Channel_13 */ + {AD7124_CHANNEL_14, 0x0001, AD7124_SIZE_2, AD7124_RW}, /* AD7124_Channel_14 */ + {AD7124_CHANNEL_15, 0x0001, AD7124_SIZE_2, AD7124_RW}, /* AD7124_Channel_15 */ + {AD7124_CONFIG_0, 0x01E0, AD7124_SIZE_2, AD7124_RW}, /* AD7124_Config_0 */ + {AD7124_CONFIG_1, 0x0040, AD7124_SIZE_2, AD7124_RW}, /* AD7124_Config_1 */ + {AD7124_CONFIG_2, 0x0860, AD7124_SIZE_2, AD7124_RW}, /* AD7124_Config_2 */ + {AD7124_CONFIG_3, 0x0860, AD7124_SIZE_2, AD7124_RW}, /* AD7124_Config_3 */ + {AD7124_CONFIG_4, 0x0860, AD7124_SIZE_2, AD7124_RW}, /* AD7124_Config_4 */ + {AD7124_CONFIG_5, 0x0860, AD7124_SIZE_2, AD7124_RW}, /* AD7124_Config_5 */ + {AD7124_CONFIG_6, 0x0860, AD7124_SIZE_2, AD7124_RW}, /* AD7124_Config_6 */ + {AD7124_CONFIG_7, 0x0860, AD7124_SIZE_2, AD7124_RW}, /* AD7124_Config_7 */ + {AD7124_FILTER_0, 0x060180, AD7124_SIZE_3, AD7124_RW}, /* AD7124_Filter_0 */ + {AD7124_FILTER_1, 0x060180, AD7124_SIZE_3, AD7124_RW}, /* AD7124_Filter_1 */ + {AD7124_FILTER_2, 0x060180, AD7124_SIZE_3, AD7124_RW}, /* AD7124_Filter_2 */ + {AD7124_FILTER_3, 0x060180, AD7124_SIZE_3, AD7124_RW}, /* AD7124_Filter_3 */ + {AD7124_FILTER_4, 0x060180, AD7124_SIZE_3, AD7124_RW}, /* AD7124_Filter_4 */ + {AD7124_FILTER_5, 0x060180, AD7124_SIZE_3, AD7124_RW}, /* AD7124_Filter_5 */ + {AD7124_FILTER_6, 0x060180, AD7124_SIZE_3, AD7124_RW}, /* AD7124_Filter_6 */ + {AD7124_FILTER_7, 0x060180, AD7124_SIZE_3, AD7124_RW}, /* AD7124_Filter_7 */ + {AD7124_OFFSET_0, 0x800000, AD7124_SIZE_3, AD7124_RW}, /* AD7124_Offset_0 */ + {AD7124_OFFSET_1, 0x800000, AD7124_SIZE_3, AD7124_RW}, /* AD7124_Offset_1 */ + {AD7124_OFFSET_2, 0x800000, AD7124_SIZE_3, AD7124_RW}, /* AD7124_Offset_2 */ + {AD7124_OFFSET_3, 0x800000, AD7124_SIZE_3, AD7124_RW}, /* AD7124_Offset_3 */ + {AD7124_OFFSET_4, 0x800000, AD7124_SIZE_3, AD7124_RW}, /* AD7124_Offset_4 */ + {AD7124_OFFSET_5, 0x800000, AD7124_SIZE_3, AD7124_RW}, /* AD7124_Offset_5 */ + {AD7124_OFFSET_6, 0x800000, AD7124_SIZE_3, AD7124_RW}, /* AD7124_Offset_6 */ + {AD7124_OFFSET_7, 0x800000, AD7124_SIZE_3, AD7124_RW}, /* AD7124_Offset_7 */ + {AD7124_GAIN_0, 0x500000, AD7124_SIZE_3, AD7124_RW}, /* AD7124_Gain_0 */ + {AD7124_GAIN_1, 0x500000, AD7124_SIZE_3, AD7124_RW}, /* AD7124_Gain_1 */ + {AD7124_GAIN_2, 0x500000, AD7124_SIZE_3, AD7124_RW}, /* AD7124_Gain_2 */ + {AD7124_GAIN_3, 0x500000, AD7124_SIZE_3, AD7124_RW}, /* AD7124_Gain_3 */ + {AD7124_GAIN_4, 0x500000, AD7124_SIZE_3, AD7124_RW}, /* AD7124_Gain_4 */ + {AD7124_GAIN_5, 0x500000, AD7124_SIZE_3, AD7124_RW}, /* AD7124_Gain_5 */ + {AD7124_GAIN_6, 0x500000, AD7124_SIZE_3, AD7124_RW}, /* AD7124_Gain_6 */ + {AD7124_GAIN_7, 0x500000, AD7124_SIZE_3, AD7124_RW}, /* AD7124_Gain_7 */ +}; + +static ad7124_st_reg_t ad7124_channel_regs[AD7124_CHANNEL_EN_MAX] = { + {AD7124_CHANNEL_0, 0x8051, AD7124_SIZE_2, AD7124_RW}, /* AD7124_Channel_0 */ + {AD7124_CHANNEL_1, 0x8071, AD7124_SIZE_2, AD7124_RW}, /* AD7124_Channel_1 */ + {AD7124_CHANNEL_2, 0x8091, AD7124_SIZE_2, AD7124_RW}, /* AD7124_Channel_2 */ + {AD7124_CHANNEL_3, 0x80B1, AD7124_SIZE_2, AD7124_RW}, /* AD7124_Channel_3 */ + {AD7124_CHANNEL_4, 0x80D1, AD7124_SIZE_2, AD7124_RW}, /* AD7124_Channel_4 */ + {AD7124_CHANNEL_5, 0x80F1, AD7124_SIZE_2, AD7124_RW}, /* 
AD7124_Channel_5 */
+    {AD7124_CHANNEL_6, 0x8111, AD7124_SIZE_2, AD7124_RW}, /* AD7124_Channel_6 */
+    {AD7124_CHANNEL_7, 0x8131, AD7124_SIZE_2, AD7124_RW}, /* AD7124_Channel_7 */
+    {AD7124_CHANNEL_8, 0x8151, AD7124_SIZE_2, AD7124_RW}, /* AD7124_Channel_8 */
+    {AD7124_CHANNEL_9, 0x8171, AD7124_SIZE_2, AD7124_RW}, /* AD7124_Channel_9 */
+};
+
+ad7124_analog_t ad7124_analog[AD7124_CHANNEL_EN_MAX] = {0}; // Per-channel sample structures holding the ADC results
+/**
+ * @brief Read an AD7124 register without a status check
+ *
+ * Reads the value of an AD7124 register without first checking the error register.
+ *
+ * @param p_reg pointer to the register structure to read
+ *
+ * @return 0 on success, a negative value on failure
+ */
+int32_t ad7124_no_check_read_register(ad7124_st_reg_t *p_reg)
+{
+    int32_t ret = 0;
+    uint8_t buffer[8] = {0, 0, 0, 0, 0, 0, 0, 0};
+    uint8_t i = 0;
+    uint8_t add_status_length = 0;
+
+    /* Build the Command word */
+    buffer[0] = AD7124_COMM_REG_WEN | AD7124_COMM_REG_RD |
+                AD7124_COMM_REG_RA(p_reg->addr);
+
+    /*
+     * If this is an AD7124_DATA register read, and the DATA_STATUS bit is set
+     * in ADC_CONTROL, need to read 4, not 3 bytes for DATA with STATUS
+     */
+    if ((p_reg->addr == AD7124_DATA) &&
+        (ad7124_regs[AD7124_ADC_CONTROL].value & AD7124_ADC_CTRL_REG_DATA_STATUS))
+    {
+        add_status_length = 1; // Determined by bit 10 (DATA_STATUS) of AD7124_ADC_CONTROL
+    }
+
+    /* Read data from the device */
+    ret = ad7124_read_write_spi(buffer, p_reg->size + 1 + add_status_length);
+    if (ret < 0)
+        return ret;
+
+    /*
+     * if reading Data with 4 bytes, need to copy the status byte to the STATUS
+     * register struct value member
+     */
+    if (add_status_length)
+    {
+        ad7124_regs[AD7124_STATUS].value = buffer[p_reg->size + 1];
+    }
+
+    /* Build the result */
+    p_reg->value = 0;
+    for (i = 1; i < p_reg->size + 1; i++)
+    {
+        p_reg->value <<= 8;
+        p_reg->value += buffer[i];
+    }
+
+    return ret;
+}
+
+/**
+ * @brief Write an AD7124 register without a status check
+ *
+ * Writes the value to the given AD7124 register without any verification.
+ *
+ * @param reg pointer to the register (address and value) to write
+ * @return 0 on success, a negative value on failure
+ */
+int32_t ad7124_no_check_write_register(ad7124_st_reg_t *reg)
+{
+    int32_t ret = 0;
+    int32_t reg_value = 0;
+    uint8_t wr_buf[8] = {0, 0, 0, 0, 0, 0, 0, 0};
+    uint8_t i = 0;
+
+    /* Build the Command word */
+    wr_buf[0] = AD7124_COMM_REG_WEN | AD7124_COMM_REG_WR |
+                AD7124_COMM_REG_RA(reg->addr);
+
+    /* Fill the write buffer */
+    reg_value = reg->value;
+    for (i = 0; i < reg->size; i++)
+    {
+        wr_buf[reg->size - i] = reg_value & 0xFF;
+        reg_value >>= 8;
+    }
+
+    /* Write data to the device */
+    ret = ad7124_read_write_spi(wr_buf, reg->size + 1);
+
+    return ret;
+}
+
+/**
+ * @brief Read an AD7124 register
+ *
+ * Reads data from the specified AD7124 register.
+ *
+ * @param p_reg pointer to an ad7124_st_reg_t containing the address of the register to read.
+ *
+ * @return the read result, or a negative value on failure.
+ *
+ * Before the access, the SPI_IGNORE_ERR bit of the ERROR register is checked.
+ */
+int32_t ad7124_read_register(ad7124_st_reg_t *p_reg)
+{
+    int32_t ret;
+
+    if (p_reg->addr != AD7124_ERROR && (ad7124_regs[AD7124_ERROR_EN].value & AD7124_ERREN_REG_SPI_IGNORE_ERR_EN))
+    {
+        ret = ad7124_wait_for_spi_ready(AD7124_RDY); // Check the SPI_IGNORE_ERR bit of the ERROR register before reading
+
+        if (ret < 0)
+            return ret;
+    }
+    ret = ad7124_no_check_read_register(p_reg);
+
+    return ret;
+}
+
+/**
+ * @brief Write an AD7124 register
+ *
+ * Writes data to the specified AD7124 register. If "ignore SPI errors" is enabled in
+ * the error-enable register, the function waits until the SPI interface is ready first.
+ *
+ * @param p_reg pointer to the register structure to write
+ *
+ * @return 0 on success, a negative value on error.
+ */
+int32_t ad7124_write_register(ad7124_st_reg_t *p_reg)
+{
+    int32_t ret;
+
+    if ((ad7124_regs[AD7124_ERROR_EN].value & AD7124_ERREN_REG_SPI_IGNORE_ERR_EN))
+    {
+        ret = ad7124_wait_for_spi_ready(AD7124_RDY);
+        if (ret < 0)
+            return ret;
+    }
+    ret = ad7124_no_check_write_register(p_reg);
+
+    return ret;
+}
+
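A quick sketch of what the two helpers above put on SPI1 (illustrative only, not part of the original file; it uses the register table and the bit macros defined in ad7124.h below):

/* Writing the default 0x0280 to ADC_CONTROL (address 0x01, 2 bytes) clocks out
 * {0x01, 0x02, 0x80}: command byte = WEN | WR | RA(addr), then the value MSB first.
 * Reading the ID register (address 0x05, 1 byte) clocks out {0x45, 0x00}:
 * 0x40 (RD) | 0x05, plus one dummy byte for the reply. */
static void ad7124_register_access_example(void)
{
    ad7124_regs[AD7124_ADC_CONTROL].value = 0x0280;           /* CS_EN | full-power mode (table default) */
    ad7124_write_register(&ad7124_regs[AD7124_ADC_CONTROL]);  /* SPI frame: 0x01 0x02 0x80 */

    ad7124_read_register(&ad7124_regs[AD7124_ID]);            /* SPI frame: 0x45 0x00, reply lands in .value */
}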
+/**
+ * @brief Reset the AD7124
+ *
+ * Sends the reset sequence to the AD7124 over SPI and waits for the device to
+ * finish its power-on sequence.
+ *
+ * @return 0 on success, an error code on failure.
+ */
+int32_t ad7124_reset(void)
+{
+    int32_t ret = 0;
+    uint8_t wr_buf[8] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; // 64 consecutive 1s reset the AD7124
+
+    ret = ad7124_read_write_spi(wr_buf, 8);
+
+    /* Read POR bit to clear */
+    ret = ad7124_wait_to_power_on(AD7124_RDY);
+
+    return ret;
+}
+
+/**
+ * @brief Wait until the SPI interface is ready
+ *
+ * Polls the AD7124 until the SPI interface is ready or the timeout expires.
+ *
+ * @param timeout maximum number of polling iterations
+ * @return 0 on success, AD7124_TIMEOUT on timeout
+ */
+int32_t ad7124_wait_for_spi_ready(uint32_t timeout)
+{
+    int32_t ret;
+    int8_t ready = 0;
+
+    while (!ready && --timeout)
+    {
+        /* Read the value of the Error Register */
+        ret = ad7124_read_register(&ad7124_regs[AD7124_ERROR]);
+        if (ret < 0)
+            return ret;
+
+        /* Check the SPI IGNORE Error bit in the Error Register */
+        ready = (ad7124_regs[AD7124_ERROR].value &
+                 AD7124_ERR_REG_SPI_IGNORE_ERR) == 0;
+    }
+
+    return timeout ? 0 : AD7124_TIMEOUT;
+}
+
+/**
+ * @brief Wait for the AD7124 to power up
+ *
+ * Waits until the AD7124 reports that its power-on reset has completed, or until
+ * the timeout expires.
+ *
+ * @param timeout maximum number of polling iterations
+ *
+ * @return 0 on success, AD7124_TIMEOUT on timeout
+ */
+int32_t ad7124_wait_to_power_on(uint32_t timeout)
+{
+    int32_t ret;
+    int8_t powered_on = 0;
+
+    while (!powered_on && timeout--)
+    {
+        ret = ad7124_read_register(&ad7124_regs[AD7124_STATUS]);
+        if (ret < 0)
+            return ret;
+
+        /* Check the POR_FLAG bit in the Status Register */
+        powered_on = (ad7124_regs[AD7124_STATUS].value &
+                      AD7124_STATUS_REG_POR_FLAG) == 0;
+    }
+
+    return (timeout || powered_on) ? 0 : AD7124_TIMEOUT;
+}
+
+/**
+ * @brief Wait for a conversion to complete
+ *
+ * Waits until the AD7124 signals that a new conversion result is ready. Returns
+ * an error code if the timeout expires first.
+ *
+ * @param timeout maximum number of polling iterations
+ * @return 0 on success, AD7124_TIMEOUT on timeout
+ */
+int32_t ad7124_wait_for_conv_ready(uint32_t timeout)
+{
+    int32_t ret;
+    int8_t ready = 0;
+
+    while (!ready && --timeout)
+    {
+        /* Read the value of the Status Register */
+        ret = ad7124_read_register(&ad7124_regs[AD7124_STATUS]);
+        if (ret < 0)
+            return ret;
+
+        /* Check the RDY bit in the Status Register */
+        ready = (ad7124_regs[AD7124_STATUS].value &
+                 AD7124_STATUS_REG_RDY) == 0;
+    }
+
+    return timeout ? 0 : AD7124_TIMEOUT;
+}
+
+/**
+ * @brief Read a conversion result from the AD7124
+ *
+ * Reads the value of the AD7124 data register.
+ *
+ * @return the data value that was read
+ */
+int32_t ad7124_read_data(void)
+{
+    int32_t read_data;
+    /* Read the value of the Data Register */
+    ad7124_read_register(&ad7124_regs[AD7124_DATA]);
+    /* Get the read result */
+    read_data = ad7124_regs[AD7124_DATA].value;
+
+    return read_data;
+}
+/**
+ * @brief Acquire analog data from the AD7124
+ *
+ * Reads analog input data from the AD7124 over SPI and stores the result in ad7124_analog.
+ *
+ * @details
+ * - For the selected channel the function:
+ *   - waits for the AD7124 to finish the conversion,
+ *   - reads the conversion result,
+ *   - computes the voltage (unipolar formula),
+ *   - computes the current from the voltage and the sense resistor (channels without a
+ *     current path can ignore this value).
+ *
+ * @note
+ * - After the conversion, the voltage and current are calculated and stored in the
+ *   global array ad7124_analog.
+ * - Voltage formula (unipolar): Code = (0xFFFFFF * AIN * Gain) / VREF, where VREF is the
+ *   reference voltage, GAIN the PGA gain and AD_CODE the full-scale code.
+ * - Current formula: current = voltage / resistance * 1000, where the resistance is
+ *   AD7124_RES and the factor 1000 converts the result to mA.
+ */
+// void ad7124_get_analog(void)
+// {
+//     int32_t read_data;
+//     uint8_t i;
+//     uint8_t channel;
+//     // for (i = STOP_NC_ADC; i < AD7124_CHANNEL_EN_MAX; i++)
+//     // {
+//     //     ad7124_regs[AD7124_CHANNEL_0].value = ad7124_channel_regs[i].value;
+//     //     ad7124_write_register(&ad7124_regs[AD7124_CHANNEL_0]);
+
+//     //     ad7124_read_register(&ad7124_regs[AD7124_STATUS]);
+//     while (ad7124_wait_for_conv_ready(AD7124_RDY))
+//         ; // Wait for the conversion to finish
+//     channel = ad7124_regs[AD7124_STATUS].value;
+//     ad7124_analog[channel].channel = channel;
+//     read_data = ad7124_read_data();
+//     ad7124_analog[channel].data = read_data;
+//     ad7124_analog[channel].voltage = (float)(read_data * VREF / GAIN / AD_CODE);                  // AD7124 unipolar formula: Code = (0xFFFFFF * AIN * Gain) / VREF
+//     ad7124_analog[channel].current = (float)(ad7124_analog[channel].voltage / AD7124_RES * 1000); // *1000 converts the unit to mA
+
+//     // ad7124_regs[AD7124_CHANNEL_0].value = 0;
+//     // ad7124_write_register(&ad7124_regs[AD7124_CHANNEL_0]);
+//     // }
+// }
+void ad7124_get_analog(uint8_t channel_nr)
+{
+    int32_t read_data;
+    // for (i = STOP_NC_ADC; i < AD7124_CHANNEL_EN_MAX; i++)
+    // {
+    ad7124_regs[AD7124_CHANNEL_0].value = ad7124_channel_regs[channel_nr].value;
+    ad7124_write_register(&ad7124_regs[AD7124_CHANNEL_0]);
+
+    while (ad7124_wait_for_conv_ready(AD7124_RDY))
+        ; // Wait for the conversion to finish
+    ad7124_analog[channel_nr].channel = channel_nr;
+    read_data = ad7124_read_data();
+    ad7124_analog[channel_nr].data = read_data;
+    ad7124_analog[channel_nr].voltage = (float)(read_data * VREF / GAIN / AD_CODE);                     // AD7124 unipolar formula: Code = (0xFFFFFF * AIN * Gain) / VREF
+    ad7124_analog[channel_nr].current = (float)(ad7124_analog[channel_nr].voltage / AD7124_RES * 1000); // *1000 converts the unit to mA
+
+    ad7124_regs[AD7124_CHANNEL_0].value = 0;
+    ad7124_write_register(&ad7124_regs[AD7124_CHANNEL_0]);
+    // }
+}
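A worked example of the conversion above, using the constants from ad7124.h (VREF = 2.5 V, GAIN = 1, AD_CODE = 0xFFFFFF, AD7124_RES = 100 ohms): a half-scale reading of 0x800000 (8388608) gives voltage = 8388608 * 2.5 / 1 / 16777215, which is about 1.25 V, and current = 1.25 / 100 * 1000 = 12.5 mA. A 4-20 mA loop sensed across the 100 ohm resistor therefore spans 0.4 V to 2.0 V, i.e. codes of roughly 0x28F5C2 to 0xCCCCCC.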
+/**
+ * @brief Configure the AD7124
+ *
+ * Initializes the AD7124: resets the device and programs its registers.
+ *
+ * @return 0 on success, a negative value on failure
+ */
+int32_t ad7124_setup(void)
+{
+    int32_t ret;
+    uint8_t reg_nr;
+    // board_spi_init(AD7124); // Re-initialize SPI1 first: the DAC161 shares the bus but needs different timing
+    /* Reset the device interface.*/
+    ret = ad7124_reset();
+    if (ret < 0)
+        return ret;
+    HAL_Delay(10);
+    ad7124_read_register(&ad7124_regs[AD7124_ID]);
+    /* Initialize registers AD7124_ADC_Control through AD7124_Filter_7. */
+    for (reg_nr = AD7124_STATUS; reg_nr < AD7124_OFFSET_0; reg_nr++) // Program every writable AD7124 register; read-only registers are skipped
+    {
+        // ret = ad7124_read_register(&ad7124_regs[reg_nr]);
+        if (ad7124_regs[reg_nr].rw == AD7124_RW)
+        {
+            ret = ad7124_write_register(&ad7124_regs[reg_nr]);
+            ret = ad7124_read_register(&ad7124_regs[reg_nr]);
+            if (ret < 0)
+                break;
+        }
+    }
+    HAL_GPIO_WritePin(AD7124_SYNC_GPIO_Port, AD7124_SYNC_Pin, GPIO_PIN_SET); // Release the AD7124 SYNC pin (enable conversions)
+    return ret;
+}
+
+/**
+ * @brief Read/write transfer to the AD7124 over SPI
+ *
+ * Performs a combined SPI read/write transfer with the AD7124.
+ *
+ * @param buff pointer to the data to transfer
+ * @param length number of bytes to transfer
+ *
+ * @return 0 on success, a negative value on failure
+ */
+int32_t ad7124_read_write_spi(uint8_t *buff, uint8_t length)
+{
+    int32_t ret;
+    board_spi_init(AD7124); // Re-initialize SPI1 first: the DAC161 shares the bus but needs different timing
+    board_spi_cs_on(AD7124);
+    ret = spi_transmit_receive(&hspi1, buff, length);
+    board_spi_cs_off(AD7124);
+    return ret;
+}
diff --git a/User/driver/ad7124.h b/User/driver/ad7124.h
new file mode 100644
index 0000000..24a668f
--- /dev/null
+++ b/User/driver/ad7124.h
@@ -0,0 +1,308 @@
+#ifndef __AD7124_H
+#define __AD7124_H
+
+/******************************************************************************/
+/***************************** Include Files **********************************/
+/******************************************************************************/
+#include "main.h"
+#include "user_spi.h"
+/******************************************************************************/
+/******************* Register map and register definitions ********************/
+/******************************************************************************/
+
+/* Communication Register bits */
+#define AD7124_COMM_REG_WEN (0 << 7)
+#define AD7124_COMM_REG_WR (0 << 6)
+#define AD7124_COMM_REG_RD (1 << 6)
+#define AD7124_COMM_REG_RA(x) ((x) & 0x3F)
+
+/* Status Register bits */
+#define AD7124_STATUS_REG_RDY (1 << 7)
+#define AD7124_STATUS_REG_ERROR_FLAG (1 << 6)
+#define AD7124_STATUS_REG_POR_FLAG (1 << 4)
+#define AD7124_STATUS_REG_CH_ACTIVE(x) ((x) & 0xF)
+
+/* ADC_Control Register bits */
+#define AD7124_ADC_CTRL_REG_DOUT_RDY_DEL (1 << 12)
+#define AD7124_ADC_CTRL_REG_CONT_READ (1 << 11)
+#define AD7124_ADC_CTRL_REG_DATA_STATUS (1 << 10)
+#define AD7124_ADC_CTRL_REG_CS_EN (1 << 9)
+#define AD7124_ADC_CTRL_REG_REF_EN (1 << 8)
+#define AD7124_ADC_CTRL_REG_POWER_MODE(x) (((x) & 0x3) << 6)
+#define AD7124_ADC_CTRL_REG_MODE(x) (((x) & 0xF) << 2)
+#define AD7124_ADC_CTRL_REG_CLK_SEL(x) (((x) & 0x3) << 0)
+
+/* IO_Control_1 Register bits */
+#define AD7124_IO_CTRL1_REG_GPIO_DAT2 (1 << 23)
+#define AD7124_IO_CTRL1_REG_GPIO_DAT1 (1 << 22)
+#define AD7124_IO_CTRL1_REG_GPIO_CTRL2 (1 << 19)
+#define AD7124_IO_CTRL1_REG_GPIO_CTRL1 (1 << 18)
+#define AD7124_IO_CTRL1_REG_PDSW (1 << 15)
+#define AD7124_IO_CTRL1_REG_IOUT1(x) (((x) & 0x7) << 11)
+#define AD7124_IO_CTRL1_REG_IOUT0(x) (((x) & 0x7) << 8)
+#define AD7124_IO_CTRL1_REG_IOUT_CH1(x) (((x) & 0xF) << 4)
+#define AD7124_IO_CTRL1_REG_IOUT_CH0(x) (((x) & 0xF) << 0)
+
+/* IO_Control_1 AD7124-8 specific bits */
+#define AD7124_8_IO_CTRL1_REG_GPIO_DAT4 (1 << 23)
+#define AD7124_8_IO_CTRL1_REG_GPIO_DAT3 (1 << 22)
+#define AD7124_8_IO_CTRL1_REG_GPIO_DAT2 (1 << 21)
+#define AD7124_8_IO_CTRL1_REG_GPIO_DAT1 (1 << 20)
+#define AD7124_8_IO_CTRL1_REG_GPIO_CTRL4 (1 << 19)
+#define AD7124_8_IO_CTRL1_REG_GPIO_CTRL3 (1 << 18)
+#define AD7124_8_IO_CTRL1_REG_GPIO_CTRL2 (1 << 17)
+#define AD7124_8_IO_CTRL1_REG_GPIO_CTRL1 (1 << 16)
+
+/* IO_Control_2 Register bits */
+#define AD7124_IO_CTRL2_REG_GPIO_VBIAS7 (1 << 
15) +#define AD7124_IO_CTRL2_REG_GPIO_VBIAS6 (1 << 14) +#define AD7124_IO_CTRL2_REG_GPIO_VBIAS5 (1 << 11) +#define AD7124_IO_CTRL2_REG_GPIO_VBIAS4 (1 << 10) +#define AD7124_IO_CTRL2_REG_GPIO_VBIAS3 (1 << 5) +#define AD7124_IO_CTRL2_REG_GPIO_VBIAS2 (1 << 4) +#define AD7124_IO_CTRL2_REG_GPIO_VBIAS1 (1 << 1) +#define AD7124_IO_CTRL2_REG_GPIO_VBIAS0 (1 << 0) + +/* IO_Control_2 AD7124-8 specific bits */ +#define AD7124_8_IO_CTRL2_REG_GPIO_VBIAS15 (1 << 15) +#define AD7124_8_IO_CTRL2_REG_GPIO_VBIAS14 (1 << 14) +#define AD7124_8_IO_CTRL2_REG_GPIO_VBIAS13 (1 << 13) +#define AD7124_8_IO_CTRL2_REG_GPIO_VBIAS12 (1 << 12) +#define AD7124_8_IO_CTRL2_REG_GPIO_VBIAS11 (1 << 11) +#define AD7124_8_IO_CTRL2_REG_GPIO_VBIAS10 (1 << 10) +#define AD7124_8_IO_CTRL2_REG_GPIO_VBIAS9 (1 << 9) +#define AD7124_8_IO_CTRL2_REG_GPIO_VBIAS8 (1 << 8) +#define AD7124_8_IO_CTRL2_REG_GPIO_VBIAS7 (1 << 7) +#define AD7124_8_IO_CTRL2_REG_GPIO_VBIAS6 (1 << 6) +#define AD7124_8_IO_CTRL2_REG_GPIO_VBIAS5 (1 << 5) +#define AD7124_8_IO_CTRL2_REG_GPIO_VBIAS4 (1 << 4) +#define AD7124_8_IO_CTRL2_REG_GPIO_VBIAS3 (1 << 3) +#define AD7124_8_IO_CTRL2_REG_GPIO_VBIAS2 (1 << 2) +#define AD7124_8_IO_CTRL2_REG_GPIO_VBIAS1 (1 << 1) +#define AD7124_8_IO_CTRL2_REG_GPIO_VBIAS0 (1 << 0) + +/* ID Register bits */ +#define AD7124_ID_REG_DEVICE_ID(x) (((x) & 0xF) << 4) +#define AD7124_ID_REG_SILICON_REV(x) (((x) & 0xF) << 0) + +/* Error Register bits */ +#define AD7124_ERR_REG_LDO_CAP_ERR (1 << 19) +#define AD7124_ERR_REG_ADC_CAL_ERR (1 << 18) +#define AD7124_ERR_REG_ADC_CONV_ERR (1 << 17) +#define AD7124_ERR_REG_ADC_SAT_ERR (1 << 16) +#define AD7124_ERR_REG_AINP_OV_ERR (1 << 15) +#define AD7124_ERR_REG_AINP_UV_ERR (1 << 14) +#define AD7124_ERR_REG_AINM_OV_ERR (1 << 13) +#define AD7124_ERR_REG_AINM_UV_ERR (1 << 12) +#define AD7124_ERR_REG_REF_DET_ERR (1 << 11) +#define AD7124_ERR_REG_DLDO_PSM_ERR (1 << 9) +#define AD7124_ERR_REG_ALDO_PSM_ERR (1 << 7) +#define AD7124_ERR_REG_SPI_IGNORE_ERR (1 << 6) +#define AD7124_ERR_REG_SPI_SLCK_CNT_ERR (1 << 5) +#define AD7124_ERR_REG_SPI_READ_ERR (1 << 4) +#define AD7124_ERR_REG_SPI_WRITE_ERR (1 << 3) +#define AD7124_ERR_REG_SPI_CRC_ERR (1 << 2) +#define AD7124_ERR_REG_MM_CRC_ERR (1 << 1) +#define AD7124_ERR_REG_ROM_CRC_ERR (1 << 0) + +/* Error_En Register bits */ +#define AD7124_ERREN_REG_MCLK_CNT_EN (1 << 22) +#define AD7124_ERREN_REG_LDO_CAP_CHK_TEST_EN (1 << 21) +#define AD7124_ERREN_REG_LDO_CAP_CHK(x) (((x) & 0x3) << 19) +#define AD7124_ERREN_REG_ADC_CAL_ERR_EN (1 << 18) +#define AD7124_ERREN_REG_ADC_CONV_ERR_EN (1 << 17) +#define AD7124_ERREN_REG_ADC_SAT_ERR_EN (1 << 16) +#define AD7124_ERREN_REG_AINP_OV_ERR_EN (1 << 15) +#define AD7124_ERREN_REG_AINP_UV_ERR_EN (1 << 14) +#define AD7124_ERREN_REG_AINM_OV_ERR_EN (1 << 13) +#define AD7124_ERREN_REG_AINM_UV_ERR_EN (1 << 12) +#define AD7124_ERREN_REG_REF_DET_ERR_EN (1 << 11) +#define AD7124_ERREN_REG_DLDO_PSM_TRIP_TEST_EN (1 << 10) +#define AD7124_ERREN_REG_DLDO_PSM_ERR_ERR (1 << 9) +#define AD7124_ERREN_REG_ALDO_PSM_TRIP_TEST_EN (1 << 8) +#define AD7124_ERREN_REG_ALDO_PSM_ERR_EN (1 << 7) +#define AD7124_ERREN_REG_SPI_IGNORE_ERR_EN (1 << 6) +#define AD7124_ERREN_REG_SPI_SCLK_CNT_ERR_EN (1 << 5) +#define AD7124_ERREN_REG_SPI_READ_ERR_EN (1 << 4) +#define AD7124_ERREN_REG_SPI_WRITE_ERR_EN (1 << 3) +#define AD7124_ERREN_REG_SPI_CRC_ERR_EN (1 << 2) +#define AD7124_ERREN_REG_MM_CRC_ERR_EN (1 << 1) +#define AD7124_ERREN_REG_ROM_CRC_ERR_EN (1 << 0) + +/* Channel Registers 0-15 bits */ +#define AD7124_CH_MAP_REG_CH_ENABLE (1 << 15) +#define AD7124_CH_MAP_REG_SETUP(x) (((x) 
& 0x7) << 12)
+#define AD7124_CH_MAP_REG_AINP(x) (((x) & 0x1F) << 5)
+#define AD7124_CH_MAP_REG_AINM(x) (((x) & 0x1F) << 0)
+
+/* Configuration Registers 0-7 bits */
+#define AD7124_CFG_REG_BIPOLAR (1 << 11)
+#define AD7124_CFG_REG_BURNOUT(x) (((x) & 0x3) << 9)
+#define AD7124_CFG_REG_REF_BUFP (1 << 8)
+#define AD7124_CFG_REG_REF_BUFM (1 << 7)
+#define AD7124_CFG_REG_AIN_BUFP (1 << 6)
+#define AD7124_CFG_REG_AINN_BUFM (1 << 5)
+#define AD7124_CFG_REG_REF_SEL(x) (((x) & 0x3) << 3)
+#define AD7124_CFG_REG_PGA(x) (((x) & 0x7) << 0)
+
+/* Filter Register 0-7 bits */
+#define AD7124_FILT_REG_FILTER(x) (((x) & 0x7) << 21)
+#define AD7124_FILT_REG_REJ60 (1 << 20)
+#define AD7124_FILT_REG_POST_FILTER(x) (((x) & 0x7) << 17)
+#define AD7124_FILT_REG_SINGLE_CYCLE (1 << 16)
+#define AD7124_FILT_REG_FS(x) (((x) & 0x7FF) << 0)
+
+/******************************************************************************/
+/*************************** Types Declarations *******************************/
+/******************************************************************************/
+
+/*! AD7124 registers list*/
+typedef enum
+{
+    AD7124_STATUS = 0X00, // Read-only status register; reports the chip state
+    AD7124_ADC_CONTROL,   // Control register; sets the operating mode, sampling behaviour, etc.
+    AD7124_DATA,          // Data register; holds the conversion result
+    AD7124_IOCON1,        // Sets the excitation currents and related options; useful for RTD measurements
+    AD7124_IOCON2,        // Enables the bias voltages
+    AD7124_ID,            // ID register; some parts report an ID that differs from the datasheet, handy for testing SPI communication
+    AD7124_ERROR,         // Error register; read it before register accesses to check whether the chip is ready for read/write
+    AD7124_ERROR_EN,      // Enables or disables the diagnostic functions via the corresponding bits
+    AD7124_MCLK_COUNT,    // Monitors the master clock
+    AD7124_CHANNEL_0,     // Selects the ADC input pair and its setup; the Setup field picks which Config/Filter/Offset/Gain register set is used (eight setups available)
+    AD7124_CHANNEL_1,     // The channel registers are not tied to pin order; the scan order is defined entirely by how they are programmed
+    AD7124_CHANNEL_2,
+    AD7124_CHANNEL_3,
+    AD7124_CHANNEL_4,
+    AD7124_CHANNEL_5,
+    AD7124_CHANNEL_6,
+    AD7124_CHANNEL_7,
+    AD7124_CHANNEL_8,
+    AD7124_CHANNEL_9,
+    AD7124_CHANNEL_10,
+    AD7124_CHANNEL_11,
+    AD7124_CHANNEL_12,
+    AD7124_CHANNEL_13,
+    AD7124_CHANNEL_14,
+    AD7124_CHANNEL_15,
+    AD7124_CONFIG_0, // Sets polarity, gain, reference selection and similar parameters
+    AD7124_CONFIG_1,
+    AD7124_CONFIG_2,
+    AD7124_CONFIG_3,
+    AD7124_CONFIG_4,
+    AD7124_CONFIG_5,
+    AD7124_CONFIG_6,
+    AD7124_CONFIG_7,
+    AD7124_FILTER_0,
+    AD7124_FILTER_1, // Filter configuration
+    AD7124_FILTER_2,
+    AD7124_FILTER_3,
+    AD7124_FILTER_4,
+    AD7124_FILTER_5,
+    AD7124_FILTER_6,
+    AD7124_FILTER_7,
+    AD7124_OFFSET_0,
+    AD7124_OFFSET_1,
+    AD7124_OFFSET_2,
+    AD7124_OFFSET_3,
+    AD7124_OFFSET_4,
+    AD7124_OFFSET_5,
+    AD7124_OFFSET_6,
+    AD7124_OFFSET_7,
+    AD7124_GAIN_0,
+    AD7124_GAIN_1,
+    AD7124_GAIN_2,
+    AD7124_GAIN_3,
+    AD7124_GAIN_4,
+    AD7124_GAIN_5,
+    AD7124_GAIN_6,
+    AD7124_GAIN_7,
+    AD7124_REG_NO
+} ad7124_registers_addr_e; // Register addresses
+typedef enum
+{
+    AD7124_SIZE_1 = 1,
+    AD7124_SIZE_2 = 2,
+    AD7124_SIZE_3 = 3,
+} ad7124_registers_size_e; // Register size in bytes
+typedef enum
+{
+    AD7124_RW = 1,
+    AD7124_R = 2,
+    AD7124_W = 3,
+} ad7124_registers_rw_e; // Allowed register access (read/write)
+
+typedef struct
+{
+    int addr;  // Register address, ad7124_registers_addr_e
+    int value; // Register value
+    int size;  // Register size in bytes
+    int rw;    // Allowed access, ad7124_registers_rw_e
+} ad7124_st_reg_t; // AD7124 register descriptor
+
+typedef enum
+{
+    STOP_NC_ADC = 0, // AD7124_CHANNEL_EN_2
+    STOP_NO_ADC,     // AD7124_CHANNEL_EN_3
+    AI_IN1_ADC,      // AD7124_CHANNEL_EN_4
+    AI_IN2_ADC,      // AD7124_CHANNEL_EN_5
+    P1_DI2_ADC,      // AD7124_CHANNEL_EN_6
+    P1_DI1_ADC,      // AD7124_CHANNEL_EN_7
+    P1_AI_ADC,       // AD7124_CHANNEL_EN_8
+    P2_DI2_ADC,      // AD7124_CHANNEL_EN_9
+    P2_DI1_ADC,      // AD7124_CHANNEL_EN_10
+    P2_AI_ADC,       // AD7124_CHANNEL_EN_11
+    AD7124_CHANNEL_EN_MAX,
+} ad7124_channel_e; // Channels used in this project
+
+typedef struct
+{
+    uint8_t channel;
+    int32_t data;  // Raw conversion result
+    float voltage; // Voltage value
+    float current; // Current value
+} ad7124_analog_t; // Per-channel sample record
+
+#define AD7124_CRC8_POLYNOMIAL_REPRESENTATION 0x07 /* x8 + x2 + x + 1 */
+#define AD7124_DISABLE_CRC 0 // CRC disabled by default
+#define AD7124_USE_CRC 1
+
+#define AD7124_RDY 10000 // Polling timeout count
+
+#define AD7124_RES 100   // Current-sense resistor (ohms)
+#define VREF 2.5f        // Reference voltage
+#define GAIN 1           // PGA gain; must match the configuration registers
+#define AD_CODE 0XFFFFFF // Full-scale code of the 24-bit ADC
+
+/*! Reads the value of the specified register. */
+int32_t ad7124_read_register(ad7124_st_reg_t *p_reg); // Read a register
+
+/*! Writes the value of the specified register. */
+int32_t ad7124_write_register(ad7124_st_reg_t *reg); // Write a register
+
+/*! Reads the value of the specified register without a device state check. */
+int32_t ad7124_no_check_read_register(ad7124_st_reg_t *p_reg);
+
+/*! Writes the value of the specified register without a device state check. */
+int32_t ad7124_no_check_write_register(ad7124_st_reg_t *reg);
+
+/*! Resets the device. */
+int32_t ad7124_reset(void); // Reset the AD7124
+
+/*! Waits until the device can accept read and write user actions. */
+int32_t ad7124_wait_for_spi_ready(uint32_t timeout); // Poll the chip status until read/write access is possible
+
+/*! Waits until the device finishes the power-on reset operation. */
+int32_t ad7124_wait_to_power_on(uint32_t timeout);
+
+/*! Waits until a new conversion result is available. */
+int32_t ad7124_wait_for_conv_ready(uint32_t timeout); // Wait for a conversion to finish
+
+/*! Reads the conversion result from the device. */
+int32_t ad7124_read_data(void);
+void ad7124_get_analog(uint8_t channel_nr);
+/*! Initializes the AD7124. */
+int32_t ad7124_setup(void);
+
+int32_t ad7124_read_write_spi(uint8_t *buff, uint8_t length);
+
+#endif /* __AD7124_H__ */
diff --git a/User/driver/dac161s997.c b/User/driver/dac161s997.c
new file mode 100644
index 0000000..6341bc3
--- /dev/null
+++ b/User/driver/dac161s997.c
@@ -0,0 +1,56 @@
+#include "dac161s997.h"
+#include "user_spi.h"
+
+/**
+ * @brief Write a DAC161S997 register
+ *
+ * Writes the given value to the specified DAC161S997 register over SPI.
+ *
+ * @param dac_num DAC chip to address
+ * @param reg register address to write
+ * @param data 16-bit data to write
+ */
+void dac161s997_write_reg(chip_type_e dac_num, uint8_t reg, uint16_t data)
+{
+    uint8_t data_buffer[3] = {0, 0, 0};
+    data_buffer[0] = reg;
+    data_buffer[1] = data >> 8;
+    data_buffer[2] = data;
+    board_spi_init(dac_num); // Re-initialize SPI1: the ADC shares the bus but needs different timing
+    board_spi_cs_on(dac_num);
+    spi_transmit_receive(&hspi1, data_buffer, 3);
+    board_spi_cs_off(dac_num);
+}
+
+/**
+ * @brief Initialize the DAC161S997 DACs
+ */
+void dac161s997_init(void)
+{
+    dac161s997_write_reg(DAC161S997_1, DAC161S997_ERR_LOW_REG, 0xFFFF);
+    dac161s997_write_reg(DAC161S997_1, DAC161S997_ERR_CONFIG_REG, 0x070E);
+
+    dac161s997_write_reg(DAC161S997_2, DAC161S997_ERR_LOW_REG, 0xFFFF);
+    dac161s997_write_reg(DAC161S997_2, DAC161S997_ERR_CONFIG_REG, 0x070E);
+}
+
+/**
+ * @brief Set the DAC161S997 output
+ *
+ * Sets the output current of the selected DAC161S997.
+ *
+ * @param dac_num DAC chip to address
+ * @param current current to output, in mA
+ */
+void dac161s997_output(chip_type_e dac_num, float current)
+{
+    uint32_t dac_code = (uint32_t)((current * 65535.0f) / 24.0f);
+    uint8_t data_buffer[3] = {0, 0, 0};
+    data_buffer[0] = DAC161S997_DACCODE_REG;
+    data_buffer[1] = dac_code >> 8;
+    data_buffer[2] = dac_code;
+    board_spi_init(dac_num); // Re-initialize SPI1: the ADC shares the bus but needs different timing
+    board_spi_cs_on(dac_num);
+    spi_transmit_receive(&hspi1, data_buffer, 3);
+    board_spi_cs_off(dac_num);
+}
diff --git a/User/driver/dac161s997.h b/User/driver/dac161s997.h
new file mode 100644
index 0000000..ea58f8f
--- /dev/null
+++ b/User/driver/dac161s997.h
@@ -0,0 +1,28 @@
+#ifndef __DAC161S997_H__
+#define __DAC161S997_H__
+
+#include "main.h"
+#include "user_spi.h" /* for chip_type_e */
+/* Defines ********************************************************************/
+/**
+ * @defgroup DAC161S997_REGS
+ * @{
+ */
+#define DAC161S997_XFR_REG 0x01            /**< Command: transfer data into register (protected write mode) */
+#define DAC161S997_NOP_REG 0x02            /**< Command: does nothing except resetting the timeout timer */
+#define DAC161S997_PROTECT_REG_WR_REG 0x03 /**< 1 means protected write mode in effect */
+#define DAC161S997_DACCODE_REG 0x04        /**< 16 bit DAC code to be written */
+#define DAC161S997_ERR_CONFIG_REG 0x05     /**< Configuration of error states */
+#define DAC161S997_ERR_LOW_REG 0x06        /**< DAC code for the <4mA alarm level */
+#define DAC161S997_ERR_HIGH_REG 0x07       /**< DAC code for the >20mA alarm level */
+#define DAC161S997_RESET_REG 0x08          /**< Command: reset the chip */
+#define DAC161S997_STATUS_REG 0x09         /**< Status of the chip */
+
+#define DAC161S997_SPI SPI1
+
+
+void dac161s997_write_reg(chip_type_e dac_num, uint8_t reg, uint16_t data);
+uint16_t dac161s997_read(uint8_t reg);
+extern void dac161s997_init(void);
+extern void dac161s997_output(chip_type_e dac_num, float current);
+
+#endif
diff --git a/User/driver/ht1200m.c b/User/driver/ht1200m.c
new file mode 100644
index 0000000..bf2421c
--- /dev/null
+++ b/User/driver/ht1200m.c
@@ -0,0 +1,12 @@
+#include "ht1200m.h"
+
+extern void hart_ht1200m_reset(void)
+{
+    HART1_RESET_ON;
+    HART2_RESET_ON;
+    HAL_Delay(10);
+    HART1_RESET_OFF;
+    HART2_RESET_OFF;
+    HART1_RTS_RECEIVE;
+    HART2_RTS_RECEIVE;
+}
diff --git a/User/driver/ht1200m.h b/User/driver/ht1200m.h
new file mode 100644
index 0000000..c439b9a
--- /dev/null
+++ b/User/driver/ht1200m.h
@@ -0,0 +1,19 @@
+#ifndef __HT1200M_H__
+#define __HT1200M_H__
+
+#include "main.h"
+
+#define HART1_RESET_ON HAL_GPIO_WritePin(HART1_RST_GPIO_Port, HART1_RST_Pin, GPIO_PIN_RESET) // Pull the pin low to reset the module
+#define HART1_RESET_OFF HAL_GPIO_WritePin(HART1_RST_GPIO_Port, HART1_RST_Pin, GPIO_PIN_SET)
+
+#define HART2_RESET_ON HAL_GPIO_WritePin(HART2_RST_GPIO_Port, HART2_RST_Pin, GPIO_PIN_RESET) // Pull the pin low to reset the module
+#define HART2_RESET_OFF HAL_GPIO_WritePin(HART2_RST_GPIO_Port, HART2_RST_Pin, GPIO_PIN_SET)
+
+#define HART1_RTS_RECEIVE HAL_GPIO_WritePin(HART1_RTS_GPIO_Port, HART1_RTS_Pin, GPIO_PIN_SET)
+#define HART1_RTS_SEND HAL_GPIO_WritePin(HART1_RTS_GPIO_Port, HART1_RTS_Pin, GPIO_PIN_RESET) // Pull the pin low to transmit
+
+#define HART2_RTS_RECEIVE HAL_GPIO_WritePin(HART2_RTS_GPIO_Port, HART2_RTS_Pin, GPIO_PIN_SET)
+#define HART2_RTS_SEND HAL_GPIO_WritePin(HART2_RTS_GPIO_Port, HART2_RTS_Pin, GPIO_PIN_RESET) // Pull the pin low to transmit
+
+extern void hart_ht1200m_reset(void);
+#endif
diff --git a/User/system/user_spi.c b/User/system/user_spi.c
new file mode 100644
index 0000000..d932d88
--- /dev/null
+++ b/User/system/user_spi.c
@@ -0,0 +1,160 @@
+#include "user_spi.h"
+#include <string.h>
+
+#define SPI_BUFFER_SIZE 255
+static uint8_t spi_rx_buffer[SPI_BUFFER_SIZE] = {0};
+int32_t spi_transmit_receive(SPI_HandleTypeDef *hspi, uint8_t *data_write, uint8_t bytes_number)
+{
+    if (HAL_SPI_TransmitReceive(hspi, data_write, (uint8_t *)spi_rx_buffer, bytes_number, 1000) != HAL_OK)
+    {
+        return FAIL;
+    }
+    /* Copy the SPI receive buffer to the supplied data buffer to return to caller*/
+    memcpy(data_write, spi_rx_buffer, bytes_number);
+    return TRUE;
+}
+
+void ad7124_spi_init(void)
+{
+    hspi1.Instance = SPI1;
+    hspi1.Init.Mode = SPI_MODE_MASTER;
+    hspi1.Init.Direction = SPI_DIRECTION_2LINES;
+    hspi1.Init.DataSize = SPI_DATASIZE_8BIT;
+    hspi1.Init.CLKPolarity = SPI_POLARITY_HIGH;
+    hspi1.Init.CLKPhase = SPI_PHASE_2EDGE;
+    hspi1.Init.NSS =
SPI_NSS_SOFT; + hspi1.Init.BaudRatePrescaler = SPI_BAUDRATEPRESCALER_32; + hspi1.Init.FirstBit = SPI_FIRSTBIT_MSB; + hspi1.Init.TIMode = SPI_TIMODE_DISABLE; + hspi1.Init.CRCCalculation = SPI_CRCCALCULATION_DISABLE; + hspi1.Init.CRCPolynomial = 10; + if (HAL_SPI_Init(&hspi1) != HAL_OK) + { + Error_Handler(); + } +} + +void dac161s997_spi_init(void) +{ + hspi1.Instance = SPI1; + hspi1.Init.Mode = SPI_MODE_MASTER; + hspi1.Init.Direction = SPI_DIRECTION_2LINES; + hspi1.Init.DataSize = SPI_DATASIZE_8BIT; + hspi1.Init.CLKPolarity = SPI_POLARITY_LOW; + hspi1.Init.CLKPhase = SPI_PHASE_1EDGE; + hspi1.Init.NSS = SPI_NSS_SOFT; + hspi1.Init.BaudRatePrescaler = SPI_BAUDRATEPRESCALER_32; + hspi1.Init.FirstBit = SPI_FIRSTBIT_MSB; + hspi1.Init.TIMode = SPI_TIMODE_DISABLE; + hspi1.Init.CRCCalculation = SPI_CRCCALCULATION_DISABLE; + hspi1.Init.CRCPolynomial = 10; + if (HAL_SPI_Init(&hspi1) != HAL_OK) + { + Error_Handler(); + } +} + +void ad7124_cs_on(void) +{ + HAL_GPIO_WritePin(DAC1_CS_GPIO_Port, DAC1_CS_Pin, GPIO_PIN_SET); + HAL_GPIO_WritePin(DAC1_CS_GPIO_Port, DAC2_CS_Pin, GPIO_PIN_SET); + HAL_GPIO_WritePin(ADC_CS_GPIO_Port, ADC_CS_Pin, GPIO_PIN_RESET); +} + +void ad7124_cs_off(void) +{ + HAL_GPIO_WritePin(DAC1_CS_GPIO_Port, DAC1_CS_Pin, GPIO_PIN_SET); + HAL_GPIO_WritePin(DAC1_CS_GPIO_Port, DAC2_CS_Pin, GPIO_PIN_SET); + HAL_GPIO_WritePin(ADC_CS_GPIO_Port, ADC_CS_Pin, GPIO_PIN_SET); +} +void dac161s997_cs_on(chip_type_e dac_num) +{ + switch (dac_num) + { + case DAC161S997_1: + HAL_GPIO_WritePin(DAC1_CS_GPIO_Port, DAC1_CS_Pin, GPIO_PIN_RESET); + HAL_GPIO_WritePin(DAC1_CS_GPIO_Port, DAC2_CS_Pin, GPIO_PIN_SET); + HAL_GPIO_WritePin(ADC_CS_GPIO_Port, ADC_CS_Pin, GPIO_PIN_SET); + break; + case DAC161S997_2: + HAL_GPIO_WritePin(DAC1_CS_GPIO_Port, DAC2_CS_Pin, GPIO_PIN_RESET); + HAL_GPIO_WritePin(DAC1_CS_GPIO_Port, DAC1_CS_Pin, GPIO_PIN_SET); + HAL_GPIO_WritePin(ADC_CS_GPIO_Port, ADC_CS_Pin, GPIO_PIN_SET); + break; + default: + break; + } +} +void dac161s997_cs_off(uint8_t dac_num) +{ + switch (dac_num) + { + case DAC161S997_1: + HAL_GPIO_WritePin(DAC1_CS_GPIO_Port, DAC1_CS_Pin, GPIO_PIN_SET); + HAL_GPIO_WritePin(DAC1_CS_GPIO_Port, DAC2_CS_Pin, GPIO_PIN_SET); + HAL_GPIO_WritePin(ADC_CS_GPIO_Port, ADC_CS_Pin, GPIO_PIN_SET); + break; + case DAC161S997_2: + HAL_GPIO_WritePin(DAC1_CS_GPIO_Port, DAC1_CS_Pin, GPIO_PIN_SET); + HAL_GPIO_WritePin(DAC1_CS_GPIO_Port, DAC2_CS_Pin, GPIO_PIN_SET); + HAL_GPIO_WritePin(ADC_CS_GPIO_Port, ADC_CS_Pin, GPIO_PIN_SET); + break; + default: + break; + } +} +void board_spi_init(chip_type_e chip_type) +{ + switch (chip_type) + { + case AD7124: + ad7124_spi_init(); + break; + case DAC161S997_1: + dac161s997_spi_init(); + break; + case DAC161S997_2: + dac161s997_spi_init(); + break; + default: + dac161s997_spi_init(); + break; + } +} + +void board_spi_cs_on(chip_type_e chip_type) +{ + switch (chip_type) + { + case AD7124: + ad7124_cs_on(); + break; + case DAC161S997_1: + dac161s997_cs_on(DAC161S997_1); + break; + case DAC161S997_2: + dac161s997_cs_on(DAC161S997_2); + break; + default: + dac161s997_cs_on(DAC161S997_2); + break; + } +} + +void board_spi_cs_off(chip_type_e chip_type) +{ + switch (chip_type) + { + case AD7124: + ad7124_cs_off(); + break; + case DAC161S997_1: + dac161s997_cs_off(DAC161S997_1); + break; + case DAC161S997_2: + dac161s997_cs_off(DAC161S997_2); + break; + default: + dac161s997_cs_off(DAC161S997_2); + break; + } +} diff --git a/User/system/user_spi.h b/User/system/user_spi.h new file mode 100644 index 0000000..238cfaa --- /dev/null +++ b/User/system/user_spi.h @@ 
-0,0 +1,29 @@
+#ifndef __USER_SPI_H__
+#define __USER_SPI_H__
+
+/* Includes ------------------------------------------------------------------*/
+#include "main.h"
+#include "spi.h"
+
+typedef enum
+{
+    AD7124 = 0,
+    DAC161S997_1 = 1,
+    DAC161S997_2 = 2,
+} chip_type_e; // Chip selected on the shared SPI1 bus
+
+extern int32_t spi_transmit_receive(SPI_HandleTypeDef *hspi, uint8_t *data_write, uint8_t bytes_number);
+
+extern void board_spi_init(chip_type_e chip_type);
+extern void board_spi_cs_on(chip_type_e chip_type);
+extern void board_spi_cs_off(chip_type_e chip_type);
+
+void dac161s997_cs_on(chip_type_e dac_num);
+void dac161s997_cs_off(uint8_t dac_num);
+void dac161s997_spi_init(void);
+
+void ad7124_cs_off(void);
+void ad7124_cs_on(void);
+void ad7124_spi_init(void);
+
+#endif
diff --git a/keilkill.bat b/keilkill.bat
new file mode 100644
index 0000000..a7132cc
--- /dev/null
+++ b/keilkill.bat
@@ -0,0 +1,30 @@
+del *.bak /s
+del *.ddk /s
+del *.edk /s
+del *.lst /s
+del *.lnp /s
+del *.mpf /s
+del *.mpj /s
+del *.obj /s
+del *.omf /s
+::del *.opt /s  ::do not delete the J-Link settings
+del *.plg /s
+del *.rpt /s
+del *.tmp /s
+del *.__i /s
+del *.crf /s
+del *.o /s
+del *.d /s
+del *.axf /s
+del *.tra /s
+del *.dep /s
+del JLinkLog.txt /s
+
+del *.iex /s
+del *.htm /s
+@REM del *.sct /s
+del *.map /s
+
+del *.dbgconf /s
+del *.uvguix.* /s
+exit
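A minimal bring-up sketch showing how the pieces added by this commit are expected to fit together in main(). This is illustrative only, not part of the commit: the MX_*_Init and SystemClock_Config names come from the function list in TEST2.ioc, while MX_LWIP_Process() and the CubeMX-generated headers are assumptions based on the usual bare-metal LwIP template.

/* main.c sketch (assumptions noted above). */
#include "main.h"
#include "leds.h"
#include "ad7124.h"
#include "dac161s997.h"
#include "ht1200m.h"
#include "tcpserverc.h"

void SystemClock_Config(void);                       /* generated by CubeMX */
extern ad7124_analog_t ad7124_analog[];              /* defined in ad7124.c (no extern in the header) */
uint8_t tcp_echo_flags = 0;                          /* referenced by tcpserverc.c */

int main(void)
{
    HAL_Init();
    SystemClock_Config();
    MX_GPIO_Init();
    MX_DMA_Init();
    MX_LWIP_Init();
    MX_SPI1_Init();

    hart_ht1200m_reset();  /* release the two HT1200M HART modems */
    ad7124_setup();        /* reset and program the AD7124 */
    dac161s997_init();     /* configure both 4-20 mA DAC outputs */
    tcp_echo_init();       /* listen on TCP_ECHO_PORT (5001) */

    while (1)
    {
        MX_LWIP_Process();                /* poll the Ethernet/lwIP stack (assumed name) */
        ad7124_get_analog(P1_AI_ADC);     /* sample one of the project channels */
        dac161s997_output(DAC161S997_1, ad7124_analog[P1_AI_ADC].current);
        user_send_data((uint8_t *)&ad7124_analog[P1_AI_ADC].data, 4);
        leds_on(LEDS2_GREEN);
    }
}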